# Copyright (c) 2011 Adi Roiban.
# See LICENSE for details.
"""
PQM related targets for paver.
"""
from __future__ import (
absolute_import,
print_function,
with_statement,
unicode_literals,
)
import os
import re
import sys
from paver.easy import call_task, needs, task
from paver.tasks import environment, consume_args, cmdopts
from brink.utils import BrinkPaver
from brink.configuration import SETUP
pave = BrinkPaver(SETUP)
RE_REVIEWERS = '.*reviewers{0,1}:{0,1} @.*'
RE_NEEDS_CHANGES = r'.*needs{0,1}[\-_]changes{0,1}.*'
RE_CHANGES_APPROVED = r'.*changes{0,1}[\-_]approved{0,1}.*'
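# For illustration, lines these patterns are meant to match (not exhaustive):
#   RE_REVIEWERS:        "reviewers: @adi @john"
#   RE_NEEDS_CHANGES:    "needs-changes" / "needs_changes"
#   RE_CHANGES_APPROVED: "changes-approved" / "changes_approved"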
_REQUIRED = object()
@task
@consume_args
def github(args):
"""
Helpers for interacting with GitHub website.
Admin commands:
* token PASSWORD - Get a new token to be used by PQM.
"""
def github_help():
print("Usage: github COMMNAND ARGUMENTS.")
print("")
print("List of commands:")
print(" open - Open the repository in githuh.")
print(" review - Open the page for creating a new pull request")
if not len(args):
github_help()
sys.exit(1)
import webbrowser
if args[0] == 'open':
webbrowser.open_new_tab(SETUP['repository']['github'])
sys.exit(0)
if args[0] == 'new':
pave.git.publish()
url = "%s/compare/%s?expand=1" % (
SETUP['repository']['github'], pave.git.branch_name)
webbrowser.open_new_tab(url)
sys.exit(0)
if args[0] == 'token':
print(_github_token(username='chevah-robot', password=args[1]))
sys.exit(0)
def _github_token(username, password):
"""
Return an authorization token from GitHub for chevah-robot account.
"""
from github import Github
github = Github(username, password, user_agent='pygithub/chevah-pqm')
user = github.get_user()
authorization = user.create_authorization(
scopes=['repo'],
note='Chevah PQM',
note_url='https://buildbot.chevah.com')
return authorization.token
def _get_repo(token):
"""
Return GitHub repository.
"""
from github import Github
repo = SETUP['repository']['github'].split('/')
repo = repo[-2] + '/' + repo[-1]
github = Github(token, user_agent='pygithub/chevah-pqm')
return github.get_repo(repo)
def _get_pull(repo, pull_id):
"""
Return the pull request details.
"""
from github import GithubException
try:
return repo.get_pull(pull_id)
except GithubException as error:
print("Failed to get PR details")
print(str(error))
sys.exit(1)
def _get_protected_branch(repo, branch):
"""
Return the branch details.
"""
from github import GithubException
try:
return repo.get_protected_branch(branch)
except GithubException as error:
print("Failed to get protected branch details")
print(str(error))
sys.exit(1)
def _check_review_properties(token, pull_id):
"""
Helper for calling this task from multiple places, without messing
with paver arguments.
"""
from github import GithubException
try:
repo = _get_repo(token=token)
pull_request = _get_pull(repo, pull_id=pull_id)
branch_name = pull_request.head.ref
branch_sha = pull_request.head.sha.lower()
# Fail early if branch can not be merged.
if not pull_request.mergeable:
print("\n> GitHub said that branch can not be merged.")
print("Check PR %s for %s.\n" % (pull_id, branch_name))
sys.exit(1)
master = _get_protected_branch(repo, 'master')
if master.protected and pull_request.mergeable_state == u'blocked':
branch_commit = repo.get_commit(branch_sha)
combined_status = branch_commit.get_combined_status().statuses
if not combined_status:
print('No branch status recorded by GitHub.')
else:
print('GitHub said that branch status is:')
for status in combined_status:
print(" %s: %s (%s)" % (
status.context, status.state, status.description))
print('The branch merge is blocked.')
print('Check GitHub PR page for more details.')
sys.exit(1)
comments = []
for comment in pull_request.get_issue_comments():
comments.append((
comment.user.login, comment.body, comment.updated_at))
reviews = []
for review in pull_request.get_reviews():
# For now the GitHub review has no modified/updated date.
# We assume they are listed in the order they were made.
reviews.append((review.user.login, review.state))
except GithubException as error:
print("Failed to get GitHub details")
print(str(error))
sys.exit(1)
def getReviewTitle(content, ticket_id=None):
"""
Parse line and return merge commit message.
"""
result = content.strip()
if len(result.split('\n')) > 1:
print("Commit merge message should be single line.")
sys.exit(1)
# Make sure the title does not start with the ticket id, as it will
# be appended later.
if ticket_id:
first_word = result.split(' ')[0]
if ticket_id in first_word:
# Redo the title without the first word.
result = ' '.join(result.split(' ')[1:])
# Make sure first letter is upper case.
result = result[0].upper() + result[1:]
# Make sure the message ends with '.' ... just for style.
result = result.rstrip('.').strip() + '.'
return result
def checkReviewApproval(comments, reviews, reviewers, sha):
"""
Check comments to see if review was approved by all reviewers.
"""
# We sort all comments in reverse order as they were updated.
comments = sorted(
comments, key=lambda comment: comment[2], reverse=True)
# This is iterated multiple times.
reviews = list(reversed(reviews))
# Get last comment of each review and check that the review was
# approved.
pending_approval = []
for reviewer in reviewers:
if _approvedByReviewer(reviewer, comments, reviews):
# All good.
continue
pending_approval.append((reviewer, 'Not approved yet.'))
if pending_approval:
print("Review not approved. See list below")
for reason in pending_approval:
print(reason)
sys.exit(1)
ticket_id = pave.getTicketIDFromBranchName(branch_name)
reviewers = _getGitHubReviewers(pull_request.body)
checkReviewApproval(
comments=comments,
reviews=reviews,
reviewers=reviewers,
sha=branch_sha,
)
review_title = getReviewTitle(pull_request.title, ticket_id)
commit_message = "[#%s] %s" % (ticket_id, review_title)
return (repo, pull_request, commit_message)
def _getGitHubReviewers(description):
"""
Return a list of reviewers from review request description.
"""
results = []
for line in description.splitlines():
result = re.match(RE_REVIEWERS, line)
if not result:
continue
for word in line.split(' '):
if word.startswith('@'):
results.append(word[1:].strip())
return results
def _approvedByReviewer(reviewer, comments, reviews):
"""
Return `True` if reviewer has approved the changes.
Approvals can come from multiple sources
* GitHub comment
* GitHub review actions
"""
reviewer_approval = False
# First try to see if the marker is in the comments.
for author, content, updated_at in comments:
action = _getActionFromComment(content)
if reviewer != author:
# Not a comment from reviewer.
continue
if action in ['needs-changes']:
# We have a needs-changes, before an approval.
reviewer_approval = False
break
if action == 'changes-approved':
reviewer_approval = True
break
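# Review actions below take precedence over comment markers: an explicit
# APPROVED or CHANGES_REQUESTED review from the reviewer decides the result.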
for author, state in reviews:
if state == u'COMMENTED':
# Just a comment. Can be ignored.
continue
if reviewer != author:
# Not a review from reviewer.
continue
if state == u'CHANGES_REQUESTED':
# Change requested before an approval.
return False
if state == u'APPROVED':
return True
# If we are here, it means that we don't have a review action from the
# reviewer, so we return the value from the comment action.
return reviewer_approval
def _getActionFromComment(comment):
"""
Return action associated with comment.
Supported commands:
* changes-approved - all good
* needs-changes - more work
"""
for line in comment.splitlines():
line = line.lower()
if re.match(RE_CHANGES_APPROVED, line):
return 'changes-approved'
if re.match(RE_NEEDS_CHANGES, line):
return 'needs-changes'
return 'no-action'
def _get_environment(name, default=_REQUIRED):
"""
Get environment variable.
"""
value = os.environ.get(name, default)
if value is _REQUIRED:
raise AssertionError(
'Variable %s not found in environment !' % (name))
return value
def _get_github_environment():
"""
Get GitHub data from environment.
"""
pull_id = _get_environment('GITHUB_PULL_ID')
try:
pull_id = int(pull_id)
except Exception:
print("Invalid pull_id: %s" % str(pull_id))
sys.exit(1)
token = _get_environment('GITHUB_TOKEN', default='')
if token == '':
token = None
return {
'token': token,
'pull_id': pull_id,
}
@task
def merge_init():
"""
Check if current branch can be merged.
Environment variables:
* GITHUB_PULL_ID
* GITHUB_TOKEN
"""
github_env = _get_github_environment()
from git import Repo
repo = Repo(os.getcwd())
git = repo.git
branch_name = _get_environment('BRANCH', repo.head.ref.name)
if branch_name == 'master' or branch_name.startswith('series-'):
print("You can not merge the main branches.")
sys.exit(1)
try:
int(pave.getTicketIDFromBranchName(branch_name))
except Exception:
print("Branch name '%s' does not start with ticket id." % (
branch_name))
sys.exit(1)
# Check pull request details on Github.
(_, pull_request, message) = _check_review_properties(
token=github_env['token'], pull_id=github_env['pull_id'])
pr_branch_name = pull_request.head.ref
remote_sha = pull_request.head.sha.lower()
local_sha = repo.head.commit.hexsha
if remote_sha != local_sha:
print("Local branch and review branch are at different revision.")
print("Local sha: %s %s" % (local_sha, branch_name))
print("Review sha: %s %s" % (remote_sha, pr_branch_name))
sys.exit(1)
# Clear any unused files from this repo as this might be done
# before a release.
print('Clean repo')
print(git.clean(force=True, quiet=True))
def _pr_merge(pr, commit_title, commit_message=None, merge_method=None):
"""
Merge the PR.
"""
from github import PullRequestMergeStatus
post_parameters = dict()
post_parameters["commit_title"] = commit_title
if commit_message:
post_parameters["commit_message"] = commit_message
if merge_method:
post_parameters["merge_method"] = merge_method
headers, data = pr._requester.requestJsonAndCheck(
"PUT",
pr.url + "/merge",
input=post_parameters,
)
return PullRequestMergeStatus.PullRequestMergeStatus(
pr._requester, headers, data, completed=True)
@task
@needs('update_setup', 'deps')
@consume_args
def merge_commit(args):
"""
Commit the merge and push changes.
Exit code:
* 0 - ok
* 1 - failure
* 2 - warning
Environment variables:
* GITHUB_PULL_ID
* GITHUB_TOKEN
"""
from github import GithubException
github_env = _get_github_environment()
(repo, pull_request, message) = _check_review_properties(
token=github_env['token'], pull_id=github_env['pull_id'])
branch_name = pull_request.head.ref
remote_sha = pull_request.head.sha.lower()
try:
print(_pr_merge(
pr=pull_request,
commit_title=pull_request.title,
merge_method='squash',
))
# We create the simple tag, without annotation as a ref.
# Low level git ftw.
print(repo.create_git_ref(
ref='refs/tags/' + SETUP['product']['version'],
sha=remote_sha,
))
print("\n> PR Merged for %s. Tag created at %s.\n" % (
branch_name, remote_sha,))
except GithubException as error:
print("\n> Failed to merge PR and create the tag.\n")
print(str(error))
sys.exit(1)
@task
def pqm():
"""
Submit the branch to PQM.
Arguments AFTER all options: PULL_ID [--force-purge]
"""
args = sys.argv[2:]
if len(args) < 1:
print('Please specify the pull request id for this branch.')
sys.exit(1)
result = pave.git.status()
if result:
print('Please commit all files and get review approval.')
print('PQM canceled.')
sys.exit(1)
try:
pull_id = int(args[0])
except Exception:
print("Pull id in bad format. It must be an integer.")
sys.exit(1)
pull_id_property = '--properties=github_pull_id=%s' % (pull_id)
arguments = ['gk-merge', pull_id_property]
if '--force-purge' in args:
arguments.append('--properties=force_purge=yes')
environment.args = arguments
from brink.pavement_commons import test_remote
test_remote(arguments)
@task
@cmdopts([
('target=', None, 'Base repository URI.'),
('latest=', None, '`no` if this release is not for latest version.'),
(
'pull-id=', None,
'ID of GitHub pull request for release. Required only for production.'
),
])
@task
def rqm(options):
"""
Submit the branch to release manager.
"""
result = pave.git.status()
if result:
print('Please commit all files before requesting the release.')
print('RQM cancelled.')
sys.exit(1)
target = pave.getOption(options, 'rqm', 'target', default_value=None)
if target == 'production':
target = 'gk-release'
else:
target = 'gk-release-staging'
test_arguments = 'latest=%s' % pave.getOption(
options, 'rqm', 'latest', default_value='yes')
pull_id_property = '--properties=github_pull_id=%s' % pave.getOption(
options, 'rqm', 'pull_id', default_value='not-defined')
arguments = [target, pull_id_property, test_arguments]
environment.args = arguments
from brink.pavement_commons import test_remote
test_remote(arguments)
@task
@cmdopts([
('target=', None, 'production | staging'),
])
def publish(options):
"""
Publish download files and documentation.
Environment variables:
* TEST_ARGUMENTS - [latest=yes|latest=no]
"""
target = pave.getOption(
options, 'publish', 'target', default_value='staging')
latest = _get_environment('TEST_ARGUMENTS', default='latest=no')
if latest == 'latest=yes':
latest = 'yes'
else:
latest = 'no'
arguments = [target, latest]
call_task('publish_documentation', args=arguments)
call_task('publish_distributables', args=arguments)
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import GPT2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers.models.gpt2.modeling_tf_gpt2 import (
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGPT2DoubleHeadsModel,
TFGPT2ForSequenceClassification,
TFGPT2LMHeadModel,
TFGPT2Model,
shape_list,
)
class TFGPT2ModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.bos_token_id = self.vocab_size - 1
self.eos_token_id = self.vocab_size - 1
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = GPT2Config(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
return_dict=True,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFGPT2Model(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
inputs = [input_ids, None, input_mask] # None is the input for 'past'
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFGPT2Model(config=config)
# first forward pass
outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)
outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
output, past = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1)
output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"]
output_from_past = model(next_tokens, token_type_ids=next_token_types, past=past)["last_hidden_state"]
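# Feeding only the new token together with the cached `past` should yield the
# same hidden state as recomputing the full, extended sequence from scratch.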
# select random slice
random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1]))
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
output_from_past_slice = output_from_past[:, 0, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)
def create_and_check_gpt2_model_attention_mask_past(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
model = TFGPT2Model(config=config)
# create attention mask
half_seq_length = self.seq_length // 2
attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)
attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)
attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1
random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)
vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)
condition = tf.transpose(
tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))
)
input_ids = tf.where(condition, random_other_next_tokens, input_ids)
# append to next input_ids and attn_mask
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
attn_mask = tf.concat([attn_mask, tf.ones((shape_list(attn_mask)[0], 1), dtype=tf.int32)], axis=1)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past=past, attention_mask=attn_mask)["last_hidden_state"]
# select random slice
random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1]))
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
output_from_past_slice = output_from_past[:, 0, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-12)
def create_and_check_gpt2_model_past_large_inputs(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
model = TFGPT2Model(config=config)
input_ids = input_ids[:1, :]
input_mask = input_mask[:1, :]
token_type_ids = token_type_ids[:1, :]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, use_cache=True)
output, past = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
next_token_types = ids_tensor((self.batch_size, 3), self.type_vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1)
next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1)
output_from_no_past = model(
next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask
)["last_hidden_state"]
output_from_past = model(
next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past=past
)["last_hidden_state"]
self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def create_and_check_gpt2_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFGPT2LMHeadModel(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_gpt2_double_head(
self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
):
model = TFGPT2DoubleHeadsModel(config=config)
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
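# Each example is tiled num_choices times so the double-heads model scores
# every choice; mc_logits then carries one score per (example, choice) pair.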
inputs = {
"input_ids": multiple_choice_inputs_ids,
"mc_token_ids": mc_token_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)
)
self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))
def create_and_check_gpt2_for_sequence_classification(
self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args
):
config.num_labels = self.num_labels
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
"labels": sequence_labels,
}
model = TFGPT2ForSequenceClassification(config)
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class TFGPT2ModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(TFGPT2Model, TFGPT2LMHeadModel, TFGPT2ForSequenceClassification, TFGPT2DoubleHeadsModel)
if is_tf_available()
else ()
)
all_generative_model_classes = (TFGPT2LMHeadModel,) if is_tf_available() else ()
test_head_masking = False
test_onnx = True
onnx_min_opset = 10
def setUp(self):
self.model_tester = TFGPT2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_gpt2_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model(*config_and_inputs)
def test_gpt2_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model_past(*config_and_inputs)
def test_gpt2_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model_attention_mask_past(*config_and_inputs)
def test_gpt2_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model_past_large_inputs(*config_and_inputs)
def test_gpt2_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_lm_head(*config_and_inputs)
def test_gpt2_double_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_double_head(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert name is None
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_gpt2_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_for_sequence_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFGPT2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class TFGPT2ModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_gpt2(self):
model = TFGPT2LMHeadModel.from_pretrained("gpt2")
input_ids = tf.convert_to_tensor([[464, 3290]], dtype=tf.int32) # The dog
expected_output_ids = [
464,
3290,
373,
1043,
287,
257,
2214,
1474,
262,
16246,
286,
2688,
290,
2688,
27262,
13,
198,
198,
464,
3290,
] # The dog was found in a field near the intersection of West and West Streets.\n\nThe dog
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
def test_lm_generate_distilgpt2(self):
model = TFGPT2LMHeadModel.from_pretrained("distilgpt2")
input_ids = tf.convert_to_tensor([[464, 1893]], dtype=tf.int32) # The president
expected_output_ids = [
464,
1893,
286,
262,
1578,
1829,
11,
290,
262,
1893,
286,
262,
1578,
7526,
11,
423,
587,
287,
262,
2635,
] # The president of the United States, and the president of the United Kingdom, have been in the White
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:4364")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:4364")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a VapeCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a VapeCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
#!/usr/bin/env python3
"""
This script holds the functions that make the API requests, process the
JSON data and build a single grid in HTML where each tile holds movie
data (graphic, YouTube trailer link, story etc.)
Attributes:
API_KEY (str): YouTube Data API key.
PLAYLIST_ID (str): YouTube playlist id.
CSE_ID (str): Google Custom Search Engine id.
movie_tile_template (str): Template for a single movie entry.
"""
import html
import json
import os
import dev_config
import re
import sqlite3
import sys
import time
import webbrowser
from urllib import parse
from urllib.request import Request
from urllib.request import urlopen
from urllib.error import URLError
class Movie(object):
"""
This class provides the necessary code for storing and using movie
information.
Args:
title (str): The movie title.
storyline (str): The movie synopsis.
poster_image_url (str): Link or path to movie poster data.
trailer_youtube_url (str): Link to movie trailer on YouTube.
link (str): URL to the movie's IMDb page.
"""
def __init__(self, title="No data", storyline="No data", image="No data",
trailer="No data", link="No data"):
self.title = title
self.storyline = storyline
self.poster_image_url = image
self.trailer_youtube_url = ("https://www.youtube.com/watch?v="+trailer)
self.link = link
def show_trailer(self):
"""
Opens the link to the movie trailer.
"""
try:
webbrowser.open(self.trailer_youtube_url)
except webbrowser.Error as e:
msg = "@Movie.show_trailer -- {}".format(e)
print(msg)
def http_get_request(url):
"""
Makes HTTP GET requests from constructed API urls.
Args:
url (str): URL for making the API request.
"""
request = Request(url)
try:
response_obj = urlopen(request)
response_data = json.loads(response_obj.read())
return response_data
except URLError as e:
msg = "@http_get_request -- {}".format(e)
print(msg)
API_KEY = dev_config.secret["API_KEY"]
PLAYLIST_ID = dev_config.secret["PLAYLIST_ID"]
CSE_ID = dev_config.secret["CSE_ID"]
LIMIT = dev_config.envar["LIMIT"]
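# dev_config is assumed to look roughly like this (hypothetical values):
#   secret = {"API_KEY": "...", "PLAYLIST_ID": "...", "CSE_ID": "..."}
#   envar = {"LIMIT": 50}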
def call_youtube_api(page_token):
"""
Fetches data from YouTube Data API v3.
Args:
page_token (str): Holds the `nextPageToken` key from the data
returned from the API (for pagination).
"""
url = ""
if page_token is None:
url = ("https://www.googleapis.com/youtube/v3/playlistItems?part="
"snippet&maxResults={}&playlistId={}&key={}"
.format(LIMIT, PLAYLIST_ID, API_KEY))
else:
url = ("https://www.googleapis.com/youtube/v3/playlistItems?part="
"snippet&maxResults={}&pageToken={}&playlistId={}"
"&key={}".format(LIMIT, page_token, PLAYLIST_ID, API_KEY))
playlist_data = http_get_request(url)
return playlist_data
def call_imdb_api(title):
"""
Fetches data using Google Custom Search JSON API v1 specified for IMDb.
https://developers.google.com/custom-search/v1/overview
Args:
title (str): Movie title to search parameter.
"""
imdb_url = ("https://www.googleapis.com/customsearch/v1?q={}&cx={}&key={}"
.format(title, CSE_ID, API_KEY))
imdb_data = http_get_request(imdb_url)
return imdb_data
def process_youtube_data(playlist_data):
"""
Processes the data returned from `call_youtube_api` to get the
movie title, storyline and trailer link.
Args:
playlist_data (dict): Decoded json data from the YouTube API.
"""
movies_array = []
for item in playlist_data["items"]:
title = item["snippet"]["title"]
storyline = item["snippet"]["description"]
trailer = item["snippet"]["resourceId"]["videoId"]
movieObj = Movie(title, storyline, "", trailer, "")
movies_array.append(movieObj)
return movies_array
def process_imdb_data(movies_array):
"""
Processes the data returned from `call_imdb_api` to get the
movie poster and IMDb link.
Args:
movies_array (array): Movie objects generated by
`process_youtube_data`.
"""
for movieObj in movies_array:
# Makes movie title URL safe
title = parse.quote_plus(movieObj.title)
imdb_data = call_imdb_api(title)
# Some YouTube titles have the year in them -- e.g., Labyrinth (1986)
# Using regular expression substitution to remove it.
# This is done after the API call for more accurate search results.
regex = r"(\([0-9]\w+\))"
title = parse.unquote_plus(title)
title = re.sub(regex, "", title)
movieObj.title = title
poster = imdb_data["items"][0]["pagemap"]["cse_image"][0]["src"]
imdb_link = imdb_data["items"][0]["link"]
movieObj.poster_image_url = poster
movieObj.link = imdb_link
movie_tile_template = """
<div class="col-md-6 col-lg-4 movie-tile text-center" data-trailer-youtube-id=
"{trailer_youtube_id}" data-storyline="{storyline}" data-link="{link}"
data-toggle="modal" data-target="#trailer">
<div class="img-box">
<img src="{poster_image_url}" alt="Movie DVD cover art">
</div>
<h2>{movie_title}</h2>
</div>
"""
def create_movie_tiles(movies):
"""
Builds the movie tiles by filling `movie_tile_template` with data
from each movie.
Args:
movies (arr): The list of movie objects.
Returns:
content (str): The data-injected movie tiles HTML.
"""
content = ""
for movie in movies:
# Extract the youtube ID from the url
youtube_id_match = re.search(
r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
youtube_id_match = youtube_id_match or re.search(
r'(?<=be/)[^&#]+', movie.trailer_youtube_url)
trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match
else None)
data = {
"trailer_youtube_id": trailer_youtube_id,
"storyline": html.escape(movie.storyline, True),
"link": html.escape(movie.link, True),
"poster_image_url": movie.poster_image_url,
"movie_title": html.escape(movie.title, True)
}
content += movie_tile_template.format(**data)
return content
def create_db(close_conn=True):
"""
Opens a connection to SQLite database. If no database exists,
creates a new one.
Args:
close_conn (bool): Controls whether to close or return
a connection.
"""
conn = None
db_path = os.getcwd() + "/movies.db"
try:
conn = sqlite3.connect(db_path)
print("SQLite3 -- {}".format(sqlite3.version))
except sqlite3.Error as e:
msg = "@connect_db -- {}".format(e)
print(msg)
finally:
if conn and close_conn == True:
conn.close()
else:
return conn
def create_table_movie(conn=None):
"""
Creates a new table called "movies" in the connected database.
Args:
conn (class): sqlite3.Connection.
"""
try:
c = conn.cursor()
c.execute(
"""
CREATE TABLE movies
(page_num integer, data text)
""")
print("Table 'movies' created.")
except sqlite3.Error as e:
msg = "@create_table_movie -- {}".format(e)
print(msg)
def insert_movie_data(conn=None, page_data=None):
"""
Inserts pages data into the table "movies."
Args:
conn (class): sqlite3.Connection.
page_data (array): Table row data `(page number, movie tiles)`.
"""
if page_data is None:
print("No movie data available.")
return
try:
c = conn.cursor()
c.executemany("INSERT INTO movies VALUES (?, ?)", page_data)
conn.commit()
except sqlite3.Error as e:
msg = "@create_table_movie -- {}".format(e)
print(msg)
def generate_page_data():
"""
Generates data for the table "movies."
Returns:
Array of data tuples in the form `(page number, movie tiles)`.
"""
page_data = []
page_token = None
page_num = 1
done = False
try:
while done == False:
playlist_data = call_youtube_api(page_token)
movies_array = process_youtube_data(playlist_data)
process_imdb_data(movies_array)
movie_tiles = create_movie_tiles(movies_array)
page_data.append((page_num, movie_tiles))
if "nextPageToken" in playlist_data:
page_token = playlist_data["nextPageToken"]
page_num += 1
continue
else:
done = True
return page_data
except RuntimeError:
msg = "An error occurred when obtaining data for the database."
raise Exception("@generate_page_data -- {}".format(msg))
def create_movies_db():
"""
Creates a new database for the table "movies."
"""
conn = create_db(False)
create_table_movie(conn)
page_data = generate_page_data()
insert_movie_data(conn, page_data)
conn.close()
if __name__ == "__main__":
db_path = os.getcwd() + "/movies.db"
if os.path.isfile(db_path) == True:
prompt = ("This will overwrite the existing movies.db "
"file. Proceed? (Y/N): ")
valid_yes = ("y", "yes")
valid_no = ("n", "no")
while True:
answer = (input(prompt)).lower()
if answer in valid_yes:
os.remove(db_path)
break
elif answer in valid_no:
print("movies.db file generation cancelled.")
sys.exit(0)
else:
print("Please enter a valid reply.")
create_movies_db()
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row, which is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
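# Pick the two subclusters that are farthest apart; each one seeds one of
# the new subclusters, and the remaining subclusters join whichever seed
# is closer.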
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True, in order to retrieve
the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
Manipulate ``init_centroids_`` throughout rather than ``centroids_``,
since the centroids are just a view of ``init_centroids_``.
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
Manipulate ``init_sq_norm_`` throughout; similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norms as views. In this way,
# it is sufficient to update init_centroids_ and init_sq_norm_;
# the views pick up the changes automatically.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
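# dist_matrix now holds -2 * <x, c> + ||c||^2 for each candidate centroid c.
# Since ||x||^2 is the same for every candidate, argmin over this quantity
# equals argmin over the true squared Euclidean distances.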
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# Things are not too good. We need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space, nor is it close to any
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
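# sq_radius simplifies to new_ss / new_n - new_norm, i.e. the mean squared
# distance of the merged samples from the merged centroid.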
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively until it
ends up at the leaf subcluster of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceeds the branching_factor, then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize this loop; a good candidate for Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
        Predict data using the ``subcluster_centers_`` of the subclusters.
        Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : ndarray, shape (n_samples,)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
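# A minimal usage sketch (illustrative only; the helper name below is made up)
# covering two behaviours documented above that the class docstring example
# does not show: building the tree incrementally with partial_fit(), and the
# reduced-distance trick used by predict(). For a fixed sample x,
# argmin_c ||x - c||^2 == argmin_c (||c||^2 - 2 * x.c), so the row norms of X
# can be skipped; the assertion checks this against a brute-force assignment.
def _birch_usage_sketch():
    import numpy as np
    X = np.array([[0.0, 1.0], [0.3, 1.0], [-0.3, 1.0],
                  [0.0, -1.0], [0.3, -1.0], [-0.3, -1.0]])
    brc = Birch(n_clusters=None, threshold=0.5)
    # Online learning: feed the data in two batches instead of a single fit().
    brc.partial_fit(X[:3])
    brc.partial_fit(X[3:])
    labels = brc.predict(X)
    # Brute-force nearest-centroid assignment for comparison.
    centers = brc.subcluster_centers_
    sq_dist = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=-1)
    brute = brc.subcluster_labels_[np.argmin(sq_dist, axis=1)]
    assert np.array_equal(brute, labels)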
|
|
"""
Support for Z-Wave.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zwave/
"""
import logging
import os.path
import time
from pprint import pprint
import voluptuous as vol
from homeassistant.helpers import discovery, customize
from homeassistant.const import (
ATTR_BATTERY_LEVEL, ATTR_LOCATION, ATTR_ENTITY_ID, CONF_CUSTOMIZE,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, CONF_ENTITY_ID)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_time_change
from homeassistant.util import convert, slugify
import homeassistant.config as conf_util
import homeassistant.helpers.config_validation as cv
from . import const
REQUIREMENTS = ['pydispatcher==2.0.5']
_LOGGER = logging.getLogger(__name__)
CONF_AUTOHEAL = 'autoheal'
CONF_DEBUG = 'debug'
CONF_POLLING_INTENSITY = 'polling_intensity'
CONF_POLLING_INTERVAL = 'polling_interval'
CONF_USB_STICK_PATH = 'usb_path'
CONF_CONFIG_PATH = 'config_path'
CONF_IGNORED = 'ignored'
CONF_REFRESH_VALUE = 'refresh_value'
CONF_REFRESH_DELAY = 'delay'
DEFAULT_CONF_AUTOHEAL = True
DEFAULT_CONF_USB_STICK_PATH = '/zwaveusbstick'
DEFAULT_POLLING_INTERVAL = 60000
DEFAULT_DEBUG = False
DEFAULT_CONF_IGNORED = False
DEFAULT_CONF_REFRESH_VALUE = False
DEFAULT_CONF_REFRESH_DELAY = 2
DOMAIN = 'zwave'
NETWORK = None
# List of tuple (DOMAIN, discovered service, supported command classes,
# value type, genre type, specific device class).
DISCOVERY_COMPONENTS = [
('sensor',
[const.GENERIC_TYPE_WHATEVER],
[const.SPECIFIC_TYPE_WHATEVER],
[const.COMMAND_CLASS_SENSOR_MULTILEVEL,
const.COMMAND_CLASS_METER,
const.COMMAND_CLASS_ALARM,
const.COMMAND_CLASS_SENSOR_ALARM],
const.TYPE_WHATEVER,
const.GENRE_USER),
('light',
[const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_SWITCH_REMOTE],
[const.SPECIFIC_TYPE_POWER_SWITCH_MULTILEVEL,
const.SPECIFIC_TYPE_SCENE_SWITCH_MULTILEVEL,
const.SPECIFIC_TYPE_NOT_USED],
[const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.TYPE_BYTE,
const.GENRE_USER),
('switch',
[const.GENERIC_TYPE_SENSOR_ALARM,
const.GENERIC_TYPE_SENSOR_BINARY,
const.GENERIC_TYPE_SWITCH_BINARY,
const.GENERIC_TYPE_ENTRY_CONTROL,
const.GENERIC_TYPE_SENSOR_MULTILEVEL,
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_SENSOR_NOTIFICATION,
const.GENERIC_TYPE_GENERIC_CONTROLLER,
const.GENERIC_TYPE_SWITCH_REMOTE,
const.GENERIC_TYPE_REPEATER_SLAVE,
const.GENERIC_TYPE_THERMOSTAT,
const.GENERIC_TYPE_WALL_CONTROLLER],
[const.SPECIFIC_TYPE_WHATEVER],
[const.COMMAND_CLASS_SWITCH_BINARY],
const.TYPE_BOOL,
const.GENRE_USER),
('binary_sensor',
[const.GENERIC_TYPE_SENSOR_ALARM,
const.GENERIC_TYPE_SENSOR_BINARY,
const.GENERIC_TYPE_SWITCH_BINARY,
const.GENERIC_TYPE_METER,
const.GENERIC_TYPE_SENSOR_MULTILEVEL,
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_SENSOR_NOTIFICATION,
const.GENERIC_TYPE_THERMOSTAT],
[const.SPECIFIC_TYPE_WHATEVER],
[const.COMMAND_CLASS_SENSOR_BINARY],
const.TYPE_BOOL,
const.GENRE_USER),
('lock',
[const.GENERIC_TYPE_ENTRY_CONTROL],
[const.SPECIFIC_TYPE_ADVANCED_DOOR_LOCK,
const.SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK],
[const.COMMAND_CLASS_DOOR_LOCK],
const.TYPE_BOOL,
const.GENRE_USER),
('cover',
[const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_ENTRY_CONTROL],
[const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL,
const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION,
const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON,
const.SPECIFIC_TYPE_SECURE_DOOR],
[const.COMMAND_CLASS_SWITCH_BINARY,
const.COMMAND_CLASS_BARRIER_OPERATOR,
const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.TYPE_WHATEVER,
const.GENRE_USER),
('climate',
[const.GENERIC_TYPE_THERMOSTAT],
[const.SPECIFIC_TYPE_WHATEVER],
[const.COMMAND_CLASS_THERMOSTAT_SETPOINT],
const.TYPE_WHATEVER,
const.GENRE_WHATEVER),
]
RENAME_NODE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(const.ATTR_NAME): cv.string,
})
SET_CONFIG_PARAMETER_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Coerce(int),
vol.Optional(const.ATTR_CONFIG_SIZE): vol.Coerce(int)
})
PRINT_CONFIG_PARAMETER_SCHEMA = vol.Schema({
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
})
CHANGE_ASSOCIATION_SCHEMA = vol.Schema({
vol.Required(const.ATTR_ASSOCIATION): cv.string,
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_TARGET_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_GROUP): vol.Coerce(int),
vol.Optional(const.ATTR_INSTANCE, default=0x00): vol.Coerce(int)
})
_ZWAVE_CUSTOMIZE_SCHEMA_ENTRY = vol.Schema({
vol.Required(CONF_ENTITY_ID): cv.match_all,
vol.Optional(CONF_POLLING_INTENSITY): cv.positive_int,
vol.Optional(CONF_IGNORED, default=DEFAULT_CONF_IGNORED): cv.boolean,
vol.Optional(CONF_REFRESH_VALUE, default=DEFAULT_CONF_REFRESH_VALUE):
cv.boolean,
vol.Optional(CONF_REFRESH_DELAY, default=DEFAULT_CONF_REFRESH_DELAY):
cv.positive_int
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_AUTOHEAL, default=DEFAULT_CONF_AUTOHEAL): cv.boolean,
vol.Optional(CONF_CONFIG_PATH): cv.string,
vol.Optional(CONF_CUSTOMIZE, default=[]):
vol.All(customize.CUSTOMIZE_SCHEMA,
[_ZWAVE_CUSTOMIZE_SCHEMA_ENTRY]),
vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean,
vol.Optional(CONF_POLLING_INTERVAL, default=DEFAULT_POLLING_INTERVAL):
cv.positive_int,
vol.Optional(CONF_USB_STICK_PATH, default=DEFAULT_CONF_USB_STICK_PATH):
cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
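# A minimal sketch (the device path and values below are hypothetical, and the
# helper name is made up) of the mapping that CONFIG_SCHEMA above validates,
# i.e. the parsed ``zwave:`` section of configuration.yaml. Omitted optional
# keys such as autoheal and customize are filled in from the defaults declared
# above.
def _example_zwave_config():
    return CONFIG_SCHEMA({
        DOMAIN: {
            CONF_USB_STICK_PATH: '/dev/ttyACM0',
            CONF_POLLING_INTERVAL: 30000,
            CONF_DEBUG: True,
        },
    })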
def _obj_to_dict(obj):
"""Convert an object into a hash for debug."""
return {key: getattr(obj, key) for key
in dir(obj)
if key[0] != '_' and not hasattr(getattr(obj, key), '__call__')}
def _node_name(node):
"""Return the name of the node."""
return node.name or '{} {}'.format(
node.manufacturer_name, node.product_name)
def _value_name(value):
"""Return the name of the value."""
return '{} {}'.format(_node_name(value.node), value.label)
def _node_object_id(node):
"""Return the object_id of the node."""
node_object_id = '{}_{}'.format(slugify(_node_name(node)), node.node_id)
return node_object_id
def object_id(value):
"""Return the object_id of the device value.
The object_id contains node_id and value instance id
to not collide with other entity_ids.
"""
_object_id = "{}_{}_{}".format(slugify(_value_name(value)),
value.node.node_id, value.index)
# Add the instance id if there is more than one instance for the value
if value.instance > 1:
return '{}_{}'.format(_object_id, value.instance)
return _object_id
def nice_print_node(node):
"""Print a nice formatted node to the output (debug method)."""
node_dict = _obj_to_dict(node)
node_dict['values'] = {value_id: _obj_to_dict(value)
for value_id, value in node.values.items()}
print("\n\n\n")
print("FOUND NODE", node.product_name)
pprint(node_dict)
print("\n\n\n")
def get_config_value(node, value_index):
"""Return the current configuration value for a specific index."""
try:
for value in node.values.values():
# 112 == config command class
if value.command_class == 112 and value.index == value_index:
return value.data
except RuntimeError:
        # If we get a runtime error, the dict changed while we were
        # looking for a value; just try again.
return get_config_value(node, value_index)
# pylint: disable=R0914
def setup(hass, config):
"""Setup Z-Wave.
Will automatically load components to support devices found on the network.
"""
# pylint: disable=global-statement, import-error
global NETWORK
descriptions = conf_util.load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
try:
import libopenzwave
except ImportError:
_LOGGER.error("You are missing required dependency Python Open "
"Z-Wave. Please follow instructions at: "
"https://home-assistant.io/components/zwave/")
return False
from pydispatch import dispatcher
from openzwave.option import ZWaveOption
from openzwave.network import ZWaveNetwork
from openzwave.group import ZWaveGroup
default_zwave_config_path = os.path.join(os.path.dirname(
libopenzwave.__file__), 'config')
# Load configuration
use_debug = config[DOMAIN].get(CONF_DEBUG)
customize.set_customize(hass, DOMAIN, config[DOMAIN].get(CONF_CUSTOMIZE))
autoheal = config[DOMAIN].get(CONF_AUTOHEAL)
# Setup options
options = ZWaveOption(
config[DOMAIN].get(CONF_USB_STICK_PATH),
user_path=hass.config.config_dir,
config_path=config[DOMAIN].get(
CONF_CONFIG_PATH, default_zwave_config_path))
options.set_console_output(use_debug)
options.lock()
NETWORK = ZWaveNetwork(options, autostart=False)
if use_debug:
def log_all(signal, value=None):
"""Log all the signals."""
print("")
print("SIGNAL *****", signal)
if value and signal in (ZWaveNetwork.SIGNAL_VALUE_CHANGED,
ZWaveNetwork.SIGNAL_VALUE_ADDED,
ZWaveNetwork.SIGNAL_SCENE_EVENT,
ZWaveNetwork.SIGNAL_NODE_EVENT,
ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED,
ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED):
pprint(_obj_to_dict(value))
print("")
dispatcher.connect(log_all, weak=False)
def value_added(node, value):
"""Called when a value is added to a node on the network."""
for (component,
generic_device_class,
specific_device_class,
command_class,
value_type,
value_genre) in DISCOVERY_COMPONENTS:
_LOGGER.debug("Component=%s Node_id=%s query start",
component, node.node_id)
if node.generic not in generic_device_class and \
None not in generic_device_class:
_LOGGER.debug("node.generic %s not None and in "
"generic_device_class %s",
node.generic, generic_device_class)
continue
if node.specific not in specific_device_class and \
None not in specific_device_class:
_LOGGER.debug("node.specific %s is not None and in "
"specific_device_class %s", node.specific,
specific_device_class)
continue
if value.command_class not in command_class and \
None not in command_class:
_LOGGER.debug("value.command_class %s is not None "
"and in command_class %s",
value.command_class, command_class)
continue
if value_type != value.type and value_type is not None:
_LOGGER.debug("value.type %s != value_type %s",
value.type, value_type)
continue
if value_genre != value.genre and value_genre is not None:
_LOGGER.debug("value.genre %s != value_genre %s",
value.genre, value_genre)
continue
# Configure node
_LOGGER.debug("Adding Node_id=%s Generic_command_class=%s, "
"Specific_command_class=%s, "
"Command_class=%s, Value type=%s, "
"Genre=%s", node.node_id,
node.generic, node.specific,
value.command_class, value.type,
value.genre)
name = "{}.{}".format(component, object_id(value))
node_config = customize.get_overrides(hass, DOMAIN, name)
if node_config.get(CONF_IGNORED):
_LOGGER.info("Ignoring device %s", name)
return
polling_intensity = convert(
node_config.get(CONF_POLLING_INTENSITY), int)
if polling_intensity:
value.enable_poll(polling_intensity)
else:
value.disable_poll()
discovery.load_platform(hass, component, DOMAIN, {
const.ATTR_NODE_ID: node.node_id,
const.ATTR_VALUE_ID: value.value_id,
}, config)
def scene_activated(node, scene_id):
"""Called when a scene is activated on any node in the network."""
hass.bus.fire(const.EVENT_SCENE_ACTIVATED, {
ATTR_ENTITY_ID: _node_object_id(node),
const.ATTR_OBJECT_ID: _node_object_id(node),
const.ATTR_SCENE_ID: scene_id
})
def node_event_activated(node, value):
"""Called when a nodeevent is activated on any node in the network."""
hass.bus.fire(const.EVENT_NODE_EVENT, {
const.ATTR_OBJECT_ID: _node_object_id(node),
const.ATTR_BASIC_LEVEL: value
})
def network_ready():
"""Called when all awake nodes have been queried."""
_LOGGER.info("Zwave network is ready for use. All awake nodes"
" have been queried. Sleeping nodes will be"
" queried when they awake.")
hass.bus.fire(const.EVENT_NETWORK_READY)
def network_complete():
"""Called when all nodes on network have been queried."""
_LOGGER.info("Zwave network is complete. All nodes on the network"
" have been queried")
hass.bus.fire(const.EVENT_NETWORK_COMPLETE)
dispatcher.connect(
value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED, weak=False)
dispatcher.connect(
scene_activated, ZWaveNetwork.SIGNAL_SCENE_EVENT, weak=False)
dispatcher.connect(
node_event_activated, ZWaveNetwork.SIGNAL_NODE_EVENT, weak=False)
dispatcher.connect(
network_ready, ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED, weak=False)
dispatcher.connect(
network_complete, ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED, weak=False)
def add_node(service):
"""Switch into inclusion mode."""
_LOGGER.info("Zwave add_node have been initialized.")
NETWORK.controller.add_node()
def add_node_secure(service):
"""Switch into secure inclusion mode."""
_LOGGER.info("Zwave add_node_secure have been initialized.")
NETWORK.controller.add_node(True)
def remove_node(service):
"""Switch into exclusion mode."""
_LOGGER.info("Zwave remove_node have been initialized.")
NETWORK.controller.remove_node()
def cancel_command(service):
"""Cancel a running controller command."""
_LOGGER.info("Cancel running ZWave command.")
NETWORK.controller.cancel_command()
def heal_network(service):
"""Heal the network."""
_LOGGER.info("ZWave heal running.")
NETWORK.heal()
def soft_reset(service):
"""Soft reset the controller."""
_LOGGER.info("Zwave soft_reset have been initialized.")
NETWORK.controller.soft_reset()
def test_network(service):
"""Test the network by sending commands to all the nodes."""
_LOGGER.info("Zwave test_network have been initialized.")
NETWORK.test()
def stop_zwave(_service_or_event):
"""Stop Z-Wave network."""
_LOGGER.info("Stopping ZWave network.")
NETWORK.stop()
if hass.state == 'RUNNING':
hass.bus.fire(const.EVENT_NETWORK_STOP)
def rename_node(service):
"""Rename a node."""
state = hass.states.get(service.data.get(ATTR_ENTITY_ID))
node_id = state.attributes.get(const.ATTR_NODE_ID)
node = NETWORK.nodes[node_id]
name = service.data.get(const.ATTR_NAME)
node.name = name
_LOGGER.info(
"Renamed ZWave node %d to %s", node_id, name)
def set_config_parameter(service):
"""Set a config parameter to a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = NETWORK.nodes[node_id]
param = service.data.get(const.ATTR_CONFIG_PARAMETER)
selection = service.data.get(const.ATTR_CONFIG_VALUE)
size = service.data.get(const.ATTR_CONFIG_SIZE, 2)
i = 0
for value in (
node.get_values(class_id=const.COMMAND_CLASS_CONFIGURATION)
.values()):
if value.index == param and value.type == const.TYPE_LIST:
_LOGGER.debug('Values for parameter %s: %s', param,
value.data_items)
i = len(value.data_items) - 1
if i == 0:
node.set_config_param(param, selection, size)
else:
if selection > i:
_LOGGER.info('Config parameter selection does not exist!'
' Please check zwcfg_[home_id].xml in'
' your homeassistant config directory. '
' Available selections are 0 to %s', i)
return
node.set_config_param(param, selection, size)
_LOGGER.info('Setting config parameter %s on Node %s '
'with selection %s and size=%s', param, node_id,
selection, size)
def print_config_parameter(service):
"""Print a config parameter from a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = NETWORK.nodes[node_id]
param = service.data.get(const.ATTR_CONFIG_PARAMETER)
_LOGGER.info("Config parameter %s on Node %s : %s",
param, node_id, get_config_value(node, param))
def change_association(service):
"""Change an association in the zwave network."""
association_type = service.data.get(const.ATTR_ASSOCIATION)
node_id = service.data.get(const.ATTR_NODE_ID)
target_node_id = service.data.get(const.ATTR_TARGET_NODE_ID)
group = service.data.get(const.ATTR_GROUP)
instance = service.data.get(const.ATTR_INSTANCE)
node = ZWaveGroup(group, NETWORK, node_id)
if association_type == 'add':
node.add_association(target_node_id, instance)
_LOGGER.info("Adding association for node:%s in group:%s "
"target node:%s, instance=%s", node_id, group,
target_node_id, instance)
if association_type == 'remove':
node.remove_association(target_node_id, instance)
_LOGGER.info("Removing association for node:%s in group:%s "
"target node:%s, instance=%s", node_id, group,
target_node_id, instance)
def start_zwave(_service_or_event):
"""Startup Z-Wave network."""
_LOGGER.info("Starting ZWave network.")
NETWORK.start()
hass.bus.fire(const.EVENT_NETWORK_START)
# Need to be in STATE_AWAKED before talking to nodes.
# Wait up to NETWORK_READY_WAIT_SECS seconds for the zwave network
# to be ready.
for i in range(const.NETWORK_READY_WAIT_SECS):
_LOGGER.debug(
"network state: %d %s", NETWORK.state, NETWORK.state_str)
if NETWORK.state >= NETWORK.STATE_AWAKED:
_LOGGER.info("zwave ready after %d seconds", i)
break
time.sleep(1)
else:
_LOGGER.warning(
"zwave not ready after %d seconds, continuing anyway",
const.NETWORK_READY_WAIT_SECS)
_LOGGER.info(
"final network state: %d %s", NETWORK.state, NETWORK.state_str)
polling_interval = convert(
config[DOMAIN].get(CONF_POLLING_INTERVAL), int)
if polling_interval is not None:
NETWORK.set_poll_interval(polling_interval, False)
poll_interval = NETWORK.get_poll_interval()
_LOGGER.info("zwave polling interval set to %d ms", poll_interval)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_zwave)
# Register node services for Z-Wave network
hass.services.register(DOMAIN, const.SERVICE_ADD_NODE, add_node,
descriptions[const.SERVICE_ADD_NODE])
hass.services.register(DOMAIN, const.SERVICE_ADD_NODE_SECURE,
add_node_secure,
descriptions[const.SERVICE_ADD_NODE_SECURE])
hass.services.register(DOMAIN, const.SERVICE_REMOVE_NODE, remove_node,
descriptions[const.SERVICE_REMOVE_NODE])
hass.services.register(DOMAIN, const.SERVICE_CANCEL_COMMAND,
cancel_command,
descriptions[const.SERVICE_CANCEL_COMMAND])
hass.services.register(DOMAIN, const.SERVICE_HEAL_NETWORK,
heal_network,
descriptions[const.SERVICE_HEAL_NETWORK])
hass.services.register(DOMAIN, const.SERVICE_SOFT_RESET, soft_reset,
descriptions[const.SERVICE_SOFT_RESET])
hass.services.register(DOMAIN, const.SERVICE_TEST_NETWORK,
test_network,
descriptions[const.SERVICE_TEST_NETWORK])
hass.services.register(DOMAIN, const.SERVICE_STOP_NETWORK, stop_zwave,
descriptions[const.SERVICE_STOP_NETWORK])
hass.services.register(DOMAIN, const.SERVICE_START_NETWORK,
start_zwave,
descriptions[const.SERVICE_START_NETWORK])
hass.services.register(DOMAIN, const.SERVICE_RENAME_NODE, rename_node,
descriptions[const.SERVICE_RENAME_NODE],
schema=RENAME_NODE_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_SET_CONFIG_PARAMETER,
set_config_parameter,
descriptions[
const.SERVICE_SET_CONFIG_PARAMETER],
schema=SET_CONFIG_PARAMETER_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_PRINT_CONFIG_PARAMETER,
print_config_parameter,
descriptions[
const.SERVICE_PRINT_CONFIG_PARAMETER],
schema=PRINT_CONFIG_PARAMETER_SCHEMA)
hass.services.register(DOMAIN, const.SERVICE_CHANGE_ASSOCIATION,
change_association,
descriptions[
const.SERVICE_CHANGE_ASSOCIATION],
schema=CHANGE_ASSOCIATION_SCHEMA)
# Setup autoheal
if autoheal:
_LOGGER.info("ZWave network autoheal is enabled.")
track_time_change(hass, heal_network, hour=0, minute=0, second=0)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_zwave)
return True
class ZWaveDeviceEntity(Entity):
"""Representation of a Z-Wave node entity."""
def __init__(self, value, domain):
"""Initialize the z-Wave device."""
# pylint: disable=import-error
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
self._value = value
self.entity_id = "{}.{}".format(domain, self._object_id())
dispatcher.connect(
self.network_value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
def network_value_changed(self, value):
"""Called when a value has changed on the network."""
if self._value.value_id == value.value_id or \
self._value.node == value.node:
_LOGGER.debug('Value changed for label %s', self._value.label)
self.value_changed(value)
def value_changed(self, value):
"""Called when a value for this entity's node has changed."""
self.update_properties()
self.schedule_update_ha_state()
def update_properties(self):
"""Callback on data changes for node values."""
pass
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Return an unique ID."""
return "ZWAVE-{}-{}".format(self._value.node.node_id,
self._value.object_id)
@property
def name(self):
"""Return the name of the device."""
return _value_name(self._value)
def _object_id(self):
"""Return the object_id of the device value.
The object_id contains node_id and value instance id to not collide
with other entity_ids.
"""
return object_id(self._value)
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
const.ATTR_NODE_ID: self._value.node.node_id,
}
try:
battery_level = self._value.node.get_battery_level()
except RuntimeError:
            # If we get a runtime error, the dict changed while we were
            # looking for a value; just try again.
battery_level = self._value.node.get_battery_level()
if battery_level is not None:
attrs[ATTR_BATTERY_LEVEL] = battery_level
location = self._value.node.location
if location:
attrs[ATTR_LOCATION] = location
return attrs
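# A simplified, self-contained sketch (not part of this component's API; the
# helper name is made up) of the matching rule that value_added() applies in
# setup() above: a discovered value is handed to a component only when the
# node's generic and specific device classes, the value's command class, its
# type and its genre all match the DISCOVERY_COMPONENTS entry, with None
# acting as a wildcard in every position.
def _matches_discovery_entry(node_generic, node_specific, command_class,
                             value_type, value_genre, entry):
    (_component, generic_classes, specific_classes,
     command_classes, wanted_type, wanted_genre) = entry
    if node_generic not in generic_classes and None not in generic_classes:
        return False
    if node_specific not in specific_classes and None not in specific_classes:
        return False
    if command_class not in command_classes and None not in command_classes:
        return False
    if wanted_type is not None and wanted_type != value_type:
        return False
    if wanted_genre is not None and wanted_genre != value_genre:
        return False
    return True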
|
|
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``testenv.py``
`Environment verification functionality`
"""
import time
import pytest
from . import loggers
def get_env_prop(env):
"""Read properties from all devices.
Args:
env(Environment): Environment instance
"""
def get_param(param):
"""Get single param.
"""
return "_".join(
{str(switch.get_env_prop(param)) for switch in getattr(env, "switch", {}).values()})
env_dict = {
'switchppVersion': get_param("switchppVersion"),
'chipName': get_param("chipName"),
'cpuArchitecture': get_param("cpuArchitecture"),
}
# Get params from devices
return env_dict
def setup_teardown(function):
"""Setup/Teardown decorator.
"""
def wrapper(*args, **kwargs):
args[0]._setup() # pylint: disable=protected-access
result = function(*args, **kwargs)
args[0]._teardown() # pylint: disable=protected-access
return result
return wrapper
class TestLinks(object):
"""Links verification class.
"""
class_logger = loggers.ClassLogger()
def __init__(self, env):
"""Initialize TestLinks class.
Args:
env(Environment): Environment instance
"""
self.env = env
self.class_logger.info("Links verification enabled.")
# Perform import on __init__ to avoid logger output redirection by pytest.
from . import helpers
self.wait_until_value_is_changed = helpers.wait_until_value_is_changed
self.set_all_ports_admin_disabled = helpers.set_all_ports_admin_disabled
self.set_ports_admin_enabled = helpers.set_ports_admin_enabled
def _setup(self):
"""Prepare env for test_links.
"""
self.class_logger.info("Links verification setup.")
        # Clean up TG objects before the tests (e.g. when sanity_check_only was
        # used earlier), but only if TGs are present.
for tg in getattr(self.env, "tg", {}).values():
tg.cleanup()
# check if env is alive
self.env.check()
        # Perform clearconfig with up to 10 retries
clear_config_ok = False
retry_max = 10
# Keep reboot count for each switch
retry_current = dict.fromkeys(self.env.switch, 0)
# General retry count
retry_current[0] = 0
while not clear_config_ok:
try:
switch_id = None
for switch_id in list(self.env.switch.keys()):
self.env.switch[switch_id].cleanup()
self.class_logger.debug("clearConfig is OK")
self.env.check()
clear_config_ok = True
except Exception as err:
self.class_logger.debug("Exception has been caught...")
if switch_id is not None:
retry_current[switch_id] += 1
else:
retry_current[0] += 1
                # Check the retry counts. If any switch counter has reached
                # retry_max, abort the setup via softexit.
if retry_max in iter(retry_current.values()):
pytest.softexit("Cannot perform clearconfig on all devices. Last received error : %s" % err,
self.env)
break
else:
                    if switch_id is not None:
self.env.switch[switch_id].restart()
self.env.check()
finally:
self.class_logger.debug("Current retry counts: %s" % (retry_current, ))
def _teardown(self):
"""Check procedure on teardown.
"""
self.class_logger.info("Links verification teardown.")
self.env.check()
def _check_tg_links(self, ports, sw):
"""This function verifies links between Traffic Generator and Device.
Args:
ports(dict): Ports dictionary in format {("sw1", "tg1"):{1: 25, 2: 26}}
sw(str): Device acronym, e.g."sw1"
Notes:
Verification based on STP packet "portid" field contents.
"""
self.class_logger.debug("Analyzing links for device %s" % (sw, ))
self.set_all_ports_admin_disabled(self.env.switch)
message = "Port on switch does not pass to Up state after retry."
self.set_ports_admin_enabled(self.env.switch, ports, fail_func=(pytest.softexit, [message, self.env]))
sw_id = int(sw[2:])
sw_bridge_port = ports[('sw1', 'tg1')][1]
sw_mac = self.env.switch[sw_id].ui.get_table_ports([sw_bridge_port])[0]["macAddress"].upper()
for tg_port, sw_port in zip(iter(ports[('tg1', sw)].values()), iter(ports[(sw, 'tg1')].values())):
self.class_logger.info("Verifying link TG:%s SW(id%s):%s" % (tg_port, sw_id, sw_port))
self.class_logger.debug("Starting sniffer on %s interface" % (tg_port, ))
self.env.tg[1].start_sniff([tg_port, ], filter_layer="STP", sniffing_time=5, packets_count=1)
data = self.env.tg[1].stop_sniff([tg_port, ])
if tg_port in data:
for packet in data[tg_port]:
portid = self.env.tg[1].get_packet_field(packet=packet, layer="STP", field="portid")
mac_from_pack = self.env.tg[1].get_packet_field(packet=packet, layer="Dot3", field="src").upper()
prt = portid % 256
self.class_logger.debug("Got port %s from sniffed STP data..." % (prt, ))
try:
assert prt == sw_port
except Exception:
self.class_logger.error(("Port ID got from sniffed data (%s) and provided in config (%s) " +
"are different. SwId: %s, SwIP: %s. Reporting failure...") %
(prt, sw_port, self.env.switch[sw_id].id,
self.env.switch[sw_id].ipaddr))
pytest.softexit("Wrong connection detected!", self.env)
try:
assert sw_mac == mac_from_pack
except Exception:
self.class_logger.error(("Device mac address got from sniffed data (%s) " +
"and provided in config (%s) " +
"are different. SwId: %s, SwIP: %s. Reporting failure...") %
(mac_from_pack, sw_mac, self.env.switch[sw_id].id,
self.env.switch[sw_id].ipaddr))
pytest.softexit("Wrong connection detected!", self.env)
else:
self.class_logger.error("Nothing sniffed on link tg1:%s<->sw%s:%s. Failure." %
(tg_port, sw_id, sw_port))
pytest.softexit("No data for port!", self.env)
def _check_sw_links(self, ports, sw1, sw2, check_method="direct"):
"""This function verifies links between Devices.
Args:
ports(dict): Ports dictionary in format {("sw1", "tg1"):{1: 25, 2: 26}}
sw1(str): Device acronym, e.g."sw1"
sw2(str): Device acronym, e.g."sw2"
check_method(str): Validation type. direct|indirect
Raises:
ValueError: unknown check_method value
Notes:
Verification based on operational state change as a response to admin disable/enable on the other end of the link.
(applicable only for real devices)
"""
self.class_logger.info("Verify link between switches {0} and {1}".format(sw1, sw2))
sw1_id = int(sw1[2:])
sw2_id = int(sw2[2:])
self.class_logger.info("{0} - {1}, {2} - {3}".format(sw1, self.env.switch[sw1_id].ipaddr,
sw2, self.env.switch[sw2_id].ipaddr))
for link_key in list(ports.keys()):
if (link_key[0] == sw1) and (link_key[1] == sw2):
for prt_key in list(ports[(sw1, sw2)].keys()):
dev1prt = ports[(sw1, sw2)][prt_key]
dev2prt = ports[(sw2, sw1)][prt_key]
dev1prt_id = self.env.switch[sw1_id].findprop("Ports", [dev1prt])
dev2prt_id = self.env.switch[sw2_id].findprop("Ports", [dev2prt])
flag1 = False
flag2 = False
self.class_logger.info("Check ports {0}-{1}".format(dev1prt, dev2prt))
if check_method == "direct":
try:
assert self.env.switch[sw2_id].getprop("Ports", "operationalStatus", dev2prt_id) == "Up"
except Exception:
self.class_logger.warning(("Operational status of given port (%s) on paired device (Id: %s, IP: %s) " +
"is already 'Down'. Check config!") %
(dev2prt, self.env.switch[sw2_id].id, self.env.switch[sw2_id].ipaddr))
self.class_logger.warning("SW1: %s, ip: %s, port: %s\nSW2: %s, ip: %s, port: %s" %
(self.env.switch[sw1_id].id, self.env.switch[sw1_id].ipaddr, dev1prt,
self.env.switch[sw2_id].id, self.env.switch[sw2_id].ipaddr, dev2prt))
self.env.switch[sw1_id].setprop("Ports", "adminMode", [dev1prt_id, "Down"])
time.sleep(2)
try:
self.wait_until_value_is_changed(self.env.switch[sw2_id], "Ports",
"operationalStatus", "Down", dev2prt_id, 10)
assert self.env.switch[sw2_id].getprop("Ports", "operationalStatus", dev2prt_id) == "Down"
except Exception:
self.class_logger.error("Port (%s) on paired device did not change its state! Reporting failure..." %
(dev2prt, ))
self.class_logger.error("SW1: %s, ip: %s, port: %s\nSW2: %s, ip: %s, port: %s" %
(self.env.switch[sw1_id].id, self.env.switch[sw1_id].ipaddr, dev1prt,
self.env.switch[sw2_id].id, self.env.switch[sw2_id].ipaddr, dev2prt))
self.class_logger.info("The following ports were checked:\n%s" % (ports, ))
pytest.softexit("Wrong connection detected!", self.env)
self.env.switch[sw1_id].setprop("Ports", "adminMode", [dev1prt_id, "Up"])
elif check_method == "indirect":
dev1_ports_tbl = self.env.switch[sw1_id].getprop_table("Ports")
for record in dev1_ports_tbl:
if record['portId'] == dev1prt:
dev1_prt_name = record['name']
break
dev2_ports_tbl = self.env.switch[sw2_id].getprop_table("Ports")
for record in dev2_ports_tbl:
if record['portId'] == dev2prt:
dev2_prt_name = record['name']
break
self.class_logger.debug("Port name for %s is '%s' and for %s is '%s'" %
(dev1prt, dev1_prt_name, dev2prt, dev2_prt_name))
dev1_lldp_tbl = self.env.switch[sw1_id].getprop_table("LldpRemotes")
self.class_logger.debug("LldpRemotes table length is %s on 1st device." % (len(dev1_lldp_tbl), ))
for record in dev1_lldp_tbl:
if record['remPortId'] == dev2_prt_name and record['remLocalPortNum'] == dev1prt:
flag1 = True
self.class_logger.debug("1st Record found!")
break
else:
self.class_logger.debug("1st Record not found. Moving forward!")
dev2_lldp_tbl = self.env.switch[sw2_id].getprop_table("LldpRemotes")
self.class_logger.debug("LldpRemotes table length is %s on 2nd device." % (len(dev2_lldp_tbl), ))
for record in dev2_lldp_tbl:
if record['remPortId'] == dev1_prt_name and record['remLocalPortNum'] == dev2prt:
flag2 = True
self.class_logger.debug("2nd Record found!")
break
else:
self.class_logger.debug("2nd Record not found. Moving forward!")
try:
assert flag1
assert flag2
except Exception:
self.class_logger.error("SW1: %s, ip: %s, port: %s\nSW2: %s, ip: %s, port: %s" %
(self.env.switch[sw1_id].id, self.env.switch[sw1_id].ipaddr, dev1prt,
self.env.switch[sw2_id].id, self.env.switch[sw2_id].ipaddr, dev2prt))
self.class_logger.info("The following ports were checked:\n%s" % (ports, ))
pytest.softexit("Wrong connection detected!", self.env)
else:
raise ValueError("Unknown value for 'check_method' argument specified: %s." % (check_method, ))
@setup_teardown
def test_links_simplified5(self):
""" "simplified" (5-links) setup:
"""
ports = self.env.get_ports(links=[['tg1', 'sw1', 5], ])
self._check_tg_links(ports, "sw1")
@setup_teardown
def test_links_simplified4(self):
""" "simplified" 5-links setup:
"""
ports = self.env.get_ports(links=[['tg1', 'sw1', 4], ])
time.sleep(15)
self._check_tg_links(ports, "sw1")
@setup_teardown
def test_links_simplified3(self):
""" "simplified" 3-links setup:
"""
ports = self.env.get_ports(links=[['tg1', 'sw1', 3], ])
time.sleep(15)
self._check_tg_links(ports, "sw1")
@setup_teardown
def test_links_simplified2(self):
""" "simplified" 2-links setup:
"""
ports = self.env.get_ports(links=[['tg1', 'sw1', 2], ])
time.sleep(15)
self._check_tg_links(ports, "sw1")
@setup_teardown
def test_links_golden(self):
"""std "golden" setup:
"""
ports = self.env.get_ports([['tg1', 'sw1', 5], ['tg1', 'sw2', 4], ['tg1', 'sw3', 3],
['sw1', 'sw2', 9], ['sw1', 'sw3', 4], ['sw2', 'sw3', 4]])
time.sleep(15)
self._check_tg_links(ports, "sw1")
self._check_tg_links(ports, "sw2")
self._check_tg_links(ports, "sw3")
self.class_logger.info("Ports to TG are OK.")
self._check_sw_links(ports, "sw1", "sw2")
self._check_sw_links(ports, "sw1", "sw3")
self._check_sw_links(ports, "sw2", "sw3")
self.class_logger.info("Ports among switches are OK.")
@setup_teardown
def test_links_diamond(self):
"""std "diamond" setup:
"""
ports = self.env.get_ports([['tg1', 'sw1', 3], ['tg1', 'sw2', 2], ['tg1', 'sw3', 2], ['tg1', 'sw4', 2],
['sw1', 'sw2', 2], ['sw1', 'sw3', 2], ['sw1', 'sw4', 1],
['sw2', 'sw4', 2], ['sw4', 'sw3', 2]])
time.sleep(15)
self._check_tg_links(ports, 'sw1')
self._check_tg_links(ports, 'sw2')
self._check_tg_links(ports, 'sw3')
self._check_tg_links(ports, 'sw4')
self.class_logger.info("Ports to TG are OK.")
self._check_sw_links(ports, 'sw1', 'sw2')
self._check_sw_links(ports, 'sw1', 'sw3')
self._check_sw_links(ports, 'sw1', 'sw4')
self._check_sw_links(ports, 'sw2', 'sw4')
self._check_sw_links(ports, 'sw3', 'sw4')
self.class_logger.info("Ports among switches are OK.")
@setup_teardown
def test_links_mixed(self):
""" "mixed" setup:
"""
ports = self.env.get_ports(links=[['tg1', 'sw1', 2], ['tg1', 'sw2', 1], ['tg1', 'sw3', 1],
['sw1', 'sw2', 1], ['sw1', 'sw3', 1], ['sw2', 'sw3', 2]])
time.sleep(15)
self._check_tg_links(ports, 'sw1')
self._check_tg_links(ports, 'sw2')
self._check_tg_links(ports, 'sw3')
self.class_logger.info("Ports to TG are OK.")
# Time to establish LLDP
time.sleep(35)
self._check_sw_links(ports, 'sw1', 'sw2', check_method="indirect")
self._check_sw_links(ports, 'sw1', 'sw3', check_method="indirect")
self._check_sw_links(ports, 'sw2', 'sw3')
self.class_logger.info("Ports among switches are OK.")
|
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import intel64
from chainer import function_node
from chainer.utils import collections_abc
from chainer.utils import type_check
import chainerx
_numpy_split_ok = numpy.lib.NumpyVersion(numpy.__version__) >= '1.11.0'
def _fix_numpy_split(ys, x, indices_or_sections, axis):
"""Make the output of np.split compatible with numpy >= 1.11"""
if all(y.ndim == x.ndim for y in ys):
return ys
tmp = [len(t) for t in numpy.split(
numpy.empty(x.shape[axis], dtype=numpy.int8), indices_or_sections, 0)]
shape = list(x.shape)
for i, t in enumerate(tmp):
y = ys[i]
if y.ndim != x.ndim:
assert y.size == 0
shape[axis] = t
ys[i] = y.reshape(shape)
return ys
def _get_indices_or_sections(indices_or_sections):
"""Checks and convert ``indices_or_sections`` argument
Converted value is one of: 1-D numpy.ndarray, list, int, and
NumPy int scalar.
Returns:
        A 2-tuple ``(indices, sections)`` in which the first element is the
        indices (a sequence) and the second element is the sections (a scalar).
        Exactly one of the two is not ``None``; the other is ``None``.
"""
ios = indices_or_sections
is_seq = False
if isinstance(ios, numpy.ndarray):
# numpy.ndarray
if ios.dtype.kind != 'i' and ios.size > 0:
# Note: numpy.array([]) (dtype is float64) should be accepted.
raise TypeError('indices_or_sections must be integers')
if ios.ndim >= 2:
raise TypeError('indices_or_sections must be 1-D sequence')
is_seq = ios.ndim != 0
elif isinstance(ios, collections_abc.Sequence):
# Any sequence except numpy.ndarray
ios = list(ios)
is_seq = True
elif isinstance(indices_or_sections, six.integer_types):
# int
pass
else:
raise TypeError(
'indices_or_sections must be integer or 1-D array.\n'
'Actual: {}'.format(type(indices_or_sections)))
if is_seq and chainer.is_debug():
for p, n in six.moves.zip(ios, ios[1:]):
if p > n:
raise ValueError('indices_or_sections must be sorted')
if is_seq:
return ios, None
else:
return None, ios
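# A small illustrative sketch (the helper name is made up): a sequence argument
# comes back in the first slot, a scalar in the second, and the unused slot is
# None, matching the contract described in the docstring above.
def _indices_or_sections_examples():
    assert _get_indices_or_sections([1, 3]) == ([1, 3], None)
    assert _get_indices_or_sections(4) == (None, 4)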
class SplitAxis(function_node.FunctionNode):
"""Function that splits multiple arrays along the specified axis."""
def __init__(self, indices_or_sections, axis):
indices, sections = _get_indices_or_sections(indices_or_sections)
assert (indices is None) != (sections is None)
self.indices = indices
self.sections = sections
self.axis = axis
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].ndim > self.axis)
if self.indices is not None:
indices = self.indices
if len(indices) > 0:
max_index = type_check.make_variable(indices[-1], 'max_index')
type_check.expect(in_types[0].shape[self.axis] >= max_index)
else:
assert self.sections is not None
sections = type_check.make_variable(self.sections, 'sections')
type_check.expect(in_types[0].shape[self.axis] % sections == 0)
@property
def indices_or_sections(self):
return self.indices if self.indices is not None else self.sections
def forward_chainerx(self, inputs):
x, = inputs
return tuple(chainerx.split(x, self.indices_or_sections, self.axis))
def forward(self, inputs):
# Currently iDeep only supports 4 dims
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs, (4,))
and self._ideep_is_supported(inputs)):
return self._forward_ideep(inputs)
x, = inputs
self._xp = backend.get_array_module(x)
indices_or_sections = self.indices_or_sections
ret = self._xp.split(x, indices_or_sections, self.axis)
if self._xp == numpy and not _numpy_split_ok:
ret = _fix_numpy_split(ret, x, indices_or_sections, self.axis)
self._shapes = [r.shape for r in ret]
return tuple(ret)
def _ideep_is_supported(self, inputs):
        # Returns True if iDeep supports the current configuration of inputs
        # and arguments. This is a workaround for a limitation in iDeep's
        # internal implementation.
if self.indices is not None:
indices = self.indices
if len(indices) == 0:
return False # Empty sequence
if indices[0] == 0:
return False # Sequence starting with 0
for i in six.moves.range(1, len(indices)):
if indices[i-1] == indices[i]:
return False # Sequence with duplicate index
else:
if self.sections == 1:
return False # 1
# Workaround for iDeep segfault issue
# See:
# https://github.com/chainer/chainer/pull/4281#issuecomment-365830630
# TODO(niboshi): Remove this after iDeep is fixed.
# Note: inputs[0].ndim is always 4.
if (self.axis == 1 or self.axis == -3) and inputs[0].shape[1] == 8:
return False
return True
def _forward_ideep(self, inputs):
x, = inputs
offsets = intel64.ideep.intVector()
# TODO(iDeep)
        # bypass python3 issue when transferring an array to std::vector<>
# https://github.com/SimpleITK/SimpleITK/issues/106
axis = self.axis % x.ndim
if self.indices is not None:
for i in self.indices:
offsets.push_back(int(i))
else:
d = x.shape[self.axis]
step = d // self.sections
for i in six.moves.range(step, d, step):
offsets.push_back(i)
ret = intel64.ideep.concat.Backward(
intel64.ideep.array(x), offsets, axis)
self._shapes = [r.shape for r in ret]
return ret
def backward(self, indexes, grad_outputs):
dtype = self.inputs[0].dtype
grads = [
self._xp.zeros(shape, dtype=dtype) if gy is None else gy
for gy, shape in six.moves.zip(grad_outputs, self._shapes)]
return chainer.functions.concat(grads, self.axis),
def split_axis(x, indices_or_sections, axis, force_tuple=True):
"""Splits given variables along an axis.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
A variable to be split.
indices_or_sections (int or 1-D array): If this argument is an integer,
N, the array will be divided into N equal arrays along axis.
If it is a 1-D array of sorted integers, it
indicates the positions where the array is split.
axis (int): Axis that the input array is split along.
force_tuple (bool): If ``True`` (the default) this method returns a
tuple even when the number of outputs is one. Otherwise, if
``False`` a Variable will be returned when the number of outputs
is one.
Returns:
tuple or Variable: Tuple of :class:`~chainer.Variable` objects
if the number of outputs is more than 1 or
:class:`~chainer.Variable` otherwise.
When ``force_tuple`` is ``True``, returned value is always a tuple
regardless of the number of outputs.
"""
res = SplitAxis(indices_or_sections, axis).apply((x,))
if force_tuple or len(res) != 1:
return res
return res[0]
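# A minimal usage sketch (the helper name is made up; assumes NumPy is
# available): splitting a (3, 4) array along axis 1, first with explicit
# indices and then with an integer number of equal sections, mirroring the two
# forms accepted by ``indices_or_sections`` above.
def _split_axis_usage_sketch():
    import numpy as np
    x = np.arange(12, dtype=np.float32).reshape(3, 4)
    # Indices [1, 3] cut the four columns into widths 1, 2 and 1.
    y0, y1, y2 = split_axis(x, [1, 3], axis=1)
    assert y0.shape == (3, 1) and y1.shape == (3, 2) and y2.shape == (3, 1)
    # An integer asks for that many equal sections along the axis.
    z0, z1 = split_axis(x, 2, axis=1)
    assert z0.shape == (3, 2) and z1.shape == (3, 2)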
|
|
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, ProxyManager
from .packages.urllib3.response import HTTPResponse
from .compat import urlparse, basestring, urldefrag
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError
from .auth import _basic_auth_str
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""Built-In HTTP Adapter for Urllib3."""
def __init__(self, pool_connections=DEFAULT_POOLSIZE, pool_maxsize=DEFAULT_POOLSIZE):
self.max_retries = DEFAULT_RETRIES
self.config = {}
super(HTTPAdapter, self).__init__()
self.init_poolmanager(pool_connections, pool_maxsize)
def init_poolmanager(self, connections, maxsize):
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize)
def cert_verify(self, conn, url, verify, cert):
if url.startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a connection for the given URL."""
proxies = proxies or {}
proxy = proxies.get(urlparse(url).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, urlparse(url).scheme)
conn = ProxyManager(self.poolmanager.connection_from_url(proxy))
else:
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Dispose of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a proxy, the full URL has to be
used. Otherwise, we should only use the path portion of the URL."""
proxies = proxies or {}
proxy = proxies.get(urlparse(request.url).scheme)
if proxy:
url, _ = urldefrag(request.url)
else:
url = request.path_url
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. Currently this only adds a
Host: header if a proxy is being used."""
proxies = kwargs.get('proxies', {})
if proxies is None:
proxies = {}
proxy = proxies.get(urlparse(request.url).scheme)
username, password = get_auth_from_url(proxy)
if username and password:
request.headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object."""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request, proxies=proxies)
chunked = not (request.body is None or 'Content-Length' in request.headers)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
low_conn.putrequest(request.method, url, skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except socket.error as sockerr:
raise ConnectionError(sockerr)
except MaxRetryError as e:
raise ConnectionError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e)
elif isinstance(e, TimeoutError):
raise Timeout(e)
else:
raise Timeout('Request timed out.')
r = self.build_response(request, resp)
if not stream:
r.content
return r
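# A small standalone sketch (not part of requests' public API; the helper name
# is made up) of the chunked transfer framing that send() performs above when
# the body has no Content-Length: every chunk is written as
# "<hex length>\r\n<chunk>\r\n" and the stream ends with "0\r\n\r\n".
def _frame_chunked(body_chunks):
    framed = b''
    for chunk in body_chunks:
        framed += hex(len(chunk))[2:].encode('utf-8') + b'\r\n'
        framed += chunk + b'\r\n'
    return framed + b'0\r\n\r\n'
# e.g. _frame_chunked([b'abc', b'de']) == b'3\r\nabc\r\n2\r\nde\r\n0\r\n\r\n'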
|
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
#
# Test PEP 342 enhancements to generators, including Throw(), Send(), Close() and yield expressions.
#
from iptest.assert_util import *
# Declare some dummy exceptions to throw
class MyError(Exception):
pass
class MyError2:
pass
# ensure the generator is finished.
def EnsureClosed(g):
try:
g.next()
Assert(False)
except StopIteration:
pass
# test __del__ on generators.
import sys
import gc
# Test that generator.__del__ is invoked and that it calls Close()
# Note that .NET's GC:
# 1) runs on another thread,
# 2) runs at a random time (but can be forcibly invoked from gc.collect)
# So other generators that go out of scope will get closed() called at random times from wherever
# the generator was left. This can introduce some nondeterminism in the tests.
# Note that silverlight doesn't support finalizers, so we don't test Generator.__del__ on that platform.
skiptest("silverlight")
def test_del():
global l
l=[0]
def nested():
def ff3(l):
try:
yield 10
finally:
l[0] += 1
g=ff3(l)
AreEqual(g.next(), 10) # move to inside the finally
del g
nested()
gc.collect()
    AreEqual(l,[1]) # finally should have executed by now.
# Yield can appear in lambda expressions (or any function body).
# A generator lambda expression yields its final result, instead of just returning it.
def test_yield_lambda():
f=lambda x: (3+(yield x), (yield x*2))
g=f(10)
AreEqual(g.next(), 10)
AreEqual(g.send(9), 10*2)
if is_cpython: #http://ironpython.codeplex.com/workitem/28219
AssertError(StopIteration, g.send, 5)
else:
AreEqual(g.send(5), (3+9, 5))
# This usage of lambda expression tests a different parsing path in IPY. (old lambda expressions)
def test_yield_old_lambda():
l=[x for x in lambda : (yield 3),8]
AreEqual(l[1], 8)
f=l[0]
g=f()
AreEqual(g.next(), 3)
def test_type_generator():
def g(): yield 10
def f(): x += yield
AreEqual(type(f()), type(g()))
# CPython 2.5 allows yield as a default parameter for lambda expressions
# (though not for regular def functions)
def test_yield_default_param():
    # This returns a generator that defines a lambda expression whose default
    # parameter is initialized by send(), and then yields that lambda expression.
def f():
yield lambda x=(yield 25): x * 2
g=f()
AreEqual(g.next(), 25)
l = g.send(15) # this sends in the default parameter, yields the lambda expression
AreEqual(l(), 15*2) # this now uses the default param
AreEqual(l(3), 3*2) # use a non-default param.
AreEqual(l(), 15*2)
#
# A yield expression can occur anywhere. Test various spots.
#
# Test yield in a genexp body. This is a little bizarre, but the CPython tests have this.
def test_yield_genexp():
# def f():
# for i in range(5):
# yield (yield i)
#
# Since list() ctor only calls next, (yield i) returns None
g=((yield i) for i in range(5))
x = list(g)
AreEqual(x, [0, None, 1, None, 2, None, 3, None, 4, None])
# test using yield expressions in indexing
def test_yield_index():
def f():
# eval order is 1[2]=3
(yield)[(yield)]='x'
yield
g=f()
AreEqual(g.next(), None)
l=[10,20,30]
g.send(l)
g.send(1)
AreEqual(l[1], 'x')
#---------------------------
# test send with yield expression
def test_yield_exp():
def side_effect(l, i, res):
l[i] += 1
return res
def f(l):
# first test simple yield expression
AreEqual((yield 3), 100)
# test an empty yield. Equivalent to 'yield None'
yield
# now test yield embedded in a complex expression with side-effects and evaluation order.
x = side_effect(l, 0, 5) + (yield 10) + side_effect(l, 1, 2)
yield x
l=[0,0]
g=f(l)
AreEqual(g.next(), 3)
AreEqual(g.send(100), None)
AreEqual(g.next(), 10)
AreEqual(l, [1,0])
AreEqual(g.send(30), 37)
AreEqual(l, [1,1])
# test different parsing configurations of yield expression
# - Top-level assignment (=, +=) does not require parentheses.
# - Otherwise, yield used as an expression does require parentheses.
# - The argument to yield is optional.
def test_yield_exp_parse():
def f():
# yield as statement, yielding tuple
yield 1,2
# top-level assignment. Doesn't need parenthesis
x = yield
AreEqual(x,15)
x = yield 10
AreEqual(x,None)
y = 5
y += yield 99
AreEqual(y, 105)
y += yield
AreEqual(y, 145)
# test precedence. This is w = (yield (1,2)). Not w=(yield 1), 2
w = yield 1,2
AreEqual(w,39)
        # yield in an expression must be in parentheses
z = (yield) / (yield)
AreEqual(z,100/25)
yield 123
g=f()
AreEqual(g.next(), (1,2))
AreEqual(g.next(), None)
AreEqual(g.send(15), 10)
AreEqual(g.next(), 99)
AreEqual(g.send(100), None)
AreEqual(g.send(40), (1,2))
AreEqual(g.send(39), None)
AreEqual(g.send(100), None)
AreEqual(g.send(25), 123)
# Test some goofier places to put a yield expression
def test_yy():
def f():
yield (yield 5)
g=f()
AreEqual(g.next(), 5)
AreEqual(g.send(15), 15)
# Test Send after Close(), should throw StopException, just like Next()
def test_send_after_closed():
l = [0]
def f():
x = yield 10
l[0] += 1
AreEqual(x, 15)
g = f()
AreEqual(g.next(), 10)
def t():
g.send(15)
AreEqual(l, [0])
AssertError(StopIteration, t)
AreEqual(l, [1])
EnsureClosed(g)
AssertError(StopIteration, t)
AreEqual(l, [1]) # no more change
# Test: send(non-none) fails on newly created generator
def test_send_unstarted():
def f():
x = yield 10
AreEqual(x,None) # next() is like send(None)
yield 5
g = f()
def t():
g.send(1)
AssertError(TypeError, t) # can't send non-null on unstarted
# should not change generator status
AreEqual(g.next(), 10)
AreEqual(g.next(), 5)
# Ensure that sending an exception doesn't become a throw
def test_send_exception():
def f():
y = yield
AreEqual(y, MyError)
yield
g=f()
g.next()
g.send(MyError)
#-----------------------------
#
# Throw not handled in iterator
#
def test_throw_unhandled():
# Simple iterator
def f():
# Caller will throw an exception after getting this value
yield 5
Assert(False) # Iterator should not get here
g = f()
i = g.next()
AreEqual(i,5)
# This should go uncaught from the iterator
try:
g.throw(MyError)
Assert(False) # expected exception
except MyError:
pass # 'Good: Exception passed through generator and caught by caller'
#
# Throw handled in iterator
#
def test_throw_handled():
def f2():
yield 1
try:
yield 2 # caller throws from here
Assert(False) # unreachable
except MyError:
pass # 'Good: Generator caught exception from throw'
yield 3
yield 4
g = f2()
AreEqual(g.next(),1)
AreEqual(g.next(),2)
# generator will catch this.
# this throws from the last yield point, resumes executing the generator
# and returns the result of the next yield point.
i = g.throw(MyError)
AreEqual(i,3)
# Test that we can call next() after throw.
AreEqual(g.next(),4)
#
# Test another throw overload passing (type,value).
#
def test_throw_value():
class MyClass2:
def __init__(self,val):
self.val = val
def f():
try:
yield 5
Assert(False)
except MyClass2, x:
AreEqual(x.val,10)
yield 15
g=f()
AreEqual(g.next(), 5)
AreEqual(g.throw(MyClass2, 10), 15)
#
# Test catch and rethrow
#
def test_catch_rethrow():
def f4():
try:
yield 1
Assert(False)
except MyError:
raise MyError2
g=f4()
g.next() # move into try block
try:
g.throw(MyError) # will get caught; the generator raises MyError2 instead
Assert(False)
except MyError2: # catch different error than thrown
pass
#
# Throw as first call on the iterator.
# In this case, throw does not get to the first yield point.
#
def test_throw_unstarted():
def f3():
# next() hasn't been called yet, so throw() shouldn't execute any of the body;
# the generator is still positioned before (outside) the try block on the first line.
try:
Assert(False)
yield 5
except:
Assert(False)
# 'Test: throw before first yield'
g = f3()
try:
g.throw(MyError)
Assert(False)
except MyError:
pass
EnsureClosed(g) # generator should now be closed.
# Throw after closed, should raise its exception,
# not another StopIteration / other exception
# Note this is a little inconsistent with Next(), which raises a StopIteration exception
# on closed generators.
def test_throw_closed():
def f5():
yield 1
g=f5()
AreEqual(g.next(),1)
# Loop this to ensure that we're in steady state.
for i in range(0,3):
try:
# G is now closed.
g.throw(MyError)
Assert(False)
except MyError:
pass # 'Good: caught our own exception'
#
# test that a generator.Throw() works when stopped in a finally
#
def test_throw_from_finally():
def f(l):
try:
pass
finally:
pass # ' good: inside finally'
l[0] = 1
yield 1
try:
yield 2 # throw here
Assert(False) #
except MyError:
l[0] = 2
l=[0]
g=f(l)
AreEqual(g.next(), 1)
AreEqual(l[0], 1)
AreEqual(g.next(), 2) # move into the try/except block
try:
# throw, it will catch and run to completion
g.throw(MyError)
Assert(False)
except StopIteration:
AreEqual(l[0], 2)
pass # ' good: threw and generator ran to completion'
pass
#
# Test that finallys properly execute when Gen.Throw is called.
# This verifies that the exception is really being raised from the right spot
# within the generator body.
#
# simple generator with finally
# set l[0]=1 to indicate that finally block was executed.
def f1(l):
yield 1
try:
yield 2
pass # ' Non exception case'
finally:
pass # ' inside finally'
l[0] = 1
yield 3
def test_throw_run_finally_nonexception():
# Sanity check
# 'Test: simple finally, no exception'
l = [0]
g=f1(l)
AreEqual(g.next(), 1)
AreEqual(g.next(), 2)
AreEqual(l[0], 0)
AreEqual(g.next(), 3)
AreEqual(l[0], 1)
EnsureClosed(g)
#
# Now try throwing before finally
#
def test_throw_before_finally():
l = [0]
g=f1(l)
AreEqual(g.next(), 1)
try:
g.throw(MyError)
Assert(False)
except MyError:
pass
AreEqual(l[0], 0) # finally should not have been executed
# since we terminated with an exception, generator should be closed
EnsureClosed(g)
#
# Now try throwing in range of finally, so that finally is executed
#
def test_throw_run_finally_exception():
# print 'Test: throw inside try-finally'
l = [0]
g=f1(l)
AreEqual(g.next(), 1)
AreEqual(g.next(), 2)
try:
g.throw(MyError)
Assert(False)
except MyError:
pass
# since we terminated with an exception, generator should be closed
EnsureClosed(g)
AreEqual(l[0], 1) # finally should have run
#
# Test that code/exceptions are being invoked from the right callstack,
# either
# a) inside the generator body, or
# b) at the call to Generator.Throw(), but outside the generator body.
# This is important so that the right set of catch blocks get applied.
#
# Creating the exception occurs inside the generator.
def test_ctor_throws():
# Simple class to raise an error in __init__
class MyErrorClass:
def __init__(self):
raise MyError
def f():
try:
yield 5
yield 7
except MyError:
yield 12
g=f()
AreEqual(g.next(), 5)
# Instantiating MyErrorClass raises MyError. The instantiation happens inside the
# generator's body, so the generator can catch the error and continue running to yield a value.
AreEqual(g.throw(MyErrorClass), 12)
g.close()
#
# Test corner case with Throw(None)
#
def test_throw_none():
def f():
try:
yield 5 # we'll be stopped here and do g.throw(none)
yield 10
except TypeError:
Assert(False) # error shouldn't be raised inside of generator, so can't be caught here
g=f()
AreEqual(g.next(), 5)
# g.throw(None) should:
# - raise a TypeError immediately, not from the generator body (so the generator can't catch it)
# - leave the generator's state unchanged
def t():
g.throw(None)
AssertError(TypeError, t)
# verify that generator is still valid and can be resumed
AreEqual(g.next(), 10)
#
# Test close(), which builds on throw()
#
def f(l):
try:
yield 1
finally:
l[0] += 1
l =[0]
g=f(l)
# Test close() on unstarted and closed generators
def test_close_ends():
def f():
Assert(False) # we won't execute the generator
yield 10
g = f()
g.close() # close on unstarted
EnsureClosed(g)
g.close() # close() again on the already-closed generator should be a no-op.
f().close() # close() on another unstarted generator is likewise a no-op.
def test_close_catch_exit():
def f():
try:
yield 1 # caller will close() from here
Assert(False)
except GeneratorExit:
pass # catch but exit, that's ok.
g=f()
AreEqual(g.next(), 1)
g.close()
def test_close_rethrow():
def f():
try:
yield 1 # caller will close() from here
Assert(False)
except GeneratorExit:
# print 'caught and rethrow'
raise MyError
g=f()
AreEqual(g.next(), 1)
# close(), which will raise a GeneratorExit, which gets caught and rethrown as MyError
def t():
g.close()
AssertError(MyError, t)
def test_close_illegal_swallow():
def f():
try:
yield 1 # caller will close() from here
Assert(False)
except GeneratorExit:
yield 2 # illegal, don't swallow GeneratorExit
g=f()
AreEqual(g.next(), 1)
# close() raises a GeneratorExit; the generator illegally yields instead of exiting, so close() raises RuntimeError
def t():
g.close()
AssertError(RuntimeError, t)
#
# A (yield) expression can appear in practically any spot where a normal expression can.
# Test a smattering of interesting spots for coverage.
#
#
# this is straight from the sample in Pep342
# Useful to skip the first call to generator.next() for generators that are consumers.
def consumer(func):
def wrapper(*args,**kw):
gen = func(*args, **kw)
gen.next()
return gen
wrapper.__name__ = func.__name__
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
return wrapper
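# A minimal illustrative sketch (not part of the original test suite) of how the
# @consumer decorator is used: the wrapper has already advanced the generator to
# its first yield, so callers can send() into it immediately. The helper is
# deliberately not named test_* so it is not picked up as a test.
def _consumer_usage_sketch():
    @consumer
    def sink(collected):
        while True:
            collected.append((yield))
    items = []
    s = sink(items)   # already primed by @consumer; no explicit next() needed
    s.send('a')
    s.send('b')
    AreEqual(items, ['a', 'b'])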
# Yield in the middle of a tuple
def test_exp_tuple():
def f():
yield (1,(yield),3)
g=f()
g.next()
AreEqual(g.send(5), (1, 5, 3))
#
# Yield as a base class
#
def test_exp_base_class():
class MyBase(object):
def b(self):
return 5
# generator to make a base class.
@consumer
def M():
# yield expression as a base class.
class Foo((yield)):
def m(self):
print 'x'
yield Foo
g=M()
F = g.send(MyBase)
c=F()
AreEqual(c.b(),5) # invokes base method
#
# In print redirection slot.
#
class MyWriter:
data=""
def write(self,l):
self.data += l
def test_exp_print_redirect():
@consumer
def f(text):
print >> (yield), text,
yield # extra spot to stop on so send() won't immediately throw
c=MyWriter()
f("abc").send(c)
AreEqual(c.data, "abc")
#
# In dict literals
#
def test_exp_dict_literals():
def f():
# Note the evaluation order: within each key:value pair the value is evaluated
# before the key, i.e. the yields run in the order marked {2nd:1st, 4th:3rd}.
d = { (yield 2): (yield 1), (yield): (yield) }
yield d
g=f()
AreEqual(g.next(), 1)
AreEqual(g.send('a'), 2)
g.send(10)
g.send('b')
d2 = g.send(20) # {10: 'a', 20: 'b'}
AreEqual(d2, {10: 'a', 20: 'b'})
#
# Test yield expressions in compound comparisons
#
def gen_compare():
f = ((yield 1) < (yield 2) < (yield 3))
yield f
# Compare expecting true.
def test_exp_compare1():
g=gen_compare()
AreEqual(g.next(), 1)
AreEqual(g.send(5), 2)
AreEqual(g.send(10), 3)
AreEqual(g.send(15), True)
EnsureClosed(g)
# compare expecting false. This will short-circuit
def test_exp_compare2():
g=gen_compare()
AreEqual(g.next(), 1)
AreEqual(g.send(5), 2)
AreEqual(g.send(2), False)
EnsureClosed(g)
#
# Use as the argument to Raise
#
def test_exp_raise():
@consumer
def f():
raise (yield), (yield)
Assert(False)
g=f()
g.send(ValueError)
try:
g.send(15)
except ValueError, x:
AreEqual(x.args[0], 15)
# Generator is now closed
EnsureClosed(g)
#
# Slicing. Nothing fancy here, just another place to try yield
#
def test_exp_slice():
@consumer
def f():
l=range(0,10)
yield l[(yield):(yield)]
g=f()
g.send(4)
AreEqual(g.send(7), [4, 5, 6])
#
# Layering. Have multiple coroutines calling each other.
#
def test_layering():
# manually implement the @consumer pattern
def append_dict(d):
def f2():
while True:
(a,b) = ((yield), (yield))
d[a]=(b)
g=f2()
g.next()
return g
# Wrapper around a generator.
@consumer
def splitter(g):
# take in a tuple, split it apart
try:
while True:
for x in (yield):
g.send(x)
finally:
g.close()
d={}
g=splitter(append_dict(d))
#
g.send(('a', 10))
AreEqual(d, {'a': 10})
#
g.send(('b', 20))
AreEqual(d, {'a': 10, 'b': 20})
#
g.send(('c', 30))
AreEqual(d, {'a': 10, 'c': 30, 'b': 20})
#
# watered down example from Pep342
#
def test_layering_2():
#
@consumer
def Pager(dest):
# group in threes
while True:
try:
s = ""
s += '[%s,' % ((yield))
s += str((yield))
s += ',%d]' % ((yield))
except GeneratorExit:
dest.send(s + "...incomplete")
dest.close()
return
else:
dest.send(s)
#
@consumer
def Writer(outstream):
while True:
try:
print >> outstream, 'Page=' + (yield)
except GeneratorExit:
print >> outstream, 'done'
raise
#
def DoIt(l, outstream):
pipeline = Pager(Writer(outstream))
for i in l:
pipeline.send(i)
pipeline.close()
#
o=MyWriter()
DoIt(range(8), o)
AreEqual(o.data, 'Page=[0,1,2]\nPage=[3,4,5]\nPage=[6,7...incomplete\ndone\n')
#
# Test Yield in expressions in an except block
# even crazier example, (yield) in both Type + Value spots in Except clause
#
# generator to use with test_yield_except_crazy*
def getCatch():
yield 1
l=[0,1,2]
try:
raise MyError, 'a'
except (yield 'a'), l[(yield 'b')]:
AreEqual(sys.exc_info(), (None,None,None)) # sys.exc_info() is not set at this point
Assert(l[1] != 1) # validate that the except clause bound the caught exception to l[1]
yield 'c'
except (yield 'c'): # especially interesting here
yield 'd'
except:
print 'Not caught'
print 4
# executes the generator's 1st except clause
def test_yield_except_crazy1():
g=getCatch()
AreEqual(g.next(), 1)
AreEqual(g.next(), 'a')
AreEqual(sys.exc_info(), (None, None, None))
AreEqual(g.send(MyError), 'b')
AreEqual(sys.exc_info(), (None, None, None))
AreEqual(g.send(1), 'c')
g.close()
# executes the generator's 2nd except clause
def test_yield_except_crazy2():
# try the 2nd clause
g=getCatch()
AreEqual(g.next(), 1)
AreEqual(g.next(), 'a')
AreEqual(g.send(ValueError), 'c') # Cause us to skip the first except handler
AreEqual(g.send(MyError), 'd')
g.close()
# Yield statements without any return values.
def test_yield_empty():
def f():
yield
g = f()
AreEqual(g.next(), None)
def f():
if True:
yield
yield
g = f()
AreEqual(g.next(), None)
AreEqual(g.next(), None)
def test_throw_stop_iteration():
def f():
raise StopIteration('foo')
yield 3
x = f()
try:
x.next()
except StopIteration, e:
AreEqual(e.message, 'foo')
run_test(__name__)
|
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.runtime import Context
from units.compat import unittest
from units.compat.mock import patch
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.template import Templar, AnsibleContext, AnsibleEnvironment
from ansible.utils.unsafe_proxy import AnsibleUnsafe, wrap_var
from units.mock.loader import DictDataLoader
class BaseTemplar(object):
def setUp(self):
self.test_vars = dict(
foo="bar",
bam="{{foo}}",
num=1,
var_true=True,
var_false=False,
var_dict=dict(a="b"),
bad_dict="{a='b'",
var_list=[1],
recursive="{{recursive}}",
some_var="blip",
some_static_var="static_blip",
some_keyword="{{ foo }}",
some_unsafe_var=wrap_var("unsafe_blip"),
some_static_unsafe_var=wrap_var("static_unsafe_blip"),
some_unsafe_keyword=wrap_var("{{ foo }}"),
str_with_error="{{ 'str' | from_json }}",
)
self.fake_loader = DictDataLoader({
"/path/to/my_file.txt": "foo\n",
})
self.templar = Templar(loader=self.fake_loader, variables=self.test_vars)
def is_unsafe(self, obj):
if obj is None:
return False
if hasattr(obj, '__UNSAFE__'):
return True
if isinstance(obj, AnsibleUnsafe):
return True
if isinstance(obj, dict):
for key in obj.keys():
if self.is_unsafe(key) or self.is_unsafe(obj[key]):
return True
if isinstance(obj, list):
for item in obj:
if self.is_unsafe(item):
return True
if isinstance(obj, string_types) and hasattr(obj, '__UNSAFE__'):
return True
return False
# class used for testing arbitrary objects passed to template
class SomeClass(object):
foo = 'bar'
def __init__(self):
self.blip = 'blip'
class SomeUnsafeClass(AnsibleUnsafe):
def __init__(self):
super(SomeUnsafeClass, self).__init__()
self.blip = 'unsafe blip'
class TestTemplarTemplate(BaseTemplar, unittest.TestCase):
def test_lookup_jinja_dict_key_in_static_vars(self):
res = self.templar.template("{'some_static_var': '{{ some_var }}'}",
static_vars=['some_static_var'])
# self.assertEqual(res['{{ a_keyword }}'], "blip")
print(res)
def test_is_possibly_template_true(self):
tests = [
'{{ foo }}',
'{% foo %}',
'{# foo #}',
'{# {{ foo }} #}',
'{# {{ nothing }} {# #}',
'{# {{ nothing }} {# #} #}',
'{% raw %}{{ foo }}{% endraw %}',
'{{',
'{%',
'{#',
'{% raw',
]
for test in tests:
self.assertTrue(self.templar.is_possibly_template(test))
def test_is_possibly_template_false(self):
tests = [
'{',
'%',
'#',
'foo',
'}}',
'%}',
'raw %}',
'#}',
]
for test in tests:
self.assertFalse(self.templar.is_possibly_template(test))
def test_is_possible_template(self):
"""This test ensures that a broken template still gets templated"""
# Purposefully invalid jinja
self.assertRaises(AnsibleError, self.templar.template, '{{ foo|default(False)) }}')
def test_is_template_true(self):
tests = [
'{{ foo }}',
'{% foo %}',
'{# foo #}',
'{# {{ foo }} #}',
'{# {{ nothing }} {# #}',
'{# {{ nothing }} {# #} #}',
'{% raw %}{{ foo }}{% endraw %}',
]
for test in tests:
self.assertTrue(self.templar.is_template(test))
def test_is_template_false(self):
tests = [
'foo',
'{{ foo',
'{% foo',
'{# foo',
'{{ foo %}',
'{{ foo #}',
'{% foo }}',
'{% foo #}',
'{# foo %}',
'{# foo }}',
'{{ foo {{',
'{% raw %}{% foo %}',
]
for test in tests:
self.assertFalse(self.templar.is_template(test))
def test_is_template_raw_string(self):
res = self.templar.is_template('foo')
self.assertFalse(res)
def test_is_template_none(self):
res = self.templar.is_template(None)
self.assertFalse(res)
def test_template_convert_bare_string(self):
res = self.templar.template('foo', convert_bare=True)
self.assertEqual(res, 'bar')
def test_template_convert_bare_nested(self):
res = self.templar.template('bam', convert_bare=True)
self.assertEqual(res, 'bar')
def test_template_convert_bare_unsafe(self):
res = self.templar.template('some_unsafe_var', convert_bare=True)
self.assertEqual(res, 'unsafe_blip')
# self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_convert_bare_filter(self):
res = self.templar.template('bam|capitalize', convert_bare=True)
self.assertEqual(res, 'Bar')
def test_template_convert_bare_filter_unsafe(self):
res = self.templar.template('some_unsafe_var|capitalize', convert_bare=True)
self.assertEqual(res, 'Unsafe_blip')
# self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_convert_data(self):
res = self.templar.template('{{foo}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
@patch('ansible.template.safe_eval', side_effect=AnsibleError)
def test_template_convert_data_template_in_data(self, mock_safe_eval):
res = self.templar.template('{{bam}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
def test_template_convert_data_bare(self):
res = self.templar.template('bam', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bam')
def test_template_convert_data_to_json(self):
res = self.templar.template('{{bam|to_json}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, '"bar"')
def test_template_convert_data_convert_bare_data_bare(self):
res = self.templar.template('bam', convert_data=True, convert_bare=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
def test_template_unsafe_non_string(self):
unsafe_obj = AnsibleUnsafe()
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_unsafe_non_string_subclass(self):
unsafe_obj = SomeUnsafeClass()
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_weird(self):
data = u'''1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7'''
self.assertRaisesRegexp(AnsibleError,
'template error while templating string',
self.templar.template,
data)
def test_template_with_error(self):
"""Check that AnsibleError is raised, fail if an unhandled exception is raised"""
self.assertRaises(AnsibleError, self.templar.template, "{{ str_with_error }}")
class TestTemplarMisc(BaseTemplar, unittest.TestCase):
def test_templar_simple(self):
templar = self.templar
# test some basic templating
self.assertEqual(templar.template("{{foo}}"), "bar")
self.assertEqual(templar.template("{{foo}}\n"), "bar\n")
self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar")
self.assertEqual(templar.template("{{bam}}"), "bar")
self.assertEqual(templar.template("{{num}}"), 1)
self.assertEqual(templar.template("{{var_true}}"), True)
self.assertEqual(templar.template("{{var_false}}"), False)
self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
self.assertEqual(templar.template("{{var_list}}"), [1])
self.assertEqual(templar.template(1, convert_bare=True), 1)
# force errors
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}")
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}")
self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")
# test with fail_on_undefined=False
self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")
# test setting available_variables
templar.available_variables = dict(foo="bam")
self.assertEqual(templar.template("{{foo}}"), "bam")
# variables must be a dict() for available_variables setter
# FIXME Use assertRaises() as a context manager (added in 2.7) once we do not run tests on Python 2.6 anymore.
try:
templar.available_variables = "foo=bam"
except AssertionError:
pass
except Exception as e:
self.fail(e)
def test_templar_escape_backslashes(self):
# Rule of thumb: If escape backslashes is True you should end up with
# the same number of backslashes as when you started.
self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar")
self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar")
self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar")
self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar")
self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t")
self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t")
self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t")
def test_template_jinja2_extensions(self):
fake_loader = DictDataLoader({})
templar = Templar(loader=fake_loader)
old_exts = C.DEFAULT_JINJA2_EXTENSIONS
try:
C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
finally:
C.DEFAULT_JINJA2_EXTENSIONS = old_exts
class TestTemplarLookup(BaseTemplar, unittest.TestCase):
def test_lookup_missing_plugin(self):
self.assertRaisesRegexp(AnsibleError,
r'lookup plugin \(not_a_real_lookup_plugin\) not found',
self.templar._lookup,
'not_a_real_lookup_plugin',
'an_arg', a_keyword_arg='a_keyword_arg_value')
def test_lookup_list(self):
res = self.templar._lookup('list', 'an_arg', 'another_arg')
self.assertEqual(res, 'an_arg,another_arg')
def test_lookup_jinja_undefined(self):
self.assertRaisesRegexp(AnsibleUndefinedVariable,
"'an_undefined_jinja_var' is undefined",
self.templar._lookup,
'list', '{{ an_undefined_jinja_var }}')
def test_lookup_jinja_defined(self):
res = self.templar._lookup('list', '{{ some_var }}')
self.assertTrue(self.is_unsafe(res))
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_string_passed(self):
self.assertRaisesRegexp(AnsibleError,
"with_dict expects a dict",
self.templar._lookup,
'dict',
'{{ some_var }}')
def test_lookup_jinja_dict_list_passed(self):
self.assertRaisesRegexp(AnsibleError,
"with_dict expects a dict",
self.templar._lookup,
'dict',
['foo', 'bar'])
def test_lookup_jinja_kwargs(self):
res = self.templar._lookup('list', 'blip', random_keyword='12345')
self.assertTrue(self.is_unsafe(res))
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_list_wantlist(self):
res = self.templar._lookup('list', '{{ some_var }}', wantlist=True)
self.assertEqual(res, ["blip"])
def test_lookup_jinja_list_wantlist_undefined(self):
self.assertRaisesRegexp(AnsibleUndefinedVariable,
"'some_undefined_var' is undefined",
self.templar._lookup,
'list',
'{{ some_undefined_var }}',
wantlist=True)
def test_lookup_jinja_list_wantlist_unsafe(self):
res = self.templar._lookup('list', '{{ some_unsafe_var }}', wantlist=True)
for lookup_result in res:
self.assertTrue(self.is_unsafe(lookup_result))
# self.assertIsInstance(lookup_result, AnsibleUnsafe)
# Should this be an AnsibleUnsafe
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict(self):
res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_var }}'})
self.assertEqual(res['{{ a_keyword }}'], "blip")
# TODO: Should this be an AnsibleUnsafe
# self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_unsafe(self):
res = self.templar._lookup('list', {'{{ some_unsafe_key }}': '{{ some_unsafe_var }}'})
self.assertTrue(self.is_unsafe(res['{{ some_unsafe_key }}']))
# self.assertIsInstance(res['{{ some_unsafe_key }}'], AnsibleUnsafe)
# TODO: Should this be an AnsibleUnsafe
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_unsafe_value(self):
res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_unsafe_var }}'})
self.assertTrue(self.is_unsafe(res['{{ a_keyword }}']))
# self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
# TODO: Should this be an AnsibleUnsafe
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_none(self):
res = self.templar._lookup('list', None)
self.assertIsNone(res)
class TestAnsibleContext(BaseTemplar, unittest.TestCase):
def _context(self, variables=None):
variables = variables or {}
env = AnsibleEnvironment()
context = AnsibleContext(env, parent={}, name='some_context',
blocks={})
for key, value in variables.items():
context.vars[key] = value
return context
def test(self):
context = self._context()
self.assertIsInstance(context, AnsibleContext)
self.assertIsInstance(context, Context)
def test_resolve_unsafe(self):
context = self._context(variables={'some_unsafe_key': wrap_var('some_unsafe_string')})
res = context.resolve('some_unsafe_key')
# self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
def test_resolve_unsafe_list(self):
context = self._context(variables={'some_unsafe_key': [wrap_var('some unsafe string 1')]})
res = context.resolve('some_unsafe_key')
# self.assertIsInstance(res[0], AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
def test_resolve_unsafe_dict(self):
context = self._context(variables={'some_unsafe_key':
{'an_unsafe_dict': wrap_var('some unsafe string 1')}
})
res = context.resolve('some_unsafe_key')
self.assertTrue(self.is_unsafe(res['an_unsafe_dict']),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res['an_unsafe_dict'])
def test_resolve(self):
context = self._context(variables={'some_key': 'some_string'})
res = context.resolve('some_key')
self.assertEqual(res, 'some_string')
# self.assertNotIsInstance(res, AnsibleUnsafe)
self.assertFalse(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
def test_resolve_none(self):
context = self._context(variables={'some_key': None})
res = context.resolve('some_key')
self.assertEqual(res, None)
# self.assertNotIsInstance(res, AnsibleUnsafe)
self.assertFalse(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
|
|
#!/usr/bin/python
#
# Written by Gavin Heverly-Coulson
# Email: gavin <at> quantumgeranium.com
#
# Reads the energies from the Quantum Espresso output files
# that result from running the jobs built by pes_builder.py
# Prints a file containing the surface energies (in J/m^2) for each
# point on the PES. Also writes a gnuplot input file to make
# an image to visualize the PES.
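#
# Typical invocation, sketched for illustration (the script takes the calculation
# prefix as its only argument and expects a "folders" file plus a_*/b_*
# subdirectories produced by pes_builder.py):
#
#   python <this_script>.py myslab
#
# which reads a_*/b_*/myslab.out and writes myslab_results.dat, myslab_plot.dat
# and myslab_plot.gp.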
#
#
# This work is licensed under a Simplified BSD License
# Copyright (c) 2014, Gavin Heverly-Coulson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import math
# Find the first line containing a particular keyword in the list of lines
# created by the reader.readlines() function.
# Returns -1 if the keyword is not found.
def findLine(lineList, keyword):
counter = 0
lineNum = -1
while (counter < len(lineList)):
if (keyword in lineList[counter]):
lineNum = counter
break
counter += 1
return lineNum
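# A small illustrative sketch of how findLine is used further below (the file
# name here is only an example):
#
#   lines = open("pw.out", 'r').readlines()
#   engLine = findLine(lines, "!")   # index of the total-energy line, or -1
#   if engLine > -1:
#       energyRy = float(lines[engLine].split()[4])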
filename = sys.argv[1]
# Open the list of folders and read it in
reader = open("folders", 'r')
files = reader.readlines()
reader.close()
# Strip the whitespace/newlines from the folderlist
foo = 0
while foo < len(files):
files[foo] = files[foo].strip()
foo += 1
outFilename = filename + ".out"
resultsFilename = filename + "_results.dat"
# Write all energies to this string, will write it to file at end
resultsString = "b\\a"
# Count the number of points on PES in a and b directions
aVals = []
bVals = []
for i in files:
temp = i.split('/')
if temp[0][2:] not in aVals:
aVals.append(temp[0][2:])
if temp[1][2:] not in bVals:
bVals.append(temp[1][2:])
#print "A Values:"
#for i in aVals:
# print i
#print "\nB Values:"
#for i in bVals:
# print i
for i in aVals:
resultsString += " " + i
resultsString += "\n"
# Build the empty matrix for the energies
# Will be indexed as: energies[b][a]
energies = [[None for i in range(len(aVals))] for j in range(len(bVals))]
# Calculate the cross-sectional area (area of AB plane)
reader = open("{0}/{1}.in".format(files[0], filename), 'r')
inFile = reader.readlines()
reader.close()
celldmLine = findLine(inFile, "celldm(1)")
celldmTemp = inFile[celldmLine].split(',')[0]
celldm = float(celldmTemp.split('=')[1]) * 5.2917721e-11 # celldm(1) is in Bohr; convert to metres
latVectsLine = findLine(inFile, "CELL_")
aLength = (celldm * float(inFile[latVectsLine+1].split()[0]))
b_x = (celldm * float(inFile[latVectsLine+2].split()[0]))
b_y = (celldm * float(inFile[latVectsLine+2].split()[1]))
bLength = math.sqrt( (b_x**2) + (b_y**2) )
xsArea = aLength * bLength
for a in range(len(aVals)):
for b in range(len(bVals)):
print "delta A = {0}, delta B = {1}".format(aVals[a], bVals[b])
filePath = "a_{0}/b_{1}/{2}".format(aVals[a], bVals[b], outFilename)
reader = open(filePath, 'r')
outFile = reader.readlines()
reader.close()
# check if calculation converged
converged = findLine(outFile, "convergence NOT achieved")
if (converged > -1):
energies[b][a] = "XXXX"
print " NOT converged"
else:
# Working under the assumption that this was an SCF job
engLine = findLine(outFile, "!")
energyRy = float(outFile[engLine].split()[4])
energyJ = energyRy * 2.1798715e-18 # 1 Ry = 2.1798715e-18 J
surfaceEnergy = energyJ / (aLength * bLength) # Energy in J/m^2
print " Energy Ry = {0}\n Surface Energy = {1}".format(energyRy, surfaceEnergy)
energies[b][a] = surfaceEnergy
# Add the energies we just calculated to the resultsString
for b in range(len(bVals)):
resultsString += bVals[b]
for a in range(len(aVals)):
resultsString += " " + str(energies[b][a])
resultsString += "\n"
# Write the resultsString to resultsFilename
writer = open(resultsFilename, 'w')
writer.write(resultsString)
writer.close()
# Calculate relative energies and format them for making the plot in gnuplot
stepSize = float(aVals[1]) - float(aVals[0])
# Ensure that the starting high and low energies aren't "XXXX"
for i in range(len(energies[0])):
if energies[0][i] != "XXXX":
lowestEng = energies[0][i]
highestEng = energies[0][i]
break
# Find the highest and lowest energies
for i in energies:
for j in i:
if (j < lowestEng) and (j != "XXXX"):
lowestEng = j
elif (j > highestEng) and (j != "XXXX"):
highestEng = j
print "Lowest energy = {0}\nHighest energy = {1}".format(lowestEng, highestEng)
deltaE = []
for i in energies:
temp = []
for j in i:
# If point didn't converge, set its relative energy to the largest difference observed
if j == "XXXX":
temp.append((highestEng - lowestEng))
else:
temp.append((j - lowestEng))
deltaE.append(temp)
plotDataName = filename + "_plot.dat"
writer = open(plotDataName, 'w')
for i in deltaE:
for j in i:
writer.write(" " + str(j))
writer.write("\n")
writer.close()
aRange = [float(aVals[0])-(stepSize/2), float(aVals[-1])+(stepSize/2)]
bRange = [float(bVals[0])-(stepSize/2), float(bVals[-1])+(stepSize/2)]
gnuplot = []
gnuplot.append("reset\n\nset terminal png size 700,524 enhanced font 'Verdana,10'\nset output '")
gnuplot.append("{0}.png'\n\nunset key\n\nset style line 11 lc rgb '#808080' lt 1\nset border 3 front ls 11\nset tics nomirror out scale 0.75\n\n".format(filename))
gnuplot.append('set cbtics scale 0\nset palette defined ( 0 "#000090", 1 "#000fff", 2 "#0090ff", 3 "#0fffee", 4 "#90ff70", 5 "#ffee00", 6 "#ff7000", 7 "#ee0000", 8 "#7f0000")\n\n')
# Need to have aMin and bMin minus stepsize/2 and aMax and bMax plus stepsize/2
gnuplot.append("set xrange [{0[0]}:{0[1]}]\nset yrange [{1[0]}:{1[1]}]\n".format(aRange, bRange))
gnuplot.append("set xlabel 'delta A'\nset ylabel 'delta B'\nset cblabel 'Relative energy (J/m^2)'\n\n")
# Need to multiply $1 and $2 by step distance and add minimum a/b values
gnuplot.append("plot '{0}' u (($1*{1})+({2})):(($2*{3})+({4})):($3) matrix with image".format(plotDataName, stepSize, aVals[0], stepSize, bVals[0]))
gnuplotName = sys.argv[1] + "_plot.gp"
gnuWriter = open(gnuplotName, 'w')
gnuWriter.write(''.join(gnuplot))
gnuWriter.close()
# eof
|
|
# Copyright (c) 2009, Peter Sagerson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
LDAP authentication backend
Complete documentation can be found in docs/howto/auth-ldap.txt (or the thing it
compiles to).
Use of this backend requires the python-ldap module. To support unit tests, we
import ldap in a single centralized place (config._LDAPConfig) so that the test
harness can insert a mock object.
A few notes on naming conventions. If an identifier ends in _dn, it is a string
representation of a distinguished name. If it ends in _info, it is a 2-tuple
containing a DN and a dictionary of lists of attributes. ldap.search_s returns a
list of such structures. An identifier that ends in _attrs is the dictionary of
attributes from the _info structure.
A connection is an LDAPObject that has been successfully bound with a DN and
password. The identifier 'user' always refers to a User model object; LDAP user
information will be user_dn or user_info.
Additional classes can be found in the config module next to this one.
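A minimal, illustrative configuration sketch (the setting names are the ones
handled by LDAPSettings below; the values are placeholders, and the dotted
backend path assumes this module lives at django_auth_ldap.backend):

    AUTH_LDAP_SERVER_URI = "ldap://ldap.example.com"
    AUTH_LDAP_BIND_DN = "cn=service,dc=example,dc=com"
    AUTH_LDAP_BIND_PASSWORD = "secret"
    AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
                                       ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
    AUTHENTICATION_BACKENDS = ("django_auth_ldap.backend.LDAPBackend",)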
"""
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
import sys
import traceback
import pprint
import copy
import django.db
from django.contrib.auth.models import User, Group, Permission, SiteProfileNotAvailable
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
import django.dispatch
from django_auth_ldap.config import _LDAPConfig, LDAPSearch, LDAPGroupType
logger = _LDAPConfig.get_logger()
# Signals for populating user objects.
populate_user = django.dispatch.Signal(providing_args=["user", "ldap_user"])
populate_user_profile = django.dispatch.Signal(providing_args=["profile", "ldap_user"])
class LDAPBackend(object):
"""
The main backend class. This implements the auth backend API, although it
actually delegates most of its work to _LDAPUser, which is defined next.
"""
supports_anonymous_user = False
supports_object_permissions = False
ldap = None # The cached ldap module (or mock object)
def __init__(self):
self.ldap = self.ldap_module()
def ldap_module(cls):
"""
Requests the ldap module from _LDAPConfig. Under a test harness, this
will be a mock object. We only do this once because this is where we
apply AUTH_LDAP_GLOBAL_OPTIONS.
"""
if cls.ldap is None:
cls.ldap = _LDAPConfig.get_ldap()
for opt, value in ldap_settings.AUTH_LDAP_GLOBAL_OPTIONS.iteritems():
cls.ldap.set_option(opt, value)
return cls.ldap
ldap_module = classmethod(ldap_module)
#
# The Django auth backend API
#
def authenticate(self, username, password):
ldap_user = _LDAPUser(self, username=username)
user = ldap_user.authenticate(password)
return user
def get_user(self, user_id):
user = None
try:
user = User.objects.get(pk=user_id)
_LDAPUser(self, user=user) # This sets user.ldap_user
except User.DoesNotExist:
pass
return user
def has_perm(self, user, perm):
return perm in self.get_all_permissions(user)
def has_module_perms(self, user, app_label):
for perm in self.get_all_permissions(user):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_all_permissions(self, user):
return self.get_group_permissions(user)
def get_group_permissions(self, user):
if not hasattr(user, 'ldap_user') and ldap_settings.AUTH_LDAP_AUTHORIZE_ALL_USERS:
_LDAPUser(self, user=user) # This sets user.ldap_user
if hasattr(user, 'ldap_user'):
return user.ldap_user.get_group_permissions()
else:
return set()
#
# Bonus API: populate the Django user from LDAP without authenticating.
#
def populate_user(self, username):
ldap_user = _LDAPUser(self, username=username)
user = ldap_user.populate_user()
return user
#
# Hooks for subclasses
#
def get_or_create_user(self, username, ldap_user):
"""
This must return a (User, created) 2-tuple for the given LDAP user.
username is the Django-friendly username of the user. ldap_user.dn is
the user's DN and ldap_user.attrs contains all of their LDAP attributes.
"""
return User.objects.get_or_create(username__iexact=username, defaults={'username': username.lower()})
def ldap_to_django_username(self, username):
return username
def django_to_ldap_username(self, username):
return username
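# An illustrative subclass sketch (not part of this module) showing how the hooks
# above can be overridden, e.g. to map between Django usernames and a
# realm-qualified LDAP naming scheme:
#
#   class ExampleLDAPBackend(LDAPBackend):
#       def django_to_ldap_username(self, username):
#           return username + u'@example.com'
#       def ldap_to_django_username(self, username):
#           return username.rsplit(u'@', 1)[0]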
class _LDAPUser(object):
"""
Represents an LDAP user and ultimately fields all requests that the
backend receives. This class exists for two reasons. First, it's
convenient to have a separate object for each request so that we can use
object attributes without running into threading problems. Second, these
objects get attached to the User objects, which allows us to cache
expensive LDAP information, especially around groups and permissions.
self.backend is a reference back to the LDAPBackend instance, which we need
to access the ldap module and any hooks that a subclass has overridden.
"""
class AuthenticationFailed(Exception):
pass
#
# Initialization
#
def __init__(self, backend, username=None, user=None):
"""
A new LDAPUser must be initialized with either a username or an
authenticated User object. If a user is given, the username will be
ignored.
"""
self.backend = backend
self.ldap = backend.ldap_module()
self._username = username
self._user_dn = None
self._user_attrs = None
self._user = None
self._groups = None
self._group_permissions = None
self._connection = None
self._connection_bound = False # True if we're bound as AUTH_LDAP_BIND_*
if user is not None:
self._set_authenticated_user(user)
if username is None and user is None:
raise Exception("Internal error: _LDAPUser improperly initialized.")
def __deepcopy__(self, memo):
obj = object.__new__(self.__class__)
obj.backend = self.backend
obj.ldap = self.ldap
obj._user = copy.deepcopy(self._user, memo)
# This is all just cached immutable data. There's no point copying it.
obj._username = self._username
obj._user_dn = self._user_dn
obj._user_attrs = self._user_attrs
obj._groups = self._groups
obj._group_permissions = self._group_permissions
# The connection couldn't be copied even if we wanted to
obj._connection = self._connection
obj._connection_bound = self._connection_bound
return obj
def _set_authenticated_user(self, user):
self._user = user
self._username = self.backend.django_to_ldap_username(user.username)
user.ldap_user = self
user.ldap_username = self._username
#
# Entry points
#
def authenticate(self, password):
"""
Authenticates against the LDAP directory and returns the corresponding
User object if successful. Returns None on failure.
"""
user = None
try:
self._authenticate_user_dn(password)
self._check_requirements()
self._get_or_create_user()
user = self._user
except self.AuthenticationFailed, e:
logger.debug(u"Authentication failed for %s" % self._username)
except self.ldap.LDAPError, e:
logger.warning(u"Caught LDAPError while authenticating %s: %s",
self._username, pprint.pformat(e))
except Exception, e:
logger.error(u"Caught Exception while authenticating %s: %s",
self._username, pprint.pformat(e))
logger.error(''.join(traceback.format_tb(sys.exc_info()[2])))
raise
return user
def get_group_permissions(self):
"""
If allowed by the configuration, this returns the set of permissions
defined by the user's LDAP group memberships.
"""
if self._group_permissions is None:
self._group_permissions = set()
if ldap_settings.AUTH_LDAP_FIND_GROUP_PERMS:
try:
self._load_group_permissions()
except self.ldap.LDAPError, e:
logger.warning("Caught LDAPError loading group permissions: %s",
pprint.pformat(e))
return self._group_permissions
def populate_user(self):
"""
Populates the Django user object using the default bind credentials.
"""
user = None
try:
self._get_or_create_user(force_populate=True)
user = self._user
except self.ldap.LDAPError, e:
logger.warning(u"Caught LDAPError while authenticating %s: %s",
self._username, pprint.pformat(e))
except Exception, e:
logger.error(u"Caught Exception while authenticating %s: %s",
self._username, pprint.pformat(e))
logger.error(''.join(traceback.format_tb(sys.exc_info()[2])))
raise
return user
#
# Public properties (callbacks). These are all lazy for performance reasons.
#
def _get_user_dn(self):
if self._user_dn is None:
self._load_user_dn()
return self._user_dn
dn = property(_get_user_dn)
def _get_user_attrs(self):
if self._user_attrs is None:
self._load_user_attrs()
return self._user_attrs
attrs = property(_get_user_attrs)
def _get_bound_connection(self):
if not self._connection_bound:
self._bind()
return self._get_connection()
connection = property(_get_bound_connection)
#
# Authentication
#
def _authenticate_user_dn(self, password):
"""
Binds to the LDAP server with the user's DN and password. Raises
AuthenticationFailed on failure.
"""
if self.dn is None:
raise self.AuthenticationFailed("Failed to map the username to a DN.")
try:
self._bind_as(self.dn, password)
except self.ldap.INVALID_CREDENTIALS:
raise self.AuthenticationFailed("User DN/password rejected by LDAP server.")
def _load_user_attrs(self):
if self.dn is not None:
search = LDAPSearch(self.dn, self.ldap.SCOPE_BASE)
results = search.execute(self.connection)
if results is not None and len(results) > 0:
self._user_attrs = results[0][1]
def _load_user_dn(self):
"""
Populates self._user_dn with the distinguished name of our user. This
will either construct the DN from a template in
AUTH_LDAP_USER_DN_TEMPLATE or connect to the server and search for it.
"""
if self._using_simple_bind_mode():
self._construct_simple_user_dn()
else:
self._search_for_user_dn()
def _using_simple_bind_mode(self):
return (ldap_settings.AUTH_LDAP_USER_DN_TEMPLATE is not None)
def _construct_simple_user_dn(self):
template = ldap_settings.AUTH_LDAP_USER_DN_TEMPLATE
username = self.ldap.dn.escape_dn_chars(self._username)
self._user_dn = template % {'user': username}
def _search_for_user_dn(self):
"""
Searches the directory for a user matching AUTH_LDAP_USER_SEARCH.
Populates self._user_dn and self._user_attrs.
"""
search = ldap_settings.AUTH_LDAP_USER_SEARCH
if search is None:
raise ImproperlyConfigured('AUTH_LDAP_USER_SEARCH must be an LDAPSearch instance.')
results = search.execute(self.connection, {'user': self._username})
if results is not None and len(results) == 1:
(self._user_dn, self._user_attrs) = results[0]
def _check_requirements(self):
"""
Checks all authentication requirements beyond credentials. Raises
AuthenticationFailed on failure.
"""
self._check_required_group()
def _check_required_group(self):
"""
Raises AuthenticationFailed if the group requirement
(AUTH_LDAP_REQUIRE_GROUP) is not met. Does nothing when it is None.
"""
required_group_dn = ldap_settings.AUTH_LDAP_REQUIRE_GROUP
if required_group_dn is not None:
is_member = self._get_groups().is_member_of(required_group_dn)
if not is_member:
raise self.AuthenticationFailed("User is not a member of AUTH_LDAP_REQUIRE_GROUP")
#
# User management
#
def _get_or_create_user(self, force_populate=False):
"""
Loads the User model object from the database or creates it if it
doesn't exist. Also populates the fields, subject to
AUTH_LDAP_ALWAYS_UPDATE_USER.
"""
save_user = False
username = self.backend.ldap_to_django_username(self._username)
self._user, created = self.backend.get_or_create_user(username, self)
self._user.ldap_user = self
self._user.ldap_username = self._username
should_populate = force_populate or ldap_settings.AUTH_LDAP_ALWAYS_UPDATE_USER or created
if created:
logger.debug("Created Django user %s", username)
self._user.set_unusable_password()
save_user = True
if should_populate:
logger.debug("Populating Django user %s", username)
self._populate_user()
save_user = True
if ldap_settings.AUTH_LDAP_MIRROR_GROUPS:
self._mirror_groups()
# Give the client a chance to finish populating the user just before
# saving.
if should_populate:
signal_responses = populate_user.send(self.backend.__class__, user=self._user, ldap_user=self)
if len(signal_responses) > 0:
save_user = True
if save_user:
self._user.save()
# We populate the profile after the user model is saved to give the
# client a chance to create the profile.
if should_populate:
self._populate_and_save_user_profile()
def _populate_user(self):
"""
Populates our User object with information from the LDAP directory.
"""
self._populate_user_from_attributes()
self._populate_user_from_group_memberships()
def _populate_user_from_attributes(self):
for field, attr in ldap_settings.AUTH_LDAP_USER_ATTR_MAP.iteritems():
try:
setattr(self._user, field, self.attrs[attr][0])
except (KeyError, IndexError):
logger.warning("%s does not have a value for the attribute %s", self.dn, attr)
def _populate_user_from_group_memberships(self):
for field, group_dn in ldap_settings.AUTH_LDAP_USER_FLAGS_BY_GROUP.iteritems():
value = self._get_groups().is_member_of(group_dn)
setattr(self._user, field, value)
def _populate_and_save_user_profile(self):
"""
Populates a User profile object with fields from the LDAP directory.
"""
try:
profile = self._user.get_profile()
save_profile = False
logger.debug("Populating Django user profile for %s", self._user.username)
for field, attr in ldap_settings.AUTH_LDAP_PROFILE_ATTR_MAP.iteritems():
try:
# user_attrs is a hash of lists of attribute values
setattr(profile, field, self.attrs[attr][0])
save_profile = True
except (KeyError, IndexError):
logger.warning("%s does not have a value for the attribute %s", self.dn, attr)
signal_responses = populate_user_profile.send(self.backend.__class__, profile=profile, ldap_user=self)
if len(signal_responses) > 0:
save_profile = True
if save_profile:
profile.save()
except (SiteProfileNotAvailable, ObjectDoesNotExist):
logger.debug("Django user %s does not have a profile to populate", self._user.username)
def _mirror_groups(self):
"""
Mirrors the user's LDAP groups in the Django database and updates the
user's membership.
"""
group_names = self._get_groups().get_group_names()
groups = [Group.objects.get_or_create(name=group_name)[0] for group_name
in group_names]
self._user.groups = groups
#
# Group information
#
def _load_group_permissions(self):
"""
Populates self._group_permissions based on LDAP group membership and
Django group permissions.
"""
group_names = self._get_groups().get_group_names()
perms = Permission.objects.filter(group__name__in=group_names
).values_list('content_type__app_label', 'codename'
).order_by()
self._group_permissions = set(["%s.%s" % (ct, name) for ct, name in perms])
def _get_groups(self):
"""
Returns an _LDAPUserGroups object, which can determine group
membership.
"""
if self._groups is None:
self._groups = _LDAPUserGroups(self)
return self._groups
#
# LDAP connection
#
def _bind(self):
"""
Binds to the LDAP server with AUTH_LDAP_BIND_DN and
AUTH_LDAP_BIND_PASSWORD.
"""
self._bind_as(ldap_settings.AUTH_LDAP_BIND_DN,
ldap_settings.AUTH_LDAP_BIND_PASSWORD)
self._connection_bound = True
def _bind_as(self, bind_dn, bind_password):
"""
Binds to the LDAP server with the given credentials. This does not trap
exceptions.
If successful, we set self._connection_bound to False under the
assumption that we're not binding as the default user. Callers can set
it to True as appropriate.
"""
self._get_connection().simple_bind_s(bind_dn.encode('utf-8'),
bind_password.encode('utf-8'))
self._connection_bound = False
def _get_connection(self):
"""
Returns our cached LDAPObject, which may or may not be bound.
"""
if self._connection is None:
self._connection = self.ldap.initialize(ldap_settings.AUTH_LDAP_SERVER_URI)
for opt, value in ldap_settings.AUTH_LDAP_CONNECTION_OPTIONS.iteritems():
self._connection.set_option(opt, value)
if ldap_settings.AUTH_LDAP_START_TLS:
logger.debug("Initiating TLS")
self._connection.start_tls_s()
return self._connection
class _LDAPUserGroups(object):
"""
Represents the set of groups that a user belongs to.
"""
def __init__(self, ldap_user):
self._ldap_user = ldap_user
self._group_type = None
self._group_search = None
self._group_infos = None
self._group_dns = None
self._group_names = None
self._init_group_settings()
def _init_group_settings(self):
"""
Loads the settings we need to deal with groups. Raises
ImproperlyConfigured if anything's not right.
"""
self._group_type = ldap_settings.AUTH_LDAP_GROUP_TYPE
if self._group_type is None:
raise ImproperlyConfigured("AUTH_LDAP_GROUP_TYPE must be an LDAPGroupType instance.")
self._group_search = ldap_settings.AUTH_LDAP_GROUP_SEARCH
if self._group_search is None:
raise ImproperlyConfigured("AUTH_LDAP_GROUP_SEARCH must be an LDAPSearch instance.")
def get_group_names(self):
"""
Returns the list of Django group names that this user belongs to by
virtue of LDAP group memberships.
"""
if self._group_names is None:
self._load_cached_attr("_group_names")
if self._group_names is None:
group_infos = self._get_group_infos()
self._group_names = [self._group_type.group_name_from_info(group_info)
for group_info in group_infos]
self._cache_attr("_group_names")
return self._group_names
def is_member_of(self, group_dn):
"""
Returns true if our user is a member of the given group.
"""
is_member = None
# If we have self._group_dns, we'll use it. Otherwise, we'll try to
# avoid the cost of loading it.
if self._group_dns is None:
is_member = self._group_type.is_member(self._ldap_user, group_dn)
if is_member is None:
is_member = (group_dn in self._get_group_dns())
logger.debug("%s is%sa member of %s", self._ldap_user.dn,
is_member and " " or " not ", group_dn)
return is_member
def _get_group_dns(self):
"""
Returns a (cached) set of the distinguished names in self._group_infos.
"""
if self._group_dns is None:
group_infos = self._get_group_infos()
self._group_dns = set([group_info[0] for group_info in group_infos])
return self._group_dns
def _get_group_infos(self):
"""
Returns a (cached) list of group_info structures for the groups that our
user is a member of.
"""
if self._group_infos is None:
self._group_infos = self._group_type.user_groups(self._ldap_user,
self._group_search)
return self._group_infos
def _load_cached_attr(self, attr_name):
if ldap_settings.AUTH_LDAP_CACHE_GROUPS:
key = self._cache_key(attr_name)
value = cache.get(key)
setattr(self, attr_name, value)
def _cache_attr(self, attr_name):
if ldap_settings.AUTH_LDAP_CACHE_GROUPS:
key = self._cache_key(attr_name)
value = getattr(self, attr_name, None)
cache.set(key, value, ldap_settings.AUTH_LDAP_GROUP_CACHE_TIMEOUT)
def _cache_key(self, attr_name):
return u'auth_ldap.%s.%s.%s' % (self.__class__.__name__, attr_name, self._ldap_user.dn)
class LDAPSettings(object):
"""
This is a simple class to take the place of the global settings object. An
instance will contain all of our settings as attributes, with default values
if they are not specified by the configuration.
"""
defaults = {
'AUTH_LDAP_ALWAYS_UPDATE_USER': True,
'AUTH_LDAP_AUTHORIZE_ALL_USERS': False,
'AUTH_LDAP_BIND_DN': '',
'AUTH_LDAP_BIND_PASSWORD': '',
'AUTH_LDAP_CACHE_GROUPS': False,
'AUTH_LDAP_CONNECTION_OPTIONS': {},
'AUTH_LDAP_FIND_GROUP_PERMS': False,
'AUTH_LDAP_GLOBAL_OPTIONS': {},
'AUTH_LDAP_GROUP_CACHE_TIMEOUT': None,
'AUTH_LDAP_GROUP_SEARCH': None,
'AUTH_LDAP_GROUP_TYPE': None,
'AUTH_LDAP_MIRROR_GROUPS': False,
'AUTH_LDAP_PROFILE_ATTR_MAP': {},
'AUTH_LDAP_REQUIRE_GROUP': None,
'AUTH_LDAP_SERVER_URI': 'ldap://localhost',
'AUTH_LDAP_START_TLS': False,
'AUTH_LDAP_USER_ATTR_MAP': {},
'AUTH_LDAP_USER_DN_TEMPLATE': None,
'AUTH_LDAP_USER_FLAGS_BY_GROUP': {},
'AUTH_LDAP_USER_SEARCH': None,
}
def __init__(self):
"""
Loads our settings from django.conf.settings, applying defaults for any
that are omitted.
"""
from django.conf import settings
for name, default in self.defaults.iteritems():
value = getattr(settings, name, default)
setattr(self, name, value)
# Our global settings object
ldap_settings = LDAPSettings()
|
|
"""Category operations used by neo4j graph rewriting tool."""
from . import generic
from . import rewriting
from regraph.exceptions import (HierarchyError,
TypingWarning)
def pullback(b, c, d, a=None, inplace=False):
"""Find the pullback from b -> d <- c.
Returns
-------
query1 : str
Generated query for creating all the nodes in A
and the typing edges
query2 : str
Generated query for creating all the edges of A
"""
if a is None:
a = "pb_" + "_".join([b, c, d])
carry_vars = set()
# Match all the pairs of nodes with the same image in d
query1 = "MATCH (n:{})-[:typing]->(:{})<-[:typing]-(m:{})\n".format(
b, d, c)
# For each pair, collect all the merged properties
# create a new node and set the properties
query1 += (
generic.merge_properties(
var_list=["n", "m"],
new_props_var='new_props',
method='intersection',
carry_vars=carry_vars) +
rewriting.add_node(
var_name="new_node_a",
node_id="pb",
node_id_var="id_var",
node_label=a,
carry_vars=carry_vars,
ignore_naming=True)[0] +
"SET new_node_a += new_props\n" +
"SET new_node_a.id = toString(id(new_node_a))\n"
)
carry_vars.remove("id_var")
carry_vars.remove("new_props")
# Add the typing edges
query1 += (
generic.with_vars(carry_vars) + "\n" +
rewriting.add_edge(
edge_var='new_typing_to_n',
source_var='new_node_a',
target_var='n',
edge_label='typing') + "\n" +
rewriting.add_edge(
edge_var='new_typing_to_m',
source_var='new_node_a',
target_var='m',
edge_label='typing') + "\n"
)
# Add the graph edges
carry_vars = set()
query2 = (
"MATCH (x:{})-[:typing]->(:{})-[r1:edge]->(:{})<-[:typing]-(y:{}),\n".format(
a, b, b, a) +
"(x)-[:typing]->(:{})-[r2:edge]->(:{})<-[:typing]-(y)\n".format(
c, c)
)
# Collect all the merged properties of the edges r1 and r2
query2 += (
generic.merge_properties(
var_list=["r1", "r2"],
new_props_var='new_props',
method='intersection',
carry_vars={'x', 'y'}) +
"MERGE (x)-[r:edge]->(y)\n" +
"SET r += new_props"
)
return query1, query2
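# Illustrative usage sketch (added for this document, not part of the original
# module). The two Cypher strings returned by pullback() are intended to be
# executed in order against a Neo4j database; the driver URI and credentials
# below are assumptions for illustration only.
#
#     from neo4j import GraphDatabase
#     driver = GraphDatabase.driver("bolt://localhost:7687",
#                                   auth=("neo4j", "password"))
#     node_query, edge_query = pullback("B", "C", "D")
#     with driver.session() as session:
#         session.run(node_query)  # creates the nodes of A and the typing edges
#         session.run(edge_query)  # creates the edges of A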
def pushout(a, b, c, d=None, inplace=False):
"""Find the pushout of the span b <- a -> c.
Returns
-------
query1 : str
Generated query for copying the nodes of B in D
query2 : str
Generated query for creating the exclusive images
(nodes of D) of the nodes of C
query3 : str
Generated query for adding the typing edges between C and D
query4 : str
Generated query for adding edges of C in D
query5 : str
Generated query for merging the nodes in D that need
to be merged
"""
if d is None:
d = "pb_" + "_".join([a, b, c])
carry_vars = set()
c_to_d = "({}:{})<-[:typing]-(:{})-[:typing]->(:{})-[:typing]->({}:{})"
query1 = (
"\n// We copy the nodes of B in D\n" +
generic.clone_graph(
original_graph=b,
cloned_graph=d)[0]
)
query2 = (
"\n// We create the images of the exclusive nodes of C\n" +
"MATCH (m:{})\n".format(c) +
"WHERE NOT (m)<-[:typing]-(:{})\n".format(a) +
rewriting.add_node(
var_name="new_node_d",
node_id="pb",
node_id_var="id_var",
node_label=d,
carry_vars={"m"},
ignore_naming=True)[0] +
"SET new_node_d += properties(m)\n" +
"SET new_node_d.id = toString(id(new_node_d))\n" +
rewriting.add_edge(
edge_var='new_typing',
source_var='m',
target_var='new_node_d',
edge_label='typing')
)
query3 = (
"\n// We add the missing typing edges between C and D " +
"and merge the properties\n" +
"MATCH " + c_to_d.format("m", c, a, b, "x", d) + "\n" +
rewriting.add_edge(
edge_var='new_typing',
source_var='m',
target_var='x',
edge_label='typing') +
generic.merge_properties(
var_list=["m", "x"],
new_props_var='new_props',
method='union') +
"SET x += new_props\n" +
"SET x.id = toString(id(x))\n"
)
query4 = (
"\n// We add the edges of C in D\n" +
"MATCH (x:{})<-[:typing]-(:{})-[rel_c:edge]->(:{})-[:typing]->(y:{})\n".format(
d, c, c, d) +
"OPTIONAL MATCH (x)-[rel_d:edge]->(y)\n" +
"FOREACH(_ IN CASE WHEN rel_d IS NULL THEN [1] ELSE [] END |\n" +
"\tMERGE (x)-[new_rel:edge]->(y)\n" +
"\tON CREATE SET new_rel = properties(rel_c) )\n" +
generic.with_vars(['rel_c', 'rel_d']) + "\n" +
"WHERE rel_d IS NOT NULL\n" +
generic.merge_properties(
var_list=["rel_c", "rel_d"],
new_props_var='new_props',
method='union') +
"SET rel_d += new_props\n"
)
carry_vars = set()
query5 = (
"\n//We search for all the nodes in D that we need to merge\n" +
"MATCH (n:{})<-[:typing]-(:{})<-[:typing]-(:{})-[:typing]->(m:{})\n".format(
d, b, a, c) +
"WITH collect(n) as nodes_to_merge, m\n"
)
carry_vars.update(["m", "nodes_to_merge"])
query5 += (
"WITH size(nodes_to_merge) as number_of_nodes," +
", ".join(carry_vars) + "\n" +
"WHERE number_of_nodes <> 1\n"
)
carry_vars.update(["number_of_nodes"])
query5 += (
rewriting.merging_from_list(
list_var='nodes_to_merge',
merged_var='merged_node',
merged_id='id',
merged_id_var='merged_id',
node_label=d,
edge_label='edge',
merge_typing=True,
carry_vars=carry_vars,
ignore_naming=True,
multiple_rows=True,
multiple_var='m')[0] + "\n" +
generic.return_vars(["merged_id"])
)
return query1, query2, query3, query4, query5
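# Illustrative note (added for this document): the five queries returned by
# pushout() are likewise meant to be executed sequentially, e.g.
#
#     with driver.session() as session:  # `driver` as in the sketch above
#         for query in pushout("A", "B", "C"):
#             session.run(query)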
def pullback_complement(a, b, d, c=None, inplace=False):
pass
|
|
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
import socket
import sys
import time
from neutron_lib import constants as n_constants
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from osprofiler import profiler
import six
from neutron._i18n import _, _LE, _LI, _LW
from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as agent_sg_rpc
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import profiler as setup_profiler
from neutron.common import topics
from neutron import context
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
import exceptions as exc
from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm
LOG = logging.getLogger(__name__)
class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC (works with NoopFirewallDriver)
# 1.2 Support DVR (Distributed Virtual Router) RPC (not supported)
# 1.3 Added param devices_to_update to security_groups_provider_updated
# (works with NoopFirewallDriver)
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def __init__(self, context, agent, sg_agent):
super(SriovNicSwitchRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def port_update(self, context, **kwargs):
LOG.debug("port_update received")
port = kwargs.get('port')
vnic_type = port.get(portbindings.VNIC_TYPE)
if vnic_type and vnic_type == portbindings.VNIC_DIRECT_PHYSICAL:
LOG.debug("The SR-IOV agent doesn't handle %s ports.",
portbindings.VNIC_DIRECT_PHYSICAL)
return
# Put the port mac address in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
mac = port['mac_address']
pci_slot = None
if port.get(portbindings.PROFILE):
pci_slot = port[portbindings.PROFILE].get('pci_slot')
if pci_slot:
self.agent.updated_devices.add((mac, pci_slot))
LOG.debug("port_update RPC received for port: %(id)s with MAC "
"%(mac)s and PCI slot %(pci_slot)s slot",
{'id': port['id'], 'mac': mac, 'pci_slot': pci_slot})
else:
LOG.debug("No PCI Slot for port %(id)s with MAC %(mac)s; "
"skipping", {'id': port['id'], 'mac': mac,
'pci_slot': pci_slot})
def network_update(self, context, **kwargs):
network_id = kwargs['network']['id']
LOG.debug("network_update message received for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.agent.network_ports[network_id]})
for port_data in self.agent.network_ports[network_id]:
self.agent.updated_devices.add(port_data['device'])
@profiler.trace_cls("rpc")
class SriovNicSwitchAgent(object):
def __init__(self, physical_devices_mappings, exclude_devices,
polling_interval):
self.polling_interval = polling_interval
self.network_ports = collections.defaultdict(list)
self.conf = cfg.CONF
self.setup_eswitch_mgr(physical_devices_mappings,
exclude_devices)
# Stores port update notifications for processing in the main loop
self.updated_devices = set()
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc)
self._setup_rpc()
self.ext_manager = self._create_agent_extension_manager(
self.connection)
configurations = {'device_mappings': physical_devices_mappings,
'extensions': self.ext_manager.names()}
#TODO(mangelajo): optimize resource_versions (see ovs agent)
self.agent_state = {
'binary': 'neutron-sriov-nic-agent',
'host': self.conf.host,
'topic': n_constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH,
'resource_versions': resources.LOCAL_RESOURCE_VERSIONS,
'start_flag': True}
# The initialization is complete; we can start receiving messages
self.connection.consume_in_threads()
# Initialize iteration counter
self.iter_num = 0
def _setup_rpc(self):
self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
# RPC network init
# Handle updates from service
self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
self.sg_agent)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.UPDATE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers,
start_listening=False)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
devices = len(self.eswitch_mgr.get_assigned_devices_info())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
# we only want to update resource versions on startup
self.agent_state.pop('resource_versions', None)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def _create_agent_extension_manager(self, connection):
ext_manager.register_opts(self.conf)
mgr = ext_manager.L2AgentExtensionsManager(self.conf)
mgr.initialize(connection, 'sriov')
return mgr
def setup_eswitch_mgr(self, device_mappings, exclude_devices=None):
exclude_devices = exclude_devices or {}
self.eswitch_mgr = esm.ESwitchManager()
self.eswitch_mgr.discover_devices(device_mappings, exclude_devices)
def scan_devices(self, registered_devices, updated_devices):
curr_devices = self.eswitch_mgr.get_assigned_devices_info()
device_info = {}
device_info['current'] = curr_devices
device_info['added'] = curr_devices - registered_devices
# we need to clean up after devices are removed
device_info['removed'] = registered_devices - curr_devices
# we don't want to process updates for devices that don't exist
device_info['updated'] = (updated_devices & curr_devices -
device_info['removed'])
return device_info
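    # Worked example (added comment, not upstream code): with
    #   registered = {(mac1, slot1), (mac2, slot2)}
    #   current    = {(mac2, slot2), (mac3, slot3)}
    # scan_devices() reports added = {(mac3, slot3)} and
    # removed = {(mac1, slot1)}, while 'updated' keeps only the entries of
    # updated_devices that are still present in 'current'.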
def _device_info_has_changes(self, device_info):
return (device_info.get('added')
or device_info.get('updated')
or device_info.get('removed'))
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
self.sg_agent.prepare_devices_filter(device_info.get('added'))
if device_info.get('updated'):
self.sg_agent.refresh_firewall()
# Updated devices are processed the same as new ones, as their
# admin_state_up may have changed. The set union prevents duplicating
# work when a device is new and updated in the same polling iteration.
devices_added_updated = (set(device_info.get('added'))
| set(device_info.get('updated')))
if devices_added_updated:
resync_a = self.treat_devices_added_updated(devices_added_updated)
if device_info.get('removed'):
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
if self.eswitch_mgr.device_exists(device, pci_slot):
try:
self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
spoofcheck)
except Exception:
LOG.warning(_LW("Failed to set spoofcheck for device %s"),
device)
LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"),
{"device": device, "spoofcheck": spoofcheck})
try:
self.eswitch_mgr.set_device_state(device, pci_slot,
admin_state_up)
except exc.IpCommandOperationNotSupportedError:
LOG.warning(_LW("Device %s does not support state change"),
device)
except exc.SriovNicError:
LOG.warning(_LW("Failed to set device %s state"), device)
return
if admin_state_up:
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
LOG.info(_LI("No device with MAC %s defined on agent."), device)
def _update_network_ports(self, network_id, port_id, mac_pci_slot):
self._clean_network_ports(mac_pci_slot)
self.network_ports[network_id].append({
"port_id": port_id,
"device": mac_pci_slot})
def _clean_network_ports(self, mac_pci_slot):
for netid, ports_list in six.iteritems(self.network_ports):
for port_data in ports_list:
if mac_pci_slot == port_data['device']:
ports_list.remove(port_data)
if ports_list == []:
self.network_ports.pop(netid)
return port_data['port_id']
def treat_devices_added_updated(self, devices_info):
try:
macs_list = set([device_info[0] for device_info in devices_info])
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context, macs_list, self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for devices "
"with MAC addresses %(devices)s: %(e)s",
{'devices': macs_list, 'e': e})
# resync is needed
return True
for device_details in devices_details_list:
device = device_details['device']
LOG.debug("Port with MAC address %s is added", device)
if 'port_id' in device_details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': device_details})
port_id = device_details['port_id']
profile = device_details['profile']
spoofcheck = device_details.get('port_security_enabled', True)
self.treat_device(device,
profile.get('pci_slot'),
device_details['admin_state_up'],
spoofcheck)
self._update_network_ports(device_details['network_id'],
port_id,
(device, profile.get('pci_slot')))
self.ext_manager.handle_port(self.context, device_details)
else:
LOG.info(_LI("Device with MAC %s not defined on plugin"),
device)
return False
def treat_devices_removed(self, devices):
resync = False
for device in devices:
mac, pci_slot = device
LOG.info(_LI("Removing device with MAC address %(mac)s and "
"PCI slot %(pci_slot)s"),
{'mac': mac, 'pci_slot': pci_slot})
try:
port_id = self._clean_network_ports(device)
if port_id:
port = {'port_id': port_id,
'device': mac,
'profile': {'pci_slot': pci_slot}}
self.ext_manager.delete_port(self.context, port)
else:
LOG.warning(_LW("port_id to device with MAC "
"%s not found"), mac)
dev_details = self.plugin_rpc.update_device_down(self.context,
mac,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug("Removing port failed for device with MAC address "
"%(mac)s and PCI slot %(pci_slot)s due to %(exc)s",
{'mac': mac, 'pci_slot': pci_slot, 'exc': e})
resync = True
continue
if dev_details['exists']:
LOG.info(_LI("Port with MAC %(mac)s and PCI slot "
"%(pci_slot)s updated."),
{'mac': mac, 'pci_slot': pci_slot})
else:
LOG.debug("Device with MAC %(mac)s and PCI slot "
"%(pci_slot)s not defined on plugin",
{'mac': mac, 'pci_slot': pci_slot})
return resync
def daemon_loop(self):
sync = True
devices = set()
LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!"))
while True:
start = time.time()
LOG.debug("Agent rpc_loop - iteration:%d started",
self.iter_num)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
devices.clear()
sync = False
device_info = {}
# Save updated devices dict to perform rollback in case
# resync would be needed, and then clear self.updated_devices.
# As the greenthread should not yield between these
            # two statements, this should be thread-safe.
updated_devices_copy = self.updated_devices
self.updated_devices = set()
try:
device_info = self.scan_devices(devices, updated_devices_copy)
if self._device_info_has_changes(device_info):
LOG.debug("Agent loop found changes! %s", device_info)
# If treat devices fails - indicates must resync with
# plugin
sync = self.process_network_devices(device_info)
devices = device_info['current']
except Exception:
LOG.exception(_LE("Error in agent loop. Devices info: %s"),
device_info)
sync = True
# Restore devices that were removed from this set earlier
# without overwriting ones that may have arrived since.
self.updated_devices |= updated_devices_copy
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
class SriovNicAgentConfigParser(object):
def __init__(self):
self.device_mappings = {}
self.exclude_devices = {}
def parse(self):
"""Parses device_mappings and exclude_devices.
Parse and validate the consistency in both mappings
"""
self.device_mappings = helpers.parse_mappings(
cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False)
self.exclude_devices = config.parse_exclude_devices(
cfg.CONF.SRIOV_NIC.exclude_devices)
self._validate()
def _validate(self):
"""Validate configuration.
Validate that network_device in excluded_device
exists in device mappings
"""
dev_net_set = set(itertools.chain.from_iterable(
six.itervalues(self.device_mappings)))
for dev_name in self.exclude_devices.keys():
if dev_name not in dev_net_set:
raise ValueError(_("Device name %(dev_name)s is missing from "
"physical_device_mappings") % {'dev_name':
dev_name})
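    # Illustrative example (added comment; the option values are made up):
    # with physical_device_mappings = physnet1:eth3 and
    # exclude_devices = eth3:0000:07:00.2, validation passes because 'eth3'
    # appears among the mapped devices; excluding an interface that is not
    # mapped raises the ValueError above.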
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
config_parser = SriovNicAgentConfigParser()
config_parser.parse()
device_mappings = config_parser.device_mappings
exclude_devices = config_parser.exclude_devices
except ValueError:
LOG.exception(_LE("Failed on Agent configuration parse. "
"Agent terminated!"))
raise SystemExit(1)
LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
LOG.info(_LI("Exclude Devices: %s"), exclude_devices)
polling_interval = cfg.CONF.AGENT.polling_interval
try:
agent = SriovNicSwitchAgent(device_mappings,
exclude_devices,
polling_interval)
except exc.SriovNicError:
LOG.exception(_LE("Agent Initialization Failed"))
raise SystemExit(1)
# Start everything.
setup_profiler.setup("neutron-sriov-nic-agent", cfg.CONF.host)
LOG.info(_LI("Agent initialized successfully, now running... "))
agent.daemon_loop()
|
|
#takes an article, finds heads of verbs and extracts all features
print 'importing'
from nltk.tree import *
from nltk.corpus import verbnet as vn
import csv
import os
import sys
from parc_reader import ParcCorenlpReader as P
import multiprocessing
from multiprocessing import Manager
print 'imports done'
import time
import numpy as np
maxNumFiles = -1
minNumFile = 0
data_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'data/'))
#gets datapath, creates a list of files and any nested files (only goes down by one subdirectory)
def openDirectory(datapath):
listOfFiles = []
for item in os.listdir(datapath):
if os.path.isdir(os.path.join(datapath, item)):
item = os.path.join(datapath, item)
for newItem in os.listdir(item):
newItem = os.path.join(item, newItem)
listOfFiles.append(newItem)
elif os.path.isfile(os.path.join(datapath, item)):
item = os.path.join(datapath, item)
listOfFiles.append(item)
return listOfFiles
#write to a csv output file as designated by command line
def writeToTSV(rows, outputFile):
with open(outputFile, 'w') as myfile:
writer = csv.writer(myfile, delimiter=',')
writer.writerows(rows)
myfile.close()
print '\nData written to ' + outputFile + '\n'
#to multiprocess this stuff
def workerFunction(myFile, listOfAnnotatedFiles, listOfRawFiles, flagNoLabels, return_list):
filename = myFile.split('/')[-1]
fileNoXML = filename.split('.xml')[0]
#print filename
myAnnotatedFile = None
#this means there is an annotated file list, error if no corresponding file is found
if flagNoLabels == False:
myAnnotatedFile = [s for s in listOfAnnotatedFiles if filename in s]
myRawFile = [s for s in listOfRawFiles if fileNoXML in s][0]
if len(myAnnotatedFile) == 1:
myAnnotatedFile = myAnnotatedFile[0]
else:
print 'error opening Annotated File. There is probably no matching annotated file'
return
rows, article = openFile(myFile, myAnnotatedFile, myRawFile)
print filename
return_list += rows
#divides list of files into list of lists
def chunks(l, n):
n = max(1, n)
return (l[i:i+n] for i in xrange(0, len(l), n))
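# Worked example (added comment): list(chunks([1, 2, 3, 4, 5], 2)) yields
# [[1, 2], [3, 4], [5]].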
#split lists and then calls the multiprocessor
#get the files, open them, extract verbs and features and create a large array of rows
def findFiles(listOfNLPFiles, listOfAnnotatedFiles, listOfRawFiles, output):
if listOfAnnotatedFiles == None:
flagNoLabels = True
else:
flagNoLabels = False
splitLists = list(chunks(listOfNLPFiles, len(listOfNLPFiles)/10))
lastList = splitLists[-1]
del splitLists[-1]
lengthLists = len(splitLists[0])
jobs = []
manager = Manager()
return_list = manager.list()
j = 0
#first lists are all equally sized, pick one from each at each iteration
for i in range(lengthLists):
#if i == 1:
# break
for thisList in splitLists:
myFile = thisList[i]
p = multiprocessing.Process(target = workerFunction, args=(myFile, listOfAnnotatedFiles, listOfRawFiles, flagNoLabels, return_list))
jobs.append(p)
p.start()
time.sleep(3)
#append the files from last list (remainder of total files divided by 10)
for myFile in lastList:
p = multiprocessing.Process(target = workerFunction, args=(myFile, listOfAnnotatedFiles, listOfRawFiles, flagNoLabels, return_list))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
open(os.path.join(data_dir, output), 'w+').close()
#subListed = subsample(return_list)
#for training set
#writeToTSV(subListed, os.path.join(data_dir, output))
#for testing sets
writeToTSV(return_list, os.path.join(data_dir, output))
def subsample(listVerbs):
yesses = []
noes = []
for verb in listVerbs:
label = verb[-2]
if label == 'label=Y':
yesses.append(verb)
elif label == 'label=N':
noes.append(verb)
lengthYes = len(yesses)
    # Keep just enough negatives that positives make up ~45% of the subsample.
    fortyFivePercent = int(round((lengthYes * 100) / 45)) - lengthYes
    newNoesIndices = np.random.choice(len(noes), fortyFivePercent, replace=False)
newNoes = []
for index in newNoesIndices:
newNoes.append(noes[index])
return yesses+newNoes
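# Worked example (added comment): with 45 positive rows, the code keeps
# round(45 * 100 / 45) - 45 = 55 negative rows, so positives make up 45% of
# the returned sample (assuming at least 55 negatives are available).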
#non-multiprocessing option
#get the files, open them, extract verbs and features and create a large array of rows
def findFiles1(listOfNLPFiles, listOfAnnotatedFiles, listOfRawFiles, output):
if listOfAnnotatedFiles == None:
flagNoLabels = True
else:
flagNoLabels = False
myRows = []
j = 0
#open each NLP File
for myFile in listOfNLPFiles:
if j < minNumFile:
j = j + 1
continue
files = len(listOfNLPFiles)
filename = myFile.split('/')[-1]
fileNoXML = filename.split('.xml')[0]
print filename
myAnnotatedFile = None
#this means there is an annotated file list, error if no corresponding file is found
if flagNoLabels == False:
myAnnotatedFile = [s for s in listOfAnnotatedFiles if filename in s]
myRawFile = [s for s in listOfRawFiles if fileNoXML in s][0]
if len(myAnnotatedFile) == 1:
myAnnotatedFile = myAnnotatedFile[0]
else:
print 'error opening Annotated File. There is probably no matching annotated file'
j = j + 1
continue
print('opening file: ' + myFile + ' ' + str(j) + ' out of ' + str(files))
        fileRows, article = openFile(myFile, myAnnotatedFile, myRawFile)
        myRows += fileRows
j = j + 1
if j == maxNumFiles:
break
open(os.path.join(data_dir, output), 'w').close()
writeToTSV(myRows, os.path.join(data_dir, output))
#opens both versions of the file, makes sure they're both okay
def openFile(coreNLPFileName, annotatedFileName, raw_file):
rows = []
flagNoLabels = False
annotated_text = ''
if annotatedFileName != None:
try:
parc_xml = open(annotatedFileName).read()
corenlp_xml = open(coreNLPFileName).read()
raw_text = open(raw_file).read()
article = P(corenlp_xml, parc_xml, raw_text)
filename = coreNLPFileName.split('/')[-1]
#find the verbs in this file
#extract the features from each verb
#listOfVerbs = findVerbs(annotated_text, filename)
listOfVerbs = findVerbs(article, filename)
rows = prepareVerb(listOfVerbs, article, flagNoLabels)
except:
print 'error opening file'
raise
        return rows, article
else:
corenlp_xml = open(coreNLPFileName).read()
raw_text = open(raw_file).read()
filename = coreNLPFileName.split('/')[-1]
flagNoLabels = True
parc_xml = None
article = P(corenlp_xml, parc_xml, raw_text)
listOfVerbs = findVerbs(article, filename)
rows = prepareVerb(listOfVerbs, article, flagNoLabels)
return rows, article
#use the constituency parse to find the verbs
def findVerbs(document,filename):
verbPhraseList = []
#skip over ROOT to beginning of the sentence S
#implement cynthia's algorithm to find head verbs
#procedureGETHEADVERBS(document)
#for VP in document do
#if not VP has another VP as direct child then
#for all children of VP do
#if child is terminal node and child.PoS starts with VB then
#add child to head verbs
allVerbTokens = []
for sentence in document.sentences:
for token in sentence['tokens']:
if token['pos'].startswith('V'):
parent = token['c_parent']
verbPhraseDependence = False
children = parent['c_children']
for child in children:
if child['c_tag'] == 'VP':
verbPhraseDependence = True
continue
if verbPhraseDependence:
continue
for child in children:
if child['c_tag'].startswith('V') and child['word'] != None:
allVerbTokens.append(child)
#extract syntactic features (depth, parentNode, parentSiblingNodes)
finalListVerbPhrases = []
for verb in allVerbTokens:
depth = verb['c_depth']
parentNode = verb['c_parent']['c_tag']
grandparents = verb['c_parent']['c_parent']
if grandparents == None:
continue
auntsAndUncles = grandparents['c_children']
parentSiblingNodes = []
for aunt in auntsAndUncles:
parentSiblingNodes.append(aunt['c_tag'])
finalListVerbPhrases.append((verb['word'], verb['sentence_id'], verb['id'], filename, (depth,parentNode,parentSiblingNodes)))
return finalListVerbPhrases
#takes verb and extracts all features
def prepareVerb(listOfVerbs, article, flagNoLabels):
rows = []
openQuote = False
for sentence in article.sentences:
sentenceID = sentence['id']
beginnings = []
endings = []
for (word, sentID, tokenID, filename, syntacticFeatures) in listOfVerbs:
if sentID == sentenceID:
token = sentence['tokens'][tokenID]
try:
rowOfFeats = extractFeatures(token, sentence, filename, syntacticFeatures, openQuote)
if rowOfFeats != None:
rows.append(rowOfFeats)
except:
raise
for token in sentence['tokens']:
if (token['word'] == "''" or token['word'] == '``') and openQuote == False:
openQuote = True
elif (token['word'] == "''" or token['word'] == '``') and openQuote == True:
openQuote = False
return rows
#finds and assigns all the features
def extractFeatures(token, sentence, filename, syntacticFeatures, openQuote):
rowOfFeats = []
verb = token['word']
idVerb = token['id']
Features = Verb(token['word'], token['lemma'], token['pos'])
Features.set_metadata(sentence['id'], idVerb, filename)
if token.has_key('attribution'):
role = token['role']
if role == 'cue':
Features.set_label('Y')
elif role == 'content':
return None
else:
Features.set_label('N')
else:
Features.set_label('N')
if idVerb > 0:
prevToken = sentence['tokens'][idVerb - 1]
else:
prevToken = None
if idVerb < len(sentence['tokens']) - 1:
nexToken = sentence['tokens'][idVerb + 1]
else:
nexToken = None
if prevToken != None:
Features.set_previousToken(prevToken['word'], prevToken['lemma'], prevToken['pos'])
if prevToken['word'] == ':':
Features.set_colonAdjacent()
elif prevToken['word'] == '``' or prevToken['word'] == "''":
Features.set_quoteAdjacentInside()
else:
Features.set_previousToken('NONE!!', 'NONE!!', 'NONE!!')
if nexToken != None:
Features.set_nextToken(nexToken['word'], nexToken['lemma'], nexToken['pos'])
if nexToken['word'] == ':':
Features.set_colonAdjacent()
elif nexToken['word'] == '``' or nexToken['word'] == "''":
Features.set_quoteAdjacentInside()
else:
Features.set_nextToken('NONE!!', 'NONE!!', 'NONE!!')
Features.set_verbNet(";!".join(vn.classids(token['lemma'])))
Features.set_distances(token['id'], len(sentence['tokens']) - (token['id'] + 1))
quoteMarkers = findQuoteMarkers(sentence, openQuote)
FEATinQuotes = 'False'
for (beg, end) in quoteMarkers:
if idVerb > beg and idVerb < end:
Features.set_insideQuotes()
(depth, parentNode, parentSiblings) = syntacticFeatures
Features.set_syntactic(depth, parentNode, ";!".join(parentSiblings))
Features.makeList()
rowOfFeats = Features.getList()
return rowOfFeats
#identifies quote markers to see if there are mismatched quotes and returns
#the index positions of each quotation mark
def findQuoteMarkers(sentence, openQuotes):
begQuote = 0
endQuote = 0
listQuoteBeginnings = []
listQuoteEndings = []
found = False
if openQuotes:
listQuoteBeginnings = [-1]
for quoteToken in sentence['tokens']:
if quoteToken['word'] == '``' or (quoteToken['word'] == "''" and openQuotes == False):
openQuotes = True
listQuoteBeginnings.append(quoteToken['id'])
found = True
elif quoteToken['word'] == "''" and openQuotes == True:
openQuotes = False
listQuoteEndings.append(quoteToken['id'])
found = True
if found == False and openQuotes == True:
return [(-1,len(sentence['tokens']))]
if len(listQuoteBeginnings) > len(listQuoteEndings):
listQuoteEndings.append(len(sentence['tokens']))
elif len(listQuoteBeginnings) < len(listQuoteEndings):
listQuoteBeginnings = [-1] + listQuoteBeginnings
quoteMarkers = zip(listQuoteBeginnings, listQuoteEndings)
return quoteMarkers
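# Worked example (added comment): for the token words
# ['``', 'I', 'said', "''", 'yes'] with openQuotes=False, findQuoteMarkers
# returns [(0, 3)], so a verb at token index 2 counts as inside the quotation.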
#parse command line args
def main():
usageMessage = '\nCorrect usage of the Verb Cue Feature Extractor command is as follows: \n' + \
'\n\n WHEN AN ANNOTATED FILESET EXISTS TO GET LABELS FROM:\n' + \
'To extract verbs and their features: \n python source/intermediaries/verbCuesFeatureExtractor.py -labelled /pathToCoreNLPDirectory /pathToAnnotatedFilesDirectory /pathToRawFiles nameOfOutputFile.csv \n' + \
        '\nTo use the default path names for the PARC training data and the output filename PARCTrainFeatsAll.csv, please use the command with the label -default, as follows: \n' + \
'\t python source/intermediaries/verbCuesFeatureExtractor.py -labelled -default' + \
'\n\n WHEN THE LABELS ARE UNKNOWN:\n' + \
'To extract verbs and their features: \n python source/intermediaries/verbCuesFeatureExtractor.py -unlabelled /pathToCoreNLPDirectory /pathToRaw nameOfOutputFile.csv \n' + \
'\nFor reference, the path to the CoreNLP file is: /home/ndg/dataset/ptb2-corenlp/CoreNLP_tokenized/ + train, test or dev depending on your needs. \n' + \
'The path to the Parc3 files is /home/ndg/dataset/parc3/ + train, test or dev depending on your needs.\n' + \
'The path to the raw files is /home/ndg/dataset/ptb2-corenlp/masked_raw/ + train, test, or dev'
args = sys.argv
if len(args) == 6:
flag = args[1]
pathToCORENLP = args[2]
pathToAnnotatedFiles = args[3]
pathToRaw = args[4]
nameCSVOutput = args[5]
if flag != '-labelled':
print usageMessage
return
if os.path.isdir(pathToCORENLP):
print 'valid path to a directory'
else:
print 'ERROR: The path to this coreNLP directory does not exist.'
print usageMessage
return
if os.path.isdir(pathToAnnotatedFiles):
print 'valid path to a directory'
else:
print 'ERROR: The path to this annotated file directory does not exist.'
print usageMessage
return
if os.path.isfile(data_dir + nameCSVOutput):
print "That file already exists, you probably don't want to overwrite it"
var = raw_input("Are you sure you want to overwrite this file? Please answer Y or N\n")
if var == 'Y' or var == 'y':
coreNLPFiles = openDirectory(pathToCORENLP)
annotatedFiles = openDirectory(pathToAnnotatedFiles)
rawFiles = openDirectory(pathToRaw)
findFiles(coreNLPFiles, annotatedFiles, rawFiles, nameCSVOutput)
return
else:
return
else:
print 'valid filename'
coreNLPFiles = openDirectory(pathToCORENLP)
annotatedFiles = openDirectory(pathToAnnotatedFiles)
rawFiles = openDirectory(pathToRaw)
findFiles(coreNLPFiles, annotatedFiles, rawFiles, nameCSVOutput)
elif len(args) == 5:
        pathToCORENLP = args[2]
        pathToRaw = args[3]
        nameCSVOutput = args[4]
if args[1] != '-unlabelled':
print usageMessage
return
if os.path.isdir(pathToCORENLP):
print 'valid path to a directory'
else:
print 'ERROR: The path to this coreNLP directory does not exist.'
print usageMessage
return
if os.path.isfile(data_dir + nameCSVOutput):
print "That file already exists, you probably don't want to overwrite it"
var = raw_input("Are you sure you want to overwrite this file? Please answer Y or N\n")
if var == 'Y' or var == 'y':
coreNLPFiles = openDirectory(pathToCORENLP)
rawFiles = openDirectory(pathToRaw)
findFiles(coreNLPFiles, None, rawFiles, nameCSVOutput)
return
else:
return
coreNLPFiles = openDirectory(pathToCORENLP)
rawFiles = openDirectory(pathToRaw)
findFiles(coreNLPFiles, None, rawFiles, nameCSVOutput)
elif len(args) == 3:
if args[1] == '-labelled' and args[2] == '-default':
pathToCORENLP = '/home/ndg/dataset/ptb2-corenlp/CoreNLP/train/'
pathToAnnotatedFiles = '/home/ndg/dataset/parc3/train/'
pathToRaw = '/home/ndg/dataset/ptb2-corenlp/masked_raw/train/'
nameCSVOutput = 'PARCTrainFeatsAll.csv'
coreNLPFiles = openDirectory(pathToCORENLP)
annotatedFiles = openDirectory(pathToAnnotatedFiles)
rawFiles = openDirectory(pathToRaw)
findFiles(coreNLPFiles, annotatedFiles, rawFiles, nameCSVOutput)
else:
print usageMessage
else:
print usageMessage
#object Verb
#one attribute per feature
class Verb(object):
FEATcolonAdjacency = 'False'
FEATquotationAdjacency = 'False'
FEATpreviousToken = None
FEATpreviousLemma = None
FEATpreviousPOS = None
FEATinQuotes = 'False'
FEATthisToken = None
FEATthisLemma = None
FEATthisPOS = None
FEATnextLemma = None
FEATnextToken = None
FEATnextPOS = None
FEATverbNetClasses = ''
FEATdepth = None
FEATparentNode = None
FEATparentSiblings = ''
FEATdistanceStart = None
FEATdistanceEnd = None
metadataSentId = None
metadataTokenId = None
metadataFilename = None
label = None
rowOfFeats = []
def __init__(self, Token, Lemma, POS):
self.FEATthisToken = Token
self.FEATthisLemma = Lemma
self.FEATthisPOS = POS
def set_colonAdjacent(self):
self.FEATcolonAdjacency = 'True'
def set_quoteAdjacentInside(self):
self.FEATquotationAdjacency = 'True'
def set_insideQuotes(self):
self.FEATinQuotes = 'True'
def set_previousToken(self, prevToken, prevLemma, prevPOS):
self.FEATpreviousToken = prevToken
self.FEATpreviousLemma = prevLemma
self.FEATpreviousPOS = prevPOS
def set_nextToken(self, nexToken, nexLemma, nextPOS):
self.FEATnextToken = nexToken
self.FEATnextLemma = nexLemma
self.FEATnextPOS = nextPOS
def set_verbNet(self, classes):
self.FEATverbNetClasses = classes
def set_syntactic(self, depth, parentNode, parentSiblings):
self.FEATdepth = str(depth)
self.FEATparentNode = parentNode
self.FEATparentSiblings = parentSiblings
def set_distances(self, start, end):
self.FEATdistanceStart = str(start)
self.FEATdistanceEnd = str(end)
def set_metadata(self, sentID, tokID, filename):
self.metadataSentId = str(sentID)
self.metadataTokenId = str(tokID)
self.metadataFilename = filename
def set_label(self, value):
self.label = value
def makeList(self):
self.rowOfFeats = ['thisToken=' + str(self.FEATthisToken), 'thisLemma=' + str(self.FEATthisLemma), 'thisPos=' + str(self.FEATthisPOS), \
'lastToken=' + str(self.FEATpreviousToken), 'lastLemma=' + str(self.FEATpreviousLemma), 'lastPos=' + str(self.FEATpreviousPOS), \
'nextToken=' + str(self.FEATnextToken), 'nextLemma=' + str(self.FEATnextLemma), 'nextPos=' + self.FEATnextPOS,\
'colonAdj=' + self.FEATcolonAdjacency, 'quoteAdj=' + self.FEATquotationAdjacency, \
'VNclasses='+str(self.FEATverbNetClasses), \
'depth=' + self.FEATdepth, 'parentNode='+str(self.FEATparentNode), 'siblings='+str(self.FEATparentSiblings), \
'distStart=' + self.FEATdistanceStart, 'distEnd='+self.FEATdistanceEnd, 'inQuotes=' + self.FEATinQuotes, \
'label=' + self.label,
'metaData='+ self.metadataSentId + ';' + self.metadataTokenId + ';' + self.metadataFilename]
def getList(self):
return self.rowOfFeats
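# Illustrative sketch (added for this document, not part of the original
# script): builds one feature row by hand, with made-up token values, to show
# the 'name=value' format that writeToTSV writes out for each verb.
def exampleVerbRow():
    feats = Verb('said', 'say', 'VBD')
    feats.set_metadata(0, 2, 'example_file.xml')  # hypothetical filename
    feats.set_label('Y')
    feats.set_previousToken('he', 'he', 'PRP')
    feats.set_nextToken(',', ',', ',')
    feats.set_verbNet('say-37.7')
    feats.set_distances(2, 5)
    feats.set_syntactic(4, 'VP', 'NP;!VP')
    feats.makeList()
    return feats.getList()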
class Stack:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[len(self.items)-1]
def size(self):
return len(self.items)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
"""Supports the Ion Velocity Meter (IVM)
onboard the Ionospheric Connection Explorer (ICON).
Parameters
----------
platform : string
'icon'
name : string
'ivm'
tag : string
None supported
sat_id : string
'a' or 'b'
Warnings
--------
- No download routine as ICON has not yet been launched
- Data not yet publicly available
Example
-------
import pysat
ivm = pysat.Instrument('icon', 'ivm', sat_id='a', tag='level_2',
clean_level='clean')
ivm.download(pysat.datetime(2019, 1, 30), pysat.datetime(2019, 12, 31))
ivm.load(2017,363)
Author
------
R. A. Stoneback
"""
from __future__ import print_function
from __future__ import absolute_import
import functools
import numpy as np
import pandas as pds
import warnings
import pysat
from .methods import nasa_cdaweb as cdw
platform = 'icon'
name = 'ivm'
tags = {'level_2': 'Level 2 public geophysical data'}
# dictionary of sat_ids and tags supported by each
sat_ids = {'a': ['level_2'],
'b': ['level_2']}
_test_dates = {'a': {'level_2': pysat.datetime(2018, 1, 1)},
'b': {'level_2': pysat.datetime(2018, 1, 1)}}
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object
Returns
--------
Void : (NoneType)
modified in-place, as desired.
"""
print("Mission acknowledgements and data restrictions will be printed " +
"here when available.")
pass
def default(inst):
"""Default routine to be applied when loading data.
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object
Note
----
Removes ICON preamble on variable names.
"""
remove_icon_names(inst)
def load(fnames, tag=None, sat_id=None):
"""Loads ICON IVM data using pysat into pandas.
This routine is called as needed by pysat. It is not intended
for direct user interaction.
Parameters
----------
fnames : array-like
iterable of filename strings, full path, to data files to be loaded.
This input is nominally provided by pysat itself.
tag : string
tag name used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
sat_id : string
Satellite ID used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
**kwargs : extra keywords
Passthrough for additional keyword arguments specified when
instantiating an Instrument object. These additional keywords
are passed through to this routine by pysat.
Returns
-------
data, metadata
Data and Metadata are formatted for pysat. Data is a pandas
DataFrame while metadata is a pysat.Meta instance.
Note
----
Any additional keyword arguments passed to pysat.Instrument
upon instantiation are passed along to this routine.
Examples
--------
::
inst = pysat.Instrument('icon', 'ivm', sat_id='a', tag='level_2')
inst.load(2019,1)
"""
return pysat.utils.load_netcdf4(fnames, epoch_name='Epoch',
units_label='Units',
name_label='Long_Name',
notes_label='Var_Notes',
desc_label='CatDesc',
plot_label='FieldNam',
axis_label='LablAxis',
scale_label='ScaleTyp',
min_label='ValidMin',
max_label='ValidMax',
fill_label='FillVal')
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a list of files corresponding to ICON IVM.
This routine is invoked by pysat and is not intended for direct use by
the end user.
Multiple data levels may be supported via the 'tag' input string.
Currently defaults to level-2 data, or L2 in the filename.
Parameters
----------
tag : string ('')
tag name used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
sat_id : string ('')
Satellite ID used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
data_path : string
Full path to directory containing files to be loaded. This
is provided by pysat. The user may specify their own data path
at Instrument instantiation and it will appear here.
format_str : string (None)
String template used to parse the datasets filenames. If a user
supplies a template string at Instrument instantiation
then it will appear here, otherwise defaults to None.
Returns
-------
pandas.Series
Series of filename strings, including the path, indexed by datetime.
Examples
--------
::
If a filename is SPORT_L2_IVM_2019-01-01_v01r0000.NC then the template
is 'SPORT_L2_IVM_{year:04d}-{month:02d}-{day:02d}_' +
'v{version:02d}r{revision:04d}.NC'
Note
----
The returned Series should not have any duplicate datetimes. If there are
multiple versions of a file the most recent version should be kept and the
rest discarded. This routine uses the pysat.Files.from_os constructor, thus
the returned files are up to pysat specifications.
"""
    desc = None
    # Only level-2 data is currently supported (see the docstring above), so
    # the tag is fixed here.
    tag = 'level_2'
if tag == 'level_1':
code = 'L1'
desc = None
elif tag == 'level_2':
code = 'L2'
desc = None
else:
raise ValueError('Unsupported tag supplied: ' + tag)
if format_str is None:
format_str = 'ICON_'+code+'_IVM-'+sat_id.upper()
if desc is not None:
format_str += '_' + desc + '_'
format_str += '_{year:4d}-{month:02d}-{day:02d}'
format_str += '_v{version:02d}r{revision:03d}.NC'
return pysat.Files.from_os(data_path=data_path,
format_str=format_str)
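    # Illustrative note (added for this document): with sat_id='a' the template
    # built above is
    #   'ICON_L2_IVM-A_{year:4d}-{month:02d}-{day:02d}_v{version:02d}r{revision:03d}.NC'
    # which would match a (hypothetical) file named
    #   ICON_L2_IVM-A_2019-01-01_v01r000.NC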
def download(date_array, tag, sat_id, data_path=None, user=None,
password=None):
"""Will download data for ICON IVM, after successful launch and operations.
Parameters
----------
date_array : array-like
list of datetimes to download data for. The sequence of dates need not
be contiguous.
tag : string ('')
Tag identifier used for particular dataset. This input is provided by
pysat.
sat_id : string ('')
Satellite ID string identifier used for particular dataset. This input
is provided by pysat.
data_path : string (None)
Path to directory to download data to.
user : string (None)
User string input used for download. Provided by user and passed via
        pysat. If an account is required for downloads, this routine must
        raise an error if the user is not supplied.
password : string (None)
Password for data download.
**kwargs : dict
Additional keywords supplied by user when invoking the download
routine attached to a pysat.Instrument object are passed to this
routine via kwargs.
Returns
--------
Void : (NoneType)
Downloads data to disk.
"""
warnings.warn("Downloads aren't yet available.")
return
def clean(inst, clean_level=None):
"""Provides data cleaning based upon clean_level.
clean_level is set upon Instrument instantiation to
one of the following:
'Clean'
'Dusty'
'Dirty'
'None'
Routine is called by pysat, and not by the end user directly.
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Note
----
Supports 'clean', 'dusty', 'dirty', 'none'
"""
if clean_level != 'none':
warnings.warn("Cleaning actions for ICON IVM are not yet defined.")
return
def remove_icon_names(inst, target=None):
"""Removes leading text on ICON project variable names
Parameters
----------
inst : pysat.Instrument
ICON associated pysat.Instrument object
target : str
Leading string to remove. If none supplied,
ICON project standards are used to identify and remove
leading text
Returns
-------
None
Modifies Instrument object in place
"""
if target is None:
lev = inst.tag
if lev == 'level_2':
lev = 'L2'
elif lev == 'level_0':
lev = 'L0'
elif lev == 'level_0p':
lev = 'L0P'
elif lev == 'level_1.5':
lev = 'L1-5'
elif lev == 'level_1':
lev = 'L1'
else:
            raise ValueError('Unknown ICON data level')
# get instrument code
sid = inst.sat_id.lower()
if sid == 'a':
sid = 'IVM_A'
elif sid == 'b':
sid = 'IVM_B'
else:
raise ValueError('Unknown ICON satellite ID')
prepend_str = '_'.join(('ICON', lev, sid)) + '_'
else:
prepend_str = target
inst.data.rename(columns=lambda x: x.split(prepend_str)[-1], inplace=True)
inst.meta.data.rename(index=lambda x: x.split(prepend_str)[-1],
inplace=True)
orig_keys = inst.meta.keys_nD()
for keynd in orig_keys:
new_key = keynd.split(prepend_str)[-1]
new_meta = inst.meta.pop(keynd)
new_meta.data.rename(index=lambda x: x.split(prepend_str)[-1],
inplace=True)
inst.meta[new_key] = new_meta
return
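    # Illustrative example (added for this document): for tag='level_2' and
    # sat_id='a' the prefix computed above is 'ICON_L2_IVM_A_', so a
    # (hypothetical) variable named 'ICON_L2_IVM_A_Ion_Velocity_X' is renamed
    # to 'Ion_Velocity_X'.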
|
|
# -*- coding: utf-8 -*-
"""Factories for the OSF models, including an abstract ModularOdmFactory.
Example usage: ::
>>> from tests.factories import UserFactory
>>> user1 = UserFactory()
>>> user1.username
    fred0@example.com
    >>> user2 = UserFactory()
    >>> user2.username
    fred1@example.com
Factory boy docs: http://factoryboy.readthedocs.org/
"""
import datetime
import functools
from factory import base, Sequence, SubFactory, post_generation, LazyAttribute
from mock import patch, Mock
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import StoredObject
from framework.auth import User, Auth
from framework.auth.utils import impute_names_model
from framework.sessions.model import Session
from website.addons import base as addons_base
from website.oauth.models import (
ApiOAuth2Application,
ApiOAuth2PersonalToken,
ExternalAccount,
ExternalProvider
)
from website.project.model import (
Comment, DraftRegistration, Embargo, MetaSchema, Node, NodeLog, Pointer,
PrivateLink, RegistrationApproval, Retraction, Sanction, Tag, WatchConfig,
ensure_schemas
)
from website.notifications.model import NotificationSubscription, NotificationDigest
from website.archiver.model import ArchiveTarget, ArchiveJob
from website.archiver import ARCHIVER_SUCCESS
from website.project.licenses import NodeLicense, NodeLicenseRecord, ensure_licenses
ensure_licenses = functools.partial(ensure_licenses, warn=False)
from website.addons.wiki.model import NodeWikiPage
from tests.base import fake
from tests.base import DEFAULT_METASCHEMA
# TODO: This is a hack. Check whether FactoryBoy can do this better
def save_kwargs(**kwargs):
for value in kwargs.itervalues():
if isinstance(value, StoredObject) and not value._is_loaded:
value.save()
def FakerAttribute(provider, **kwargs):
"""Attribute that lazily generates a value using the Faker library.
Example: ::
class UserFactory(ModularOdmFactory):
name = FakerAttribute('name')
"""
fake_gen = getattr(fake, provider)
if not fake_gen:
raise ValueError('{0!r} is not a valid faker provider.'.format(provider))
return LazyAttribute(lambda x: fake_gen(**kwargs))
class ModularOdmFactory(base.Factory):
"""Base factory for modular-odm objects.
"""
ABSTRACT_FACTORY = True
@classmethod
def _build(cls, target_class, *args, **kwargs):
"""Build an object without saving it."""
save_kwargs(**kwargs)
return target_class(*args, **kwargs)
@classmethod
def _create(cls, target_class, *args, **kwargs):
save_kwargs(**kwargs)
instance = target_class(*args, **kwargs)
instance.save()
return instance
class UserFactory(ModularOdmFactory):
FACTORY_FOR = User
username = Sequence(lambda n: "fred{0}@example.com".format(n))
# Don't use post generation call to set_password because
# It slows down the tests dramatically
password = "password"
fullname = Sequence(lambda n: "Freddie Mercury{0}".format(n))
is_registered = True
is_claimed = True
date_confirmed = datetime.datetime(2014, 2, 21)
merged_by = None
email_verifications = {}
verification_key = None
@post_generation
def set_names(self, create, extracted):
parsed = impute_names_model(self.fullname)
for key, value in parsed.items():
setattr(self, key, value)
if create:
self.save()
@post_generation
def set_emails(self, create, extracted):
if self.username not in self.emails:
self.emails.append(self.username)
self.save()
class AuthUserFactory(UserFactory):
"""A user that automatically has an api key, for quick authentication.
Example: ::
user = AuthUserFactory()
res = self.app.get(url, auth=user.auth) # user is "logged in"
"""
@post_generation
def add_auth(self, create, extracted):
self.set_password('password')
self.save()
self.auth = (self.username, 'password')
class TagFactory(ModularOdmFactory):
FACTORY_FOR = Tag
_id = Sequence(lambda n: "scientastic-{}".format(n))
class ApiOAuth2ApplicationFactory(ModularOdmFactory):
FACTORY_FOR = ApiOAuth2Application
owner = SubFactory(UserFactory)
name = Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n))
home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/'
callback_url = 'http://example.uk'
class ApiOAuth2PersonalTokenFactory(ModularOdmFactory):
FACTORY_FOR = ApiOAuth2PersonalToken
owner = SubFactory(UserFactory)
scopes = 'osf.full_write osf.full_read'
name = Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n))
class PrivateLinkFactory(ModularOdmFactory):
FACTORY_FOR = PrivateLink
name = "link"
key = "foobarblaz"
anonymous = False
creator = SubFactory(AuthUserFactory)
class AbstractNodeFactory(ModularOdmFactory):
FACTORY_FOR = Node
title = 'The meaning of life'
description = 'The meaning of life is 42.'
creator = SubFactory(AuthUserFactory)
class ProjectFactory(AbstractNodeFactory):
category = 'project'
class FolderFactory(ProjectFactory):
is_folder = True
class DashboardFactory(FolderFactory):
is_dashboard = True
class NodeFactory(AbstractNodeFactory):
category = 'hypothesis'
parent = SubFactory(ProjectFactory)
class RegistrationFactory(AbstractNodeFactory):
# Default project is created if not provided
category = 'project'
@classmethod
def _build(cls, target_class, *args, **kwargs):
raise Exception("Cannot build registration without saving.")
@classmethod
def _create(cls, target_class, project=None, schema=None, user=None,
data=None, archive=False, embargo=None, registration_approval=None, retraction=None, is_public=False,
*args, **kwargs):
save_kwargs(**kwargs)
# Original project to be registered
project = project or target_class(*args, **kwargs)
project.save()
# Default registration parameters
schema = schema or DEFAULT_METASCHEMA
user = user or project.creator
data = data or {'some': 'data'}
auth = Auth(user=user)
register = lambda: project.register_node(
schema=schema,
auth=auth,
data=data
)
def add_approval_step(reg):
if embargo:
reg.embargo = embargo
elif registration_approval:
reg.registration_approval = registration_approval
elif retraction:
reg.retraction = retraction
else:
reg.require_approval(reg.creator)
reg.save()
reg.sanction.add_authorizer(reg.creator)
reg.sanction.save()
if archive:
reg = register()
add_approval_step(reg)
else:
with patch('framework.tasks.handlers.enqueue_task'):
reg = register()
add_approval_step(reg)
with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)):
reg.archive_job.status = ARCHIVER_SUCCESS
reg.archive_job.save()
reg.sanction.state = Sanction.APPROVED
reg.sanction.save()
ArchiveJob(
src_node=project,
dst_node=reg,
initiator=user,
)
if is_public:
reg.is_public = True
reg.save()
return reg
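    # Illustrative usage sketch (added for this document, not part of the test
    # suite); `my_project` and `my_user` are hypothetical fixtures.
    #
    #     reg = RegistrationFactory()                    # registers a fresh project
    #     reg = RegistrationFactory(project=my_project,  # register an existing node
    #                               user=my_user,
    #                               is_public=True)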
class PointerFactory(ModularOdmFactory):
FACTORY_FOR = Pointer
node = SubFactory(NodeFactory)
class NodeLogFactory(ModularOdmFactory):
FACTORY_FOR = NodeLog
action = 'file_added'
user = SubFactory(UserFactory)
class WatchConfigFactory(ModularOdmFactory):
FACTORY_FOR = WatchConfig
node = SubFactory(NodeFactory)
class SanctionFactory(ModularOdmFactory):
ABSTRACT_FACTORY = True
@classmethod
def _create(cls, target_class, approve=False, *args, **kwargs):
user = kwargs.get('user') or UserFactory()
sanction = ModularOdmFactory._create(target_class, initiated_by=user, *args, **kwargs)
reg_kwargs = {
'creator': user,
'user': user,
sanction.SHORT_NAME: sanction
}
RegistrationFactory(**reg_kwargs)
if not approve:
sanction.state = Sanction.UNAPPROVED
sanction.save()
return sanction
class RetractionFactory(SanctionFactory):
FACTORY_FOR = Retraction
user = SubFactory(UserFactory)
class EmbargoFactory(SanctionFactory):
FACTORY_FOR = Embargo
user = SubFactory(UserFactory)
class RegistrationApprovalFactory(SanctionFactory):
FACTORY_FOR = RegistrationApproval
user = SubFactory(UserFactory)
class NodeWikiFactory(ModularOdmFactory):
FACTORY_FOR = NodeWikiPage
page_name = 'home'
content = 'Some content'
version = 1
user = SubFactory(UserFactory)
node = SubFactory(NodeFactory)
@post_generation
def set_node_keys(self, create, extracted):
self.node.wiki_pages_current[self.page_name] = self._id
self.node.wiki_pages_versions[self.page_name] = [self._id]
self.node.save()
class UnregUserFactory(ModularOdmFactory):
"""Factory for an unregistered user. Uses User.create_unregistered()
to create an instance.
"""
FACTORY_FOR = User
email = Sequence(lambda n: "brian{0}@queen.com".format(n))
fullname = Sequence(lambda n: "Brian May{0}".format(n))
@classmethod
def _build(cls, target_class, *args, **kwargs):
'''Build an object without saving it.'''
return target_class.create_unregistered(*args, **kwargs)
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = target_class.create_unregistered(*args, **kwargs)
instance.save()
return instance
class UnconfirmedUserFactory(ModularOdmFactory):
"""Factory for a user that has not yet confirmed their primary email
address (username).
"""
FACTORY_FOR = User
username = Sequence(lambda n: 'roger{0}@queen.com'.format(n))
fullname = Sequence(lambda n: 'Roger Taylor{0}'.format(n))
password = 'killerqueen'
@classmethod
def _build(cls, target_class, username, password, fullname):
'''Build an object without saving it.'''
return target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
@classmethod
def _create(cls, target_class, username, password, fullname):
instance = target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
instance.save()
return instance
class AuthFactory(base.Factory):
FACTORY_FOR = Auth
user = SubFactory(UserFactory)
class ProjectWithAddonFactory(ProjectFactory):
"""Factory for a project that has an addon. The addon will be added to
both the Node and the creator records. ::
p = ProjectWithAddonFactory(addon='github')
p.get_addon('github') # => github node settings object
p.creator.get_addon('github') # => github user settings object
"""
# TODO: Should use mock addon objects
@classmethod
def _build(cls, target_class, addon='s3', *args, **kwargs):
'''Build an object without saving it.'''
instance = ProjectFactory._build(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
return instance
@classmethod
def _create(cls, target_class, addon='s3', *args, **kwargs):
instance = ProjectFactory._create(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
instance.save()
return instance
# Deprecated unregistered user factory, used mainly for testing migration
class DeprecatedUnregUser(object):
'''A dummy "model" for an unregistered user.'''
def __init__(self, nr_name, nr_email):
self.nr_name = nr_name
self.nr_email = nr_email
def to_dict(self):
return {"nr_name": self.nr_name, "nr_email": self.nr_email}
class DeprecatedUnregUserFactory(base.Factory):
"""Generates a dictonary represenation of an unregistered user, in the
format expected by the OSF.
::
>>> from tests.factories import UnregUserFactory
>>> UnregUserFactory()
    {'nr_name': 'Tom Jones0', 'nr_email': 'tom0@example.com'}
    >>> UnregUserFactory()
    {'nr_name': 'Tom Jones1', 'nr_email': 'tom1@example.com'}
"""
FACTORY_FOR = DeprecatedUnregUser
nr_name = Sequence(lambda n: "Tom Jones{0}".format(n))
nr_email = Sequence(lambda n: "tom{0}@example.com".format(n))
@classmethod
def _create(cls, target_class, *args, **kwargs):
return target_class(*args, **kwargs).to_dict()
_build = _create
class CommentFactory(ModularOdmFactory):
FACTORY_FOR = Comment
content = Sequence(lambda n: 'Comment {0}'.format(n))
is_public = True
@classmethod
def _build(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or node
instance = target_class(
node=node,
user=user,
target=target,
*args, **kwargs
)
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or node
instance = target_class(
node=node,
user=user,
target=target,
*args, **kwargs
)
instance.save()
return instance
class NotificationSubscriptionFactory(ModularOdmFactory):
FACTORY_FOR = NotificationSubscription
class NotificationDigestFactory(ModularOdmFactory):
FACTORY_FOR = NotificationDigest
class ExternalAccountFactory(ModularOdmFactory):
FACTORY_FOR = ExternalAccount
provider = 'mock2'
provider_id = Sequence(lambda n: 'user-{0}'.format(n))
provider_name = 'Fake Provider'
display_name = Sequence(lambda n: 'user-{0}'.format(n))
class SessionFactory(ModularOdmFactory):
FACTORY_FOR = Session
@classmethod
def _build(cls, target_class, *args, **kwargs):
user = kwargs.pop('user', None)
instance = target_class(*args, **kwargs)
if user:
instance.data['auth_user_username'] = user.username
instance.data['auth_user_id'] = user._primary_key
instance.data['auth_user_fullname'] = user.fullname
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = cls._build(target_class, *args, **kwargs)
instance.save()
return instance
class MockOAuth2Provider(ExternalProvider):
name = "Mock OAuth 2.0 Provider"
short_name = "mock2"
client_id = "mock2_client_id"
client_secret = "mock2_client_secret"
auth_url_base = "https://mock2.com/auth"
callback_url = "https://mock2.com/callback"
def handle_callback(self, response):
return {
'provider_id': 'mock_provider_id'
}
class MockAddonNodeSettings(addons_base.AddonNodeSettingsBase):
pass
class MockAddonUserSettings(addons_base.AddonUserSettingsBase):
pass
class MockAddonUserSettingsMergeable(addons_base.AddonUserSettingsBase):
def merge(self):
pass
class MockOAuthAddonUserSettings(addons_base.AddonOAuthUserSettingsBase):
oauth_provider = MockOAuth2Provider
class MockOAuthAddonNodeSettings(addons_base.AddonOAuthNodeSettingsBase):
oauth_provider = MockOAuth2Provider
class ArchiveTargetFactory(ModularOdmFactory):
FACTORY_FOR = ArchiveTarget
class ArchiveJobFactory(ModularOdmFactory):
FACTORY_FOR = ArchiveJob
class DraftRegistrationFactory(ModularOdmFactory):
FACTORY_FOR = DraftRegistration
@classmethod
def _create(cls, *args, **kwargs):
branched_from = kwargs.get('branched_from')
initiator = kwargs.get('initiator')
registration_schema = kwargs.get('registration_schema')
registration_metadata = kwargs.get('registration_metadata')
if not branched_from:
project_params = {}
if initiator:
project_params['creator'] = initiator
branched_from = ProjectFactory(**project_params)
initiator = branched_from.creator
        try:
            registration_schema = registration_schema or MetaSchema.find()[0]
        except IndexError:
            # No registration schemas exist yet; create the defaults and retry.
            ensure_schemas()
            registration_schema = MetaSchema.find()[0]
registration_metadata = registration_metadata or {}
draft = DraftRegistration.create_from_node(
branched_from,
user=initiator,
schema=registration_schema,
data=registration_metadata,
)
return draft
class NodeLicenseRecordFactory(ModularOdmFactory):
FACTORY_FOR = NodeLicenseRecord
@classmethod
def _create(cls, *args, **kwargs):
try:
NodeLicense.find_one(
Q('name', 'eq', 'No license')
)
except NoResultsFound:
ensure_licenses()
kwargs['node_license'] = kwargs.get(
'node_license',
NodeLicense.find_one(
Q('name', 'eq', 'No license')
)
)
return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs)
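# A typical test-usage sketch (illustrative only; it assumes the surrounding
# test case has already set up the OSF test app and database fixtures):
#
#     user = UserFactory()
#     project = ProjectWithAddonFactory(addon='github', creator=user)
#     comment = CommentFactory(node=project, user=user)
#     draft = DraftRegistrationFactory(branched_from=project)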
|
|
"""
This is a very carefully-controlled file used to allow all other python modules
to log their activity in a sane manner. Code which wishes to log its activity
will usually include lines such as the following at the top:
####################################
# SETTING UP THE LOGGER
import os
from LiteLog import litelog
ROOTPATH = os.path.splitext(__file__)[0]
LOGPATH = "{0}.log".format(ROOTPATH) # this simply specifies the absolute path -- feel free to change this.
LOGGER = litelog.get(__name__, path=LOGPATH)
LOGGER.info("----------BEGIN----------")
# do the following step if you want
# a global 'debug' log file:
litelog.set_debug(__file__)
####################################
@litelog.logwrap # <--- do this if you want a __debug__.log to record I/O/Error of function calls
def f():
...
LOGGER.info('just a test') # <--- do this if you want to log custom
# messages to the script's personal log.
This will create a <filename>.log file adjacent to the source code file itself.
This is a boon to (human) debuggers. The logger allows different levels of
criticality (debug, info, warning, error, critical). See
https://docs.python.org/2/library/logging.html
for more details.
(c) 2015 Matthew Cotton
"""
import logging
from logging.handlers import RotatingFileHandler
import functools
import inspect
import os
import sys
import traceback
import warnings
####################################
# This code defines the ABSOLUTE path of where the
# __debug__.log file should be located
# TODO: Attempt to place __debug__.log at the base of each project?
# i.e., automatically determine who has imported us?
_DEBUG_LOG_PATH = None
_META_LOGGER = None
_FIRST = True
def set_debug(pyfile_obj, meta_logger=True):
"""Puts __debug__.log next to the provided Python __file__"""
path = os.path.realpath(pyfile_obj)
set_debug_by_path(path, meta_logger=meta_logger)
def set_debug_by_path(path, meta_logger=True):
"""Puts __debug__.log in the given directory"""
global _DEBUG_LOG_PATH
    path = os.path.join(path, '')  # enforce trailing <SEP>
_DEBUG_LOG_PATH = os.path.join(os.path.dirname(os.path.dirname(path)), "__debug__.log")
if meta_logger:
_create_meta_logger()
def _create_meta_logger():
"""
We only set up meta-logger if the user wants it
Please only call this once... ?
"""
global _META_LOGGER
if _DEBUG_LOG_PATH is None:
raise RuntimeError("_DEBUG_LOG_PATH not set! Can't create meta-logger.")
elif _META_LOGGER is not None:
# _META_LOGGER already exists! Refusing to create a new one
return
parent = os.path.dirname(_DEBUG_LOG_PATH)
_META_LOGGER = get("__debug__", _DEBUG_LOG_PATH, is_debug_log=True) # !!!
####################################
class MattsCustomFormatter(logging.Formatter):
"""
Class which acts as a Formatter object, but which automatically indents
log messages based on how many function calls deep the messages originate.
"""
# TODO: fine-tune indentation levels
# SOURCE: http://stackoverflow.com/questions/9212228/using-custom-formatter-classes-with-pythons-logging-config-module
# SOURCE: http://code.activestate.com/recipes/412603-stack-based-indentation-of-formatted-logging/
def __init__( self, name, fmt=None, datefmt=None, is_debug_log=False ):
logging.Formatter.__init__(self, fmt, datefmt)
self.name = name
self.is_debug_log = is_debug_log
self.baseline = self.determine_baseline()
def determine_baseline(self):
stack = inspect.stack()
stack = [elems for elems in stack if __file__ not in elems[1]]
# for row in stack:
# print row
return len(stack)
def format( self, record ):
####################################
# fetch the function call stack;
# ignore logwraps as function calls,
# and filter things like builtins
stack = inspect.stack()
stack = [e[1] for e in stack]
# print "UNMODDED:"
# for fi in stack:
# print fi
# print "MODDED:"
stack = [e for e in stack if "Python.framework" not in e]
stack = [e for e in stack if "logging/__init__.py" not in e]
stack = [e for e in stack if "litelog.py" not in e]
# stack = stack[self.baseline:]
# for fi in stack:
# print fi
# print "is debug log:", self.is_debug_log
# print len(stack), self.baseline
# print stack
# print
MattsCustomFormatter.STACK = stack
####################################
# establish the indent
indent = bytearray('...') * (len(stack) - self.baseline)
if indent:
indent[0] = ' '
record.indent = indent
# record.function = stack[8][3]
record.message = record.getMessage()
record.asctime = self.formatTime(record, self.datefmt)
output_string = self._fmt.format(**record.__dict__) # REVOLUTIONARY! Not.
####################################
# the rest of this is taken from
# the actual logging module!
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if output_string[-1:] != "\n":
output_string = output_string + "\n"
try:
output_string = output_string + record.exc_text
except UnicodeError:
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when output_string is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
output_string = output_string + record.exc_text.decode(sys.getfilesystemencoding(), 'replace')
del record.indent
# del record.function
return output_string
def get(name, path='activity.log', is_debug_log=False):
"""
Returns a logger object so that a given file can log its activity.
If two loggers are created with the same name, they will output 2x to the same file.
"""
# SOURCE: http://stackoverflow.com/questions/7621897/python-logging-module-globally
# formatter = IndentFormatter("%(asctime)s [%(levelname)8s] %(module)30s:%(indent)s%(message)s")
formatter = MattsCustomFormatter(name, "{asctime} [{levelname:8}] {module} :{indent} {message}", is_debug_log=is_debug_log)
handler = RotatingFileHandler(path, maxBytes=1024 * 100, backupCount=3)
handler.setFormatter(formatter)
logger = logging.getLogger(name) # will return same logger if same name given
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
return logger
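# Hedged usage note: because get() always adds a fresh handler, creating two
# loggers with the same name doubles the output, e.g. (illustrative):
#
#     LOGGER = get("demo", path="demo.log")
#     LOGGER.info("written once")
#     get("demo", path="demo.log").info("written twice -- two handlers now")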
def logwrap(func):
"""
This function is a decorator which allows all input/output/errors of any
given function to be logged, timestamped, and output to a SINGLE __debug__.log FILE!
Useful for more egregious errors (such as logical errors,
or the abuse of function signatures).
"""
global _FIRST, _META_LOGGER
    if _META_LOGGER is None:
        warnings.warn("_META_LOGGER not set up! @litelog.logwrap will have no effect!")
return func
if _FIRST:
_META_LOGGER.debug("----------BEGIN----------")
_FIRST = False
@functools.wraps(func)
def wrapped(*args, **kwargs):
"""
Replacement function which wraps I/O and erroring.
"""
_META_LOGGER.debug("<{}> called with:".format(func.__name__))
_META_LOGGER.debug("args: {}".format(args))
_META_LOGGER.debug("kwargs: {}".format(kwargs))
try:
out = func(*args, **kwargs)
_META_LOGGER.debug("<{}> returned: {}".format(func.__name__, out))
return out
except:
# SOURCE: http://stackoverflow.com/questions/9005941/python-exception-decorator-how-to-preserve-stacktrace
_META_LOGGER.debug("<{}> threw error: {}\n".format(func.__name__, traceback.format_exc()))
            (errtype, errvalue, errtraceback) = sys.exc_info()  # (type, value, traceback)
            raise errtype, errvalue, errtraceback
return wrapped
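# A minimal self-test sketch, assuming this module is run directly from a
# writable directory; the names below (demo.log, _divide) are illustrative.
if __name__ == "__main__":
    set_debug(__file__)  # places __debug__.log next to this file
    DEMO_LOGGER = get("demo", path="demo.log")
    DEMO_LOGGER.info("----------BEGIN----------")

    @logwrap
    def _divide(numerator, denominator):
        """Tiny function used only to exercise the I/O and error logging."""
        return numerator / denominator

    DEMO_LOGGER.info("2 / 3 = {}".format(_divide(2.0, 3.0)))
    try:
        _divide(1, 0)  # the traceback is written to __debug__.log, then re-raised
    except ZeroDivisionError:
        DEMO_LOGGER.info("caught the expected ZeroDivisionError")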
|
|
"""Beatles Dataset Loader
.. admonition:: Dataset Info
:class: dropdown
The Beatles Dataset includes beat and metric position, chord, key, and segmentation
annotations for 179 Beatles songs. Details can be found in http://matthiasmauch.net/_pdf/mauch_omp_2009.pdf and
http://isophonics.net/content/reference-annotations-beatles.
"""
import csv
import os
from typing import BinaryIO, Optional, TextIO, Tuple
from deprecated.sphinx import deprecated
import librosa
import numpy as np
from mirdata import download_utils
from mirdata import jams_utils
from mirdata import core
from mirdata import annotations
from mirdata import io
BIBTEX = """@inproceedings{mauch2009beatles,
title={OMRAS2 metadata project 2009},
author={Mauch, Matthias and Cannam, Chris and Davies, Matthew and Dixon, Simon and Harte,
Christopher and Kolozali, Sefki and Tidhar, Dan and Sandler, Mark},
booktitle={12th International Society for Music Information Retrieval Conference},
year={2009},
series = {ISMIR}
}"""
INDEXES = {
"default": "1.2",
"test": "1.2",
"1.2": core.Index(filename="beatles_index_1.2.json"),
}
REMOTES = {
"annotations": download_utils.RemoteFileMetadata(
filename="The Beatles Annotations.tar.gz",
url="http://isophonics.net/files/annotations/The%20Beatles%20Annotations.tar.gz",
checksum="62425c552d37c6bb655a78e4603828cc",
destination_dir="annotations",
)
}
DOWNLOAD_INFO = """
Unfortunately the audio files of the Beatles dataset are not available
for download. If you have the Beatles dataset, place the contents into
a folder called Beatles with the following structure:
> Beatles/
> annotations/
> audio/
and copy the Beatles folder to {}
"""
LICENSE_INFO = (
"Unfortunately we couldn't find the license information for the Beatles dataset."
)
class Track(core.Track):
"""Beatles track class
Args:
track_id (str): track id of the track
data_home (str): path where the data lives
Attributes:
audio_path (str): track audio path
beats_path (str): beat annotation path
chords_path (str): chord annotation path
keys_path (str): key annotation path
sections_path (str): sections annotation path
title (str): title of the track
track_id (str): track id
Cached Properties:
beats (BeatData): human-labeled beat annotations
chords (ChordData): human-labeled chord annotations
key (KeyData): local key annotations
sections (SectionData): section annotations
"""
def __init__(
self,
track_id,
data_home,
dataset_name,
index,
metadata,
):
super().__init__(
track_id,
data_home,
dataset_name,
index,
metadata,
)
self.beats_path = self.get_path("beat")
self.chords_path = self.get_path("chords")
self.keys_path = self.get_path("keys")
self.sections_path = self.get_path("sections")
self.audio_path = self.get_path("audio")
self.title = os.path.basename(self._track_paths["sections"][0]).split(".")[0]
@core.cached_property
def beats(self) -> Optional[annotations.BeatData]:
return load_beats(self.beats_path)
@core.cached_property
def chords(self) -> Optional[annotations.ChordData]:
return load_chords(self.chords_path)
@core.cached_property
def key(self) -> Optional[annotations.KeyData]:
return load_key(self.keys_path)
@core.cached_property
def sections(self) -> Optional[annotations.SectionData]:
return load_sections(self.sections_path)
@property
def audio(self) -> Optional[Tuple[np.ndarray, float]]:
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
def to_jams(self):
"""the track's data in jams format
Returns:
jams.JAMS: return track data in jam format
"""
return jams_utils.jams_converter(
audio_path=self.audio_path,
beat_data=[(self.beats, None)],
section_data=[(self.sections, None)],
chord_data=[(self.chords, None)],
key_data=[(self.key, None)],
metadata={"artist": "The Beatles", "title": self.title},
)
@io.coerce_to_bytes_io
def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load a Beatles audio file.
Args:
fhandle (str or file-like): path or file-like object pointing to an audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
return librosa.load(fhandle, sr=None, mono=True)
@io.coerce_to_string_io
def load_beats(fhandle: TextIO) -> annotations.BeatData:
"""Load Beatles format beat data from a file
Args:
fhandle (str or file-like): path or file-like object pointing to a beat annotation file
Returns:
BeatData: loaded beat data
"""
beat_times, beat_positions = [], []
dialect = csv.Sniffer().sniff(fhandle.read(1024))
fhandle.seek(0)
reader = csv.reader(fhandle, dialect)
for line in reader:
beat_times.append(float(line[0]))
beat_positions.append(line[-1])
beat_positions = _fix_newpoint(np.array(beat_positions)) # type: ignore
# After fixing New Point labels convert positions to int
beat_data = annotations.BeatData(
np.array(beat_times),
"s",
np.array([int(b) for b in beat_positions]),
"bar_index",
)
return beat_data
@io.coerce_to_string_io
def load_chords(fhandle: TextIO) -> annotations.ChordData:
"""Load Beatles format chord data from a file
Args:
fhandle (str or file-like): path or file-like object pointing to a chord annotation file
Returns:
ChordData: loaded chord data
"""
start_times, end_times, chords = [], [], []
dialect = csv.Sniffer().sniff(fhandle.read(1024))
fhandle.seek(0)
reader = csv.reader(fhandle, dialect)
for line in reader:
start_times.append(float(line[0]))
end_times.append(float(line[1]))
chords.append(line[2])
return annotations.ChordData(
np.array([start_times, end_times]).T, "s", chords, "harte"
)
@io.coerce_to_string_io
def load_key(fhandle: TextIO) -> annotations.KeyData:
"""Load Beatles format key data from a file
Args:
fhandle (str or file-like): path or file-like object pointing to a key annotation file
Returns:
KeyData: loaded key data
"""
start_times, end_times, keys = [], [], []
reader = csv.reader(fhandle, delimiter="\t")
for line in reader:
if line[2] == "Key":
start_times.append(float(line[0]))
end_times.append(float(line[1]))
keys.append(line[3])
return annotations.KeyData(
np.array([start_times, end_times]).T, "s", keys, "key_mode"
)
@io.coerce_to_string_io
def load_sections(fhandle: TextIO) -> annotations.SectionData:
"""Load Beatles format section data from a file
Args:
fhandle (str or file-like): path or file-like object pointing to a section annotation file
Returns:
SectionData: loaded section data
"""
start_times, end_times, sections = [], [], []
reader = csv.reader(fhandle, delimiter="\t")
for line in reader:
start_times.append(float(line[0]))
end_times.append(float(line[1]))
sections.append(line[3])
return annotations.SectionData(
np.array([start_times, end_times]).T, "s", sections, "open"
)
def _fix_newpoint(beat_positions: np.ndarray) -> np.ndarray:
"""Fills in missing beat position labels by inferring the beat position
from neighboring beats.
"""
while np.any(beat_positions == "New Point"):
idxs = np.where(beat_positions == "New Point")[0]
for i in idxs:
if i < len(beat_positions) - 1:
if not beat_positions[i + 1] == "New Point":
beat_positions[i] = str(np.mod(int(beat_positions[i + 1]) - 1, 4))
if i == len(beat_positions) - 1:
if not beat_positions[i - 1] == "New Point":
beat_positions[i] = str(np.mod(int(beat_positions[i - 1]) + 1, 4))
beat_positions[beat_positions == "0"] = "4"
return beat_positions
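# Worked example (illustrative): given positions ["New Point", "2", "3", "New Point"],
# the first entry is inferred from its right-hand neighbour as str((2 - 1) % 4) == "1",
# the last from its left-hand neighbour as str((3 + 1) % 4) == "0", and the final
# "0" -> "4" rewrite keeps every position in the 1-4 range.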
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
    The Beatles dataset
"""
def __init__(self, data_home=None, version="default"):
super().__init__(
data_home,
version,
name="beatles",
track_class=Track,
bibtex=BIBTEX,
indexes=INDEXES,
remotes=REMOTES,
download_info=DOWNLOAD_INFO,
license_info=LICENSE_INFO,
)
@deprecated(
reason="Use mirdata.datasets.beatles.load_audio",
version="0.3.4",
)
def load_audio(self, *args, **kwargs):
return load_audio(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.beatles.load_beats",
version="0.3.4",
)
def load_beats(self, *args, **kwargs):
return load_beats(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.beatles.load_chords",
version="0.3.4",
)
def load_chords(self, *args, **kwargs):
return load_chords(*args, **kwargs)
@deprecated(
reason="Use mirdata.datasets.beatles.load_sections",
version="0.3.4",
)
def load_sections(self, *args, **kwargs):
return load_sections(*args, **kwargs)
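# A minimal usage sketch, assuming the annotations (and, optionally, audio) have
# already been copied into ``data_home`` as described in DOWNLOAD_INFO; the path
# below is illustrative and the core.Dataset helpers used here (track_ids,
# choice_track) reflect the mirdata 0.3.x API.
if __name__ == "__main__":
    beatles = Dataset(data_home="/path/to/Beatles")
    print("{} tracks in the index".format(len(beatles.track_ids)))
    example_track = beatles.choice_track()  # a random Track instance
    print(example_track.title)
    print(example_track.beats)  # loaded lazily from the beat annotation file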
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses the command line, discovers the appropriate benchmarks, and runs them.
Handles benchmark configuration, but all the logic for
actually running the benchmark is in Benchmark and PageRunner."""
import argparse
import hashlib
import json
import logging
import os
import sys
# We need to set the logging format here to make sure that no other module
# imported by telemetry sets the logging format before this, which would
# make this call a no-op.
# (See: https://docs.python.org/2/library/logging.html#logging.basicConfig)
logging.basicConfig(
format='(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
'%(message)s')
from telemetry import benchmark
from telemetry.core import discover
from telemetry import decorators
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_options
from telemetry.internal.util import binary_manager
from telemetry.internal.util import command_line
from telemetry.internal.util import ps_util
from telemetry import project_config
# TODO(aiolos): Remove this once clients move over to project_config version.
ProjectConfig = project_config.ProjectConfig
def _IsBenchmarkEnabled(benchmark_class, possible_browser):
return (issubclass(benchmark_class, benchmark.Benchmark) and
not benchmark_class.ShouldDisable(possible_browser) and
decorators.IsEnabled(benchmark_class, possible_browser)[0])
def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
""" Print benchmarks that are not filtered in the same order of benchmarks in
the |benchmarks| list.
Args:
benchmarks: the list of benchmarks to be printed (in the same order of the
list).
possible_browser: the possible_browser instance that's used for checking
which benchmarks are enabled.
output_pipe: the stream in which benchmarks are printed on.
"""
if not benchmarks:
print >> output_pipe, 'No benchmarks found!'
return
  b = None  # Needed to stop pylint from complaining about an undefined variable.
if any(not issubclass(b, benchmark.Benchmark) for b in benchmarks):
assert False, '|benchmarks| param contains non benchmark class: %s' % b
# Align the benchmark names to the longest one.
format_string = ' %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
disabled_benchmarks = []
print >> output_pipe, 'Available benchmarks %sare:' % (
'for %s ' % possible_browser.browser_type if possible_browser else '')
# Sort the benchmarks by benchmark name.
benchmarks = sorted(benchmarks, key=lambda b: b.Name())
for b in benchmarks:
if not possible_browser or _IsBenchmarkEnabled(b, possible_browser):
print >> output_pipe, format_string % (b.Name(), b.Description())
else:
disabled_benchmarks.append(b)
if disabled_benchmarks:
print >> output_pipe, (
'\nDisabled benchmarks for %s are (force run with -d):' %
possible_browser.browser_type)
for b in disabled_benchmarks:
print >> output_pipe, format_string % (b.Name(), b.Description())
print >> output_pipe, (
'Pass --browser to list benchmarks for another browser.\n')
class Help(command_line.OptparseCommand):
"""Display help information about a command"""
usage = '[command]'
def __init__(self, commands):
self._all_commands = commands
def Run(self, args):
if len(args.positional_args) == 1:
commands = _MatchingCommands(args.positional_args[0], self._all_commands)
if len(commands) == 1:
command = commands[0]
parser = command.CreateParser()
command.AddCommandLineArgs(parser, None)
parser.print_help()
return 0
print >> sys.stderr, ('usage: %s [command] [<options>]' % _ScriptName())
print >> sys.stderr, 'Available commands are:'
for command in self._all_commands:
print >> sys.stderr, ' %-10s %s' % (
command.Name(), command.Description())
print >> sys.stderr, ('"%s help <command>" to see usage information '
'for a specific command.' % _ScriptName())
return 0
class List(command_line.OptparseCommand):
"""Lists the available benchmarks"""
usage = '[benchmark_name] [<options>]'
@classmethod
def CreateParser(cls):
options = browser_options.BrowserFinderOptions()
parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
return parser
@classmethod
def AddCommandLineArgs(cls, parser, _):
parser.add_option('-j', '--json-output-file', type='string')
parser.add_option('-n', '--num-shards', type='int', default=1)
@classmethod
def ProcessCommandLineArgs(cls, parser, args, environment):
if not args.positional_args:
args.benchmarks = _Benchmarks(environment)
elif len(args.positional_args) == 1:
args.benchmarks = _MatchBenchmarkName(args.positional_args[0],
environment, exact_matches=False)
else:
parser.error('Must provide at most one benchmark name.')
def Run(self, args):
possible_browser = browser_finder.FindBrowser(args)
if args.browser_type in (
'release', 'release_x64', 'debug', 'debug_x64', 'canary',
'android-chromium', 'android-chrome'):
args.browser_type = 'reference'
possible_reference_browser = browser_finder.FindBrowser(args)
else:
possible_reference_browser = None
if args.json_output_file:
with open(args.json_output_file, 'w') as f:
f.write(_GetJsonBenchmarkList(possible_browser,
possible_reference_browser,
args.benchmarks, args.num_shards))
else:
PrintBenchmarkList(args.benchmarks, possible_browser)
return 0
class Run(command_line.OptparseCommand):
"""Run one or more benchmarks (default)"""
usage = 'benchmark_name [page_set] [<options>]'
@classmethod
def CreateParser(cls):
options = browser_options.BrowserFinderOptions()
parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
return parser
@classmethod
def AddCommandLineArgs(cls, parser, environment):
benchmark.AddCommandLineArgs(parser)
# Allow benchmarks to add their own command line options.
matching_benchmarks = []
for arg in sys.argv[1:]:
matching_benchmarks += _MatchBenchmarkName(arg, environment)
if matching_benchmarks:
# TODO(dtu): After move to argparse, add command-line args for all
# benchmarks to subparser. Using subparsers will avoid duplicate
# arguments.
matching_benchmark = matching_benchmarks.pop()
matching_benchmark.AddCommandLineArgs(parser)
# The benchmark's options override the defaults!
matching_benchmark.SetArgumentDefaults(parser)
@classmethod
def ProcessCommandLineArgs(cls, parser, args, environment):
all_benchmarks = _Benchmarks(environment)
if not args.positional_args:
possible_browser = (
browser_finder.FindBrowser(args) if args.browser_type else None)
PrintBenchmarkList(all_benchmarks, possible_browser)
sys.exit(-1)
input_benchmark_name = args.positional_args[0]
matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
if not matching_benchmarks:
print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
print >> sys.stderr
most_likely_matched_benchmarks = command_line.GetMostLikelyMatchedObject(
all_benchmarks, input_benchmark_name, lambda x: x.Name())
if most_likely_matched_benchmarks:
        print >> sys.stderr, 'Did you mean any of the benchmarks below?'
PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
sys.exit(-1)
if len(matching_benchmarks) > 1:
print >> sys.stderr, ('Multiple benchmarks named "%s".' %
input_benchmark_name)
print >> sys.stderr, 'Did you mean one of these?'
print >> sys.stderr
PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
sys.exit(-1)
benchmark_class = matching_benchmarks.pop()
if len(args.positional_args) > 1:
parser.error('Too many arguments.')
assert issubclass(benchmark_class, benchmark.Benchmark), (
'Trying to run a non-Benchmark?!')
benchmark.ProcessCommandLineArgs(parser, args)
benchmark_class.ProcessCommandLineArgs(parser, args)
cls._benchmark = benchmark_class
def Run(self, args):
return min(255, self._benchmark().Run(args))
def _ScriptName():
return os.path.basename(sys.argv[0])
def _MatchingCommands(string, commands):
return [command for command in commands
if command.Name().startswith(string)]
@decorators.Cache
def _Benchmarks(environment):
benchmarks = []
for search_dir in environment.benchmark_dirs:
benchmarks += discover.DiscoverClasses(search_dir,
environment.top_level_dir,
benchmark.Benchmark,
index_by_class_name=True).values()
return benchmarks
def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
def _Matches(input_string, search_string):
if search_string.startswith(input_string):
return True
for part in search_string.split('.'):
if part.startswith(input_string):
return True
return False
# Exact matching.
if exact_matches:
# Don't add aliases to search dict, only allow exact matching for them.
if input_benchmark_name in environment.benchmark_aliases:
exact_match = environment.benchmark_aliases[input_benchmark_name]
else:
exact_match = input_benchmark_name
for benchmark_class in _Benchmarks(environment):
if exact_match == benchmark_class.Name():
return [benchmark_class]
return []
# Fuzzy matching.
return [benchmark_class for benchmark_class in _Benchmarks(environment)
if _Matches(input_benchmark_name, benchmark_class.Name())]
def GetBenchmarkByName(name, environment):
matched = _MatchBenchmarkName(name, environment, exact_matches=True)
# With exact_matches, len(matched) is either 0 or 1.
if len(matched) == 0:
return None
return matched[0]
def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
benchmark_classes, num_shards):
"""Returns a list of all enabled benchmarks in a JSON format expected by
buildbots.
JSON format:
{ "version": <int>,
"steps": {
<string>: {
"device_affinity": <int>,
"cmd": <string>,
"perf_dashboard_id": <string>,
},
...
}
}
"""
output = {
'version': 1,
'steps': {
}
}
for benchmark_class in benchmark_classes:
if not _IsBenchmarkEnabled(benchmark_class, possible_browser):
continue
base_name = benchmark_class.Name()
base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
'-v', '--output-format=chartjson', '--upload-results',
base_name]
perf_dashboard_id = base_name
# Based on the current timings, we shift the result of the hash function to
# achieve better load balancing. Those shift values are to be revised when
# necessary. The shift value is calculated such that the total cycle time
# is minimized.
hash_shift = {
2 : 47, # for old desktop configurations with 2 slaves
5 : 56, # for new desktop configurations with 5 slaves
      21 : 43  # for Android configurations with 3 slaves and 7 devices each
}
shift = hash_shift.get(num_shards, 0)
base_name_hash = hashlib.sha1(base_name).hexdigest()
device_affinity = (int(base_name_hash, 16) >> shift) % num_shards
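    # Worked example (illustrative): with num_shards == 5 the shift is 56, so
    # the 160-bit SHA-1 value is reduced to its top 104 bits before the modulus
    # is taken; tuning the shift changes which benchmarks end up on the same
    # shard, which is how the load balancing described above is adjusted.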
output['steps'][base_name] = {
'cmd': ' '.join(base_cmd + [
'--browser=%s' % possible_browser.browser_type]),
'device_affinity': device_affinity,
'perf_dashboard_id': perf_dashboard_id,
}
if (possible_reference_browser and
_IsBenchmarkEnabled(benchmark_class, possible_reference_browser)):
output['steps'][base_name + '.reference'] = {
'cmd': ' '.join(base_cmd + [
'--browser=reference', '--output-trace-tag=_ref']),
'device_affinity': device_affinity,
'perf_dashboard_id': perf_dashboard_id,
}
return json.dumps(output, indent=2, sort_keys=True)
def main(environment, extra_commands=None):
ps_util.EnableListingStrayProcessesUponExitHook()
# Get the command name from the command line.
if len(sys.argv) > 1 and sys.argv[1] == '--help':
sys.argv[1] = 'help'
command_name = 'run'
for arg in sys.argv[1:]:
if not arg.startswith('-'):
command_name = arg
break
# TODO(eakuefner): Remove this hack after we port to argparse.
if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
command_name = 'run'
sys.argv[2] = '--help'
if extra_commands is None:
extra_commands = []
all_commands = [Help, List, Run] + extra_commands
# Validate and interpret the command name.
commands = _MatchingCommands(command_name, all_commands)
if len(commands) > 1:
print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
% (command_name, _ScriptName()))
for command in commands:
print >> sys.stderr, ' %-10s %s' % (
command.Name(), command.Description())
return 1
if commands:
command = commands[0]
else:
command = Run
binary_manager.InitDependencyManager(environment.client_config)
# Parse and run the command.
parser = command.CreateParser()
command.AddCommandLineArgs(parser, environment)
# Set the default chrome root variable.
parser.set_defaults(chrome_root=environment.default_chrome_root)
if isinstance(parser, argparse.ArgumentParser):
commandline_args = sys.argv[1:]
options, args = parser.parse_known_args(commandline_args[1:])
command.ProcessCommandLineArgs(parser, options, args, environment)
else:
options, args = parser.parse_args()
if commands:
args = args[1:]
options.positional_args = args
command.ProcessCommandLineArgs(parser, options, environment)
if command == Help:
command_instance = command(all_commands)
else:
command_instance = command()
if isinstance(command_instance, command_line.OptparseCommand):
return command_instance.Run(options)
else:
return command_instance.Run(options, args)
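# Embedders typically build a project_config.ProjectConfig describing where
# their benchmarks live and then call main(config); the exact constructor
# arguments vary between telemetry revisions, so treat any example signature
# as an assumption and check project_config.py for the current one.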
|
|
# -*- coding: utf-8 -*-
""" Core components """
from boto.exception import JSONResponseError, BotoServerError
from dynamic_dynamodb import calculators
from dynamic_dynamodb.aws import dynamodb, sns
from dynamic_dynamodb.core import circuit_breaker
from dynamic_dynamodb.statistics import gsi as gsi_stats
from dynamic_dynamodb.log_handler import LOGGER as logger
from dynamic_dynamodb.config_handler import get_global_option, get_gsi_option
def ensure_provisioning(
table_name, table_key, gsi_name, gsi_key,
num_consec_read_checks, num_consec_write_checks):
""" Ensure that provisioning is correct for Global Secondary Indexes
:type table_name: str
:param table_name: Name of the DynamoDB table
:type table_key: str
:param table_key: Table configuration option key name
:type gsi_name: str
:param gsi_name: Name of the GSI
:type gsi_key: str
:param gsi_key: GSI configuration option key name
:type num_consec_read_checks: int
:param num_consec_read_checks: How many consecutive checks have we had
:type num_consec_write_checks: int
:param num_consec_write_checks: How many consecutive checks have we had
:returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
"""
if get_global_option('circuit_breaker_url'):
if circuit_breaker.is_open():
logger.warning('Circuit breaker is OPEN!')
return (0, 0)
logger.info(
'{0} - Will ensure provisioning for global secondary index {1}'.format(
table_name, gsi_name))
# Handle throughput alarm checks
__ensure_provisioning_alarm(table_name, table_key, gsi_name, gsi_key)
try:
read_update_needed, updated_read_units, num_consec_read_checks = \
__ensure_provisioning_reads(
table_name,
table_key,
gsi_name,
gsi_key,
num_consec_read_checks)
write_update_needed, updated_write_units, num_consec_write_checks = \
__ensure_provisioning_writes(
table_name,
table_key,
gsi_name,
gsi_key,
num_consec_write_checks)
if read_update_needed:
num_consec_read_checks = 0
if write_update_needed:
num_consec_write_checks = 0
# Handle throughput updates
if read_update_needed or write_update_needed:
logger.info(
'{0} - GSI: {1} - Changing provisioning to {2:d} '
'read units and {3:d} write units'.format(
table_name,
gsi_name,
int(updated_read_units),
int(updated_write_units)))
__update_throughput(
table_name,
table_key,
gsi_name,
gsi_key,
updated_read_units,
updated_write_units)
else:
logger.info(
'{0} - GSI: {1} - No need to change provisioning'.format(
table_name,
gsi_name))
except JSONResponseError:
raise
except BotoServerError:
raise
return num_consec_read_checks, num_consec_write_checks
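# Hedged usage sketch: callers run this once per check interval and feed the
# returned counters back in on the next iteration, e.g. (illustrative names):
#
#     consec_reads, consec_writes = 0, 0
#     while True:
#         consec_reads, consec_writes = ensure_provisioning(
#             'my-table', 'my-table', 'my-gsi', 'my-gsi',
#             consec_reads, consec_writes)
#         time.sleep(check_interval)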
def __calculate_always_decrease_rw_values(
table_name, gsi_name, read_units, provisioned_reads,
write_units, provisioned_writes):
""" Calculate values for always-decrease-rw-together
This will only return reads and writes decreases if both reads and writes
are lower than the current provisioning
:type table_name: str
:param table_name: Name of the DynamoDB table
:type gsi_name: str
:param gsi_name: Name of the GSI
:type read_units: int
:param read_units: New read unit provisioning
:type provisioned_reads: int
:param provisioned_reads: Currently provisioned reads
:type write_units: int
:param write_units: New write unit provisioning
:type provisioned_writes: int
:param provisioned_writes: Currently provisioned writes
:returns: (int, int) -- (reads, writes)
"""
if read_units < provisioned_reads and write_units < provisioned_writes:
return (read_units, write_units)
if read_units < provisioned_reads:
logger.info(
'{0} - GSI: {1} - Reads could be decreased, '
'but we are waiting for writes to get lower than the threshold '
'before scaling down'.format(table_name, gsi_name))
read_units = provisioned_reads
elif write_units < provisioned_writes:
logger.info(
'{0} - GSI: {1} - Writes could be decreased, '
'but we are waiting for reads to get lower than the threshold '
'before scaling down'.format(table_name, gsi_name))
write_units = provisioned_writes
return (read_units, write_units)
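# Worked example (illustrative): with 100 provisioned reads / 100 provisioned
# writes and calculated targets of 50 reads / 120 writes, only reads qualify
# for a decrease, so the function returns (100, 120) and the scale-down is
# postponed until writes also drop below their threshold.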
def __ensure_provisioning_reads(
table_name, table_key, gsi_name, gsi_key, num_consec_read_checks):
""" Ensure that provisioning is correct
:type table_name: str
:param table_name: Name of the DynamoDB table
:type table_key: str
:param table_key: Table configuration option key name
:type gsi_name: str
:param gsi_name: Name of the GSI
:type gsi_key: str
:param gsi_key: Configuration option key name
:type num_consec_read_checks: int
:param num_consec_read_checks: How many consecutive checks have we had
:returns: (bool, int, int)
update_needed, updated_read_units, num_consec_read_checks
"""
if not get_gsi_option(table_key, gsi_key, 'enable_reads_autoscaling'):
logger.info(
'{0} - GSI: {1} - '
'Autoscaling of reads has been disabled'.format(
table_name, gsi_name))
return False, dynamodb.get_provisioned_gsi_read_units(
table_name, gsi_name), 0
update_needed = False
try:
lookback_window_start = get_gsi_option(
table_key, gsi_key, 'lookback_window_start')
current_read_units = dynamodb.get_provisioned_gsi_read_units(
table_name, gsi_name)
consumed_read_units_percent = \
gsi_stats.get_consumed_read_units_percent(
table_name, gsi_name, lookback_window_start)
throttled_read_count = \
gsi_stats.get_throttled_read_event_count(
table_name, gsi_name, lookback_window_start)
reads_upper_threshold = \
get_gsi_option(table_key, gsi_key, 'reads_upper_threshold')
reads_lower_threshold = \
get_gsi_option(table_key, gsi_key, 'reads_lower_threshold')
increase_reads_unit = \
get_gsi_option(table_key, gsi_key, 'increase_reads_unit')
decrease_reads_unit = \
get_gsi_option(table_key, gsi_key, 'decrease_reads_unit')
increase_reads_with = \
get_gsi_option(table_key, gsi_key, 'increase_reads_with')
decrease_reads_with = \
get_gsi_option(table_key, gsi_key, 'decrease_reads_with')
throttled_reads_upper_threshold = \
get_gsi_option(
table_key, gsi_key, 'throttled_reads_upper_threshold')
max_provisioned_reads = \
get_gsi_option(table_key, gsi_key, 'max_provisioned_reads')
num_read_checks_before_scale_down = \
get_gsi_option(
table_key, gsi_key, 'num_read_checks_before_scale_down')
num_read_checks_reset_percent = \
get_gsi_option(table_key, gsi_key, 'num_read_checks_reset_percent')
except JSONResponseError:
raise
except BotoServerError:
raise
# Set the updated units to the current read unit value
updated_read_units = current_read_units
# Reset consecutive reads if num_read_checks_reset_percent is reached
if num_read_checks_reset_percent:
if consumed_read_units_percent >= num_read_checks_reset_percent:
logger.info(
'{0} - GSI: {1} - Resetting the number of consecutive '
'read checks. Reason: Consumed percent {2} is '
'greater than reset percent: {3}'.format(
table_name,
gsi_name,
consumed_read_units_percent,
num_read_checks_reset_percent))
num_consec_read_checks = 0
if (consumed_read_units_percent == 0 and not
get_gsi_option(
table_key,
gsi_key,
'allow_scaling_down_reads_on_0_percent')):
logger.info(
'{0} - GSI: {1} - '
'Scaling down reads is not done when usage is at 0%'.format(
table_name, gsi_name))
# Increase needed due to high CU consumption
elif consumed_read_units_percent >= reads_upper_threshold:
# Exit if up scaling has been disabled
if not get_gsi_option(table_key, gsi_key, 'enable_reads_up_scaling'):
logger.debug(
'{0} - GSI: {1} - Up scaling event detected. '
'No action taken as scaling '
'up reads has been disabled in the configuration'.format(
table_name, gsi_name))
else:
if increase_reads_unit == 'percent':
calculated_provisioning = calculators.increase_reads_in_percent(
current_read_units,
increase_reads_with,
get_gsi_option(table_key, gsi_key, 'max_provisioned_reads'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
calculated_provisioning = calculators.increase_reads_in_units(
current_read_units,
increase_reads_with,
get_gsi_option(table_key, gsi_key, 'max_provisioned_reads'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
if current_read_units != calculated_provisioning:
logger.info(
'{0} - Resetting the number of consecutive '
'read checks. Reason: scale up event detected'.format(
table_name))
num_consec_read_checks = 0
update_needed = True
updated_read_units = calculated_provisioning
# Increase needed due to high throttling
elif throttled_read_count > throttled_reads_upper_threshold:
if throttled_reads_upper_threshold > 0:
if increase_reads_unit == 'percent':
calculated_provisioning = calculators.increase_reads_in_percent(
current_read_units,
increase_reads_with,
get_gsi_option(table_key, gsi_key, 'max_provisioned_reads'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
calculated_provisioning = calculators.increase_reads_in_units(
current_read_units,
increase_reads_with,
get_gsi_option(table_key, gsi_key, 'max_provisioned_reads'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
if current_read_units != calculated_provisioning:
logger.info(
'{0} - GSI: {1} - Resetting the number of consecutive '
'read checks. Reason: scale up event detected'.format(
table_name, gsi_name))
num_consec_read_checks = 0
update_needed = True
updated_read_units = calculated_provisioning
# Decrease needed due to low CU consumption
elif consumed_read_units_percent <= reads_lower_threshold:
# Exit if down scaling has been disabled
if not get_gsi_option(table_key, gsi_key, 'enable_reads_down_scaling'):
logger.debug(
'{0} - GSI: {1} - Down scaling event detected. '
'No action taken as scaling '
'down reads has been disabled in the configuration'.format(
table_name, gsi_name))
else:
if decrease_reads_unit == 'percent':
calculated_provisioning = calculators.decrease_reads_in_percent(
current_read_units,
decrease_reads_with,
get_gsi_option(table_key, gsi_key, 'min_provisioned_reads'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
calculated_provisioning = calculators.decrease_reads_in_units(
current_read_units,
decrease_reads_with,
get_gsi_option(table_key, gsi_key, 'min_provisioned_reads'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
if current_read_units != calculated_provisioning:
                # Count how many consecutive checks have requested a scale
                # down and compare against the configured threshold
num_consec_read_checks = num_consec_read_checks + 1
if num_consec_read_checks >= num_read_checks_before_scale_down:
update_needed = True
updated_read_units = calculated_provisioning
# Never go over the configured max provisioning
if max_provisioned_reads:
if int(updated_read_units) > int(max_provisioned_reads):
update_needed = True
updated_read_units = int(max_provisioned_reads)
            logger.info(
                'Will not increase reads over gsi-max-provisioned-reads '
                'limit ({0} reads)'.format(updated_read_units))
logger.info('{0} - GSI: {1} - Consecutive read checks {2}/{3}'.format(
table_name,
gsi_name,
num_consec_read_checks,
num_read_checks_before_scale_down))
return update_needed, updated_read_units, num_consec_read_checks
def __ensure_provisioning_writes(
table_name, table_key, gsi_name, gsi_key, num_consec_write_checks):
""" Ensure that provisioning of writes is correct
:type table_name: str
:param table_name: Name of the DynamoDB table
:type table_key: str
:param table_key: Table configuration option key name
:type gsi_name: str
:param gsi_name: Name of the GSI
:type gsi_key: str
:param gsi_key: Configuration option key name
:type num_consec_write_checks: int
:param num_consec_write_checks: How many consecutive checks have we had
:returns: (bool, int, int)
update_needed, updated_write_units, num_consec_write_checks
"""
if not get_gsi_option(table_key, gsi_key, 'enable_writes_autoscaling'):
logger.info(
'{0} - GSI: {1} - '
'Autoscaling of writes has been disabled'.format(
table_name, gsi_name))
return False, dynamodb.get_provisioned_gsi_write_units(
table_name, gsi_name), 0
update_needed = False
try:
lookback_window_start = get_gsi_option(
table_key, gsi_key, 'lookback_window_start')
current_write_units = dynamodb.get_provisioned_gsi_write_units(
table_name, gsi_name)
consumed_write_units_percent = \
gsi_stats.get_consumed_write_units_percent(
table_name, gsi_name, lookback_window_start)
throttled_write_count = \
gsi_stats.get_throttled_write_event_count(
table_name, gsi_name, lookback_window_start)
writes_upper_threshold = \
get_gsi_option(table_key, gsi_key, 'writes_upper_threshold')
writes_lower_threshold = \
get_gsi_option(table_key, gsi_key, 'writes_lower_threshold')
throttled_writes_upper_threshold = \
get_gsi_option(
table_key, gsi_key, 'throttled_writes_upper_threshold')
increase_writes_unit = \
get_gsi_option(table_key, gsi_key, 'increase_writes_unit')
increase_writes_with = \
get_gsi_option(table_key, gsi_key, 'increase_writes_with')
decrease_writes_unit = \
get_gsi_option(table_key, gsi_key, 'decrease_writes_unit')
decrease_writes_with = \
get_gsi_option(table_key, gsi_key, 'decrease_writes_with')
max_provisioned_writes = \
get_gsi_option(table_key, gsi_key, 'max_provisioned_writes')
num_write_checks_before_scale_down = \
get_gsi_option(
table_key, gsi_key, 'num_write_checks_before_scale_down')
num_write_checks_reset_percent = \
get_gsi_option(table_key, gsi_key, 'num_write_checks_reset_percent')
except JSONResponseError:
raise
except BotoServerError:
raise
# Set the updated units to the current write unit value
updated_write_units = current_write_units
# Reset write consecutive count if num_write_checks_reset_percent is reached
if num_write_checks_reset_percent:
if consumed_write_units_percent >= num_write_checks_reset_percent:
logger.info(
'{0} - GSI: {1} - Resetting the number of consecutive '
'write checks. Reason: Consumed percent {2} is '
'greater than reset percent: {3}'.format(
table_name,
gsi_name,
consumed_write_units_percent,
num_write_checks_reset_percent))
num_consec_write_checks = 0
# Check if we should update write provisioning
if (consumed_write_units_percent == 0 and not get_gsi_option(
table_key, gsi_key, 'allow_scaling_down_writes_on_0_percent')):
logger.info(
'{0} - GSI: {1} - '
'Scaling down writes is not done when usage is at 0%'.format(
table_name, gsi_name))
# Increase needed due to high CU consumption
elif consumed_write_units_percent >= writes_upper_threshold:
# Exit if up scaling has been disabled
if not get_gsi_option(table_key, gsi_key, 'enable_writes_up_scaling'):
logger.debug(
'{0} - GSI: {1} - Up scaling event detected. '
'No action taken as scaling '
'up writes has been disabled in the configuration'.format(
table_name, gsi_name))
else:
if increase_writes_unit == 'percent':
calculated_provisioning = \
calculators.increase_writes_in_percent(
current_write_units,
increase_writes_with,
get_gsi_option(
table_key, gsi_key, 'max_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
calculated_provisioning = calculators.increase_writes_in_units(
current_write_units,
increase_writes_with,
get_gsi_option(
table_key, gsi_key, 'max_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
if current_write_units != calculated_provisioning:
logger.info(
'{0} - GSI: {1} - Resetting the number of consecutive '
'write checks. Reason: scale up event detected'.format(
table_name, gsi_name))
num_consec_write_checks = 0
update_needed = True
updated_write_units = calculated_provisioning
# Increase needed due to high throttling
elif throttled_write_count > throttled_writes_upper_threshold:
if throttled_writes_upper_threshold > 0:
if increase_writes_unit == 'percent':
calculated_provisioning = \
calculators.increase_writes_in_percent(
current_write_units,
increase_writes_with,
get_gsi_option(
table_key, gsi_key, 'max_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
calculated_provisioning = calculators.increase_writes_in_units(
current_write_units,
increase_writes_with,
get_gsi_option(
table_key, gsi_key, 'max_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
if current_write_units != calculated_provisioning:
logger.info(
'{0} - GSI: {1} - Resetting the number of consecutive '
'write checks. Reason: scale up event detected'.format(
table_name, gsi_name))
num_consec_write_checks = 0
update_needed = True
updated_write_units = calculated_provisioning
# Decrease needed due to low CU consumption
elif consumed_write_units_percent <= writes_lower_threshold:
# Exit if down scaling has been disabled
if not get_gsi_option(table_key, gsi_key, 'enable_writes_down_scaling'):
logger.debug(
'{0} - GSI: {1} - Down scaling event detected. '
'No action taken as scaling '
'down writes has been disabled in the configuration'.format(
table_name, gsi_name))
else:
if decrease_writes_unit == 'percent':
calculated_provisioning = \
calculators.decrease_writes_in_percent(
current_write_units,
decrease_writes_with,
get_gsi_option(
table_key, gsi_key, 'min_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
else:
calculated_provisioning = calculators.decrease_writes_in_units(
current_write_units,
decrease_writes_with,
get_gsi_option(
table_key, gsi_key, 'min_provisioned_writes'),
'{0} - GSI: {1}'.format(table_name, gsi_name))
if current_write_units != calculated_provisioning:
num_consec_write_checks = num_consec_write_checks + 1
if (num_consec_write_checks >=
num_write_checks_before_scale_down):
update_needed = True
updated_write_units = calculated_provisioning
# Never go over the configured max provisioning
if max_provisioned_writes:
if int(updated_write_units) > int(max_provisioned_writes):
update_needed = True
updated_write_units = int(max_provisioned_writes)
logger.info(
'{0} - GSI: {1} - '
'Will not increase writes over gsi-max-provisioned-writes '
'limit ({2} writes)'.format(
table_name,
gsi_name,
updated_write_units))
logger.info('{0} - GSI: {1} - Consecutive write checks {2}/{3}'.format(
table_name,
gsi_name,
num_consec_write_checks,
num_write_checks_before_scale_down))
return update_needed, updated_write_units, num_consec_write_checks
def __update_throughput(
table_name, table_key, gsi_name, gsi_key, read_units, write_units):
""" Update throughput on the GSI
:type table_name: str
:param table_name: Name of the DynamoDB table
:type table_key: str
:param table_key: Table configuration option key name
:type gsi_name: str
:param gsi_name: Name of the GSI
:type gsi_key: str
:param gsi_key: Configuration option key name
:type read_units: int
:param read_units: New read unit provisioning
:type write_units: int
:param write_units: New write unit provisioning
"""
try:
current_ru = dynamodb.get_provisioned_gsi_read_units(
table_name, gsi_name)
current_wu = dynamodb.get_provisioned_gsi_write_units(
table_name, gsi_name)
except JSONResponseError:
raise
# Check table status
try:
gsi_status = dynamodb.get_gsi_status(table_name, gsi_name)
except JSONResponseError:
raise
logger.debug('{0} - GSI: {1} - GSI status is {2}'.format(
table_name, gsi_name, gsi_status))
if gsi_status != 'ACTIVE':
logger.warning(
'{0} - GSI: {1} - Not performing throughput changes when GSI '
'status is {2}'.format(table_name, gsi_name, gsi_status))
return
# If this setting is True, we will only scale down when
# BOTH reads AND writes are low
if get_gsi_option(table_key, gsi_key, 'always_decrease_rw_together'):
read_units, write_units = __calculate_always_decrease_rw_values(
table_name,
gsi_name,
read_units,
current_ru,
write_units,
current_wu)
if read_units == current_ru and write_units == current_wu:
logger.info('{0} - GSI: {1} - No changes to perform'.format(
table_name, gsi_name))
return
dynamodb.update_gsi_provisioning(
table_name,
table_key,
gsi_name,
gsi_key,
int(read_units),
int(write_units))
def __ensure_provisioning_alarm(table_name, table_key, gsi_name, gsi_key):
""" Ensure that provisioning alarm threshold is not exceeded
:type table_name: str
:param table_name: Name of the DynamoDB table
:type table_key: str
:param table_key: Table configuration option key name
:type gsi_name: str
:param gsi_name: Name of the GSI
:type gsi_key: str
:param gsi_key: Configuration option key name
"""
lookback_window_start = get_gsi_option(
table_key, gsi_key, 'lookback_window_start')
consumed_read_units_percent = gsi_stats.get_consumed_read_units_percent(
table_name, gsi_name, lookback_window_start)
consumed_write_units_percent = gsi_stats.get_consumed_write_units_percent(
table_name, gsi_name, lookback_window_start)
reads_upper_alarm_threshold = \
get_gsi_option(table_key, gsi_key, 'reads-upper-alarm-threshold')
reads_lower_alarm_threshold = \
get_gsi_option(table_key, gsi_key, 'reads-lower-alarm-threshold')
writes_upper_alarm_threshold = \
get_gsi_option(table_key, gsi_key, 'writes-upper-alarm-threshold')
writes_lower_alarm_threshold = \
get_gsi_option(table_key, gsi_key, 'writes-lower-alarm-threshold')
# Check upper alarm thresholds
upper_alert_triggered = False
upper_alert_message = []
if (reads_upper_alarm_threshold > 0 and
consumed_read_units_percent >= reads_upper_alarm_threshold):
upper_alert_triggered = True
upper_alert_message.append(
'{0} - GSI: {1} - Consumed Read Capacity {2:d}% '
'was greater than or equal to the upper alarm '
'threshold {3:d}%\n'.format(
table_name,
gsi_name,
consumed_read_units_percent,
reads_upper_alarm_threshold))
if (writes_upper_alarm_threshold > 0 and
consumed_write_units_percent >= writes_upper_alarm_threshold):
upper_alert_triggered = True
upper_alert_message.append(
'{0} - GSI: {1} - Consumed Write Capacity {2:d}% '
'was greater than or equal to the upper alarm '
'threshold {3:d}%\n'.format(
table_name,
gsi_name,
consumed_write_units_percent,
writes_upper_alarm_threshold))
# Check lower alarm thresholds
lower_alert_triggered = False
lower_alert_message = []
if (reads_lower_alarm_threshold > 0 and
consumed_read_units_percent < reads_lower_alarm_threshold):
lower_alert_triggered = True
lower_alert_message.append(
'{0} - GSI: {1} - Consumed Read Capacity {2:d}% '
'was below the lower alarm threshold {3:d}%\n'.format(
table_name,
gsi_name,
consumed_read_units_percent,
reads_lower_alarm_threshold))
if (writes_lower_alarm_threshold > 0 and
consumed_write_units_percent < writes_lower_alarm_threshold):
lower_alert_triggered = True
lower_alert_message.append(
'{0} - GSI: {1} - Consumed Write Capacity {2:d}% '
'was below the lower alarm threshold {3:d}%\n'.format(
table_name,
gsi_name,
consumed_write_units_percent,
writes_lower_alarm_threshold))
# Send alert if needed
if upper_alert_triggered:
logger.info(
'{0} - GSI: {1} - Will send high provisioning alert'.format(
table_name, gsi_name))
sns.publish_gsi_notification(
table_key,
gsi_key,
''.join(upper_alert_message),
['high-throughput-alarm'],
subject='ALARM: High Throughput for Table {0} - GSI: {1}'.format(
table_name, gsi_name))
elif lower_alert_triggered:
logger.info(
'{0} - GSI: {1} - Will send low provisioning alert'.format(
table_name, gsi_name))
sns.publish_gsi_notification(
table_key,
gsi_key,
''.join(lower_alert_message),
['low-throughput-alarm'],
subject='ALARM: Low Throughput for Table {0} - GSI: {1}'.format(
table_name, gsi_name))
else:
logger.debug(
'{0} - GSI: {1} - Throughput alarm thresholds not crossed'.format(
table_name, gsi_name))
|
|
#!/usr/bin/env python3
import can
from functools import reduce
import logging
from operator import xor
import RPi.GPIO as GPIO
import signal
from time import sleep
from sys import exit
import socketcanopen
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
DEFAULT_CAN_INTERFACE = "vcan0"
REDUNDANT_CAN_INTERFACE = "vcan1"
PIN_ENABLE_N = 16
PIN_ADDRESS_N = [12, 13, 14, 15, 17, 18, 19]
PIN_ADDRESS_PARITY_N = 20
PIN_RUNLED0 = 41
PIN_ERRLED0 = 40
PIN_RUNLED1 = 39
PIN_ERRLED1 = 38
def sigterm_handler(signum, frame):
GPIO.cleanup()
exit()
signal.signal(signal.SIGTERM, sigterm_handler)
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_ENABLE_N, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(PIN_ADDRESS_N, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(PIN_ADDRESS_PARITY_N, GPIO.IN, pull_up_down=GPIO.PUD_UP)
runled0 = socketcanopen.RunIndicator(PIN_RUNLED0)
errled0 = socketcanopen.ErrorIndicator(PIN_ERRLED0)
runled1 = socketcanopen.Indicator(PIN_RUNLED1, socketcanopen.Indicator.OFF)
errled1 = socketcanopen.Indicator(PIN_ERRLED1, socketcanopen.Indicator.ON)
default_bus = can.Bus(DEFAULT_CAN_INTERFACE, bustype="socketcan")
redundant_bus = can.Bus(REDUNDANT_CAN_INTERFACE, bustype="socketcan")
active_bus = default_bus
class ResetNode(Exception):
pass
class ResetCommunication(Exception):
pass
while True:
try:
if GPIO.input(PIN_ENABLE_N) == GPIO.HIGH:
logger.warning("Enable_n is high")
sleep(1)
raise ResetNode
while True:
try:
address_n = [
GPIO.input(PIN_ADDRESS_N[6]),
GPIO.input(PIN_ADDRESS_N[5]),
GPIO.input(PIN_ADDRESS_N[4]),
GPIO.input(PIN_ADDRESS_N[3]),
GPIO.input(PIN_ADDRESS_N[2]),
GPIO.input(PIN_ADDRESS_N[1]),
GPIO.input(PIN_ADDRESS_N[0])]
address_parity_n = reduce(xor, address_n)
if address_parity_n != GPIO.input(PIN_ADDRESS_PARITY_N):
logger.warning("Address parity mismatch")
sleep(1)
raise ResetCommunication
node_id = 0
for bit in address_n:
node_id = (node_id << 1) | (not bit)
if node_id == socketcanopen.BROADCAST_NODE_ID:
logger.warning("Invalid Node ID")
sleep(1)
raise ResetCommunication
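                # Worked example (illustrative): active-low inputs of
                # [1, 1, 1, 1, 1, 0, 1] (MSB first) invert to 0b0000010, so
                # node_id == 2, and the parity pin must read the XOR of the
                # raw inputs (here 0) for the address to be accepted.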
canopen_od = socketcanopen.ObjectDictionary({
socketcanopen.ODI_DEVICE_TYPE: socketcanopen.Object(
parameter_name="Device type",
object_type=socketcanopen.ObjectType.VAR,
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
default_value=0x00000000
),
socketcanopen.ODI_ERROR: socketcanopen.Object(
parameter_name="Error register",
object_type=socketcanopen.ObjectType.VAR,
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
default_value=0x00
),
socketcanopen.ODI_SYNC: socketcanopen.Object(
parameter_name="COB-ID SYNC",
object_type=socketcanopen.ObjectType.VAR,
access_type=socketcanopen.AccessType.RW,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
default_value=0x00000000 + (socketcanopen.FUNCTION_CODE_SYNC << socketcanopen.FUNCTION_CODE_BITNUM), # 0x40000000 + ... if SYNC producer
),
socketcanopen.ODI_SYNC_TIME: socketcanopen.Object(
parameter_name="Communication cycle period",
object_type=socketcanopen.ObjectType.VAR,
access_type=socketcanopen.AccessType.RW,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
default_value=1000000 # 1 second, 32-bit, in us
),
socketcanopen.ODI_TIME_STAMP: socketcanopen.Object(
parameter_name="COB-ID time stamp object",
object_type=socketcanopen.ObjectType.VAR,
access_type=socketcanopen.AccessType.RW,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
default_value=(socketcanopen.FUNCTION_CODE_TIME_STAMP << socketcanopen.FUNCTION_CODE_BITNUM) + node_id
),
socketcanopen.ODI_EMCY_ID: socketcanopen.Object(
parameter_name="COB-ID emergency message",
object_type=socketcanopen.ObjectType.VAR,
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
default_value=(socketcanopen.FUNCTION_CODE_EMCY << socketcanopen.FUNCTION_CODE_BITNUM) + node_id
),
socketcanopen.ODI_HEARTBEAT_CONSUMER_TIME: socketcanopen.Object(
parameter_name="Consumer Heartbeat Time",
object_type=socketcanopen.ObjectType.ARRAY,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
sub_number=1,
subs={
socketcanopen.ODSI_VALUE: socketcanopen.SubObject(
parameter_name="Number of Entries",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
low_limit=1,
high_limit=127,
default_value=1,
),
socketcanopen.ODSI_HEARTBEAT_CONSUMER_TIME: socketcanopen.SubObject(
parameter_name="Consumer Heartbeat Time",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=(1 << 16) + 2000 # Node-ID 1, 16-bit, in ms
),
}
),
socketcanopen.ODI_HEARTBEAT_PRODUCER_TIME: socketcanopen.Object(
parameter_name="Producer heartbeat time",
object_type=socketcanopen.ObjectType.VAR,
access_type=socketcanopen.AccessType.RW,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED16,
default_value=1000 # 16-bit, in ms
),
socketcanopen.ODI_IDENTITY: socketcanopen.Object(
parameter_name="Identity Object",
object_type=socketcanopen.ObjectType.RECORD,
data_type=socketcanopen.ODI_DATA_TYPE_IDENTITY,
sub_number=4,
subs={
socketcanopen.ODSI_VALUE: socketcanopen.SubObject(
parameter_name="number of entries",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
low_limit=1,
high_limit=4,
default_value=4
),
socketcanopen.ODSI_IDENTITY_VENDOR: socketcanopen.SubObject(
parameter_name="Vendor ID",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=0x00000000
),
socketcanopen.ODSI_IDENTITY_PRODUCT: socketcanopen.SubObject(
parameter_name="Product code",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=0x00000001
),
socketcanopen.ODSI_IDENTITY_REVISION: socketcanopen.SubObject(
parameter_name="Revision number",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=0x00000000
),
socketcanopen.ODSI_IDENTITY_SERIAL: socketcanopen.SubObject(
parameter_name="Serial number",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=0x00000001
)
}
),
socketcanopen.ODI_NMT_INHIBIT_TIME: socketcanopen.Object(
parameter_name="NMT inhibit time",
object_type=socketcanopen.ObjectType.VAR,
access_type=socketcanopen.AccessType.RW,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED16,
default_value=0 # in ms
),
socketcanopen.ODI_SDO_SERVER: socketcanopen.Object(
parameter_name="Server SDO parameter",
object_type=socketcanopen.ObjectType.RECORD,
data_type=socketcanopen.ODI_DATA_TYPE_SDO_PARAMETER,
sub_number=2,
subs={
socketcanopen.ODSI_VALUE: socketcanopen.SubObject(
parameter_name="number of entries",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
low_limit=2,
high_limit=2,
default_value=2
),
socketcanopen.ODSI_SDO_SERVER_DEFAULT_CSID: socketcanopen.SubObject(
parameter_name="COB-ID Client->Server (rx)",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=(socketcanopen.FUNCTION_CODE_SDO_RX << socketcanopen.FUNCTION_CODE_BITNUM) + node_id
),
socketcanopen.ODSI_SDO_SERVER_DEFAULT_SCID: socketcanopen.SubObject(
parameter_name="COB-ID Server->Client (tx)",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=(socketcanopen.FUNCTION_CODE_SDO_TX << socketcanopen.FUNCTION_CODE_BITNUM) + node_id
),
}
), #TODO: add Client SDO parameter Object(s)
socketcanopen.ODI_TPDO1_COMMUNICATION_PARAMETER: socketcanopen.Object(
parameter_name="transmit PDO parameter",
object_type=socketcanopen.ObjectType.RECORD,
data_type=socketcanopen.ODI_DATA_TYPE_PDO_COMMUNICATION_PARAMETER,
sub_number=2,
subs={
socketcanopen.ODSI_VALUE: socketcanopen.SubObject(
parameter_name="largest sub-index supported",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
low_limit=2,
high_limit=6,
default_value=2
),
socketcanopen.ODSI_TPDO_COMM_PARAM_ID: socketcanopen.SubObject(
parameter_name="COB-ID used by PDO",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=node_id
),
socketcanopen.ODSI_TPDO_COMM_PARAM_TYPE: socketcanopen.SubObject(
parameter_name="transmission type",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
low_limit=0x00,
high_limit=0xFF,
default_value=1 # synchronous
)
}
),
socketcanopen.ODI_TPDO1_MAPPING_PARAMETER: socketcanopen.Object(
parameter_name="transmit PDO mapping",
object_type=socketcanopen.ObjectType.RECORD,
data_type=socketcanopen.ODI_DATA_TYPE_PDO_MAPPING_PARAMETER,
sub_number=2,
subs={
socketcanopen.ODSI_VALUE: socketcanopen.SubObject(
parameter_name="number of mapped application objects in PDO",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
low_limit=0x00,
high_limit=0x40,
default_value=2
),
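# Each PDO mapping entry encodes (object index << 16) | (sub-index << 8) |
# (length in bits), so the entries below each map a 32-bit value from the OD.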
0x01: socketcanopen.SubObject(
parameter_name="PDO mapping for the 1st application object to be mapped",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=(socketcanopen.ODI_SYNC << 16) + (socketcanopen.ODSI_VALUE << 8) + 32
),
0x02: socketcanopen.SubObject(
parameter_name="PDO mapping for the 2nd application object to be mapped",
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=(socketcanopen.ODI_SYNC_TIME << 16) + (socketcanopen.ODSI_VALUE << 8) + 32
),
}
),
socketcanopen.ODI_NMT_STARTUP: socketcanopen.Object(
parameter_name="NMT Startup",
object_type=socketcanopen.ObjectType.VAR,
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
default_value=0x00000023 # Flying master, NMT master start, NMT master
),
socketcanopen.ODI_NMT_SLAVE_ASSIGNMENT: socketcanopen.Object(
parameter_name="NMT slave assignment",
object_type=socketcanopen.ObjectType.ARRAY,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
sub_number=0x7F,
subs=dict(list({
socketcanopen.ODSI_VALUE: socketcanopen.SubObject(
parameter_name="Highest sub-index supported",
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
low_limit=0x7F,
high_limit=0x7F,
default_value=0x7F
)
}.items()) + list({index: socketcanopen.SubObject(
parameter_name="Node-ID {}".format(index),
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
low_limit=0x00000000,
high_limit=0xFFFFFFFF,
default_value=0x00000000
) for index in range(1, 0x80)}.items())
)
),
socketcanopen.ODI_REQUEST_NMT: socketcanopen.Object(
parameter_name="NMT flying master timing parameters",
object_type=socketcanopen.ObjectType.ARRAY,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
sub_number=0x80,
subs=dict(list({
socketcanopen.ODSI_VALUE: socketcanopen.SubObject(
parameter_name="Highest sub-index supported",
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
low_limit=0x80,
high_limit=0x80,
default_value=0x80
)
}.items()) + list({index: socketcanopen.SubObject(
parameter_name="Node-ID {}".format(index),
access_type=socketcanopen.AccessType.RO,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
low_limit=0x00,
high_limit=0xFF,
default_value=0x00
) for index in range(1, 0x81)}.items())
)
),
socketcanopen.ODI_BOOT_TIME: socketcanopen.Object(
parameter_name="Boot time",
object_type=socketcanopen.ObjectType.VAR,
access_type=socketcanopen.AccessType.RW,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED32,
default_value=0x00001388 # 5 sec
),
socketcanopen.ODI_NMT_FLYING_MASTER_TIMING_PARAMETERS: socketcanopen.Object(
parameter_name="NMT flying master timing parameters",
object_type=socketcanopen.ObjectType.ARRAY,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED16,
sub_number=6,
subs={
socketcanopen.ODSI_VALUE: socketcanopen.SubObject(
parameter_name="Highest sub-index supported",
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED8,
low_limit=0x06,
high_limit=0x06,
default_value=0x06
),
socketcanopen.ODSI_NMT_FLYING_MASTER_TIMING_PARAMS_TIMEOUT: socketcanopen.SubObject(
parameter_name="NMT master timeout",
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED16,
low_limit=0x0000,
high_limit=0xFFFF,
default_value=100
),
socketcanopen.ODSI_NMT_FLYING_MASTER_TIMING_PARAMS_DELAY: socketcanopen.SubObject(
parameter_name="NMT master negotiation time delay",
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED16,
low_limit=0x0000,
high_limit=0xFFFF,
default_value=500
),
socketcanopen.ODSI_NMT_FLYING_MASTER_TIMING_PARAMS_PRIORITY: socketcanopen.SubObject(
parameter_name="NMT master priority",
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED16,
low_limit=0x0000,
high_limit=0xFFFF,
default_value=node_id % 3
),
socketcanopen.ODSI_NMT_FLYING_MASTER_TIMING_PARAMS_PRIORITY_TIME_SLOT: socketcanopen.SubObject(
parameter_name="Priority time slot",
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED16,
low_limit=0x0000,
high_limit=0xFFFF,
default_value=1500
),
socketcanopen.ODSI_NMT_FLYING_MASTER_TIMING_PARAMS_DEVICE_TIME_SLOT: socketcanopen.SubObject(
parameter_name="socketcanopen device time slot",
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED16,
low_limit=0x0000,
high_limit=0xFFFF,
default_value=10
),
socketcanopen.ODSI_NMT_FLYING_MASTER_TIMING_PARAMS_DETECT_TIME: socketcanopen.SubObject(
parameter_name="Multiple NMT master detect cycle time",
access_type=socketcanopen.AccessType.CONST,
data_type=socketcanopen.ODI_DATA_TYPE_UNSIGNED16,
low_limit=0x0000,
high_limit=0xFFFF,
default_value=4000 + 10 * node_id
),
}
),
})
nmt_slave_assignments = canopen_od.get(socketcanopen.ODI_NMT_SLAVE_ASSIGNMENT)
nmt_slave_assignment = nmt_slave_assignments.get(0x02)
nmt_slave_assignment.value = 0x00000009 # Mandatory slave
nmt_slave_assignments.update({0x02: nmt_slave_assignment})
canopen_od.update({socketcanopen.ODI_NMT_SLAVE_ASSIGNMENT: nmt_slave_assignments})
with socketcanopen.Node(active_bus, node_id, canopen_od, run_indicator=runled0, err_indicator=errled0) as node:
while True:
signal.pause() # Replace with application code and interact with Object Dictionary (node.od)
except ResetCommunication:
try:
node.reset_communication()
except NameError:
pass
except ResetNode:
try:
node.reset()
except NameError:
pass
|
|
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from six import moves
from neutron.plugins.common import constants as qconstants
from neutron.services.loadbalancer import constants
PROTOCOL_MAP = {
constants.PROTOCOL_TCP: 'tcp',
constants.PROTOCOL_HTTP: 'http',
constants.PROTOCOL_HTTPS: 'tcp',
}
BALANCE_MAP = {
constants.LB_METHOD_ROUND_ROBIN: 'roundrobin',
constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
constants.LB_METHOD_SOURCE_IP: 'source'
}
STATS_MAP = {
constants.STATS_ACTIVE_CONNECTIONS: 'scur',
constants.STATS_TOTAL_CONNECTIONS: 'stot',
constants.STATS_IN_BYTES: 'bin',
constants.STATS_OUT_BYTES: 'bout',
}
ACTIVE_PENDING_STATUSES = qconstants.ACTIVE_PENDING_STATUSES
INACTIVE = qconstants.INACTIVE
def save_config(cfg_data, socket_path=None,
user_group='nogroup'):
"""Convert a logical configuration to the HAProxy version."""
data = []
data.extend(_build_global(socket_path=socket_path,
user_group=user_group))
data.extend(_build_defaults())
data.extend(cfg_data)
return '\n'.join(data)
def get_config_data(logical_config):
data = []
data.extend(_build_frontend(logical_config))
data.extend(_build_listeners(logical_config))
data.extend(_build_backend(logical_config))
return '\n'.join(data)
def _build_global(socket_path=None, user_group='nogroup'):
opts = [
'daemon',
'user nobody',
'group %s' % user_group,
'log /dev/log local3 info',
'log /dev/log local3 notice'
]
if socket_path:
opts.append('stats socket %s mode 0666 level user' % socket_path)
return itertools.chain(['global'], ('\t' + o for o in opts))
def _build_defaults():
opts = [
'log global',
'retries 3',
'option splice-auto',
'option redispatch',
'timeout connect 5000',
'timeout client 50000',
'timeout server 50000',
]
return itertools.chain(['defaults'], ('\t' + o for o in opts))
def _build_frontend(config):
protocol = config['vip']['protocol']
opts = [
'option splice-auto',
'option tcplog',
'bind %s:%d transparent' % (
_get_first_ip_from_port(config['vip']['port']),
config['vip']['protocol_port']
),
'mode %s' % PROTOCOL_MAP[protocol],
'default_backend %s' % config['pool']['id'],
]
if config['vip']['connection_limit'] >= 0:
opts.append('maxconn %s' % config['vip']['connection_limit'])
if protocol == constants.PROTOCOL_HTTP:
opts.append('option forwardfor')
return itertools.chain(
['frontend %s' % config['vip']['id']],
('\t' + o for o in opts)
)
def _build_listeners(config):
# add the listeners
opts = []
for listener in config['listeners']:
protocol = listener['protocol']
listen_opts = [
'frontend %s-%s' % (config['vip']['id'],listener['protocol_port']),
'\toption splice-auto',
'\toption tcplog',
'\tbind %s:%d' % (
_get_first_ip_from_port(config['vip']['port']),
listener['protocol_port']
),
'\tmode %s' % PROTOCOL_MAP[protocol],
'\tdefault_backend %s' % config['pool']['id']
]
opts.extend(listen_opts)
return itertools.chain([],(o for o in opts))
def _build_backend(config):
protocol = config['pool']['protocol']
lb_method = config['pool']['lb_method']
opts = [
'mode %s' % PROTOCOL_MAP[protocol],
'balance %s' % BALANCE_MAP.get(lb_method, 'roundrobin'),
'option splice-auto'
]
if protocol == constants.PROTOCOL_HTTP:
opts.append('option forwardfor')
# add the first health_monitor (if available)
server_addon, health_opts = _get_server_health_option(config)
opts.extend(health_opts)
# add session persistence (if available)
persist_opts = _get_session_persistence(config)
opts.extend(persist_opts)
# add the members
for member in config['members']:
if ((member['status'] in ACTIVE_PENDING_STATUSES or
member['status'] == INACTIVE)
and member['admin_state_up']):
server = (('server %(id)s %(address)s:%(protocol_port)s '
'weight %(weight)s') % member) + server_addon
if _has_http_cookie_persistence(config):
server += ' cookie %d' % config['members'].index(member)
opts.append(server)
return itertools.chain(
['backend %s' % config['pool']['id']],
('\t' + o for o in opts)
)
def _get_first_ip_from_port(port):
for fixed_ip in port['fixed_ips']:
return fixed_ip['ip_address']
def _get_server_health_option(config):
"""return the first active health option."""
for monitor in config['healthmonitors']:
# not checking the status of healthmonitor for two reasons:
# 1) status field is absent in HealthMonitor model
# 2) only active HealthMonitors are fetched with
# LoadBalancerCallbacks.get_logical_device
if monitor['admin_state_up']:
break
else:
return '', []
server_addon = ' check inter %(delay)ds fall %(max_retries)d' % monitor
opts = [
'timeout check %ds' % monitor['timeout']
]
if monitor['type'] in (constants.HEALTH_MONITOR_HTTP,
constants.HEALTH_MONITOR_HTTPS):
opts.append('option httpchk %(http_method)s %(url_path)s' % monitor)
opts.append(
'http-check expect rstatus %s' %
'|'.join(_expand_expected_codes(monitor['expected_codes']))
)
if monitor['type'] == constants.HEALTH_MONITOR_HTTPS:
opts.append('option ssl-hello-chk')
return server_addon, opts
def _get_session_persistence(config):
persistence = config['vip'].get('session_persistence')
if not persistence:
return []
opts = []
if persistence['type'] == constants.SESSION_PERSISTENCE_SOURCE_IP:
opts.append('stick-table type ip size 10k')
opts.append('stick on src')
elif (persistence['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE and
config.get('members')):
opts.append('cookie SRV insert indirect nocache')
elif (persistence['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and
persistence.get('cookie_name')):
opts.append('appsession %s len 56 timeout 3h' %
persistence['cookie_name'])
return opts
def _has_http_cookie_persistence(config):
return (config['vip'].get('session_persistence') and
config['vip']['session_persistence']['type'] ==
constants.SESSION_PERSISTENCE_HTTP_COOKIE)
def _expand_expected_codes(codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
for code in codes.replace(',', ' ').split(' '):
code = code.strip()
if not code:
continue
elif '-' in code:
low, hi = code.split('-')[:2]
retval.update(str(i) for i in moves.xrange(int(low), int(hi) + 1))
else:
retval.add(code)
return retval
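# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): a minimal
# 'logical_config' assembled from the keys the builders above actually read.
# All identifiers and values below are assumptions for demonstration.
_EXAMPLE_LOGICAL_CONFIG = {
    'vip': {
        'id': 'vip-1',
        'protocol': constants.PROTOCOL_HTTP,
        'protocol_port': 80,
        'connection_limit': 1000,
        'port': {'fixed_ips': [{'ip_address': '10.0.0.10'}]},
        'session_persistence': {
            'type': constants.SESSION_PERSISTENCE_SOURCE_IP},
    },
    'listeners': [],
    'pool': {
        'id': 'pool-1',
        'protocol': constants.PROTOCOL_HTTP,
        'lb_method': constants.LB_METHOD_ROUND_ROBIN,
    },
    'members': [{
        'id': 'member-1',
        'address': '10.0.0.20',
        'protocol_port': 8080,
        'weight': 1,
        'status': 'ACTIVE',  # assumed to be in ACTIVE_PENDING_STATUSES
        'admin_state_up': True,
    }],
    'healthmonitors': [],
}
# Rendering a full HAProxy configuration from it would look roughly like:
# print(save_config(get_config_data(_EXAMPLE_LOGICAL_CONFIG),
#                   socket_path='/var/run/haproxy.sock'))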
|
|
import unittest
from feedly.storage.redis.structures.hash import RedisHashCache,\
ShardedHashCache, FallbackHashCache
from feedly.storage.redis.structures.list import RedisListCache,\
FallbackRedisListCache
from feedly.storage.redis.connection import get_redis_connection
from functools import partial
from feedly.storage.redis.structures.sorted_set import RedisSortedSetCache
class BaseRedisStructureTestCase(unittest.TestCase):
def get_structure(self):
return
class RedisSortedSetTest(BaseRedisStructureTestCase):
test_data = [(1.0, 'a'), (2.0, 'b'), (3.0, 'c')]
def get_structure(self):
structure_class = RedisSortedSetCache
structure = structure_class('test')
structure.delete()
return structure
def test_add_many(self):
cache = self.get_structure()
test_data = self.test_data
for key, value in test_data:
cache.add(key, value)
# adding the same members again shouldn't create duplicates; it's a sorted set after all
cache.add_many(test_data)
count = cache.count()
self.assertEqual(int(count), 3)
def test_ordering(self):
cache = self.get_structure()
data = self.test_data
test_data = data
cache.add_many(test_data)
results = cache[:]
expected_results = [p[::-1] for p in test_data]
self.assertEqual(results, expected_results[::-1])
cache.sort_asc = True
results = cache[:10]
self.assertEqual(results, expected_results)
def test_filtering(self):
# setup the data
cache = self.get_structure()
cache.add_many(self.test_data)
# try a max
results = cache.get_results(0, 2, max_score=2.0)
self.assertEqual(results, [('b', 2.0), ('a', 1.0)])
# try a min
results = cache.get_results(0, 2, min_score=2.0)
self.assertEqual(results, [('c', 3.0), ('b', 2.0)])
# try a max with a start
results = cache.get_results(1, 2, max_score=2.0)
self.assertEqual(results, [('a', 1.0)])
def test_long_filtering(self):
'''
Check if nothing breaks when using long numbers as scores
'''
self.skipTest('This is a known issue with Redis')
# setup the data
test_data = [(13930920300000000000007001, 'a'), (
13930920300000000000007002, 'b'), (13930920300000000000007003, 'c')]
cache = self.get_structure()
cache.add_many(test_data)
# try a max
results = cache.get_results(0, 2, max_score=13930920300000000000007002)
self.assertEqual(results, [('b', float(13930920300000000000007002)), (
'a', float(13930920300000000000007001))])
# try a min
results = cache.get_results(0, 2, min_score=13930920300000000000007002)
self.assertEqual(results, [('c', float(13930920300000000000007003)), (
'b', float(13930920300000000000007002))])
# try a max with a start
results = cache.get_results(1, 2, max_score=13930920300000000000007002)
self.assertEqual(results, [('a', float(13930920300000000000007001))])
def test_trim(self):
cache = self.get_structure()
test_data = self.test_data
for score, value in test_data:
cache.add(score, value)
cache.trim(1)
count = cache.count()
self.assertEqual(count, 1)
def test_simple_trim(self):
cache = self.get_structure()
test_data = self.test_data
for key, value in test_data:
cache.add(key, value)
cache.max_length = 1
cache.trim()
count = int(cache.count())
self.assertEqual(count, 1)
def test_remove(self):
cache = self.get_structure()
test_data = self.test_data
cache.add_many(test_data)
cache.remove_many(['a'])
count = cache.count()
self.assertEqual(count, 2)
def test_remove_by_score(self):
cache = self.get_structure()
test_data = self.test_data
cache.add_many(test_data)
cache.remove_by_scores([1.0, 2.0])
count = cache.count()
self.assertEqual(count, 1)
def test_zremrangebyrank(self):
redis = get_redis_connection()
key = 'test'
# start out fresh
redis.delete(key)
redis.zadd(key, 1, 'a')
redis.zadd(key, 2, 'b')
redis.zadd(key, 3, 'c')
redis.zadd(key, 4, 'd')
redis.zadd(key, 5, 'e')
expected_results = [('a', 1.0), ('b', 2.0), ('c', 3.0), (
'd', 4.0), ('e', 5.0)]
results = redis.zrange(key, 0, -1, withscores=True)
self.assertEqual(results, expected_results)
results = redis.zrange(key, 0, -4, withscores=True)
# now the idea is to only keep 3,4,5
max_length = 3
end = (max_length * -1) - 1
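# With 5 members and max_length == 3, end == -4, so zremrangebyrank removes
# ranks 0 through -4 ('a' and 'b', the lowest scores), keeping the top 3.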
redis.zremrangebyrank(key, 0, end)
expected_results = [('c', 3.0), ('d', 4.0), ('e', 5.0)]
results = redis.zrange(key, 0, -1, withscores=True)
self.assertEqual(results, expected_results)
class ListCacheTestCase(BaseRedisStructureTestCase):
def get_structure(self):
structure_class = type(
'MyCache', (RedisListCache, ), dict(max_items=10))
structure = structure_class('test')
structure.delete()
return structure
def test_append(self):
cache = self.get_structure()
cache.append_many(['a', 'b'])
self.assertEqual(cache[:5], ['a', 'b'])
self.assertEqual(cache.count(), 2)
def test_simple_append(self):
cache = self.get_structure()
for value in ['a', 'b']:
cache.append(value)
self.assertEqual(cache[:5], ['a', 'b'])
self.assertEqual(cache.count(), 2)
def test_trim(self):
cache = self.get_structure()
cache.append_many(range(100))
self.assertEqual(cache.count(), 100)
cache.trim()
self.assertEqual(cache.count(), 10)
def test_remove(self):
cache = self.get_structure()
data = ['a', 'b']
cache.append_many(data)
self.assertEqual(cache[:5], data)
self.assertEqual(cache.count(), 2)
for value in data:
cache.remove(value)
self.assertEqual(cache[:5], [])
self.assertEqual(cache.count(), 0)
class FakeFallBack(FallbackRedisListCache):
max_items = 10
def __init__(self, *args, **kwargs):
self.fallback_data = kwargs.pop('fallback')
FallbackRedisListCache.__init__(self, *args, **kwargs)
def get_fallback_results(self, start, stop):
return self.fallback_data[start:stop]
class FallbackRedisListCacheTest(ListCacheTestCase):
def get_structure(self):
structure = FakeFallBack('test', fallback=['a', 'b'])
structure.delete()
return structure
def test_remove(self):
cache = self.get_structure()
data = ['a', 'b']
cache.append_many(data)
self.assertEqual(cache[:5], data)
self.assertEqual(cache.count(), 2)
for value in data:
cache.remove(value)
self.assertEqual(cache.count(), 0)
# fallback should still work
self.assertEqual(cache[:5], data)
class SecondFallbackRedisListCacheTest(BaseRedisStructureTestCase):
def get_structure(self):
structure = FakeFallBack('test', fallback=['a', 'b', 'c'])
structure.delete()
return structure
def test_append(self):
cache = self.get_structure()
# test while we have no redis data
self.assertEqual(cache[:5], ['a', 'b', 'c'])
# now test with redis data
cache.append_many(['d', 'e', 'f', 'g'])
self.assertEqual(cache.count(), 7)
self.assertEqual(cache[:3], ['a', 'b', 'c'])
def test_slice(self):
cache = self.get_structure()
# test while we have no redis data
self.assertEqual(cache[:], ['a', 'b', 'c'])
class HashCacheTestCase(BaseRedisStructureTestCase):
def get_structure(self):
structure = RedisHashCache('test')
# always start fresh
structure.delete()
return structure
def test_set_many(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
cache.set_many(key_value_pairs)
keys = cache.keys()
self.assertEqual(keys, ['key', 'key2'])
def test_set(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
for key, value in key_value_pairs:
cache.set(key, value)
keys = cache.keys()
self.assertEqual(keys, ['key', 'key2'])
def test_delete_many(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
cache.set_many(key_value_pairs)
keys = cache.keys()
cache.delete_many(keys)
keys = cache.keys()
self.assertEqual(keys, [])
def test_get_and_set(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
cache.set_many(key_value_pairs)
results = cache.get_many(['key', 'key2'])
self.assertEqual(results, {'key2': 'value2', 'key': 'value'})
result = cache.get('key')
self.assertEqual(result, 'value')
result = cache.get('key_missing')
self.assertEqual(result, None)
def test_contains(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
cache.set_many(key_value_pairs)
result = cache.contains('key')
self.assertEqual(result, True)
result = cache.contains('key2')
self.assertEqual(result, True)
result = cache.contains('key_missing')
self.assertEqual(result, False)
def test_count(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
cache.set_many(key_value_pairs)
count = cache.count()
self.assertEqual(count, 2)
class MyFallbackHashCache(FallbackHashCache):
def get_many_from_fallback(self, fields):
return dict(zip(fields, range(100)))
class FallbackHashCacheTestCase(HashCacheTestCase):
def get_structure(self):
structure = MyFallbackHashCache('test')
# always start fresh
structure.delete()
return structure
def test_get_and_set(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
cache.set_many(key_value_pairs)
results = cache.get_many(['key', 'key2'])
self.assertEqual(results, {'key2': 'value2', 'key': 'value'})
result = cache.get('key')
self.assertEqual(result, 'value')
result = cache.get('key_missing')
self.assertEqual(result, 0)
class ShardedHashCacheTestCase(HashCacheTestCase):
def get_structure(self):
structure = ShardedHashCache('test')
# always start fresh
structure.delete()
return structure
def test_set_many(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
cache.set_many(key_value_pairs)
def test_get_and_set(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
cache.set_many(key_value_pairs)
results = cache.get_many(['key', 'key2'])
self.assertEqual(results, {'key2': 'value2', 'key': 'value'})
result = cache.get('key')
self.assertEqual(result, 'value')
result = cache.get('key_missing')
self.assertEqual(result, None)
def test_count(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
cache.set_many(key_value_pairs)
count = cache.count()
self.assertEqual(count, 2)
def test_contains(self):
cache = self.get_structure()
key_value_pairs = [('key', 'value'), ('key2', 'value2')]
cache.set_many(key_value_pairs)
contains = partial(cache.contains, 'key')
self.assertRaises(NotImplementedError, contains)
|
|
"""
This module contains functions for building and loading NMODL mechanisms
Author: Thomas G. Close ([email protected])
Copyright: 2012-2014 Thomas G. Close.
License: This file is part of the "NineLine" package, which is released under
the MIT Licence, see LICENSE for details.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import next, str
import os.path
import tempfile
import platform
import re
import uuid
from itertools import chain
import subprocess as sp
from collections import defaultdict
import sympy
import neuron
import nineml.units as un
from nineml.abstraction import Alias, AnalogSendPort, Dynamics
from neuron import load_mechanisms
from pype9.simulate.common.code_gen import BaseCodeGenerator
from pype9.simulate.common.cells import (
WithSynapses, DynamicsWithSynapses)
from pype9.exceptions import (
Pype9BuildError, Pype9RuntimeError, Pype9CommandNotFoundError)
import pype9
from datetime import datetime
from nineml.abstraction import (StateAssignment, Parameter, StateVariable,
Constant, Expression)
from nineml.abstraction.dynamics.visitors.queriers import (
DynamicsInterfaceInferer)
from sympy.printing import ccode
from pype9.utils.mpi import is_mpi_master, mpi_comm
from pype9.simulate.neuron.units import UnitHandler
try:
from nineml.extensions.kinetics import Kinetics # @UnusedImport
except ImportError:
Kinetics = type(None)  # fallback when the kinetics extension is unavailable
from pype9.annotations import (
PYPE9_NS, ION_SPECIES, MEMBRANE_VOLTAGE, MEMBRANE_CAPACITANCE,
TRANSFORM_SRC, NONSPECIFIC_CURRENT, BUILD_TRANS,
EXTERNAL_CURRENTS, NO_TIME_DERIVS,
NUM_TIME_DERIVS, MECH_TYPE, FULL_CELL_MECH,
ARTIFICIAL_CELL_MECH)
import logging
TRANSFORM_NS = 'NeuronBuildTransform'
logger = logging.getLogger("pype9")
class CodeGenerator(BaseCodeGenerator):
SIMULATOR_NAME = 'neuron'
SIMULATOR_VERSION = neuron.h.nrnversion(0)
ODE_SOLVER_DEFAULT = 'derivimplicit'
REGIME_VARNAME = 'regime_'
SEED_VARNAME = 'seed_'
BASE_TMPL_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
'templates'))
UnitHandler = UnitHandler
_neuron_units = {un.mV: 'millivolt',
un.S: 'siemens',
un.mA: 'milliamp'}
_inbuilt_ions = ['na', 'k', 'ca']
def __init__(self, gsl_path=None, **kwargs):
super(CodeGenerator, self).__init__(**kwargs)
self.nrnivmodl_path = self.get_neuron_util_path('nrnivmodl')
self.modlunit_path = self.get_neuron_util_path('modlunit',
default=None)
# Compile wrappers around GSL random distribution functions
if is_mpi_master():
if not os.path.exists(self.libninemlnrn_so):
self.compile_libninemlnrn()
mpi_comm.barrier()
self.nrnivmodl_flags = [
'-L' + self.libninemlnrn_dir,
'-Wl,-rpath,' + self.libninemlnrn_dir,
'-lninemlnrn', '-lgsl', '-lgslcblas']
if gsl_path is not None:
self.nrnivmodl_flags.append('-L' + gsl_path)
else:
self.nrnivmodl_flags.extend(self.get_gsl_prefixes())
# Work out the name of the installation directory for the compiled
# NMODL files on the current platform
self.specials_dir = self._get_specials_dir()
def generate_source_files(self, component_class, src_dir, name=None,
**kwargs):
"""
Generates main NMODL file for cell (and synapse) class
Parameters
----------
membrane_voltage : str
Specifies the state that represents membrane voltage.
membrane_capacitance : str
Specifies the state that represents membrane capacitance.
default_capacitance : float
Specifies the quantity assigned to the membrane capacitance by
default
v_threshold: float
The threshold for the neuron to emit a spike.
external_ports : list(str)
Analog ports to strip from expressions as they represent synapses
or injected currents, which can be inserted manually by NEURON
objects.
is_subcomponent : bool
Whether to use the 'SUFFIX' tag or not.
ode_solver : str
Specifies the ODE solver to use
"""
if name is None:
name = component_class.name
template = 'main.tmpl'
self.generate_mod_file(template, component_class, src_dir, name,
kwargs)
def generate_mod_file(self, template, component_class, src_dir, name,
template_args):
# Get list of all unique triggers within the component class so they
# can be referred to by an index (i.e. their index in the list).
all_triggers = []
for regime in component_class.regimes:
for on_condition in regime.on_conditions:
if on_condition.trigger.rhs not in all_triggers:
all_triggers.append(on_condition.trigger.rhs)
tmpl_args = {
'code_gen': self,
'component_name': name,
'component_class': component_class,
'all_triggers': all_triggers,
'version': pype9.__version__, 'src_dir': src_dir,
'timestamp': datetime.now().strftime('%a %d %b %y %I:%M:%S%p'),
'unit_handler': UnitHandler(component_class),
'ode_solver': self.ODE_SOLVER_DEFAULT,
'external_ports': [],
'is_subcomponent': True,
'regime_varname': self.REGIME_VARNAME,
'seed_varname': self.SEED_VARNAME}
# # FIXME: weight_vars needs to be removed or implemented properly
# 'weight_variables': []}
tmpl_args.update(template_args)
# Render mod file
self.render_to_file(
template, tmpl_args, component_class.name + '.mod', src_dir)
def transform_for_build(self, name, component_class, **kwargs):
"""
Copies and transforms the component class to match the format of the
simulator (overridden in derived class)
Parameters
----------
name : str
The name of the transformed component class
component_class : nineml.Dynamics
The component class to be transformed
"""
self._set_build_props(component_class, **kwargs)
if not isinstance(component_class, WithSynapses):
raise Pype9RuntimeError(
"'component_class' must be a DynamicsWithSynapses object")
# ---------------------------------------------------------------------
# Clone original component class
# ---------------------------------------------------------------------
trfrm = component_class.dynamics.flatten()
# ---------------------------------------------------------------------
# Get the membrane voltage and convert it to 'v'
# ---------------------------------------------------------------------
try:
name = kwargs['membrane_voltage']
try:
orig_v = component_class.element(
name, nineml_children=Dynamics.nineml_children)
except KeyError:
raise Pype9BuildError(
"Could not find specified membrane voltage '{}'"
.format(name))
except KeyError: # Guess voltage from its dimension if not supplied
candidate_vs = [cv for cv in component_class.state_variables
if cv.dimension == un.voltage]
if len(candidate_vs) == 0:
candidate_vs = [
cv for cv in component_class.analog_receive_ports
if cv.dimension == un.voltage]
if len(candidate_vs) == 1:
orig_v = candidate_vs[0]
logger.info("Guessing that '{}' is the membrane voltage"
.format(orig_v))
elif len(candidate_vs) > 1:
try:
orig_v = next(c for c in candidate_vs if c.name == 'v')
logger.info("Guessing that '{}' is the membrane voltage"
.format(orig_v))
except StopIteration:
raise Pype9BuildError(
"Could not guess the membrane voltage, candidates: "
"'{}'" .format("', '".join(v.name
for v in candidate_vs)))
else:
orig_v = None
logger.info(
"Can't find candidate for the membrane voltage in "
"state_variables '{}' or analog_receive_ports '{}', "
"treating '{}' as an \"artificial cell\"".format(
"', '".join(
sv.name for sv in component_class.state_variables),
"', '".join(
p.name
for p in component_class.analog_receive_ports),
component_class.name))
if orig_v is not None:
# Map voltage to hard-coded 'v' symbol
if orig_v.name != 'v':
trfrm.rename_symbol(orig_v.name, 'v')
v = trfrm.state_variable('v')
v.annotations.set((BUILD_TRANS, PYPE9_NS),
TRANSFORM_SRC, orig_v)
else:
v = trfrm.state_variable('v')
# Add annotations to the original and build models
component_class.annotations.set((BUILD_TRANS, PYPE9_NS),
MEMBRANE_VOLTAGE, orig_v.name) # @IgnorePep8
trfrm.annotations.set((BUILD_TRANS, PYPE9_NS),
MEMBRANE_VOLTAGE, 'v')
# Remove associated analog send port if present
try:
trfrm.remove(trfrm.analog_send_port('v'))
except KeyError:
pass
# Need to convert to AnalogReceivePort if v is a StateVariable
if isinstance(v, StateVariable):
self._transform_full_component(trfrm, component_class, v,
**kwargs)
trfrm.annotations.set((BUILD_TRANS, PYPE9_NS),
MECH_TYPE, FULL_CELL_MECH)
else:
raise NotImplementedError(
"Build sub-components is not supported in PyPe9 v0.1")
else:
trfrm.annotations.set((BUILD_TRANS, PYPE9_NS), MECH_TYPE,
ARTIFICIAL_CELL_MECH)
# -----------------------------------------------------------------
# Insert dummy aliases for parameters (such as capacitance) that
# now do not show up in the inferred interface for the transformed
# class (i.e. that were only present in the voltage time derivative)
# -----------------------------------------------------------------
# Infer required parameters
inferred = DynamicsInterfaceInferer(trfrm)
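# For example, if the membrane capacitance parameter only appeared in the
# (now removed) voltage time derivative, adding an alias such as
# 'cm___dummy' keeps that parameter referenced by the transformed class.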
for parameter in list(trfrm.parameters):
if parameter.name not in inferred.parameter_names:
trfrm.add(Alias(parameter.name + '___dummy', parameter.name))
# -----------------------------------------------------------------
# Validate the transformed component class and construct prototype
# -----------------------------------------------------------------
trfrm.validate()
trfrm_with_syn = DynamicsWithSynapses(
name, trfrm, component_class.synapses,
component_class.connection_parameter_sets)
# Return a prototype of the transformed class
return trfrm_with_syn
def _transform_full_component(self, trfrm, component_class, v, **kwargs):
# -----------------------------------------------------------------
# Remove all analog send ports with 'current' dimension so they
# don't get confused with the converted voltage time derivative
# expression
# -----------------------------------------------------------------
for port in list(trfrm.analog_send_ports):
if port.dimension == un.current:
trfrm.remove(port)
# -----------------------------------------------------------------
# Insert membrane capacitance if not present
# -----------------------------------------------------------------
# Get or guess the location of the membrane capacitance
try:
name = kwargs['membrane_capacitance']
try:
orig_cm = component_class.parameter(name)
except KeyError:
raise Pype9BuildError(
"Could not find specified membrane capacitance '{}'"
.format(name))
cm = trfrm.parameter(orig_cm.name)
except KeyError: # 'membrane_capacitance' was not specified
candidate_cms = [ccm for ccm in component_class.parameters
if ccm.dimension == un.capacitance]
if len(candidate_cms) == 1:
orig_cm = candidate_cms[0]
cm = trfrm.parameter(orig_cm.name)
logger.info("Guessing that '{}' is the membrane capacitance"
.format(orig_cm))
elif len(candidate_cms) > 1:
raise Pype9BuildError(
"Could not guess the membrane capacitance, candidates:"
" '{}'".format("', '".join(candidate_cms)))
else:
cm = Parameter("cm___pype9", dimension=un.capacitance)
trfrm.add(cm)
cm.annotations.set((BUILD_TRANS, PYPE9_NS), TRANSFORM_SRC, None)
trfrm.annotations.set((BUILD_TRANS, PYPE9_NS),
MEMBRANE_CAPACITANCE, cm.name)
# -----------------------------------------------------------------
# Replace membrane voltage equation with membrane current
# -----------------------------------------------------------------
# Determine the regimes in which each state variables has a time
# derivative in
has_td = defaultdict(list)
# List which regimes need to be clamped to their last voltage
# (as it has no time derivative)
clamped_regimes = []
# The voltage clamp equation where v_clamp is the last voltage
# value and g_clamp_ is a large conductance
clamp_i = sympy.sympify('g_clamp___pype9 * (v - v_clamp___pype9)')
memb_is = []
for regime in trfrm.regimes:
# Add an appropriate membrane current
try:
# Convert the voltage time derivative into a membrane
# current
dvdt = regime.time_derivative(v.name)
regime.remove(dvdt)
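# From cm * dv/dt = -i (outward current positive), the membrane current
# is recovered below as i = -cm * dv/dt, i.e. -dvdt.rhs * cm.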
i = -dvdt.rhs * cm
memb_is.append(i)
except KeyError:
i = clamp_i
clamped_regimes.append(regime)
regime.add(Alias('i___pype9', i))
# Record state vars that have a time deriv. in this regime
for var in regime.time_derivative_variables:
if var != 'v':
has_td[var].append(regime)
# Pick the most popular membrane current to be the alias in
# the global scope
assert memb_is, "No regimes contain voltage time derivatives"
memb_i = Alias('i___pype9', max(memb_is, key=memb_is.count))
# Add membrane current along with an analog send port
trfrm.add(memb_i)
i_port = AnalogSendPort('i___pype9', dimension=un.current)
i_port.annotations.set((BUILD_TRANS, PYPE9_NS), ION_SPECIES,
NONSPECIFIC_CURRENT)
trfrm.add(i_port)
# Remove membrane currents that match the membrane current in the
# outer scope
for regime in trfrm.regimes:
if regime.alias('i___pype9') == memb_i:
regime.remove(regime.alias('i___pype9'))
# If there are clamped regimes add extra parameters and set the
# voltage to clamp to in the regimes that transition to them
if clamped_regimes:
trfrm.add(StateVariable('v_clamp___pype9', un.voltage))
trfrm.add(Constant('g_clamp___pype9', 1e8, un.uS))
for trans in trfrm.transitions:
if trans.target_regime in clamped_regimes:
# Assign v_clamp_ to the value
try:
v_clamp_rhs = trans.state_assignment('v').rhs
except KeyError:
v_clamp_rhs = 'v'
trans.add(StateAssignment('v_clamp___pype9',
v_clamp_rhs))
# -----------------------------------------------------------------
trfrm.annotations.set(
(BUILD_TRANS, PYPE9_NS), NO_TIME_DERIVS,
','.join(['v'] + [sv for sv in trfrm.state_variable_names
if sv not in has_td]))
trfrm.annotations.set((BUILD_TRANS, PYPE9_NS), NUM_TIME_DERIVS,
len(has_td))
# -----------------------------------------------------------------
# Remove the external input currents
# -----------------------------------------------------------------
# Analog receive or reduce ports that are of dimension current and
# are purely additive to the membrane current and nothing else
# (actually subtractive as it is outward current)
try:
ext_is = []
for i_name in kwargs['external_currents']:
try:
ext_i = trfrm.analog_receive_port(i_name)
except KeyError:
try:
ext_i = trfrm.analog_reduce_port(i_name)
except KeyError:
raise Pype9BuildError(
"Did not find specified external current port "
"'{}'".format(i_name))
if ext_i.dimension != un.current:
raise Pype9BuildError(
"Analog receive port matching specified external "
"current '{}' does not have 'current' dimension "
"({})".format(ext_i.name, ext_i.dimension))
ext_is.append(ext_i)
except KeyError:
ext_is = []
for port in chain(component_class.analog_receive_ports,
component_class.analog_reduce_ports):
# Check to see if the receive/reduce port has current dimension
if port.dimension != un.current:
continue
# Check to see if the current appears in the membrane current
# expression
# FIXME: This test should check to see if the port is
# additive to the membrane current and substitute all
# aliases.
if port.name not in memb_i.rhs_symbol_names:
continue
# Get the number of expressions the receive port appears in
# an expression
if len([e for e in component_class.all_expressions
if port.symbol in e.free_symbols]) > 1:
continue
# If all those conditions are met guess that port is a external
# current that can be removed (ports that don't meet these
# conditions will have to be specified separately)
ext_is.append(port)
if ext_is:
logger.info("Guessing '{}' are external currents to be removed"
.format(ext_is))
trfrm.annotations.set((BUILD_TRANS, PYPE9_NS), EXTERNAL_CURRENTS,
','.join(p.name for p in ext_is))
# Remove external input current ports (as NEURON handles them)
for ext_i in ext_is:
trfrm.remove(ext_i)
for expr in chain(trfrm.aliases, trfrm.all_time_derivatives()):
expr.subs(ext_i, 0)
expr.simplify()
def compile_source_files(self, compile_dir, name):
"""
Builds all NMODL files in a directory
Parameters
----------
src_dir : str
The path of the directory to build
build_mode : str
Can be one of either, 'lazy', 'super_lazy', 'require', 'force', or
'build_only'. 'lazy' doesn't run nrnivmodl if the library is found,
'require', requires that the library is found otherwise throws an
exception (useful on clusters that require precompilation before
parallelisation where the error message could otherwise be
confusing), 'force' removes existing library if found and
recompiles, and 'build_only' removes existing library if found,
recompile and then exit
ignore_units :
Flag whether to only print a warning when units don't match instead
of throwing an error
"""
# Change working directory to model directory
os.chdir(compile_dir)
logger.info("Building NMODL mechanisms in '{}' directory."
.format(compile_dir))
# Check the created units by running modlunit
if __debug__ and self.modlunit_path is not None:
for fname in os.listdir('.'):
if fname.endswith('.mod'):
try:
stdout, stderr = self.run_command([self.modlunit_path,
fname])
assert '<<ERROR>>' not in stderr, (
"Incorrect units assigned in NMODL file:\n {}{}"
.format(stdout, stderr))
except sp.CalledProcessError as e:
raise Pype9BuildError(
"Could not run 'modlunit' to check dimensions in "
"NMODL file: {}\n{}".format(fname, e))
# Run nrnivmodl command in src directory
nrnivmodl_cmd = [self.nrnivmodl_path, '-loadflags',
' '.join(self.nrnivmodl_flags)]
logger.debug("Building nrnivmodl in {} with {}".format(
compile_dir, nrnivmodl_cmd))
stdout, stderr = self.run_command(nrnivmodl_cmd, fail_msg=(
"Compilation of NMODL files for '{}' model failed. See src "
"directory '{}':\n\n{{}}".format(name, compile_dir)))
if stderr.strip().endswith('Error 1'):
raise Pype9BuildError(
"Generated mod file failed to compile with output:\n{}\n{}"
.format(stdout, stderr))
logger.info("Compilation of NEURON (NMODL) files for '{}' "
"completed successfully".format(name))
def get_install_dir(self, name, url):
# return the platform-specific location of the nrnivmodl output files
return os.path.join(self.get_source_dir(name, url), self.specials_dir)
def get_compile_dir(self, name, url):
"""
The compile dir is the same as the src dir for NEURON compile
"""
return self.get_source_dir(name, url)
def load_libraries(self, name, url):
install_dir = self.get_install_dir(name, url)
load_mechanisms(os.path.dirname(install_dir))
def clean_compile_dir(self, *args, **kwargs):
pass # NEURON doesn't use a separate compile dir
def _get_specials_dir(self):
# Create a temporary directory to run nrnivmodl in
tmp_dir_path = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
try:
os.mkdir(tmp_dir_path)
except OSError:
raise Pype9BuildError("Error creating temporary directory '{}'"
.format(tmp_dir_path))
orig_dir = os.getcwd()
os.chdir(tmp_dir_path)
# Run nrnivmodl to see what build directory is created
try:
with open(os.devnull, "w") as fnull:
sp.check_call(self.nrnivmodl_path, stdout=fnull, stderr=fnull)
except sp.CalledProcessError as e:
raise Pype9BuildError("Error test running nrnivmodl".format(e))
# Get the name of the specials directory
try:
specials_dir = os.listdir(tmp_dir_path)[0]
except IndexError:
raise Pype9BuildError(
"Error test running nrnivmodl no build directory created"
.format(e))
# Return back to the original directory
os.chdir(orig_dir)
return specials_dir
def simulator_specific_paths(self):
path = []
try:
for d in os.listdir(os.environ['NRNHOME']):
bin_path = os.path.join(os.environ['NRNHOME'], d, 'bin')
if os.path.exists(bin_path):
path.append(bin_path)
except KeyError:
pass
return path
def assign_str(self, lhs, rhs):
rhs = Expression.expand_integer_powers(rhs)
nmodl_str = ccode(rhs, user_functions=Expression._cfunc_map,
assign_to=lhs)
nmodl_str = Expression.strip_L_from_rationals(nmodl_str)
nmodl_str = nmodl_str.replace(';', '')
return nmodl_str
@property
def libninemlnrn_dir(self):
return os.path.join(self.base_dir, 'libninemlnrn')
@property
def libninemlnrn_so(self):
return os.path.join(self.libninemlnrn_dir, 'libninemlnrn.so')
def compile_libninemlnrn(self):
"""
Compiles libninemlnrn for random distribution support in generated
NMODL mechanisms
"""
logger.info("Attempting to build libninemlnrn")
cc = self.get_cc()
gsl_prefixes = self.get_gsl_prefixes()
# Compile libninemlnrn
compile_cmd = ('{} -fPIC -c -o ninemlnrn.o {}/ninemlnrn.cpp {}'
.format(cc, self.BASE_TMPL_PATH,
' '.join('-I{}/include'.format(p)
for p in gsl_prefixes)))
os.makedirs(self.libninemlnrn_dir)
self.run_cmd(
compile_cmd, work_dir=self.libninemlnrn_dir,
fail_msg=("Unable to compile libninemlnrn extensions"))
# Link libninemlnrn
if platform.system() == 'Darwin':
# On macOS '-install_name' option needs to be set to allow
# rpath to find the compiled library
install_name = "-install_name @rpath/libninemlnrn.so "
else:
install_name = ""
link_cmd = (
"{} -shared {} {} -lm -lgslcblas -lgsl "
"-o libninemlnrn.so ninemlnrn.o -lc".format(
cc, ' '.join('-L{}/lib'.format(p) for p in gsl_prefixes),
install_name))
self.run_cmd(
link_cmd, work_dir=self.libninemlnrn_dir,
fail_msg=("Unable to link libninemlnrn extensions"))
logger.info("Successfully compiled libninemlnrn extension.")
def run_cmd(self, cmd, work_dir, fail_msg):
p = sp.Popen(cmd, shell=True, stdin=sp.PIPE, stdout=sp.PIPE,
stderr=sp.STDOUT, close_fds=True, cwd=work_dir)
stdout = p.stdout.readlines()
result = p.wait()
# test if cmd was successful
if result != 0:
raise Pype9BuildError(
"{}:\n{}".format(fail_msg, ' '.join([''] + stdout)))
@classmethod
def get_neuron_bin_path(cls):
path = neuron.h.neuronhome()
path_contents = os.listdir(path)
if 'examples' in path_contents: # returned NRNHOME/share/nrn
nrnhome = os.path.join(path, '..', '..')
if os.path.exists(os.path.join(nrnhome, 'x86_64')):
bin_path = os.path.join(nrnhome, 'x86_64', 'bin')
else:
bin_path = os.path.join(nrnhome, 'bin')
elif 'bin' in path_contents:
bin_path = os.path.join(path, 'bin')
elif 'nrnivmodl' in path_contents:
bin_path = path
else:
# No known layout matched; fall back so the existence check below
# reports a clear build error instead of raising a NameError.
bin_path = os.path.join(path, 'bin')
if not os.path.exists(bin_path):
raise Pype9BuildError(
"Did not find NEURON 'bin' path at expected '{}' location"
.format(bin_path))
return bin_path
@classmethod
def get_neuron_util_path(cls, util_name, **kwargs):
util_path = os.path.join(cls.get_neuron_bin_path(), util_name)
if not os.path.exists(util_path):
try:
default_path = kwargs['default']
logger.warning("Did not find '{}' at expected path"
.format(util_name, util_path))
util_path = default_path
except KeyError:
raise Pype9BuildError(
"Did not find '{}' at expected path '{}'"
.format(util_name, util_path))
return util_path
def get_cc(self):
"""
Get the C compiler used to compile NMODL files
Returns
-------
cc : str
Name of the C compiler used to compile NMODL files
"""
# Get path to the nrnmech_makefile, should be next to nrnivmodl
nrnmech_makefile_path = os.path.join(
os.path.dirname(os.path.realpath(self.nrnivmodl_path)),
'nrnmech_makefile')
# Extract C-compiler used in nrnmech_makefile
try:
with open(nrnmech_makefile_path) as f:
contents = f.read()
except OSError:
raise Pype9BuildError(
"Could not read nrnmech_makefile at '{}'"
.format(nrnmech_makefile_path))
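# The makefile is expected to contain a single assignment such as
# "CC = gcc"; the regex below captures everything after the '='.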
matches = re.findall(r'\s*CC\s*=\s*(.*)', contents)
if len(matches) != 1:
raise Pype9BuildError(
"Could not extract CC variable from nrnmech_makefile at '{}'"
.format(nrnmech_makefile_path))
cc = matches[0]
return cc
def get_gsl_prefixes(self):
"""
Get the GSL installation prefixes used when linking the generated
NMODL mechanisms
Returns
-------
prefixes : list(str)
List of GSL installation prefixes passed to the compile and link steps
"""
try:
# Used to attempt to determine the location of the GSL library
nest_config_path = self.path_to_utility('nest-config')
except Pype9CommandNotFoundError:
prefixes = []
except sp.CalledProcessError:
raise Pype9BuildError(
"Could not run '{} --libs'".format(self.nest_config_path))
else:
libs = str(sp.check_output('{} --libs'.format(nest_config_path),
shell=True))
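# Each '-L<prefix>/lib' token mentioning gsl is reduced to its prefix,
# e.g. '-L/opt/gsl/lib' -> '/opt/gsl/' (the trailing slash is harmless).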
prefixes = [
p[2:-3] for p in libs.split()
if p.startswith('-L') and p.endswith('lib') and 'gsl' in p]
return prefixes
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
TODO: Modify module doc.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "6/6/13"
import unittest2 as unittest
import os
import json
from pymatgen.core.structure import Molecule
from pymatgen.io.nwchem import NwTask, NwInput, NwInputError, NwOutput
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', "molecules")
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
mol = Molecule(["C", "H", "H", "H", "H"], coords)
class NwTaskTest(unittest.TestCase):
def setUp(self):
self.task = NwTask(0, 1, basis_set={"H": "6-31g"}, theory="dft",
theory_directives={"xc": "b3lyp"})
self.task_cosmo = NwTask(0, 1, basis_set={"H": "6-31g"}, theory="dft",
theory_directives={"xc": "b3lyp"},
alternate_directives={'cosmo': "cosmo"})
self.task_esp = NwTask(0, 1, basis_set={"H": "6-31g"}, theory="esp")
def test_multi_bset(self):
t = NwTask.from_molecule(
mol, theory="dft", basis_set={"C": "6-311++G**",
"H": "6-31++G**"},
theory_directives={"xc": "b3lyp"})
ans = """title "H4C1 dft optimize"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-31++G**"
end
dft
xc b3lyp
end
task dft optimize"""
self.assertEqual(str(t), ans)
def test_str_and_from_string(self):
ans = """title "dft optimize"
charge 0
basis cartesian
H library "6-31g"
end
dft
xc b3lyp
end
task dft optimize"""
self.assertEqual(str(self.task), ans)
def test_to_from_dict(self):
d = self.task.as_dict()
t = NwTask.from_dict(d)
self.assertIsInstance(t, NwTask)
def test_init(self):
self.assertRaises(NwInputError, NwTask, 0, 1, {"H": "6-31g"},
theory="bad")
self.assertRaises(NwInputError, NwTask, 0, 1, {"H": "6-31g"},
operation="bad")
def test_dft_task(self):
task = NwTask.dft_task(mol, charge=1, operation="energy")
ans = """title "H4C1 dft energy"
charge 1
basis cartesian
C library "6-31g"
H library "6-31g"
end
dft
mult 2
xc b3lyp
end
task dft energy"""
self.assertEqual(str(task), ans)
def test_dft_cosmo_task(self):
task = NwTask.dft_task(
mol, charge=mol.charge, operation="energy",
xc="b3lyp", basis_set="6-311++G**",
alternate_directives={'cosmo': {"dielec": 78.0}})
ans = """title "H4C1 dft energy"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 1
xc b3lyp
end
cosmo
dielec 78.0
end
task dft energy"""
self.assertEqual(str(task), ans)
def test_esp_task(self):
task = NwTask.esp_task(mol, charge=mol.charge, operation="",
basis_set="6-311++G**")
ans = """title "H4C1 esp "
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
task esp """
self.assertEqual(str(task), ans)
class NwInputTest(unittest.TestCase):
def setUp(self):
tasks = [
NwTask.dft_task(mol, operation="optimize", xc="b3lyp",
basis_set="6-31++G*"),
NwTask.dft_task(mol, operation="freq", xc="b3lyp",
basis_set="6-31++G*"),
NwTask.dft_task(mol, operation="energy", xc="b3lyp",
basis_set="6-311++G**"),
NwTask.dft_task(mol, charge=mol.charge + 1, operation="energy",
xc="b3lyp", basis_set="6-311++G**"),
NwTask.dft_task(mol, charge=mol.charge - 1, operation="energy",
xc="b3lyp", basis_set="6-311++G**")
]
self.nwi = NwInput(mol, tasks,
geometry_options=["units", "angstroms", "noautoz"],
memory_options="total 1000 mb")
self.nwi_symm = NwInput(mol, tasks,
geometry_options=["units", "angstroms",
"noautoz"],
symmetry_options=["c1"])
def test_str(self):
ans = """memory total 1000 mb
geometry units angstroms noautoz
C 0.0 0.0 0.0
H 0.0 0.0 1.089
H 1.026719 0.0 -0.363
H -0.51336 -0.889165 -0.363
H -0.51336 0.889165 -0.363
end
title "H4C1 dft optimize"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft optimize
title "H4C1 dft freq"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft freq
title "H4C1 dft energy"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 1
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge 1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge -1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
"""
self.assertEqual(str(self.nwi), ans)
ans_symm = """geometry units angstroms noautoz
symmetry c1
C 0.0 0.0 0.0
H 0.0 0.0 1.089
H 1.026719 0.0 -0.363
H -0.51336 -0.889165 -0.363
H -0.51336 0.889165 -0.363
end
title "H4C1 dft optimize"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft optimize
title "H4C1 dft freq"
charge 0
basis cartesian
C library "6-31++G*"
H library "6-31++G*"
end
dft
mult 1
xc b3lyp
end
task dft freq
title "H4C1 dft energy"
charge 0
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 1
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge 1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
title "H4C1 dft energy"
charge -1
basis cartesian
C library "6-311++G**"
H library "6-311++G**"
end
dft
mult 2
xc b3lyp
end
task dft energy
"""
self.assertEqual(str(self.nwi_symm), ans_symm)
def test_to_from_dict(self):
d = self.nwi.as_dict()
nwi = NwInput.from_dict(d)
self.assertIsInstance(nwi, NwInput)
        # Ensure it is JSON-serializable.
json.dumps(d)
d = self.nwi_symm.as_dict()
nwi_symm = NwInput.from_dict(d)
self.assertIsInstance(nwi_symm, NwInput)
json.dumps(d)
def test_from_string_and_file(self):
nwi = NwInput.from_file(os.path.join(test_dir, "ch4.nw"))
self.assertEqual(nwi.tasks[0].theory, "dft")
self.assertEqual(nwi.memory_options, "total 1000 mb stack 400 mb")
self.assertEqual(nwi.tasks[0].basis_set["C"], "6-31++G*")
self.assertEqual(nwi.tasks[-1].basis_set["C"], "6-311++G**")
        # Try a simplified input.
str_inp = """start H4C1
geometry units angstroms
C 0.0 0.0 0.0
H 0.0 0.0 1.089
H 1.026719 0.0 -0.363
H -0.51336 -0.889165 -0.363
H -0.51336 0.889165 -0.363
end
title "H4C1 dft optimize"
charge 0
basis cartesian
H library "6-31++G*"
C library "6-31++G*"
end
dft
xc b3lyp
mult 1
end
task scf optimize
title "H4C1 dft freq"
charge 0
task scf freq
title "H4C1 dft energy"
charge 0
basis cartesian
H library "6-311++G**"
C library "6-311++G**"
end
task dft energy
title "H4C1 dft energy"
charge 1
dft
xc b3lyp
mult 2
end
task dft energy
title "H4C1 dft energy"
charge -1
task dft energy
"""
nwi = NwInput.from_string(str_inp)
self.assertEqual(nwi.geometry_options, ['units', 'angstroms'])
self.assertEqual(nwi.tasks[0].theory, "scf")
self.assertEqual(nwi.tasks[0].basis_set["C"], "6-31++G*")
self.assertEqual(nwi.tasks[-1].theory, "dft")
self.assertEqual(nwi.tasks[-1].basis_set["C"], "6-311++G**")
str_inp_symm = str_inp.replace("geometry units angstroms",
"geometry units angstroms\n symmetry "
"c1")
nwi_symm = NwInput.from_string(str_inp_symm)
self.assertEqual(nwi_symm.geometry_options, ['units', 'angstroms'])
self.assertEqual(nwi_symm.symmetry_options, ['c1'])
self.assertEqual(nwi_symm.tasks[0].theory, "scf")
self.assertEqual(nwi_symm.tasks[0].basis_set["C"], "6-31++G*")
self.assertEqual(nwi_symm.tasks[-1].theory, "dft")
self.assertEqual(nwi_symm.tasks[-1].basis_set["C"], "6-311++G**")
class NwOutputTest(unittest.TestCase):
def test_read(self):
nwo = NwOutput(os.path.join(test_dir, "CH4.nwout"))
nwo_cosmo = NwOutput(os.path.join(test_dir, "N2O4.nwout"))
self.assertEqual(0, nwo.data[0]["charge"])
self.assertEqual(-1, nwo.data[-1]["charge"])
self.assertAlmostEqual(-1102.6224491715582, nwo.data[0]["energies"][-1])
self.assertAlmostEqual(-1102.9986291578023, nwo.data[2]["energies"][-1])
self.assertAlmostEqual(-11156.354030653656,
nwo_cosmo.data[5]["energies"][0]["cosmo scf"])
self.assertAlmostEqual(-11153.374133394364,
nwo_cosmo.data[5]["energies"][0]["gas phase"])
self.assertAlmostEqual(-11156.353632962995,
nwo_cosmo.data[5]["energies"][0]["sol phase"], 2)
self.assertAlmostEqual(-11168.818934311605,
nwo_cosmo.data[6]["energies"][0]["cosmo scf"], 2)
self.assertAlmostEqual(-11166.3624424611462,
nwo_cosmo.data[6]["energies"][0]['gas phase'], 2)
self.assertAlmostEqual(-11168.818934311605,
nwo_cosmo.data[6]["energies"][0]['sol phase'], 2)
self.assertAlmostEqual(-11165.227959110889,
nwo_cosmo.data[7]["energies"][0]['cosmo scf'], 2)
self.assertAlmostEqual(-11165.025443612385,
nwo_cosmo.data[7]["energies"][0]['gas phase'], 2)
self.assertAlmostEqual(-11165.227959110154,
nwo_cosmo.data[7]["energies"][0]['sol phase'], 2)
self.assertAlmostEqual(nwo.data[1]["hessian"][0][0], 4.60187e+01)
self.assertAlmostEqual(nwo.data[1]["hessian"][1][2], -1.14030e-08)
self.assertAlmostEqual(nwo.data[1]["hessian"][2][3], 2.60819e+01)
self.assertAlmostEqual(nwo.data[1]["hessian"][6][6], 1.45055e+02)
self.assertAlmostEqual(nwo.data[1]["hessian"][11][14], 1.35078e+01)
# CH4.nwout, line 722
self.assertAlmostEqual(nwo.data[0]["forces"][0][3], -0.001991)
# N2O4.nwout, line 1071
self.assertAlmostEqual(nwo_cosmo.data[0]["forces"][0][4], 0.011948)
# There should be four DFT gradients.
self.assertEqual(len(nwo_cosmo.data[0]["forces"]), 4)
ie = (nwo.data[4]["energies"][-1] - nwo.data[2]["energies"][-1])
ea = (nwo.data[2]["energies"][-1] - nwo.data[3]["energies"][-1])
self.assertAlmostEqual(0.7575358648355177, ie)
self.assertAlmostEqual(-14.997877958701338, ea)
self.assertEqual(nwo.data[4]["basis_set"]["C"]["description"],
"6-311++G**")
nwo = NwOutput(os.path.join(test_dir, "H4C3O3_1.nwout"))
self.assertTrue(nwo.data[-1]["has_error"])
self.assertEqual(nwo.data[-1]["errors"][0], "Bad convergence")
nwo = NwOutput(os.path.join(test_dir, "CH3CH2O.nwout"))
self.assertTrue(nwo.data[-1]["has_error"])
self.assertEqual(nwo.data[-1]["errors"][0], "Bad convergence")
nwo = NwOutput(os.path.join(test_dir, "C1N1Cl1_1.nwout"))
self.assertTrue(nwo.data[-1]["has_error"])
self.assertEqual(nwo.data[-1]["errors"][0], "autoz error")
nwo = NwOutput(os.path.join(test_dir,
"anthrachinon_wfs_16_ethyl.nwout"))
self.assertTrue(nwo.data[-1]["has_error"])
self.assertEqual(nwo.data[-1]["errors"][0],
"Geometry optimization failed")
nwo = NwOutput(os.path.join(test_dir,
"anthrachinon_wfs_15_carboxyl.nwout"))
self.assertEqual(nwo.data[1]['frequencies'][0][0], -70.47)
self.assertEqual(len(nwo.data[1]['frequencies'][0][1]), 27)
self.assertEqual(nwo.data[1]['frequencies'][-1][0], 3696.74)
self.assertEqual(nwo.data[1]['frequencies'][-1][1][-1],
(0.20498, -0.94542, -0.00073))
self.assertEqual(nwo.data[1]["normal_frequencies"][1][0], -70.72)
self.assertEqual(nwo.data[1]["normal_frequencies"][3][0], -61.92)
self.assertEqual(nwo.data[1]["normal_frequencies"][1][1][-1],
(0.00056, 0.00042, 0.06781))
if __name__ == "__main__":
unittest.main()
|
|
"""
.. module:: dj-stripe.tests.test_managers
:synopsis: dj-stripe Model Manager Tests.
.. moduleauthor:: Alex Kavanaugh (@kavdev)
"""
from copy import deepcopy
import datetime
import decimal
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
from djstripe.models import Transfer, Customer, Subscription, Charge, Plan
from tests import FAKE_PLAN, FAKE_PLAN_II, FAKE_TRANSFER, FAKE_TRANSFER_II, FAKE_TRANSFER_III
class SubscriptionManagerTest(TestCase):
def setUp(self):
# create customers and current subscription records
period_start = datetime.datetime(2013, 4, 1, tzinfo=timezone.utc)
period_end = datetime.datetime(2013, 4, 30, tzinfo=timezone.utc)
start = datetime.datetime(2013, 1, 1, 0, 0, 1, tzinfo=timezone.utc) # more realistic start
self.plan = Plan.sync_from_stripe_data(FAKE_PLAN)
self.plan2 = Plan.sync_from_stripe_data(FAKE_PLAN_II)
for i in range(10):
customer = Customer.objects.create(
subscriber=get_user_model().objects.create_user(
username="patrick{0}".format(i),
email="patrick{0}@gmail.com".format(i)
),
stripe_id="cus_xxxxxxxxxxxxxx{0}".format(i),
livemode=False,
)
Subscription.objects.create(
stripe_id="sub_xxxxxxxxxxxxxx{0}".format(i),
customer=customer,
plan=self.plan,
current_period_start=period_start,
current_period_end=period_end,
status="active",
start=start,
quantity=1
)
customer = Customer.objects.create(
subscriber=get_user_model().objects.create_user(
username="patrick{0}".format(11),
email="patrick{0}@gmail.com".format(11)
),
stripe_id="cus_xxxxxxxxxxxxxx{0}".format(11),
livemode=False,
)
Subscription.objects.create(
stripe_id="sub_xxxxxxxxxxxxxx{0}".format(11),
customer=customer,
plan=self.plan,
current_period_start=period_start,
current_period_end=period_end,
status="canceled",
canceled_at=period_end,
start=start,
quantity=1
)
customer = Customer.objects.create(
subscriber=get_user_model().objects.create_user(
username="patrick{0}".format(12),
email="patrick{0}@gmail.com".format(12)
),
stripe_id="cus_xxxxxxxxxxxxxx{0}".format(12),
livemode=False,
)
Subscription.objects.create(
stripe_id="sub_xxxxxxxxxxxxxx{0}".format(12),
customer=customer,
plan=self.plan2,
current_period_start=period_start,
current_period_end=period_end,
status="active",
start=start,
quantity=1
)
def test_started_during_no_records(self):
self.assertEqual(Subscription.objects.started_during(2013, 4).count(), 0)
def test_started_during_has_records(self):
self.assertEqual(Subscription.objects.started_during(2013, 1).count(), 12)
def test_canceled_during(self):
self.assertEqual(Subscription.objects.canceled_during(2013, 4).count(), 1)
def test_canceled_all(self):
self.assertEqual(
Subscription.objects.canceled().count(), 1)
def test_active_all(self):
self.assertEqual(Subscription.objects.active().count(), 11)
def test_started_plan_summary(self):
for plan in Subscription.objects.started_plan_summary_for(2013, 1):
if plan["plan"] == self.plan:
self.assertEqual(plan["count"], 11)
if plan["plan"] == self.plan2:
self.assertEqual(plan["count"], 1)
def test_active_plan_summary(self):
for plan in Subscription.objects.active_plan_summary():
if plan["plan"] == self.plan:
self.assertEqual(plan["count"], 10)
if plan["plan"] == self.plan2:
self.assertEqual(plan["count"], 1)
def test_canceled_plan_summary(self):
for plan in Subscription.objects.canceled_plan_summary_for(2013, 1):
if plan["plan"] == self.plan:
self.assertEqual(plan["count"], 1)
if plan["plan"] == self.plan2:
self.assertEqual(plan["count"], 0)
def test_churn(self):
self.assertEqual(
Subscription.objects.churn(),
decimal.Decimal("1") / decimal.Decimal("11")
)
class TransferManagerTest(TestCase):
def test_transfer_summary(self):
Transfer.sync_from_stripe_data(deepcopy(FAKE_TRANSFER))
Transfer.sync_from_stripe_data(deepcopy(FAKE_TRANSFER_II))
Transfer.sync_from_stripe_data(deepcopy(FAKE_TRANSFER_III))
self.assertEqual(Transfer.objects.during(2015, 8).count(), 2)
totals = Transfer.objects.paid_totals_for(2015, 12)
self.assertEqual(
totals["total_amount"], decimal.Decimal("190.10")
)
class ChargeManagerTest(TestCase):
def setUp(self):
customer = Customer.objects.create(stripe_id="cus_XXXXXXX", livemode=False)
self.march_charge = Charge.objects.create(
stripe_id="ch_XXXXMAR1",
customer=customer,
stripe_timestamp=datetime.datetime(2015, 3, 31, tzinfo=timezone.utc),
amount=0,
amount_refunded=0,
currency="usd",
fee=0,
fee_details={},
status="pending",
)
self.april_charge_1 = Charge.objects.create(
stripe_id="ch_XXXXAPR1",
customer=customer,
stripe_timestamp=datetime.datetime(2015, 4, 1, tzinfo=timezone.utc),
amount=decimal.Decimal("20.15"),
amount_refunded=0,
currency="usd",
fee=decimal.Decimal("4.90"),
fee_details={},
status="succeeded",
paid=True,
)
self.april_charge_2 = Charge.objects.create(
stripe_id="ch_XXXXAPR2",
customer=customer,
stripe_timestamp=datetime.datetime(2015, 4, 18, tzinfo=timezone.utc),
amount=decimal.Decimal("10.35"),
amount_refunded=decimal.Decimal("5.35"),
currency="usd",
fee=0,
fee_details={},
status="succeeded",
paid=True,
)
self.april_charge_3 = Charge.objects.create(
stripe_id="ch_XXXXAPR3",
customer=customer,
stripe_timestamp=datetime.datetime(2015, 4, 30, tzinfo=timezone.utc),
amount=decimal.Decimal("100.00"),
amount_refunded=decimal.Decimal("80.00"),
currency="usd",
fee=decimal.Decimal("5.00"),
fee_details={},
status="pending",
paid=False,
)
self.may_charge = Charge.objects.create(
stripe_id="ch_XXXXMAY1",
customer=customer,
stripe_timestamp=datetime.datetime(2015, 5, 1, tzinfo=timezone.utc),
amount=0,
amount_refunded=0,
currency="usd",
fee=0,
fee_details={},
status="pending",
)
self.november_charge = Charge.objects.create(
stripe_id="ch_XXXXNOV1",
customer=customer,
stripe_timestamp=datetime.datetime(2015, 11, 16, tzinfo=timezone.utc),
amount=0,
amount_refunded=0,
currency="usd",
fee=0,
fee_details={},
status="pending",
)
self.charge_2014 = Charge.objects.create(
stripe_id="ch_XXXX20141",
customer=customer,
stripe_timestamp=datetime.datetime(2014, 12, 31, tzinfo=timezone.utc),
amount=0,
amount_refunded=0,
currency="usd",
fee=0,
fee_details={},
status="pending",
)
self.charge_2016 = Charge.objects.create(
stripe_id="ch_XXXX20161",
customer=customer,
stripe_timestamp=datetime.datetime(2016, 1, 1, tzinfo=timezone.utc),
amount=0,
amount_refunded=0,
currency="usd",
fee=0,
fee_details={},
status="pending",
)
def test_is_during_april_2015(self):
raw_charges = Charge.objects.during(year=2015, month=4)
charges = [charge.stripe_id for charge in raw_charges]
self.assertIn(self.april_charge_1.stripe_id, charges, "April charge 1 not in charges.")
self.assertIn(self.april_charge_2.stripe_id, charges, "April charge 2 not in charges.")
self.assertIn(self.april_charge_3.stripe_id, charges, "April charge 3 not in charges.")
self.assertNotIn(self.march_charge.stripe_id, charges, "March charge unexpectedly in charges.")
self.assertNotIn(self.may_charge.stripe_id, charges, "May charge unexpectedly in charges.")
self.assertNotIn(self.november_charge.stripe_id, charges, "November charge unexpectedly in charges.")
self.assertNotIn(self.charge_2014.stripe_id, charges, "2014 charge unexpectedly in charges.")
self.assertNotIn(self.charge_2016.stripe_id, charges, "2016 charge unexpectedly in charges.")
def test_get_paid_totals_for_april_2015(self):
paid_totals = Charge.objects.paid_totals_for(year=2015, month=4)
self.assertEqual(decimal.Decimal("30.50"), paid_totals["total_amount"], "Total amount is not correct.")
self.assertEqual(decimal.Decimal("4.90"), paid_totals["total_fee"], "Total fees is not correct.")
self.assertEqual(
decimal.Decimal("5.35"),
paid_totals["total_refunded"], "Total amount refunded is not correct."
)
|
|
from __future__ import print_function, division, absolute_import
import itertools
from numba import types, intrinsics
from numba.utils import PYVERSION, RANGE_ITER_OBJECTS, operator_map
from numba.typing.templates import (AttributeTemplate, ConcreteTemplate,
AbstractTemplate, builtin_global, builtin,
builtin_attr, signature, bound_function,
make_callable_template)
for obj in RANGE_ITER_OBJECTS:
builtin_global(obj, types.range_type)
builtin_global(len, types.len_type)
builtin_global(slice, types.slice_type)
builtin_global(abs, types.abs_type)
builtin_global(print, types.print_type)
@builtin
class Print(ConcreteTemplate):
key = types.print_type
intcases = [signature(types.none, ty) for ty in types.integer_domain]
realcases = [signature(types.none, ty) for ty in types.real_domain]
cases = intcases + realcases
@builtin
class PrintOthers(AbstractTemplate):
key = types.print_type
def accepted_types(self, ty):
if ty in types.integer_domain or ty in types.real_domain:
return True
if isinstance(ty, types.CharSeq):
return True
def generic(self, args, kws):
assert not kws, "kwargs to print is not supported."
for a in args:
if not self.accepted_types(a):
raise TypeError("Type %s is not printable." % a)
return signature(types.none, *args)
@builtin
class Abs(ConcreteTemplate):
key = types.abs_type
int_cases = [signature(ty, ty) for ty in types.signed_domain]
real_cases = [signature(ty, ty) for ty in types.real_domain]
complex_cases = [signature(ty.underlying_float, ty)
for ty in types.complex_domain]
cases = int_cases + real_cases + complex_cases
@builtin
class Slice(ConcreteTemplate):
key = types.slice_type
cases = [
signature(types.slice3_type),
signature(types.slice3_type, types.none, types.none),
signature(types.slice3_type, types.none, types.intp),
signature(types.slice3_type, types.intp, types.none),
signature(types.slice3_type, types.intp, types.intp),
signature(types.slice3_type, types.intp, types.intp, types.intp),
]
@builtin
class Range(ConcreteTemplate):
key = types.range_type
cases = [
signature(types.range_state32_type, types.int32),
signature(types.range_state32_type, types.int32, types.int32),
signature(types.range_state32_type, types.int32, types.int32,
types.int32),
signature(types.range_state64_type, types.int64),
signature(types.range_state64_type, types.int64, types.int64),
signature(types.range_state64_type, types.int64, types.int64,
types.int64),
signature(types.unsigned_range_state64_type, types.uint64),
signature(types.unsigned_range_state64_type, types.uint64, types.uint64),
signature(types.unsigned_range_state64_type, types.uint64, types.uint64,
types.uint64),
]
@builtin
class GetIter(AbstractTemplate):
key = "getiter"
def generic(self, args, kws):
assert not kws
[obj] = args
if isinstance(obj, types.IterableType):
return signature(obj.iterator_type, obj)
@builtin
class IterNext(AbstractTemplate):
key = "iternext"
def generic(self, args, kws):
assert not kws
[it] = args
if isinstance(it, types.IteratorType):
return signature(types.Pair(it.yield_type, types.boolean), it)
@builtin
class PairFirst(AbstractTemplate):
"""
    Given a heterogeneous pair, return the first element.
"""
key = "pair_first"
def generic(self, args, kws):
assert not kws
[pair] = args
if isinstance(pair, types.Pair):
return signature(pair.first_type, pair)
@builtin
class PairSecond(AbstractTemplate):
"""
    Given a heterogeneous pair, return the second element.
"""
key = "pair_second"
def generic(self, args, kws):
assert not kws
[pair] = args
if isinstance(pair, types.Pair):
return signature(pair.second_type, pair)
def choose_result_bitwidth(*inputs):
return max(types.intp.bitwidth, *(tp.bitwidth for tp in inputs))
def choose_result_int(*inputs):
"""
Choose the integer result type for an operation on integer inputs,
according to the integer typing NBEP.
"""
bitwidth = choose_result_bitwidth(*inputs)
signed = any(tp.signed for tp in inputs)
return types.Integer.from_bitwidth(bitwidth, signed)
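# Illustrative note (not part of the original module): on a 64-bit build where
# types.intp is int64, choose_result_int(types.int32, types.uint64) picks a
# bitwidth of 64 and, because one operand is signed, returns types.int64.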
# The "machine" integer types to take into consideration for operator typing
# (according to the integer typing NBEP)
machine_ints = (
sorted(set((types.intp, types.int64))) +
sorted(set((types.uintp, types.uint64)))
)
# Explicit integer rules for binary operators; smaller ints will be
# automatically upcast.
integer_binop_cases = tuple(
signature(choose_result_int(op1, op2), op1, op2)
for op1, op2 in itertools.product(machine_ints, machine_ints)
)
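# Illustrative note (not part of the original module): with the machine_ints
# above on a 64-bit build, integer_binop_cases contains signatures such as
# (int64, int64) -> int64 and (int64, uint64) -> int64; smaller integer
# arguments are handled by upcasting to one of these machine widths.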
class BinOp(ConcreteTemplate):
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
@builtin
class BinOpAdd(BinOp):
key = "+"
@builtin
class BinOpSub(BinOp):
key = "-"
@builtin
class BinOpMul(BinOp):
key = "*"
@builtin
class BinOpDiv(BinOp):
key = "/?"
@builtin
class BinOpMod(ConcreteTemplate):
key = "%"
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@builtin
class BinOpTrueDiv(ConcreteTemplate):
key = "/"
cases = [signature(types.float64, op1, op2)
for op1, op2 in itertools.product(machine_ints, machine_ints)]
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
@builtin
class BinOpFloorDiv(ConcreteTemplate):
key = "//"
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@builtin
class BinOpPower(ConcreteTemplate):
key = "**"
cases = list(integer_binop_cases)
cases += [signature(types.float64, types.float64, op)
for op in sorted(types.signed_domain)]
cases += [signature(types.float64, types.float64, op)
for op in sorted(types.unsigned_domain)]
cases += [signature(op, op, op)
for op in sorted(types.real_domain)]
cases += [signature(op, op, op)
for op in sorted(types.complex_domain)]
class PowerBuiltin(BinOpPower):
key = pow
# TODO add 3 operand version
builtin_global(pow, types.Function(PowerBuiltin))
class BitwiseShiftOperation(ConcreteTemplate):
cases = list(integer_binop_cases)
@builtin
class BitwiseLeftShift(BitwiseShiftOperation):
key = "<<"
@builtin
class BitwiseRightShift(BitwiseShiftOperation):
key = ">>"
class BitwiseLogicOperation(BinOp):
cases = list(integer_binop_cases)
@builtin
class BitwiseAnd(BitwiseLogicOperation):
key = "&"
@builtin
class BitwiseOr(BitwiseLogicOperation):
key = "|"
@builtin
class BitwiseXor(BitwiseLogicOperation):
key = "^"
# Bitwise invert and negate are special: we must not upcast the operand
# for unsigned numbers, as that would change the result.
# (e.g. ~np.uint8(0) == 255 but ~np.uint32(0) == 4294967295).
@builtin
class BitwiseInvert(ConcreteTemplate):
key = "~"
cases = [signature(types.int8, types.boolean)]
cases += [signature(choose_result_int(op), op) for op in types.unsigned_domain]
cases += [signature(choose_result_int(op), op) for op in types.signed_domain]
class UnaryOp(ConcreteTemplate):
cases = [signature(choose_result_int(op), op) for op in types.unsigned_domain]
cases += [signature(choose_result_int(op), op) for op in types.signed_domain]
cases += [signature(op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op) for op in sorted(types.complex_domain)]
@builtin
class UnaryNegate(UnaryOp):
key = "-"
@builtin
class UnaryPositive(UnaryOp):
key = "+"
@builtin
class UnaryNot(ConcreteTemplate):
key = "not"
cases = [signature(types.boolean, types.boolean)]
cases += [signature(types.boolean, op) for op in sorted(types.signed_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.unsigned_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.real_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.complex_domain)]
class OrderedCmpOp(ConcreteTemplate):
cases = [signature(types.boolean, types.boolean, types.boolean)]
cases += [signature(types.boolean, op, op) for op in sorted(types.signed_domain)]
cases += [signature(types.boolean, op, op) for op in sorted(types.unsigned_domain)]
cases += [signature(types.boolean, op, op) for op in sorted(types.real_domain)]
class UnorderedCmpOp(ConcreteTemplate):
cases = OrderedCmpOp.cases + [
signature(types.boolean, op, op) for op in sorted(types.complex_domain)]
@builtin
class CmpOpLt(OrderedCmpOp):
key = '<'
@builtin
class CmpOpLe(OrderedCmpOp):
key = '<='
@builtin
class CmpOpGt(OrderedCmpOp):
key = '>'
@builtin
class CmpOpGe(OrderedCmpOp):
key = '>='
@builtin
class CmpOpEq(UnorderedCmpOp):
key = '=='
@builtin
class CmpOpNe(UnorderedCmpOp):
key = '!='
class TupleCompare(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
if isinstance(lhs, types.BaseTuple) and isinstance(rhs, types.BaseTuple):
for u, v in zip(lhs, rhs):
# Check element-wise comparability
res = self.context.resolve_function_type(self.key, (u, v), {})
if res is None:
break
else:
return signature(types.boolean, lhs, rhs)
@builtin
class TupleEq(TupleCompare):
key = '=='
@builtin
class TupleNe(TupleCompare):
key = '!='
@builtin
class TupleGe(TupleCompare):
key = '>='
@builtin
class TupleGt(TupleCompare):
key = '>'
@builtin
class TupleLe(TupleCompare):
key = '<='
@builtin
class TupleLt(TupleCompare):
key = '<'
# Register default implementations of binary inplace operators for
# immutable types.
class InplaceImmutable(AbstractTemplate):
def generic(self, args, kws):
lhs, rhs = args
if not lhs.mutable:
return self.context.resolve_function_type(self.key[:-1], args, kws)
# Inplace ops on mutable arguments must be typed explicitly
for _binop, _inp, op in operator_map:
if _inp:
template = type('InplaceImmutable_%s' % _binop,
(InplaceImmutable,),
dict(key=op + '='))
builtin(template)
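# Illustrative note (not part of the original module): for an immutable type
# such as a tuple, an expression like t += (1, 2) is typed here by stripping
# the trailing '=' from the key and re-resolving plain '+', so it types the
# same as t = t + (1, 2); mutable types get no such fallback and must provide
# their own inplace typing.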
class CmpOpIdentity(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
return signature(types.boolean, lhs, rhs)
@builtin
class CmpOpIs(CmpOpIdentity):
key = 'is'
@builtin
class CmpOpIsNot(CmpOpIdentity):
key = 'is not'
def normalize_1d_index(index):
"""
Normalize the *index* type (an integer or slice) for indexing a 1D
sequence.
"""
if index == types.slice3_type:
return types.slice3_type
elif isinstance(index, types.Integer):
return types.intp if index.signed else types.uintp
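# Illustrative note (not part of the original module): normalize_1d_index maps
# e.g. types.int32 -> types.intp and types.uint8 -> types.uintp, passes the
# slice3 type through unchanged, and implicitly returns None for any other
# index type.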
def normalize_nd_index(index):
"""
Normalize the *index* type (an integer, slice or tuple thereof) for
    indexing an N-D sequence.
"""
if isinstance(index, types.UniTuple):
if index.dtype in types.integer_domain:
idxtype = types.intp if index.dtype.signed else types.uintp
return types.UniTuple(idxtype, len(index))
elif index.dtype == types.slice3_type:
return index
elif isinstance(index, types.Tuple):
for ty in index:
if (ty not in types.integer_domain and ty != types.slice3_type):
raise TypeError('Type %s of index %s is unsupported for indexing'
% (ty, index))
return index
return normalize_1d_index(index)
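# Illustrative note (not part of the original module): a types.UniTuple of two
# int32 values normalizes to a UniTuple of two intp values, while a
# heterogeneous types.Tuple is accepted only if every element is an integer
# type or slice3; anything else raises TypeError.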
@builtin
class GetItemCPointer(AbstractTemplate):
key = "getitem"
def generic(self, args, kws):
assert not kws
ptr, idx = args
if isinstance(ptr, types.CPointer) and isinstance(idx, types.Integer):
return signature(ptr.dtype, ptr, normalize_1d_index(idx))
@builtin
class SetItemCPointer(AbstractTemplate):
key = "setitem"
def generic(self, args, kws):
assert not kws
ptr, idx, val = args
if isinstance(ptr, types.CPointer) and isinstance(idx, types.Integer):
return signature(types.none, ptr, normalize_1d_index(idx), ptr.dtype)
@builtin
class Len(AbstractTemplate):
key = types.len_type
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.Buffer, types.BaseTuple)):
return signature(types.intp, val)
@builtin
class TupleBool(AbstractTemplate):
key = "is_true"
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.BaseTuple)):
return signature(types.boolean, val)
#-------------------------------------------------------------------------------
@builtin_attr
class MemoryViewAttribute(AttributeTemplate):
key = types.MemoryView
if PYVERSION >= (3,):
def resolve_contiguous(self, buf):
return types.boolean
def resolve_c_contiguous(self, buf):
return types.boolean
def resolve_f_contiguous(self, buf):
return types.boolean
def resolve_itemsize(self, buf):
return types.intp
def resolve_nbytes(self, buf):
return types.intp
def resolve_readonly(self, buf):
return types.boolean
def resolve_shape(self, buf):
return types.UniTuple(types.intp, buf.ndim)
def resolve_strides(self, buf):
return types.UniTuple(types.intp, buf.ndim)
def resolve_ndim(self, buf):
return types.intp
#-------------------------------------------------------------------------------
@builtin_attr
class BooleanAttribute(AttributeTemplate):
key = types.Boolean
def resolve___class__(self, ty):
return types.NumberClass(ty)
@builtin_attr
class NumberAttribute(AttributeTemplate):
key = types.Number
def resolve___class__(self, ty):
return types.NumberClass(ty)
def resolve_real(self, ty):
return getattr(ty, "underlying_float", ty)
def resolve_imag(self, ty):
return getattr(ty, "underlying_float", ty)
@bound_function("complex.conjugate")
def resolve_conjugate(self, ty, args, kws):
assert not args
assert not kws
return signature(ty)
#-------------------------------------------------------------------------------
@builtin_attr
class NumberClassAttribute(AttributeTemplate):
key = types.NumberClass
def resolve___call__(self, classty):
"""
Resolve a number class's constructor (e.g. calling int(...))
"""
ty = classty.instance_type
def typer(val):
return ty
return types.Function(make_callable_template(key=ty, typer=typer))
def register_number_classes(register_global):
nb_types = set(types.number_domain)
nb_types.add(types.bool_)
for ty in nb_types:
register_global(ty, types.NumberClass(ty))
register_number_classes(builtin_global)
#------------------------------------------------------------------------------
class Max(AbstractTemplate):
key = max
def generic(self, args, kws):
assert not kws
# max(a, b, ...)
if len(args) < 2:
return
for a in args:
if a not in types.number_domain:
return
retty = self.context.unify_types(*args)
if retty is not None:
return signature(retty, *args)
class Min(AbstractTemplate):
key = min
def generic(self, args, kws):
assert not kws
# min(a, b, ...)
if len(args) < 2:
return
for a in args:
if a not in types.number_domain:
return
retty = self.context.unify_types(*args)
if retty is not None:
return signature(retty, *args)
class Round(ConcreteTemplate):
key = round
if PYVERSION < (3, 0):
cases = [
signature(types.float32, types.float32),
signature(types.float64, types.float64),
]
else:
cases = [
signature(types.intp, types.float32),
signature(types.int64, types.float64),
]
cases += [
signature(types.float32, types.float32, types.intp),
signature(types.float64, types.float64, types.intp),
]
builtin_global(max, types.Function(Max))
builtin_global(min, types.Function(Min))
builtin_global(round, types.Function(Round))
#------------------------------------------------------------------------------
class Bool(AbstractTemplate):
key = bool
def generic(self, args, kws):
assert not kws
[arg] = args
if arg in types.number_domain:
return signature(types.boolean, arg)
# XXX typing for bool cannot be polymorphic because of the
# types.Function thing, so we redirect to the "is_true"
# intrinsic.
return self.context.resolve_function_type("is_true", args, kws)
class Int(AbstractTemplate):
key = int
def generic(self, args, kws):
assert not kws
[arg] = args
if arg not in types.number_domain:
raise TypeError("int() only support for numbers")
if arg in types.complex_domain:
raise TypeError("int() does not support complex")
if arg in types.integer_domain:
return signature(arg, arg)
if arg in types.real_domain:
return signature(types.intp, arg)
class Float(AbstractTemplate):
key = float
def generic(self, args, kws):
assert not kws
[arg] = args
if arg not in types.number_domain:
raise TypeError("float() only support for numbers")
if arg in types.complex_domain:
raise TypeError("float() does not support complex")
if arg in types.integer_domain:
return signature(types.float64, arg)
elif arg in types.real_domain:
return signature(arg, arg)
class Complex(AbstractTemplate):
key = complex
def generic(self, args, kws):
assert not kws
if len(args) == 1:
[arg] = args
if arg not in types.number_domain:
raise TypeError("complex() only support for numbers")
if arg == types.float32:
return signature(types.complex64, arg)
else:
return signature(types.complex128, arg)
elif len(args) == 2:
[real, imag] = args
if (real not in types.number_domain or
imag not in types.number_domain):
raise TypeError("complex() only support for numbers")
if real == imag == types.float32:
return signature(types.complex64, real, imag)
else:
return signature(types.complex128, real, imag)
builtin_global(bool, types.Function(Bool))
builtin_global(int, types.Function(Int))
builtin_global(float, types.Function(Float))
builtin_global(complex, types.Function(Complex))
#------------------------------------------------------------------------------
@builtin
class Enumerate(AbstractTemplate):
key = enumerate
def generic(self, args, kws):
assert not kws
it = args[0]
if len(args) > 1 and not args[1] in types.integer_domain:
raise TypeError("Only integers supported as start value in "
"enumerate")
elif len(args) > 2:
            # Let Python raise its own error.
enumerate(*args)
if isinstance(it, types.IterableType):
enumerate_type = types.EnumerateType(it)
return signature(enumerate_type, *args)
builtin_global(enumerate, types.Function(Enumerate))
@builtin
class Zip(AbstractTemplate):
key = zip
def generic(self, args, kws):
assert not kws
if all(isinstance(it, types.IterableType) for it in args):
zip_type = types.ZipType(args)
return signature(zip_type, *args)
builtin_global(zip, types.Function(Zip))
@builtin
class Intrinsic_array_ravel(AbstractTemplate):
key = intrinsics.array_ravel
def generic(self, args, kws):
assert not kws
[arr] = args
if arr.layout in 'CF' and arr.ndim >= 1:
return signature(arr.copy(ndim=1), arr)
builtin_global(intrinsics.array_ravel, types.Function(Intrinsic_array_ravel))
#------------------------------------------------------------------------------
@builtin
class TypeBuiltin(AbstractTemplate):
key = type
def generic(self, args, kws):
assert not kws
if len(args) == 1:
# One-argument type() -> return the __class__
try:
classty = self.context.resolve_getattr(args[0], "__class__")
except KeyError:
return
else:
return signature(classty, *args)
builtin_global(type, types.Function(TypeBuiltin))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
flask-07-upgrade
~~~~~~~~~~~~~~~~
This command line script scans a whole application tree and attempts to
    output a unified diff with all the changes that are necessary to easily
upgrade the application to 0.7 and to not yield deprecation warnings.
This will also attempt to find `after_request` functions that don't modify
the response and appear to be better suited for `teardown_request`.
    This application is admittedly an incredible hack, but because what it
    attempts to accomplish is impossible to do statically, it tries to support
    at least the most common patterns. The diff it generates should be
    hand-reviewed and not applied blindly without making backups.
:copyright: (c) Copyright 2011 by Armin Ronacher.
:license: see LICENSE for more details.
"""
import re
import os
import inspect
import difflib
import posixpath
from optparse import OptionParser
try:
import ast
except ImportError:
ast = None
TEMPLATE_LOOKAHEAD = 4096
_app_re_part = r'((?:[a-zA-Z_][a-zA-Z0-9_]*app)|app|application)'
_string_re_part = r"('([^'\\]*(?:\\.[^'\\]*)*)'" \
r'|"([^"\\]*(?:\\.[^"\\]*)*)")'
_from_import_re = re.compile(r'^\s*from flask import\s+')
_url_for_re = re.compile(r'\b(url_for\()(%s)' % _string_re_part)
_render_template_re = re.compile(r'\b(render_template\()(%s)' % _string_re_part)
_after_request_re = re.compile(r'((?:@\S+\.(?:app_)?))(after_request)(\b\s*$)(?m)')
_module_constructor_re = re.compile(r'([a-zA-Z0-9_][a-zA-Z0-9_]*)\s*=\s*Module'
r'\(__name__\s*(?:,\s*(?:name\s*=\s*)?(%s))?' %
_string_re_part)
_error_handler_re = re.compile(r'%s\.error_handlers\[\s*(\d+)\s*\]' % _app_re_part)
_mod_route_re = re.compile(r'@([a-zA-Z0-9_][a-zA-Z0-9_]*)\.route')
_blueprint_related = [
(re.compile(r'request\.module'), 'request.blueprint'),
(re.compile(r'register_module'), 'register_blueprint'),
(re.compile(r'%s\.modules' % _app_re_part), '\\1.blueprints')
]
def make_diff(filename, old, new):
for line in difflib.unified_diff(old.splitlines(), new.splitlines(),
posixpath.normpath(posixpath.join('a', filename)),
posixpath.normpath(posixpath.join('b', filename)),
lineterm=''):
print line
def looks_like_teardown_function(node):
returns = [x for x in ast.walk(node) if isinstance(x, ast.Return)]
if len(returns) != 1:
return
return_def = returns[0]
resp_name = node.args.args[0]
if not isinstance(return_def.value, ast.Name) or \
return_def.value.id != resp_name.id:
return
for body_node in node.body:
for child in ast.walk(body_node):
if isinstance(child, ast.Name) and \
child.id == resp_name.id:
if child is not return_def.value:
return
return resp_name.id
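# Illustrative note (not part of the original script): a handler of the form
#
#   @app.after_request
#   def close_session(response):
#       db_session.remove()
#       return response
#
# returns its response argument unchanged and never touches it elsewhere, so
# looks_like_teardown_function() reports it as a teardown_request candidate;
# the names app, close_session and db_session are made up for the example.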
def fix_url_for(contents, module_declarations=None):
if module_declarations is None:
skip_module_test = True
else:
skip_module_test = False
mapping = dict(module_declarations)
annotated_lines = []
def make_line_annotations():
if not annotated_lines:
last_index = 0
for line in contents.splitlines(True):
last_index += len(line)
annotated_lines.append((last_index, line))
def backtrack_module_name(call_start):
make_line_annotations()
for idx, (line_end, line) in enumerate(annotated_lines):
if line_end > call_start:
for _, line in reversed(annotated_lines[:idx]):
match = _mod_route_re.search(line)
if match is not None:
shortname = match.group(1)
return mapping.get(shortname)
def handle_match(match):
if not skip_module_test:
modname = backtrack_module_name(match.start())
if modname is None:
return match.group(0)
prefix = match.group(1)
endpoint = ast.literal_eval(match.group(2))
if endpoint.startswith('.'):
endpoint = endpoint[1:]
elif '.' not in endpoint:
endpoint = '.' + endpoint
else:
return match.group(0)
return prefix + repr(endpoint)
return _url_for_re.sub(handle_match, contents)
def fix_teardown_funcs(contents):
def is_return_line(line):
args = line.strip().split()
return args and args[0] == 'return'
def fix_single(match, lines, lineno):
if not lines[lineno + 1].startswith('def'):
return
block_lines = inspect.getblock(lines[lineno + 1:])
func_code = ''.join(block_lines)
if func_code[0].isspace():
node = ast.parse('if 1:\n' + func_code).body[0].body
else:
node = ast.parse(func_code).body[0]
response_param_name = looks_like_teardown_function(node)
if response_param_name is None:
return
before = lines[:lineno]
decorator = [match.group(1) +
match.group(2).replace('after_', 'teardown_') +
match.group(3)]
body = [line.replace(response_param_name, 'exception')
for line in block_lines if
not is_return_line(line)]
after = lines[lineno + len(block_lines) + 1:]
return before + decorator + body + after
content_lines = contents.splitlines(True)
while 1:
found_one = False
for idx, line in enumerate(content_lines):
match = _after_request_re.match(line)
if match is None:
continue
new_content_lines = fix_single(match, content_lines, idx)
if new_content_lines is not None:
content_lines = new_content_lines
break
else:
break
return ''.join(content_lines)
def get_module_autoname(filename):
directory, filename = os.path.split(filename)
if filename != '__init__.py':
return os.path.splitext(filename)[0]
return os.path.basename(directory)
def rewrite_from_imports(prefix, fromlist, lineiter):
import_block = [prefix, fromlist]
if fromlist[0] == '(' and fromlist[-1] != ')':
for line in lineiter:
import_block.append(line)
if line.rstrip().endswith(')'):
break
elif fromlist[-1] == '\\':
for line in lineiter:
import_block.append(line)
if line.rstrip().endswith('\\'):
break
return ''.join(import_block).replace('Module', 'Blueprint')
def rewrite_blueprint_imports(contents):
new_file = []
lineiter = iter(contents.splitlines(True))
for line in lineiter:
match = _from_import_re.search(line)
if match is not None:
new_file.extend(rewrite_from_imports(match.group(),
line[match.end():],
lineiter))
else:
new_file.append(line)
return ''.join(new_file)
def rewrite_for_blueprints(contents, filename):
modules_declared = []
def handle_match(match):
target = match.group(1)
name_param = match.group(2)
if name_param is None:
modname = get_module_autoname(filename)
else:
modname = ast.literal_eval(name_param)
modules_declared.append((target, modname))
return '%s = %s' % (target, 'Blueprint(%r, __name__' % modname)
new_contents = _module_constructor_re.sub(handle_match, contents)
if modules_declared:
new_contents = rewrite_blueprint_imports(new_contents)
for pattern, replacement in _blueprint_related:
new_contents = pattern.sub(replacement, new_contents)
return new_contents, dict(modules_declared)
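# Illustrative note (not part of the original script): a declaration such as
#   admin = Module(__name__, 'admin')
# is rewritten by rewrite_for_blueprints() to
#   admin = Blueprint('admin', __name__)
# and ('admin', 'admin') is recorded in modules_declared so that fix_url_for()
# can later rewrite endpoint strings for views defined on that module.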
def upgrade_python_file(filename, contents, teardown):
new_contents = contents
if teardown:
new_contents = fix_teardown_funcs(new_contents)
new_contents, modules = rewrite_for_blueprints(new_contents, filename)
new_contents = fix_url_for(new_contents, modules)
new_contents = _error_handler_re.sub('\\1.error_handler_spec[None][\\2]',
new_contents)
make_diff(filename, contents, new_contents)
def upgrade_template_file(filename, contents):
new_contents = fix_url_for(contents, None)
make_diff(filename, contents, new_contents)
def walk_path(path):
this_file = os.path.realpath(__file__).rstrip('c')
for dirpath, dirnames, filenames in os.walk(path):
dirnames[:] = [x for x in dirnames if not x.startswith('.')]
for filename in filenames:
filename = os.path.join(dirpath, filename)
if os.path.realpath(filename) == this_file:
continue
if filename.endswith('.py'):
yield filename, 'python'
# skip files that are diffs. These might be false positives
# when run multiple times.
elif not filename.endswith(('.diff', '.patch', '.udiff')):
with open(filename) as f:
contents = f.read(TEMPLATE_LOOKAHEAD)
                if ('{% for' in contents or '{% if' in contents or
                        '{{ url_for' in contents):
yield filename, 'template'
def scan_path(path=None, teardown=True):
for filename, type in walk_path(path):
with open(filename) as f:
contents = f.read()
if type == 'python':
upgrade_python_file(filename, contents, teardown)
elif type == 'template':
upgrade_template_file(filename, contents)
def main():
"""Entrypoint"""
parser = OptionParser(usage='%prog [options] [paths]')
parser.add_option('-T', '--no-teardown-detection', dest='no_teardown',
action='store_true', help='Do not attempt to '
'detect teardown function rewrites.')
parser.add_option('-b', '--bundled-templates', dest='bundled_tmpl',
action='store_true', help='Indicate to the system '
'that templates are bundled with modules. Default '
'is auto detect.')
options, args = parser.parse_args()
if not args:
args = ['.']
if ast is None:
parser.error('Python 2.6 or later is required to run the upgrade script.\n'
'The runtime requirements for Flask 0.7 however are still '
'Python 2.5.')
for path in args:
scan_path(path, teardown=not options.no_teardown)
if __name__ == '__main__':
main()
|
|
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import RequestFactory
from importlib import import_module
from django_cas_ng.models import SessionTicket, ProxyGrantingTicket
from django_cas_ng.views import login, logout, callback
import pytest
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
# Helper that instantiates a middleware class and applies its
# process_request() to the given request.
def process_request_for_middleware(request, middleware):
middleware = middleware()
middleware.process_request(request)
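# Illustrative note (not part of the original tests): applying
# SessionMiddleware this way attaches a fresh request.session, and
# AuthenticationMiddleware attaches request.user, which is why the view tests
# below run both helpers before calling login(), logout() or callback().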
@pytest.mark.django_db
def test_login_post_logout(django_user_model, settings):
"""
    Test that a SAML logout request posted to the login view cleans up the
    matching SessionTicket and ProxyGrantingTicket records.
"""
settings.CAS_VERSION = 'CAS_2_SAML_1_0'
data = {'logoutRequest': '<samlp:LogoutRequest '
'xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol">'
'<samlp:SessionIndex>fake-ticket'
'</samlp:SessionIndex></samlp:LogoutRequest>'
}
session = SessionStore()
session['fake_session'] = 'fake-session'
session.save()
assert SessionStore(session_key=session.session_key) is not None
factory = RequestFactory()
request = factory.post('/login/', data)
request.session = session
# Create a fake session ticket and make sure it exists in the db
session_ticket = SessionTicket.objects.create(
session_key=session.session_key,
ticket='fake-ticket'
)
assert session_ticket is not None
assert SessionTicket.objects.filter(session_key=session.session_key,
ticket='fake-ticket').exists() is True
user = django_user_model.objects.create(username='test-user', email='[email protected]')
assert user is not None
assert django_user_model.objects.filter(username='test-user').exists() is True
request.user = user
# Create a fake pgt
pgt = ProxyGrantingTicket.objects.create(session_key=session.session_key,
user=user, pgtiou='fake-ticket-iou',
pgt='fake-ticket')
assert pgt is not None
assert ProxyGrantingTicket.objects.filter(session_key=session.session_key,
user=user, pgtiou='fake-ticket-iou',
pgt='fake-ticket').exists() is True
login(request)
assert SessionTicket.objects.filter(session_key=session.session_key,
ticket='fake-ticket').exists() is False
assert ProxyGrantingTicket.objects.filter(session_key=session.session_key,
user=user, pgtiou='fake-ticket-iou',
pgt='fake-ticket').exists() is False
assert SessionTicket.objects.filter(session_key=session.session_key,
ticket='fake-ticket').exists() is False
@pytest.mark.django_db
def test_login_authenticate_and_create_user(monkeypatch, django_user_model, settings):
"""
Test the case where the login view authenticates a new user.
"""
# No need to test the message framework
settings.CAS_LOGIN_MSG = None
# Make sure we use our backend
settings.AUTHENTICATION_BACKENDS = ['django_cas_ng.backends.CASBackend']
    # The JSON serializer was having a hard time
settings.SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
def mock_verify(ticket, service):
return '[email protected]', {'ticket': ticket, 'service': service}, None
monkeypatch.setattr('cas.CASClientV2.verify_ticket', mock_verify)
factory = RequestFactory()
request = factory.get('/login/', {'ticket': 'fake-ticket',
'service': 'fake-service'})
# Create a session object from the middleware
process_request_for_middleware(request, SessionMiddleware)
# Create a user object from middleware
process_request_for_middleware(request, AuthenticationMiddleware)
response = login(request)
assert response.status_code == 302
assert response['Location'] == '/'
assert django_user_model.objects.get(username='[email protected]').is_authenticated() is True
@pytest.mark.django_db
def test_login_authenticate_do_not_create_user(monkeypatch, django_user_model, settings):
"""
Test the case where the login view authenticates a user, but does not
create a user based on the CAS_CREATE_USER setting.
"""
    # Do not create users on CAS authentication
settings.CAS_CREATE_USER = False
# No need to test the message framework
settings.CAS_LOGIN_MSG = None
# Make sure we use our backend
settings.AUTHENTICATION_BACKENDS = ['django_cas_ng.backends.CASBackend']
    # The JSON serializer was having a hard time
settings.SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
def mock_verify(ticket, service):
return '[email protected]', {'ticket': ticket, 'service': service}, None
monkeypatch.setattr('cas.CASClientV2.verify_ticket', mock_verify)
factory = RequestFactory()
request = factory.get('/login/', {'ticket': 'fake-ticket',
'service': 'fake-service'})
# Create a session object from the middleware
process_request_for_middleware(request, SessionMiddleware)
# Create a user object from middleware
process_request_for_middleware(request, AuthenticationMiddleware)
with pytest.raises(PermissionDenied):
login(request)
assert django_user_model.objects.filter(username='[email protected]').exists() is False
@pytest.mark.django_db
def test_login_proxy_callback(monkeypatch, django_user_model, settings):
"""
Test the case where the login view has a pgtiou.
"""
    # Enable the proxy callback
settings.CAS_PROXY_CALLBACK = True
# No need to test the message framework
settings.CAS_LOGIN_MSG = None
# Make sure we use our backend
settings.AUTHENTICATION_BACKENDS = ['django_cas_ng.backends.CASBackend']
    # The JSON serializer was having a hard time
settings.SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
def mock_verify(ticket, service):
return '[email protected]', {'ticket': ticket, 'service': service}, None
monkeypatch.setattr('cas.CASClientV2.verify_ticket', mock_verify)
factory = RequestFactory()
request = factory.get('/login/', {'ticket': 'fake-ticket',
'service': 'fake-service'})
# Create a session object from the middleware
process_request_for_middleware(request, SessionMiddleware)
# Create a user object from middleware
process_request_for_middleware(request, AuthenticationMiddleware)
request.session['pgtiou'] = 'fake-pgtiou'
request.session.save()
user = django_user_model.objects.create_user('[email protected]', '')
assert user is not None
pgt = ProxyGrantingTicket.objects.create(session_key=request.session.session_key,
user=user, pgtiou='fake-pgtiou',
pgt='fake-pgt')
assert pgt is not None
response = login(request)
assert response.status_code == 302
assert django_user_model.objects.get(username='[email protected]').is_authenticated() is True
assert ProxyGrantingTicket.objects.filter(pgtiou='fake-pgtiou').exists() is True
assert ProxyGrantingTicket.objects.filter(pgtiou='fake-pgtiou').count() == 1
@pytest.mark.django_db
def test_login_redirect_based_on_cookie(monkeypatch, django_user_model, settings):
"""
    Test the case where the login view authenticates a new user and redirects
    them based on the stored next value.
"""
# No need to test the message framework
settings.CAS_LOGIN_MSG = None
# Make sure we use our backend
settings.AUTHENTICATION_BACKENDS = ['django_cas_ng.backends.CASBackend']
    # The JSON serializer was having a hard time
settings.SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
    # Store the next pointer in the session
settings.CAS_STORE_NEXT = True
def mock_verify(ticket, service):
return '[email protected]', {'ticket': ticket, 'service': service}, None
monkeypatch.setattr('cas.CASClientV2.verify_ticket', mock_verify)
factory = RequestFactory()
request = factory.get('/login/', {'ticket': 'fake-ticket',
'service': 'fake-service'})
# Create a session object from the middleware
process_request_for_middleware(request, SessionMiddleware)
# Create a user object from middleware
process_request_for_middleware(request, AuthenticationMiddleware)
# Add the next pointer
request.session['CASNEXT'] = '/admin/'
response = login(request)
assert response.status_code == 302
assert response['Location'] == '/admin/'
assert 'CASNEXT' not in request.session
assert django_user_model.objects.get(username='[email protected]').is_authenticated() is True
@pytest.mark.django_db
def test_login_no_ticket():
"""
Test the case where we try to login with no ticket
"""
factory = RequestFactory()
request = factory.get('/login/')
# Create a session object from the middleware
process_request_for_middleware(request, SessionMiddleware)
# Create a user object from middleware
process_request_for_middleware(request, AuthenticationMiddleware)
response = login(request)
assert response.status_code == 302
@pytest.mark.django_db
def test_login_no_ticket_stores_default_next(settings):
"""
    When there is no explicit next pointer, the default next URL gets stored
    in the session
"""
settings.CAS_STORE_NEXT = True
factory = RequestFactory()
request = factory.get('/login/')
# Create a session object from the middleware
process_request_for_middleware(request, SessionMiddleware)
# Create a user object from middleware
process_request_for_middleware(request, AuthenticationMiddleware)
response = login(request)
assert response.status_code == 302
assert 'CASNEXT' in request.session
assert request.session['CASNEXT'] == '/'
@pytest.mark.django_db
def test_login_no_ticket_stores_explicit_next(settings):
"""
    When there is an explicit next pointer, it gets stored in the session
"""
settings.CAS_STORE_NEXT = True
factory = RequestFactory()
request = factory.get('/login/', {'next': '/admin/'})
# Create a session object from the middleware
process_request_for_middleware(request, SessionMiddleware)
# Create a user object from middleware
process_request_for_middleware(request, AuthenticationMiddleware)
response = login(request)
assert response.status_code == 302
assert 'CASNEXT' in request.session
assert request.session['CASNEXT'] == '/admin/'
def test_login_put_not_allowed():
factory = RequestFactory()
request = factory.put('/login/')
response = login(request)
assert response.status_code == 405
def test_login_delete_not_allowed():
factory = RequestFactory()
request = factory.delete('/login/')
response = login(request)
assert response.status_code == 405
@pytest.mark.django_db
def test_logout_not_completely(django_user_model, settings):
"""
Test the case where the user logs out, without the logout_completely flag.
"""
settings.CAS_LOGOUT_COMPLETELY = False
factory = RequestFactory()
request = factory.get('/logout/')
# Create a session object from the middleware
process_request_for_middleware(request, SessionMiddleware)
user = django_user_model.objects.create_user('[email protected]', '')
assert user is not None
request.user = user
response = logout(request)
assert response.status_code == 302
assert request.user.is_anonymous() is True
@pytest.mark.django_db
def test_logout_completely(django_user_model, settings):
"""
Test the case where the user logs out.
"""
settings.CAS_LOGOUT_COMPLETELY = True
factory = RequestFactory()
request = factory.get('/logout/')
# Create a session object from the middleware
process_request_for_middleware(request, SessionMiddleware)
user = django_user_model.objects.create_user('[email protected]', '')
assert user is not None
request.user = user
response = logout(request)
assert response.status_code == 302
assert request.user.is_anonymous() is True
def test_logout_post_not_allowed():
factory = RequestFactory()
request = factory.post('/logout/')
response = logout(request)
assert response.status_code == 405
def test_logout_put_not_allowed():
factory = RequestFactory()
request = factory.put('/logout/')
response = logout(request)
assert response.status_code == 405
def test_logout_delete_not_allowed():
factory = RequestFactory()
request = factory.delete('/logout/')
response = logout(request)
assert response.status_code == 405
@pytest.mark.django_db
def test_callback_create_pgt():
"""
Test the case where a pgt callback is used.
"""
factory = RequestFactory()
request = factory.get('/callback/', {'pgtId': 'fake-pgtId',
'pgtIou': 'fake-pgtIou'})
response = callback(request)
assert response.status_code == 200
assert ProxyGrantingTicket.objects.filter(pgt='fake-pgtId',
pgtiou='fake-pgtIou'
).exists() is True
@pytest.mark.django_db
def test_callback_post_logout(django_user_model, settings):
"""
    Test that a SAML logout request posted to the callback view cleans up
    the matching SessionTicket and ProxyGrantingTicket records.
"""
settings.CAS_VERSION = 'CAS_2_SAML_1_0'
data = {'logoutRequest': '<samlp:LogoutRequest '
'xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol">'
'<samlp:SessionIndex>fake-ticket'
'</samlp:SessionIndex></samlp:LogoutRequest>'
}
session = SessionStore()
session['fake_session'] = 'fake-session'
session.save()
assert SessionStore(session_key=session.session_key) is not None
factory = RequestFactory()
request = factory.post('/callback/', data)
request.session = session
# Create a fake session ticket and make sure it exists in the db
session_ticket = SessionTicket.objects.create(
session_key=session.session_key,
ticket='fake-ticket'
)
assert session_ticket is not None
assert SessionTicket.objects.filter(session_key=session.session_key,
ticket='fake-ticket').exists() is True
user = django_user_model.objects.create(username='test-user', email='[email protected]')
assert user is not None
assert django_user_model.objects.filter(username='test-user').exists() is True
request.user = user
# Create a fake pgt
pgt = ProxyGrantingTicket.objects.create(session_key=session.session_key,
user=user, pgtiou='fake-ticket-iou',
pgt='fake-ticket')
assert pgt is not None
assert ProxyGrantingTicket.objects.filter(session_key=session.session_key,
user=user, pgtiou='fake-ticket-iou',
pgt='fake-ticket').exists() is True
callback(request)
assert SessionTicket.objects.filter(session_key=session.session_key,
ticket='fake-ticket').exists() is False
assert ProxyGrantingTicket.objects.filter(session_key=session.session_key,
user=user, pgtiou='fake-ticket-iou',
pgt='fake-ticket').exists() is False
assert SessionTicket.objects.filter(session_key=session.session_key,
ticket='fake-ticket').exists() is False
def test_callback_put_not_allowed():
factory = RequestFactory()
request = factory.put('/callback/')
response = callback(request)
assert response.status_code == 405
def test_callback_delete_not_allowed():
factory = RequestFactory()
request = factory.delete('/callback/')
response = callback(request)
assert response.status_code == 405
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for configuration properties."""
from __future__ import annotations
from core import feconf
from core import schema_utils
from core.constants import constants
from core.domain import caching_services
from core.domain import change_domain
from core.platform import models
(config_models, suggestion_models,) = models.Registry.import_models(
[models.NAMES.config, models.NAMES.suggestion])
CMD_CHANGE_PROPERTY_VALUE = 'change_property_value'
LIST_OF_FEATURED_TRANSLATION_LANGUAGES_DICTS_SCHEMA = {
'type': schema_utils.SCHEMA_TYPE_LIST,
'items': {
'type': schema_utils.SCHEMA_TYPE_DICT,
'properties': [{
'name': 'language_code',
'schema': {
'type': schema_utils.SCHEMA_TYPE_UNICODE,
'validators': [{
'id': 'is_supported_audio_language_code',
}]
},
}, {
'name': 'explanation',
'schema': {
'type': schema_utils.SCHEMA_TYPE_UNICODE
}
}]
}
}
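# Illustrative note (not part of the original module): a value conforming to
# the schema above could look like
#   [{'language_code': 'fr', 'explanation': 'Pilot for French translations.'}]
# assuming 'fr' is a supported audio language code.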
SET_OF_STRINGS_SCHEMA = {
'type': schema_utils.SCHEMA_TYPE_LIST,
'items': {
'type': schema_utils.SCHEMA_TYPE_UNICODE,
},
'validators': [{
'id': 'is_uniquified',
}],
}
SET_OF_CLASSROOM_DICTS_SCHEMA = {
'type': schema_utils.SCHEMA_TYPE_LIST,
'items': {
'type': schema_utils.SCHEMA_TYPE_DICT,
'properties': [{
'name': 'name',
'schema': {
'type': schema_utils.SCHEMA_TYPE_UNICODE
}
}, {
'name': 'url_fragment',
'schema': {
'type': schema_utils.SCHEMA_TYPE_UNICODE,
'validators': [{
'id': 'is_url_fragment',
}, {
'id': 'has_length_at_most',
'max_value': constants.MAX_CHARS_IN_CLASSROOM_URL_FRAGMENT
}]
},
}, {
'name': 'course_details',
'schema': {
'type': schema_utils.SCHEMA_TYPE_UNICODE,
'ui_config': {
'rows': 8,
}
}
}, {
'name': 'topic_list_intro',
'schema': {
'type': schema_utils.SCHEMA_TYPE_UNICODE,
'ui_config': {
'rows': 5,
}
}
}, {
'name': 'topic_ids',
'schema': {
'type': schema_utils.SCHEMA_TYPE_LIST,
'items': {
'type': schema_utils.SCHEMA_TYPE_UNICODE,
},
'validators': [{
'id': 'is_uniquified',
}]
}
}]
}
}
VMID_SHARED_SECRET_KEY_SCHEMA = {
'type': schema_utils.SCHEMA_TYPE_LIST,
'items': {
'type': schema_utils.SCHEMA_TYPE_DICT,
'properties': [{
'name': 'vm_id',
'schema': {
'type': schema_utils.SCHEMA_TYPE_UNICODE
}
}, {
'name': 'shared_secret_key',
'schema': {
'type': schema_utils.SCHEMA_TYPE_UNICODE
}
}]
}
}
BOOL_SCHEMA = {
'type': schema_utils.SCHEMA_TYPE_BOOL
}
UNICODE_SCHEMA = {
'type': schema_utils.SCHEMA_TYPE_UNICODE
}
FLOAT_SCHEMA = {
'type': schema_utils.SCHEMA_TYPE_FLOAT
}
INT_SCHEMA = {
'type': schema_utils.SCHEMA_TYPE_INT
}
POSITIVE_INT_SCHEMA = {
'type': schema_utils.SCHEMA_TYPE_CUSTOM,
'obj_type': 'PositiveInt'
}
class ConfigPropertyChange(change_domain.BaseChange):
"""Domain object for changes made to a config property object.
The allowed commands, together with the attributes:
- 'change_property_value' (with new_value)
"""
ALLOWED_COMMANDS = [{
'name': CMD_CHANGE_PROPERTY_VALUE,
'required_attribute_names': ['new_value'],
'optional_attribute_names': [],
'user_id_attribute_names': []
}]
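# Illustrative note (not part of the original module): a change payload that
# satisfies ALLOWED_COMMANDS above is expected to look roughly like the dict
# below; the concrete new_value shown is hypothetical.
#
#     {
#         'cmd': CMD_CHANGE_PROPERTY_VALUE,
#         'new_value': True,
#     }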
class ConfigProperty:
"""A property with a name and a default value.
NOTE TO DEVELOPERS: These config properties are deprecated. Do not reuse
these names:
- about_page_youtube_video_id.
- admin_email_address.
- admin_ids.
- admin_usernames.
- allow_yaml_file_upload.
- banned_usernames.
- banner_alt_text.
- before_end_body_tag_hook.
- before_end_head_tag_hook.
- carousel_slides_config.
- classroom_page_is_accessible.
- collection_editor_whitelist.
- contact_email_address.
- contribute_gallery_page_announcement.
- default_twitter_share_message_editor.
- disabled_explorations.
- editor_page_announcement.
- editor_prerequisites_agreement.
- embedded_google_group_url.
- full_site_url.
- moderator_ids.
- moderator_request_forum_url.
- moderator_usernames.
- publicize_exploration_email_html_body.
- sharing_options.
- sharing_options_twitter_text.
- sidebar_menu_additional_links.
- site_forum_url.
- social_media_buttons.
- splash_page_exploration_id.
- splash_page_exploration_version.
- splash_page_youtube_video_id.
- ssl_challenge_responses.
- whitelisted_email_senders.
"""
def __init__(self, name, schema, description, default_value):
if Registry.get_config_property(name):
raise Exception('Property with name %s already exists' % name)
self._name = name
self._schema = schema
self._description = description
self._default_value = self.normalize(default_value)
Registry.init_config_property(self.name, self)
@property
def name(self):
"""Returns the name of the configuration property."""
return self._name
@property
def schema(self):
"""Returns the schema of the configuration property."""
return self._schema
@property
def description(self):
"""Returns the description of the configuration property."""
return self._description
@property
def default_value(self):
"""Returns the default value of the configuration property."""
return self._default_value
@property
def value(self):
"""Get the latest value from memcache, datastore, or use default."""
memcached_items = caching_services.get_multi(
caching_services.CACHE_NAMESPACE_CONFIG, None, [self.name])
if self.name in memcached_items:
return memcached_items[self.name]
datastore_item = config_models.ConfigPropertyModel.get(
self.name, strict=False)
if datastore_item is not None:
caching_services.set_multi(
caching_services.CACHE_NAMESPACE_CONFIG, None,
{
datastore_item.id: datastore_item.value
})
return datastore_item.value
return self.default_value
def set_value(self, committer_id, raw_value):
"""Sets the value of the property. In general, this should not be
called directly -- use config_services.set_property() instead.
"""
value = self.normalize(raw_value)
# Set value in datastore.
model_instance = config_models.ConfigPropertyModel.get(
self.name, strict=False)
if model_instance is None:
model_instance = config_models.ConfigPropertyModel(
id=self.name)
model_instance.value = value
model_instance.commit(
committer_id, [{
'cmd': CMD_CHANGE_PROPERTY_VALUE,
'new_value': value
}])
# Set value in memcache.
caching_services.set_multi(
caching_services.CACHE_NAMESPACE_CONFIG, None,
{
model_instance.id: model_instance.value
})
def normalize(self, value):
"""Validates the given object using the schema and normalizes if
necessary.
Args:
            value: *. The value of the configuration property.
Returns:
instance. The normalized object.
"""
email_validators = [{'id': 'does_not_contain_email'}]
return schema_utils.normalize_against_schema(
value, self._schema, global_validators=email_validators)
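# Illustrative sketch (not part of the original module): `normalize` applies
# the property's own schema together with a global `does_not_contain_email`
# validator, so a unicode property should accept plain text but reject a
# string containing an email address. The property name below is hypothetical.
#
#     demo_property = ConfigProperty(
#         'demo_unicode_property', UNICODE_SCHEMA, 'A demo property', '')
#     demo_property.normalize('hello')             # returns 'hello'
#     demo_property.normalize('contact: a@b.com')  # expected to fail validation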
class Registry:
"""Registry of all configuration properties."""
# The keys of _config_registry are the property names, and the values are
# ConfigProperty instances.
_config_registry = {}
@classmethod
def init_config_property(cls, name, instance):
"""Initializes _config_registry with keys as the property names and
values as instances of the specified property.
Args:
name: str. The name of the configuration property.
instance: *. The instance of the configuration property.
"""
cls._config_registry[name] = instance
@classmethod
def get_config_property(cls, name):
"""Returns the instance of the specified name of the configuration
property.
Args:
name: str. The name of the configuration property.
Returns:
instance. The instance of the specified configuration property.
"""
return cls._config_registry.get(name)
@classmethod
def get_config_property_schemas(cls):
"""Return a dict of editable config property schemas.
The keys of the dict are config property names. The values are dicts
with the following keys: schema, description, value.
"""
schemas_dict = {}
for (property_name, instance) in cls._config_registry.items():
schemas_dict[property_name] = {
'schema': instance.schema,
'description': instance.description,
'value': instance.value
}
return schemas_dict
@classmethod
def get_all_config_property_names(cls):
"""Return a list of all the config property names.
Returns:
list. The list of all config property names.
"""
return list(cls._config_registry)
PROMO_BAR_ENABLED = ConfigProperty(
'promo_bar_enabled', BOOL_SCHEMA,
'Whether the promo bar should be enabled for all users', False)
PROMO_BAR_MESSAGE = ConfigProperty(
'promo_bar_message', UNICODE_SCHEMA,
'The message to show to all users if the promo bar is enabled', '')
VMID_SHARED_SECRET_KEY_MAPPING = ConfigProperty(
'vmid_shared_secret_key_mapping', VMID_SHARED_SECRET_KEY_SCHEMA,
'VMID and shared secret key corresponding to that VM',
[{
'vm_id': feconf.DEFAULT_VM_ID,
'shared_secret_key': feconf.DEFAULT_VM_SHARED_SECRET
}])
WHITELISTED_EXPLORATION_IDS_FOR_PLAYTHROUGHS = ConfigProperty(
'whitelisted_exploration_ids_for_playthroughs',
SET_OF_STRINGS_SCHEMA,
'The set of exploration IDs for recording playthrough issues', [
'umPkwp0L1M0-', 'MjZzEVOG47_1', '9trAQhj6uUC2', 'rfX8jNkPnA-1',
'0FBWxCE5egOw', '670bU6d9JGBh', 'aHikhPlxYgOH', '-tMgcP1i_4au',
'zW39GLG_BdN2', 'Xa3B_io-2WI5', '6Q6IyIDkjpYC', 'osw1m5Q3jK41'])
CLASSROOM_PAGES_DATA = ConfigProperty(
'classroom_pages_data', SET_OF_CLASSROOM_DICTS_SCHEMA,
'The details for each classroom page.', [{
'name': 'math',
'url_fragment': 'math',
'topic_ids': [],
'course_details': '',
'topic_list_intro': ''
}]
)
RECORD_PLAYTHROUGH_PROBABILITY = ConfigProperty(
'record_playthrough_probability', FLOAT_SCHEMA,
'The probability of recording playthroughs', 0.2)
IS_IMPROVEMENTS_TAB_ENABLED = ConfigProperty(
'is_improvements_tab_enabled', BOOL_SCHEMA,
'Exposes the Improvements Tab for creators in the exploration editor.',
False)
ALWAYS_ASK_LEARNERS_FOR_ANSWER_DETAILS = ConfigProperty(
'always_ask_learners_for_answer_details', BOOL_SCHEMA,
'Always ask learners for answer details. For testing -- do not use',
False)
CLASSROOM_PROMOS_ARE_ENABLED = ConfigProperty(
'classroom_promos_are_enabled', BOOL_SCHEMA,
'Show classroom promos.', False)
FEATURED_TRANSLATION_LANGUAGES = ConfigProperty(
'featured_translation_languages',
LIST_OF_FEATURED_TRANSLATION_LANGUAGES_DICTS_SCHEMA,
'Featured Translation Languages', []
)
HIGH_BOUNCE_RATE_TASK_STATE_BOUNCE_RATE_CREATION_THRESHOLD = ConfigProperty(
'high_bounce_rate_task_state_bounce_rate_creation_threshold',
FLOAT_SCHEMA,
'The bounce-rate a state must exceed to create a new improvements task.',
0.20)
HIGH_BOUNCE_RATE_TASK_STATE_BOUNCE_RATE_OBSOLETION_THRESHOLD = ConfigProperty(
'high_bounce_rate_task_state_bounce_rate_obsoletion_threshold',
FLOAT_SCHEMA,
'The bounce-rate a state must fall under to discard its improvement task.',
0.20)
HIGH_BOUNCE_RATE_TASK_MINIMUM_EXPLORATION_STARTS = ConfigProperty(
'high_bounce_rate_task_minimum_exploration_starts',
INT_SCHEMA,
'The minimum number of times an exploration is started before it can '
'generate high bounce-rate improvements tasks.',
100)
MAX_NUMBER_OF_SVGS_IN_MATH_SVGS_BATCH = ConfigProperty(
'max_number_of_svgs_in_math_svgs_batch',
INT_SCHEMA,
    'The maximum number of Math SVGs that can be sent in a batch of math rich '
    'text svgs.',
25)
MAX_NUMBER_OF_EXPLORATIONS_IN_MATH_SVGS_BATCH = ConfigProperty(
'max_number_of_explorations_in_math_svgs_batch',
INT_SCHEMA,
    'The maximum number of explorations that can be sent in a batch of math '
    'rich text svgs.',
2)
MAX_NUMBER_OF_TAGS_ASSIGNED_TO_BLOG_POST = ConfigProperty(
'max_number_of_tags_assigned_to_blog_post',
POSITIVE_INT_SCHEMA,
'The maximum number of tags that can be selected to categorize the blog'
    ' post.',
10
)
LIST_OF_DEFAULT_TAGS_FOR_BLOG_POST = ConfigProperty(
'list_of_default_tags_for_blog_post',
SET_OF_STRINGS_SCHEMA,
'The list of tags available to a blog post editor for categorizing the blog'
' post.',
['News', 'International', 'Educators', 'Learners', 'Community',
'Partnerships', 'Volunteer', 'Stories', 'Languages', 'New features',
'New lessons', 'Software development', 'Content']
)
CONTRIBUTOR_DASHBOARD_IS_ENABLED = ConfigProperty(
'contributor_dashboard_is_enabled', BOOL_SCHEMA,
'Enable contributor dashboard page. The default value is true.', True)
CONTRIBUTOR_DASHBOARD_REVIEWER_EMAILS_IS_ENABLED = ConfigProperty(
'contributor_dashboard_reviewer_emails_is_enabled', BOOL_SCHEMA,
(
'Enable sending Contributor Dashboard reviewers email notifications '
'about suggestions that need review. The default value is false.'
), False)
ENABLE_ADMIN_NOTIFICATIONS_FOR_SUGGESTIONS_NEEDING_REVIEW = ConfigProperty(
'notify_admins_suggestions_waiting_too_long_is_enabled', BOOL_SCHEMA,
(
'Enable sending admins email notifications if there are Contributor '
'Dashboard suggestions that have been waiting for a review for more '
'than %s days. The default value is false.' % (
suggestion_models.SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS)
), False)
ENABLE_ADMIN_NOTIFICATIONS_FOR_REVIEWER_SHORTAGE = ConfigProperty(
'enable_admin_notifications_for_reviewer_shortage', BOOL_SCHEMA,
(
'Enable sending admins email notifications if Contributor Dashboard '
'reviewers are needed in specific suggestion types. The default value '
'is false.'
), False)
MAX_NUMBER_OF_SUGGESTIONS_PER_REVIEWER = ConfigProperty(
'max_number_of_suggestions_per_reviewer',
INT_SCHEMA,
'The maximum number of Contributor Dashboard suggestions per reviewer. If '
'the number of suggestions per reviewer surpasses this maximum, for any '
'given suggestion type on the dashboard, the admins are notified by email.',
5)
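# Illustrative sketch (not part of the original module): how callers typically
# interact with the properties defined above. Reading `.value` consults the
# cache first, then the datastore, then the default; `set_value` writes the
# datastore model and refreshes the cache. The committer id shown is
# hypothetical, and production code should go through
# config_services.set_property() rather than calling set_value directly.
#
#     enabled = PROMO_BAR_ENABLED.value
#     PROMO_BAR_ENABLED.set_value('committer_id', True)
#     schemas = Registry.get_config_property_schemas()
#     assert 'promo_bar_enabled' in schemas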
|
|
from typing import Any, List, Mapping, Set
from unittest import mock
import orjson
from django.db import connection
from django.http import HttpResponse
from zerver.lib.actions import do_change_stream_invite_only
from zerver.lib.fix_unreads import fix, fix_unsubscribed
from zerver.lib.message import (
MessageDict,
UnreadMessagesResult,
aggregate_unread_data,
apply_unread_message_event,
bulk_access_messages,
get_raw_unread_data,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import get_subscription, queries_captured
from zerver.lib.topic_mutes import add_topic_mute
from zerver.models import (
Message,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
get_realm,
get_stream,
)
def check_flags(flags: List[str], expected: Set[str]) -> None:
"""
The has_alert_word flag can be ignored for most tests.
"""
assert "has_alert_word" not in expected
flag_set = set(flags)
flag_set.discard("has_alert_word")
if flag_set != expected:
raise AssertionError(f"expected flags (ignoring has_alert_word) to be {expected}")
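# A quick illustration (not part of the original tests) of how check_flags is
# used: the has_alert_word flag is discarded before comparison, so both calls
# below pass, while a missing "read" flag would raise AssertionError.
#
#     check_flags(["read"], {"read"})
#     check_flags(["read", "has_alert_word"], {"read"})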
class FirstUnreadAnchorTests(ZulipTestCase):
"""
HISTORICAL NOTE:
The two tests in this class were originally written when
we had the concept of a "pointer", and they may be a bit
redundant in what they now check.
"""
def test_use_first_unread_anchor(self) -> None:
self.login("hamlet")
# Mark all existing messages as read
result = self.client_post("/json/mark_all_as_read")
self.assert_json_success(result)
# Send a new message (this will be unread)
new_message_id = self.send_stream_message(self.example_user("othello"), "Verona", "test")
# If we call get_messages with use_first_unread_anchor=True, we
# should get the message we just sent
messages_response = self.get_messages_response(
anchor="first_unread", num_before=0, num_after=1
)
self.assertEqual(messages_response["messages"][0]["id"], new_message_id)
self.assertEqual(messages_response["anchor"], new_message_id)
# Test with the old way of expressing use_first_unread_anchor=True
messages_response = self.get_messages_response(
anchor=0, num_before=0, num_after=1, use_first_unread_anchor=True
)
self.assertEqual(messages_response["messages"][0]["id"], new_message_id)
self.assertEqual(messages_response["anchor"], new_message_id)
# We want to get the message_id of an arbitrary old message. We can
# call get_messages with use_first_unread_anchor=False and simply
# save the first message we're returned.
messages = self.get_messages(
anchor=0, num_before=0, num_after=2, use_first_unread_anchor=False
)
old_message_id = messages[0]["id"]
# Verify the message is marked as read
user_message = UserMessage.objects.get(
message_id=old_message_id, user_profile=self.example_user("hamlet")
)
self.assertTrue(user_message.flags.read)
# Let's set this old message to be unread
result = self.client_post(
"/json/messages/flags",
{"messages": orjson.dumps([old_message_id]).decode(), "op": "remove", "flag": "read"},
)
# Verify it's now marked as unread
user_message = UserMessage.objects.get(
message_id=old_message_id, user_profile=self.example_user("hamlet")
)
self.assert_json_success(result)
self.assertFalse(user_message.flags.read)
# Now if we call get_messages with use_first_unread_anchor=True,
# we should get the old message we just set to unread
messages_response = self.get_messages_response(
anchor="first_unread", num_before=0, num_after=1
)
self.assertEqual(messages_response["messages"][0]["id"], old_message_id)
self.assertEqual(messages_response["anchor"], old_message_id)
def test_visible_messages_use_first_unread_anchor(self) -> None:
self.login("hamlet")
result = self.client_post("/json/mark_all_as_read")
self.assert_json_success(result)
new_message_id = self.send_stream_message(self.example_user("othello"), "Verona", "test")
messages_response = self.get_messages_response(
anchor="first_unread", num_before=0, num_after=1
)
self.assertEqual(messages_response["messages"][0]["id"], new_message_id)
self.assertEqual(messages_response["anchor"], new_message_id)
with mock.patch(
"zerver.views.message_fetch.get_first_visible_message_id", return_value=new_message_id
):
messages_response = self.get_messages_response(
anchor="first_unread", num_before=0, num_after=1
)
self.assertEqual(messages_response["messages"][0]["id"], new_message_id)
self.assertEqual(messages_response["anchor"], new_message_id)
with mock.patch(
"zerver.views.message_fetch.get_first_visible_message_id",
return_value=new_message_id + 1,
):
            messages_response = self.get_messages_response(
                anchor="first_unread", num_before=0, num_after=1
            )
            self.assert_length(messages_response["messages"], 0)
            self.assertIn("anchor", messages_response)
with mock.patch(
"zerver.views.message_fetch.get_first_visible_message_id",
return_value=new_message_id - 1,
):
messages = self.get_messages(anchor="first_unread", num_before=0, num_after=1)
self.assert_length(messages, 1)
class UnreadCountTests(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
with mock.patch(
"zerver.lib.push_notifications.push_notifications_enabled", return_value=True
) as mock_push_notifications_enabled:
self.unread_msg_ids = [
self.send_personal_message(
self.example_user("iago"), self.example_user("hamlet"), "hello"
),
self.send_personal_message(
self.example_user("iago"), self.example_user("hamlet"), "hello2"
),
]
mock_push_notifications_enabled.assert_called()
# Sending a new message results in unread UserMessages being created
def test_new_message(self) -> None:
self.login("hamlet")
content = "Test message for unset read bit"
last_msg = self.send_stream_message(self.example_user("hamlet"), "Verona", content)
user_messages = list(UserMessage.objects.filter(message=last_msg))
self.assertGreater(len(user_messages), 0)
for um in user_messages:
self.assertEqual(um.message.content, content)
if um.user_profile.email != self.example_email("hamlet"):
self.assertFalse(um.flags.read)
def test_update_flags(self) -> None:
self.login("hamlet")
result = self.client_post(
"/json/messages/flags",
{"messages": orjson.dumps(self.unread_msg_ids).decode(), "op": "add", "flag": "read"},
)
self.assert_json_success(result)
# Ensure we properly set the flags
found = 0
for msg in self.get_messages():
if msg["id"] in self.unread_msg_ids:
check_flags(msg["flags"], {"read"})
found += 1
self.assertEqual(found, 2)
result = self.client_post(
"/json/messages/flags",
{
"messages": orjson.dumps([self.unread_msg_ids[1]]).decode(),
"op": "remove",
"flag": "read",
},
)
self.assert_json_success(result)
# Ensure we properly remove just one flag
for msg in self.get_messages():
if msg["id"] == self.unread_msg_ids[0]:
check_flags(msg["flags"], {"read"})
elif msg["id"] == self.unread_msg_ids[1]:
check_flags(msg["flags"], set())
def test_mark_all_in_stream_read(self) -> None:
self.login("hamlet")
user_profile = self.example_user("hamlet")
stream = self.subscribe(user_profile, "test_stream")
self.subscribe(self.example_user("cordelia"), "test_stream")
message_id = self.send_stream_message(self.example_user("hamlet"), "test_stream", "hello")
unrelated_message_id = self.send_stream_message(
self.example_user("hamlet"), "Denmark", "hello"
)
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=1):
result = self.client_post(
"/json/mark_stream_as_read",
{
"stream_id": stream.id,
},
)
self.assert_json_success(result)
event = events[0]["event"]
expected = dict(
operation="add",
messages=[message_id],
flag="read",
type="update_message_flags",
all=False,
)
differences = [key for key in expected if expected[key] != event[key]]
self.assert_length(differences, 0)
hamlet = self.example_user("hamlet")
um = list(UserMessage.objects.filter(message=message_id))
for msg in um:
if msg.user_profile.email == hamlet.email:
self.assertTrue(msg.flags.read)
else:
self.assertFalse(msg.flags.read)
unrelated_messages = list(UserMessage.objects.filter(message=unrelated_message_id))
for msg in unrelated_messages:
if msg.user_profile.email == hamlet.email:
self.assertFalse(msg.flags.read)
def test_mark_all_in_invalid_stream_read(self) -> None:
self.login("hamlet")
invalid_stream_id = "12345678"
result = self.client_post(
"/json/mark_stream_as_read",
{
"stream_id": invalid_stream_id,
},
)
self.assert_json_error(result, "Invalid stream id")
def test_mark_all_topics_unread_with_invalid_stream_name(self) -> None:
self.login("hamlet")
invalid_stream_id = "12345678"
result = self.client_post(
"/json/mark_topic_as_read",
{
"stream_id": invalid_stream_id,
"topic_name": "whatever",
},
)
self.assert_json_error(result, "Invalid stream id")
def test_mark_all_in_stream_topic_read(self) -> None:
self.login("hamlet")
user_profile = self.example_user("hamlet")
self.subscribe(user_profile, "test_stream")
message_id = self.send_stream_message(
self.example_user("hamlet"), "test_stream", "hello", "test_topic"
)
unrelated_message_id = self.send_stream_message(
self.example_user("hamlet"), "Denmark", "hello", "Denmark2"
)
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=1):
result = self.client_post(
"/json/mark_topic_as_read",
{
"stream_id": get_stream("test_stream", user_profile.realm).id,
"topic_name": "test_topic",
},
)
self.assert_json_success(result)
event = events[0]["event"]
expected = dict(
operation="add",
messages=[message_id],
flag="read",
type="update_message_flags",
all=False,
)
differences = [key for key in expected if expected[key] != event[key]]
self.assert_length(differences, 0)
um = list(UserMessage.objects.filter(message=message_id))
for msg in um:
if msg.user_profile_id == user_profile.id:
self.assertTrue(msg.flags.read)
unrelated_messages = list(UserMessage.objects.filter(message=unrelated_message_id))
for msg in unrelated_messages:
if msg.user_profile_id == user_profile.id:
self.assertFalse(msg.flags.read)
def test_mark_all_in_invalid_topic_read(self) -> None:
self.login("hamlet")
invalid_topic_name = "abc"
result = self.client_post(
"/json/mark_topic_as_read",
{
"stream_id": get_stream("Denmark", get_realm("zulip")).id,
"topic_name": invalid_topic_name,
},
)
self.assert_json_error(result, "No such topic 'abc'")
class FixUnreadTests(ZulipTestCase):
def test_fix_unreads(self) -> None:
user = self.example_user("hamlet")
realm = get_realm("zulip")
def send_message(stream_name: str, topic_name: str) -> int:
msg_id = self.send_stream_message(
self.example_user("othello"), stream_name, topic_name=topic_name
)
um = UserMessage.objects.get(user_profile=user, message_id=msg_id)
return um.id
def assert_read(user_message_id: int) -> None:
um = UserMessage.objects.get(id=user_message_id)
self.assertTrue(um.flags.read)
def assert_unread(user_message_id: int) -> None:
um = UserMessage.objects.get(id=user_message_id)
self.assertFalse(um.flags.read)
def mute_stream(stream_name: str) -> None:
stream = get_stream(stream_name, realm)
recipient = stream.recipient
subscription = Subscription.objects.get(
user_profile=user,
recipient=recipient,
)
subscription.is_muted = True
subscription.save()
def mute_topic(stream_name: str, topic_name: str) -> None:
stream = get_stream(stream_name, realm)
recipient = stream.recipient
assert recipient is not None
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name=topic_name,
)
def force_unsubscribe(stream_name: str) -> None:
"""
We don't want side effects here, since the eventual
unsubscribe path may mark messages as read, defeating
the test setup here.
"""
sub = get_subscription(stream_name, user)
sub.active = False
sub.save()
# The data setup here is kind of funny, because some of these
# conditions should not actually happen in practice going forward,
# but we may have had bad data from the past.
mute_stream("Denmark")
mute_topic("Verona", "muted_topic")
um_normal_id = send_message("Verona", "normal")
um_muted_topic_id = send_message("Verona", "muted_topic")
um_muted_stream_id = send_message("Denmark", "whatever")
self.subscribe(user, "temporary")
um_unsubscribed_id = send_message("temporary", "whatever")
force_unsubscribe("temporary")
# Verify the setup
assert_unread(um_normal_id)
assert_unread(um_muted_topic_id)
assert_unread(um_muted_stream_id)
assert_unread(um_unsubscribed_id)
# fix unsubscribed
with connection.cursor() as cursor, self.assertLogs(
"zulip.fix_unreads", "INFO"
) as info_logs:
fix_unsubscribed(cursor, user)
self.assertEqual(info_logs.output[0], "INFO:zulip.fix_unreads:get recipients")
self.assertTrue("INFO:zulip.fix_unreads:[" in info_logs.output[1])
self.assertTrue("INFO:zulip.fix_unreads:elapsed time:" in info_logs.output[2])
self.assertEqual(
info_logs.output[3],
"INFO:zulip.fix_unreads:finding unread messages for non-active streams",
)
self.assertEqual(info_logs.output[4], "INFO:zulip.fix_unreads:rows found: 1")
self.assertTrue("INFO:zulip.fix_unreads:elapsed time:" in info_logs.output[5])
self.assertEqual(
info_logs.output[6],
"INFO:zulip.fix_unreads:fixing unread messages for non-active streams",
)
self.assertTrue("INFO:zulip.fix_unreads:elapsed time:" in info_logs.output[7])
# Muted messages don't change.
assert_unread(um_muted_topic_id)
assert_unread(um_muted_stream_id)
assert_unread(um_normal_id)
# The unsubscribed entry should change.
assert_read(um_unsubscribed_id)
with self.assertLogs("zulip.fix_unreads", "INFO") as info_logs:
# test idempotency
fix(user)
self.assertEqual(info_logs.output[0], f"INFO:zulip.fix_unreads:\n---\nFixing {user.id}:")
self.assertEqual(info_logs.output[1], "INFO:zulip.fix_unreads:get recipients")
self.assertTrue("INFO:zulip.fix_unreads:[" in info_logs.output[2])
self.assertTrue("INFO:zulip.fix_unreads:elapsed time:" in info_logs.output[3])
self.assertEqual(
info_logs.output[4],
"INFO:zulip.fix_unreads:finding unread messages for non-active streams",
)
self.assertEqual(info_logs.output[5], "INFO:zulip.fix_unreads:rows found: 0")
self.assertTrue("INFO:zulip.fix_unreads:elapsed time:" in info_logs.output[6])
assert_unread(um_normal_id)
assert_unread(um_muted_topic_id)
assert_unread(um_muted_stream_id)
assert_read(um_unsubscribed_id)
class PushNotificationMarkReadFlowsTest(ZulipTestCase):
def get_mobile_push_notification_ids(self, user_profile: UserProfile) -> List[int]:
return list(
UserMessage.objects.filter(
user_profile=user_profile,
)
.extra(
where=[UserMessage.where_active_push_notification()],
)
.order_by("message_id")
.values_list("message_id", flat=True)
)
@mock.patch("zerver.lib.push_notifications.push_notifications_enabled", return_value=True)
def test_track_active_mobile_push_notifications(
self, mock_push_notifications: mock.MagicMock
) -> None:
mock_push_notifications.return_value = True
self.login("hamlet")
user_profile = self.example_user("hamlet")
stream = self.subscribe(user_profile, "test_stream")
second_stream = self.subscribe(user_profile, "second_stream")
property_name = "push_notifications"
result = self.api_post(
user_profile,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": property_name, "value": True, "stream_id": stream.id}]
).decode()
},
)
result = self.api_post(
user_profile,
"/api/v1/users/me/subscriptions/properties",
{
"subscription_data": orjson.dumps(
[{"property": property_name, "value": True, "stream_id": second_stream.id}]
).decode()
},
)
self.assert_json_success(result)
self.assertEqual(self.get_mobile_push_notification_ids(user_profile), [])
message_id = self.send_stream_message(
self.example_user("cordelia"), "test_stream", "hello", "test_topic"
)
second_message_id = self.send_stream_message(
self.example_user("cordelia"), "test_stream", "hello", "other_topic"
)
third_message_id = self.send_stream_message(
self.example_user("cordelia"), "second_stream", "hello", "test_topic"
)
self.assertEqual(
self.get_mobile_push_notification_ids(user_profile),
[message_id, second_message_id, third_message_id],
)
result = self.client_post(
"/json/mark_topic_as_read",
{
"stream_id": str(stream.id),
"topic_name": "test_topic",
},
)
self.assert_json_success(result)
self.assertEqual(
self.get_mobile_push_notification_ids(user_profile),
[second_message_id, third_message_id],
)
result = self.client_post(
"/json/mark_stream_as_read",
{
"stream_id": str(stream.id),
"topic_name": "test_topic",
},
)
self.assertEqual(self.get_mobile_push_notification_ids(user_profile), [third_message_id])
fourth_message_id = self.send_stream_message(
self.example_user("cordelia"), "test_stream", "hello", "test_topic"
)
self.assertEqual(
self.get_mobile_push_notification_ids(user_profile),
[third_message_id, fourth_message_id],
)
result = self.client_post("/json/mark_all_as_read", {})
self.assertEqual(self.get_mobile_push_notification_ids(user_profile), [])
mock_push_notifications.assert_called()
class GetUnreadMsgsTest(ZulipTestCase):
def mute_stream(self, user_profile: UserProfile, stream: Stream) -> None:
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscription = Subscription.objects.get(
user_profile=user_profile,
recipient=recipient,
)
subscription.is_muted = True
subscription.save()
def mute_topic(self, user_profile: UserProfile, stream_name: str, topic_name: str) -> None:
realm = user_profile.realm
stream = get_stream(stream_name, realm)
recipient = stream.recipient
assert recipient is not None
add_topic_mute(
user_profile=user_profile,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name=topic_name,
)
def test_raw_unread_stream(self) -> None:
cordelia = self.example_user("cordelia")
hamlet = self.example_user("hamlet")
realm = hamlet.realm
for stream_name in ["social", "devel", "test here"]:
self.subscribe(hamlet, stream_name)
self.subscribe(cordelia, stream_name)
all_message_ids: Set[int] = set()
message_ids = {}
tups = [
("social", "lunch"),
("test here", "bla"),
("devel", "python"),
("devel", "ruby"),
]
for stream_name, topic_name in tups:
message_ids[topic_name] = [
self.send_stream_message(
sender=cordelia,
stream_name=stream_name,
topic_name=topic_name,
)
for i in range(3)
]
all_message_ids |= set(message_ids[topic_name])
self.assert_length(all_message_ids, 12) # sanity check on test setup
self.mute_stream(
user_profile=hamlet,
stream=get_stream("test here", realm),
)
self.mute_topic(
user_profile=hamlet,
stream_name="devel",
topic_name="ruby",
)
raw_unread_data = get_raw_unread_data(
user_profile=hamlet,
)
stream_dict = raw_unread_data["stream_dict"]
self.assertEqual(
set(stream_dict.keys()),
all_message_ids,
)
self.assertEqual(
raw_unread_data["unmuted_stream_msgs"],
set(message_ids["python"]) | set(message_ids["lunch"]),
)
self.assertEqual(
stream_dict[message_ids["lunch"][0]],
dict(
sender_id=cordelia.id,
stream_id=get_stream("social", realm).id,
topic="lunch",
),
)
def test_raw_unread_huddle(self) -> None:
cordelia = self.example_user("cordelia")
othello = self.example_user("othello")
hamlet = self.example_user("hamlet")
prospero = self.example_user("prospero")
huddle1_message_ids = [
self.send_huddle_message(
cordelia,
[hamlet, othello],
)
for i in range(3)
]
huddle2_message_ids = [
self.send_huddle_message(
cordelia,
[hamlet, prospero],
)
for i in range(3)
]
raw_unread_data = get_raw_unread_data(
user_profile=hamlet,
)
huddle_dict = raw_unread_data["huddle_dict"]
self.assertEqual(
set(huddle_dict.keys()),
set(huddle1_message_ids) | set(huddle2_message_ids),
)
huddle_string = ",".join(str(uid) for uid in sorted([cordelia.id, hamlet.id, othello.id]))
self.assertEqual(
huddle_dict[huddle1_message_ids[0]],
dict(user_ids_string=huddle_string),
)
def test_raw_unread_personal(self) -> None:
cordelia = self.example_user("cordelia")
othello = self.example_user("othello")
hamlet = self.example_user("hamlet")
cordelia_pm_message_ids = [self.send_personal_message(cordelia, hamlet) for i in range(3)]
othello_pm_message_ids = [self.send_personal_message(othello, hamlet) for i in range(3)]
raw_unread_data = get_raw_unread_data(
user_profile=hamlet,
)
pm_dict = raw_unread_data["pm_dict"]
self.assertEqual(
set(pm_dict.keys()),
set(cordelia_pm_message_ids) | set(othello_pm_message_ids),
)
self.assertEqual(
pm_dict[cordelia_pm_message_ids[0]],
dict(sender_id=cordelia.id),
)
def test_raw_unread_personal_from_self(self) -> None:
hamlet = self.example_user("hamlet")
def send_unread_pm(other_user: UserProfile) -> Message:
# It is rare to send a message from Hamlet to Othello
# (or any other user) and have it be unread for
# Hamlet himself, but that is actually normal
# behavior for most API clients.
message_id = self.send_personal_message(
from_user=hamlet,
to_user=other_user,
sending_client_name="some_api_program",
)
# Check our test setup is correct--the message should
# not have looked like it was sent by a human.
message = Message.objects.get(id=message_id)
self.assertFalse(message.sent_by_human())
# And since it was not sent by a human, it should not
# be read, not even by the sender (Hamlet).
um = UserMessage.objects.get(
user_profile_id=hamlet.id,
message_id=message_id,
)
self.assertFalse(um.flags.read)
return message
othello = self.example_user("othello")
othello_msg = send_unread_pm(other_user=othello)
# And now check the unread data structure...
raw_unread_data = get_raw_unread_data(
user_profile=hamlet,
)
pm_dict = raw_unread_data["pm_dict"]
self.assertEqual(set(pm_dict.keys()), {othello_msg.id})
        # For legacy reasons we call the field `sender_id` here,
# but it really refers to the other user id in the conversation,
# which is Othello.
self.assertEqual(
pm_dict[othello_msg.id],
dict(sender_id=othello.id),
)
cordelia = self.example_user("cordelia")
cordelia_msg = send_unread_pm(other_user=cordelia)
apply_unread_message_event(
user_profile=hamlet,
state=raw_unread_data,
message=MessageDict.wide_dict(cordelia_msg),
flags=[],
)
self.assertEqual(
set(pm_dict.keys()),
{othello_msg.id, cordelia_msg.id},
)
# Again, `sender_id` is misnamed here.
self.assertEqual(
pm_dict[cordelia_msg.id],
dict(sender_id=cordelia.id),
)
# Send a message to ourself.
hamlet_msg = send_unread_pm(other_user=hamlet)
apply_unread_message_event(
user_profile=hamlet,
state=raw_unread_data,
message=MessageDict.wide_dict(hamlet_msg),
flags=[],
)
self.assertEqual(
set(pm_dict.keys()),
{othello_msg.id, cordelia_msg.id, hamlet_msg.id},
)
# Again, `sender_id` is misnamed here.
self.assertEqual(
pm_dict[hamlet_msg.id],
dict(sender_id=hamlet.id),
)
# Call get_raw_unread_data again.
raw_unread_data = get_raw_unread_data(
user_profile=hamlet,
)
pm_dict = raw_unread_data["pm_dict"]
self.assertEqual(
set(pm_dict.keys()),
{othello_msg.id, cordelia_msg.id, hamlet_msg.id},
)
# Again, `sender_id` is misnamed here.
self.assertEqual(
pm_dict[hamlet_msg.id],
dict(sender_id=hamlet.id),
)
def test_unread_msgs(self) -> None:
sender = self.example_user("cordelia")
sender_id = sender.id
user_profile = self.example_user("hamlet")
othello = self.example_user("othello")
pm1_message_id = self.send_personal_message(sender, user_profile, "hello1")
pm2_message_id = self.send_personal_message(sender, user_profile, "hello2")
muted_stream = self.subscribe(user_profile, "Muted stream")
self.mute_stream(user_profile, muted_stream)
self.mute_topic(user_profile, "Denmark", "muted-topic")
stream_message_id = self.send_stream_message(sender, "Denmark", "hello")
muted_stream_message_id = self.send_stream_message(sender, "Muted stream", "hello")
muted_topic_message_id = self.send_stream_message(
sender,
"Denmark",
topic_name="muted-topic",
content="hello",
)
huddle_message_id = self.send_huddle_message(
sender,
[user_profile, othello],
"hello3",
)
def get_unread_data() -> UnreadMessagesResult:
raw_unread_data = get_raw_unread_data(user_profile)
aggregated_data = aggregate_unread_data(raw_unread_data)
return aggregated_data
with mock.patch("zerver.lib.message.MAX_UNREAD_MESSAGES", 4):
result = get_unread_data()
self.assertEqual(result["count"], 2)
self.assertTrue(result["old_unreads_missing"])
result = get_unread_data()
# The count here reflects the count of unread messages that we will
# report to users in the bankruptcy dialog, and for now it excludes unread messages
        # from muted streams, but it doesn't exclude unread messages from muted topics yet.
self.assertEqual(result["count"], 4)
self.assertFalse(result["old_unreads_missing"])
unread_pm = result["pms"][0]
self.assertEqual(unread_pm["sender_id"], sender_id)
self.assertEqual(unread_pm["unread_message_ids"], [pm1_message_id, pm2_message_id])
self.assertTrue("sender_ids" not in unread_pm)
unread_stream = result["streams"][0]
self.assertEqual(unread_stream["stream_id"], get_stream("Denmark", user_profile.realm).id)
self.assertEqual(unread_stream["topic"], "muted-topic")
self.assertEqual(unread_stream["unread_message_ids"], [muted_topic_message_id])
self.assertEqual(unread_stream["sender_ids"], [sender_id])
unread_stream = result["streams"][1]
self.assertEqual(unread_stream["stream_id"], get_stream("Denmark", user_profile.realm).id)
self.assertEqual(unread_stream["topic"], "test")
self.assertEqual(unread_stream["unread_message_ids"], [stream_message_id])
self.assertEqual(unread_stream["sender_ids"], [sender_id])
unread_stream = result["streams"][2]
self.assertEqual(
unread_stream["stream_id"], get_stream("Muted stream", user_profile.realm).id
)
self.assertEqual(unread_stream["topic"], "test")
self.assertEqual(unread_stream["unread_message_ids"], [muted_stream_message_id])
self.assertEqual(unread_stream["sender_ids"], [sender_id])
huddle_string = ",".join(
str(uid) for uid in sorted([sender_id, user_profile.id, othello.id])
)
unread_huddle = result["huddles"][0]
self.assertEqual(unread_huddle["user_ids_string"], huddle_string)
self.assertEqual(unread_huddle["unread_message_ids"], [huddle_message_id])
self.assertTrue("sender_ids" not in unread_huddle)
self.assertEqual(result["mentions"], [])
um = UserMessage.objects.get(
user_profile_id=user_profile.id,
message_id=stream_message_id,
)
um.flags |= UserMessage.flags.mentioned
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [stream_message_id])
um.flags = UserMessage.flags.has_alert_word
um.save()
result = get_unread_data()
# TODO: This should change when we make alert words work better.
self.assertEqual(result["mentions"], [])
um.flags = UserMessage.flags.wildcard_mentioned
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [stream_message_id])
um.flags = 0
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [])
# Test with a muted stream
um = UserMessage.objects.get(
user_profile_id=user_profile.id,
message_id=muted_stream_message_id,
)
um.flags = UserMessage.flags.mentioned
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [muted_stream_message_id])
um.flags = UserMessage.flags.has_alert_word
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [])
um.flags = UserMessage.flags.wildcard_mentioned
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [])
um.flags = 0
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [])
# Test with a muted topic
um = UserMessage.objects.get(
user_profile_id=user_profile.id,
message_id=muted_topic_message_id,
)
um.flags = UserMessage.flags.mentioned
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [muted_topic_message_id])
um.flags = UserMessage.flags.has_alert_word
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [])
um.flags = UserMessage.flags.wildcard_mentioned
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [])
um.flags = 0
um.save()
result = get_unread_data()
self.assertEqual(result["mentions"], [])
class MessageAccessTests(ZulipTestCase):
def test_update_invalid_flags(self) -> None:
message = self.send_personal_message(
self.example_user("cordelia"),
self.example_user("hamlet"),
"hello",
)
self.login("hamlet")
result = self.client_post(
"/json/messages/flags",
{"messages": orjson.dumps([message]).decode(), "op": "add", "flag": "invalid"},
)
self.assert_json_error(result, "Invalid flag: 'invalid'")
result = self.client_post(
"/json/messages/flags",
{"messages": orjson.dumps([message]).decode(), "op": "add", "flag": "is_private"},
)
self.assert_json_error(result, "Invalid flag: 'is_private'")
result = self.client_post(
"/json/messages/flags",
{
"messages": orjson.dumps([message]).decode(),
"op": "add",
"flag": "active_mobile_push_notification",
},
)
self.assert_json_error(result, "Invalid flag: 'active_mobile_push_notification'")
result = self.client_post(
"/json/messages/flags",
{"messages": orjson.dumps([message]).decode(), "op": "add", "flag": "mentioned"},
)
self.assert_json_error(result, "Flag not editable: 'mentioned'")
result = self.client_post(
"/json/messages/flags",
{"messages": orjson.dumps([message]).decode(), "op": "bogus", "flag": "starred"},
)
self.assert_json_error(result, "Invalid message flag operation: 'bogus'")
def change_star(self, messages: List[int], add: bool = True, **kwargs: Any) -> HttpResponse:
return self.client_post(
"/json/messages/flags",
{
"messages": orjson.dumps(messages).decode(),
"op": "add" if add else "remove",
"flag": "starred",
},
**kwargs,
)
def test_change_star(self) -> None:
"""
You can set a message as starred/un-starred through
POST /json/messages/flags.
"""
self.login("hamlet")
message_ids = [
self.send_personal_message(
self.example_user("hamlet"), self.example_user("hamlet"), "test"
)
]
# Star a message.
result = self.change_star(message_ids)
self.assert_json_success(result)
for msg in self.get_messages():
if msg["id"] in message_ids:
check_flags(msg["flags"], {"starred"})
else:
check_flags(msg["flags"], {"read"})
# Remove the stars.
result = self.change_star(message_ids, False)
self.assert_json_success(result)
for msg in self.get_messages():
if msg["id"] in message_ids:
check_flags(msg["flags"], set())
def test_change_star_public_stream_historical(self) -> None:
"""
You can set a message as starred/un-starred through
POST /json/messages/flags.
"""
stream_name = "new_stream"
self.subscribe(self.example_user("hamlet"), stream_name)
self.login("hamlet")
message_ids = [
self.send_stream_message(self.example_user("hamlet"), stream_name, "test"),
]
# Send a second message so we can verify it isn't modified
other_message_ids = [
self.send_stream_message(self.example_user("hamlet"), stream_name, "test_unused"),
]
received_message_ids = [
self.send_personal_message(
self.example_user("hamlet"),
self.example_user("cordelia"),
"test_received",
),
]
# Now login as another user who wasn't on that stream
self.login("cordelia")
# Send a message to yourself to make sure we have at least one with the read flag
sent_message_ids = [
self.send_personal_message(
self.example_user("cordelia"),
self.example_user("cordelia"),
"test_read_message",
),
]
result = self.client_post(
"/json/messages/flags",
{"messages": orjson.dumps(sent_message_ids).decode(), "op": "add", "flag": "read"},
)
# We can't change flags other than "starred" on historical messages:
result = self.client_post(
"/json/messages/flags",
{"messages": orjson.dumps(message_ids).decode(), "op": "add", "flag": "read"},
)
self.assert_json_error(result, "Invalid message(s)")
# Trying to change a list of more than one historical message fails
result = self.change_star(message_ids * 2)
self.assert_json_error(result, "Invalid message(s)")
# Confirm that one can change the historical flag now
result = self.change_star(message_ids)
self.assert_json_success(result)
for msg in self.get_messages():
if msg["id"] in message_ids:
check_flags(msg["flags"], {"starred", "historical", "read"})
elif msg["id"] in received_message_ids:
check_flags(msg["flags"], set())
else:
check_flags(msg["flags"], {"read"})
self.assertNotIn(msg["id"], other_message_ids)
result = self.change_star(message_ids, False)
self.assert_json_success(result)
# But it still doesn't work if you're in another realm
user = self.mit_user("sipbtest")
self.login_user(user)
result = self.change_star(message_ids, subdomain="zephyr")
self.assert_json_error(result, "Invalid message(s)")
def test_change_star_private_message_security(self) -> None:
"""
You can set a message as starred/un-starred through
POST /json/messages/flags.
"""
self.login("hamlet")
message_ids = [
self.send_personal_message(
self.example_user("hamlet"),
self.example_user("hamlet"),
"test",
),
]
# Starring private messages you didn't receive fails.
self.login("cordelia")
result = self.change_star(message_ids)
self.assert_json_error(result, "Invalid message(s)")
def test_change_star_private_stream_security(self) -> None:
stream_name = "private_stream"
self.make_stream(stream_name, invite_only=True)
self.subscribe(self.example_user("hamlet"), stream_name)
self.login("hamlet")
message_ids = [
self.send_stream_message(self.example_user("hamlet"), stream_name, "test"),
]
# Starring private stream messages you received works
result = self.change_star(message_ids)
self.assert_json_success(result)
# Starring private stream messages you didn't receive fails.
self.login("cordelia")
result = self.change_star(message_ids)
self.assert_json_error(result, "Invalid message(s)")
stream_name = "private_stream_2"
self.make_stream(stream_name, invite_only=True, history_public_to_subscribers=True)
self.subscribe(self.example_user("hamlet"), stream_name)
self.login("hamlet")
message_ids = [
self.send_stream_message(self.example_user("hamlet"), stream_name, "test"),
]
# With stream.history_public_to_subscribers = True, you still
# can't see it if you didn't receive the message and are
# not subscribed.
self.login("cordelia")
result = self.change_star(message_ids)
self.assert_json_error(result, "Invalid message(s)")
# But if you subscribe, then you can star the message
self.subscribe(self.example_user("cordelia"), stream_name)
result = self.change_star(message_ids)
self.assert_json_success(result)
def test_new_message(self) -> None:
"""
New messages aren't starred.
"""
sender = self.example_user("hamlet")
self.login_user(sender)
content = "Test message for star"
self.send_stream_message(sender, "Verona", content=content)
sent_message = (
UserMessage.objects.filter(
user_profile=self.example_user("hamlet"),
)
.order_by("id")
.reverse()[0]
)
self.assertEqual(sent_message.message.content, content)
self.assertFalse(sent_message.flags.starred)
def test_change_star_public_stream_security_for_guest_user(self) -> None:
        # Guest user can't access (star) unsubscribed public stream messages
normal_user = self.example_user("hamlet")
stream_name = "public_stream"
self.make_stream(stream_name)
self.subscribe(normal_user, stream_name)
self.login_user(normal_user)
message_id = [
self.send_stream_message(normal_user, stream_name, "test 1"),
]
guest_user = self.example_user("polonius")
self.login_user(guest_user)
result = self.change_star(message_id)
self.assert_json_error(result, "Invalid message(s)")
# Subscribed guest users can access public stream messages sent before they join
self.subscribe(guest_user, stream_name)
result = self.change_star(message_id)
self.assert_json_success(result)
# And messages sent after they join
self.login_user(normal_user)
message_id = [
self.send_stream_message(normal_user, stream_name, "test 2"),
]
self.login_user(guest_user)
result = self.change_star(message_id)
self.assert_json_success(result)
def test_change_star_private_stream_security_for_guest_user(self) -> None:
        # Guest users can't access (star) unsubscribed private stream messages
normal_user = self.example_user("hamlet")
stream_name = "private_stream"
stream = self.make_stream(stream_name, invite_only=True)
self.subscribe(normal_user, stream_name)
self.login_user(normal_user)
message_id = [
self.send_stream_message(normal_user, stream_name, "test 1"),
]
guest_user = self.example_user("polonius")
self.login_user(guest_user)
result = self.change_star(message_id)
self.assert_json_error(result, "Invalid message(s)")
# Guest user can't access messages of subscribed private streams if
# history is not public to subscribers
self.subscribe(guest_user, stream_name)
result = self.change_star(message_id)
self.assert_json_error(result, "Invalid message(s)")
# Guest user can access messages of subscribed private streams if
# history is public to subscribers
do_change_stream_invite_only(stream, True, history_public_to_subscribers=True)
result = self.change_star(message_id)
self.assert_json_success(result)
# With history not public to subscribers, they can still see new messages
do_change_stream_invite_only(stream, True, history_public_to_subscribers=False)
self.login_user(normal_user)
message_id = [
self.send_stream_message(normal_user, stream_name, "test 2"),
]
self.login_user(guest_user)
result = self.change_star(message_id)
self.assert_json_success(result)
def test_bulk_access_messages_private_stream(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
stream_name = "private_stream"
stream = self.make_stream(
stream_name, invite_only=True, history_public_to_subscribers=False
)
self.subscribe(user, stream_name)
# Send a message before subscribing a new user to stream
message_one_id = self.send_stream_message(user, stream_name, "Message one")
later_subscribed_user = self.example_user("cordelia")
        # Subscribe a new user to the private stream with protected history
self.subscribe(later_subscribed_user, stream_name)
# Send a message after subscribing a new user to stream
message_two_id = self.send_stream_message(user, stream_name, "Message two")
message_ids = [message_one_id, message_two_id]
messages = [
Message.objects.select_related().get(id=message_id) for message_id in message_ids
]
with queries_captured() as queries:
filtered_messages = bulk_access_messages(later_subscribed_user, messages, stream=stream)
self.assert_length(queries, 2)
        # The message sent before subscribing isn't accessible to the later
        # subscribed user because the stream has protected history
self.assert_length(filtered_messages, 1)
self.assertEqual(filtered_messages[0].id, message_two_id)
do_change_stream_invite_only(stream, True, history_public_to_subscribers=True)
with queries_captured() as queries:
filtered_messages = bulk_access_messages(later_subscribed_user, messages, stream=stream)
self.assert_length(queries, 2)
        # The message sent before subscribing is accessible to the user because
        # the stream no longer has protected history
self.assert_length(filtered_messages, 2)
        # Testing message accessibility for an unsubscribed user
unsubscribed_user = self.example_user("ZOE")
with queries_captured() as queries:
filtered_messages = bulk_access_messages(unsubscribed_user, messages, stream=stream)
self.assert_length(queries, 2)
self.assert_length(filtered_messages, 0)
        # Verify an exception is raised if the passed stream does not
        # match the messages.
with self.assertRaises(AssertionError):
bulk_access_messages(
unsubscribed_user, messages, stream=get_stream("Denmark", unsubscribed_user.realm)
)
def test_bulk_access_messages_public_stream(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
        # Testing message accessibility including a public stream message
stream_name = "public_stream"
stream = self.subscribe(user, stream_name)
message_one_id = self.send_stream_message(user, stream_name, "Message one")
later_subscribed_user = self.example_user("cordelia")
self.subscribe(later_subscribed_user, stream_name)
# Send a message after subscribing a new user to stream
message_two_id = self.send_stream_message(user, stream_name, "Message two")
message_ids = [message_one_id, message_two_id]
messages = [
Message.objects.select_related().get(id=message_id) for message_id in message_ids
]
# All public stream messages are always accessible
with queries_captured() as queries:
filtered_messages = bulk_access_messages(later_subscribed_user, messages, stream=stream)
self.assert_length(filtered_messages, 2)
self.assert_length(queries, 2)
unsubscribed_user = self.example_user("ZOE")
with queries_captured() as queries:
filtered_messages = bulk_access_messages(unsubscribed_user, messages, stream=stream)
self.assert_length(filtered_messages, 2)
self.assert_length(queries, 2)
class PersonalMessagesFlagTest(ZulipTestCase):
def test_is_private_flag_not_leaked(self) -> None:
"""
Make sure `is_private` flag is not leaked to the API.
"""
self.login("hamlet")
self.send_personal_message(
self.example_user("hamlet"), self.example_user("cordelia"), "test"
)
for msg in self.get_messages():
self.assertNotIn("is_private", msg["flags"])
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Apache Beam operators."""
import copy
from abc import ABCMeta
from contextlib import ExitStack
from typing import Callable, List, Optional, Tuple, Union
from airflow.models import BaseOperator
from airflow.providers.apache.beam.hooks.beam import BeamHook, BeamRunnerType
from airflow.providers.google.cloud.hooks.dataflow import (
DataflowHook,
process_line_and_extract_dataflow_job_id_callback,
)
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.operators.dataflow import CheckJobRunning, DataflowConfiguration
from airflow.utils.helpers import convert_camel_to_snake
from airflow.version import version
class BeamDataflowMixin(metaclass=ABCMeta):
"""
    Helper class to store common, Dataflow-specific logic for both
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunPythonPipelineOperator` and
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`.
"""
dataflow_hook: Optional[DataflowHook]
dataflow_config: Optional[DataflowConfiguration]
def _set_dataflow(
self, pipeline_options: dict, job_name_variable_key: Optional[str] = None
) -> Tuple[str, dict, Callable[[str], None]]:
self.dataflow_hook = self.__set_dataflow_hook()
self.dataflow_config.project_id = self.dataflow_config.project_id or self.dataflow_hook.project_id
dataflow_job_name = self.__get_dataflow_job_name()
pipeline_options = self.__get_dataflow_pipeline_options(
pipeline_options, dataflow_job_name, job_name_variable_key
)
process_line_callback = self.__get_dataflow_process_callback()
return dataflow_job_name, pipeline_options, process_line_callback
def __set_dataflow_hook(self) -> DataflowHook:
self.dataflow_hook = DataflowHook(
gcp_conn_id=self.dataflow_config.gcp_conn_id or self.gcp_conn_id,
delegate_to=self.dataflow_config.delegate_to or self.delegate_to,
poll_sleep=self.dataflow_config.poll_sleep,
impersonation_chain=self.dataflow_config.impersonation_chain,
drain_pipeline=self.dataflow_config.drain_pipeline,
cancel_timeout=self.dataflow_config.cancel_timeout,
wait_until_finished=self.dataflow_config.wait_until_finished,
)
return self.dataflow_hook
def __get_dataflow_job_name(self) -> str:
return DataflowHook.build_dataflow_job_name(
self.dataflow_config.job_name, self.dataflow_config.append_job_name
)
def __get_dataflow_pipeline_options(
self, pipeline_options: dict, job_name: str, job_name_key: Optional[str] = None
) -> dict:
pipeline_options = copy.deepcopy(pipeline_options)
if job_name_key is not None:
pipeline_options[job_name_key] = job_name
pipeline_options["project"] = self.dataflow_config.project_id
pipeline_options["region"] = self.dataflow_config.location
pipeline_options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
return pipeline_options
def __get_dataflow_process_callback(self) -> Callable[[str], None]:
def set_current_dataflow_job_id(job_id):
self.dataflow_job_id = job_id
return process_line_and_extract_dataflow_job_id_callback(
on_new_job_id_callback=set_current_dataflow_job_id
)
class BeamRunPythonPipelineOperator(BaseOperator, BeamDataflowMixin):
"""
    Launches Apache Beam pipelines written in Python. Note that both
    ``default_pipeline_options`` and ``pipeline_options`` will be merged to specify pipeline
    execution parameters, and ``default_pipeline_options`` is expected to hold
    high-level options, for instance, project and zone information, which
    apply to all beam operators in the DAG.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BeamRunPythonPipelineOperator`
.. seealso::
For more detail on Apache Beam have a look at the reference:
https://beam.apache.org/documentation/
:param py_file: Reference to the python Apache Beam pipeline file.py, e.g.,
/some/local/file/path/to/your/python/pipeline/file. (templated)
:type py_file: str
    :param runner: Runner on which the pipeline will be run. By default "DirectRunner" is used.
Other possible options: DataflowRunner, SparkRunner, FlinkRunner.
See: :class:`~providers.apache.beam.hooks.beam.BeamRunnerType`
See: https://beam.apache.org/documentation/runners/capability-matrix/
:type runner: str
:param py_options: Additional python options, e.g., ["-m", "-v"].
:type py_options: list[str]
:param default_pipeline_options: Map of default pipeline options.
:type default_pipeline_options: dict
:param pipeline_options: Map of pipeline options. The argument must be a dictionary.
The values can be of different types:
* If the value is None, the single option ``--key`` (without a value) will be added.
* If the value is False, the option will be skipped.
* If the value is True, the single option ``--key`` (without a value) will be added.
* If the value is a list, the option will be repeated for each element.
If the value is ``['A', 'B']`` and the key is ``key``, then the options ``--key=A --key=B``
will be passed.
* Other value types will be replaced with their Python textual representation.
When defining labels (``labels`` option), you can also provide a dictionary.
:type pipeline_options: dict
:param py_interpreter: Python version of the Beam pipeline.
If None, this defaults to python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
:type py_interpreter: str
:param py_requirements: Additional python package(s) to install.
If a value is passed to this parameter, a new virtual environment will be created with the
additional packages installed.
You could also install the apache-beam package if it is not installed on your system, or if you
want to use a different version.
:type py_requirements: List[str]
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is not None.
:param gcp_conn_id: Optional.
The connection ID to use connecting to Google Cloud Storage if python file is on GCS.
:type gcp_conn_id: str
:param delegate_to: Optional.
The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param dataflow_config: Dataflow configuration, used when runner type is set to DataflowRunner
:type dataflow_config: Union[dict, providers.google.cloud.operators.dataflow.DataflowConfiguration]
"""
template_fields = ["py_file", "runner", "pipeline_options", "default_pipeline_options", "dataflow_config"]
template_fields_renderers = {'dataflow_config': 'json', 'pipeline_options': 'json'}
def __init__(
self,
*,
py_file: str,
runner: str = "DirectRunner",
default_pipeline_options: Optional[dict] = None,
pipeline_options: Optional[dict] = None,
py_interpreter: str = "python3",
py_options: Optional[List[str]] = None,
py_requirements: Optional[List[str]] = None,
py_system_site_packages: bool = False,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
dataflow_config: Optional[Union[DataflowConfiguration, dict]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.py_file = py_file
self.runner = runner
self.py_options = py_options or []
self.default_pipeline_options = default_pipeline_options or {}
self.pipeline_options = pipeline_options or {}
self.pipeline_options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
self.py_interpreter = py_interpreter
self.py_requirements = py_requirements
self.py_system_site_packages = py_system_site_packages
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.dataflow_config = dataflow_config or {}
self.beam_hook: Optional[BeamHook] = None
self.dataflow_hook: Optional[DataflowHook] = None
self.dataflow_job_id: Optional[str] = None
if self.dataflow_config and self.runner.lower() != BeamRunnerType.DataflowRunner.lower():
self.log.warning(
"dataflow_config is defined but runner is different than DataflowRunner (%s)", self.runner
)
def execute(self, context):
"""Execute the Apache Beam Pipeline."""
self.beam_hook = BeamHook(runner=self.runner)
pipeline_options = self.default_pipeline_options.copy()
process_line_callback: Optional[Callable] = None
is_dataflow = self.runner.lower() == BeamRunnerType.DataflowRunner.lower()
dataflow_job_name: Optional[str] = None
if isinstance(self.dataflow_config, dict):
self.dataflow_config = DataflowConfiguration(**self.dataflow_config)
if is_dataflow:
dataflow_job_name, pipeline_options, process_line_callback = self._set_dataflow(
pipeline_options=pipeline_options, job_name_variable_key="job_name"
)
pipeline_options.update(self.pipeline_options)
# Convert argument names from lowerCamelCase to snake case.
formatted_pipeline_options = {
convert_camel_to_snake(key): pipeline_options[key] for key in pipeline_options
}
with ExitStack() as exit_stack:
if self.py_file.lower().startswith("gs://"):
gcs_hook = GCSHook(self.gcp_conn_id, self.delegate_to)
tmp_gcs_file = exit_stack.enter_context(gcs_hook.provide_file(object_url=self.py_file))
self.py_file = tmp_gcs_file.name
if is_dataflow:
with self.dataflow_hook.provide_authorized_gcloud():
self.beam_hook.start_python_pipeline(
variables=formatted_pipeline_options,
py_file=self.py_file,
py_options=self.py_options,
py_interpreter=self.py_interpreter,
py_requirements=self.py_requirements,
py_system_site_packages=self.py_system_site_packages,
process_line_callback=process_line_callback,
)
self.dataflow_hook.wait_for_done(
job_name=dataflow_job_name,
location=self.dataflow_config.location,
job_id=self.dataflow_job_id,
multiple_jobs=False,
)
else:
self.beam_hook.start_python_pipeline(
variables=formatted_pipeline_options,
py_file=self.py_file,
py_options=self.py_options,
py_interpreter=self.py_interpreter,
py_requirements=self.py_requirements,
py_system_site_packages=self.py_system_site_packages,
process_line_callback=process_line_callback,
)
return {"dataflow_job_id": self.dataflow_job_id}
def on_kill(self) -> None:
if self.dataflow_hook and self.dataflow_job_id:
self.log.info('Dataflow job with id: `%s` was requested to be cancelled.', self.dataflow_job_id)
self.dataflow_hook.cancel_job(
job_id=self.dataflow_job_id,
project_id=self.dataflow_config.project_id,
)
class BeamRunJavaPipelineOperator(BaseOperator, BeamDataflowMixin):
"""
Launch Apache Beam pipelines written in Java.
Note that both
``default_pipeline_options`` and ``pipeline_options`` will be merged to specify pipeline
execution parameters, and ``default_pipeline_options`` is expected to hold
high-level pipeline_options, for instance, project and zone information, which
apply to all Apache Beam operators in the DAG.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BeamRunJavaPipelineOperator`
.. seealso::
For more detail on Apache Beam have a look at the reference:
https://beam.apache.org/documentation/
You need to pass the path to your jar file as a file reference with the ``jar``
parameter, the jar needs to be a self executing jar (see documentation here:
https://beam.apache.org/documentation/runners/dataflow/#self-executing-jar).
Use ``pipeline_options`` to pass on pipeline_options to your job.
:param jar: The reference to a self executing Apache Beam jar (templated).
:type jar: str
:param runner: Runner on which pipeline will be run. By default "DirectRunner" is used.
See:
https://beam.apache.org/documentation/runners/capability-matrix/
:type runner: str
:param job_class: The name of the Apache Beam pipeline class to be executed, it
is often not the main class configured in the pipeline jar file.
:type job_class: str
:param default_pipeline_options: Map of default job pipeline_options.
:type default_pipeline_options: dict
:param pipeline_options: Map of job-specific pipeline_options. The argument must be a dictionary.
The values can be of different types:
* If the value is None, the single option ``--key`` (without a value) will be added.
* If the value is False, the option will be skipped.
* If the value is True, the single option ``--key`` (without a value) will be added.
* If the value is a list, the option will be repeated for each element.
If the value is ``['A', 'B']`` and the key is ``key``, then the options ``--key=A --key=B``
will be passed.
* Other value types will be replaced with their Python textual representation.
When defining labels (``labels`` option), you can also provide a dictionary.
:type pipeline_options: dict
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Storage if jar is on GCS
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param dataflow_config: Dataflow configuration, used when runner type is set to DataflowRunner
:type dataflow_config: Union[dict, providers.google.cloud.operators.dataflow.DataflowConfiguration]
"""
template_fields = [
"jar",
"runner",
"job_class",
"pipeline_options",
"default_pipeline_options",
"dataflow_config",
]
template_fields_renderers = {'dataflow_config': 'json', 'pipeline_options': 'json'}
ui_color = "#0273d4"
def __init__(
self,
*,
jar: str,
runner: str = "DirectRunner",
job_class: Optional[str] = None,
default_pipeline_options: Optional[dict] = None,
pipeline_options: Optional[dict] = None,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
dataflow_config: Optional[Union[DataflowConfiguration, dict]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.jar = jar
self.runner = runner
self.default_pipeline_options = default_pipeline_options or {}
self.pipeline_options = pipeline_options or {}
self.job_class = job_class
self.dataflow_config = dataflow_config or {}
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.dataflow_job_id = None
self.dataflow_hook: Optional[DataflowHook] = None
self.beam_hook: Optional[BeamHook] = None
self._dataflow_job_name: Optional[str] = None
if self.dataflow_config and self.runner.lower() != BeamRunnerType.DataflowRunner.lower():
self.log.warning(
"dataflow_config is defined but runner is different than DataflowRunner (%s)", self.runner
)
def execute(self, context):
"""Execute the Apache Beam Pipeline."""
self.beam_hook = BeamHook(runner=self.runner)
pipeline_options = self.default_pipeline_options.copy()
process_line_callback: Optional[Callable] = None
is_dataflow = self.runner.lower() == BeamRunnerType.DataflowRunner.lower()
dataflow_job_name: Optional[str] = None
if isinstance(self.dataflow_config, dict):
self.dataflow_config = DataflowConfiguration(**self.dataflow_config)
if is_dataflow:
dataflow_job_name, pipeline_options, process_line_callback = self._set_dataflow(
pipeline_options=pipeline_options, job_name_variable_key=None
)
pipeline_options.update(self.pipeline_options)
with ExitStack() as exit_stack:
if self.jar.lower().startswith("gs://"):
gcs_hook = GCSHook(self.gcp_conn_id, self.delegate_to)
tmp_gcs_file = exit_stack.enter_context(gcs_hook.provide_file(object_url=self.jar))
self.jar = tmp_gcs_file.name
if is_dataflow:
is_running = False
if self.dataflow_config.check_if_running != CheckJobRunning.IgnoreJob:
is_running = (
# The reason for disable=no-value-for-parameter is that the project_id parameter is
# required but is not passed here; moreover, it cannot be passed here.
# This method is wrapped by the @_fallback_to_project_id_from_variables decorator, which
# falls back to the project_id value from variables and raises an error if project_id is
# defined both in variables and as a parameter (here it is already defined in variables).
self.dataflow_hook.is_job_dataflow_running(
name=self.dataflow_config.job_name,
variables=pipeline_options,
)
)
while is_running and self.dataflow_config.check_if_running == CheckJobRunning.WaitForRun:
# The reason for disable=no-value-for-parameter is that the project_id parameter is
# required but is not passed here; moreover, it cannot be passed here.
# This method is wrapped by the @_fallback_to_project_id_from_variables decorator, which
# falls back to the project_id value from variables and raises an error if project_id is
# defined both in variables and as a parameter (here it is already defined in variables).
is_running = self.dataflow_hook.is_job_dataflow_running(
name=self.dataflow_config.job_name,
variables=pipeline_options,
)
if not is_running:
pipeline_options["jobName"] = dataflow_job_name
with self.dataflow_hook.provide_authorized_gcloud():
self.beam_hook.start_java_pipeline(
variables=pipeline_options,
jar=self.jar,
job_class=self.job_class,
process_line_callback=process_line_callback,
)
self.dataflow_hook.wait_for_done(
job_name=dataflow_job_name,
location=self.dataflow_config.location,
job_id=self.dataflow_job_id,
multiple_jobs=self.dataflow_config.multiple_jobs,
project_id=self.dataflow_config.project_id,
)
else:
self.beam_hook.start_java_pipeline(
variables=pipeline_options,
jar=self.jar,
job_class=self.job_class,
process_line_callback=process_line_callback,
)
return {"dataflow_job_id": self.dataflow_job_id}
def on_kill(self) -> None:
if self.dataflow_hook and self.dataflow_job_id:
self.log.info('Dataflow job with id: `%s` was requested to be cancelled.', self.dataflow_job_id)
self.dataflow_hook.cancel_job(
job_id=self.dataflow_job_id,
project_id=self.dataflow_config.project_id,
)
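# Hedged usage sketch, not part of the original file; the DAG, task id and GCS
# paths below are placeholders for illustration only.
#
#   from airflow import DAG
#   from airflow.utils.dates import days_ago
#
#   with DAG("example_beam", start_date=days_ago(1), schedule_interval=None) as dag:
#       start_python_pipeline = BeamRunPythonPipelineOperator(
#           task_id="start_python_pipeline",
#           py_file="gs://my-bucket/wordcount.py",
#           runner="DirectRunner",
#           pipeline_options={"output": "gs://my-bucket/counts"},
#       )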
|
|
""" Comverted from matlab code
Source: http://www.robots.ox.ac.uk/~fwood/teaching/AIMS_CDT_ML_2015/homework/HW_1_sum_product/
main_big_alarm_net
This file creates the graph structure needed for the big alarm network.
Creating the graph structure includes creating all nodes, placing the
nodes into the stucture with the appropriate neighbors and assigning
probability tables to the factor nodes. At the very end of this main
file is a for loop which is documented. That for loop executes the
inference algorithm accross the graph. At initialization, every variable
node in the graph is unobserved. If you wish to do inference with some
of the nodes observed you will need to indicate the observed variables
before the inference procedure is executed."""
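# Convention used by the factor tables below: the first axis of each array
# indexes the states of the node's own variable, while the remaining axes index
# the states of its parent variables, so each column (for a fixed choice of
# parent states) sums to 1.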
import numpy as np
from classes import Factor, VariableNode, FactorNode
f = np.array([ 0.2, 0.8])
fact = Factor(f)
vn_Hypovolemia = VariableNode('vn:Hypovolemia', 2)
fn_Hypovolemia = FactorNode('fn:Hypovolemia', fact)
vn_Hypovolemia.addNode(fn_Hypovolemia)
fn_Hypovolemia.addNode(vn_Hypovolemia)
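# The same pattern repeats for every node below: build a Factor holding the
# (conditional) probability table, create one VariableNode and one FactorNode,
# and register each as the other's neighbor via addNode() in both directions.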
# now we do the next node
f = np.zeros((2, 1))
f[:, 0] = [0.05, 0.95]
fact = Factor(f)
vn_LVFailure = VariableNode('vn:LVFailure', 2)
fn_LVFailure = FactorNode('fn:LVFailure', fact)
vn_LVFailure.addNode(fn_LVFailure)
fn_LVFailure.addNode(vn_LVFailure)
# now we do the next node
f = np.zeros((3, 2, 2))
f[:, 0, 0] = [ 0.95, 0.04, 0.01]
f[:, 0, 1] = [ 0.98, 0.01, 0.01]
f[:, 1, 0] = [ 0.01, 0.09, 0.9]
f[:, 1, 1] = [ 0.05, 0.9, 0.05]
fact = Factor(f)
vn_LVEDVolume = VariableNode('vn:LVEDVolume', 3)
fn_LVEDVolume_Hypovolemia_LVFailure = FactorNode('fn:LVEDVolume_Hypovolemia_LVFailure', fact)
vn_LVEDVolume.addNode(fn_LVEDVolume_Hypovolemia_LVFailure)
fn_LVEDVolume_Hypovolemia_LVFailure.addNode(vn_LVEDVolume)
vn_Hypovolemia.addNode(fn_LVEDVolume_Hypovolemia_LVFailure)
fn_LVEDVolume_Hypovolemia_LVFailure.addNode(vn_Hypovolemia)
vn_LVFailure.addNode(fn_LVEDVolume_Hypovolemia_LVFailure)
fn_LVEDVolume_Hypovolemia_LVFailure.addNode(vn_LVFailure)
# now we do the next node
f = np.zeros((3, 2, 2))
f[:, 0, 0] = [ 0.98, 0.01, 0.01]
f[:, 0, 1] = [ 0.5, 0.49, 0.01]
f[:, 1, 0] = [ 0.95, 0.04, 0.01]
f[:, 1, 1] = [ 0.05, 0.9, 0.05]
fact = Factor(f)
vn_StrokeVolume = VariableNode('vn:StrokeVolume', 3)
fn_StrokeVolume_LVFailure_Hypovolemia = FactorNode('fn:StrokeVolume_LVFailure_Hypovolemia', fact)
vn_StrokeVolume.addNode(fn_StrokeVolume_LVFailure_Hypovolemia)
fn_StrokeVolume_LVFailure_Hypovolemia.addNode(vn_StrokeVolume)
vn_LVFailure.addNode(fn_StrokeVolume_LVFailure_Hypovolemia)
fn_StrokeVolume_LVFailure_Hypovolemia.addNode(vn_LVFailure)
vn_Hypovolemia.addNode(fn_StrokeVolume_LVFailure_Hypovolemia)
fn_StrokeVolume_LVFailure_Hypovolemia.addNode(vn_Hypovolemia)
# now we do the next node
f = np.zeros((3, 3))
f[:, 0] = [ 0.95, 0.04, 0.01]
f[:, 1] = [ 0.04, 0.95, 0.01]
f[:, 2] = [ 0.01, 0.29, 0.7]
fact = Factor(f)
vn_CVP = VariableNode('vn:CVP', 3)
fn_CVP_LVEDVolume = FactorNode('fn:CVP_LVEDVolume', fact)
vn_CVP.addNode(fn_CVP_LVEDVolume)
fn_CVP_LVEDVolume.addNode(vn_CVP)
vn_LVEDVolume.addNode(fn_CVP_LVEDVolume)
fn_CVP_LVEDVolume.addNode(vn_LVEDVolume)
# now we do the next node
f = np.zeros((3, 3))
f[:, 0] = [ 0.95, 0.04, 0.01]
f[:, 1] = [ 0.04, 0.95, 0.01]
f[:, 2] = [ 0.01, 0.04, 0.95]
fact = Factor(f)
vn_PCWP = VariableNode('vn:PCWP', 3)
fn_PCWP_LVEDVolume = FactorNode('fn:PCWP_LVEDVolume', fact)
vn_PCWP.addNode(fn_PCWP_LVEDVolume)
fn_PCWP_LVEDVolume.addNode(vn_PCWP)
vn_LVEDVolume.addNode(fn_PCWP_LVEDVolume)
fn_PCWP_LVEDVolume.addNode(vn_LVEDVolume)
# now we do the next node
f = np.zeros((2, 1))
f[:, 0] = [ 0.2, 0.8]
fact = Factor(f)
vn_InsuffAnesth = VariableNode('vn:InsuffAnesth', 2)
fn_InsuffAnesth = FactorNode('fn:InsuffAnesth', fact)
vn_InsuffAnesth.addNode(fn_InsuffAnesth)
fn_InsuffAnesth.addNode(vn_InsuffAnesth)
# now we do the next node
f = np.zeros((2, 1))
f[:, 0] = [ 0.01, 0.99]
fact = Factor(f)
vn_PulmEmbolus = VariableNode('vn:PulmEmbolus', 2)
fn_PulmEmbolus = FactorNode('fn:PulmEmbolus', fact)
vn_PulmEmbolus.addNode(fn_PulmEmbolus)
fn_PulmEmbolus.addNode(vn_PulmEmbolus)
# now we do the next node
f = np.zeros((3, 1))
f[:, 0] = [ 0.92, 0.03, 0.05]
fact = Factor(f)
vn_Intubation = VariableNode('vn:Intubation', 3)
fn_Intubation = FactorNode('fn:Intubation', fact)
vn_Intubation.addNode(fn_Intubation)
fn_Intubation.addNode(vn_Intubation)
# now we do the next node
f = np.zeros((2, 2, 3))
f[:, 0, 0] = [ 0.1, 0.9]
f[:, 0, 1] = [ 0.1, 0.9]
f[:, 0, 2] = [ 0.01, 0.99]
f[:, 1, 0] = [ 0.95, 0.05]
f[:, 1, 1] = [ 0.95, 0.05]
f[:, 1, 2] = [ 0.05, 0.95]
fact = Factor(f)
vn_Shunt = VariableNode('vn:Shunt', 2)
fn_Shunt_PulmEmbolus_Intubation = FactorNode('fn:Shunt_PulmEmbolus_Intubation', fact)
vn_Shunt.addNode(fn_Shunt_PulmEmbolus_Intubation)
fn_Shunt_PulmEmbolus_Intubation.addNode(vn_Shunt)
vn_PulmEmbolus.addNode(fn_Shunt_PulmEmbolus_Intubation)
fn_Shunt_PulmEmbolus_Intubation.addNode(vn_PulmEmbolus)
vn_Intubation.addNode(fn_Shunt_PulmEmbolus_Intubation)
fn_Shunt_PulmEmbolus_Intubation.addNode(vn_Intubation)
# now we do the next node
f = np.zeros((2, 1))
f[:, 0] = [ 0.04, 0.96]
fact = Factor(f)
vn_KinkedTube = VariableNode('vn:KinkedTube', 2)
fn_KinkedTube = FactorNode('fn:KinkedTube', fact)
vn_KinkedTube.addNode(fn_KinkedTube)
fn_KinkedTube.addNode(vn_KinkedTube)
# now we do the next node
f = np.zeros((3, 1))
f[:, 0] = [ 0.01, 0.98, 0.01]
fact = Factor(f)
vn_MinVolSet = VariableNode('vn:MinVolSet', 3)
fn_MinVolSet = FactorNode('fn:MinVolSet', fact)
vn_MinVolSet.addNode(fn_MinVolSet)
fn_MinVolSet.addNode(vn_MinVolSet)
# now we do the next node
f = np.zeros((4, 3))
f[:, 0] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 1] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 2] = [ 0.01, 0.01, 0.01, 0.97]
fact = Factor(f)
vn_VentMach = VariableNode('vn:VentMach', 4)
fn_VentMach_MinVolSet = FactorNode('fn:VentMach_MinVolSet', fact)
vn_VentMach.addNode(fn_VentMach_MinVolSet)
fn_VentMach_MinVolSet.addNode(vn_VentMach)
vn_MinVolSet.addNode(fn_VentMach_MinVolSet)
fn_VentMach_MinVolSet.addNode(vn_MinVolSet)
# now we do the next node
f = np.zeros((2, 1))
f[:, 0] = [ 0.05, 0.95]
fact = Factor(f)
vn_Disconnect = VariableNode('vn:Disconnect', 2)
fn_Disconnect = FactorNode('fn:Disconnect', fact)
vn_Disconnect.addNode(fn_Disconnect)
fn_Disconnect.addNode(vn_Disconnect)
# now we do the next node
f = np.zeros((4, 4, 2))
f[:, 0, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 1] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 1] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 2, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 2, 1] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 3, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 3, 1] = [ 0.01, 0.01, 0.01, 0.97]
fact = Factor(f)
vn_VentTube = VariableNode('vn:VentTube', 4)
fn_VentTube_VentMach_Disconnect = FactorNode('fn:VentTube_VentMach_Disconnect', fact)
vn_VentTube.addNode(fn_VentTube_VentMach_Disconnect)
fn_VentTube_VentMach_Disconnect.addNode(vn_VentTube)
vn_VentMach.addNode(fn_VentTube_VentMach_Disconnect)
fn_VentTube_VentMach_Disconnect.addNode(vn_VentMach)
vn_Disconnect.addNode(fn_VentTube_VentMach_Disconnect)
fn_VentTube_VentMach_Disconnect.addNode(vn_Disconnect)
# now we do the next node
f = np.zeros((4, 2, 4, 3))
f[:, 0, 0, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 0, 1] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 0, 2] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 1, 0] = [ 0.95, 0.03, 0.01, 0.01]
f[:, 0, 1, 1] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 1, 2] = [ 0.95, 0.03, 0.01, 0.01]
f[:, 0, 2, 0] = [ 0.4, 0.58, 0.01, 0.01]
f[:, 0, 2, 1] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 2, 2] = [ 0.5, 0.48, 0.01, 0.01]
f[:, 0, 3, 0] = [ 0.3, 0.68, 0.01, 0.01]
f[:, 0, 3, 1] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 3, 2] = [ 0.3, 0.68, 0.01, 0.01]
f[:, 1, 0, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 0, 1] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 0, 2] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 1, 0] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 1, 1, 1] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 1, 2] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 1, 2, 0] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 1, 2, 1] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 2, 2] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 1, 3, 0] = [ 0.01, 0.01, 0.01, 0.97]
f[:, 1, 3, 1] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 3, 2] = [ 0.01, 0.01, 0.01, 0.97]
fact = Factor(f)
vn_VentLung = VariableNode('vn:VentLung', 4)
fn_VentLung_KinkedTube_VentTube_Intubation = FactorNode('fn:VentLung_KinkedTube_VentTube_Intubation', fact)
vn_VentLung.addNode(fn_VentLung_KinkedTube_VentTube_Intubation)
fn_VentLung_KinkedTube_VentTube_Intubation.addNode(vn_VentLung)
vn_KinkedTube.addNode(fn_VentLung_KinkedTube_VentTube_Intubation)
fn_VentLung_KinkedTube_VentTube_Intubation.addNode(vn_KinkedTube)
vn_VentTube.addNode(fn_VentLung_KinkedTube_VentTube_Intubation)
fn_VentLung_KinkedTube_VentTube_Intubation.addNode(vn_VentTube)
vn_Intubation.addNode(fn_VentLung_KinkedTube_VentTube_Intubation)
fn_VentLung_KinkedTube_VentTube_Intubation.addNode(vn_Intubation)
# now we do the next node
f = np.zeros((4, 3, 4))
f[:, 0, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 1] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 0, 2] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 0, 3] = [ 0.01, 0.01, 0.01, 0.97]
f[:, 1, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 1] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 1, 2] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 1, 3] = [ 0.01, 0.01, 0.01, 0.97]
f[:, 2, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 2, 1] = [ 0.03, 0.95, 0.01, 0.01]
f[:, 2, 2] = [ 0.01, 0.94, 0.04, 0.01]
f[:, 2, 3] = [ 0.01, 0.88, 0.1, 0.01]
fact = Factor(f)
vn_VentAlv = VariableNode('vn:VentAlv', 4)
fn_VentAlv_Intubation_VentLung = FactorNode('fn:VentAlv_Intubation_VentLung', fact)
vn_VentAlv.addNode(fn_VentAlv_Intubation_VentLung)
fn_VentAlv_Intubation_VentLung.addNode(vn_VentAlv)
vn_Intubation.addNode(fn_VentAlv_Intubation_VentLung)
fn_VentAlv_Intubation_VentLung.addNode(vn_Intubation)
vn_VentLung.addNode(fn_VentAlv_Intubation_VentLung)
fn_VentAlv_Intubation_VentLung.addNode(vn_VentLung)
# now we do the next node
f = np.zeros((2, 1))
f[:, 0] = [ 0.01, 0.99]
fact = Factor(f)
vn_FiO2 = VariableNode('vn:FiO2', 2)
fn_FiO2 = FactorNode('fn:FiO2', fact)
vn_FiO2.addNode(fn_FiO2)
fn_FiO2.addNode(vn_FiO2)
# now we do the next node
f = np.zeros((3, 4, 2))
f[:, 0, 0] = [ 0.98, 0.01, 0.01]
f[:, 0, 1] = [ 0.98, 0.01, 0.01]
f[:, 1, 0] = [ 0.98, 0.01, 0.01]
f[:, 1, 1] = [ 0.98, 0.01, 0.01]
f[:, 2, 0] = [ 0.95, 0.04, 0.01]
f[:, 2, 1] = [ 0.01, 0.95, 0.04]
f[:, 3, 0] = [ 0.95, 0.04, 0.01]
f[:, 3, 1] = [ 0.01, 0.01, 0.98]
fact = Factor(f)
vn_PVSat = VariableNode('vn:PVSat', 3)
fn_PVSat_VentAlv_FiO2 = FactorNode('fn:PVSat_VentAlv_FiO2', fact)
vn_PVSat.addNode(fn_PVSat_VentAlv_FiO2)
fn_PVSat_VentAlv_FiO2.addNode(vn_PVSat)
vn_VentAlv.addNode(fn_PVSat_VentAlv_FiO2)
fn_PVSat_VentAlv_FiO2.addNode(vn_VentAlv)
vn_FiO2.addNode(fn_PVSat_VentAlv_FiO2)
fn_PVSat_VentAlv_FiO2.addNode(vn_FiO2)
# now we do the next node
f = np.zeros((3, 2, 3))
f[:, 0, 0] = [ 0.98, 0.01, 0.01]
f[:, 0, 1] = [ 0.01, 0.98, 0.01]
f[:, 0, 2] = [ 0.01, 0.01, 0.98]
f[:, 1, 0] = [ 0.98, 0.01, 0.01]
f[:, 1, 1] = [ 0.98, 0.01, 0.01]
f[:, 1, 2] = [ 0.69, 0.3, 0.01]
fact = Factor(f)
vn_SaO2 = VariableNode('vn:SaO2', 3)
fn_SaO2_Shunt_PVSat = FactorNode('fn:SaO2_Shunt_PVSat', fact)
vn_SaO2.addNode(fn_SaO2_Shunt_PVSat)
fn_SaO2_Shunt_PVSat.addNode(vn_SaO2)
vn_Shunt.addNode(fn_SaO2_Shunt_PVSat)
fn_SaO2_Shunt_PVSat.addNode(vn_Shunt)
vn_PVSat.addNode(fn_SaO2_Shunt_PVSat)
fn_SaO2_Shunt_PVSat.addNode(vn_PVSat)
# now we do the next node
f = np.zeros((2, 1))
f[:, 0] = [ 0.01, 0.99]
fact = Factor(f)
vn_Anaphylaxis = VariableNode('vn:Anaphylaxis', 2)
fn_Anaphylaxis = FactorNode('fn:Anaphylaxis', fact)
vn_Anaphylaxis.addNode(fn_Anaphylaxis)
fn_Anaphylaxis.addNode(vn_Anaphylaxis)
# now we do the next node
f = np.zeros((3, 2))
f[:, 0] = [ 0.98, 0.01, 0.01]
f[:, 1] = [ 0.3, 0.4, 0.3]
fact = Factor(f)
vn_TPR = VariableNode('vn:TPR', 3)
fn_TPR_Anaphylaxis = FactorNode('fn:TPR_Anaphylaxis', fact)
vn_TPR.addNode(fn_TPR_Anaphylaxis)
fn_TPR_Anaphylaxis.addNode(vn_TPR)
vn_Anaphylaxis.addNode(fn_TPR_Anaphylaxis)
fn_TPR_Anaphylaxis.addNode(vn_Anaphylaxis)
# now we do the next node
f = np.zeros((3, 4))
f[:, 0] = [ 0.01, 0.01, 0.98]
f[:, 1] = [ 0.01, 0.01, 0.98]
f[:, 2] = [ 0.04, 0.92, 0.04]
f[:, 3] = [ 0.9, 0.09, 0.01]
fact = Factor(f)
vn_ArtCO2 = VariableNode('vn:ArtCO2', 3)
fn_ArtCO2_VentAlv = FactorNode('fn:ArtCO2_VentAlv', fact)
vn_ArtCO2.addNode(fn_ArtCO2_VentAlv)
fn_ArtCO2_VentAlv.addNode(vn_ArtCO2)
vn_VentAlv.addNode(fn_ArtCO2_VentAlv)
fn_ArtCO2_VentAlv.addNode(vn_VentAlv)
# now we do the next node
f = np.zeros((2, 2, 3, 3, 3))
f[:, 0, 0, 0, 0] = [ 0.01, 0.99]
f[:, 0, 0, 0, 1] = [ 0.01, 0.99]
f[:, 0, 0, 0, 2] = [ 0.01, 0.99]
f[:, 0, 0, 1, 0] = [ 0.01, 0.99]
f[:, 0, 0, 1, 1] = [ 0.01, 0.99]
f[:, 0, 0, 1, 2] = [ 0.01, 0.99]
f[:, 0, 0, 2, 0] = [ 0.01, 0.99]
f[:, 0, 0, 2, 1] = [ 0.01, 0.99]
f[:, 0, 0, 2, 2] = [ 0.01, 0.99]
f[:, 0, 1, 0, 0] = [ 0.01, 0.99]
f[:, 0, 1, 0, 1] = [ 0.01, 0.99]
f[:, 0, 1, 0, 2] = [ 0.01, 0.99]
f[:, 0, 1, 1, 0] = [ 0.01, 0.99]
f[:, 0, 1, 1, 1] = [ 0.01, 0.99]
f[:, 0, 1, 1, 2] = [ 0.01, 0.99]
f[:, 0, 1, 2, 0] = [ 0.05, 0.95]
f[:, 0, 1, 2, 1] = [ 0.05, 0.95]
f[:, 0, 1, 2, 2] = [ 0.01, 0.99]
f[:, 0, 2, 0, 0] = [ 0.01, 0.99]
f[:, 0, 2, 0, 1] = [ 0.01, 0.99]
f[:, 0, 2, 0, 2] = [ 0.01, 0.99]
f[:, 0, 2, 1, 0] = [ 0.05, 0.95]
f[:, 0, 2, 1, 1] = [ 0.05, 0.95]
f[:, 0, 2, 1, 2] = [ 0.01, 0.99]
f[:, 0, 2, 2, 0] = [ 0.05, 0.95]
f[:, 0, 2, 2, 1] = [ 0.05, 0.95]
f[:, 0, 2, 2, 2] = [ 0.01, 0.99]
f[:, 1, 0, 0, 0] = [ 0.05, 0.95]
f[:, 1, 0, 0, 1] = [ 0.05, 0.95]
f[:, 1, 0, 0, 2] = [ 0.01, 0.99]
f[:, 1, 0, 1, 0] = [ 0.05, 0.95]
f[:, 1, 0, 1, 1] = [ 0.05, 0.95]
f[:, 1, 0, 1, 2] = [ 0.01, 0.99]
f[:, 1, 0, 2, 0] = [ 0.05, 0.95]
f[:, 1, 0, 2, 1] = [ 0.05, 0.95]
f[:, 1, 0, 2, 2] = [ 0.01, 0.99]
f[:, 1, 1, 0, 0] = [ 0.1, 0.9]
f[:, 1, 1, 0, 1] = [ 0.1, 0.9]
f[:, 1, 1, 0, 2] = [ 0.1, 0.9]
f[:, 1, 1, 1, 0] = [ 0.95, 0.05]
f[:, 1, 1, 1, 1] = [ 0.95, 0.05]
f[:, 1, 1, 1, 2] = [ 0.3, 0.7]
f[:, 1, 1, 2, 0] = [ 0.95, 0.05]
f[:, 1, 1, 2, 1] = [ 0.95, 0.05]
f[:, 1, 1, 2, 2] = [ 0.3, 0.7]
f[:, 1, 2, 0, 0] = [ 0.95, 0.05]
f[:, 1, 2, 0, 1] = [ 0.95, 0.05]
f[:, 1, 2, 0, 2] = [ 0.3, 0.7]
f[:, 1, 2, 1, 0] = [ 0.99, 0.00999999]
f[:, 1, 2, 1, 1] = [ 0.99, 0.00999999]
f[:, 1, 2, 1, 2] = [ 0.99, 0.00999999]
f[:, 1, 2, 2, 0] = [ 0.95, 0.05]
f[:, 1, 2, 2, 1] = [ 0.99, 0.00999999]
f[:, 1, 2, 2, 2] = [ 0.3, 0.7]
fact = Factor(f)
vn_Catechol = VariableNode('vn:Catechol', 2)
fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2 = FactorNode('fn:Catechol_InsuffAnesth_SaO2_TPR_ArtCO2', fact)
vn_Catechol.addNode(fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2)
fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2.addNode(vn_Catechol)
vn_InsuffAnesth.addNode(fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2)
fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2.addNode(vn_InsuffAnesth)
vn_SaO2.addNode(fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2)
fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2.addNode(vn_SaO2)
vn_TPR.addNode(fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2)
fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2.addNode(vn_TPR)
vn_ArtCO2.addNode(fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2)
fn_Catechol_InsuffAnesth_SaO2_TPR_ArtCO2.addNode(vn_ArtCO2)
# now we do the next node
f = np.zeros((3, 2))
f[:, 0] = [ 0.1, 0.89, 0.01]
f[:, 1] = [ 0.01, 0.09, 0.9]
fact = Factor(f)
vn_HR = VariableNode('vn:HR', 3)
fn_HR_Catechol = FactorNode('fn:HR_Catechol', fact)
vn_HR.addNode(fn_HR_Catechol)
fn_HR_Catechol.addNode(vn_HR)
vn_Catechol.addNode(fn_HR_Catechol)
fn_HR_Catechol.addNode(vn_Catechol)
# now we do the next node
f = np.zeros((3, 3, 3))
f[:, 0, 0] = [ 0.98, 0.01, 0.01]
f[:, 0, 1] = [ 0.95, 0.04, 0.01]
f[:, 0, 2] = [ 0.3, 0.69, 0.01]
f[:, 1, 0] = [ 0.95, 0.04, 0.01]
f[:, 1, 1] = [ 0.04, 0.95, 0.01]
f[:, 1, 2] = [ 0.01, 0.3, 0.69]
f[:, 2, 0] = [ 0.8, 0.19, 0.01]
f[:, 2, 1] = [ 0.01, 0.04, 0.95]
f[:, 2, 2] = [ 0.01, 0.01, 0.98]
fact = Factor(f)
vn_CO = VariableNode('vn:CO', 3)
fn_CO_HR_StrokeVolume = FactorNode('fn:CO_HR_StrokeVolume', fact)
vn_CO.addNode(fn_CO_HR_StrokeVolume)
fn_CO_HR_StrokeVolume.addNode(vn_CO)
vn_HR.addNode(fn_CO_HR_StrokeVolume)
fn_CO_HR_StrokeVolume.addNode(vn_HR)
vn_StrokeVolume.addNode(fn_CO_HR_StrokeVolume)
fn_CO_HR_StrokeVolume.addNode(vn_StrokeVolume)
# now we do the next node
f = np.zeros((2, 2))
f[:, 0] = [ 0.9, 0.1]
f[:, 1] = [ 0.01, 0.99]
fact = Factor(f)
vn_History = VariableNode('vn:History', 2)
fn_History_LVFailure = FactorNode('fn:History_LVFailure', fact)
vn_History.addNode(fn_History_LVFailure)
fn_History_LVFailure.addNode(vn_History)
vn_LVFailure.addNode(fn_History_LVFailure)
fn_History_LVFailure.addNode(vn_LVFailure)
# now we do the next node
f = np.zeros((3, 3, 3))
f[:, 0, 0] = [ 0.98, 0.01, 0.01]
f[:, 0, 1] = [ 0.98, 0.01, 0.01]
f[:, 0, 2] = [ 0.3, 0.6, 0.1]
f[:, 1, 0] = [ 0.98, 0.01, 0.01]
f[:, 1, 1] = [ 0.1, 0.85, 0.05]
f[:, 1, 2] = [ 0.05, 0.4, 0.55]
f[:, 2, 0] = [ 0.9, 0.09, 0.01]
f[:, 2, 1] = [ 0.05, 0.2, 0.75]
f[:, 2, 2] = [ 0.01, 0.09, 0.9]
fact = Factor(f)
vn_BP = VariableNode('vn:BP', 3)
fn_BP_CO_TPR = FactorNode('fn:BP_CO_TPR', fact)
vn_BP.addNode(fn_BP_CO_TPR)
fn_BP_CO_TPR.addNode(vn_BP)
vn_CO.addNode(fn_BP_CO_TPR)
fn_BP_CO_TPR.addNode(vn_CO)
vn_TPR.addNode(fn_BP_CO_TPR)
fn_BP_CO_TPR.addNode(vn_TPR)
# now we do the next node
f = np.zeros((2, 1))
f[:, 0] = [ 0.1, 0.9]
fact = Factor(f)
vn_ErrCauter = VariableNode('vn:ErrCauter', 2)
fn_ErrCauter = FactorNode('fn:ErrCauter', fact)
vn_ErrCauter.addNode(fn_ErrCauter)
fn_ErrCauter.addNode(vn_ErrCauter)
# now we do the next node
f = np.zeros((3, 3, 2))
f[:, 0, 0] = [ 0.3333333, 0.3333333, 0.3333333]
f[:, 0, 1] = [ 0.98, 0.01, 0.01]
f[:, 1, 0] = [ 0.3333333, 0.3333333, 0.3333333]
f[:, 1, 1] = [ 0.01, 0.98, 0.01]
f[:, 2, 0] = [ 0.3333333, 0.3333333, 0.3333333]
f[:, 2, 1] = [ 0.01, 0.01, 0.98]
fact = Factor(f)
vn_HREKG = VariableNode('vn:HREKG', 3)
fn_HREKG_HR_ErrCauter = FactorNode('fn:HREKG_HR_ErrCauter', fact)
vn_HREKG.addNode(fn_HREKG_HR_ErrCauter)
fn_HREKG_HR_ErrCauter.addNode(vn_HREKG)
vn_HR.addNode(fn_HREKG_HR_ErrCauter)
fn_HREKG_HR_ErrCauter.addNode(vn_HR)
vn_ErrCauter.addNode(fn_HREKG_HR_ErrCauter)
fn_HREKG_HR_ErrCauter.addNode(vn_ErrCauter)
# now we do the next node
f = np.zeros((3, 3, 2))
f[:, 0, 0] = [ 0.3333333, 0.3333333, 0.3333333]
f[:, 0, 1] = [ 0.98, 0.01, 0.01]
f[:, 1, 0] = [ 0.3333333, 0.3333333, 0.3333333]
f[:, 1, 1] = [ 0.01, 0.98, 0.01]
f[:, 2, 0] = [ 0.3333333, 0.3333333, 0.3333333]
f[:, 2, 1] = [ 0.01, 0.01, 0.98]
fact = Factor(f)
vn_HRSat = VariableNode('vn:HRSat', 3)
fn_HRSat_HR_ErrCauter = FactorNode('fn:HRSat_HR_ErrCauter', fact)
vn_HRSat.addNode(fn_HRSat_HR_ErrCauter)
fn_HRSat_HR_ErrCauter.addNode(vn_HRSat)
vn_HR.addNode(fn_HRSat_HR_ErrCauter)
fn_HRSat_HR_ErrCauter.addNode(vn_HR)
vn_ErrCauter.addNode(fn_HRSat_HR_ErrCauter)
fn_HRSat_HR_ErrCauter.addNode(vn_ErrCauter)
# now we do the next node
f = np.zeros((2, 1))
f[:, 0] = [ 0.05, 0.95]
fact = Factor(f)
vn_ErrLowOutput = VariableNode('vn:ErrLowOutput', 2)
fn_ErrLowOutput = FactorNode('fn:ErrLowOutput', fact)
vn_ErrLowOutput.addNode(fn_ErrLowOutput)
fn_ErrLowOutput.addNode(vn_ErrLowOutput)
# now we do the next node
f = np.zeros((3, 2, 3))
f[:, 0, 0] = [ 0.98, 0.01, 0.01]
f[:, 0, 1] = [ 0.4, 0.59, 0.01]
f[:, 0, 2] = [ 0.3, 0.4, 0.3]
f[:, 1, 0] = [ 0.98, 0.01, 0.01]
f[:, 1, 1] = [ 0.01, 0.98, 0.01]
f[:, 1, 2] = [ 0.01, 0.01, 0.98]
fact = Factor(f)
vn_HRBP = VariableNode('vn:HRBP', 3)
fn_HRBP_ErrLowOutput_HR = FactorNode('fn:HRBP_ErrLowOutput_HR', fact)
vn_HRBP.addNode(fn_HRBP_ErrLowOutput_HR)
fn_HRBP_ErrLowOutput_HR.addNode(vn_HRBP)
vn_ErrLowOutput.addNode(fn_HRBP_ErrLowOutput_HR)
fn_HRBP_ErrLowOutput_HR.addNode(vn_ErrLowOutput)
vn_HR.addNode(fn_HRBP_ErrLowOutput_HR)
fn_HRBP_ErrLowOutput_HR.addNode(vn_HR)
# now we do the next node
f = np.zeros((4, 3, 4))
f[:, 0, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 1] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 0, 2] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 0, 3] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 1, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 1] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 1, 2] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 1, 3] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 2, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 2, 1] = [ 0.01, 0.01, 0.01, 0.97]
f[:, 2, 2] = [ 0.01, 0.01, 0.01, 0.97]
f[:, 2, 3] = [ 0.01, 0.01, 0.01, 0.97]
fact = Factor(f)
vn_ExpCO2 = VariableNode('vn:ExpCO2', 4)
fn_ExpCO2_ArtCO2_VentLung = FactorNode('fn:ExpCO2_ArtCO2_VentLung', fact)
vn_ExpCO2.addNode(fn_ExpCO2_ArtCO2_VentLung)
fn_ExpCO2_ArtCO2_VentLung.addNode(vn_ExpCO2)
vn_ArtCO2.addNode(fn_ExpCO2_ArtCO2_VentLung)
fn_ExpCO2_ArtCO2_VentLung.addNode(vn_ArtCO2)
vn_VentLung.addNode(fn_ExpCO2_ArtCO2_VentLung)
fn_ExpCO2_ArtCO2_VentLung.addNode(vn_VentLung)
# now we do the next node
f = np.zeros((3, 2))
f[:, 0] = [ 0.01, 0.19, 0.8]
f[:, 1] = [ 0.05, 0.9, 0.05]
fact = Factor(f)
vn_PAP = VariableNode('vn:PAP', 3)
fn_PAP_PulmEmbolus = FactorNode('fn:PAP_PulmEmbolus', fact)
vn_PAP.addNode(fn_PAP_PulmEmbolus)
fn_PAP_PulmEmbolus.addNode(vn_PAP)
vn_PulmEmbolus.addNode(fn_PAP_PulmEmbolus)
fn_PAP_PulmEmbolus.addNode(vn_PulmEmbolus)
# now we do the next node
f = np.zeros((4, 2, 3, 4))
f[:, 0, 0, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 0, 1] = [ 0.01, 0.49, 0.3, 0.2]
f[:, 0, 0, 2] = [ 0.01, 0.01, 0.08, 0.9]
f[:, 0, 0, 3] = [ 0.01, 0.01, 0.01, 0.97]
f[:, 0, 1, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 1, 1] = [ 0.1, 0.84, 0.05, 0.01]
f[:, 0, 1, 2] = [ 0.05, 0.25, 0.25, 0.45]
f[:, 0, 1, 3] = [ 0.01, 0.15, 0.25, 0.59]
f[:, 0, 2, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 2, 1] = [ 0.01, 0.29, 0.3, 0.4]
f[:, 0, 2, 2] = [ 0.01, 0.01, 0.08, 0.9]
f[:, 0, 2, 3] = [ 0.01, 0.01, 0.01, 0.97]
f[:, 1, 0, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 0, 1] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 1, 0, 2] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 1, 0, 3] = [ 0.01, 0.01, 0.01, 0.97]
f[:, 1, 1, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 1, 1] = [ 0.4, 0.58, 0.01, 0.01]
f[:, 1, 1, 2] = [ 0.2, 0.75, 0.04, 0.01]
f[:, 1, 1, 3] = [ 0.2, 0.7, 0.09, 0.01]
f[:, 1, 2, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 2, 1] = [ 0.01, 0.9, 0.08, 0.01]
f[:, 1, 2, 2] = [ 0.01, 0.01, 0.38, 0.6]
f[:, 1, 2, 3] = [ 0.01, 0.01, 0.01, 0.97]
fact = Factor(f)
vn_Press = VariableNode('vn:Press', 4)
fn_Press_KinkedTube_Intubation_VentTube = FactorNode('fn:Press_KinkedTube_Intubation_VentTube', fact)
vn_Press.addNode(fn_Press_KinkedTube_Intubation_VentTube)
fn_Press_KinkedTube_Intubation_VentTube.addNode(vn_Press)
vn_KinkedTube.addNode(fn_Press_KinkedTube_Intubation_VentTube)
fn_Press_KinkedTube_Intubation_VentTube.addNode(vn_KinkedTube)
vn_Intubation.addNode(fn_Press_KinkedTube_Intubation_VentTube)
fn_Press_KinkedTube_Intubation_VentTube.addNode(vn_Intubation)
vn_VentTube.addNode(fn_Press_KinkedTube_Intubation_VentTube)
fn_Press_KinkedTube_Intubation_VentTube.addNode(vn_VentTube)
# now we do the next node
f = np.zeros((4, 4, 3))
f[:, 0, 0] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 1] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 0, 2] = [ 0.97, 0.01, 0.01, 0.01]
f[:, 1, 0] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 1, 1] = [ 0.6, 0.38, 0.01, 0.01]
f[:, 1, 2] = [ 0.01, 0.97, 0.01, 0.01]
f[:, 2, 0] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 2, 1] = [ 0.5, 0.48, 0.01, 0.01]
f[:, 2, 2] = [ 0.01, 0.01, 0.97, 0.01]
f[:, 3, 0] = [ 0.01, 0.01, 0.01, 0.97]
f[:, 3, 1] = [ 0.5, 0.48, 0.01, 0.01]
f[:, 3, 2] = [ 0.01, 0.01, 0.01, 0.97]
fact = Factor(f)
vn_MinVol = VariableNode('vn:MinVol', 4)
fn_MinVol_VentLung_Intubation = FactorNode('fn:MinVol_VentLung_Intubation', fact)
vn_MinVol.addNode(fn_MinVol_VentLung_Intubation)
fn_MinVol_VentLung_Intubation.addNode(vn_MinVol)
vn_VentLung.addNode(fn_MinVol_VentLung_Intubation)
fn_MinVol_VentLung_Intubation.addNode(vn_VentLung)
vn_Intubation.addNode(fn_MinVol_VentLung_Intubation)
fn_MinVol_VentLung_Intubation.addNode(vn_Intubation)
# set the value of any nodes which are observed
vn_SaO2.setValue(np.array([1, 0, 0]))
vn_BP.setValue(np.array([1, 0, 0]))
vn_ArtCO2.setValue(np.array([1, 0, 0]))
vn_Press.setValue(np.array([0, 1, 0, 0]))
vn_ExpCO2.setValue(np.array([1, 0, 0, 0]))
# Do loopy belief propagation as the inference procedure: pass messages
# through every node 20 times.
for i in range(20):
print('i = ' + str(i))
vn_MinVol.loopy_bp()
vn_MinVol.setNotUpdated()
# Display the marginal distributions held in the variable nodes for KinkedTube, VentLung and Anaphylaxis
print(vn_KinkedTube.getMarginalDistribution())
print(vn_VentLung.getMarginalDistribution())
print(vn_Anaphylaxis.getMarginalDistribution())
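# Hedged sketch, not part of the original script: to run inference with an
# additional variable observed, set a one-hot value on its VariableNode before
# the loopy-BP loop above, exactly as done for vn_SaO2 and the other observed
# nodes. For example, clamping HRBP (cardinality 3) to its first state:
#
#   vn_HRBP.setValue(np.array([1, 0, 0]))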
|
|
import xmlrpc.client
import ssl
import socket # Required for network/socket connections
import os # Required for Forking/child processes
import time # Required for sleep call
import threading
import datetime
import dbm
import argparse
import random
import certs.gencert as gencert
import config
import logging
from logging.config import fileConfig
# Load logging config
fileConfig('/home/shnuser/coding/shn/setup/logging.conf')
log = logging.getLogger(__name__)
# Global Variables -- defaults only; these are updated at runtime, so no manual changes are needed.
CERTFILE = "/bin/shn/certs/domains/local.cert" # Placeholder; updated when executed
KEYFILE = "/bin/shn/certs/domains/local.key" # Placeholder; updated when executed
hostIP = "localhost" # Default; updated when executed
AGENT_ALIAS = "agent" # Default; updated to match agent hostname when run
SLEEP_TIME = 60 # Default; updated based on user-provided input
admin_selected = False
# Return pseudorandom decision on whether host is infected or
# not; returns True if 'infected'
def getDecision():
log.debug("Making a decision...")
number = random.randint(1, 99)
if number > 89:
answer = True
else:
answer = False
log.debug("Is host infected: %s" % answer)
return answer
# Return ip address of local host where server is running
def getMyIP():
log.debug('Getting Host ip address')
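# Connecting a UDP socket to a public address sends no traffic; it only makes
# the OS pick the outgoing interface, whose address getsockname() then returns.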
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 53))
ipAdd = s.getsockname()[0]
s.close()
log.debug('Socket closed: ipAdd=%s' % ipAdd)
return ipAdd
# Create SSL certs for current ip address if not already present
def verifyCerts():
global CERTFILE
global KEYFILE
# Determine file path based on current ip address
CERTFILE = ''.join([config.certPath, config.rootDomain, ".cert"])
KEYFILE = ''.join([config.certPath, config.rootDomain, ".key"])
log.debug("CERTFILE: %s" % CERTFILE)
log.debug("KEYFILE: %s" % KEYFILE)
# If cert or key file not present, create new certs
if not os.path.isfile(CERTFILE) or not os.path.isfile(KEYFILE):
gencert.gencert(config.rootDomain)
log.info("Certfile(s) NOT present; new certs created.")
print("Certfile(s) NOT present; new certs created.")
else:
log.info("Certfiles Verified Present")
print("Certfiles Verified Present")
# Test connection with Monitor
def testConnection(remoteName=config.mntrHostName,
remotePort=config.mntrServerPort):
log.debug("Start of Test Connection Function...")
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
myurl = ''.join(['https://', remoteName, ':', str(remotePort)])
testResult = False
with xmlrpc.client.ServerProxy(myurl,
context=myContext) as proxy:
# Test Connection
try:
print("Testing connection with Monitor:")
testResult = proxy.testConnection()
except ConnectionRefusedError:
log.warning("Connection to Monitor FAILED")
log.debug("Connection settings used: %s" % (myurl))
print("Connection to Monitor FAILED:\n",
"Is Monitor listening? Confirm connection",
"settings and try again.")
print("Connection settings used:\n '%s'" % (myurl))
if testResult:
log.info("Connection Test to '%s' SUCCESSFUL!" % myurl)
print("Connection Test to '%s' SUCCESSFUL!" % myurl)
else:
log.info("Connection Test to '%s' FAILED!" % myurl)
print("Connection Test to '%s' FAILED!" % myurl)
# Change/Update the Monitor's connection settings
def updateMonitor():
log.debug("Updating Monitor connection settings")
print("DEFAULT Monitor Hostname: 'monitor.shn.local'")
print("CURRENT Monitor Hostname: '%s'" % config.mntrHostName)
print("ENTER NEW Monitor Hostname: ['q' to keep current]")
tempNewHost = input(">>> ")
if tempNewHost == 'q':
log.debug("No Change")
elif tempNewHost == 'd':
log.debug("Keeping Default")
config.mntrHostName = 'monitor.shn.local'
else:
config.mntrHostName = tempNewHost
print("DEFAULT Monitor Port: '36363'")
print("CURRENT Monitor Port: '%s'" % config.mntrServerPort)
print("ENTER NEW Monitor Port: ['q' to keep current]")
tempNewPort = input(">>> ")
if tempNewPort == 'q':
log.debug("No Change")
elif tempNewPort == 'd':
log.debug("Keeping Default")
config.mntrServerPort = 36363
else:
config.mntrServerPort = int(tempNewPort)
print("UPDATED Monitor Saved: '%s', Port: '%d'" % (config.mntrHostName,
config.mntrServerPort))
log.debug("Monitor Saved: '%s', Port: '%d'" % (config.mntrHostName,
config.mntrServerPort))
# Print entire stored status history
def printHistory():
log.debug("Printing entire stored status history...")
currentTotal = 0
try:
with dbm.open('cache_esm', 'r') as db:
currentTotal = int((db.get('total')).decode("utf-8"))
log.debug("Cache found. Total Retrieved.")
print("Total Saved: %d" % currentTotal)
except:
log.debug("No cache found or read failed.")
print("READ FAILED or No Current Status Present")
if currentTotal > 0:
# Display history
log.debug("Current Total > 0")
print("[Update #]: [Update Time] >>> [Status]")
for k in range(currentTotal):
try:
with dbm.open('cache_esm', 'r') as db:
readstatus = "%s.status" % (k+1)
readtime = "%s.time" % (k+1)
thisTime = (db.get(readtime)).decode("utf-8")
thisStatus = (db.get(readstatus)).decode("utf-8")
if thisStatus == '1':
pStatus = "CLEAN ['1']"
elif thisStatus == '999':
pStatus = "COMPROMISED ['999']"
else:
pStatus = "UNKNOWN ['???']"
print("%d: %s >>> %s" % ((k+1), thisTime, pStatus))
except:
log.debug("Read Failed with Item %d!" % (k+1))
print("READ FAILED!")
print("End of History")
log.debug("End of History")
else:
log.debug("No Status. Exiting.")
print("No Status. Exiting.")
# Check currently-recorded status of ESM/VM
def checkStatus():
log.debug("Checking current ESM/VM Status...")
try:
with dbm.open('cache_esm', 'r') as db:
lastUpdate = (db.get('last_update')).decode("utf-8")
lastStatus = (db.get('last_status')).decode("utf-8")
log.debug("Cache found. Values retrieved.")
print("ESM/VM Status:")
if lastStatus == "1":
print("CLEAN ['1'] (as of %s)" % lastUpdate)
log.debug("CLEAN ['1'] (as of %s)" % lastUpdate)
elif lastStatus == "999":
print("COMPROMISED ['999'] (as of %s)" % lastUpdate)
log.debug("COMPROMISED ['999'] (as of %s)" % lastUpdate)
else:
print("Unknown Status!!!")
log.debug("Unknown Status!!!")
except:
log.debug("No cache found or read failed.")
print("READ FAILED or No Current Status Present")
# View current monitor connection settings
def viewConnection():
log.debug("Checking current Monitor Connection Settings...")
print("\nMonitor Settings:")
print("HostName: %s" % config.mntrHostName)
print("Port: %d" % config.mntrServerPort)
log.debug("Reading last successful transmit time...")
try:
with dbm.open('cache_esm', 'w') as db:
lastUpdate = (db.get('last_update')).decode("utf-8")
log.debug("Cache found. Value retrieved.")
except:
log.debug("No cache found or read failed.")
lastUpdate = "NONE recorded!!"
print("Last Successful Transmit: %s" % lastUpdate)
log.debug("End of View Connection Function")
# Simple test function to ensure communication is working
def mathTest():
log.debug("Start of Math Test Function...")
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
myurl = ''.join(['https://', config.mntrHostName, ':',
str(config.mntrServerPort)])
with xmlrpc.client.ServerProxy(myurl,
context=myContext) as proxy:
try:
print("3 + 7 is %d" % (proxy.add(3, 7)))
print("11 x 9 is: %d" % (proxy.multiply(11, 9)))
except ConnectionRefusedError:
log.warning("Connection to Monitor Server REFUSED")
print("Connection to Monitor Server FAILED:\n",
"Is Monitor listening? Confirm connection",
"settings and port number and try again.")
print("Settings used: '%s'" % myurl)
except:
log.warning("Connection to Monitor Server FAILED")
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % myurl)
def logStatus(logStatus, logTime):
log = logging.getLogger(__name__)
log.debug("Saving Status: %s, at Time: %s" % (logStatus, logTime))
storeStatus = str(logStatus)
storeTime = str(logTime)
log.debug("Values Storing: %s, %s" % (storeStatus, storeTime))
try:
with dbm.open('cache_esm', 'w') as db:
# Get current total and add 1 with type conversions
newtotal = str(int((db.get('total')).decode("utf-8")) + 1)
# Store new total in persistent storage
db['total'] = newtotal
# Create names based on connection number
savestatus = "%s.status" % (newtotal)
savetime = "%s.time" % (newtotal)
# Save connection info to persistent storage
db[savestatus] = storeStatus
db[savetime] = storeTime
db['last_update'] = storeTime
db['last_status'] = storeStatus
log.debug("Cache found. Values stored in old cache.")
log.debug("Saved: %s, %s" % (storeStatus, storeTime))
except:
log.debug("No cache file found; creating new file.")
with dbm.open('cache_esm', 'c') as db:
db['total'] = "1"
savestatus = "1.status"
savetime = "1.time"
db[savestatus] = storeStatus
db[savetime] = storeTime
db['last_update'] = storeTime
db['last_status'] = storeStatus
log.debug("Saved: %s, %s" % (storeStatus, storeTime))
log.debug("End of log status function")
# Send status update
def sendStatus(state=0, userInput=True):
log.debug("Start of Send Status Function...")
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
if userInput:
print("Enter Current Status:")
print("1) CLEAN ['1']")
print("2) COMPROMISED ['999']")
answer = input("Make a choice\n>>> ")
if answer == "1":
mystatus = 1
else:
mystatus = 999
if mystatus == 1:
print("Status selected: 'CLEAN'")
else:
print("Status selected: 'COMPROMISED'")
print("If this is incorrect, resubmit IMMEDIATELY!")
else:
mystatus = state
myurl = ''.join(['https://', config.mntrHostName, ':',
str(config.mntrServerPort)])
with xmlrpc.client.ServerProxy(myurl,
context=myContext) as proxy:
try:
response = proxy.reportStatus(hostIP, mystatus,
AGENT_ALIAS)
log.debug("Response: %s" % response)
if userInput:
print("Response from Monitor: %s" % response)
timeConfirmed = str(datetime.datetime.now())
print("Status '%s' Sent to Monitor; Confirmed at %s." % (mystatus,
timeConfirmed))
log.debug("Time Confirmed: %s" % timeConfirmed)
logStatus(mystatus, timeConfirmed)
log.debug("Status Logged")
except ConnectionRefusedError:
log.warning("Connection to Monitor Server FAILED")
if userInput:
print("Connection to Monitor Server FAILED:\n",
"Is Monitor listening? Confirm connection",
"settings and try again.")
print("Settings used: '%s'" % myurl)
except:
log.warning("Connection to Monitor Server FAILED")
if userInput:
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % myurl)
def deleteHistory(no_confirmation=False):
log.info("Delete History Function starting...")
confirm = False
if no_confirmation:
confirm = True
else:
# Get confirmation from user
print("Confirm you wish to DELETE ALL SAVED HISTORY:")
answer = input("Confirm YES['y'] or NO['n']:\n>>> ")
if answer in ["y", "Y", "YES", "yes", "Yes"]:
log.debug("Request for deletion confirmed.")
confirm = True
else:
log.debug("Request for deletion cancelled.")
log.debug("Answer selected: %s" % answer)
confirm = False
# Delete history, if confirmed
if confirm:
log.debug("Removing history now.")
os.remove("cache_esm")
log.info("History Deleted.")
else:
log.debug("History was NOT deleted.")
# Run basic 'simulator' to determine infection status
def basicSimulation(sleeptime=60):
log.debug("Running basic simulation")
# Report status as CLEAN three times
log.debug("Reporting status CLEAN three times.")
for k in range(3):
currentStatus = 1
# Log current state
log.debug("Current Status: CLEAN ['1']")
# Report current state
sendStatus(state=currentStatus, userInput=False)
# Sleep One Time period
time.sleep(sleeptime)
# Report status as COMPROMISED three times
for k in range(3):
currentStatus = 999
# Log current state
log.debug("Current Status: COMPROMISED ['999']")
# If this is the first time this is reported compromised
# then log as a warning and print as well
if k == 0:
log.warning("HOST NOW COMPROMISED ['999']!!!")
print("HOST NOW COMPROMISED ['999']!!! TAKE ACTION!!!")
# Report current state
sendStatus(state=currentStatus, userInput=False)
# Sleep One Time period
time.sleep(sleeptime)
# Run 'simulator' to randomly determine infection status
def randomSimulation(sleeptime=60):
log.debug("Running random simulation")
while True:
# Get current status
log.debug("Checking current ESM/VM Status...")
lastStatus = 1
currentStatus = 1
try:
with dbm.open('cache_esm', 'r') as db:
lastStatus = int((db.get('last_status')).decode("utf-8"))
log.debug("Cache found. Values retrieved: %d" % lastStatus)
except:
log.debug("No cache found or read failed.")
print("READ FAILED or No Current Status Present")
# If current is infected, remain infected
if not lastStatus == 1:
currentStatus = lastStatus
# If current not infected, get new decision
else:
r = getDecision()
if r:
currentStatus = 999
else:
currentStatus = 1
# Log current state
if currentStatus == 1:
log.debug("Current Status: CLEAN ['1']")
elif currentStatus == 999:
log.debug("Current Status: COMPROMISED ['999']")
# If this is the first time this is reported compromised
# then log as a warning and print as well
if not lastStatus == 999:
log.warning("HOST NOW COMPROMISED ['999']!!!")
print("HOST NOW COMPROMISED ['999']!!! TAKE ACTION!!!")
else:
log.debug("Unknown Status!!! ... %d" % currentStatus)
# Report current state
sendStatus(state=currentStatus, userInput=False)
# Sleep for set time limit before repeating
log.debug("Sleeping for %d seconds." % sleeptime)
time.sleep(sleeptime)
# Start basic simulation as background / thread process
def startBasicSimulation():
log.info("Starting basic simulation as background thread")
t = threading.Thread(name="BasicSimulation",
target=basicSimulation,
args=(SLEEP_TIME,
)
)
t.daemon = True
log.debug("Starting daemon simulation thread")
t.start()
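# Because the simulation thread is a daemon, it stops automatically when the
# main ESM process exits; no explicit join or cleanup is performed here.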
# Quit gracefully after terminating all child processes
def myQuit():
log.info("ESM Exiting. Goodbye.")
print("ESM Exiting. Goodbye.\n")
raise SystemExit
def invalid(choice):
log.debug("Invalid choice: %s" % choice)
print("INVALID CHOICE!")
def adminMenu():
log.debug("Displaying admin menu")
print("\nAdmin Menu:")
print("a) Connection Test (simple math test)")
print("b) SSL Verification (verify certificates")
print("c) View ALL Saved History")
print("d) Delete ESM History")
print("e) Send Status* to Monitor [user-provided status]")
print("f) CHANGE/UPDATE Monitor Settings")
print("9) BACK (return to 'Menu')")
return input("Make a Choice\n>>> ")
def adminSelection():
global admin_selected
adminChoice = adminMenu()
if adminChoice == "a":
mathTest()
elif adminChoice == "b":
verifyCerts()
elif adminChoice == "c":
printHistory()
elif adminChoice == "d":
deleteHistory()
elif adminChoice == "e":
sendStatus()
elif adminChoice == "f":
updateMonitor()
elif adminChoice == "9":
log.debug("Admin is De-selected")
print("Back to Main Menu...")
admin_selected = False
elif adminChoice == "r":
# Refresh Menu (do nothing)
log.info("Refreshing Menu")
elif adminChoice in ["q", ":q"]:
myQuit()
else:
invalid(adminChoice)
def menu():
log.debug("Displaying menu")
print("\n\nMENU[ESM]:")
print("1) Check current ESM status")
print("2) View Monitor Connection Settings")
print("3) Send 'CLEAN' Status to Monitor")
print("4) Send 'COMPROMISED' Status to Monitor")
print("5) Start BASIC Simulation [in background]")
print("6) Test Connection with Monitor")
print("9) ADMIN MENU")
print("q) QUIT")
return input("Make a Choice\n>>> ")
def myMenu():
global admin_selected
choice = 0
if admin_selected:
choice = "9"
else:
choice = menu()
if choice == "1":
checkStatus()
elif choice == "2":
viewConnection()
elif choice == "3":
sendStatus(state=1, userInput=False)
elif choice == "4":
sendStatus(state=999, userInput=False)
elif choice == "5":
startBasicSimulation()
elif choice == "6":
testConnection()
elif choice == "9":
admin_selected = True
log.debug("Admin is Selected")
adminSelection()
elif choice in ["q", ":q"]:
myQuit()
elif choice == "r":
# Refresh Menu (do nothing)
log.info("Refreshing Menu")
else:
invalid(choice)
# Process arguments and notify user of their choices
def processArguments(args):
log.info("Processing arguments...")
global AGENT_ALIAS
global SLEEP_TIME
# Accept user-provided monitor hostname, if provided
if args.monitor:
print("Monitor hostname set manually")
print("Using hostname: %s" % (args.monitor))
log.debug("Using monitor hostname: %s" % (args.monitor))
config.mntrHostName = args.monitor
else:
print("Using default monitor hostname: %s" % config.mntrHostName)
log.debug("Using default monitor hostname: %s" % config.mntrHostName)
# Accept user-provided monitor port number, if provided
if args.port:
print("Monitor port set manually")
print("Using port#: %d" % (args.port))
log.debug("Using monitor port#: %d" % (args.port))
config.mntrServerPort = args.port
else:
print("Using default monitor port#: %s" % config.mntrServerPort)
log.debug("Using default monitor port#: %s" % config.mntrServerPort)
# Accept user-provided ESM alias, if provided
if args.alias:
print("ESM Alias set manually")
print("Using alias: %s" % (args.alias))
log.debug("Using ESM alias: %s" % (args.alias))
AGENT_ALIAS = args.alias
else:
AGENT_ALIAS = (config.agntHostName).split('.')[0]
log.debug("Using default ESM Alias: %s" % (AGENT_ALIAS))
print("Using alias: %s" % (AGENT_ALIAS))
# Accept user-provided sleep time, if provided
if args.time:
print("Sleep time set manually")
print("Using sleep = %d seconds" % (args.time))
log.debug("Using sleep = %d seconds" % (args.time))
SLEEP_TIME = args.time
# Announce running in Basic Simulation mode, if applicable
if args.basic:
print("ESM running simulation in basic mode.")
log.debug("ESM running simulation in basic mode.")
# Announce running in Simulation mode, if applicable
if args.simulation:
print("ESM now executing in simulation mode.")
log.debug("ESM executing in simulation mode.")
# Delete previous status history, if applicable
if args.fresh:
log.debug("Fresh start selected.")
deleteHistory(True)
print("History Deleted: Starting Fresh")
log.info("End of 'process arguments.'")
# Start of Main
if __name__ == '__main__':
log.info("Starting MAIN. Parsing arguments.")
parser = argparse.ArgumentParser()
parser.add_argument("-S", "--simulation", help="run ESM in simulation\
mode, which does not allow user interaction",
action="store_true")
parser.add_argument("-B", "--basic", help="run simulation in basic mode\
(3 clean reports, then 3 compromised reports)\
Recommendation: Use with '-t' flag to adjust pace.",
action="store_true")
parser.add_argument("-t", "--time", help="set sleep time [in seconds]\
used for simulation (Default: 60)", type=int)
parser.add_argument("-m", "--monitor", help="set hostname of monitor\
(e.g., 'monitor.shn.local')")
parser.add_argument("-p", "--port", help="set port of monitor\
(e.g., '36363')", type=int)
parser.add_argument("-a", "--alias", help="manually set ESM alias\
(Note: MUST match alias of Agent running in\
corresponding VM's hypervisor.)")
parser.add_argument("-F", "--fresh", help="start fresh: remove status\
history before starting", action="store_true")
args = parser.parse_args()
# Process arguments
processArguments(args)
# Start of Main functionality
log.info("Starting Main [ESM]")
hostIP = getMyIP()
pid = os.getpid()
print("Host IP: %s" % (hostIP))
log.debug("PID: %d" % (pid))
# Verify certificates present prior to displaying menu
log.debug("Verifying certificates.")
verifyCerts()
time.sleep(2)
# If NOT simulation mode, display menu [repeatedly] for user
if not args.simulation:
while True:
myMenu()
time.sleep(1)
# Otherwise, start daemon loop retrieving no user input
else:
if args.basic:
log.info("Simulation loop started now (Mode=Basic).")
while True:
basicSimulation(SLEEP_TIME)
log.info("End of Basic simulation: Repeating.")
else:
log.info("Simulation loop started now (Mode=Normal).")
randomSimulation(SLEEP_TIME)
|
|
import base64
import hashlib
import io
import json
import os
import threading
import traceback
import socket
import sys
from abc import ABCMeta, abstractmethod
from http.client import HTTPConnection
from typing import Any, Callable, ClassVar, Optional, Tuple, Type, TYPE_CHECKING
from urllib.parse import urljoin, urlsplit, urlunsplit
from .actions import actions
from .protocol import Protocol, BaseProtocolPart
if TYPE_CHECKING:
from ..webdriver_server import WebDriverServer
here = os.path.dirname(__file__)
def executor_kwargs(test_type, test_environment, run_info_data, **kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": test_environment.config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type in ("reftest", "print-reftest"):
executor_kwargs["screenshot_cache"] = test_environment.cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
# By default the executor may try to cleanup windows after a test (to best
# associate any problems with the test causing them). If the user might
# want to view the results, however, the executor has to skip that cleanup.
if kwargs["pause_after_test"] or kwargs["pause_on_unexpected"]:
executor_kwargs["cleanup_after_test"] = False
executor_kwargs["debug_test"] = kwargs["debug_test"]
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT",
3: "PRECONDITION_FAILED"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN",
4: "PRECONDITION_FAILED"}
def __call__(self, test, result, extra=None):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def hash_screenshots(screenshots):
"""Computes the sha1 checksum of a list of base64-encoded screenshots."""
return [hashlib.sha1(base64.b64decode(screenshot)).hexdigest()
for screenshot in screenshots]
def _ensure_hash_in_reftest_screenshots(extra):
"""Make sure reftest_screenshots have hashes.
Marionette internal reftest runner does not produce hashes.
"""
log_data = extra.get("reftest_screenshots")
if not log_data:
return
for item in log_data:
if type(item) != dict:
# Skip relation strings.
continue
if "hash" not in item:
item["hash"] = hash_screenshots([item["screenshot"]])[0]
def get_pages(ranges_value, total_pages):
"""Get a set of page numbers to include in a print reftest.
:param ranges_value: Parsed page ranges as a list e.g. [[1,2], [4], [6,None]]
:param total_pages: Integer total number of pages in the paginated output.
:retval: Set containing integer page numbers to include in the comparison e.g.
for the example ranges value and 10 total pages this would be
{1,2,4,6,7,8,9,10}"""
if not ranges_value:
return set(range(1, total_pages + 1))
rv = set()
for range_limits in ranges_value:
if len(range_limits) == 1:
range_limits = [range_limits[0], range_limits[0]]
if range_limits[0] is None:
range_limits[0] = 1
if range_limits[1] is None:
range_limits[1] = total_pages
if range_limits[0] > total_pages:
continue
rv |= set(range(range_limits[0], range_limits[1] + 1))
return rv
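# Illustrative results (the first example matches the docstring above):
#   get_pages([[1, 2], [4], [6, None]], 10) -> {1, 2, 4, 6, 7, 8, 9, 10}
#   get_pages(None, 3) -> {1, 2, 3}   (no ranges selects every page)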
def reftest_result_converter(self, test, result):
extra = result.get("extra", {})
_ensure_hash_in_reftest_screenshots(extra)
return (test.result_cls(
result["status"],
result["message"],
extra=extra,
stack=result.get("stack")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
def crashtest_result_converter(self, test, result):
return test.result_cls(**result), []
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TimedRunner(object):
def __init__(self, logger, func, protocol, url, timeout, extra_timeout):
self.func = func
self.logger = logger
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.extra_timeout = extra_timeout
self.result_flag = threading.Event()
def run(self):
for setup_fn in [self.set_timeout, self.before_run]:
err = setup_fn()
if err:
self.result = (False, err)
return self.result
executor = threading.Thread(target=self.run_func)
executor.start()
# Add twice the extra timeout since the called function is expected to
# wait at least self.timeout + self.extra_timeout and this gives some leeway
timeout = self.timeout + 2 * self.extra_timeout if self.timeout else None
finished = self.result_flag.wait(timeout)
if self.result is None:
if finished:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self.run_func fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "%s.run_func didn't set a result" %
self.__class__.__name__)
else:
if self.protocol.is_alive():
message = "Executor hit external timeout (this may indicate a hang)\n"
# get a traceback for the current stack of the executor thread
message += "".join(traceback.format_stack(sys._current_frames()[executor.ident]))
self.result = False, ("EXTERNAL-TIMEOUT", message)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
elif self.result[1] is None:
# We didn't get any data back from the test, so check if the
# browser is still responsive
if self.protocol.is_alive():
self.result = False, ("INTERNAL-ERROR", None)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
return self.result
def set_timeout(self):
raise NotImplementedError
def before_run(self):
pass
def run_func(self):
raise NotImplementedError
class TestExecutor(object):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
__metaclass__ = ABCMeta
test_type = None # type: ClassVar[str]
# convert_result is a class variable set to a callable converter
# (e.g. reftest_result_converter) converting from an instance of
# URLManifestItem (e.g. RefTest) + type-dependent results object +
# type-dependent extra data, returning a tuple of Result and list of
# SubtestResult. For now, any callable is accepted. TODO: Make this type
# stricter when more of the surrounding code is annotated.
convert_result = None # type: ClassVar[Callable[..., Any]]
supports_testdriver = False
supports_jsshell = False
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
self.logger = logger
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def reset(self):
"""Re-initialize internal state to facilitate repeated test execution
as implemented by the `--rerun` command-line argument."""
pass
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
try:
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
result = self.do_test(test)
except Exception as e:
exception_string = traceback.format_exc()
self.logger.warning(exception_string)
result = self.result_from_exception(test, e, exception_string)
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol, subdomain=False):
scheme = "https" if protocol == "h2" else protocol
host = self.server_config["browser_host"]
if subdomain:
# The only supported subdomain filename flag is "www".
host = "{subdomain}.{host}".format(subdomain="www", host=host)
return "{scheme}://{host}:{port}".format(scheme=scheme, host=host,
port=self.server_config["ports"][protocol][0])
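    # Illustrative output (hypothetical config values): with browser_host
    # "web-platform.test" and an http port of 8000, server_url("http",
    # subdomain=True) returns "http://www.web-platform.test:8000".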
def test_url(self, test):
return urljoin(self.server_url(test.environment["protocol"],
test.subdomain), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e, exception_string):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += exception_string
return test.result_cls(status, message), []
def wait(self):
return self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = False
def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class CrashtestExecutor(TestExecutor):
convert_result = crashtest_result_converter
class PrintRefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = True
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi, page_ranges):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.get_screenshot_list(test, viewport_size, dpi, page_ranges)
if not success:
return False, data
screenshots = data
hash_values = hash_screenshots(data)
self.screenshot_cache[key] = (hash_values, screenshots)
rv = (hash_values, screenshots)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def reset(self):
self.screenshot_cache.clear()
def check_pass(self, hashes, screenshots, urls, relation, fuzzy):
"""Check if a test passes, and return a tuple of (pass, page_idx),
where page_idx is the zero-based index of the first page on which a
difference occurs if any, or None if there are no differences"""
assert relation in ("==", "!=")
lhs_hashes, rhs_hashes = hashes
lhs_screenshots, rhs_screenshots = screenshots
if len(lhs_hashes) != len(rhs_hashes):
self.logger.info("Got different number of pages")
return relation == "!=", None
assert len(lhs_screenshots) == len(lhs_hashes) == len(rhs_screenshots) == len(rhs_hashes)
for (page_idx, (lhs_hash,
rhs_hash,
lhs_screenshot,
rhs_screenshot)) in enumerate(zip(lhs_hashes,
rhs_hashes,
lhs_screenshots,
rhs_screenshots)):
comparison_screenshots = (lhs_screenshot, rhs_screenshot)
if not fuzzy or fuzzy == ((0, 0), (0, 0)):
equal = lhs_hash == rhs_hash
# sometimes images can have different hashes, but pixels can be identical.
if not equal:
self.logger.info("Image hashes didn't match%s, checking pixel differences" %
("" if len(hashes) == 1 else " on page %i" % (page_idx + 1)))
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls)
equal = pixels_different == 0 and max_per_channel == 0
else:
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls,
page_idx if len(hashes) > 1 else None)
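                # fuzzy is a pair of (min, max) ranges: the first bounds the
                # allowed per-channel difference, the second bounds the
                # allowed number of differing pixels.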
allowed_per_channel, allowed_different = fuzzy
self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
("-".join(str(item) for item in allowed_different),
"-".join(str(item) for item in allowed_per_channel)))
equal = ((pixels_different == 0 and allowed_different[0] == 0) or
(max_per_channel == 0 and allowed_per_channel[0] == 0) or
(allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
allowed_different[0] <= pixels_different <= allowed_different[1]))
if not equal:
return (False if relation == "==" else True, page_idx)
# All screenshots were equal within the fuzziness
return (True if relation == "==" else False, None)
def get_differences(self, screenshots, urls, page_idx=None):
from PIL import Image, ImageChops, ImageStat
lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
self.check_if_solid_color(lhs, urls[0])
self.check_if_solid_color(rhs, urls[1])
diff = ImageChops.difference(lhs, rhs)
minimal_diff = diff.crop(diff.getbbox())
mask = minimal_diff.convert("L", dither=None)
stat = ImageStat.Stat(minimal_diff, mask)
per_channel = max(item[1] for item in stat.extrema)
count = stat.count[0]
self.logger.info("Found %s pixels different, maximum difference per channel %s%s" %
(count,
per_channel,
"" if page_idx is None else " on page %i" % (page_idx + 1)))
return per_channel, count
def check_if_solid_color(self, image, url):
extrema = image.getextrema()
if all(min == max for min, max in extrema):
color = ''.join('%02X' % value for value, _ in extrema)
self.message.append("Screenshot is solid color 0x%s for %s\n" % (color, url))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
page_ranges = test.page_ranges
self.message = []
# Depth-first search of reference tree, with the goal
        # of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
page_idx = None
while stack:
hashes = [None, None]
screenshots = [None, None]
urls = [None, None]
nodes, relation = stack.pop()
fuzzy = self.get_fuzzy(test, nodes, relation)
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi, page_ranges)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
urls[i] = node.url
is_pass, page_idx = self.check_pass(hashes, screenshots, urls, relation, fuzzy)
if is_pass:
fuzzy = self.get_fuzzy(test, nodes, relation)
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1])
for item in reversed(nodes[1].references)))
else:
# We passed
return {"status": "PASS", "message": None}
# We failed, so construct a failure message
if page_idx is None:
# default to outputting the last page
page_idx = -1
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi, page_ranges)
if success:
screenshots[i] = screenshot
log_data = [
{"url": nodes[0].url,
"screenshot": screenshots[0][page_idx],
"hash": hashes[0][page_idx]},
relation,
{"url": nodes[1].url,
"screenshot": screenshots[1][page_idx],
"hash": hashes[1][page_idx]},
]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def get_fuzzy(self, root_test, test_nodes, relation):
full_key = tuple([item.url for item in test_nodes] + [relation])
ref_only_key = test_nodes[1].url
fuzzy_override = root_test.fuzzy_override
fuzzy = test_nodes[0].fuzzy
sources = [fuzzy_override, fuzzy]
keys = [full_key, ref_only_key, None]
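        # Lookup precedence: the per-test fuzzy override is consulted before
        # the test's own fuzzy metadata, and within each source a
        # (test url, ref url, relation) key wins over a reference-url-only
        # key, with None acting as the catch-all default.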
value = None
for source in sources:
for key in keys:
if key in source:
value = source[key]
break
if value:
break
return value
def retake_screenshot(self, node, viewport_size, dpi, page_ranges):
success, data = self.get_screenshot_list(node,
viewport_size,
dpi,
page_ranges)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
def get_screenshot_list(self, node, viewport_size, dpi, page_ranges):
success, data = self.executor.screenshot(node, viewport_size, dpi, page_ranges)
if success and not isinstance(data, list):
return success, [data]
return success, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None # type: ClassVar[Type[Protocol]]
def __init__(self, logger, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, environ=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.environ = environ if environ is not None else {}
self.output_handler_kwargs = None
self.output_handler_start_kwargs = None
def setup(self, runner):
self.protocol = self.protocol_cls(self, self.browser)
super().setup(runner)
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
            message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc()
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
def load(self, url):
pass
def execute_script(self, script, asynchronous=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
return False
def set_window(self, handle):
pass
def window_handles(self):
return []
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WdspecProtocol(Protocol):
server_cls = None # type: ClassVar[Optional[Type[WebDriverServer]]]
implements = [ConnectionlessBaseProtocolPart]
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
self.environ = os.environ.copy()
self.environ.update(executor.environ)
self.output_handler_kwargs = executor.output_handler_kwargs
self.output_handler_start_kwargs = executor.output_handler_start_kwargs
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args,
env=self.environ)
self.server.start(block=False,
output_handler_kwargs=self.output_handler_kwargs,
output_handler_start_kwargs=self.output_handler_start_kwargs)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive():
self.server.stop()
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that are more different to WebDriver may need to create a
fully custom implementation."""
unimplemented_exc = (NotImplementedError,) # type: ClassVar[Tuple[Type[Exception], ...]]
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {cls.name: cls(self.logger, self.protocol) for cls in actions}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [strip_server(url)] + payload
return True, rv
def process_action(self, url, payload):
action = payload["action"]
cmd_id = payload["id"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
with ActionContext(self.logger, self.protocol, payload.get("context")):
result = action_handler(payload)
except self.unimplemented_exc:
self.logger.warning("Action %s not implemented" % action)
self._send_message(cmd_id, "complete", "error", "Action %s not implemented" % action)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message(cmd_id, "complete", "error")
raise
else:
self.logger.debug("Action %s completed with result %s" % (action, result))
return_message = {"result": result}
self._send_message(cmd_id, "complete", "success", json.dumps(return_message))
return False, None
def _send_message(self, cmd_id, message_type, status, message=None):
self.protocol.testdriver.send_message(cmd_id, message_type, status, message=message)
class ActionContext(object):
def __init__(self, logger, protocol, context):
self.logger = logger
self.protocol = protocol
self.context = context
self.initial_window = None
def __enter__(self):
if self.context is None:
return
self.initial_window = self.protocol.base.current_window
self.logger.debug("Switching to window %s" % self.context)
self.protocol.testdriver.switch_to_window(self.context, self.initial_window)
def __exit__(self, *args):
if self.context is None:
return
self.logger.debug("Switching back to initial window")
self.protocol.base.set_window(self.initial_window)
self.initial_window = None
|
|
# Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Viewfinder BuildArchiveOperation.
This operation builds an archive of content for a given user and sends the user
an email with a link that can be used to retrieve a zip file of their content.
The zip contains all source needed to invoke the web client and display the user's
conversations.
The link is an S3 signed URL that expires after a limited time (controlled by _S3_ZIP_FILE_ACCESS_EXPIRATION).
Note: This operation runs as user 0 so that only one will be active at any given time. This works
as a throttling mechanism.
"""
__authors__ = ['[email protected] (Mike Purtell)']
import calendar
import datetime
import json
import logging
import os
import random
import shutil
import string
from tornado import gen, httpclient, options, process
from viewfinder.backend.base import constants, util
from viewfinder.backend.base.environ import ServerEnvironment
from viewfinder.backend.base.exceptions import ServiceUnavailableError, NotFoundError
from viewfinder.backend.base.secrets import GetSecret
from viewfinder.backend.db import db_client
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.followed import Followed
from viewfinder.backend.db.follower import Follower
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.post import Post
from viewfinder.backend.db.user import User
from viewfinder.backend.db.user_photo import UserPhoto
from viewfinder.backend.db.user_post import UserPost
from viewfinder.backend.db.viewpoint import Viewpoint
from viewfinder.backend.op.viewfinder_op import ViewfinderOperation
from viewfinder.backend.resources.message.error_messages import SERVICE_UNAVAILABLE
from viewfinder.backend.resources.resources_mgr import ResourcesManager
from viewfinder.backend.services.email_mgr import EmailManager
from viewfinder.backend.storage.object_store import ObjectStore
from viewfinder.backend.www import base, www_util, photo_store
CONVO_FOLDER_NAME = 'conversations'
def _CanViewViewpointContent(viewpoint, follower):
"""Returns true if the given follower is allowed to view the viewpoint's content:
1. Follower must exist
2. Viewpoint must not be removed by the follower
"""
if viewpoint is None or follower is None or not follower.CanViewContent():
return False
return True
def _MakeViewpointMetadataDict(viewpoint, follower):
"""Returns a viewpoint metadata dictionary appropriate for a service query response.
The response dictionary contains valid photo urls for the viewpoint's cover photo.
"""
def _GetNormalizedViewpointTitle(vp_dict):
"""Normalize the viewpoint title so that it can be used as a directory name in the archive.
This will strip anything except for upper/lower case letters, digits and the space character.
It will also truncate it to 100 characters to avoid file path limitations.
"""
norm_title = ''
if vp_dict['type'] == Viewpoint.DEFAULT:
norm_title = 'Personal Collection'
elif vp_dict.get('title') is not None:
for c in vp_dict['title']:
if c in BuildArchiveOperation._PATH_WHITELIST:
norm_title += c
# Avoid creating a folder path that's too long.
norm_title = norm_title[:100]
return norm_title
vp_dict = viewpoint.MakeMetadataDict(follower)
norm_vp_title = _GetNormalizedViewpointTitle(vp_dict)
# Append the viewpoint id to the path to ensure uniqueness.
vp_dict['folder_name'] = ('%s/%s %s' % (CONVO_FOLDER_NAME, norm_vp_title, vp_dict['viewpoint_id'])).strip()
if 'cover_photo' in vp_dict:
vp_dict['cover_photo']['full_get_url'] = \
os.path.join(vp_dict['folder_name'], vp_dict['cover_photo']['photo_id'] + '.f.jpg')
return vp_dict
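# Illustrative folder_name (hypothetical title and viewpoint id): a viewpoint
# titled "Ski Trip 2013!" with id "vp123" becomes
# "conversations/Ski Trip 2013 vp123" (the '!' is dropped by the whitelist).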
@gen.coroutine
def _QueryFollowedForArchive(client, user_id):
"""Queries all viewpoints followed by the requested user (excluding the default/personal viewpoint)."""
followed = yield gen.Task(Followed.RangeQuery,
client,
hash_key=user_id,
range_desc=None,
limit=None,
col_names=['viewpoint_id'],
excl_start_key=None)
# Get the viewpoint associated with each follower object.
viewpoint_keys = [db_client.DBKey(f.viewpoint_id, None) for f in followed]
follower_keys = [db_client.DBKey(user_id, f.viewpoint_id) for f in followed]
viewpoints, followers = yield [gen.Task(Viewpoint.BatchQuery, client, viewpoint_keys, None, must_exist=False),
gen.Task(Follower.BatchQuery, client, follower_keys, None, must_exist=False)]
# Formulate the viewpoints list into a dict for JSON output.
response = {'viewpoints': [_MakeViewpointMetadataDict(v, f)
for v, f in zip(viewpoints, followers)
if v is not None and not v.IsDefault()]}
raise gen.Return(response)
@gen.coroutine
def _QueryViewpointsForArchive(client,
user_id,
viewpoint_ids,
get_followers=False,
get_activities=False,
get_episodes=False,
get_comments=False,
get_attributes=False):
"""Queries viewpoint metadata, as well as associated followers and episodes.
"""
@gen.coroutine
def _QueryFollowers():
"""Produces list of (followers, last_key) tuples, one for each viewpoint in the request."""
tasks = []
for vp_id in viewpoint_ids:
if get_followers:
tasks.append(Viewpoint.QueryFollowers(client, vp_id))
else:
tasks.append(util.GenConstant(None))
follower_results = yield tasks
raise gen.Return(follower_results)
@gen.coroutine
def _QueryActivities():
"""Produces list of (activities, last_key) tuples, one for each viewpoint in the request."""
tasks = []
for vp_id in viewpoint_ids:
if get_activities:
tasks.append(gen.Task(Viewpoint.QueryActivities, client, vp_id))
else:
tasks.append(util.GenConstant(None))
activity_results = yield tasks
raise gen.Return(activity_results)
@gen.coroutine
def _QueryEpisodes():
"""Produces list of (episodes, last_key) tuples, one for each viewpoint in the request."""
tasks = []
for vp_id in viewpoint_ids:
if get_episodes:
tasks.append(gen.Task(Viewpoint.QueryEpisodes, client, vp_id))
else:
tasks.append(util.GenConstant(None))
episode_results = yield tasks
raise gen.Return(episode_results)
@gen.coroutine
def _QueryComments():
"""Produces list of (comments, last_key) tuples, one for each viewpoint in the request."""
tasks = []
for vp_id in viewpoint_ids:
if get_comments:
tasks.append(gen.Task(Viewpoint.QueryComments, client, vp_id))
else:
tasks.append(util.GenConstant(None))
comment_results = yield tasks
raise gen.Return(comment_results)
viewpoint_keys = [db_client.DBKey(vp_id, None) for vp_id in viewpoint_ids]
follower_keys = [db_client.DBKey(user_id, vp_id) for vp_id in viewpoint_ids]
results = yield [gen.Task(Viewpoint.BatchQuery, client, viewpoint_keys, None, must_exist=False),
gen.Task(Follower.BatchQuery, client, follower_keys, None, must_exist=False),
_QueryFollowers(),
_QueryActivities(),
_QueryEpisodes(),
_QueryComments()]
viewpoints, followers, follower_id_results, activity_results, episode_results, comment_results = results
zip_list = zip(viewpoints, followers, follower_id_results, activity_results,
episode_results, comment_results)
response_vp_dicts = []
for viewpoint, follower, follower_result, activity_result, episode_result, comment_result in zip_list:
# Only return the viewpoint metadata if the caller is a follower of the viewpoint.
if follower is not None and not follower.IsRemoved():
response_vp_dict = {'viewpoint_id': viewpoint.viewpoint_id}
if get_attributes:
response_vp_dict.update(_MakeViewpointMetadataDict(viewpoint, follower))
if get_followers:
followers, last_key = follower_result
response_vp_dict['followers'] = [foll.MakeFriendMetadataDict() for foll in followers]
if last_key is not None:
response_vp_dict['follower_last_key'] = www_util.FormatIntegralLastKey(last_key)
if _CanViewViewpointContent(viewpoint, follower):
if get_activities:
activities, last_key = activity_result
response_vp_dict['activities'] = [act.MakeMetadataDict() for act in activities]
if last_key is not None:
response_vp_dict['activity_last_key'] = last_key
if get_episodes:
episodes, last_key = episode_result
response_vp_dict['episodes'] = [ep._asdict() for ep in episodes]
if last_key is not None:
response_vp_dict['episode_last_key'] = last_key
if get_comments:
comments, last_key = comment_result
response_vp_dict['comments'] = [co._asdict() for co in comments]
if last_key is not None:
response_vp_dict['comment_last_key'] = last_key
response_vp_dicts.append(response_vp_dict)
raise gen.Return({'viewpoints': response_vp_dicts})
@gen.coroutine
def _QueryUsersForArchive(client, requesting_user_id, user_ids):
"""Queries users by user id, filtering by friendships."""
user_friend_list = yield gen.Task(User.QueryUsers, client, requesting_user_id, user_ids)
user_dicts = yield [gen.Task(user.MakeUserMetadataDict, client, requesting_user_id, forward_friend, reverse_friend)
for user, forward_friend, reverse_friend in user_friend_list]
response = {'users': user_dicts}
raise gen.Return(response)
@gen.coroutine
def _QueryEpisodesForArchive(client, obj_store, user_id, episode_ids):
"""Queries posts from the specified episodes.
"""
def _MakePhotoDict(post, photo, user_post, user_photo):
ph_dict = photo.MakeMetadataDict(post, user_post, user_photo)
# Do not return access URLs for posts which have been removed.
if not post.IsRemoved():
ph_dict['full_get_url'] = photo_store.GeneratePhotoUrl(obj_store, ph_dict['photo_id'], '.f')
return ph_dict
# Get all requested episodes, along with posts for each episode.
episode_keys = [db_client.DBKey(ep_id, None) for ep_id in episode_ids]
post_tasks = []
for ep_id in episode_ids:
post_tasks.append(gen.Task(Post.RangeQuery, client, ep_id, None, None, None, excl_start_key=None))
episodes, posts_list = yield [gen.Task(Episode.BatchQuery, client, episode_keys, None, must_exist=False),
gen.Multi(post_tasks)]
# Get viewpoint records for all viewpoints containing episodes.
viewpoint_keys = [db_client.DBKey(viewpoint_id, None)
for viewpoint_id in set(ep.viewpoint_id for ep in episodes if ep is not None)]
# Get follower records for all viewpoints containing episodes, along with photo and user post objects.
follower_keys = [db_client.DBKey(user_id, db_key.hash_key) for db_key in viewpoint_keys]
all_posts = [post for posts in posts_list if posts is not None for post in posts]
photo_keys = [db_client.DBKey(post.photo_id, None) for post in all_posts]
user_post_keys = [db_client.DBKey(user_id, Post.ConstructPostId(post.episode_id, post.photo_id))
for post in all_posts]
if user_id:
# TODO(ben): we can probably skip this for the web view
user_photo_task = gen.Task(UserPhoto.BatchQuery, client,
[db_client.DBKey(user_id, post.photo_id) for post in all_posts],
None, must_exist=False)
else:
user_photo_task = util.GenConstant(None)
viewpoints, followers, photos, user_posts, user_photos = yield [
gen.Task(Viewpoint.BatchQuery, client, viewpoint_keys, None, must_exist=False),
gen.Task(Follower.BatchQuery, client, follower_keys, None, must_exist=False),
gen.Task(Photo.BatchQuery, client, photo_keys, None),
gen.Task(UserPost.BatchQuery, client, user_post_keys, None, must_exist=False),
user_photo_task,
]
# Get set of viewpoint ids to which the current user has access.
viewable_viewpoint_ids = set(viewpoint.viewpoint_id for viewpoint, follower in zip(viewpoints, followers)
if _CanViewViewpointContent(viewpoint, follower))
response_dict = {'episodes': []}
for ep_id, episode, posts in zip(episode_ids, episodes, posts_list):
# Gather list of (post, photo, user_post) tuples for this episode.
photo_info_list = []
for post in posts:
photo = photos.pop(0)
user_post = user_posts.pop(0)
user_photo = user_photos.pop(0) if user_photos is not None else None
assert photo.photo_id == post.photo_id, (episode, post, photo)
if user_photo:
assert user_photo.photo_id == photo.photo_id
assert user_photo.user_id == user_id
photo_info_list.append((post, photo, user_post, user_photo))
if episode is not None and episode.viewpoint_id in viewable_viewpoint_ids:
response_ep_dict = {'episode_id': ep_id}
response_ep_dict.update(episode._asdict())
            response_ep_dict['photos'] = [_MakePhotoDict(post, photo, user_post, user_photo)
                                          for post, photo, user_post, user_photo in photo_info_list]
if len(photo_info_list) > 0:
response_ep_dict['last_key'] = photo_info_list[-1][0].photo_id
response_dict['episodes'].append(response_ep_dict)
raise gen.Return(response_dict)
class BuildArchiveOperation(ViewfinderOperation):
""" Operation to:
1) Clear temporary directory used to construct zip file content.
2) Collect a given user's content into a temporary directory.
3) Copy web client code into the same temporary directory.
4) Zip the temp directory up.
5) Put the zip file into S3.
6) Generate a signed URL referencing the zip file in S3.
7) Email the signed URL to the user.
"""
_PATH_WHITELIST = ' ' + string.ascii_letters + string.digits
_OFFBOARDING_DIR_NAME = 'offboarding'
_ZIP_FILE_NAME = 'vf.zip'
_CONTENT_DIR_NAME = 'viewfinder'
# 3 days for user to retrieve their zip file.
_S3_ZIP_FILE_ACCESS_EXPIRATION = 3 * constants.SECONDS_PER_DAY
def __init__(self, client, user_id, email):
super(BuildArchiveOperation, self).__init__(client)
self._user_id = user_id
self._email = email
self._notify_timestamp = self._op.timestamp
self._photo_obj_store = ObjectStore.GetInstance(ObjectStore.PHOTO)
self._user_zips_obj_store = ObjectStore.GetInstance(ObjectStore.USER_ZIPS)
self._offboarding_assets_dir_path = ResourcesManager.Instance().GetOffboardingPath()
self._temp_dir_path = os.path.join(ServerEnvironment.GetViewfinderTempDirPath(),
BuildArchiveOperation._OFFBOARDING_DIR_NAME)
self._zip_file_path = os.path.join(self._temp_dir_path, BuildArchiveOperation._ZIP_FILE_NAME)
self._content_dir_path = os.path.join(self._temp_dir_path, BuildArchiveOperation._CONTENT_DIR_NAME)
self._data_dir_path = os.path.join(self._content_dir_path, CONVO_FOLDER_NAME)
@classmethod
@gen.coroutine
def Execute(cls, client, user_id, email):
"""Entry point called by the operation framework."""
yield BuildArchiveOperation(client, user_id, email)._BuildArchive()
def _ResetArchiveDir(self):
"""Get our temp directory into a known clean state."""
        # Make sure certain directories already exist.
if not os.path.exists(ServerEnvironment.GetViewfinderTempDirPath()):
os.mkdir(ServerEnvironment.GetViewfinderTempDirPath())
if not os.path.exists(self._temp_dir_path):
os.mkdir(self._temp_dir_path)
# Blow away any previously existing content.
if os.path.exists(self._content_dir_path):
shutil.rmtree(self._content_dir_path)
assert not os.path.exists(self._content_dir_path)
# Blow away any previous zip file.
if os.path.exists(self._zip_file_path):
os.remove(self._zip_file_path)
assert not os.path.exists(self._zip_file_path)
# Recreate the content directory.
os.mkdir(self._content_dir_path)
os.mkdir(self._data_dir_path)
@gen.coroutine
def _ProcessPhoto(self, folder_path, photo_id, url):
http_client = httpclient.AsyncHTTPClient()
try:
response = yield http_client.fetch(url,
method='GET',
validate_cert=options.options.validate_cert)
except httpclient.HTTPError as e:
if e.code == 404:
                logging.warning("Photo not found for user %d's archive: %s" % (self._user_id, photo_id + '.f'))
return
else:
logging.warning('Photo store S3 GET request error: [%s] %s' % (type(e).__name__, e.message))
raise ServiceUnavailableError(SERVICE_UNAVAILABLE)
if response.code != 200:
raise AssertionError('failure on GET request for photo %s: %s' %
(photo_id + '.f', response))
# Write the image to the jpg file.
# TODO(mike): Consider moving this IO to thread pool to avoid blocking on main thread.
with open(os.path.join(folder_path, photo_id + '.f.jpg'), mode='wb') as f:
f.write(response.body)
@gen.coroutine
def _VerifyPhotoExists(self, folder_path, photo_id):
"""The file for this photo should already exist."""
assert os.path.exists(os.path.join(folder_path, photo_id + '.f.jpg'))
@gen.coroutine
def _ProcessViewpoint(self, vp_dict):
results_dict = yield _QueryViewpointsForArchive(self._client,
self._user_id,
[vp_dict['viewpoint_id']],
get_activities=True,
get_attributes=True,
get_comments=True,
get_episodes=True)
viewpoint_folder_path = os.path.join(self._content_dir_path, vp_dict['folder_name'])
# Now, grab the photos!
episode_ids = [ep_dict['episode_id'] for ep_dict in results_dict['viewpoints'][0]['episodes']]
episodes_dict = yield _QueryEpisodesForArchive(self._client, self._photo_obj_store, self._user_id, episode_ids)
photos_to_fetch = dict()
photos_to_merge = dict()
# Gather photo URL's to request and replace URL's with archive paths.
for ep_dict in episodes_dict['episodes']:
for photo_dict in ep_dict['photos']:
if photo_dict.get('full_get_url') is not None:
photos_to_fetch[photo_dict['photo_id']] = photo_dict['full_get_url']
photo_dict['full_get_url'] = os.path.join(vp_dict['folder_name'], photo_dict['photo_id'] + '.f.jpg')
photos_to_merge[ep_dict['episode_id']] = ep_dict['photos']
# Merge the photo metadata from query_episodes into the query_viewpoint response.
for ep_dict in results_dict['viewpoints'][0]['episodes']:
ep_dict['photos'] = photos_to_merge[ep_dict['episode_id']]
if os.path.exists(viewpoint_folder_path):
# Because the viewpoint folder already exists, let's just verify that everything else exists.
assert os.path.exists(os.path.join(viewpoint_folder_path,'metadata.jsn'))
for photo_id,url in photos_to_fetch.items():
yield self._VerifyPhotoExists(viewpoint_folder_path, photo_id)
else:
# TODO(mike): Consider moving this IO to thread pool to avoid blocking on main thread.
os.mkdir(viewpoint_folder_path)
with open(os.path.join(viewpoint_folder_path,'metadata.jsn'), mode='wb') as f:
f.write("viewfinder.jsonp_data =")
json.dump(results_dict['viewpoints'][0], f)
# Now, fetch all of the photos for this episode.
# We'll do this serially since writing the files will be done with blocking-IO and we don't want to
# overwhelm the server with the blocking-IO.
for photo_id,url in photos_to_fetch.items():
yield self._ProcessPhoto(viewpoint_folder_path, photo_id, url)
@gen.coroutine
def _BuildArchive(self):
"""Drive overall archive process as outlined in class header comment."""
logging.info('building archive for user: %d' % self._user_id)
        # Prepare the temporary destination folder (delete any existing content; we always start from scratch).
self._ResetArchiveDir()
# Copy in base assets and javascript which will drive browser experience of content for users.
proc = process.Subprocess(['cp',
'-R',
os.path.join(self._offboarding_assets_dir_path, 'web_code'),
self._content_dir_path])
code = yield gen.Task(proc.set_exit_callback)
if code != 0:
logging.error('Error copying offboarding assets: %d' % code)
raise IOError()
# Top level iteration is over viewpoints.
# For each viewpoint,
# iterate over activities and collect photos/episodes as needed.
# Build various 'tables' in json format:
# Activity, Comment, Episode, Photo, ...
#
viewpoints_dict = yield _QueryFollowedForArchive(self._client, self._user_id)
viewpoint_ids = [viewpoint['viewpoint_id'] for viewpoint in viewpoints_dict['viewpoints']]
followers_dict = yield _QueryViewpointsForArchive(self._client,
self._user_id,
viewpoint_ids,
get_followers=True)
for viewpoint, followers in zip(viewpoints_dict['viewpoints'], followers_dict['viewpoints']):
viewpoint['followers'] = followers
# Query user info for all users referenced by any of the viewpoints.
users_to_query = list({f['follower_id'] for vp in followers_dict['viewpoints'] for f in vp['followers']})
users_dict = yield _QueryUsersForArchive(self._client, self._user_id, users_to_query)
top_level_metadata_dict = dict(viewpoints_dict.items() + users_dict.items())
# Write the top level metadata to the root of the archive.
# TODO(mike): Consider moving this IO to thread pool to avoid blocking on main thread.
with open(os.path.join(self._content_dir_path, 'viewpoints.jsn'), mode='wb') as f:
# Need to set metadata as variable for JS code.
f.write("viewfinder.jsonp_data =")
json.dump(top_level_metadata_dict, f)
# Now, process each viewpoint.
for vp_dict in top_level_metadata_dict['viewpoints']:
if Follower.REMOVED not in vp_dict['labels']:
yield self._ProcessViewpoint(vp_dict)
# Now, generate user specific view file: index.html.
# This is the file that the user will open to launch the web client view of their data.
recipient_user = yield gen.Task(User.Query, self._client, self._user_id, None)
user_info = {'user_id' : recipient_user.user_id,
'name' : recipient_user.name,
'email' : recipient_user.email,
'phone' : recipient_user.phone,
'default_viewpoint_id' : recipient_user.private_vp_id
}
        view_local = ResourcesManager.Instance().GenerateTemplate(
            'view_local.html', user_info=user_info, viewpoint_id=None)
with open(os.path.join(self._content_dir_path, 'index.html'), mode='wb') as f:
f.write(view_local)
with open(os.path.join(self._content_dir_path, 'README.txt'), mode='wb') as f:
f.write("This Viewfinder archive contains both a readable local HTML file " +
"and backup folders including all photos included in those conversations.\n")
# Exec zip command relative to the parent of content dir so that paths in zip are relative to that.
proc = process.Subprocess(['zip',
'-r',
BuildArchiveOperation._ZIP_FILE_NAME,
BuildArchiveOperation._CONTENT_DIR_NAME],
cwd=self._temp_dir_path)
code = yield gen.Task(proc.set_exit_callback)
if code != 0:
logging.error('Error creating offboarding zip file: %d' % code)
raise IOError()
# Key is: "{user_id}/{timestamp}_{random}/Viewfinder.zip"
# timestamp is utc unix timestamp.
s3_key = '%d/%d_%d/Viewfinder.zip' % (self._user_id,
calendar.timegm(datetime.datetime.utcnow().utctimetuple()),
int(random.random() * 1000000))
if options.options.fileobjstore:
# Next, upload this to S3 (really fileobjstore in this case).
with open(self._zip_file_path, mode='rb') as f:
s3_data = f.read()
yield gen.Task(self._user_zips_obj_store.Put, s3_key, s3_data)
else:
# Running against AWS S3, so use awscli to upload zip file into S3.
s3_path = 's3://' + ObjectStore.USER_ZIPS_BUCKET + '/' + s3_key
# Use awscli to copy file into S3.
proc = process.Subprocess(['aws', 's3', 'cp', self._zip_file_path, s3_path, '--region', 'us-east-1'],
stdout=process.Subprocess.STREAM,
stderr=process.Subprocess.STREAM,
env={'AWS_ACCESS_KEY_ID': GetSecret('aws_access_key_id'),
'AWS_SECRET_ACCESS_KEY': GetSecret('aws_secret_access_key')})
result, error, code = yield [
gen.Task(proc.stdout.read_until_close),
gen.Task(proc.stderr.read_until_close),
gen.Task(proc.set_exit_callback)
]
if code != 0:
logging.error("%d = 'aws s3 cp %s %s': %s" % (code, self._zip_file_path, s3_path, error))
if result and len(result) > 0:
logging.info("aws result: %s" % result)
raise IOError()
        # Generate signed URL to S3 for the given user zip.
        # The link expires after 3 * _S3_ZIP_FILE_ACCESS_EXPIRATION seconds.
s3_url = self._user_zips_obj_store.GenerateUrl(s3_key,
cache_control='private,max-age=%d' %
self._S3_ZIP_FILE_ACCESS_EXPIRATION,
expires_in=3 * self._S3_ZIP_FILE_ACCESS_EXPIRATION)
logging.info('user zip uploaded: %s' % s3_url)
# Finally, send the user an email with the link to download the zip files just uploaded to s3.
email_args = {'from': EmailManager.Instance().GetInfoAddress(),
'to': self._email,
'subject': 'Your Viewfinder archive download is ready'}
fmt_args = {'archive_url': s3_url,
'hello_name': recipient_user.given_name or recipient_user.name}
email_args['text'] = ResourcesManager.Instance().GenerateTemplate('user_zip.email', is_html=False, **fmt_args)
yield gen.Task(EmailManager.Instance().SendEmail, description='user archive zip', **email_args)
|
|
"""
This script performs an out-of-core groupby operation for different datasets.
The datasets to be processed are normally in CSV files, and the key and
value to be used for the grouping are defined programmatically via small
functions (see toy_stream() and statsmodel_stream() for examples).
The datasets included in statsmodels require that package to be
installed (it is available in Anaconda, so it should be an easy
dependency to solve).
Usage: $ `script` dataset_class dataset_filename
`dataset_class` can be either 'toy', 'randhie' or 'contributions'.
'toy' is a self-contained dataset and is meant mainly for debugging.
'randhie' implements support for the dataset of the same name
included in the statsmodels package.
Finally, 'contributions' is meant to compute aggregations on the
contributions to the different US campaigns. The latter requires a
second argument (dataset_filename), which is a CSV file downloaded from:
http://data.influenceexplorer.com/bulk/
"""
import sys
from itertools import islice
import io
import csv
import numpy as np
from dynd import nd, ndt
import blz
# Number of lines to read per each iteration
LPC = 1000
# Max number of chars to map for a bytes or string in NumPy
MAXCHARS = 64
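# Example invocations (illustrative; the actual script filename may differ):
#   $ python groupby.py toy
#   $ python groupby.py randhie
#   $ python groupby.py contributions contributions.csv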
def get_nptype(dtype, val):
"""Convert the `val` field in dtype into a numpy dtype."""
dytype = dtype[nd.as_py(dtype.field_names).index(val)]
# strings and bytes cannot be natively represented in numpy
if dytype == ndt.string:
nptype = np.dtype("U%d" % MAXCHARS)
elif dytype == ndt.bytes:
nptype = np.dtype("S%d" % MAXCHARS)
else:
# There should be no problems with the rest
nptype = dytype.as_numpy()
return nptype
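# Illustrative mapping (given the defaults above): with MAXCHARS == 64 an
# ndt.string field maps to np.dtype('U64'), an ndt.bytes field maps to
# np.dtype('S64'), and any other field type is converted via as_numpy().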
def groupby(sreader, key, val, dtype, path=None, lines_per_chunk=LPC):
"""Group the `val` field in `sreader` stream of lines by `key` index.
Parameters
----------
sreader : iterator
Iterator over a stream of CSV lines.
key : string
The name of the field to be grouped by.
val : string
The field name with the values that have to be grouped.
dtype : dynd dtype
The DyND data type with all the fields of the CSV lines,
including the `key` and `val` names.
path : string
The path of the file where the BLZ array with the final
grouping will be stored. If None (default), the BLZ will be
stored in-memory (and hence non-persistent).
lines_per_chunk : int
        The number of lines that are read and grouped in memory per
        iteration. For optimal performance some experimentation may be
        needed. The default value should work reasonably well, though.
Returns
-------
output : BLZ table
Returns a BLZ table with column names that are the groups
resulting from the groupby operation. The columns are filled
with the `val` field of the lines delivered by `sreader`.
"""
try:
nptype = get_nptype(dtype, val)
except ValueError:
raise ValueError("`val` should be a valid field")
# Start reading chunks
prev_keys = set()
while True:
ndbuf = nd.array(islice(sreader, lines_per_chunk), dtype)
if len(ndbuf) == 0: break # CSV data exhausted
# Do the groupby for this chunk
keys = getattr(ndbuf, key)
if val is None:
vals = ndbuf
else:
vals = getattr(ndbuf, val)
sby = nd.groupby(vals, keys)
lkeys = nd.as_py(sby.groups)
skeys = set(lkeys)
# BLZ does not understand dynd objects (yet)
sby = nd.as_py(sby.eval())
if len(prev_keys) == 0:
# Add the initial keys to a BLZ table
columns = [np.array(sby[i], nptype) for i in range(len(lkeys))]
ssby = blz.btable(columns=columns, names=lkeys, rootdir=path,
mode='w')
else:
            # Do we have any new keys?
new_keys = skeys.difference(prev_keys)
for new_key in new_keys:
# Get the index of the new key
idx = lkeys.index(new_key)
                # and add the values as a new column
ssby.addcol(sby[idx], new_key, dtype=nptype)
# Now fill the pre-existing keys
existing_keys = skeys.intersection(prev_keys)
for existing_key in existing_keys:
# Get the index of the existing key
idx = lkeys.index(existing_key)
# and append the values here
ssby[existing_key].append(sby[idx])
# Add the new keys to the existing ones
prev_keys |= skeys
    # Before returning, flush all data to disk
if path is not None:
ssby.flush()
return ssby
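# Minimal usage sketch for groupby() (mirrors the toy dataset defined below;
# the resulting BLZ table stays in memory because path is None):
#   sreader, dt = toy_stream()
#   ssby = groupby(sreader, 'key', 'val1', dtype=dt, path=None,
#                  lines_per_chunk=2)
#   # ssby.names lists one column per distinct key, e.g. ['k1', 'k2', ...]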
# A CSV toy example
csvbuf = u"""k1,v1,1,u1
k2,v2,2,u2
k3,v3,3,u3
k4,v4,4,u4
k5,v5,5,u5
k5,v6,6,u6
k4,v7,7,u7
k4,v8,8,u8
k4,v9,9,u9
k1,v10,10,u9
k5,v11,11,u11
"""
def toy_stream():
sreader = csv.reader(io.StringIO(csvbuf))
# The dynd dtype for the CSV file above
dt = ndt.type('{key: string, val1: string, val2: int32, val3: bytes}')
# The name of the persisted table where the groupby will be stored
return sreader, dt
# This accesses different datasets in the statsmodels package
def statsmodel_stream(stream):
import statsmodels.api as sm
data = getattr(sm.datasets, stream)
f = open(data.PATH, 'rb')
if stream == 'randhie':
# For a description of this dataset, see:
# http://statsmodels.sourceforge.net/devel/datasets/generated/randhie.html
f.readline() # read out the headers line
dtypes = ('{mdvis: string, lncoins: float32, idp: int32,'
' lpi:float32, fmde: float32, physlm: float32,'
' disea: float32, hlthg: int32, hlthf: int32,'
' hlthp: int32}')
else:
raise NotImplementedError(
"Importing this dataset has not been implemented yet")
sreader = csv.reader(f)
dtype = ndt.type(dtypes)
return sreader, dtype
# For contributions to state and federal US campaigns.
# CSV files can be downloaded from:
# http://data.influenceexplorer.com/bulk/
def contributions_stream(stream_file):
f = open(stream_file, 'rb')
    # The first line holds the column headers describing this dataset
headers = f.readline().strip() # read out the headers line
headers = headers.split(',')
# The types for the different fields
htypes = [ ndt.int32, ndt.int16, ndt.int16] + \
[ ndt.string ] * 4 + \
[ ndt.bool, ndt.float64 ] + \
[ ndt.string ] * 33
# Build the DyND data type
dtype = ndt.make_struct(htypes, headers)
sreader = csv.reader(f)
return sreader, dtype
if __name__ == "__main__":
if len(sys.argv) == 1:
print("Specify a dataset from: [toy, randhie, contributions]")
sys.exit()
# Which dataset do we want to group?
which = sys.argv[1]
if which == "toy":
# Get the CSV iterator and dtype of fields
sreader, dt = toy_stream()
        # Do the actual groupby
ssby = groupby(sreader, 'key', 'val1', dtype=dt, path=None,
lines_per_chunk=2)
elif which == "randhie":
# Get the CSV iterator and dtype of fields
sreader, dt = statsmodel_stream(which)
        # Do the actual groupby
ssby = groupby(sreader, 'mdvis', 'lncoins', dtype=dt, path=None)
elif which == "contributions":
# Get the CSV iterator and dtype of fields
if len(sys.argv) < 3:
print("Please specify a contributions file downloaded from: "
"http://data.influenceexplorer.com/bulk/")
sys.exit()
stream_file = sys.argv[2]
sreader, dt = contributions_stream(stream_file)
        # Do the actual groupby
ssby = groupby(
sreader, 'recipient_party', 'amount', dtype=dt, path='contribs.blz')
else:
raise NotImplementedError(
"parsing for `%s` dataset not implemented" % which)
# Retrieve the data in the BLZ structure
#ssby = blz.from_blz(path) # open from disk, if ssby is persistent
for key in ssby.names:
values = ssby[key]
if which in ('toy', 'randhie'):
print "key:", key, values
elif which == 'contributions':
print "Party: '%s'\tAmount: %13.2f\t#contribs: %8d" % \
(key, values.sum(), len(values))
|
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Javelin makes resources that should survive an upgrade.
Javelin is a tool for creating, verifying, and deleting a small set of
resources in a declarative way.
"""
import argparse
import datetime
import logging
import os
import sys
import unittest
import yaml
import tempest.auth
from tempest import config
from tempest import exceptions
from tempest.openstack.common import timeutils
from tempest.services.compute.json import flavors_client
from tempest.services.compute.json import servers_client
from tempest.services.identity.json import identity_client
from tempest.services.image.v2.json import image_client
from tempest.services.object_storage import container_client
from tempest.services.object_storage import object_client
from tempest.services.telemetry.json import telemetry_client
from tempest.services.volume.json import volumes_client
OPTS = {}
USERS = {}
RES = {}
LOG = None
JAVELIN_START = datetime.datetime.utcnow()
class OSClient(object):
_creds = None
identity = None
servers = None
def __init__(self, user, pw, tenant):
_creds = tempest.auth.KeystoneV2Credentials(
username=user,
password=pw,
tenant_name=tenant)
_auth = tempest.auth.KeystoneV2AuthProvider(_creds)
self.identity = identity_client.IdentityClientJSON(_auth)
self.servers = servers_client.ServersClientJSON(_auth)
self.objects = object_client.ObjectClient(_auth)
self.containers = container_client.ContainerClient(_auth)
self.images = image_client.ImageClientV2JSON(_auth)
self.flavors = flavors_client.FlavorsClientJSON(_auth)
self.telemetry = telemetry_client.TelemetryClientJSON(_auth)
self.volumes = volumes_client.VolumesClientJSON(_auth)
def load_resources(fname):
"""Load the expected resources from a yaml flie."""
return yaml.load(open(fname, 'r'))
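# An illustrative sketch of a resources yaml file. The top-level keys and the
# per-entry fields are inferred from how RES is consumed below (create_users,
# collect_users and the create_*/destroy_* functions); all values are made up:
#
#   tenants:
#     - javelin
#   users:
#     - name: javelin
#       pass: gungnir
#       tenant: javelin
#   objects:
#     - owner: javelin
#       container: javelin1
#       name: javelin1
#       file: /opt/stack/old/devstack/files/default_names
#   images:
#     - owner: javelin
#       name: javelin_cirros
#       format: qcow2
#       imgdir: files/images/cirros
#       file: cirros-disk.img
#   servers:
#     - name: peltast
#       owner: javelin
#       image: javelin_cirros
#       flavor: m1.small
#   volumes:
#     - name: assegai
#       owner: javelin
#       size: 1
#       server: peltast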
def keystone_admin():
return OSClient(OPTS.os_username, OPTS.os_password, OPTS.os_tenant_name)
def client_for_user(name):
LOG.debug("Entering client_for_user")
if name in USERS:
user = USERS[name]
LOG.debug("Created client for user %s" % user)
return OSClient(user['name'], user['pass'], user['tenant'])
else:
LOG.error("%s not found in USERS: %s" % (name, USERS))
###################
#
# TENANTS
#
###################
def create_tenants(tenants):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
admin = keystone_admin()
_, body = admin.identity.list_tenants()
existing = [x['name'] for x in body]
for tenant in tenants:
if tenant not in existing:
admin.identity.create_tenant(tenant)
else:
LOG.warn("Tenant '%s' already exists in this environment" % tenant)
def destroy_tenants(tenants):
admin = keystone_admin()
for tenant in tenants:
tenant_id = admin.identity.get_tenant_by_name(tenant)['id']
r, body = admin.identity.delete_tenant(tenant_id)
##############
#
# USERS
#
##############
def _users_for_tenant(users, tenant):
u_for_t = []
for user in users:
for n in user:
if user[n]['tenant'] == tenant:
u_for_t.append(user[n])
return u_for_t
def _tenants_from_users(users):
tenants = set()
for user in users:
for n in user:
tenants.add(user[n]['tenant'])
return tenants
def _assign_swift_role(user):
admin = keystone_admin()
resp, roles = admin.identity.list_roles()
role = next(r for r in roles if r['name'] == 'Member')
LOG.debug(USERS[user])
try:
admin.identity.assign_user_role(
USERS[user]['tenant_id'],
USERS[user]['id'],
role['id'])
except exceptions.Conflict:
# don't care if it's already assigned
pass
def create_users(users):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
global USERS
LOG.info("Creating users")
admin = keystone_admin()
for u in users:
try:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
except exceptions.NotFound:
LOG.error("Tenant: %s - not found" % u['tenant'])
continue
try:
admin.identity.get_user_by_username(tenant['id'], u['name'])
LOG.warn("User '%s' already exists in this environment"
% u['name'])
except exceptions.NotFound:
admin.identity.create_user(
u['name'], u['pass'], tenant['id'],
"%s@%s" % (u['name'], tenant['id']),
enabled=True)
def destroy_users(users):
admin = keystone_admin()
for user in users:
user_id = admin.identity.get_user_by_name(user['name'])['id']
r, body = admin.identity.delete_user(user_id)
def collect_users(users):
global USERS
LOG.info("Collecting users")
admin = keystone_admin()
for u in users:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
u['tenant_id'] = tenant['id']
USERS[u['name']] = u
body = admin.identity.get_user_by_username(tenant['id'], u['name'])
USERS[u['name']]['id'] = body['id']
class JavelinCheck(unittest.TestCase):
def __init__(self, users, resources):
super(JavelinCheck, self).__init__()
self.users = users
self.res = resources
def runTest(self, *args):
pass
def check(self):
self.check_users()
self.check_objects()
self.check_servers()
# TODO(sdague): Volumes not yet working, bring it back once the
# code is self testing.
# self.check_volumes()
self.check_telemetry()
def check_users(self):
"""Check that the users we expect to exist, do.
We don't use the resource list for this because we need to validate
that things like tenantId didn't drift across versions.
"""
LOG.info("checking users")
for name, user in self.users.iteritems():
client = keystone_admin()
_, found = client.identity.get_user(user['id'])
self.assertEqual(found['name'], user['name'])
self.assertEqual(found['tenantId'], user['tenant_id'])
# also ensure we can auth with that user, and do something
# on the cloud. We don't care about the results except that it
# remains authorized.
client = client_for_user(user['name'])
resp, body = client.servers.list_servers()
self.assertEqual(resp['status'], '200')
def check_objects(self):
"""Check that the objects created are still there."""
if not self.res.get('objects'):
return
LOG.info("checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
r, contents = client.objects.get_object(
obj['container'], obj['name'])
source = _file_contents(obj['file'])
self.assertEqual(contents, source)
def check_servers(self):
"""Check that the servers are still up and running."""
if not self.res.get('servers'):
return
LOG.info("checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
found = _get_server_by_name(client, server['name'])
self.assertIsNotNone(
found,
"Couldn't find expected server %s" % server['name'])
r, found = client.servers.get_server(found['id'])
# get the ipv4 address
addr = found['addresses']['private'][0]['addr']
for count in range(60):
return_code = os.system("ping -c1 " + addr)
                if return_code == 0:
break
self.assertNotEqual(count, 59,
"Server %s is not pingable at %s" % (
server['name'], addr))
def check_telemetry(self):
"""Check that ceilometer provides a sane sample.
Confirm that there are more than one sample and that they have the
expected metadata.
If in check mode confirm that the oldest sample available is from
before the upgrade.
"""
LOG.info("checking telemetry")
for server in self.res['servers']:
client = client_for_user(server['owner'])
response, body = client.telemetry.list_samples(
'instance',
query=('metadata.display_name', 'eq', server['name'])
)
self.assertEqual(response.status, 200)
self.assertTrue(len(body) >= 1, 'expecting at least one sample')
self._confirm_telemetry_sample(server, body[-1])
def check_volumes(self):
"""Check that the volumes are still there and attached."""
if not self.res.get('volumes'):
return
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
found = _get_volume_by_name(client, volume['name'])
self.assertIsNotNone(
found,
"Couldn't find expected volume %s" % volume['name'])
            # Verify that the volume's attachment can be retrieved
server_id = _get_server_by_name(client, volume['server'])['id']
attachment = self.client.get_attachment_from_volume(volume)
self.assertEqual(volume['id'], attachment['volume_id'])
self.assertEqual(server_id, attachment['server_id'])
def _confirm_telemetry_sample(self, server, sample):
"""Check this sample matches the expected resource metadata."""
# Confirm display_name
self.assertEqual(server['name'],
sample['resource_metadata']['display_name'])
# Confirm instance_type of flavor
flavor = sample['resource_metadata'].get(
'flavor.name',
sample['resource_metadata'].get('instance_type')
)
self.assertEqual(server['flavor'], flavor)
# Confirm the oldest sample was created before upgrade.
if OPTS.mode == 'check':
oldest_timestamp = timeutils.normalize_time(
timeutils.parse_isotime(sample['timestamp']))
self.assertTrue(
oldest_timestamp < JAVELIN_START,
'timestamp should come before start of second javelin run'
)
#######################
#
# OBJECTS
#
#######################
def _file_contents(fname):
with open(fname, 'r') as f:
return f.read()
def create_objects(objects):
if not objects:
return
LOG.info("Creating objects")
for obj in objects:
LOG.debug("Object %s" % obj)
_assign_swift_role(obj['owner'])
client = client_for_user(obj['owner'])
client.containers.create_container(obj['container'])
client.objects.create_object(
obj['container'], obj['name'],
_file_contents(obj['file']))
def destroy_objects(objects):
for obj in objects:
client = client_for_user(obj['owner'])
r, body = client.objects.delete_object(obj['container'], obj['name'])
        if not (200 <= int(r['status']) < 299):
raise ValueError("unable to destroy object: [%s] %s" % (r, body))
#######################
#
# IMAGES
#
#######################
def _resolve_image(image, imgtype):
name = image[imgtype]
fname = os.path.join(OPTS.devstack_base, image['imgdir'], name)
return name, fname
def _get_image_by_name(client, name):
r, body = client.images.image_list()
for image in body:
if name == image['name']:
return image
return None
def create_images(images):
if not images:
return
LOG.info("Creating images")
for image in images:
client = client_for_user(image['owner'])
# only upload a new image if the name isn't there
if _get_image_by_name(client, image['name']):
LOG.info("Image '%s' already exists" % image['name'])
continue
# special handling for 3 part image
extras = {}
if image['format'] == 'ami':
name, fname = _resolve_image(image, 'aki')
r, aki = client.images.create_image(
'javelin_' + name, 'aki', 'aki')
client.images.store_image(aki.get('id'), open(fname, 'r'))
extras['kernel_id'] = aki.get('id')
name, fname = _resolve_image(image, 'ari')
r, ari = client.images.create_image(
'javelin_' + name, 'ari', 'ari')
client.images.store_image(ari.get('id'), open(fname, 'r'))
extras['ramdisk_id'] = ari.get('id')
_, fname = _resolve_image(image, 'file')
r, body = client.images.create_image(
image['name'], image['format'], image['format'], **extras)
image_id = body.get('id')
client.images.store_image(image_id, open(fname, 'r'))
def destroy_images(images):
if not images:
return
LOG.info("Destroying images")
for image in images:
client = client_for_user(image['owner'])
response = _get_image_by_name(client, image['name'])
if not response:
LOG.info("Image '%s' does not exists" % image['name'])
continue
client.images.delete_image(response['id'])
#######################
#
# SERVERS
#
#######################
def _get_server_by_name(client, name):
r, body = client.servers.list_servers()
for server in body['servers']:
if name == server['name']:
return server
return None
def _get_flavor_by_name(client, name):
r, body = client.flavors.list_flavors()
for flavor in body:
if name == flavor['name']:
return flavor
return None
def create_servers(servers):
if not servers:
return
LOG.info("Creating servers")
for server in servers:
client = client_for_user(server['owner'])
if _get_server_by_name(client, server['name']):
LOG.info("Server '%s' already exists" % server['name'])
continue
image_id = _get_image_by_name(client, server['image'])['id']
flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
resp, body = client.servers.create_server(server['name'], image_id,
flavor_id)
server_id = body['id']
client.servers.wait_for_server_status(server_id, 'ACTIVE')
def destroy_servers(servers):
if not servers:
return
LOG.info("Destroying servers")
for server in servers:
client = client_for_user(server['owner'])
response = _get_server_by_name(client, server['name'])
if not response:
LOG.info("Server '%s' does not exist" % server['name'])
continue
client.servers.delete_server(response['id'])
client.servers.wait_for_server_termination(response['id'],
ignore_error=True)
#######################
#
# VOLUMES
#
#######################
def _get_volume_by_name(client, name):
r, body = client.volumes.list_volumes()
for volume in body['volumes']:
if name == volume['name']:
return volume
return None
def create_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
        # only create a volume if the name isn't already there
r, body = client.volumes.list_volumes()
if any(item['name'] == volume['name'] for item in body):
continue
client.volumes.create_volume(volume['name'], volume['size'])
def destroy_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
volume_id = _get_volume_by_name(client, volume['name'])['id']
r, body = client.volumes.delete_volume(volume_id)
def attach_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
server_id = _get_server_by_name(client, volume['server'])['id']
client.volumes.attach_volume(volume['name'], server_id)
#######################
#
# MAIN LOGIC
#
#######################
def create_resources():
LOG.info("Creating Resources")
# first create keystone level resources, and we need to be admin
# for those.
create_tenants(RES['tenants'])
create_users(RES['users'])
collect_users(RES['users'])
# next create resources in a well known order
create_objects(RES['objects'])
create_images(RES['images'])
create_servers(RES['servers'])
# TODO(sdague): volumes definition doesn't work yet, bring it
# back once we're actually executing the code
# create_volumes(RES['volumes'])
# attach_volumes(RES['volumes'])
def destroy_resources():
LOG.info("Destroying Resources")
# Destroy in inverse order of create
    destroy_servers(RES['servers'])
    destroy_images(RES['images'])
    destroy_objects(RES['objects'])
destroy_volumes(RES['volumes'])
destroy_users(RES['users'])
destroy_tenants(RES['tenants'])
LOG.warn("Destroy mode incomplete")
def get_options():
global OPTS
parser = argparse.ArgumentParser(
description='Create and validate a fixed set of OpenStack resources')
parser.add_argument('-m', '--mode',
metavar='<create|check|destroy>',
required=True,
help=('One of (create, check, destroy)'))
parser.add_argument('-r', '--resources',
required=True,
metavar='resourcefile.yaml',
help='Resources definition yaml file')
parser.add_argument(
'-d', '--devstack-base',
required=True,
metavar='/opt/stack/old',
help='Devstack base directory for retrieving artifacts')
parser.add_argument(
'-c', '--config-file',
metavar='/etc/tempest.conf',
help='path to javelin2(tempest) config file')
# auth bits, letting us also just source the devstack openrc
parser.add_argument('--os-username',
metavar='<auth-user-name>',
default=os.environ.get('OS_USERNAME'),
help=('Defaults to env[OS_USERNAME].'))
parser.add_argument('--os-password',
metavar='<auth-password>',
default=os.environ.get('OS_PASSWORD'),
help=('Defaults to env[OS_PASSWORD].'))
parser.add_argument('--os-tenant-name',
metavar='<auth-tenant-name>',
default=os.environ.get('OS_TENANT_NAME'),
help=('Defaults to env[OS_TENANT_NAME].'))
OPTS = parser.parse_args()
if OPTS.mode not in ('create', 'check', 'destroy'):
print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
parser.print_help()
sys.exit(1)
if OPTS.config_file:
config.CONF.set_config_path(OPTS.config_file)
def setup_logging(debug=True):
global LOG
LOG = logging.getLogger(__name__)
if debug:
LOG.setLevel(logging.DEBUG)
else:
LOG.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
datefmt='%Y-%m-%d %H:%M:%S',
fmt='%(asctime)s.%(msecs).03d - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
LOG.addHandler(ch)
def main():
global RES
get_options()
setup_logging()
RES = load_resources(OPTS.resources)
if OPTS.mode == 'create':
create_resources()
# Make sure the resources we just created actually work
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'check':
collect_users(RES['users'])
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'destroy':
collect_users(RES['users'])
destroy_resources()
else:
LOG.error('Unknown mode %s' % OPTS.mode)
return 1
LOG.info('javelin2 successfully finished')
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import six
import sys
import collections
import math
import paddle.fluid as fluid
from op_test import OpTest
class TestDetectionMAPOp(OpTest):
def set_data(self):
self.class_num = 4
self.init_test_case()
self.mAP = [self.calc_map(self.tf_pos, self.tf_pos_lod)]
self.label = np.array(self.label).astype('float32')
self.detect = np.array(self.detect).astype('float32')
self.mAP = np.array(self.mAP).astype('float32')
if len(self.class_pos_count) > 0:
self.class_pos_count = np.array(self.class_pos_count).astype(
'int32')
self.true_pos = np.array(self.true_pos).astype('float32')
self.false_pos = np.array(self.false_pos).astype('float32')
self.has_state = np.array([1]).astype('int32')
self.inputs = {
'Label': (self.label, self.label_lod),
'DetectRes': (self.detect, self.detect_lod),
'HasState': self.has_state,
'PosCount': self.class_pos_count,
'TruePos': (self.true_pos, self.true_pos_lod),
'FalsePos': (self.false_pos, self.false_pos_lod)
}
else:
self.inputs = {
'Label': (self.label, self.label_lod),
'DetectRes': (self.detect, self.detect_lod),
}
self.attrs = {
'overlap_threshold': self.overlap_threshold,
'evaluate_difficult': self.evaluate_difficult,
'ap_type': self.ap_type,
'class_num': self.class_num
}
self.out_class_pos_count = np.array(self.out_class_pos_count).astype(
'int')
self.out_true_pos = np.array(self.out_true_pos).astype('float32')
self.out_false_pos = np.array(self.out_false_pos).astype('float32')
self.outputs = {
'MAP': self.mAP,
'AccumPosCount': self.out_class_pos_count,
'AccumTruePos': (self.out_true_pos, self.out_true_pos_lod),
'AccumFalsePos': (self.out_false_pos, self.out_false_pos_lod)
}
def init_test_case(self):
self.overlap_threshold = 0.3
self.evaluate_difficult = True
self.ap_type = "integral"
self.label_lod = [[2, 2]]
# label difficult xmin ymin xmax ymax
self.label = [[1, 0, 0.1, 0.1, 0.3, 0.3], [1, 1, 0.6, 0.6, 0.8, 0.8],
[2, 0, 0.3, 0.3, 0.6, 0.5], [1, 0, 0.7, 0.1, 0.9, 0.3]]
        # label score xmin ymin xmax ymax
self.detect_lod = [[3, 4]]
self.detect = [
[1, 0.3, 0.1, 0.0, 0.4, 0.3], [1, 0.7, 0.0, 0.1, 0.2, 0.3],
[1, 0.9, 0.7, 0.6, 0.8, 0.8], [2, 0.8, 0.2, 0.1, 0.4, 0.4],
[2, 0.1, 0.4, 0.3, 0.7, 0.5], [1, 0.2, 0.8, 0.1, 1.0, 0.3],
[3, 0.2, 0.8, 0.1, 1.0, 0.3]
]
# label score true_pos false_pos
self.tf_pos_lod = [[3, 4]]
self.tf_pos = [[1, 0.9, 1, 0], [1, 0.7, 1, 0], [1, 0.3, 0, 1],
[1, 0.2, 1, 0], [2, 0.8, 0, 1], [2, 0.1, 1, 0],
[3, 0.2, 0, 1]]
self.class_pos_count = []
self.true_pos_lod = [[]]
self.true_pos = [[]]
self.false_pos_lod = [[]]
self.false_pos = [[]]
def calc_map(self, tf_pos, tf_pos_lod):
mAP = 0.0
count = 0
def get_input_pos(class_pos_count, true_pos, true_pos_lod, false_pos,
false_pos_lod):
class_pos_count_dict = collections.Counter()
true_pos_dict = collections.defaultdict(list)
false_pos_dict = collections.defaultdict(list)
for i, count in enumerate(class_pos_count):
class_pos_count_dict[i] = count
cur_pos = 0
for i in range(len(true_pos_lod[0])):
start = cur_pos
cur_pos += true_pos_lod[0][i]
end = cur_pos
for j in range(start, end):
true_pos_dict[i].append(true_pos[j])
cur_pos = 0
for i in range(len(false_pos_lod[0])):
start = cur_pos
cur_pos += false_pos_lod[0][i]
end = cur_pos
for j in range(start, end):
false_pos_dict[i].append(false_pos[j])
return class_pos_count_dict, true_pos_dict, false_pos_dict
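        # LoD sketch (illustrative, using the multi-batch test data defined
        # below in TestDetectionMAPOpMultiBatch): with
        #   true_pos_lod == [[0, 3, 2]]
        #   true_pos == [[0.7, 1.], [0.3, 0.], [0.2, 1.], [0.8, 0.], [0.1, 1.]]
        # class 0 owns no [score, tp] pairs, class 1 the first three and
        # class 2 the last two, so get_input_pos() above returns
        #   true_pos_dict == {1: [[0.7, 1.], [0.3, 0.], [0.2, 1.]],
        #                     2: [[0.8, 0.], [0.1, 1.]]}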
def get_output_pos(label_count, true_pos, false_pos):
label_number = self.class_num
out_class_pos_count = []
out_true_pos_lod = []
out_true_pos = []
out_false_pos_lod = []
out_false_pos = []
for i in range(label_number):
out_class_pos_count.append([label_count[i]])
true_pos_list = true_pos[i]
out_true_pos += true_pos_list
out_true_pos_lod.append(len(true_pos_list))
false_pos_list = false_pos[i]
out_false_pos += false_pos_list
out_false_pos_lod.append(len(false_pos_list))
return out_class_pos_count, out_true_pos, [
out_true_pos_lod
], out_false_pos, [out_false_pos_lod]
def get_accumulation(pos_list):
sorted_list = sorted(pos_list, key=lambda pos: pos[0], reverse=True)
            accu_sum = 0
            accu_list = []
            for (score, count) in sorted_list:
                accu_sum += count
                accu_list.append(accu_sum)
return accu_list
label_count, true_pos, false_pos = get_input_pos(
self.class_pos_count, self.true_pos, self.true_pos_lod,
self.false_pos, self.false_pos_lod)
for v in self.label:
label = v[0]
difficult = False if len(v) == 5 else v[1]
if self.evaluate_difficult:
label_count[label] += 1
elif not difficult:
label_count[label] += 1
for (label, score, tp, fp) in tf_pos:
true_pos[label].append([score, tp])
false_pos[label].append([score, fp])
for (label, label_pos_num) in six.iteritems(label_count):
if label_pos_num == 0: continue
if label not in true_pos:
count += 1
continue
label_true_pos = true_pos[label]
label_false_pos = false_pos[label]
accu_tp_sum = get_accumulation(label_true_pos)
accu_fp_sum = get_accumulation(label_false_pos)
precision = []
recall = []
for i in range(len(accu_tp_sum)):
precision.append(
float(accu_tp_sum[i]) /
float(accu_tp_sum[i] + accu_fp_sum[i]))
recall.append(float(accu_tp_sum[i]) / label_pos_num)
if self.ap_type == "11point":
max_precisions = [0.0] * 11
start_idx = len(accu_tp_sum) - 1
for j in range(10, -1, -1):
for i in range(start_idx, -1, -1):
if recall[i] < float(j) / 10.0:
start_idx = i
if j > 0:
max_precisions[j - 1] = max_precisions[j]
break
else:
if max_precisions[j] < precision[i]:
max_precisions[j] = precision[i]
for j in range(10, -1, -1):
mAP += max_precisions[j] / 11
count += 1
elif self.ap_type == "integral":
average_precisions = 0.0
prev_recall = 0.0
for i in range(len(accu_tp_sum)):
if math.fabs(recall[i] - prev_recall) > 1e-6:
average_precisions += precision[i] * \
math.fabs(recall[i] - prev_recall)
prev_recall = recall[i]
mAP += average_precisions
count += 1
pcnt, tp, tp_lod, fp, fp_lod = get_output_pos(label_count, true_pos,
false_pos)
self.out_class_pos_count = pcnt
self.out_true_pos = tp
self.out_true_pos_lod = tp_lod
self.out_false_pos = fp
self.out_false_pos_lod = fp_lod
if count != 0:
mAP /= count
return mAP
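    # Worked example of the "integral" branch above (numbers made up): with
    # accumulated true positives [1, 1, 2], false positives [0, 1, 1] and two
    # labelled positives, precision == [1.0, 0.5, 2/3] and
    # recall == [0.5, 0.5, 1.0]. Only the samples where recall changes
    # contribute, so AP = 1.0 * (0.5 - 0.0) + (2/3) * (1.0 - 0.5) ~= 0.833,
    # and mAP is that value averaged over every class that has positives.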
def setUp(self):
self.op_type = "detection_map"
self.set_data()
def test_check_output(self):
self.check_output()
class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp):
def init_test_case(self):
super(TestDetectionMAPOpSkipDiff, self).init_test_case()
self.evaluate_difficult = False
self.tf_pos_lod = [[2, 4]]
# label score true_pos false_pos
self.tf_pos = [[1, 0.7, 1, 0], [1, 0.3, 0, 1], [1, 0.2, 1, 0],
[2, 0.8, 0, 1], [2, 0.1, 1, 0], [3, 0.2, 0, 1]]
class TestDetectionMAPOpWithoutDiff(TestDetectionMAPOp):
def init_test_case(self):
super(TestDetectionMAPOpWithoutDiff, self).init_test_case()
# label xmin ymin xmax ymax
self.label = [[1, 0.1, 0.1, 0.3, 0.3], [1, 0.6, 0.6, 0.8, 0.8],
[2, 0.3, 0.3, 0.6, 0.5], [1, 0.7, 0.1, 0.9, 0.3]]
class TestDetectionMAPOp11Point(TestDetectionMAPOp):
def init_test_case(self):
super(TestDetectionMAPOp11Point, self).init_test_case()
self.ap_type = "11point"
class TestDetectionMAPOpMultiBatch(TestDetectionMAPOp):
def init_test_case(self):
super(TestDetectionMAPOpMultiBatch, self).init_test_case()
self.class_pos_count = [0, 2, 1, 0]
self.true_pos_lod = [[0, 3, 2]]
self.true_pos = [[0.7, 1.], [0.3, 0.], [0.2, 1.], [0.8, 0.], [0.1, 1.]]
self.false_pos_lod = [[0, 3, 2]]
self.false_pos = [[0.7, 0.], [0.3, 1.], [0.2, 0.], [0.8, 1.], [0.1, 0.]]
class TestDetectionMAPOp11PointWithClassNoTP(TestDetectionMAPOp):
def init_test_case(self):
self.overlap_threshold = 0.3
self.evaluate_difficult = True
self.ap_type = "11point"
self.label_lod = [[2]]
# label difficult xmin ymin xmax ymax
self.label = [[2, 0, 0.3, 0.3, 0.6, 0.5], [1, 0, 0.7, 0.1, 0.9, 0.3]]
        # label score xmin ymin xmax ymax
self.detect_lod = [[1]]
self.detect = [[1, 0.2, 0.8, 0.1, 1.0, 0.3]]
# label score true_pos false_pos
self.tf_pos_lod = [[3, 4]]
self.tf_pos = [[1, 0.2, 1, 0]]
self.class_pos_count = []
self.true_pos_lod = [[]]
self.true_pos = [[]]
self.false_pos_lod = [[]]
self.false_pos = [[]]
if __name__ == '__main__':
unittest.main()
|
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
from collections import namedtuple
import json
import os
import random
from string import ascii_letters
import subprocess
from tempfile import NamedTemporaryFile
import time
from atomic_reactor import __version__ as atomic_reactor_version
from atomic_reactor import start_time as atomic_reactor_start_time
from atomic_reactor.plugin import ExitPlugin
from atomic_reactor.source import GitSource
from atomic_reactor.plugins.post_rpmqa import PostBuildRPMqaPlugin
from atomic_reactor.plugins.pre_add_filesystem import AddFilesystemPlugin
from atomic_reactor.constants import PROG
from atomic_reactor.util import (get_version_of_tools, get_checksums,
get_build_json, get_preferred_label)
from atomic_reactor.koji_util import create_koji_session, TaskWatcher
from dockerfile_parse import DockerfileParser
from osbs.conf import Configuration
from osbs.api import OSBS
from osbs.exceptions import OsbsException
# An output file and its metadata
Output = namedtuple('Output', ['file', 'metadata'])
class KojiUploadLogger(object):
def __init__(self, logger, notable_percent=10):
self.logger = logger
self.notable_percent = notable_percent
self.last_percent_done = 0
def callback(self, offset, totalsize, size, t1, t2): # pylint: disable=W0613
if offset == 0:
self.logger.debug("upload size: %.1fMiB", totalsize / 1024.0 / 1024)
if not totalsize or not t1:
return
percent_done = 100 * offset / totalsize
if (percent_done >= 99 or
percent_done - self.last_percent_done >= self.notable_percent):
self.last_percent_done = percent_done
self.logger.debug("upload: %d%% done (%.1f MiB/sec)",
percent_done, size / t1 / 1024 / 1024)
class KojiPromotePlugin(ExitPlugin):
"""
Promote this build to Koji
Submits a successful build to Koji using the Content Generator API,
https://fedoraproject.org/wiki/Koji/ContentGenerators
Authentication is with Kerberos unless the koji_ssl_certs
configuration parameter is given, in which case it should be a
path at which 'cert', 'ca', and 'serverca' are the certificates
for SSL authentication.
If Kerberos is used for authentication, the default principal will
be used (from the kernel keyring) unless both koji_keytab and
koji_principal are specified. The koji_keytab parameter is a
keytab name like 'type:name', and so can be used to specify a key
in a Kubernetes secret by specifying 'FILE:/path/to/key'.
If metadata_only is set, the 'docker save' image will not be
uploaded, only the logs. The import will be marked as
metadata-only.
Runs as an exit plugin in order to capture logs from all other
plugins.
"""
key = "koji_promote"
is_allowed_to_fail = False
def __init__(self, tasker, workflow, kojihub, url,
verify_ssl=True, use_auth=True,
koji_ssl_certs=None, koji_proxy_user=None,
koji_principal=None, koji_keytab=None,
metadata_only=False, blocksize=None,
target=None, poll_interval=5):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param kojihub: string, koji hub (xmlrpc)
:param url: string, URL for OSv3 instance
:param verify_ssl: bool, verify OSv3 SSL certificate?
:param use_auth: bool, initiate authentication with OSv3?
:param koji_ssl_certs: str, path to 'cert', 'ca', 'serverca'
:param koji_proxy_user: str, user to log in as (requires hub config)
:param koji_principal: str, Kerberos principal (must specify keytab)
:param koji_keytab: str, keytab name (must specify principal)
:param metadata_only: bool, whether to omit the 'docker save' image
:param blocksize: int, blocksize to use for uploading files
:param target: str, koji target
:param poll_interval: int, seconds between Koji task status requests
"""
super(KojiPromotePlugin, self).__init__(tasker, workflow)
self.kojihub = kojihub
self.koji_ssl_certs = koji_ssl_certs
self.koji_proxy_user = koji_proxy_user
self.koji_principal = koji_principal
self.koji_keytab = koji_keytab
self.metadata_only = metadata_only
self.blocksize = blocksize
self.target = target
self.poll_interval = poll_interval
self.namespace = get_build_json().get('metadata', {}).get('namespace', None)
osbs_conf = Configuration(conf_file=None, openshift_uri=url,
use_auth=use_auth, verify_ssl=verify_ssl,
namespace=self.namespace)
self.osbs = OSBS(osbs_conf, osbs_conf)
self.build_id = None
self.nvr_image = None
@staticmethod
def parse_rpm_output(output, tags, separator=';'):
"""
Parse output of the rpm query.
:param output: list, decoded output (str) from the rpm subprocess
:param tags: list, str fields used for query output
:return: list, dicts describing each rpm package
"""
def field(tag):
"""
Get a field value by name
"""
try:
value = fields[tags.index(tag)]
except ValueError:
return None
if value == '(none)':
return None
return value
components = []
sigmarker = 'Key ID '
for rpm in output:
fields = rpm.rstrip('\n').split(separator)
if len(fields) < len(tags):
continue
signature = field('SIGPGP:pgpsig') or field('SIGGPG:pgpsig')
if signature:
parts = signature.split(sigmarker, 1)
if len(parts) > 1:
signature = parts[1]
component_rpm = {
'type': 'rpm',
'name': field('NAME'),
'version': field('VERSION'),
'release': field('RELEASE'),
'arch': field('ARCH'),
'sigmd5': field('SIGMD5'),
'signature': signature,
}
# Special handling for epoch as it must be an integer or None
epoch = field('EPOCH')
if epoch is not None:
epoch = int(epoch)
component_rpm['epoch'] = epoch
if component_rpm['name'] != 'gpg-pubkey':
components.append(component_rpm)
return components
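    # Illustrative input/output for parse_rpm_output() (package data made up):
    # a line such as
    #   "bash;4.2.46;20.el7;x86_64;(none);abc123;RSA/SHA256, ... Key ID 199e2f91fd431d51;(none)"
    # queried with the tags listed in get_rpms() below yields
    #   {'type': 'rpm', 'name': 'bash', 'version': '4.2.46',
    #    'release': '20.el7', 'arch': 'x86_64', 'sigmd5': 'abc123',
    #    'signature': '199e2f91fd431d51', 'epoch': None}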
def get_rpms(self):
"""
Build a list of installed RPMs in the format required for the
metadata.
"""
tags = [
'NAME',
'VERSION',
'RELEASE',
'ARCH',
'EPOCH',
'SIGMD5',
'SIGPGP:pgpsig',
'SIGGPG:pgpsig',
]
sep = ';'
fmt = sep.join(["%%{%s}" % tag for tag in tags])
cmd = "/bin/rpm -qa --qf '{0}\n'".format(fmt)
try:
# py3
(status, output) = subprocess.getstatusoutput(cmd)
except AttributeError:
# py2
with open('/dev/null', 'r+') as devnull:
p = subprocess.Popen(cmd,
shell=True,
stdin=devnull,
stdout=subprocess.PIPE,
stderr=devnull)
(stdout, stderr) = p.communicate()
status = p.wait()
output = stdout.decode()
if status != 0:
self.log.debug("%s: stderr output: %s", cmd, stderr)
raise RuntimeError("%s: exit code %s" % (cmd, status))
return self.parse_rpm_output(output.splitlines(), tags, separator=sep)
def get_output_metadata(self, path, filename):
"""
Describe a file by its metadata.
:return: dict
"""
checksums = get_checksums(path, ['md5'])
metadata = {'filename': filename,
'filesize': os.path.getsize(path),
'checksum': checksums['md5sum'],
'checksum_type': 'md5'}
if self.metadata_only:
metadata['metadata_only'] = True
return metadata
def get_builder_image_id(self):
"""
Find out the docker ID of the buildroot image we are in.
"""
try:
buildroot_tag = os.environ["OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE"]
except KeyError:
return ''
try:
pod = self.osbs.get_pod_for_build(self.build_id)
all_images = pod.get_container_image_ids()
except OsbsException as ex:
self.log.error("unable to find image id: %r", ex)
return buildroot_tag
try:
return all_images[buildroot_tag]
except KeyError:
self.log.error("Unable to determine buildroot image ID for %s",
buildroot_tag)
return buildroot_tag
def get_buildroot(self, build_id):
"""
Build the buildroot entry of the metadata.
:return: dict, partial metadata
"""
docker_version = self.tasker.get_version()
docker_info = self.tasker.get_info()
host_arch = docker_version['Arch']
if host_arch == 'amd64':
host_arch = 'x86_64'
buildroot = {
'id': 1,
'host': {
'os': docker_info['OperatingSystem'],
'arch': host_arch,
},
'content_generator': {
'name': PROG,
'version': atomic_reactor_version,
},
'container': {
'type': 'docker',
'arch': os.uname()[4],
},
'tools': [
{
'name': tool['name'],
'version': tool['version'],
}
for tool in get_version_of_tools()] + [
{
'name': 'docker',
'version': docker_version['Version'],
},
],
'components': self.get_rpms(),
'extra': {
'osbs': {
'build_id': build_id,
'builder_image_id': self.get_builder_image_id(),
}
},
}
return buildroot
def get_logs(self):
"""
Build the logs entry for the metadata 'output' section
:return: list, Output instances
"""
output = []
# Collect logs from server
try:
logs = self.osbs.get_build_logs(self.build_id)
except OsbsException as ex:
self.log.error("unable to get build logs: %r", ex)
else:
# Deleted once closed
logfile = NamedTemporaryFile(prefix=self.build_id,
suffix=".log",
mode='w')
logfile.write(logs)
logfile.flush()
metadata = self.get_output_metadata(logfile.name,
"openshift-final.log")
output.append(Output(file=logfile, metadata=metadata))
docker_logs = NamedTemporaryFile(prefix="docker-%s" % self.build_id,
suffix=".log",
mode='w')
docker_logs.write("\n".join(self.workflow.build_logs))
docker_logs.flush()
output.append(Output(file=docker_logs,
metadata=self.get_output_metadata(docker_logs.name,
"build.log")))
return output
def get_image_components(self):
"""
Re-package the output of the rpmqa plugin into the format required
for the metadata.
"""
try:
output = self.workflow.postbuild_results[PostBuildRPMqaPlugin.key]
except KeyError:
self.log.error("%s plugin did not run!",
PostBuildRPMqaPlugin.key)
return []
return self.parse_rpm_output(output, PostBuildRPMqaPlugin.rpm_tags,
separator=',')
def get_image_output(self, arch):
"""
Create the output for the image
This is the Koji Content Generator metadata, along with the
'docker save' output to upload.
For metadata-only builds, an empty file is used instead of the
output of 'docker save'.
:param arch: str, architecture for this output
:return: tuple, (metadata dict, Output instance)
"""
image_id = self.workflow.builder.image_id
saved_image = self.workflow.exported_image_sequence[-1].get('path')
ext = saved_image.split('.', 1)[1]
name_fmt = 'docker-image-{id}.{arch}.{ext}'
image_name = name_fmt.format(id=image_id, arch=arch, ext=ext)
if self.metadata_only:
metadata = self.get_output_metadata(os.path.devnull, image_name)
output = Output(file=None, metadata=metadata)
else:
metadata = self.get_output_metadata(saved_image, image_name)
output = Output(file=open(saved_image), metadata=metadata)
return metadata, output
def get_digests(self):
"""
Returns a map of repositories to digests
"""
digests = {} # repository -> digest
for registry in self.workflow.push_conf.docker_registries:
for image in self.workflow.tag_conf.images:
image_str = image.to_str()
if image_str in registry.digests:
digest = registry.digests[image_str]
digests[image.to_str(registry=False)] = digest
return digests
def get_repositories(self, digests):
"""
Build the repositories metadata
:param digests: dict, repository -> digest
"""
if self.workflow.push_conf.pulp_registries:
# If pulp was used, only report pulp images
registries = self.workflow.push_conf.pulp_registries
else:
# Otherwise report all the images we pushed
registries = self.workflow.push_conf.all_registries
output_images = []
for registry in registries:
image = self.nvr_image.copy()
image.registry = registry.uri
pullspec = image.to_str()
output_images.append(pullspec)
digest = digests.get(image.to_str(registry=False))
if digest:
digest_pullspec = image.to_str(tag=False) + "@" + digest
output_images.append(digest_pullspec)
return output_images
def get_output(self, buildroot_id):
"""
Build the 'output' section of the metadata.
:return: list, Output instances
"""
def add_buildroot_id(output):
logfile, metadata = output
metadata.update({'buildroot_id': buildroot_id})
return Output(file=logfile, metadata=metadata)
def add_log_type(output):
logfile, metadata = output
metadata.update({'type': 'log', 'arch': 'noarch'})
return Output(file=logfile, metadata=metadata)
output_files = [add_log_type(add_buildroot_id(metadata))
for metadata in self.get_logs()]
# Parent of squashed built image is base image
image_id = self.workflow.builder.image_id
parent_id = self.workflow.base_image_inspect['Id']
digests = self.get_digests()
repositories = self.get_repositories(digests)
arch = os.uname()[4]
metadata, output = self.get_image_output(arch)
metadata.update({
'arch': arch,
'type': 'docker-image',
'components': self.get_image_components(),
'extra': {
'image': {
'arch': arch,
},
'docker': {
'id': image_id,
'parent_id': parent_id,
'repositories': repositories,
},
},
})
# Add the 'docker save' image to the output
image = add_buildroot_id(output)
output_files.append(image)
return output_files
def get_build(self, metadata):
start_time = int(atomic_reactor_start_time)
labels = DockerfileParser(self.workflow.builder.df_path).labels
component = get_preferred_label(labels, 'com.redhat.component')
version = get_preferred_label(labels, 'version')
release = get_preferred_label(labels, 'release')
source = self.workflow.source
if not isinstance(source, GitSource):
raise RuntimeError('git source required')
extra = {'image': {}}
koji_task_id = metadata.get('labels', {}).get('koji-task-id')
if koji_task_id is not None:
self.log.info("build configuration created by Koji Task ID %s",
koji_task_id)
extra['container_koji_task_id'] = koji_task_id
fs_result = self.workflow.prebuild_results.get(AddFilesystemPlugin.key)
if fs_result is not None:
try:
task_id = fs_result['filesystem-koji-task-id']
except KeyError:
self.log.error("%s: expected filesystem-koji-task-id in result",
AddFilesystemPlugin.key)
else:
extra['filesystem_koji_task_id'] = str(task_id)
build = {
'name': component,
'version': version,
'release': release,
'source': "{0}#{1}".format(source.uri, source.commit_id),
'start_time': start_time,
'end_time': int(time.time()),
'extra': extra,
}
if self.metadata_only:
build['metadata_only'] = True
return build
def get_metadata(self):
"""
Build the metadata needed for importing the build
:return: tuple, the metadata and the list of Output instances
"""
try:
metadata = get_build_json()["metadata"]
self.build_id = metadata["name"]
except KeyError:
self.log.error("No build metadata")
raise
for image in self.workflow.tag_conf.primary_images:
            # dash at first/last position does not count
if '-' in image.tag[1:-1]:
self.nvr_image = image
break
else:
raise RuntimeError('Unable to determine name:version-release')
metadata_version = 0
build = self.get_build(metadata)
buildroot = self.get_buildroot(build_id=self.build_id)
output_files = self.get_output(buildroot['id'])
koji_metadata = {
'metadata_version': metadata_version,
'build': build,
'buildroots': [buildroot],
'output': [output.metadata for output in output_files],
}
return koji_metadata, output_files
def upload_file(self, session, output, serverdir):
"""
Upload a file to koji
:return: str, pathname on server
"""
name = output.metadata['filename']
self.log.debug("uploading %r to %r as %r",
output.file.name, serverdir, name)
kwargs = {}
if self.blocksize is not None:
kwargs['blocksize'] = self.blocksize
self.log.debug("using blocksize %d", self.blocksize)
upload_logger = KojiUploadLogger(self.log)
session.uploadWrapper(output.file.name, serverdir, name=name,
callback=upload_logger.callback, **kwargs)
path = os.path.join(serverdir, name)
self.log.debug("uploaded %r", path)
return path
@staticmethod
def get_upload_server_dir():
"""
Create a path name for uploading files to
:return: str, path name expected to be unique
"""
dir_prefix = 'koji-promote'
random_chars = ''.join([random.choice(ascii_letters)
for _ in range(8)])
unique_fragment = '%r.%s' % (time.time(), random_chars)
return os.path.join(dir_prefix, unique_fragment)
def login(self):
"""
Log in to koji
:return: koji.ClientSession instance, logged in
"""
auth_info = {
"proxyuser": self.koji_proxy_user,
"ssl_certs_dir": self.koji_ssl_certs,
"krb_principal": self.koji_principal,
"krb_keytab": self.koji_keytab
}
return create_koji_session(self.kojihub, auth_info)
def run(self):
"""
Run the plugin.
"""
if ((self.koji_principal and not self.koji_keytab) or
(self.koji_keytab and not self.koji_principal)):
raise RuntimeError("specify both koji_principal and koji_keytab "
"or neither")
# Only run if the build was successful
if self.workflow.build_process_failed:
self.log.info("Not promoting failed build to koji")
return
koji_metadata, output_files = self.get_metadata()
try:
session = self.login()
server_dir = self.get_upload_server_dir()
for output in output_files:
if output.file:
self.upload_file(session, output, server_dir)
finally:
for output in output_files:
if output.file:
output.file.close()
try:
build_info = session.CGImport(koji_metadata, server_dir)
except Exception:
self.log.debug("metadata: %r", koji_metadata)
raise
# Older versions of CGImport do not return a value.
build_id = build_info.get("id") if build_info else None
self.log.debug("Build information: %s",
json.dumps(build_info, sort_keys=True, indent=4))
# Tag the build
if build_id is not None and self.target is not None:
self.log.debug("Finding build tag for target %s", self.target)
target_info = session.getBuildTarget(self.target)
build_tag = target_info['dest_tag_name']
self.log.info("Tagging build with %s", build_tag)
task_id = session.tagBuild(build_tag, build_id)
task = TaskWatcher(session, task_id,
poll_interval=self.poll_interval)
task.wait()
if task.failed():
raise RuntimeError("Task %s failed to tag koji build" % task_id)
return build_id
|
|
# -*- coding: utf-8 -*-
import datetime
import random
import re
import string
from lxml import html
from urllib2 import urlopen
from urlparse import urljoin
from urlparse import urlparse
from werkzeug import url_encode
from openerp import models, fields, api, _
from openerp.tools import ustr
URL_REGEX = r'(\bhref=[\'"](?!mailto:)([^\'"]+)[\'"])'
def VALIDATE_URL(url):
if urlparse(url).scheme not in ('http', 'https', 'ftp', 'ftps'):
return 'http://' + url
return url
class link_tracker(models.Model):
"""link_tracker allow users to wrap any URL into a short and trackable URL.
link_tracker counts clicks on each tracked link.
    This module is also used by mass_mailing, where each link in the mail_mail html_body is converted into
    a trackable link to get the click-through rate of each mass_mailing."""
_name = "link.tracker"
_rec_name = "short_url"
_inherit = ['utm.mixin']
url = fields.Char(string='Target URL', required=True)
count = fields.Integer(string='Number of Clicks', compute='_compute_count', store=True)
short_url = fields.Char(string='Tracked URL', compute='_compute_short_url')
link_click_ids = fields.One2many('link.tracker.click', 'link_id', string='Clicks')
title = fields.Char(string='Page Title', store=True)
favicon = fields.Char(string='Favicon', compute='_compute_favicon', store=True)
link_code_ids = fields.One2many('link.tracker.code', 'link_id', string='Codes')
code = fields.Char(string='Short URL code', compute='_compute_code')
redirected_url = fields.Char(string='Redirected URL', compute='_compute_redirected_url')
short_url_host = fields.Char(string='Host of the short URL', compute='_compute_short_url_host')
icon_src = fields.Char(string='Favicon Source', compute='_compute_icon_src')
@api.model
def convert_links(self, html, vals, blacklist=None):
for match in re.findall(URL_REGEX, html):
short_schema = self.env['ir.config_parameter'].get_param('web.base.url') + '/r/'
href = match[0]
long_url = match[1]
vals['url'] = long_url
            if (not blacklist or not [s for s in blacklist if s in long_url]) and not long_url.startswith(short_schema):
link = self.create(vals)
shorten_url = self.browse(link.id)[0].short_url
if shorten_url:
new_href = href.replace(long_url, shorten_url)
html = html.replace(href, new_href)
return html
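    # Sketch of what convert_links() does (URLs and short code are made up):
    # given
    #   '<a href="https://www.example.com/page">...</a>'
    # and a base URL of https://mydb.example, the href is rewritten to the
    # tracked short URL, e.g.
    #   '<a href="https://mydb.example/r/AbC">...</a>'
    # while links already pointing at <base>/r/ are left untouched.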
@api.one
@api.depends('link_click_ids.link_id')
def _compute_count(self):
self.count = len(self.link_click_ids)
@api.one
@api.depends('code')
def _compute_short_url(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
self.short_url = urljoin(base_url, '/r/%(code)s' % {'code': self.code})
@api.one
def _compute_short_url_host(self):
self.short_url_host = self.env['ir.config_parameter'].get_param('web.base.url') + '/r/'
@api.one
def _compute_code(self):
record = self.env['link.tracker.code'].search([('link_id', '=', self.id)], limit=1, order='id DESC')
self.code = record.code
@api.one
@api.depends('favicon')
def _compute_icon_src(self):
self.icon_src = 'data:image/png;base64,' + self.favicon
@api.one
@api.depends('url')
def _compute_redirected_url(self):
parsed = urlparse(self.url)
utms = {}
for key, field, cook in self.env['utm.mixin'].tracking_fields():
attr = getattr(self, field).name
if attr:
utms[key] = attr
self.redirected_url = '%s://%s%s?%s&%s#%s' % (parsed.scheme, parsed.netloc, parsed.path, url_encode(utms), parsed.query, parsed.fragment)
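    # _compute_redirected_url() above reassembles the target URL with this
    # record's UTM values url-encoded ahead of the original query string, so a
    # url of 'https://example.com/page?ref=1#top' becomes something like
    # (parameter names depend on utm.mixin.tracking_fields()):
    #   https://example.com/page?utm_campaign=spring&utm_source=mail&ref=1#top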
@api.model
@api.depends('url')
def _get_title_from_url(self, url):
try:
page = urlopen(url, timeout=5)
p = html.fromstring(ustr(page.read()).encode('utf-8'), parser=html.HTMLParser(encoding='utf-8'))
title = p.find('.//title').text
        except Exception:
title = url
return title
@api.one
@api.depends('url')
def _compute_favicon(self):
try:
icon = urlopen('http://www.google.com/s2/favicons?domain=' + self.url, timeout=5).read()
icon_base64 = icon.encode('base64').replace("\n", "")
        except Exception:
icon_base64 = 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsSAAALEgHS3X78AAACiElEQVQ4EaVTzU8TURCf2tJuS7tQtlRb6UKBIkQwkRRSEzkQgyEc6lkOKgcOph78Y+CgjXjDs2i44FXY9AMTlQRUELZapVlouy3d7kKtb0Zr0MSLTvL2zb75eL838xtTvV6H/xELBptMJojeXLCXyobnyog4YhzXYvmCFi6qVSfaeRdXdrfaU1areV5KykmX06rcvzumjY/1ggkR3Jh+bNf1mr8v1D5bLuvR3qDgFbvbBJYIrE1mCIoCrKxsHuzK+Rzvsi29+6DEbTZz9unijEYI8ObBgXOzlcrx9OAlXyDYKUCzwwrDQx1wVDGg089Dt+gR3mxmhcUnaWeoxwMbm/vzDFzmDEKMMNhquRqduT1KwXiGt0vre6iSeAUHNDE0d26NBtAXY9BACQyjFusKuL2Ry+IPb/Y9ZglwuVscdHaknUChqLF/O4jn3V5dP4mhgRJgwSYm+gV0Oi3XrvYB30yvhGa7BS70eGFHPoTJyQHhMK+F0ZesRVVznvXw5Ixv7/C10moEo6OZXbWvlFAF9FVZDOqEABUMRIkMd8GnLwVWg9/RkJF9sA4oDfYQAuzzjqzwvnaRUFxn/X2ZlmGLXAE7AL52B4xHgqAUqrC1nSNuoJkQtLkdqReszz/9aRvq90NOKdOS1nch8TpL555WDp49f3uAMXhACRjD5j4ykuCtf5PP7Fm1b0DIsl/VHGezzP1KwOiZQobFF9YyjSRYQETRENSlVzI8iK9mWlzckpSSCQHVALmN9Az1euDho9Xo8vKGd2rqooA8yBcrwHgCqYR0kMkWci08t/R+W4ljDCanWTg9TJGwGNaNk3vYZ7VUdeKsYJGFNkfSzjXNrSX20s4/h6kB81/271ghG17l+rPTAAAAAElFTkSuQmCC'
self.favicon = icon_base64
@api.multi
def action_view_statistics(self):
action = self.env['ir.actions.act_window'].for_xml_id('link_tracker', 'action_view_click_statistics')
action['domain'] = [('link_id', '=', self.id)]
return action
@api.multi
def action_visit_page(self):
return {
'name': _("Visit Webpage"),
'type': 'ir.actions.act_url',
'url': self.url,
'target': 'new',
}
@api.model
def recent_links(self, filter, limit):
if filter == 'newest':
return self.search_read([], order='create_date DESC', limit=limit)
elif filter == 'most-clicked':
return self.search_read([('count', '!=', 0)], order='count DESC', limit=limit)
elif filter == 'recently-used':
return self.search_read([('count', '!=', 0)], order='write_date DESC', limit=limit)
else:
return {'Error': "This filter doesn't exist."}
@api.model
def create(self, vals):
create_vals = vals.copy()
if 'url' not in create_vals:
raise ValueError('URL field required')
else:
create_vals['url'] = VALIDATE_URL(vals['url'])
search_domain = []
for fname, value in create_vals.iteritems():
search_domain.append((fname, '=', value))
result = self.search(search_domain, limit=1)
if result:
return result
if not create_vals.get('title'):
create_vals['title'] = self._get_title_from_url(create_vals['url'])
# Prevent the UTMs to be set by the values of UTM cookies
for (key, fname, cook) in self.env['utm.mixin'].tracking_fields():
if fname not in create_vals:
create_vals[fname] = False
link = super(link_tracker, self).create(create_vals)
code = self.env['link.tracker.code'].get_random_code_string()
self.env['link.tracker.code'].create({'code': code, 'link_id': link.id})
return link
@api.model
def get_url_from_code(self, code, context=None):
code_rec = self.env['link.tracker.code'].sudo().search([('code', '=', code)])
if not code_rec:
return None
return code_rec.link_id.redirected_url
    _sql_constraints = [
('url_utms_uniq', 'unique (url, campaign_id, medium_id, source_id)', 'The URL and the UTM combination must be unique')
]
class link_tracker_code(models.Model):
_name = "link.tracker.code"
code = fields.Char(string='Short URL Code', store=True)
link_id = fields.Many2one('link.tracker', 'Link', required=True, ondelete='cascade')
@api.model
def get_random_code_string(self):
size = 3
while True:
code_proposition = ''.join(random.choice(string.letters + string.digits) for _ in range(size))
if self.search([('code', '=', code_proposition)]):
size += 1
else:
return code_proposition
_sql_constraints = [
('code', 'unique( code )', 'Code must be unique.')
]
class link_tracker_click(models.Model):
_name = "link.tracker.click"
_rec_name = "link_id"
click_date = fields.Date(string='Create Date')
link_id = fields.Many2one('link.tracker', 'Link', required=True, ondelete='cascade')
ip = fields.Char(string='Internet Protocol')
country_id = fields.Many2one('res.country', 'Country')
@api.model
def add_click(self, code, ip, country_code, stat_id=False):
self = self.sudo()
code_rec = self.env['link.tracker.code'].search([('code', '=', code)])
if not code_rec:
return None
again = self.search_count([('link_id', '=', code_rec.link_id.id), ('ip', '=', ip)])
if not again:
country_record = self.env['res.country'].search([('code', '=', country_code)], limit=1)
vals = {
'link_id': code_rec.link_id.id,
'create_date': datetime.date.today(),
'ip': ip,
'country_id': country_record.id,
'mail_stat_id': stat_id
}
if stat_id:
mail_stat = self.env['mail.mail.statistics'].search([('id', '=', stat_id)])
if mail_stat.mass_mailing_campaign_id:
vals['mass_mailing_campaign_id'] = mail_stat.mass_mailing_campaign_id.id
if mail_stat.mass_mailing_id:
vals['mass_mailing_id'] = mail_stat.mass_mailing_id.id
self.create(vals)
|
|
#!/usr/bin/env python
from __future__ import print_function
import unittest
import shutil
from ruffus import add_inputs, suffix, mkdir, regex, Pipeline, output_from, touch_file
import ruffus
import sys
"""
Demonstrates the new Ruffus syntax in version 2.6
"""
import os
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0]))
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
def touch(outfile):
with open(outfile, "w"):
pass
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
def task_originate(o):
"""
Makes new files
"""
touch(o)
def task_m_to_1(i, o):
"""
Merges files together
"""
with open(o, "w") as o_file:
for f in sorted(i):
with open(f) as ii:
o_file.write(f + "=" + ii.read() + "; ")
def task_1_to_1(i, o):
"""
1 to 1 for transform
"""
with open(o, "w") as o_file:
with open(i) as ii:
o_file.write(i + "+" + ii.read())
DEBUG_do_not_define_tail_task = False
DEBUG_do_not_define_head_task = False
#
# Returns a fully formed sub pipeline useable as a building block
#
def make_pipeline1(pipeline_name, # Pipelines need to have a unique name
starting_file_names):
test_pipeline = Pipeline(pipeline_name)
# We can change the starting files later using
# set_input() for transform etc.
# or set_output() for originate
# But it can be more convenient to just pass this to the function making the pipeline
#
test_pipeline.originate(task_originate, starting_file_names)\
.follows(mkdir(tempdir), mkdir(tempdir + "/testdir", tempdir + "/testdir2"))\
.posttask(touch_file(tempdir + "/testdir/whatever.txt"))
test_pipeline.transform(task_func=task_m_to_1,
name="add_input",
# Lookup Task from function name task_originate()
# So long as this is unique in the pipeline
input=task_originate,
filter=regex(r"(.+)"),
add_inputs=add_inputs(
tempdir + "/testdir/whatever.txt"),
output=r"\1.22")
test_pipeline.transform(task_func=task_1_to_1,
name="22_to_33",
# Lookup Task from Task name
# Function name is not unique in the pipeline
input=output_from("add_input"),
filter=suffix(".22"),
output=".33")
tail_task = test_pipeline.transform(task_func=task_1_to_1,
name="33_to_44",
# Ask Pipeline to lookup Task from Task name
input=test_pipeline["22_to_33"],
filter=suffix(".33"),
output=".44")
# Set the tail task so that users of my sub pipeline can use it as a dependency
# without knowing the details of task names
#
# Use Task() object directly without having to lookup
test_pipeline.set_tail_tasks([tail_task])
# If we try to connect a Pipeline without tail tasks defined, we have to
# specify the exact task within the Pipeline.
# Otherwise Ruffus will not know which task we mean and throw an exception
if DEBUG_do_not_define_tail_task:
test_pipeline.set_tail_tasks([])
# Set the head task so that users of my sub pipeline send input into it
# without knowing the details of task names
test_pipeline.set_head_tasks([test_pipeline[task_originate]])
return test_pipeline
#
# Returns a fully formed sub pipeline useable as a building block
#
def make_pipeline2(pipeline_name="pipeline2"):
test_pipeline2 = Pipeline(pipeline_name)
test_pipeline2.transform(task_func=task_1_to_1,
# task name
name="44_to_55",
# placeholder: will be replaced later with set_input()
input=None,
filter=suffix(".44"),
output=".55")
test_pipeline2.merge(task_func=task_m_to_1,
input=test_pipeline2["44_to_55"],
output=tempdir + "/final.output",)
# Set head and tail
test_pipeline2.set_tail_tasks([test_pipeline2[task_m_to_1]])
if not DEBUG_do_not_define_head_task:
test_pipeline2.set_head_tasks([test_pipeline2["44_to_55"]])
return test_pipeline2
def run_pipeline():
# First two pipelines are created as separate instances by the make_pipeline1 function
pipeline1a = make_pipeline1(pipeline_name="pipeline1a", starting_file_names=[
tempdir + "/" + ss for ss in ("a.1", "b.1")])
pipeline1b = make_pipeline1(pipeline_name="pipeline1b", starting_file_names=[
tempdir + "/" + ss for ss in ("c.1", "d.1")])
# The Third pipeline is a clone of pipeline1b
pipeline1c = pipeline1b.clone(new_name="pipeline1c")
# Set the "originate" files for pipeline1c to ("e.1" and "f.1")
# Otherwise they would use the original ("c.1", "d.1")
pipeline1c.set_output(output=[])
pipeline1c.set_output(output=[tempdir + "/" + ss for ss in ("e.1", "f.1")])
# Join all pipeline1a-c to pipeline2
pipeline2 = make_pipeline2()
pipeline2.set_input(input=[pipeline1a, pipeline1b, pipeline1c])
#pipeline2.printout_graph("test.svg", "svg", [task_m_to_1])
#pipeline2.printout(verbose = 0)
pipeline2.run(multiprocess=10, verbose=0)
class Test_task(unittest.TestCase):
def tearDown(self):
"""
"""
try:
shutil.rmtree(tempdir)
except:
pass
def test_subpipelines(self):
run_pipeline()
# Check that the output reflecting the pipeline topology is correct.
correct_output = '{tempdir}/a.1.55={tempdir}/a.1.44+{tempdir}/a.1.33+{tempdir}/a.1.22+{tempdir}/a.1=; {tempdir}/testdir/whatever.txt=; ; ' \
'{tempdir}/b.1.55={tempdir}/b.1.44+{tempdir}/b.1.33+{tempdir}/b.1.22+{tempdir}/b.1=; {tempdir}/testdir/whatever.txt=; ; ' \
'{tempdir}/c.1.55={tempdir}/c.1.44+{tempdir}/c.1.33+{tempdir}/c.1.22+{tempdir}/c.1=; {tempdir}/testdir/whatever.txt=; ; ' \
'{tempdir}/d.1.55={tempdir}/d.1.44+{tempdir}/d.1.33+{tempdir}/d.1.22+{tempdir}/d.1=; {tempdir}/testdir/whatever.txt=; ; ' \
'{tempdir}/e.1.55={tempdir}/e.1.44+{tempdir}/e.1.33+{tempdir}/e.1.22+{tempdir}/e.1=; {tempdir}/testdir/whatever.txt=; ; ' \
'{tempdir}/f.1.55={tempdir}/f.1.44+{tempdir}/f.1.33+{tempdir}/f.1.22+{tempdir}/f.1=; {tempdir}/testdir/whatever.txt=; ; '.format(
tempdir=tempdir)
with open(tempdir + "/final.output") as real_output:
real_output_str = real_output.read()
self.assertEqual(correct_output, real_output_str)
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import absolute_import
from bot.pluginDespatch import Plugin
import re
import logging
import logging.config
import pytz
import time
from twisted.internet.task import LoopingCall
from datetime import datetime, timedelta
from django.utils import timezone
from django.conf import settings
from .models import DownVotes, Penalty, Probation, NickHistory, NickSummary
from logos.roomlib import get_room_option
from logos.qs_iter import queryset_foreach
logger = logging.getLogger(__name__)
logging.config.dictConfig(settings.LOGGING)
DOWN_VOTE_MINUTES = 5
DOWN_VOTES_REQUIRED = 1
PENALTY_TIME = 120
MIN_FLOOD_INTERVAL = 5 # In Seconds
FLOOD_THRESHHOLD = 3
FLOOD_PENALTY_TIME = 60*5 # Flood penalty in seconds
from bot.logos_decorators import irc_room_permission_required, \
irc_network_permission_required
# decorator to ensure logos trigger function
# has ops in room and nick is in room
def check_ops(check_nick_in_room=False, use_current_room=False, me=False):
def decorator(func):
def func_wrapper(self, regex, chan, nick, **kwargs):
if use_current_room:
if chan[0] == "#":
room = chan
else: # If in a private message window
room = None
else:
try:
room = regex.group('room')
except IndexError:
room = None
if me:
this_nick = nick
else:
this_nick = regex.group('nick').lower()
my_nick = self.get_nickname()
if room: # not private message or no current room
if room.lower() not in self.get_rooms():
self.notice(nick, 'I am not in room {}'.format(room))
return
my_ops = self.get_op_status(my_nick, room)
if my_ops is None or my_ops not in "~@&":
self.notice(nick, 'I do not have ops in room {}'.format(room))
return
if check_nick_in_room and this_nick not in self.get_room_nicks(room):
self.notice(nick, 'user {} is not in room'.format(this_nick))
else:
return func(self, regex, [chan], nick, **kwargs)
else: # is private message
rooms = self.get_rooms_for_nick(nick)
opped_rooms = []
for room in rooms:
# make sure the room management plugin is enabled
# in all rooms we are acting in
if self.is_plugin_enabled(room):
my_ops = self.get_op_status(my_nick, room)
if my_ops and my_ops in "~@&": # make sure I have ops
                            # make sure the nick is in the room the action is requested for
if nick in self.get_room_nicks(room):
opped_rooms.append(room)
if not opped_rooms:
self.notice(nick, 'I do not have ops in any enabled rooms you are in')
return
return func(self, regex, opped_rooms, nick, **kwargs)
return func_wrapper
return decorator
# To test this plugin remember to type the following
# commands on IRC where the bot can see them:
# !activate plugin room_manage
# !enable plugin #myroom room_manage
# Change #myroom to be whatever room you are in.
class RoomManagementPlugin(Plugin):
    # If you are using this as a starting point for your own plugin,
    # remember to change 'rm' below to a unique identifier for your plugin,
    # and 'Room Management Plugin' to a short description for your plugin.
plugin = ('rm', 'Room Management Plugin')
def __init__(self, *args, **kwargs):
# Change the line below to match the name of the class
# you change this plugin to.
super(RoomManagementPlugin, self).__init__(*args, **kwargs)
#self.repeater = LoopingCall(self.repeat)
#self.repeater.start(30, now = True)
self.commands = ((r'nicks$', self.nicks, 'show nicks in room'),
(r'kick (?P<room>#\S+) (?P<nick>\S+)', self.kick_nick, 'kick nick from room'),
(r'ban (?P<room>#\S+) (?P<nick>\S+)', self.ban_nick, 'ban (mute) nick in room'),
# (r'remove penalty (?P<room>#\S+) (?P<nick>\S+)', self.remove_penalty, 'remove room penalty'),
# (r'down vote (?P<nick>\S+)', self.down_vote, 'Down vote a nick'),
# (r'dv (?P<nick>\S+)', self.down_vote, 'Down vote a nick'),
(r'timer', self.timer, 'demonstrates the timer in a plugin'),
# (r'mute (?P<room>#\S+) (?P<nick>\S+)', self.mute, 'normal user despatch'),
# (r'mute (?P<nick>\S+)', self.mute, 'normal user despatch'),
(r'nicksdb', self.nicksdb , 'Show nicks with same ip'),
(r'aka hosts (?P<hostmask>\S+)$', self.nicks_hostmasks, ' Find all nicks matching a hostmask pattern'),
(r'aka latest (?P<nick>\S+)$', self.aka_latest, 'Show nicks with latest ip'),
(r'aka (?P<nick>\S+)$', self.aka, 'Show nicks with same ip'),
(r'hosts (?P<nick>\S+)', self.hosts, 'Show nicks with same ip'),
(r'op me', self.op_me, 'gives ops'),
(r'deop me', self.deop_me, 'removes ops'),
(r'kick me', self.kick_me, 'kicks you off channel')
)
self.antiflood = {}
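        # Anti-flood bookkeeping filled in by privmsg(); the structure is
        # self.antiflood[nick][channel] = {'line': last message seen,
        #                                  'timestamp': time of that message,
        #                                  'repeat': consecutive repeat count}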
def get_hostmask(self, nick):
hostmask = self.get_host(nick).split('@')[1]
if hostmask:
host_mask = '*!*@'+hostmask
else:
host_mask = nick+'!*@*'
return host_mask
def repeat(self):
for penalty in Penalty.objects.filter(network = self.network):
if timezone.now() > penalty.end_time:
self.mode( penalty.room, False, "b", mask = penalty.nick_mask)
penalty.delete()
def privmsg(self, user, channel, message):
# Anti-flood checks
if self.is_plugin_enabled(channel) and channel[0] == '#' and message[0] != get_room_option(self.network, channel, 'activation'):
my_nick = self.get_nickname()
my_ops = self.get_op_status(my_nick, channel)
if my_ops is not None and my_ops in "~@&%":
nick, user_mask = user.split('!')
nick = nick.lower()
user_mask = '*!'+user_mask
logger.debug("{} has user_mask {}".format(nick, user_mask))
timestamp = time.time()
if nick in self.antiflood:
if channel in self.antiflood[nick]:
if message == self.antiflood[nick][channel]['line']:
prior_time = self.antiflood[nick][channel]['timestamp']
if timestamp - prior_time < MIN_FLOOD_INTERVAL:
self.antiflood[nick][channel]['repeat'] += 1
self.antiflood[nick][channel]['timestamp'] = timestamp
if self.antiflood[nick][channel]['repeat'] >= FLOOD_THRESHHOLD:
self.add_penalty(channel, user_mask, FLOOD_PENALTY_TIME, reason="flooding")
self.kick(channel, nick, reason = "Stop repeating yourself!")
else:
self.antiflood[nick][channel] = {'line':message, 'timestamp':timestamp, 'repeat':1}
else:
self.antiflood[nick][channel] = {'line':message, 'timestamp':timestamp, 'repeat':1}
else:
self.antiflood[nick][channel] = {'line':message, 'timestamp':timestamp, 'repeat':1}
else:
self.antiflood[nick] = { channel: {'line':message, 'timestamp':timestamp, 'repeat':1} }
#print self.antiflood
def nicksdb(self, regex, chan, nick, **kwargs):
ndb = self.irc_conn.nicks_db
self.notice(nick, '*******')
for k in ndb.nicks_in_room:
ln = "{} - {}".format(k, str(ndb.nicks_in_room[k]))
self.notice(nick, ln)
for k in ndb.nicks_info:
ln = "{} - {}".format(k, str(ndb.nicks_info[k]))
self.notice(nick, ln)
def add_penalty(self, channel, user_mask, seconds, reason=None):
begin_date = timezone.now()
end_date = begin_date + timedelta(seconds = seconds)
wmask = '*!*@'+user_mask.split('@')[1]
penalty = Penalty(network = self.network.lower(),
room = channel.lower(),
nick_mask = wmask,
reason = reason,
begin_time = begin_date,
end_time = end_date,
kick = True)
penalty.save()
@irc_room_permission_required('room_admin')
def nicks_hostmasks(self, regex, chan, nick, **kwargs):
""" Find all nicks matching a hostmask """
hostmask = regex.group('hostmask')
if hostmask[0] == '*' and hostmask[-1] == '*':
nicks = NickSummary.objects.filter(network=self.network, host_mask__contains = hostmask[1:-1])
elif hostmask[0] == '*':
nicks = NickSummary.objects.filter(network=self.network, host_mask__endswith = hostmask[1:])
elif hostmask[-1] == '*':
nicks = NickSummary.objects.filter(network=self.network, host_mask__startswith = '*!*@' + hostmask[0:-1])
else:
nicks = NickSummary.objects.filter(network=self.network, host_mask = '*!*@'+hostmask)
unique_nicks = set()
for nickl in nicks:
unique_nicks.add(nickl.nick)
if len(unique_nicks) > 0:
nick_list = ", ".join(sorted(unique_nicks))
self.say(chan, "{} is also {}".format(hostmask, nick_list))
else:
self.say(chan, "No nicks for host mask {}".format(hostmask))
@irc_room_permission_required('room_admin')
def aka(self, regex, chan, nick, **kwargs):
this_nick = regex.group('nick')
unique_nicks = set()
qs = NickSummary.objects.filter(network=self.network, nick__iexact = this_nick.lower())
if len(qs) > 0:
for rec in qs:
hostmask = rec.host_mask
qs2 = NickSummary.objects.filter(network=self.network, host_mask = hostmask)
for rec2 in qs2:
if rec2.nick.lower() != this_nick.lower():
unique_nicks.add(rec2.nick)
if len(unique_nicks) > 0:
nick_list = ", ".join(sorted(unique_nicks))
self.say(chan, "{} is also {}".format(this_nick, nick_list))
self.say(chan, "*** end of nick list ***")
else:
self.say(chan, "No other nicks for {}".format(this_nick))
else:
self.say(chan, '** No host masks found for nick **')
@irc_room_permission_required('room_admin')
def aka_latest(self, regex, chan, nick, **kwargs):
this_nick = regex.group('nick')
unique_nicks = set()
hostrec = NickHistory.objects.filter(network=self.network, nick__iexact = this_nick).order_by('time_seen').last()
if hostrec:
hostmask = hostrec.host_mask
nicks = NickSummary.objects.filter(network=self.network, host_mask = hostmask).order_by('nick')
for nick in nicks:
if nick.nick.lower() != this_nick.lower():
unique_nicks.add(nick.nick)
if len(unique_nicks) > 0:
nick_list = ", ".join(sorted(unique_nicks))
self.say(chan, "{} is also {}".format(this_nick, nick_list))
self.say(chan, "*** end of list ***")
else:
self.say(chan, "No other nicks for {}".format(this_nick))
else:
self.say(chan, "No records for nick {} found".format(this_nick))
def _get_hostmasks(self, nick):
hosts = set()
qs = NickSummary.objects.filter(network=self.network, nick__iexact = nick).order_by('host_mask')
for rec in qs:
if '@' in rec.host_mask:
hostmask = rec.host_mask.split('@')[1]
else:
hostmask = rec.host_mask
hosts.add(hostmask)
return hosts
@irc_room_permission_required('room_admin')
def hosts(self, regex, chan, nick, **kwargs):
this_nick = regex.group('nick')
hosts = self._get_hostmasks(this_nick)
if hosts:
for host in hosts:
self.say(chan, host)
self.say(chan, '*** end of hosts list ***')
else:
self.say(chan, 'No host masks found')
@irc_room_permission_required('room_admin')
def remove_penalty(self, regex, chan, nick, **kwargs):
this_nick = regex.group('nick')
this_room = regex.group('room')
hostmask = self.get_hostmask(this_nick)
wmask = '*!*@' + hostmask.split('@')[1]
penalties = Penalty.objects.filter(nick_mask = wmask,
room = this_room.lower(),
end_time__gt = timezone.now())
for penalty in penalties:
penalty.kick = False
penalty.save()
self.notice(nick, "Penalty removed")
@check_ops()
def down_vote(self, regex, chans, nick, **kwargs):
nick_dv = regex.group('nick')
hostmask = self.get_hostmask(nick_dv)
if not hostmask:
self.notice(nick, "No hostmask available yet")
return
for chan in chans:
dvs = DownVotes.objects.filter(network=self.network.lower(),
room = chan.lower(),
nick_mask = hostmask,
downvoting_nick = nick.lower(),
downvote_datetime__gte = timezone.now() - timedelta(seconds = 60*DOWN_VOTE_MINUTES))
if dvs.exists():
self.notice(chan, "You down voted {} within {} minutes ago".format(nick_dv, DOWN_VOTE_MINUTES))
else:
dv = DownVotes(network = self.network.lower(),
room = chan.lower(),
nick_mask = hostmask,
downvoting_nick = nick.lower())
dv.save()
self.notice(nick, "Nick {} in channel {} down voted".format(nick_dv, chan))
dvs_count = DownVotes.objects.filter(network=self.network.lower(),
room = chan.lower(),
nick_mask = hostmask,
downvote_datetime__gte = timezone.now() - timedelta(seconds = 60*DOWN_VOTE_MINUTES)).count()
if dvs_count >= DOWN_VOTES_REQUIRED:
begin_date = timezone.now()
end_date = begin_date + timedelta(seconds = PENALTY_TIME)
penalty = Penalty(network = self.network.lower(),
room = chan.lower(),
nick_mask = hostmask,
begin_time = begin_date,
end_time = end_date,
kick = True)
penalty.save()
self.kick(chan, nick_dv, reason = "down voted by democracy, not allowed in room for {} minutes".format(PENALTY_TIME/60))
def nicks(self, regex, chan, nick, **kwargs):
nicks = self.get_room_nicks(chan)
nick_plus_hosts = []
for nck in nicks:
nick_plus_hosts.append(nck + " :) " + self.get_hostmask(nck) )
self.notice(nick, "Nicks in room are " + ", ".join(nick_plus_hosts))
def mute(self, regex, chan, nick, **kwargs):
"""Send a probationary nick away"""
try:
room = regex.group('room')
except IndexError:
room = chan
this_nick = regex.group('nick')
        hostmask = self.get_hostmask(this_nick)
        if Probation.objects.filter(network = self.network, room = room, host_mask = hostmask).exists():
            pen, created = Penalty.objects.get_or_create(network = self.network, room = room, nick_mask = hostmask)
            if not created:
                pen.begin_time = timezone.now()
                pen.end_time = pen.begin_time + timedelta(seconds = 30)
                pen.save()
            self.mode( room, True, "b", mask = hostmask)
def timer(self, regex, chan, nick, **kwargs):
self.reactor.callLater(5, self.timer_expired, chan)
self.say(chan, "The timer will expire in 5 seconds")
def timer_expired(self, chan):
self.say(chan, "The timer has expired after 5 seconds")
# self.mode( chan, set, modes, limit = None, user = None, mask = None):
    # Demonstration of changing the modes on a user or channel.
# Explanation of parameters below:
# The {limit}, {user}, and {mask} parameters are mutually exclusive.
# chan: The name of the channel to operate on.
# set: True to give the user or channel permissions and False to
# remove them.
# modes: The mode flags to set on the user or channel.
    # limit: In conjunction with the {'l'} mode flag, limits the
# number of users on the channel.
# user: The user to change the mode on.
    # mask: In conjunction with the {'b'} mode flag, sets a mask of
# users to be banned from the channel.
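    # For illustration, the two calls used in this plugin look like the
    # following (the room, nick and mask values are made-up examples):
    #   self.mode('#myroom', True, 'o', user='somenick')          # give ops
    #   self.mode('#myroom', True, 'b', mask='*!*@host.example')  # set a ban mask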
@irc_room_permission_required('room_admin')
@check_ops(use_current_room=True, me=True)
def op_me(self, regex, chans, nick, **kwargs):
# using True is the same as +o
for chan in chans:
if self.is_plugin_enabled(chan):
self.mode(chan, True, "o", user = nick)
@irc_room_permission_required('room_admin')
@check_ops(use_current_room=True, me=True)
    def deop_me(self, regex, chans, nick, **kwargs):
# using False is the same as -o
for chan in chans:
if self.is_plugin_enabled(chan):
self.mode(chan, False, "o", user = nick)
@irc_room_permission_required('room_admin')
@check_ops(use_current_room=True, me=True)
def kick_me(self, regex, chans, nick, **kwargs):
for chan in chans:
if self.is_plugin_enabled(chan):
self.kick(chan, nick, reason="Well, you asked ;)")
@irc_room_permission_required('room_admin')
@check_ops(check_nick_in_room=True)
def kick_nick(self, regex, chan, nick, **kwargs):
room = regex.group('room')
nick_to_kick = regex.group('nick')
        self.kick(room, nick_to_kick, reason="No reason given.")
@irc_room_permission_required('room_admin')
@check_ops(check_nick_in_room=True)
def ban_nick(self, regex, chan, nick, **kwargs):
room = regex.group('room')
nick_to_ban = regex.group('nick').lower()
hostmask = self.get_host(nick_to_ban)
if hostmask:
wmask = "*!*@" + hostmask.split('@')[1]
self.notice(nick, 'banning user {} with mask {}'.format(nick_to_ban, wmask))
else:
self.notice(nick, 'host mask for user {} unknown'.format(nick_to_ban))
def border_patrol(self, args):
action, nick, room = args
hostmask = self.get_hostmask(nick)
penalty = Penalty.objects.filter(network = self.network.lower(),
room = room.lower(),
nick_mask = hostmask,
end_time__gt = timezone.now()).order_by('end_time').last()
if penalty and penalty.kick:
time_remaining = (penalty.end_time - timezone.now()).seconds
            hours = time_remaining // 3600
            time_remaining -= hours * 3600
            minutes = time_remaining // 60
            seconds = time_remaining - minutes * 60
msg = "Not allowed in {} for {} hours {} minutes and {} seconds for {}".format(room, hours, minutes, seconds, penalty.reason)
if action == "kick":
self.kick(room, nick, reason = msg)
elif action == "mute":
self.mode( penalty.room, True, "b", mask = penalty.nick_mask)
def joined(self, channel):
pass
def userRenamed(self, old, new):
host = self.get_host(new.lower())
for rm in self.get_rooms_for_nick(new):
nh = NickHistory(network = self.network,
room = rm,
nick = new,
host_mask = host)
nh.save()
def userJoined(self, user, channel):
""" Not used - nick/room info collected in userHosts method """
pass
def userLeft(self, user, channel):
pass
def userQuit(self, user, quitMessage):
        # The control room or engine room is often the room designated for notices
        # and/or messages if no other room is specified
# self.say(self.control_room, "%s has just quit with message %s" % (user,quitMessage))
pass
def userHosts(self, nicklist):
""" Called when userhost info is available """
        # Each entry in nicklist is a (nick, userhost) tuple
for nick, userhost in nicklist:
host = "*!*@" + userhost.split('@')[1]
logger.debug( str((nick, host)) )
rooms = self.get_rooms_for_nick(nick)
for room in rooms:
hist = NickHistory(network = self.network, room=room, nick = nick, host_mask = host)
hist.save()
if self.is_plugin_enabled(room):
self.border_patrol(('kick', nick, room))
|
|
"""The test for light device automation."""
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.light import DOMAIN
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock serivce."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a light."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert triggers == expected_triggers
async def test_get_trigger_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a light trigger."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
for trigger in triggers:
capabilities = await async_get_device_automation_capabilities(
hass, "trigger", trigger
)
assert capabilities == expected_capabilities
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
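    # The "%"-joined data_template above renders to a string of the form
    # "turn_on {{ trigger.platform }} - {{ trigger.entity_id }} - ... - {{ trigger.for }}"
    # (and the analogous "turn_off ..." variant), which is what the
    # calls[n].data["some"] assertions below compare against.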
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "turn_off device - {} - on - off - None".format(
ent1.entity_id
)
hass.states.async_set(ent1.entity_id, STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "turn_on device - {} - off - on - None".format(
ent1.entity_id
)
async def test_if_fires_on_state_change_with_for(hass, calls):
"""Test for triggers firing with delay."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_off",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
await hass.async_block_till_done()
assert calls[0].data["some"] == "turn_off device - {} - on - off - 0:00:05".format(
ent1.entity_id
)
|
|
import os
import sys
import subprocess
import time
from threading import Thread
class IpproofController:
IPPROOF_SERVER_NAME = "ipproof-server"
output_redirected = False
def __init__(self, arguments_dictionary, ipproof_server_name):
self.stdout_save = sys.stdout
self.stderr_save = sys.stderr
self.dic = arguments_dictionary
self.IPPROOF_SERVER_NAME = ipproof_server_name
    def daemonize_program(self):
# https://gist.github.com/andreif/cbb71b0498589dac93cb
# first fork
try:
pid = os.fork()
if pid > 0:
                sys.exit(0)
except OSError as error:
sys.stderr.write("first process forking failed:\n{}\n".format(
error))
sys.exit(1)
# decoupling
os.chdir("/")
os.setsid()
os.umask(0)
# second fork
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError as error:
sys.stderr.write("second process forking failed:\n{}\n".format(
error))
sys.exit(1)
# redirection of standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
std_in = open(os.devnull, "r")
std_out = open(os.devnull, "a+")
std_err = open(os.devnull, "a+")
os.dup2(std_in.fileno(), sys.stdin.fileno())
        os.dup2(std_out.fileno(), sys.stdout.fileno())
        os.dup2(std_err.fileno(), sys.stderr.fileno())
def redirect_console_output(self, start):
if start:
time_now = time.strftime("%H_%M_%S")
self.file_out = open("/tmp/net-applet-shuffler/logs/ipproof_"
"controller_stdout_{}".format(time_now), "w")
self.file_err = open("/tmp/net-applet-shuffler/logs/ipproof_"
"controller_stderr_{}".format(time_now), "w")
self.output_redirected = True
sys.stdout = self.file_out
sys.stderr = self.file_err
if not start and self.output_redirected:
self.file_out.close()
self.file_err.close()
sys.stdout = self.stdout_save
sys.stderr = self.stderr_save
self.output_redirected = False
def ssh_exec(self, ip, remote_user, local_user, cmd):
        # due to daemonized root program execution, ssh uses root user params
# use the -i identity file option for the user file
# use the -o known_hosts file option for the same reason
# note: for debugging purposes use: "-vvv -E /[file_path]"
ssh_command = "ssh -i /home/{}/.ssh/id_rsa -o UserKnownHostsFile=" \
"/home/{}/.ssh/known_hosts {}@{} sudo {}"\
.format(local_user, local_user, remote_user, ip, cmd)
process = subprocess.Popen(ssh_command.split(), stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
exit_code = process.returncode
print(" - exit code: {} - ".format(exit_code) + ssh_command)
return stdout, stderr, exit_code
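    # As a rough illustration (the host, user names and command below are
    # made-up examples), ssh_exec("10.0.0.2", "remote", "local", "kill -2 1234")
    # would run approximately:
    #   ssh -i /home/local/.ssh/id_rsa \
    #       -o UserKnownHostsFile=/home/local/.ssh/known_hosts \
    #       remote@10.0.0.2 sudo kill -2 1234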
def execute(self, cmd):
command = "sudo {}".format(cmd)
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
exit_code = process.returncode
print(" - exit code: {} - ".format(exit_code) + command)
return stdout, stderr, exit_code
def ipproof_server_end(self):
# try graceful end
_, _, exit_code = self.ssh_exec(self.dic["ip_dest_control"],
self.dic["user_dest"],
self.dic["user_source"],
"kill -2 {}".format(
self.dic["ipproof_pid"]))
if exit_code == 0:
# remove pid file
self.ssh_exec(self.dic["ip_dest_control"],
self.dic["user_dest"], self.dic["user_source"],
"rm /tmp/net-applet-shuffler/ipproof_{}"
.format(self.dic["applet_id"],
self.dic["ipproof_pid"]))
return True
def _server_start_thread(self):
self.ssh_exec(self.dic["ip_dest_control"], self.dic["user_dest"],
self.dic["user_source"], "{} -4 -p {}"
.format(dic["ipproof_server_path"],
self.dic["ipproof_port"]))
def ipproof_server_start(self):
ipproof_pid = "0"
server_thread = Thread(target=self._server_start_thread, args=())
server_thread.daemon = True
server_thread.start()
time.sleep(1)
stdout = self.ssh_exec(self.dic["ip_dest_control"],
self.dic["user_dest"],
self.dic["user_source"],
"ps -ef | grep {}".format(
self.IPPROOF_SERVER_NAME))
stdout_decoded = stdout[0].decode("utf-8")
for line in stdout_decoded.splitlines():
# unique identifier
if "{} -4 -p {}".format(self.IPPROOF_SERVER_NAME,
self.dic["ipproof_port"]) in line:
ipproof_pid = line.split()[1]
break
if ipproof_pid == "0":
print("warning (no abort): ipproof server pid could not be "
"retrieved")
self.dic["ipproof_pid"] = ipproof_pid
self.ssh_exec(self.dic["ip_dest_control"], self.dic["user_dest"],
self.dic["user_source"],
"touch /tmp/net-applet-shuffler/ipproof_{}"
.format(self.dic["applet_id"]))
self.ssh_exec(self.dic["ip_dest_control"], self.dic["user_dest"],
self.dic["user_source"], "sh -c \"echo '{}' > "
"/tmp/net-applet-shuffler/ipproof_{}\"".format(
self.dic["ipproof_pid"], self.dic["applet_id"]))
return True
def test_running(self, starting):
# due to network congestion, this might fail, thus has to be robust
done = False
while not done:
try:
                # while the following file exists, there is an ongoing transfer
if starting:
self.execute("touch /tmp/net-applet-shuffler/running_{}"
.format(self.dic["applet_id"]))
if not starting:
self.execute("rm /tmp/net-applet-shuffler/running_{}"
.format(self.dic["applet_id"]))
return True
except subprocess.SubprocessError:
pass
def main(self):
        # daemonize the program
        self.daemonize_program()
# make sure necessary dirs exist, local and remote
self.execute("mkdir -p /tmp/net-applet-shuffler")
self.execute("mkdir -p /tmp/net-applet-shuffler/logs")
# redirect output to file
self.redirect_console_output(True)
self.ssh_exec(self.dic["ip_dest_control"], self.dic["user_dest"],
self.dic["user_source"],
"mkdir -p /tmp/net-applet-shuffler")
# write test in progress file
# to be checked if there are ongoing transfers
self.test_running(True)
ipproof_started = self.ipproof_server_start()
if not ipproof_started:
sys.exit(2)
amount_tries = 0
ipproof_start_failed = True
ipproof_cmd = ""
while amount_tries < 10:
print(" - trying to start ipproof")
ipproof_cmd = "{} -4 -e {} -p {} -t tcp -s {} -n {} -r {} -i " \
"{}".format(dic["ipproof_client_path"],
self.dic["ip_dest_data"],
self.dic["ipproof_port"],
self.dic["transfer_size"],
self.dic["iterations"],
self.dic["ack_size"],
self.dic["inter_send_interval"])
_, _, exit_code = self.execute(ipproof_cmd)
if exit_code == 0:
ipproof_start_failed = False
break
amount_tries += 1
time.sleep(1)
if ipproof_start_failed:
print("error: ipproof performance test could not be executed\n"
"failed params:")
print(ipproof_cmd + "\n")
sys.exit(3)
time.sleep(2)
print(" - ipproof ended")
self.test_running(False)
self.ipproof_server_end()
print(" - ipproof-controller ended gracefully\n")
self.redirect_console_output(False)
sys.exit(0)
if __name__ == '__main__':
# arguments dictionary
dic = dict()
dic["applet_id"] = sys.argv[1]
dic["user_source"] = sys.argv[2]
dic["name_dest"] = sys.argv[3]
dic["user_dest"] = sys.argv[4]
dic["ip_dest_data"] = sys.argv[5]
dic["ip_dest_control"] = sys.argv[6]
dic["ipproof_port"] = sys.argv[7]
dic["transfer_size"] = sys.argv[8]
dic["iterations"] = sys.argv[9]
dic["ack_size"] = sys.argv[10]
dic["inter_send_interval"] = sys.argv[11]
dic["ipproof_client_path"] = sys.argv[12]
dic["ipproof_server_path"] = sys.argv[13]
ipproof_server_path_split = dic["ipproof_server_path"].split("/")
ipproof_server_name = ipproof_server_path_split[
len(ipproof_server_path_split)-1]
# init
ip_cont = IpproofController(dic, ipproof_server_name)
ip_cont.main()
|
|
"""
Usecases of recursive functions.
Some functions are compiled at import time, hence a separate module.
"""
from numba import jit
@jit("i8(i8)", nopython=True)
def fib1(n):
if n < 2:
return n
# Note the second call uses a named argument
return fib1(n - 1) + fib1(n=n - 2)
def make_fib2():
@jit("i8(i8)", nopython=True)
def fib2(n):
if n < 2:
return n
return fib2(n - 1) + fib2(n=n - 2)
return fib2
fib2 = make_fib2()
def make_type_change_self(jit=lambda x: x):
@jit
def type_change_self(x, y):
if x > 1 and y > 0:
return x + type_change_self(x - y, y)
else:
return y
return type_change_self
# Implicit signature
@jit(nopython=True)
def fib3(n):
if n < 2:
return n
return fib3(n - 1) + fib3(n - 2)
# Run-away self recursion
@jit(nopython=True)
def runaway_self(x):
return runaway_self(x)
@jit(nopython=True)
def raise_self(x):
if x == 1:
raise ValueError("raise_self")
elif x > 0:
return raise_self(x - 1)
else:
return 1
# Mutual recursion
@jit(nopython=True)
def outer_fac(n):
if n < 1:
return 1
return n * inner_fac(n - 1)
@jit(nopython=True)
def inner_fac(n):
if n < 1:
return 1
return n * outer_fac(n - 1)
# Mutual recursion with different arg names
def make_mutual2(jit=lambda x: x):
@jit
def foo(x):
if x > 0:
return 2 * bar(z=1, y=x)
return 1 + x
@jit
def bar(y, z):
return foo(x=y - z)
return foo, bar
# Mutual runaway recursion
@jit(nopython=True)
def runaway_mutual(x):
return runaway_mutual_inner(x)
@jit(nopython=True)
def runaway_mutual_inner(x):
return runaway_mutual(x)
# Mutual type changing recursion
def make_type_change_mutual(jit=lambda x: x):
@jit
def foo(x, y):
if x > 1 and y > 0:
# call bar first to exercise partial type inference.
            # the type inferer is suspended at the call to bar() and hasn't yet
            # determined the potential return type from the else-branch
return x + bar(x - y, y)
else:
return y
@jit
def bar(x, y):
if x > 1 and y > 0:
return x + foo(x - y, y)
else:
return y
return foo
# Indirect mutual recursion
def make_four_level(jit=lambda x: x):
@jit
def first(x):
# The recursing call must have a path that is non-recursing.
if x > 0:
return second(x) * 2
else:
return 1
@jit
def second(x):
return third(x) * 3
@jit
def third(x):
return fourth(x) * 4
@jit
def fourth(x):
return first(x / 2 - 1)
return first
def make_inner_error(jit=lambda x: x):
@jit
def outer(x):
if x > 0:
return inner(x)
else:
return 1
@jit
def inner(x):
if x > 0:
return outer(x - 1)
else:
# this branch is actually never executed
return error_fun(x)
@jit
def error_fun(x):
# to trigger an untyped attribute error
return x.ndim
return outer
def make_raise_mutual(jit=lambda x: x):
@jit
def outer(x):
if x > 0:
return inner(x)
else:
return 1
@jit
def inner(x):
if x == 1:
raise ValueError('raise_mutual')
elif x > 0:
return outer(x - 1)
else:
return 1
return outer
def make_optional_return_case(jit=lambda x: x):
@jit
def foo(x):
if x > 5:
return x - 1
else:
return
@jit
def bar(x):
out = foo(x)
if out is None:
return out
elif out < 8:
return out
else:
return x * bar(out)
return bar
def make_growing_tuple_case(jit=lambda x: x):
# From issue #4387
@jit
def make_list(n):
if n <= 0:
return None
return (n, make_list(n - 1))
return make_list
|
|
from unittest import mock
from unittest.mock import Mock, sentinel, ANY
from urllib.parse import urlparse
import pytest
from django.urls import resolve
from hamcrest import assert_that, contains_inanyorder
from requests import HTTPError
from rest_framework.reverse import reverse
from osmaxx.conversion import output_format, status
from osmaxx.api_client import ConversionHelper, API_client
from osmaxx.excerptexport.models import Excerpt, ExtractionOrder
from osmaxx.job_progress.views import tracker
from tests.test_helpers import vcr_explicit_path as vcr
# Authentication tests
@vcr.use_cassette("fixtures/vcr/conversion_api-test_successful_login.yml")
def test_successful_login():
api_client = ConversionHelper()
assert api_client.token is None
api_client._login()
assert api_client.token is not None
@vcr.use_cassette("fixtures/vcr/conversion_api-test_failed_login.yml")
def test_failed_login():
api_client = ConversionHelper()
api_client.password = "invalid"
assert api_client.password == "invalid"
assert api_client.token is None
expected_msg = (
"400 Client Error: Bad Request for url: http://localhost:8901/api/token-auth/"
)
with pytest.raises(HTTPError) as excinfo:
api_client._login()
assert str(excinfo.value) == expected_msg
assert API_client.reasons_for(excinfo.value) == {
"non_field_errors": ["Unable to login with provided credentials."]
}
assert api_client.token is None
@pytest.fixture
def the_host():
return "the-host.example.com"
@pytest.fixture
def job_progress_request(the_host):
request = Mock()
request.build_absolute_uri.return_value = (
"http://" + the_host + "/job_progress/tracker/23/"
)
return request
@pytest.fixture
def excerpt_request(the_host):
request = Mock()
request.build_absolute_uri.return_value = (
"http://" + the_host + "/orders/new/new_excerpt/"
)
return request
@pytest.fixture
def excerpt(user, bounding_geometry, db):
return Excerpt.objects.create(
name="Neverland",
is_active=True,
is_public=True,
owner=user,
bounding_geometry=bounding_geometry,
)
@pytest.fixture
def extraction_order(excerpt, user, db):
extraction_order = ExtractionOrder.objects.create(
excerpt=excerpt, orderer=user, id=23
)
extraction_order.extraction_formats = [output_format.FGDB, output_format.SPATIALITE]
extraction_order.coordinate_reference_system = 4326
return extraction_order
#
# ConversionApiClient unit tests:
def test_extraction_order_forward_to_conversion_service(
rf, mocker, excerpt, extraction_order, bounding_geometry, the_host
):
mocker.patch.object(
ConversionHelper,
"create_job",
side_effect=[
{"id": 5, "status": status.RECEIVED},
{"id": 23, "status": status.RECEIVED},
],
)
mocker.patch.object(
ConversionHelper,
"create_parametrization",
side_effect=[sentinel.parametrization_1, sentinel.parametrization_2],
)
mocker.patch.object(ConversionHelper, "create_boundary")
request = rf.get("/tracker/something", HTTP_HOST=the_host)
result = extraction_order.forward_to_conversion_service(incoming_request=request)
ConversionHelper.create_boundary.assert_called_once_with(
bounding_geometry, name=excerpt.name
)
srs = extraction_order.coordinate_reference_system
detail_level = extraction_order.detail_level
assert_that(
ConversionHelper.create_parametrization.mock_calls,
contains_inanyorder(
mock.call(
boundary=ConversionHelper.create_boundary.return_value,
out_format=output_format.FGDB,
detail_level=detail_level,
out_srs=srs,
),
mock.call(
boundary=ConversionHelper.create_boundary.return_value,
out_format=output_format.SPATIALITE,
detail_level=detail_level,
out_srs=srs,
),
),
)
assert_that(
ConversionHelper.create_job.mock_calls,
contains_inanyorder(
mock.call(sentinel.parametrization_1, ANY, user=ANY),
mock.call(sentinel.parametrization_2, ANY, user=ANY),
),
)
fgdb_export = extraction_order.exports.get(file_format=output_format.FGDB)
spatialite_export = extraction_order.exports.get(
file_format=output_format.SPATIALITE
)
fgdb_callback_uri_path = reverse(
"job_progress:tracker", kwargs=dict(export_id=fgdb_export.id)
)
spatialite_callback_uri_path = reverse(
"job_progress:tracker", kwargs=dict(export_id=spatialite_export.id)
)
assert_that(
ConversionHelper.create_job.mock_calls,
contains_inanyorder(
mock.call(ANY, "http://" + the_host + fgdb_callback_uri_path, user=ANY),
mock.call(
ANY, "http://" + the_host + spatialite_callback_uri_path, user=ANY
),
),
)
assert_that(
result,
contains_inanyorder(
{"id": 5, "status": "received"},
{"id": 23, "status": "received"},
),
)
assert_that(
extraction_order.exports.values_list("file_format", flat=True),
contains_inanyorder(
output_format.FGDB,
output_format.SPATIALITE,
),
)
assert_that(
extraction_order.exports.values_list("conversion_service_job_id", flat=True),
contains_inanyorder(
5,
23,
),
)
@pytest.fixture
def api_client():
return ConversionHelper()
#
# ConversionApiClient integration tests:
@vcr.use_cassette("fixtures/vcr/conversion_api-test_create_job.yml")
def test_create_jobs_for_extraction_order(extraction_order, excerpt_request):
fgdb_export = extraction_order.exports.get(file_format=output_format.FGDB)
spatialite_export = extraction_order.exports.get(
file_format=output_format.SPATIALITE
)
assert fgdb_export.conversion_service_job_id is None
assert fgdb_export.status is None
assert spatialite_export.conversion_service_job_id is None
assert spatialite_export.status is None
jobs_json = extraction_order.forward_to_conversion_service(
incoming_request=(excerpt_request)
)
fgdb_export.refresh_from_db()
spatialite_export.refresh_from_db()
assert fgdb_export.status == status.RECEIVED
assert spatialite_export.status == status.RECEIVED
assert len(jobs_json) == 2
for job_json in jobs_json:
expected_keys_in_response = [
"callback_url",
"rq_job_id",
"id",
"status",
"resulting_file",
"parametrization",
]
actual_keys_in_response = job_json.keys()
assert_that(
expected_keys_in_response, contains_inanyorder(*actual_keys_in_response)
)
@pytest.fixture
def clipping_area_json():
return {
"id": 17,
"name": "Neverland",
"clipping_multi_polygon": {
"type": "MultiPolygon",
"coordinates": [
[
[
[29.525547623634335, 40.77546776498174],
[29.525547623634335, 40.77739734768811],
[29.528980851173397, 40.77739734768811],
[29.528980851173397, 40.77546776498174],
[29.525547623634335, 40.77546776498174],
]
]
],
},
}
@vcr.use_cassette("fixtures/vcr/conversion_api-test_create_job_for_export.yml")
def test_create_job_for_export(
extraction_order, job_progress_request, clipping_area_json
):
fgdb_export = extraction_order.exports.get(file_format=output_format.FGDB)
job_json = fgdb_export.send_to_conversion_service(
clipping_area_json, incoming_request=job_progress_request
)
expected_callback_url = "http://the-host.example.com/job_progress/tracker/23/"
assert job_json["callback_url"] == expected_callback_url
assert job_json["rq_job_id"] == "6692fa44-cc19-4252-88ae-8687496da421"
assert job_json["id"] == 29
assert job_json["status"] == status.RECEIVED
assert job_json["resulting_file"] is None
assert job_json["parametrization"] == 38
@vcr.use_cassette("fixtures/vcr/conversion_api-test_create_job_for_export.yml")
def test_callback_url_of_created_job_refers_to_correct_export(
extraction_order, job_progress_request, clipping_area_json
):
fgdb_export = extraction_order.exports.get(file_format=output_format.FGDB)
job_json = fgdb_export.send_to_conversion_service(
clipping_area_json, incoming_request=job_progress_request
)
callback_url = job_json["callback_url"]
scheme, host, callback_path, params, *_ = urlparse(callback_url)
match = resolve(callback_path)
assert match.func == tracker
job_progress_request.build_absolute_uri.assert_called_with(
"/job_progress/tracker/{}/".format(fgdb_export.id)
)
@vcr.use_cassette("fixtures/vcr/conversion_api-test_create_job_for_export.yml")
def test_callback_url_would_reach_this_django_instance(
extraction_order, job_progress_request, the_host, clipping_area_json
):
fgdb_export = extraction_order.exports.get(file_format=output_format.FGDB)
job_json = fgdb_export.send_to_conversion_service(
clipping_area_json, incoming_request=job_progress_request
)
callback_url = job_json["callback_url"]
scheme, host, callback_path, params, *_ = urlparse(callback_url)
assert scheme.startswith("http") # also matches https
assert host == the_host
|
|
"""Primary techniques for the core functionality of namebot."""
from __future__ import absolute_import
from __future__ import division
import re
from collections import defaultdict
from random import choice
from string import ascii_uppercase
import nltk
from . import nlp
from . import normalization
from . import settings as namebot_settings
_prefixes = namebot_settings.PREFIXES
_suffixes = namebot_settings.SUFFIXES
_alphabet = namebot_settings.ALPHABET
_consonants = namebot_settings.CONSONANTS
_vowels = namebot_settings.VOWELS
_regexes = namebot_settings.regexes
def slice_ends(word, count=1):
"""Slice letters off each side, in a symmetric fashion.
The idea is to find interesting substring word combinations.
:param word (string): the word to modify.
:param count (int, optional): The number of letters to chop off each end.
:rtype string: The modified string.
>>> slice_ends('potatoes', count=2)
>>> 'tato'
"""
if any([not count, count is None]):
return word
return word[count:len(word) - count]
def domainify(words, tld='com'):
"""Convert words into a domain format for testing domains.
:param words (list): List of words
:param tld (str, optional): The TLD (top-level domain) to use.
:rtype list: The modified list of words.
    >>> domainify(['radio'], tld='.io')
>>> ['rad.io']
"""
_words = []
if tld.startswith('.'):
tld = tld.replace('.', '')
for word in words:
if word.endswith(tld) and tld != '':
word = word.replace(tld, '.{}'.format(tld))
_words.append(word)
return _words
def spoonerism(words):
"""Convert a list of words formatted with the spoonerism technique.
:param words (list) - The list of words to operate on
:rtype words (list) - The updated list of words
>>> spoonerism(['foo', 'bar'])
    >>> ['boo far']
    """
    # First: [f]oo [b]ar => boo far
new_words = []
if len(words) < 2:
raise ValueError('Need more than one word to combine')
for k, word in enumerate(words):
try:
new_words.append('{}{} {}{}'.format(
words[k + 1][0], # 2nd word, 1st letter
word[1:], # 1st word, 2nd letter to end
word[0], # 1st word, 1st letter
words[k + 1][1:])) # 2nd word, 2nd letter to end
except IndexError:
continue
return new_words
def kniferism(words):
"""Convert a list of words formatted with the kniferism technique.
:param words (list) - The list of words to operate on
:rtype words (list) - The updated list of words
>>> kniferism(['foo', 'bar'])
    >>> ['fao bor']
    """
    # Mid: f[o]o b[a]r => fao bor
if len(words) < 2:
raise ValueError('Need more than one word to combine')
new_words = []
for k, word in enumerate(words):
try:
middle_second = int(len(words[k + 1]) / 2)
middle_first = int(len(word) / 2)
new_words.append('{}{}{} {}{}{}'.format(
word[:middle_first],
words[k + 1][middle_second],
word[middle_first + 1:],
words[k + 1][:middle_second],
word[middle_first],
words[k + 1][middle_second + 1:]))
except IndexError:
continue
return new_words
def forkerism(words):
"""Convert a list of words formatted with the forkerism technique.
:param words (list) - The list of words to operate on
:rtype words (list) - The updated list of words
>>> forkerism(['foo', 'bar'])
    >>> ['for bao']
    """
    # Last: fo[o] ba[r] => for bao
if len(words) < 2:
raise ValueError('Need more than one word to combine')
new_words = []
for k, word in enumerate(words):
try:
s_word = words[k + 1]
s_word_len = len(s_word)
f_word_len = len(word)
f_w_last_letter = word[f_word_len - 1]
s_w_last_letter = words[k + 1][s_word_len - 1]
new_words.append('{}{} {}{}'.format(
word[:f_word_len - 1], # 1st word, 1st letter to last - 1
s_w_last_letter, # 2nd word, last letter
s_word[:s_word_len - 1], # 2nd word, 1st letter to last - 1
f_w_last_letter)) # 1st word, last letter
except IndexError:
continue
return new_words
def reduplication_ablaut(words, count=1, random=True, vowel='e'):
"""A technique to combine words and altering the vowels.
See http://phrases.org.uk/meanings/reduplication.html for origination.
:param words (list): The list of words to operate on.
:param count (int, optional): The number of regex substitutions to make.
:param random (bool, optional): Whether or not to randomize vowel choices.
    :param vowel (string, optional): Which vowel to substitute.
                                     If no vowel is available the word
                                     will not change.
    >>> reduplication_ablaut(['cat', 'dog'], random=False, vowel='a')
    >>> ['dog dag']
"""
if len(words) < 2:
raise ValueError('Need more than one word to combine')
new_words = []
substitution = choice(_vowels) if random else vowel
for word in words:
second = re.sub(r'a|e|i|o|u', substitution, word, count=count)
# Only append if the first and second are different.
if word != second:
new_words.append('{} {}'.format(word, second))
return new_words
def prefixify(words):
"""Apply a prefix technique to a set of words.
:param words (list) - The list of words to operate on.
:rtype new_arr (list): the updated *fixed words
"""
new_arr = []
for word in words:
if not word:
continue
for prefix in _prefixes:
first_prefix_no_vowel = re.search(
_regexes['no_vowels'], word[0])
second_prefix_no_vowel = re.search(
_regexes['no_vowels'], prefix[0])
if first_prefix_no_vowel or second_prefix_no_vowel:
                    # append when the prefix ends in a vowel
                    # or the word starts with one
                    prefix_ends_vowel = re.search(r'a|e|i|o|u', prefix[-1:])
                    word_starts_vowel = re.search(r'a|e|i|o|u', word[:1])
                    if prefix_ends_vowel or word_starts_vowel:
new_arr.append('{}{}'.format(prefix, word))
return new_arr
def suffixify(words):
"""Apply a suffix technique to a set of words.
:param words (list) - The list of words to operate on.
:rtype new_arr (list): the updated *fixed words
"""
new_arr = []
for word in words:
if not word:
continue
for suffix in _suffixes:
prefix_start_vowel = re.search(_regexes['all_vowels'], word[0])
suffix_start_vowel = re.search(_regexes['all_vowels'], suffix[0])
if prefix_start_vowel or suffix_start_vowel:
                if suffix == 'ify':
                    if word[-1] == 'e':
                        if word[-2] != 'i':
new_arr.append('{}{}'.format(word[:-2], suffix))
else:
new_arr.append('{}{}'.format(word[:-1], suffix))
new_arr.append(word + suffix)
else:
new_arr.append(word + suffix)
return new_arr
def duplifixify(words):
"""Apply a duplifix technique to a set of words (e.g: teeny weeny, etc...).
:param words (list) - The list of words to operate on.
:rtype new_arr (list): the updated *fixed words
"""
new_arr = []
for word in words:
if not word:
continue
for letter in _alphabet:
            # skip when the candidate letter matches the word's first letter,
            # or when the combined word would just duplicate the original.
            duplicate_word = '{}{}'.format(letter, word[1:]) == word
            if word[0] != letter and not duplicate_word:
new_arr.append('{} {}{}'.format(word, letter, word[1:]))
return new_arr
def disfixify(words, replaces=1):
"""Apply a disfix technique to a set of words.
Disfixing is done by removing the first set of vowel-consonant pairs.
Args:
words (list) - The list of words to operate on.
replaces (int, optional): Number of replacements
to make on this string.
Returns:
new_arr (list): the updated *fixed words
"""
new_arr = []
vc_combo = r'[a-zA-Z][aeiou]{1}[qwrtypsdfghjklzxcvbnm]{1}'
for word in words:
if len(re.findall(vc_combo, word)) > 1:
new_arr.append(re.sub(vc_combo, '', word, replaces))
else:
new_arr.append(word)
return new_arr
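# A worked example of the rule above: in 'potatoes' the letter+vowel+consonant
# triples are 'pot' and 'oes', so more than one match exists and the first one
# is dropped:
#   disfixify(['potatoes'])  ->  ['atoes']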
def infixify(words):
"""Apply a infix technique to a set of words.
Adds all consonant+vowel pairs to all inner matching vowel+consonant pairs
of a word, giving all combinations for each word.
Args:
words (list) - The list of words to operate on.
Returns:
new_arr (list): the updated *fixed words
"""
new_arr = []
vc_combo_pair = re.compile(
r'[a-zA-Z][aeiou]{1}[qwrtypsdfghjklzxcvbnm]{1}[aeiou]'
'{1}[qwrtypsdfghjklzxcvbnm]{1}')
for word in words:
matches = re.findall(vc_combo_pair, word)
if matches:
for match in matches:
for infix_pair in namebot_settings.CV_TL_PAIRS:
# Get midpoint of this string.
mid = len(match) // 2
# Get the left and right substrings to join with.
first, second = match[0:mid], match[mid:]
# Check if the infix_pair is the same as start, or end.
bad_matches = [
# Duplicates joined is bad.
infix_pair == first, infix_pair == second,
# Matching letters on start/end joining substrings
# is bad.
first[-1] == infix_pair[0],
# Matching letters on end/start joining substrings
# is also bad.
first[0] == infix_pair[-1],
]
# Skip bad 'fusings'
if any(bad_matches):
continue
replacer = '{}{}{}'.format(first, infix_pair, second)
new_arr.append(word.replace(match, replacer))
else:
new_arr.append(word)
return new_arr
def simulfixify(words, pairs=None, max=5):
"""Generate simulfixed words.
Args:
words (list) - List of words to operate on.
pairs (list, optional) - Simulfix pairs to use for each word.
If not specified, these will be generated
randomly as vowel + consonant strings.
max (int, optional): The number of simulfix pairs to generate
(if pairs is not specified.)
Returns:
results (list) - The simulfix version of each word,
for each simulfix pair.
"""
results = []
if pairs is None:
pairs = ['{}{}'.format(choice(_vowels), choice(_consonants))
for _ in range(max)]
for word in words:
for combo in pairs:
mid = len(word) // 2
_word = '{}{}{}'.format(word[0:mid], combo, word[mid:])
results.append(_word)
return results
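# For example, with an explicit pair (so no randomness is involved):
#   simulfixify(['cat'], pairs=['ar'])  ->  ['carat']
# since mid == len('cat') // 2 == 1, giving 'c' + 'ar' + 'at'.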
def palindrome(word):
"""Create a palindrome from a word.
Args:
word (str): The word.
Returns:
str: The updated palindrome.
>>> palindrome('cool')
>>> 'coollooc'
"""
return '{}{}'.format(word, word[::-1])
def palindromes(words):
"""Convert a list of words into their palindromic form.
Args:
words (list): The words.
Returns:
list: The list of palindromes.
>>> palindrome(['cool', 'neat'])
>>> ['coollooc', 'neattaen']
"""
return [palindrome(word) for word in words]
def make_founder_product_name(founder1, founder2, product):
"""Get the name of two people forming a company and combine it.
Args:
founder1 (str): Your founder name 1.
founder2 (str): Your founder name 2.
product (str): Your product/feature/service name.
Returns:
str: The updated name.
>>> make_founder_product_name('chris', 'ella', 'widgets')
    >>> 'C & E widgets'
"""
return '{} & {} {}'.format(
founder1[0].upper(),
founder2[0].upper(),
product)
def make_name_alliteration(words, divider=' '):
"""Make an alliteration with a set of words, if applicable.
Examples:
java jacket
singing sally
earth engines
...etc
1. Loop through a given array of words
2. group by words with the same first letter
3. combine them and return to new array
"""
new_arr = []
words = sorted(words)
for word1 in words:
for word2 in words:
            if word1[:1] == word2[:1] and word1 != word2:
new_arr.append(word1 + divider + word2)
return new_arr
def make_name_abbreviation(words):
"""Will make some kind of company acronym.
eg: BASF, AT&T, A&W
Returns a single string of the new word combined.
"""
return ''.join([word[:1].upper() for word in words])
def make_vowel(words, vowel_type, vowel_index):
"""Primary for all Portmanteau generators.
This creates the portmanteau based on :vowel_index, and :vowel_type.
The algorithm works as following:
It looks for the first occurrence of a specified vowel in the first word,
then gets the matching occurrence (if any) of the second word,
then determines which should be first or second position, based on
the ratio of letters (for each word) divided by the position of the vowel
in question (e.g. c[a]t (2/3) vs. cr[a]te (3/5)).
The higher number is ordered first, and the two words are then fused
together by the single matching vowel.
"""
new_arr = []
for i in words:
for j in words:
is_match_i = re.search(vowel_type, i)
is_match_j = re.search(vowel_type, j)
            if i != j and is_match_i and is_match_j:
# get the indices and lengths to use in finding the ratio
pos_i = i.index(vowel_index)
len_i = len(i)
pos_j = j.index(vowel_index)
len_j = len(j)
# If starting index is 0,
# add 1 to it so we're not dividing by zero
                if pos_i == 0:
                    pos_i = 1
                if pos_j == 0:
                    pos_j = 1
# Decide which word should be the
# prefix and which should be suffix
if round(pos_i / len_i) > round(pos_j / len_j):
p = i[0: pos_i + 1]
p2 = j[pos_j: len(j)]
if len(p) + len(p2) > 2:
if re.search(
_regexes['all_vowels'], p) or re.search(
_regexes['all_vowels'], p2):
                            if p[-1] == p2[0]:
new_arr.append(p[:-1] + p2)
else:
new_arr.append(p + p2)
return new_arr
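# A sketch of the behaviour, assuming _regexes['all_vowels'] from the settings
# module matches any of a/e/i/o/u:
#   make_vowel(['guitar', 'cat'], re.compile(r'a'), 'a')  ->  ['guitat']
# 'guita' leads because round(4/6) == 1 beats round(1/3) == 0, and the shared
# 'a' fuses the two pieces as 'guit' + 'at'.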
def make_portmanteau_default_vowel(words):
"""Make a portmanteau based on vowel matches.
E.g. (ala Brad+Angelina = Brangelina)
Only matches for second to last letter
in first word and matching vowel in second word.
This defers to the make_vowel function for all the internal
magic, but is a helper in that it provides all types of vowel
combinations in one function.
"""
new_arr = []
vowel_a_re = re.compile(r'a{1}')
vowel_e_re = re.compile(r'e{1}')
vowel_i_re = re.compile(r'i{1}')
vowel_o_re = re.compile(r'o{1}')
vowel_u_re = re.compile(r'u{1}')
new_arr += make_vowel(words, vowel_a_re, 'a')
new_arr += make_vowel(words, vowel_e_re, 'e')
new_arr += make_vowel(words, vowel_i_re, 'i')
new_arr += make_vowel(words, vowel_o_re, 'o')
new_arr += make_vowel(words, vowel_u_re, 'u')
return new_arr
def make_portmanteau_split(words):
"""Make a portmeanteau, split by vowel/consonant combos.
Based on the word formation of nikon: [ni]pp[on] go[k]aku,
which is comprised of Nippon + Gokaku.
We get the first C+V in the first word,
then last V+C in the first word,
then all C in the second word.
"""
new_arr = []
for i in words:
for j in words:
            if i != j:
l1 = re.search(r'[^a|e|i|o|u{1}]+[a|e|i|o|u{1}]', i)
l2 = re.search(r'[a|e|i|o|u{1}]+[^a|e|i|o|u{1}]$', j)
if i and l1 and l2:
# Third letter used for
# consonant middle splits only
l3 = re.split(r'[a|e|i|o|u{1}]', i)
l1 = l1.group(0)
l2 = l2.group(0)
if l3 and len(l3) > 0:
for v in l3:
new_arr.append(l1 + v + l2)
else:
new_arr.append('{}{}{}'.format(l1, 't', l2))
new_arr.append('{}{}{}'.format(l1, 's', l2))
new_arr.append('{}{}{}'.format(l1, 'z', l2))
new_arr.append('{}{}{}'.format(l1, 'x', l2))
return new_arr
def make_punctuator(words, replace):
"""Put some hyphens or dots, or a given punctutation.
Works via :replace in the word, but only around vowels ala "del.ic.ious"
"""
def _replace(words, replace, replace_type='.'):
return [word.replace(
replace, replace + replace_type) for word in words]
hyphens = _replace(words, replace, replace_type='-')
periods = _replace(words, replace)
return hyphens + periods
def make_punctuator_vowels(words):
"""Helper function that combines all possible combinations for vowels."""
new_words = []
new_words += make_punctuator(words, 'a')
new_words += make_punctuator(words, 'e')
new_words += make_punctuator(words, 'i')
new_words += make_punctuator(words, 'o')
new_words += make_punctuator(words, 'u')
return new_words
def make_vowelify(words):
"""Chop off consonant ala nautica if second to last letter is a vowel."""
new_arr = []
for word in words:
if re.search(_regexes['all_vowels'], word[:-2]):
new_arr.append(word[:-1])
return new_arr
def make_misspelling(words):
"""Misspell a word in numerous ways, to create interesting results."""
token_groups = (
('ics', 'ix'),
('ph', 'f'),
('kew', 'cue'),
('f', 'ph'),
('o', 'ough'),
# these seem to have
# sucked in practice
('o', 'off'),
('ow', 'o'),
('x', 'ecks'),
('za', 'xa'),
('xa', 'za'),
('ze', 'xe'),
('xe', 'ze'),
('zi', 'xi'),
('xi', 'zi'),
('zo', 'xo'),
('xo', 'zo'),
('zu', 'xu'),
('xu', 'zu'),
# number based
('one', '1'),
('1', 'one'),
('two', '2'),
('2', 'two'),
('three', '3'),
('3', 'three'),
('four', '4'),
('4', 'four'),
('five', '5'),
('5', 'five'),
('six', '6'),
('6', 'six'),
('seven', '7'),
('7', 'seven'),
('eight', '8'),
('8', 'eight'),
('nine', '9'),
('9', 'nine'),
('ten', '10'),
('10', 'ten'),
('ecks', 'x'),
('spir', 'speer'),
('speer', 'spir'),
('x', 'ex'),
('on', 'awn'),
('ow', 'owoo'),
('awn', 'on'),
('awf', 'off'),
('s', 'z'),
('ce', 'ze'),
('ss', 'zz'),
('ku', 'koo'),
('trate', 'trait'),
('trait', 'trate'),
('ance', 'anz'),
('il', 'yll'),
('ice', 'ize'),
('chr', 'kr'),
# These should only be at end of word!
('er', 'r'),
('lee', 'ly'),
)
new_arr = []
for word in words:
for tokens in token_groups:
new_arr.append(word.replace(*tokens))
return normalization.uniquify(new_arr)
def _pig_latinize(word, postfix='ay'):
"""Generate standard pig latin style, with optional postfix argument."""
# Common postfixes: ['ay', 'yay', 'way']
    if not isinstance(postfix, str):
raise TypeError('Must use a string for postfix.')
piggified = None
vowel_re = re.compile(r'(a|e|i|o|u)')
first_letter = word[0:1]
# clean up non letters
    word = re.sub(r'[^a-zA-Z]', '', word)
if vowel_re.match(first_letter):
piggified = word + 'way'
else:
piggified = ''.join([word[1: len(word)], first_letter, postfix])
return piggified
def pig_latinize(words, postfix='ay'):
"""Pig latinize a set of words.
Args:
words (list): A list of words.
postfix (str, optional): A postfix to use. Default is `ay`.
Returns:
words (list): The updated list.
"""
return [_pig_latinize(word, postfix=postfix) for word in words]
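# For instance:
#   pig_latinize(['cat', 'apple'])  ->  ['atcay', 'appleway']
# Consonant-initial words rotate the first letter and append the postfix, while
# vowel-initial words simply get 'way' appended (regardless of the postfix).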
def acronym_lastname(description, lastname):
"""Create an acronym plus the last name.
Inspiration: ALFA Romeo.
"""
desc = ''.join([word[0].upper() for word
in normalization.remove_stop_words(description.split(' '))])
return '{} {}'.format(desc, lastname)
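# Illustrative sketch (not part of the original module), assuming
# normalization.remove_stop_words() keeps all four words below intact:
#
#     >>> acronym_lastname('Advanced Lightweight Fast Automobiles', 'Romeo')
#     'ALFA Romeo'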
def get_descriptors(words):
"""Group words by their NLTK part-of-speech descriptors.
Use NLTK to first grab tokens by looping through words,
then tag part-of-speech (in isolation)
and provide a dictionary with a list of each type
for later retrieval and usage.
"""
descriptors = defaultdict(list)
tokens = nltk.word_tokenize(' '.join(words))
parts = nltk.pos_tag(tokens)
# Then, push the word into the matching type
for part in parts:
descriptors[part[1]].append(part[0])
return descriptors
def _add_pos_subtypes(nouns, verbs):
"""Combine alternating verbs and nouns into a new list.
Args:
nouns (list) - List of nouns, noun phrases, etc...
verbs (list) - List of verbs, verb phrases, etc...
Returns:
words (list) - The newly combined list
"""
words = []
try:
for noun in nouns:
for verb in verbs:
words.append('{} {}'.format(noun, verb))
words.append('{} {}'.format(verb, noun))
except KeyError:
pass
return words
def _create_pos_subtypes(words):
"""Check part-of-speech tags for a noun-phrase, adding combinations if so.
If it exists, add combinations with noun-phrase + verb-phrase,
noun-phrase + verb, and noun-phrase + adverb,
for each pos type that exists.
:param words (list) - List of verbs, verb phrases, etc...
:rtype new_words (list) - The newly combined list
"""
new_words = []
types = words.keys()
if 'NNP' in types:
if 'VBP' in types:
new_words += _add_pos_subtypes(words['NNP'], words['VBP'])
if 'VB' in types:
new_words += _add_pos_subtypes(words['NNP'], words['VB'])
if 'RB' in types:
new_words += _add_pos_subtypes(words['NNP'], words['RB'])
return new_words
def make_descriptors(words):
"""Make descriptor names.
    Based on verb + noun and adjective + noun combinations.
Examples:
-Pop Cap,
-Big Fish,
-Red Fin,
-Cold Water (grill), etc...
Combines VBP/VB/RB, with NN/NNS
"""
return list(set(_create_pos_subtypes(words)))
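# Illustrative sketch (not part of the original module): get_descriptors may
# return something like {'NNP': ['Fish'], 'VBP': ['Pop']} depending on NLTK's
# tagger and installed data; make_descriptors then pairs each NNP entry with
# the VBP/VB/RB entries in both orders, e.g. 'Fish Pop' and 'Pop Fish'.
# Exact tags vary with the NLTK model, so treat the output as indicative only.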
def all_prefix_first_vowel(word, letters=list(ascii_uppercase)):
"""Find the first vowel in a word and prefixes with consonants.
:param word (str) - the word to update
:param letters (list) - the letters to use for prefixing.
:rtype words (list) - All prefixed words
"""
re_vowels = re.compile(r'[aeiouy]')
matches = re.search(re_vowels, word)
if matches is None:
return [word]
words = []
vowels = ['A', 'E', 'I', 'O', 'U']
first_match = matches.start(0)
for letter in letters:
if letter not in vowels:
# If beginning letter is a vowel, don't offset the index
if first_match == 0:
words.append('{}{}'.format(letter, word))
else:
words.append('{}{}'.format(letter, word[first_match:]))
return words
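# Illustrative sketch (not part of the original module): when the word starts
# with a vowel the whole word is kept and merely prefixed; otherwise everything
# before the first vowel is replaced by the new consonant, e.g.:
#
#     >>> all_prefix_first_vowel('oak', letters=['B', 'C'])
#     ['Boak', 'Coak']
#     >>> all_prefix_first_vowel('tree', letters=['B', 'C'])
#     ['Bee', 'Cee']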
def recycle(words, func, times=2):
"""Run a set of words applied to a function repeatedly.
It will re-run with the last output as the new input.
`words` must be a list, and `func` must return a list.
:param words (list): The list of words.
:param func (function): A function to recycle.
This function must take a single argument,
a list of strings.
:param times (int, optional): The number of times to call the function.
"""
if times > 0:
return recycle(func(words), func, times - 1)
return words
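# Illustrative sketch (not part of the original module): recycle() feeds the
# output of `func` back in as its next input, so with the default times=2 the
# function runs twice, i.e.
# recycle(words, make_misspelling) == make_misspelling(make_misspelling(words)).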
def backronym(acronym, theme, max_attempts=10):
"""Attempt to generate a backronym based on a given acronym and theme.
:param acronym (str): The starting acronym.
:param theme (str): The seed word to base other words off of.
:param max_attempts (int, optional): The number of attempts before failing.
:rtype dict: The result dictionary. If a backronym was successfully
generated, the `success` key will be True, otherwise False.
"""
ret = {
'acronym': '.'.join(list(acronym)).upper(),
'backronym': '',
'words': [],
'success_ratio': 0.0,
'success': False
}
if not acronym or not theme:
return ret
all_words = set()
words = nlp._get_synset_words(theme)
_backronym = []
acronym = acronym.lower()
# Add words if they contain the same first letter
# as any in the given acronym.
cur_step = 0
    while len(_backronym) < len(acronym) and cur_step < max_attempts:
all_words.update(words)
for word in words:
if word[0].lower() in acronym:
if '_' in word:
# Don't add multi-word strings, but don't leave it blank.
_backronym.append(word[0])
else:
_backronym.append(word)
sdict = {}
        # Record each word's index so the words can later
        # be ordered to match the acronym's letters.
for word in _backronym:
try:
index = acronym.index(word[0].lower())
sdict[index] = word
            except ValueError:
continue
cur_step += 1
# Refresh words for next attempt.
words = nlp._get_synset_words(theme)
# Try again if no words existed.
if not words:
continue
# Get new theme, similar to originating theme.
theme = words[0]
    vals = [sdict[key] for key in sorted(sdict)]
ret.update({
'backronym': ' '.join(vals).upper(),
'words': vals,
'success_ratio': float(len(vals)) / float(len(acronym)),
'success': len(vals) == len(acronym)
})
return ret
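# Illustrative sketch (not part of the original module), assuming WordNet data
# is available to nlp._get_synset_words(): the returned dict always has the
# same shape, e.g.
#
#     backronym('cat', 'animal')
#     # {'acronym': 'C.A.T', 'backronym': '...', 'words': [...],
#     #  'success_ratio': ..., 'success': ...}
#
# where 'success' is True only if a word was found for every letter.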
def super_scrub(data):
"""Run words through a comprehensive list of filtering functions.
Expects a dictionary with key "words"
"""
for technique in data['words']:
data['words'][technique] = normalization.uniquify(
normalization.remove_odd_sounding_words(
normalization.clean_sort(
data['words'][technique])))
return data
def generate_all_techniques(words):
"""Generate all techniques across the library in one place."""
data = {
'words': {
'alliterations': make_name_alliteration(words),
'portmanteau': make_portmanteau_default_vowel(words),
'vowels': make_vowelify(words),
'suffix': suffixify(words),
'prefix': prefixify(words),
'duplifix': duplifixify(words),
'disfix': disfixify(words),
'infix': infixify(words),
'simulfix': simulfixify(words),
'founder_product_name': make_founder_product_name(
'Lindsey', 'Chris', 'Widgets'),
'punctuator': make_punctuator_vowels(words),
'name_abbreviation': make_name_abbreviation(words),
'make_portmanteau_split': make_portmanteau_split(words),
'forkerism': forkerism(words),
'kniferism': kniferism(words),
'spoonerism': spoonerism(words),
'palindrome': palindromes(words),
'reduplication_ablaut': reduplication_ablaut(words),
'misspelling': make_misspelling(words),
'descriptors': make_descriptors(
get_descriptors(words))
}
}
return super_scrub(data)
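# Illustrative sketch (not part of the original module): a typical call site
# passes a short list of seed words and then reads whichever techniques it
# cares about, e.g.
#
#     results = generate_all_techniques(['cloud', 'widget'])
#     results['words']['alliterations']
#
# Every list under results['words'] has been cleaned, filtered and
# de-duplicated by super_scrub().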
|
|
# Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for iRMC Boot Driver
"""
import os
import shutil
import tempfile
from ironic_lib import utils as ironic_utils
import mock
from oslo_config import cfg
import six
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common import images
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.irmc import boot as irmc_boot
from ironic.drivers.modules.irmc import common as irmc_common
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
if six.PY3:
import io
file = io.BytesIO
INFO_DICT = db_utils.get_test_irmc_info()
CONF = cfg.CONF
class IRMCDeployPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
irmc_boot.check_share_fs_mounted_patcher.start()
self.addCleanup(irmc_boot.check_share_fs_mounted_patcher.stop)
super(IRMCDeployPrivateMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='iscsi_irmc')
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_irmc', driver_info=INFO_DICT)
CONF.irmc.remote_image_share_root = '/remote_image_share_root'
CONF.irmc.remote_image_server = '10.20.30.40'
CONF.irmc.remote_image_share_type = 'NFS'
CONF.irmc.remote_image_share_name = 'share'
CONF.irmc.remote_image_user_name = 'admin'
CONF.irmc.remote_image_user_password = 'admin0'
CONF.irmc.remote_image_user_domain = 'local'
@mock.patch.object(os.path, 'isdir', spec_set=True, autospec=True)
def test__parse_config_option(self, isdir_mock):
isdir_mock.return_value = True
result = irmc_boot._parse_config_option()
isdir_mock.assert_called_once_with('/remote_image_share_root')
self.assertIsNone(result)
@mock.patch.object(os.path, 'isdir', spec_set=True, autospec=True)
def test__parse_config_option_non_existed_root(self, isdir_mock):
CONF.irmc.remote_image_share_root = '/non_existed_root'
isdir_mock.return_value = False
self.assertRaises(exception.InvalidParameterValue,
irmc_boot._parse_config_option)
isdir_mock.assert_called_once_with('/non_existed_root')
@mock.patch.object(os.path, 'isfile', spec_set=True, autospec=True)
def test__parse_driver_info_in_share(self, isfile_mock):
"""With required 'irmc_deploy_iso' in share."""
isfile_mock.return_value = True
self.node.driver_info['irmc_deploy_iso'] = 'deploy.iso'
driver_info_expected = {'irmc_deploy_iso': 'deploy.iso'}
driver_info_actual = irmc_boot._parse_driver_info(self.node)
isfile_mock.assert_called_once_with(
'/remote_image_share_root/deploy.iso')
self.assertEqual(driver_info_expected, driver_info_actual)
@mock.patch.object(service_utils, 'is_image_href_ordinary_file_name',
spec_set=True, autospec=True)
def test__parse_driver_info_not_in_share(
self, is_image_href_ordinary_file_name_mock):
"""With required 'irmc_deploy_iso' not in share."""
self.node.driver_info[
'irmc_deploy_iso'] = 'bc784057-a140-4130-add3-ef890457e6b3'
driver_info_expected = {'irmc_deploy_iso':
'bc784057-a140-4130-add3-ef890457e6b3'}
is_image_href_ordinary_file_name_mock.return_value = False
driver_info_actual = irmc_boot._parse_driver_info(self.node)
self.assertEqual(driver_info_expected, driver_info_actual)
@mock.patch.object(os.path, 'isfile', spec_set=True, autospec=True)
def test__parse_driver_info_with_deploy_iso_invalid(self, isfile_mock):
"""With required 'irmc_deploy_iso' non existed."""
isfile_mock.return_value = False
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_deploy_iso'] = 'deploy.iso'
error_msg = (_("Deploy ISO file, %(deploy_iso)s, "
"not found for node: %(node)s.") %
{'deploy_iso': '/remote_image_share_root/deploy.iso',
'node': task.node.uuid})
e = self.assertRaises(exception.InvalidParameterValue,
irmc_boot._parse_driver_info,
task.node)
self.assertEqual(error_msg, str(e))
def test__parse_driver_info_with_deploy_iso_missing(self):
"""With required 'irmc_deploy_iso' empty."""
self.node.driver_info['irmc_deploy_iso'] = None
error_msg = ("Error validating iRMC virtual media deploy. Some"
" parameters were missing in node's driver_info."
" Missing are: ['irmc_deploy_iso']")
e = self.assertRaises(exception.MissingParameterValue,
irmc_boot._parse_driver_info,
self.node)
self.assertEqual(error_msg, str(e))
def test__parse_instance_info_with_boot_iso_file_name_ok(self):
"""With optional 'irmc_boot_iso' file name."""
CONF.irmc.remote_image_share_root = '/etc'
self.node.instance_info['irmc_boot_iso'] = 'hosts'
instance_info_expected = {'irmc_boot_iso': 'hosts'}
instance_info_actual = irmc_boot._parse_instance_info(self.node)
self.assertEqual(instance_info_expected, instance_info_actual)
def test__parse_instance_info_without_boot_iso_ok(self):
"""With optional no 'irmc_boot_iso' file name."""
CONF.irmc.remote_image_share_root = '/etc'
self.node.instance_info['irmc_boot_iso'] = None
instance_info_expected = {}
instance_info_actual = irmc_boot._parse_instance_info(self.node)
self.assertEqual(instance_info_expected, instance_info_actual)
def test__parse_instance_info_with_boot_iso_uuid_ok(self):
"""With optional 'irmc_boot_iso' glance uuid."""
self.node.instance_info[
'irmc_boot_iso'] = 'bc784057-a140-4130-add3-ef890457e6b3'
instance_info_expected = {'irmc_boot_iso':
'bc784057-a140-4130-add3-ef890457e6b3'}
instance_info_actual = irmc_boot._parse_instance_info(self.node)
self.assertEqual(instance_info_expected, instance_info_actual)
def test__parse_instance_info_with_boot_iso_glance_ok(self):
"""With optional 'irmc_boot_iso' glance url."""
self.node.instance_info['irmc_boot_iso'] = (
'glance://bc784057-a140-4130-add3-ef890457e6b3')
instance_info_expected = {
'irmc_boot_iso': 'glance://bc784057-a140-4130-add3-ef890457e6b3',
}
instance_info_actual = irmc_boot._parse_instance_info(self.node)
self.assertEqual(instance_info_expected, instance_info_actual)
def test__parse_instance_info_with_boot_iso_http_ok(self):
"""With optional 'irmc_boot_iso' http url."""
        self.node.instance_info[
            'irmc_boot_iso'] = 'http://irmc_boot_iso'
        instance_info_expected = {'irmc_boot_iso': 'http://irmc_boot_iso'}
        instance_info_actual = irmc_boot._parse_instance_info(self.node)
        self.assertEqual(instance_info_expected, instance_info_actual)
def test__parse_instance_info_with_boot_iso_https_ok(self):
"""With optional 'irmc_boot_iso' https url."""
self.node.instance_info[
'irmc_boot_iso'] = 'https://irmc_boot_iso'
instance_info_expected = {'irmc_boot_iso': 'https://irmc_boot_iso'}
instance_info_actual = irmc_boot._parse_instance_info(self.node)
self.assertEqual(instance_info_expected, instance_info_actual)
def test__parse_instance_info_with_boot_iso_file_url_ok(self):
"""With optional 'irmc_boot_iso' file url."""
self.node.instance_info[
'irmc_boot_iso'] = 'file://irmc_boot_iso'
instance_info_expected = {'irmc_boot_iso': 'file://irmc_boot_iso'}
instance_info_actual = irmc_boot._parse_instance_info(self.node)
self.assertEqual(instance_info_expected, instance_info_actual)
@mock.patch.object(os.path, 'isfile', spec_set=True, autospec=True)
def test__parse_instance_info_with_boot_iso_invalid(self, isfile_mock):
CONF.irmc.remote_image_share_root = '/etc'
isfile_mock.return_value = False
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.instance_info['irmc_boot_iso'] = 'hosts~non~existed'
error_msg = (_("Boot ISO file, %(boot_iso)s, "
"not found for node: %(node)s.") %
{'boot_iso': '/etc/hosts~non~existed',
'node': task.node.uuid})
e = self.assertRaises(exception.InvalidParameterValue,
irmc_boot._parse_instance_info,
task.node)
self.assertEqual(error_msg, str(e))
@mock.patch.object(deploy_utils, 'get_image_instance_info',
spec_set=True, autospec=True)
@mock.patch('os.path.isfile', autospec=True)
def test_parse_deploy_info_ok(self, mock_isfile,
get_image_instance_info_mock):
CONF.irmc.remote_image_share_root = '/etc'
get_image_instance_info_mock.return_value = {'a': 'b'}
driver_info_expected = {'a': 'b',
'irmc_deploy_iso': 'hosts',
'irmc_boot_iso': 'fstab'}
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_deploy_iso'] = 'hosts'
task.node.instance_info['irmc_boot_iso'] = 'fstab'
driver_info_actual = irmc_boot._parse_deploy_info(task.node)
self.assertEqual(driver_info_expected, driver_info_actual)
boot_iso_path = os.path.join(
CONF.irmc.remote_image_share_root,
task.node.instance_info['irmc_boot_iso']
)
mock_isfile.assert_any_call(boot_iso_path)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_setup_vmedia_for_boot', spec_set=True,
autospec=True)
@mock.patch.object(images, 'fetch', spec_set=True,
autospec=True)
def test__setup_deploy_iso_with_file(self,
fetch_mock,
setup_vmedia_mock,
set_boot_device_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['irmc_deploy_iso'] = 'deploy_iso_filename'
ramdisk_opts = {'a': 'b'}
irmc_boot._setup_deploy_iso(task, ramdisk_opts)
self.assertFalse(fetch_mock.called)
setup_vmedia_mock.assert_called_once_with(
task,
'deploy_iso_filename',
ramdisk_opts)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.CDROM)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_setup_vmedia_for_boot', spec_set=True,
autospec=True)
@mock.patch.object(images, 'fetch', spec_set=True,
autospec=True)
def test_setup_deploy_iso_with_image_service(
self,
fetch_mock,
setup_vmedia_mock,
set_boot_device_mock):
CONF.irmc.remote_image_share_root = '/'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['irmc_deploy_iso'] = 'glance://deploy_iso'
ramdisk_opts = {'a': 'b'}
irmc_boot._setup_deploy_iso(task, ramdisk_opts)
fetch_mock.assert_called_once_with(
task.context,
'glance://deploy_iso',
"/deploy-%s.iso" % self.node.uuid)
setup_vmedia_mock.assert_called_once_with(
task,
"deploy-%s.iso" % self.node.uuid,
ramdisk_opts)
set_boot_device_mock.assert_called_once_with(
task, boot_devices.CDROM)
def test__get_deploy_iso_name(self):
actual = irmc_boot._get_deploy_iso_name(self.node)
expected = "deploy-%s.iso" % self.node.uuid
self.assertEqual(expected, actual)
def test__get_boot_iso_name(self):
actual = irmc_boot._get_boot_iso_name(self.node)
expected = "boot-%s.iso" % self.node.uuid
self.assertEqual(expected, actual)
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(images, 'fetch', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__prepare_boot_iso_file(self,
deploy_info_mock,
fetch_mock,
image_props_mock,
boot_mode_mock,
create_boot_iso_mock):
deploy_info_mock.return_value = {'irmc_boot_iso': 'irmc_boot.iso'}
with task_manager.acquire(self.context, self.node.uuid) as task:
irmc_boot._prepare_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
self.assertFalse(fetch_mock.called)
self.assertFalse(image_props_mock.called)
self.assertFalse(boot_mode_mock.called)
self.assertFalse(create_boot_iso_mock.called)
task.node.refresh()
self.assertEqual('irmc_boot.iso',
task.node.driver_internal_info['irmc_boot_iso'])
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(images, 'fetch', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
@mock.patch.object(service_utils, 'is_image_href_ordinary_file_name',
spec_set=True, autospec=True)
def test__prepare_boot_iso_fetch_ok(self,
is_image_href_ordinary_file_name_mock,
deploy_info_mock,
fetch_mock,
image_props_mock,
boot_mode_mock,
create_boot_iso_mock):
CONF.irmc.remote_image_share_root = '/'
image = '733d1c44-a2ea-414b-aca7-69decf20d810'
is_image_href_ordinary_file_name_mock.return_value = False
deploy_info_mock.return_value = {'irmc_boot_iso': image}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.instance_info['irmc_boot_iso'] = image
irmc_boot._prepare_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
fetch_mock.assert_called_once_with(
task.context,
image,
"/boot-%s.iso" % self.node.uuid)
self.assertFalse(image_props_mock.called)
self.assertFalse(boot_mode_mock.called)
self.assertFalse(create_boot_iso_mock.called)
task.node.refresh()
self.assertEqual("boot-%s.iso" % self.node.uuid,
task.node.driver_internal_info['irmc_boot_iso'])
@mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy', spec_set=True,
autospec=True)
@mock.patch.object(images, 'get_image_properties', spec_set=True,
autospec=True)
@mock.patch.object(images, 'fetch', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__prepare_boot_iso_create_ok(self,
deploy_info_mock,
fetch_mock,
image_props_mock,
boot_mode_mock,
create_boot_iso_mock):
CONF.pxe.pxe_append_params = 'kernel-params'
deploy_info_mock.return_value = {'image_source': 'image-uuid'}
image_props_mock.return_value = {'kernel_id': 'kernel_uuid',
'ramdisk_id': 'ramdisk_uuid'}
CONF.irmc.remote_image_share_name = '/remote_image_share_root'
boot_mode_mock.return_value = 'uefi'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._prepare_boot_iso(task, 'root-uuid')
self.assertFalse(fetch_mock.called)
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid', ['kernel_id', 'ramdisk_id'])
create_boot_iso_mock.assert_called_once_with(
task.context,
'/remote_image_share_root/' +
"boot-%s.iso" % self.node.uuid,
'kernel_uuid', 'ramdisk_uuid',
'file:///remote_image_share_root/' +
"deploy-%s.iso" % self.node.uuid,
'root-uuid', 'kernel-params', 'uefi')
task.node.refresh()
self.assertEqual("boot-%s.iso" % self.node.uuid,
task.node.driver_internal_info['irmc_boot_iso'])
def test__get_floppy_image_name(self):
actual = irmc_boot._get_floppy_image_name(self.node)
expected = "image-%s.img" % self.node.uuid
self.assertEqual(expected, actual)
@mock.patch.object(shutil, 'copyfile', spec_set=True, autospec=True)
@mock.patch.object(images, 'create_vfat_image', spec_set=True,
autospec=True)
@mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
autospec=True)
def test__prepare_floppy_image(self,
tempfile_mock,
create_vfat_image_mock,
copyfile_mock):
mock_image_file_handle = mock.MagicMock(spec=file)
mock_image_file_obj = mock.MagicMock()
mock_image_file_obj.name = 'image-tmp-file'
mock_image_file_handle.__enter__.return_value = mock_image_file_obj
tempfile_mock.side_effect = [mock_image_file_handle]
deploy_args = {'arg1': 'val1', 'arg2': 'val2'}
CONF.irmc.remote_image_share_name = '/remote_image_share_root'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._prepare_floppy_image(task, deploy_args)
create_vfat_image_mock.assert_called_once_with(
'image-tmp-file', parameters=deploy_args)
copyfile_mock.assert_called_once_with(
'image-tmp-file',
'/remote_image_share_root/' + "image-%s.img" % self.node.uuid)
@mock.patch.object(shutil, 'copyfile', spec_set=True, autospec=True)
@mock.patch.object(images, 'create_vfat_image', spec_set=True,
autospec=True)
@mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
autospec=True)
def test__prepare_floppy_image_exception(self,
tempfile_mock,
create_vfat_image_mock,
copyfile_mock):
mock_image_file_handle = mock.MagicMock(spec=file)
mock_image_file_obj = mock.MagicMock()
mock_image_file_obj.name = 'image-tmp-file'
mock_image_file_handle.__enter__.return_value = mock_image_file_obj
tempfile_mock.side_effect = [mock_image_file_handle]
deploy_args = {'arg1': 'val1', 'arg2': 'val2'}
CONF.irmc.remote_image_share_name = '/remote_image_share_root'
copyfile_mock.side_effect = IOError("fake error")
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IRMCOperationError,
irmc_boot._prepare_floppy_image,
task,
deploy_args)
create_vfat_image_mock.assert_called_once_with(
'image-tmp-file', parameters=deploy_args)
copyfile_mock.assert_called_once_with(
'image-tmp-file',
'/remote_image_share_root/' + "image-%s.img" % self.node.uuid)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_setup_vmedia_for_boot', spec_set=True,
autospec=True)
def test_attach_boot_iso_if_needed(
self,
setup_vmedia_mock,
set_boot_device_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.ACTIVE
task.node.driver_internal_info['irmc_boot_iso'] = 'boot-iso'
irmc_boot.attach_boot_iso_if_needed(task)
setup_vmedia_mock.assert_called_once_with(task, 'boot-iso')
set_boot_device_mock.assert_called_once_with(
task, boot_devices.CDROM)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_setup_vmedia_for_boot', spec_set=True,
autospec=True)
def test_attach_boot_iso_if_needed_on_rebuild(
self,
setup_vmedia_mock,
set_boot_device_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.DEPLOYING
task.node.driver_internal_info['irmc_boot_iso'] = 'boot-iso'
irmc_boot.attach_boot_iso_if_needed(task)
self.assertFalse(setup_vmedia_mock.called)
self.assertFalse(set_boot_device_mock.called)
@mock.patch.object(irmc_boot, '_attach_virtual_cd', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_attach_virtual_fd', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_prepare_floppy_image', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_detach_virtual_fd', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_detach_virtual_cd', spec_set=True,
autospec=True)
def test__setup_vmedia_for_boot_with_parameters(self,
_detach_virtual_cd_mock,
_detach_virtual_fd_mock,
_prepare_floppy_image_mock,
_attach_virtual_fd_mock,
_attach_virtual_cd_mock):
parameters = {'a': 'b'}
iso_filename = 'deploy_iso_or_boot_iso'
_prepare_floppy_image_mock.return_value = 'floppy_file_name'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._setup_vmedia_for_boot(task, iso_filename, parameters)
_detach_virtual_cd_mock.assert_called_once_with(task.node)
_detach_virtual_fd_mock.assert_called_once_with(task.node)
_prepare_floppy_image_mock.assert_called_once_with(task,
parameters)
_attach_virtual_fd_mock.assert_called_once_with(task.node,
'floppy_file_name')
_attach_virtual_cd_mock.assert_called_once_with(task.node,
iso_filename)
@mock.patch.object(irmc_boot, '_attach_virtual_cd', autospec=True)
@mock.patch.object(irmc_boot, '_detach_virtual_fd', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_detach_virtual_cd', spec_set=True,
autospec=True)
def test__setup_vmedia_for_boot_without_parameters(
self,
_detach_virtual_cd_mock,
_detach_virtual_fd_mock,
_attach_virtual_cd_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._setup_vmedia_for_boot(task, 'bootable_iso_filename')
_detach_virtual_cd_mock.assert_called_once_with(task.node)
_detach_virtual_fd_mock.assert_called_once_with(task.node)
_attach_virtual_cd_mock.assert_called_once_with(
task.node,
'bootable_iso_filename')
@mock.patch.object(irmc_boot, '_get_deploy_iso_name', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_get_floppy_image_name', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_remove_share_file', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_detach_virtual_fd', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_detach_virtual_cd', spec_set=True,
autospec=True)
def test__cleanup_vmedia_boot_ok(self,
_detach_virtual_cd_mock,
_detach_virtual_fd_mock,
_remove_share_file_mock,
_get_floppy_image_name_mock,
_get_deploy_iso_name_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._cleanup_vmedia_boot(task)
_detach_virtual_cd_mock.assert_called_once_with(task.node)
_detach_virtual_fd_mock.assert_called_once_with(task.node)
_get_floppy_image_name_mock.assert_called_once_with(task.node)
_get_deploy_iso_name_mock.assert_called_once_with(task.node)
            self.assertEqual(2, _remove_share_file_mock.call_count)
_remove_share_file_mock.assert_has_calls(
[mock.call(_get_floppy_image_name_mock(task.node)),
mock.call(_get_deploy_iso_name_mock(task.node))])
@mock.patch.object(ironic_utils, 'unlink_without_raise', spec_set=True,
autospec=True)
def test__remove_share_file(self, unlink_without_raise_mock):
CONF.irmc.remote_image_share_name = '/'
irmc_boot._remove_share_file("boot.iso")
unlink_without_raise_mock.assert_called_once_with('/boot.iso')
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
def test__attach_virtual_cd_ok(self, get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_boot.scci.get_virtual_cd_set_params_cmd = (
            mock.MagicMock(spec_set=[]))
cd_set_params = (irmc_boot.scci
.get_virtual_cd_set_params_cmd.return_value)
CONF.irmc.remote_image_server = '10.20.30.40'
CONF.irmc.remote_image_user_domain = 'local'
CONF.irmc.remote_image_share_type = 'NFS'
CONF.irmc.remote_image_share_name = 'share'
CONF.irmc.remote_image_user_name = 'admin'
CONF.irmc.remote_image_user_password = 'admin0'
irmc_boot.scci.get_share_type.return_value = 0
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._attach_virtual_cd(task.node, 'iso_filename')
get_irmc_client_mock.assert_called_once_with(task.node)
(irmc_boot.scci.get_virtual_cd_set_params_cmd
.assert_called_once_with)('10.20.30.40',
'local',
0,
'share',
'iso_filename',
'admin',
'admin0')
irmc_client.assert_has_calls(
[mock.call(cd_set_params, async=False),
mock.call(irmc_boot.scci.MOUNT_CD, async=False)])
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
def test__attach_virtual_cd_fail(self, get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_client.side_effect = Exception("fake error")
irmc_boot.scci.SCCIClientError = Exception
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
e = self.assertRaises(exception.IRMCOperationError,
irmc_boot._attach_virtual_cd,
task.node,
'iso_filename')
get_irmc_client_mock.assert_called_once_with(task.node)
self.assertEqual("iRMC Inserting virtual cdrom failed. " +
"Reason: fake error", str(e))
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
def test__detach_virtual_cd_ok(self, get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._detach_virtual_cd(task.node)
irmc_client.assert_called_once_with(irmc_boot.scci.UNMOUNT_CD)
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
def test__detach_virtual_cd_fail(self, get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_client.side_effect = Exception("fake error")
irmc_boot.scci.SCCIClientError = Exception
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
e = self.assertRaises(exception.IRMCOperationError,
irmc_boot._detach_virtual_cd,
task.node)
self.assertEqual("iRMC Ejecting virtual cdrom failed. " +
"Reason: fake error", str(e))
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
def test__attach_virtual_fd_ok(self, get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_boot.scci.get_virtual_fd_set_params_cmd = (
            mock.MagicMock(spec_set=[]))
fd_set_params = (irmc_boot.scci
.get_virtual_fd_set_params_cmd.return_value)
CONF.irmc.remote_image_server = '10.20.30.40'
CONF.irmc.remote_image_user_domain = 'local'
CONF.irmc.remote_image_share_type = 'NFS'
CONF.irmc.remote_image_share_name = 'share'
CONF.irmc.remote_image_user_name = 'admin'
CONF.irmc.remote_image_user_password = 'admin0'
irmc_boot.scci.get_share_type.return_value = 0
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._attach_virtual_fd(task.node,
'floppy_image_filename')
get_irmc_client_mock.assert_called_once_with(task.node)
(irmc_boot.scci.get_virtual_fd_set_params_cmd
.assert_called_once_with)('10.20.30.40',
'local',
0,
'share',
'floppy_image_filename',
'admin',
'admin0')
irmc_client.assert_has_calls(
[mock.call(fd_set_params, async=False),
mock.call(irmc_boot.scci.MOUNT_FD, async=False)])
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
def test__attach_virtual_fd_fail(self, get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_client.side_effect = Exception("fake error")
irmc_boot.scci.SCCIClientError = Exception
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
e = self.assertRaises(exception.IRMCOperationError,
irmc_boot._attach_virtual_fd,
task.node,
'iso_filename')
get_irmc_client_mock.assert_called_once_with(task.node)
self.assertEqual("iRMC Inserting virtual floppy failed. " +
"Reason: fake error", str(e))
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
def test__detach_virtual_fd_ok(self, get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._detach_virtual_fd(task.node)
irmc_client.assert_called_once_with(irmc_boot.scci.UNMOUNT_FD)
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
def test__detach_virtual_fd_fail(self, get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_client.side_effect = Exception("fake error")
irmc_boot.scci.SCCIClientError = Exception
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
e = self.assertRaises(exception.IRMCOperationError,
irmc_boot._detach_virtual_fd,
task.node)
self.assertEqual("iRMC Ejecting virtual floppy failed. "
"Reason: fake error", str(e))
@mock.patch.object(irmc_boot, '_parse_config_option', spec_set=True,
autospec=True)
def test_check_share_fs_mounted_ok(self, parse_conf_mock):
        # Note(naohirot): mock.patch.stop() and mock.patch.start() don't work
        # here, therefore monkey patching is applied to
        # irmc_boot.check_share_fs_mounted.
# irmc_boot.check_share_fs_mounted is mocked in
# third_party_driver_mocks.py.
# irmc_boot.check_share_fs_mounted_orig is the real function.
CONF.irmc.remote_image_share_root = '/'
CONF.irmc.remote_image_share_type = 'nfs'
result = irmc_boot.check_share_fs_mounted_orig()
parse_conf_mock.assert_called_once_with()
self.assertIsNone(result)
@mock.patch.object(irmc_boot, '_parse_config_option', spec_set=True,
autospec=True)
def test_check_share_fs_mounted_exception(self, parse_conf_mock):
        # Note(naohirot): mock.patch.stop() and mock.patch.start() don't work
        # here, therefore monkey patching is applied to
        # irmc_boot.check_share_fs_mounted.
# irmc_boot.check_share_fs_mounted is mocked in
# third_party_driver_mocks.py.
# irmc_boot.check_share_fs_mounted_orig is the real function.
CONF.irmc.remote_image_share_root = '/etc'
CONF.irmc.remote_image_share_type = 'cifs'
self.assertRaises(exception.IRMCSharedFileSystemNotMounted,
irmc_boot.check_share_fs_mounted_orig)
parse_conf_mock.assert_called_once_with()
class IRMCVirtualMediaBootTestCase(db_base.DbTestCase):
def setUp(self):
irmc_boot.check_share_fs_mounted_patcher.start()
self.addCleanup(irmc_boot.check_share_fs_mounted_patcher.stop)
super(IRMCVirtualMediaBootTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_irmc")
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_irmc', driver_info=INFO_DICT)
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, 'check_share_fs_mounted', spec_set=True,
autospec=True)
def test_validate_whole_disk_image(self,
check_share_fs_mounted_mock,
deploy_info_mock,
is_glance_image_mock,
validate_prop_mock):
d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'}
deploy_info_mock.return_value = d_info
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_internal_info = {'is_whole_disk_image': True}
task.driver.boot.validate(task)
check_share_fs_mounted_mock.assert_called_once_with()
deploy_info_mock.assert_called_once_with(task.node)
self.assertFalse(is_glance_image_mock.called)
validate_prop_mock.assert_called_once_with(task.context,
d_info, [])
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, 'check_share_fs_mounted', spec_set=True,
autospec=True)
def test_validate_glance_image(self,
check_share_fs_mounted_mock,
deploy_info_mock,
is_glance_image_mock,
validate_prop_mock):
d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'}
deploy_info_mock.return_value = d_info
is_glance_image_mock.return_value = True
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.validate(task)
check_share_fs_mounted_mock.assert_called_once_with()
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(
task.context, d_info, ['kernel_id', 'ramdisk_id'])
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
@mock.patch.object(service_utils, 'is_glance_image', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, 'check_share_fs_mounted', spec_set=True,
autospec=True)
def test_validate_non_glance_image(self,
check_share_fs_mounted_mock,
deploy_info_mock,
is_glance_image_mock,
validate_prop_mock):
d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'}
deploy_info_mock.return_value = d_info
is_glance_image_mock.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.validate(task)
check_share_fs_mounted_mock.assert_called_once_with()
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(
task.context, d_info, ['kernel', 'ramdisk'])
@mock.patch.object(irmc_boot, '_setup_deploy_iso',
spec_set=True, autospec=True)
@mock.patch.object(deploy_utils, 'get_single_nic_with_vif_port_id',
spec_set=True, autospec=True)
def _test_prepare_ramdisk(self,
get_single_nic_with_vif_port_id_mock,
_setup_deploy_iso_mock):
instance_info = self.node.instance_info
instance_info['irmc_boot_iso'] = 'glance://abcdef'
instance_info['image_source'] = '6b2f0c0c-79e8-4db6-842e-43c9764204af'
self.node.instance_info = instance_info
self.node.save()
ramdisk_params = {'a': 'b'}
get_single_nic_with_vif_port_id_mock.return_value = '12:34:56:78:90:ab'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_ramdisk(task, ramdisk_params)
expected_ramdisk_opts = {'a': 'b', 'BOOTIF': '12:34:56:78:90:ab'}
get_single_nic_with_vif_port_id_mock.assert_called_once_with(
task)
_setup_deploy_iso_mock.assert_called_once_with(
task, expected_ramdisk_opts)
self.assertEqual('glance://abcdef',
self.node.instance_info['irmc_boot_iso'])
def test_prepare_ramdisk_glance_image_deploying(self):
self.node.provision_state = states.DEPLOYING
self.node.save()
self._test_prepare_ramdisk()
def test_prepare_ramdisk_glance_image_cleaning(self):
self.node.provision_state = states.CLEANING
self.node.save()
self._test_prepare_ramdisk()
@mock.patch.object(irmc_boot, '_setup_deploy_iso', spec_set=True,
autospec=True)
def test_prepare_ramdisk_not_deploying_not_cleaning(self, mock_is_image):
"""Ensure deploy ops are blocked when not deploying and not cleaning"""
for state in states.STABLE_STATES:
mock_is_image.reset_mock()
self.node.provision_state = state
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertIsNone(
task.driver.boot.prepare_ramdisk(task, None))
self.assertFalse(mock_is_image.called)
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_clean_up_ramdisk(self, _cleanup_vmedia_boot_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.clean_up_ramdisk(task)
_cleanup_vmedia_boot_mock.assert_called_once_with(task)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
def _test_prepare_instance_whole_disk_image(
self, _cleanup_vmedia_boot_mock, set_boot_device_mock):
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_instance(task)
_cleanup_vmedia_boot_mock.assert_called_once_with(task)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.DISK,
persistent=True)
def test_prepare_instance_whole_disk_image_local(self):
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
self.node.save()
self._test_prepare_instance_whole_disk_image()
def test_prepare_instance_whole_disk_image(self):
self._test_prepare_instance_whole_disk_image()
@mock.patch.object(irmc_boot.IRMCVirtualMediaBoot,
'_configure_vmedia_boot', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_prepare_instance_partition_image(
self, _cleanup_vmedia_boot_mock, _configure_vmedia_mock):
self.node.driver_internal_info = {'root_uuid_or_disk_id': "some_uuid"}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.prepare_instance(task)
_cleanup_vmedia_boot_mock.assert_called_once_with(task)
_configure_vmedia_mock.assert_called_once_with(mock.ANY, task,
"some_uuid")
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_remove_share_file', spec_set=True,
autospec=True)
def test_clean_up_instance(self, _remove_share_file_mock,
_cleanup_vmedia_boot_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.instance_info['irmc_boot_iso'] = 'glance://deploy_iso'
task.node.driver_internal_info['irmc_boot_iso'] = 'irmc_boot.iso'
task.node.driver_internal_info = {'root_uuid_or_disk_id': (
"12312642-09d3-467f-8e09-12385826a123")}
task.driver.boot.clean_up_instance(task)
_remove_share_file_mock.assert_called_once_with(
irmc_boot._get_boot_iso_name(task.node))
self.assertNotIn('irmc_boot_iso',
task.node.driver_internal_info)
self.assertNotIn('root_uuid_or_disk_id',
task.node.driver_internal_info)
_cleanup_vmedia_boot_mock.assert_called_once_with(task)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_setup_vmedia_for_boot', spec_set=True,
autospec=True)
@mock.patch.object(irmc_boot, '_prepare_boot_iso', spec_set=True,
autospec=True)
def test__configure_vmedia_boot(self,
_prepare_boot_iso_mock,
_setup_vmedia_for_boot_mock,
node_set_boot_device):
root_uuid_or_disk_id = {'root uuid': 'root_uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_internal_info['irmc_boot_iso'] = 'boot.iso'
task.driver.boot._configure_vmedia_boot(
task, root_uuid_or_disk_id)
_prepare_boot_iso_mock.assert_called_once_with(
task, root_uuid_or_disk_id)
_setup_vmedia_for_boot_mock.assert_called_once_with(
task, 'boot.iso')
node_set_boot_device.assert_called_once_with(
task, boot_devices.CDROM, persistent=True)
def test_remote_image_share_type_values(self):
cfg.CONF.set_override('remote_image_share_type', 'cifs', 'irmc',
enforce_type=True)
cfg.CONF.set_override('remote_image_share_type', 'nfs', 'irmc',
enforce_type=True)
self.assertRaises(ValueError, cfg.CONF.set_override,
'remote_image_share_type', 'fake', 'irmc',
enforce_type=True)
|
|
from future import standard_library
standard_library.install_aliases()
from future.builtins import str, bytes
from future.utils import iteritems
import gevent
import gevent.pool
import os
import signal
import datetime
import time
import socket
import traceback
import psutil
import sys
import json as json_stdlib
import ujson as json
from bson import ObjectId
try:
from redis.lock import LuaLock
except ImportError:
    # Fall back to the plain Lock class so that later references to LuaLock
    # in this module don't raise a NameError on older redis-py versions.
from redis.lock import Lock as LuaLock
from collections import defaultdict
from mrq.utils import load_class_by_path
from .job import Job
from .exceptions import (TimeoutInterrupt, StopRequested, JobInterrupt, AbortInterrupt,
RetryInterrupt, MaxRetriesInterrupt, MaxConcurrencyInterrupt)
from .context import (set_current_worker, set_current_job, get_current_job, get_current_config,
connections, enable_greenlet_tracing, run_task, log)
from .queue import Queue
from .utils import MongoJSONEncoder, MovingAverage
from .processes import Process
from .redishelpers import redis_key
class Worker(Process):
""" Main worker class """
# Allow easy overloading
job_class = Job
# See the doc for valid statuses
status = "init"
mongodb_jobs = None
mongodb_logs = None
redis = None
def __init__(self):
set_current_worker(self)
if self.config.get("trace_greenlets"):
enable_greenlet_tracing()
self.datestarted = datetime.datetime.utcnow()
self.done_jobs = 0
self.max_jobs = self.config["max_jobs"]
self.max_time = datetime.timedelta(seconds=self.config["max_time"]) or None
self.paused_queues = set()
self.connected = False # MongoDB + Redis
self.process = psutil.Process(os.getpid())
self.greenlet = gevent.getcurrent()
self.graceful_stop = None
self.work_lock = gevent.lock.Semaphore()
if self.config.get("worker_id"):
self.id = ObjectId(self.config["worker_id"])
else:
self.id = ObjectId()
if self.config.get("name"):
self.name = self.config["name"]
else:
# Generate a somewhat human-readable name for this worker
self.name = "%s.%s" % (socket.gethostname().split(".")[0], os.getpid())
self.pool_size = self.config["greenlets"]
self.pool_usage_average = MovingAverage((60 / self.config["report_interval"] or 1))
self.set_logger()
self.refresh_queues(fatal=True)
self.queues_with_notify = list({redis_key("notify", q) for q in self.queues if q.use_notify()})
self.has_subqueues = any([queue.endswith("/") for queue in self.config["queues"]])
self.log.info(
"Starting worker on %s queues with %s greenlets" %
(len(self.queues), self.pool_size)
)
self.gevent_pool = gevent.pool.Pool(self.pool_size)
# Keep references to main greenlets
self.greenlets = {}
# TODO by "tag"?
self._traced_io = {
"types": defaultdict(float),
"tasks": defaultdict(float),
"total": 0
}
if self.config["ensure_indexes"]:
run_task("mrq.basetasks.indexes.EnsureIndexes", {})
def set_logger(self):
import logging
self.log = logging.getLogger(str(self.id))
logging.basicConfig(format=self.config["log_format"])
self.log.setLevel(getattr(logging, self.config["log_level"]))
# No need to send worker logs to mongo?
# logger_class = load_class_by_path(self.config["logger"])
# # All mrq handlers must have worker and collection keyword arguments
# if self.config["logger"].startswith("mrq"):
# self.log_handler = logger_class(collection=self.config["mongodb_logs"], worker=str(self.id), **self.config["logger_config"])
# else:
# self.log_handler = logger_class(**self.config["logger_config"])
# self.log.addHandler(self.log_handler)
@property
def config(self):
return get_current_config()
def connect(self, force=False):
if self.connected and not force:
return
# Accessing connections attributes will automatically connect
self.redis = connections.redis
self.mongodb_jobs = connections.mongodb_jobs
self.mongodb_logs = connections.mongodb_logs
self.connected = True
def greenlet_scheduler(self):
redis_scheduler_lock_key = "%s:schedulerlock" % get_current_config()["redis_prefix"]
while True:
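            # The Redis key acts as a cluster-wide lock: at most one worker
            # should run the scheduler check for any given interval.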
with LuaLock(connections.redis, redis_scheduler_lock_key,
timeout=self.config["scheduler_interval"] + 10, blocking=False, thread_local=False):
self.scheduler.check()
time.sleep(self.config["scheduler_interval"])
def greenlet_report(self):
""" This greenlet always runs in background to update current status
in MongoDB every N seconds.
Caution: it might get delayed when doing long blocking operations.
Should we do this in a thread instead?
"""
self.report_worker(w=1)
while True:
try:
self.report_worker()
except Exception as e: # pylint: disable=broad-except
self.log.error("When reporting: %s" % e)
finally:
time.sleep(self.config["report_interval"])
def greenlet_logs(self):
""" This greenlet always runs in background to update current
logs in MongoDB every 10 seconds.
Caution: it might get delayed when doing long blocking operations.
Should we do this in a thread instead?
"""
while True:
try:
self.flush_logs()
except Exception as e: # pylint: disable=broad-except
self.log.error("When flushing logs: %s" % e)
finally:
time.sleep(self.config["report_interval"])
def greenlet_subqueues(self):
while True:
self.refresh_queues()
time.sleep(self.config["subqueues_refresh_interval"])
def refresh_queues(self, fatal=False):
""" Updates the list of currently known queues and subqueues """
try:
queues = []
prefixes = [q for q in self.config["queues"] if q.endswith("/")]
known_subqueues = Queue.all_known(prefixes=prefixes)
for q in self.config["queues"]:
queues.append(Queue(q))
if q.endswith("/"):
for subqueue in known_subqueues:
if subqueue.startswith(q):
queues.append(Queue(subqueue))
self.queues = queues
except Exception as e: # pylint: disable=broad-except
self.log.error("When refreshing subqueues: %s", e)
if fatal:
raise
def get_paused_queues(self):
""" Returns the set of currently paused queues """
return {q.decode("utf-8") for q in self.redis.smembers(redis_key("paused_queues"))}
def greenlet_paused_queues(self):
while True:
# Update the process-local list of paused queues
self.paused_queues = self.get_paused_queues()
time.sleep(self.config["paused_queues_refresh_interval"])
def get_memory(self):
try:
mmaps = self.process.memory_maps()
mem = {
"rss": sum([x.rss for x in mmaps]),
"swap": sum([getattr(x, 'swap', getattr(x, 'swapped', 0)) for x in mmaps])
}
mem["total"] = mem["rss"] + mem["swap"]
return mem
# memory_maps is unavailable on macOS
# https://github.com/pricingassistant/mrq/issues/228
except Exception as e:
return {"total": 0, "rss": 0, "swap": 0}
def get_worker_report(self, with_memory=False):
""" Returns a dict containing all the data we can about the current status of the worker and
its jobs. """
greenlets = []
for greenlet in list(self.gevent_pool):
g = {}
short_stack = []
stack = traceback.format_stack(greenlet.gr_frame)
for s in stack[1:]:
if "/gevent/hub.py" in s:
break
short_stack.append(s)
g["stack"] = short_stack
job = get_current_job(id(greenlet))
if job:
job.save()
if job.data:
g["path"] = job.data["path"]
g["datestarted"] = job.datestarted
g["id"] = str(job.id)
g["time"] = getattr(greenlet, "_trace_time", 0)
g["switches"] = getattr(greenlet, "_trace_switches", None)
# pylint: disable=protected-access
if job._current_io is not None:
g["io"] = job._current_io
greenlets.append(g)
# When faking network latency, all sockets are affected, including OS ones, but
# we still want reliable reports so this is disabled.
if (not with_memory) or (self.config["add_network_latency"] != "0" and self.config["add_network_latency"]):
cpu = {
"user": 0,
"system": 0,
"percent": 0
}
mem = {"rss": 0, "swap": 0, "total": 0}
else:
cpu_times = self.process.cpu_times()
cpu = {
"user": cpu_times.user,
"system": cpu_times.system,
"percent": self.process.cpu_percent(0)
}
mem = self.get_memory()
# Avoid sharing passwords or sensitive config!
whitelisted_config = [
"max_jobs",
"max_memory"
"greenlets",
"processes",
"queues",
"dequeue_strategy",
"scheduler",
"name",
"local_ip",
"external_ip",
"agent_id",
"worker_group"
]
io = None
if self._traced_io:
io = {}
for k, v in iteritems(self._traced_io):
if k == "total":
io[k] = v
else:
io[k] = sorted(list(v.items()), reverse=True, key=lambda x: x[1])
used_pool_slots = len(self.gevent_pool)
used_avg = self.pool_usage_average.next(used_pool_slots)
return {
"status": self.status,
"config": {k: v for k, v in iteritems(self.config) if k in whitelisted_config},
"done_jobs": self.done_jobs,
"usage_avg": used_avg / self.pool_size,
"datestarted": self.datestarted,
"datereported": datetime.datetime.utcnow(),
"name": self.name,
"io": io,
"_id": str(self.id),
"process": {
"pid": self.process.pid,
"cpu": cpu,
"mem": mem
# https://code.google.com/p/psutil/wiki/Documentation
# get_open_files
# get_connections
# get_num_ctx_switches
# get_num_fds
# get_io_counters
# get_nice
},
"jobs": greenlets
}
def report_worker(self, w=0):
report = self.get_worker_report(with_memory=True)
if self.config["max_memory"] > 0:
if report["process"]["mem"]["total"] > (self.config["max_memory"] * 1024 * 1024):
self.shutdown_max_memory()
if self.config["report_file"]:
with open(self.config["report_file"], "wb") as f:
f.write(bytes(json.dumps(report, ensure_ascii=False, default=str), 'utf-8')) # pylint: disable=no-member
if "_id" in report:
del report["_id"]
try:
self.mongodb_jobs.mrq_workers.update({
"_id": ObjectId(self.id)
}, {"$set": report}, upsert=True, w=w)
except Exception as e: # pylint: disable=broad-except
self.log.debug("Worker report failed: %s" % e)
def greenlet_timeouts(self):
""" This greenlet kills jobs in other greenlets if they timeout.
"""
while True:
now = datetime.datetime.utcnow()
for greenlet in list(self.gevent_pool):
job = get_current_job(id(greenlet))
if job and job.timeout and job.datestarted:
expires = job.datestarted + datetime.timedelta(seconds=job.timeout)
if now > expires:
job.kill(block=False, reason="timeout")
time.sleep(1)
def greenlet_admin(self):
""" This greenlet is used to get status information about the worker
when --admin_port was given
"""
if self.config["processes"] > 1:
self.log.debug(
"Admin server disabled because of multiple processes.")
return
class Devnull(object):
def write(self, *_):
pass
from gevent import pywsgi
def admin_routes(env, start_response):
path = env["PATH_INFO"]
status = "200 OK"
res = ""
if path in ["/", "/report", "/report_mem"]:
report = self.get_worker_report(with_memory=(path == "/report_mem"))
res = bytes(json_stdlib.dumps(report, cls=MongoJSONEncoder), 'utf-8')
elif path == "/wait_for_idle":
self.wait_for_idle()
res = bytes("idle", "utf-8")
else:
status = "404 Not Found"
start_response(status, [('Content-Type', 'application/json')])
return [res]
server = pywsgi.WSGIServer((self.config["admin_ip"], self.config["admin_port"]), admin_routes, log=Devnull())
try:
self.log.debug("Starting admin server on port %s" % self.config["admin_port"])
server.serve_forever()
except Exception as e: # pylint: disable=broad-except
self.log.debug("Error in admin server : %s" % e)
def flush_logs(self):
for handler in self.log.handlers:
handler.flush()
def wait_for_idle(self):
""" Waits until the worker has nothing more to do. Very useful in tests """
# Be mindful that this is being executed in a different greenlet than the work_* methods.
while True:
time.sleep(0.01)
with self.work_lock:
if self.status != "wait":
continue
if len(self.gevent_pool) > 0:
continue
# Force a refresh of the current subqueues, one might just have been created.
self.refresh_queues()
# We might be dequeueing a new subqueue. Double check that we don't have anything more to do
outcome, dequeue_jobs = self.work_once(free_pool_slots=1, max_jobs=None)
if outcome == "wait" and dequeue_jobs == 0:
break
def work(self):
"""Starts the work loop.
"""
self.work_init()
self.work_loop(max_jobs=self.max_jobs, max_time=self.max_time)
self.work_stop()
def work_init(self):
self.connect()
self.status = "started"
# An interval of 0 disables the refresh
if self.has_subqueues and self.config["subqueues_refresh_interval"] > 0:
self.greenlets["subqueues"] = gevent.spawn(self.greenlet_subqueues)
# An interval of 0 disables the refresh
if self.config["paused_queues_refresh_interval"] > 0:
self.greenlets["paused_queues"] = gevent.spawn(self.greenlet_paused_queues)
if self.config["report_interval"] > 0:
self.greenlets["report"] = gevent.spawn(self.greenlet_report)
self.greenlets["logs"] = gevent.spawn(self.greenlet_logs)
if self.config["admin_port"]:
self.greenlets["admin"] = gevent.spawn(self.greenlet_admin)
self.greenlets["timeouts"] = gevent.spawn(self.greenlet_timeouts)
if self.config["scheduler"] and self.config["scheduler_interval"] > 0:
from .scheduler import Scheduler
self.scheduler = Scheduler(self.mongodb_jobs.mrq_scheduled_jobs, self.config.get("scheduler_tasks") or [])
self.scheduler.check_config_integrity() # If this fails, we won't dequeue any jobs
self.greenlets["scheduler"] = gevent.spawn(self.greenlet_scheduler)
self.install_signal_handlers()
def work_loop(self, max_jobs=None, max_time=None):
self.done_jobs = 0
self.datestarted_work_loop = datetime.datetime.utcnow()
self.queue_offset = 0
try:
max_time_reached = False
while True:
if self.graceful_stop:
break
# If the scheduler greenlet has crashed, fail loudly.
if self.config["scheduler"] and not self.greenlets["scheduler"]:
self.exitcode = 1
break
while True:
# we put this here to make sure we have a strict limit on max_time
if max_time and datetime.datetime.utcnow() - self.datestarted >= max_time:
self.log.info("Reached max_time=%s" % max_time.seconds)
max_time_reached = True
break
free_pool_slots = self.gevent_pool.free_count()
if max_jobs:
total_started = (self.pool_size - free_pool_slots) + self.done_jobs
free_pool_slots = min(free_pool_slots, max_jobs - total_started)
if free_pool_slots == 0:
break
if free_pool_slots > 0:
break
self.status = "full"
self.gevent_pool.wait_available(timeout=60)
if max_time_reached:
break
self.status = "spawn"
with self.work_lock:
outcome, dequeue_jobs = self.work_once(free_pool_slots=free_pool_slots, max_jobs=max_jobs)
self.status = "wait"
if outcome == "break":
break
if outcome == "wait":
self.work_wait()
except StopRequested:
pass
finally:
try:
self.log.debug("Joining the greenlet pool...")
self.status = "join"
self.gevent_pool.join(timeout=None, raise_error=False)
self.log.debug("Joined.")
except StopRequested:
pass
self.datestopped_work_loop = datetime.datetime.utcnow()
lifetime = self.datestopped_work_loop - self.datestarted_work_loop
job_rate = float(self.done_jobs) / lifetime.total_seconds()
self.log.info("Worker spent %.3f seconds performing %s jobs (%.3f jobs/second)" % (
lifetime.total_seconds(), self.done_jobs, job_rate
))
def work_once(self, free_pool_slots=1, max_jobs=None):
""" Does one lookup for new jobs, inside the inner work loop """
dequeued_jobs = 0
available_queues = [
queue for queue in self.queues
if queue.root_id not in self.paused_queues and
queue.id not in self.paused_queues
]
for queue_i in range(len(available_queues)):
queue = available_queues[(queue_i + self.queue_offset) % len(available_queues)]
max_jobs_per_queue = free_pool_slots - dequeued_jobs
if max_jobs_per_queue <= 0:
queue_i -= 1
break
if self.config["dequeue_strategy"] == "parallel":
max_jobs_per_queue = max(1, int(max_jobs_per_queue / (len(available_queues) - queue_i)))
for job in queue.dequeue_jobs(
max_jobs=max_jobs_per_queue,
job_class=self.job_class,
worker=self
):
dequeued_jobs += 1
self.gevent_pool.spawn(self.perform_job, job)
# At the next pass, start at the next queue to avoid always dequeuing the same one
if self.config["dequeue_strategy"] == "parallel":
self.queue_offset = (self.queue_offset + queue_i + 1) % len(self.queues)
# TODO consider this when dequeuing jobs to have strict limits
if max_jobs and self.done_jobs >= max_jobs:
self.log.info("Reached max_jobs=%s" % self.done_jobs)
return "break", dequeued_jobs
# We seem to have exhausted the available jobs, so we can sleep for a
# while.
if dequeued_jobs == 0:
if self.config["dequeue_strategy"] == "burst":
self.log.info("Burst mode: stopping now because queues were empty")
return "break", dequeued_jobs
return "wait", dequeued_jobs
return None, dequeued_jobs
def work_wait(self):
""" Wait for new jobs to arrive """
if len(self.queues_with_notify) > 0:
# https://github.com/antirez/redis/issues/874
connections.redis.blpop(*(self.queues_with_notify + [max(1, int(self.config["max_latency"]))]))
else:
gevent.sleep(self.config["max_latency"])
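# Illustration (not part of the worker): the call above blocks on Redis BLPOP
# across every "notify" key at once, waking up as soon as any of those queues
# is pushed to, or after max_latency seconds. A standalone sketch of the same
# pattern with redis-py, using hypothetical key names:
#
#     import redis
#     r = redis.StrictRedis()
#     # Blocks for up to 1 second waiting for a push on either key.
#     r.blpop(["notify:queue_a", "notify:queue_b"], timeout=1)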
def work_stop(self):
self.status = "kill"
self.gevent_pool.kill(exception=JobInterrupt, block=True)
for g in self.greenlets:
g_time = getattr(self.greenlets[g], "_trace_time", 0)
g_switches = getattr(self.greenlets[g], "_trace_switches", None)
self.greenlets[g].kill(block=True)
self.log.debug(
"Greenlet for %s killed (%0.5fs, %s switches)." %
(g, g_time, g_switches))
self.status = "stop"
self.report_worker(w=1)
self.flush_logs()
g_time = getattr(self.greenlet, "_trace_time", 0)
g_switches = getattr(self.greenlet, "_trace_switches", None)
self.log.debug(
"Exiting main worker greenlet (%0.5fs, %s switches)." %
(g_time, g_switches))
def perform_job(self, job):
""" Wraps a job.perform() call with timeout logic and exception handlers.
This is the first call happening inside the greenlet.
"""
if self.config["trace_memory"]:
job.trace_memory_start()
set_current_job(job)
try:
job.perform()
except MaxConcurrencyInterrupt:
self.log.error("Max concurrency reached")
job._save_status("maxconcurrency", exception=True)
except RetryInterrupt:
self.log.error("Caught retry")
job.save_retry(sys.exc_info()[1])
except MaxRetriesInterrupt:
self.log.error("Max retries reached")
job._save_status("maxretries", exception=True)
except AbortInterrupt:
self.log.error("Caught abort")
job.save_abort()
except TimeoutInterrupt:
self.log.error("Job timeouted after %s seconds" % job.timeout)
job._save_status("timeout", exception=True)
except JobInterrupt:
self.log.error("Job interrupted")
job._save_status("interrupt", exception=True)
except Exception:
self.log.error("Job failed")
job._save_status("failed", exception=True)
finally:
set_current_job(None)
self.done_jobs += 1
if self.config["trace_memory"]:
job.trace_memory_stop()
def shutdown_graceful(self):
""" Graceful shutdown: waits for all the jobs to finish. """
self.log.info("Graceful shutdown...")
raise StopRequested() # pylint: disable=nonstandard-exception
def shutdown_max_memory(self):
self.log.info("Max memory reached, shutdown...")
self.graceful_stop = True
def shutdown_now(self):
""" Forced shutdown: interrupts all the jobs. """
self.log.info("Forced shutdown...")
self.status = "killing"
self.gevent_pool.kill(exception=JobInterrupt, block=False)
raise StopRequested() # pylint: disable=nonstandard-exception
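# Illustration (not part of the worker): when an admin_port is configured,
# greenlet_admin() above serves the worker report as JSON on "/", "/report"
# and "/report_mem", plus "/wait_for_idle" for tests. A minimal client sketch,
# assuming a worker listening on localhost:20020 (host, port and helper name
# are all hypothetical):
def _example_fetch_worker_report(host="127.0.0.1", port=20020):
    import json
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen  # Python 2
    resp = urlopen("http://%s:%s/report" % (host, port))
    try:
        # Same payload as get_worker_report(), serialized by the admin server.
        return json.loads(resp.read().decode("utf-8"))
    finally:
        resp.close()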
|
|
# Copyright 2016 Dravetech AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Napalm driver for Cumulus.
Read https://napalm.readthedocs.io for more information.
"""
from __future__ import print_function
from __future__ import unicode_literals
import re
import json
import ipaddress
from datetime import datetime
from pytz import timezone
from collections import defaultdict
from netmiko import ConnectHandler
from netmiko.ssh_exception import NetMikoTimeoutException
import napalm.base.constants as C
from napalm.base.utils import py23_compat
from napalm.base.utils import string_parsers
from napalm.base.base import NetworkDriver
from napalm.base.exceptions import (
ConnectionException,
MergeConfigException,
)
class CumulusDriver(NetworkDriver):
"""Napalm driver for Cumulus."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
"""Constructor."""
self.device = None
self.hostname = hostname
self.username = username
self.password = password
self.timeout = timeout
self.loaded = False
self.changed = False
if optional_args is None:
optional_args = {}
# Netmiko possible arguments
netmiko_argument_map = {
'port': None,
'verbose': False,
'global_delay_factor': 1,
'use_keys': False,
'key_file': None,
'ssh_strict': False,
'system_host_keys': False,
'alt_host_keys': False,
'alt_key_file': '',
'ssh_config_file': None,
'secret': None,
'allow_agent': False
}
# Build dict of any optional Netmiko args
self.netmiko_optional_args = {
k: optional_args.get(k, v)
for k, v in netmiko_argument_map.items()
}
self.port = optional_args.get('port', 22)
self.sudo_pwd = optional_args.get('sudo_pwd', self.password)
def open(self):
try:
self.device = ConnectHandler(device_type='linux',
host=self.hostname,
username=self.username,
password=self.password,
**self.netmiko_optional_args)
# Enter root mode.
if self.netmiko_optional_args.get('secret'):
self.device.enable()
except NetMikoTimeoutException:
raise ConnectionException('Cannot connect to {}'.format(self.hostname))
except ValueError:
raise ConnectionException('Cannot become root.')
def close(self):
self.device.disconnect()
def is_alive(self):
return {
'is_alive': self.device.remote_conn.transport.is_active()
}
def load_merge_candidate(self, filename=None, config=None):
if not filename and not config:
raise MergeConfigException('filename or config param must be provided.')
self.loaded = True
if filename is not None:
with open(filename, 'r') as f:
candidate = f.readlines()
else:
candidate = config
if not isinstance(candidate, list):
candidate = [candidate]
candidate = [line for line in candidate if line]
for command in candidate:
if 'sudo' not in command:
command = 'sudo {0}'.format(command)
output = self._send_command(command)
if "error" in output or "not found" in output:
raise MergeConfigException("Command '{0}' cannot be applied.".format(command))
def discard_config(self):
if self.loaded:
self._send_command('sudo net abort')
self.loaded = False
def compare_config(self):
if self.loaded:
diff = self._send_command('sudo net pending')
return re.sub(r'\x1b\[\d+m', '', diff)
return ''
def commit_config(self, message=""):
if self.loaded:
self._send_command('sudo net commit')
self.changed = True
self.loaded = False
def rollback(self):
if self.changed:
self._send_command('sudo net rollback last')
self.changed = False
def _send_command(self, command):
response = self.device.send_command_timing(command)
if '[sudo]' in response:
response = self.device.send_command_timing(self.sudo_pwd)
return response
def get_facts(self):
facts = {
'vendor': py23_compat.text_type('Cumulus')
}
# Get "net show hostname" output.
hostname = self.device.send_command('hostname')
# Get "net show system" output.
show_system_output = self._send_command('sudo net show system')
for line in show_system_output.splitlines():
if 'build' in line.lower():
os_version = line.split()[-1]
model = ' '.join(line.split()[1:3])
elif 'uptime' in line.lower():
uptime = line.split()[-1]
# Get "decode-syseeprom" output.
decode_syseeprom_output = self.device.send_command('decode-syseeprom')
for line in decode_syseeprom_output.splitlines():
if 'serial number' in line.lower():
serial_number = line.split()[-1]
# Get "net show interface all json" output.
interfaces = self._send_command('sudo net show interface all json')
# Handling bad send_command_timing return output.
try:
interfaces = json.loads(interfaces)
except ValueError:
interfaces = json.loads(self.device.send_command('sudo net show interface all json'))
facts['hostname'] = facts['fqdn'] = py23_compat.text_type(hostname)
facts['os_version'] = py23_compat.text_type(os_version)
facts['model'] = py23_compat.text_type(model)
facts['uptime'] = string_parsers.convert_uptime_string_seconds(uptime)
facts['serial_number'] = py23_compat.text_type(serial_number)
facts['interface_list'] = string_parsers.sorted_nicely(interfaces.keys())
return facts
def get_arp_table(self):
"""
'show arp' output example:
Address HWtype HWaddress Flags Mask Iface
10.129.2.254 ether 00:50:56:97:af:b1 C eth0
192.168.1.134 (incomplete) eth1
192.168.1.1 ether 00:50:56:ba:26:7f C eth1
10.129.2.97 ether 00:50:56:9f:64:09 C eth0
192.168.1.3 ether 00:50:56:86:7b:06 C eth1
"""
output = self.device.send_command('arp -n')
output = output.split("\n")
output = output[1:]
arp_table = list()
for line in output:
line = line.split()
# Skip blank or truncated lines (e.g. a trailing empty line in the output).
if len(line) < 3:
continue
if "incomplete" in line[1]:
macaddr = py23_compat.text_type("00:00:00:00:00:00")
else:
macaddr = py23_compat.text_type(line[2])
arp_table.append(
{
'interface': py23_compat.text_type(line[-1]),
'mac': macaddr,
'ip': py23_compat.text_type(line[0]),
'age': 0.0
}
)
return arp_table
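# Illustration (not part of the driver): with the sample 'arp -n' output shown
# in the docstring above, the first parsed entry looks like:
#     {'interface': u'eth0', 'mac': u'00:50:56:97:af:b1',
#      'ip': u'10.129.2.254', 'age': 0.0}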
def get_ntp_stats(self):
"""
'ntpq -np' output example
remote refid st t when poll reach delay offset jitter
==============================================================================
116.91.118.97 133.243.238.244 2 u 51 64 377 5.436 987971. 1694.82
219.117.210.137 .GPS. 1 u 17 64 377 17.586 988068. 1652.00
133.130.120.204 133.243.238.164 2 u 46 64 377 7.717 987996. 1669.77
"""
output = self.device.send_command("ntpq -np")
output = output.split("\n")[2:]
ntp_stats = list()
for ntp_info in output:
if len(ntp_info) > 0:
remote, refid, st, t, when, hostpoll, reachability, delay, offset, \
jitter = ntp_info.split()
# 'remote' contains '*' if the machine synchronized with NTP server
synchronized = "*" in remote
match = re.search(r'(\d+\.\d+\.\d+\.\d+)', remote)
ip = match.group(1)
when = when if when != '-' else 0
ntp_stats.append({
"remote": py23_compat.text_type(ip),
"referenceid": py23_compat.text_type(refid),
"synchronized": bool(synchronized),
"stratum": int(st),
"type": py23_compat.text_type(t),
"when": py23_compat.text_type(when),
"hostpoll": int(hostpoll),
"reachability": int(reachability),
"delay": float(delay),
"offset": float(offset),
"jitter": float(jitter)
})
return ntp_stats
def ping(self,
destination,
source=C.PING_SOURCE,
ttl=C.PING_TTL,
timeout=C.PING_TIMEOUT,
size=C.PING_SIZE,
count=C.PING_COUNT,
vrf=C.PING_VRF):
deadline = timeout * count
command = "ping %s " % destination
command += "-t %d " % int(ttl)
command += "-w %d " % int(deadline)
command += "-s %d " % int(size)
command += "-c %d " % int(count)
if source != "":
command += "interface %s " % source
ping_result = dict()
output_ping = self.device.send_command(command)
if "Unknown host" in output_ping:
err = "Unknown host"
else:
err = ""
if err != "":
ping_result["error"] = err
else:
# 'packet_info' example:
# ['5', 'packets', 'transmitted,' '5', 'received,' '0%', 'packet',
# 'loss,', 'time', '3997ms']
packet_info = output_ping.split("\n")
if ('transmitted' in packet_info[-2]):
packet_info = packet_info[-2]
else:
packet_info = packet_info[-3]
packet_info = [x.strip() for x in packet_info.split()]
sent = int(packet_info[0])
received = int(packet_info[3])
lost = sent - received
# 'rtt_info' example:
# ["0.307/0.396/0.480/0.061"]
rtt_info = output_ping.split("\n")
if len(rtt_info[-1]) > 0:
rtt_info = rtt_info[-1]
else:
rtt_info = rtt_info[-2]
match = re.search(r"([\d\.]+)/([\d\.]+)/([\d\.]+)/([\d\.]+)", rtt_info)
if match is not None:
rtt_min = float(match.group(1))
rtt_avg = float(match.group(2))
rtt_max = float(match.group(3))
rtt_stddev = float(match.group(4))
else:
rtt_min = None
rtt_avg = None
rtt_max = None
rtt_stddev = None
ping_responses = list()
response_info = output_ping.split("\n")
for res in response_info:
match_res = re.search(r"from\s([\d\.]+).*time=([\d\.]+)", res)
if match_res is not None:
ping_responses.append(
{
"ip_address": match_res.group(1),
"rtt": float(match_res.group(2))
}
)
ping_result["success"] = dict()
ping_result["success"] = {
"probes_sent": sent,
"packet_loss": lost,
"rtt_min": rtt_min,
"rtt_max": rtt_max,
"rtt_avg": rtt_avg,
"rtt_stddev": rtt_stddev,
"results": ping_responses
}
return ping_result
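# Illustration (not part of the driver): with destination='10.0.0.1', ttl=255,
# timeout=2, size=100 and count=5 (all hypothetical values), the command
# assembled above is:
#     ping 10.0.0.1 -t 255 -w 10 -s 100 -c 5
# where -w is the deadline in seconds, i.e. timeout * count.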
def _get_interface_neighbors(self, neighbors_list):
neighbors = []
for neighbor in neighbors_list:
temp = {}
temp['hostname'] = neighbor['adj_hostname']
temp['port'] = neighbor['adj_port']
neighbors.append(temp)
return neighbors
def get_lldp_neighbors(self):
"""Cumulus get_lldp_neighbors."""
lldp = {}
command = 'sudo net show interface all json'
try:
intf_output = json.loads(self._send_command(command))
except ValueError:
intf_output = json.loads(self.device.send_command(command))
for interface in intf_output:
if intf_output[interface]['iface_obj']['lldp'] is not None:
lldp[interface] = self._get_interface_neighbors(
intf_output[interface]['iface_obj']['lldp'])
return lldp
def get_interfaces(self):
interfaces = {}
# Get 'net show interface all json' output.
output = self._send_command('sudo net show interface all json')
# Handling bad send_command_timing return output.
try:
output_json = json.loads(output)
except ValueError:
output_json = json.loads(self.device.send_command('sudo net show interface all json'))
for interface in output_json.keys():
interfaces[interface] = {}
if output_json[interface]['linkstate'] == "UP":
interfaces[interface]['is_up'] = True
elif output_json[interface]['linkstate'] == 'DN':
interfaces[interface]['is_up'] = False
else:
# Link state is an unhandled state
interfaces[interface]['is_up'] = False
interfaces[interface]['is_enabled'] = True
interfaces[interface]['description'] = py23_compat.text_type(
output_json[interface]['iface_obj']['description'])
speed_map = {'100M': 100, '1G': 1000, '10G': 10000, '40G': 40000, '100G': 100000}
if output_json[interface]['speed'] is None:
interfaces[interface]['speed'] = -1
else:
try:
interfaces[interface]['speed'] = speed_map[output_json[interface]['speed']]
except KeyError:
interfaces[interface]['speed'] = -1
interfaces[interface]['mac_address'] = py23_compat.text_type(
output_json[interface]['iface_obj']['mac'])
# Test if the quagga daemon is running.
quagga_test = self._send_command('service quagga status')
for line in quagga_test.splitlines():
if 'Active:' in line:
status = line.split()[1]
if 'inactive' in status:
quagga_status = False
elif 'active' in status:
quagga_status = True
else:
quagga_status = False
# If the quagga daemon is running, run the show interface command for each interface
# to get information about the most recent interface change.
if quagga_status:
for interface in interfaces.keys():
command = "sudo vtysh -c 'show interface %s'" % interface
quagga_show_int_output = self._send_command(command)
# Get the link up and link down datetimes if available.
for line in quagga_show_int_output.splitlines():
if 'Link ups' in line:
if '(never)' in line.split()[4]:
last_flapped_1 = False
else:
last_flapped_1 = True
last_flapped_1_date = line.split()[4] + " " + line.split()[5]
last_flapped_1_date = datetime.strptime(
last_flapped_1_date, "%Y/%m/%d %H:%M:%S.%f")
if 'Link downs' in line:
if '(never)' in line.split()[4]:
last_flapped_2 = False
else:
last_flapped_2 = True
last_flapped_2_date = line.split()[4] + " " + line.split()[5]
last_flapped_2_date = datetime.strptime(
last_flapped_2_date, "%Y/%m/%d %H:%M:%S.%f")
# Compare the link up and link down datetimes to determine the most recent and
# set that as the last flapped after converting to seconds.
if last_flapped_1 and last_flapped_2:
last_delta = last_flapped_1_date - last_flapped_2_date
if last_delta.days >= 0:
last_flapped = last_flapped_1_date
else:
last_flapped = last_flapped_2_date
elif last_flapped_1:
last_flapped = last_flapped_1_date
elif last_flapped_2:
last_flapped = last_flapped_2_date
else:
last_flapped = -1
if last_flapped != -1:
# Get remote timezone.
tmz = self.device.send_command('date +"%Z"')
now_time = datetime.now(timezone(tmz))
last_flapped = last_flapped.replace(tzinfo=timezone(tmz))
last_flapped = (now_time - last_flapped).total_seconds()
interfaces[interface]['last_flapped'] = float(last_flapped)
# If quagga daemon isn't running set all last_flapped values to -1.
if not quagga_status:
for interface in interfaces.keys():
interfaces[interface]['last_flapped'] = -1
return interfaces
def get_interfaces_ip(self):
# Get net show interface all json output.
output = self._send_command('sudo net show interface all json')
# Handling bad send_command_timing return output.
try:
output_json = json.loads(output)
except ValueError:
output_json = json.loads(self.device.send_command('sudo net show interface all json'))
def rec_dd(): return defaultdict(rec_dd)
interfaces_ip = rec_dd()
for interface in output_json:
if not output_json[interface]['iface_obj']['ip_address']['allentries']:
continue
else:
for ip_address in output_json[interface]['iface_obj']['ip_address']['allentries']:
ip_ver = ipaddress.ip_interface(py23_compat.text_type(ip_address)).version
ip_ver = 'ipv{}'.format(ip_ver)
ip, prefix = ip_address.split('/')
interfaces_ip[interface][ip_ver][ip] = {'prefix_length': int(prefix)}
return interfaces_ip
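# Illustration (not part of the driver): an address such as '10.0.0.1/24'
# configured on interface 'swp1' (both hypothetical) is returned as:
#     {'swp1': {'ipv4': {'10.0.0.1': {'prefix_length': 24}}}}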
def get_config(self, retrieve='all'):
# Initialise the configuration dictionary
configuration = {
'startup': '',
'running': '',
'candidate': '',
}
if retrieve in ('running', 'all'):
# Get net show configuration output.
output = self._send_command('net show configuration')
configuration['running'] = py23_compat.text_type(output)
if retrieve in ('candidate', 'all'):
# Get net pending output.
output = self._send_command('net pending json')
configuration['candidate'] = py23_compat.text_type(output)
return configuration
def get_bgp_neighbors(self):
vrf = 'global'
bgp_neighbors = {vrf: {}}
bgp_neighbor = {}
supported_afis = ['ipv4 unicast', 'ipv6 unicast']
bgp_summary_output = self._send_command('net show bgp summary json')
dev_bgp_summary = json.loads(bgp_summary_output)
bgp_neighbors_output = self._send_command('net show bgp neighbor json')
dev_bgp_neighbors = json.loads(bgp_neighbors_output)
for afi in dev_bgp_summary:
if not (afi.lower() in supported_afis):
continue
bgp_neighbors[vrf]['router_id'] = dev_bgp_summary[afi]['routerId']
bgp_neighbors[vrf].setdefault("peers", {})
for peer in dev_bgp_summary[afi]['peers']:
bgp_neighbor = {}
bgp_neighbor['local_as'] = dev_bgp_neighbors[peer]['localAs']
bgp_neighbor['remote_as'] = dev_bgp_neighbors[peer]['remoteAs']
bgp_neighbor['remote_id'] = dev_bgp_neighbors[peer]['remoteRouterId']
uptime = dev_bgp_neighbors[peer].get('bgpTimerUpMsec', "")
bgp_neighbor['description'] = dev_bgp_neighbors[peer].get("nbrDesc", '')
if dev_bgp_neighbors[peer]['bgpState'] == 'Established':
is_up = True
else:
is_up = False
uptime = -1
if dev_bgp_neighbors[peer].get('adminShutDown', False):
is_enabled = False
else:
is_enabled = True
bgp_neighbor['is_up'] = is_up
bgp_neighbor['is_enabled'] = is_enabled
bgp_neighbor['uptime'] = int(uptime / 1000)
bgp_neighbor.setdefault("address_family", {})
for af, af_details in dev_bgp_neighbors[peer]['addressFamilyInfo'].items():
af = af.lower()
if not (af in supported_afis):
continue
route_info = {}
bgp_peer_advertised_routes = self._send_command('net show bgp {} neighbor {} '
'advertised-routes json'
.format(af, peer))
dev_bgp_peer_advertised_routes = \
json.loads(bgp_peer_advertised_routes.replace('n\n', ''))
peer_advertised_routes = dev_bgp_peer_advertised_routes['totalPrefixCounter']
if not is_enabled:
dev_bgp_summary[af]['peers'][peer]['prefixReceivedCount'] = -1
peer_advertised_routes = -1
af_details['acceptedPrefixCounter'] = -1
route_info['received_prefixes'] = \
dev_bgp_summary[af]['peers'][peer]['prefixReceivedCount']
route_info['sent_prefixes'] = int(peer_advertised_routes)
route_info['accepted_prefixes'] = af_details['acceptedPrefixCounter']
bgp_neighbor['address_family'][af.split()[0]] = route_info
bgp_neighbors[vrf]['peers'][peer] = bgp_neighbor
return bgp_neighbors
def get_snmp_information(self):
snmp_config_output = self._send_command('net show configuration snmp-server')
contact = system_name = location = ""
snmp_information = {}
snmp_values = {}
community_list = []
snmp_values.setdefault("community", {})
for parse_snmp_value in snmp_config_output.splitlines():
if "readonly-community" in parse_snmp_value or \
"readonly-community-v6" in parse_snmp_value:
community_value = parse_snmp_value.strip().split()[1]
acl = parse_snmp_value.lstrip().split()[3]
if acl == "any":
acl = "N/A"
if community_value in community_list:
"""
Unlike other routers that use ACL for
snmp access-control, Cumulus directly defines
authorized hosts as part of SNMP config.
E.g:
snmp-server
listening-address all
readonly-community private_multi_host access 10.10.10.1
system-contact NOC
system-location LAB
system-name cumulus-rtr-1
This creates a problem because the NAPALM SNMP object
uses the access-list name as the key for each community string.
To best present the authorized-host info in the SNMP object,
we use a comma-separated string of those hosts as the key of the SNMP community.
"""
acl = snmp_values["community"][community_value]["acl"] + "," + acl
snmp_values["community"][community_value] = {"acl": acl, "mode": "ro"}
else:
community_list.append(community_value)
snmp_values["community"][community_value] = {"acl": acl, "mode": "ro"}
system_contact_parse = re.search(r'.*system-contact.(\D.*)', parse_snmp_value.strip())
if system_contact_parse:
contact = system_contact_parse.groups()[0]
system_location_parse = re.search(r'.*system-location.(\D.*)', parse_snmp_value.strip())
if system_location_parse:
location = system_location_parse.groups()[0]
system_name_parse = re.search(r'.*system-name.(\D.*)', parse_snmp_value.strip())
if system_name_parse:
system_name = system_name_parse.groups()[0]
snmp_information = snmp_values
snmp_information["contact"] = contact
snmp_information["chassis_id"] = system_name
snmp_information["location"] = location
return snmp_information
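# Illustration (not part of the driver): a minimal end-to-end usage sketch of
# the class defined above. Hostname and credentials are placeholders.
if __name__ == '__main__':
    driver = CumulusDriver('cumulus-rtr-1', 'admin', 'secret',
                           optional_args={'port': 22})
    driver.open()
    try:
        print(driver.get_facts())
        print(driver.get_lldp_neighbors())
    finally:
        driver.close()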
|
|
#!/usr/bin/python
# Copyright (C) 2012-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import config
import gettext
import os
import re
import shlex
import subprocess
import sys
from Cheetah.Template import Template
from ovirt_engine import configfile, java, service
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine')
class Daemon(service.Daemon):
_JBOSS_VERSION_REGEX = re.compile(
flags=re.VERBOSE,
pattern=r"""
^
[^\d]*
(?P<major>\d+)
\.
(?P<minor>\d+)
\.
(?P<revision>\d+)
.*
""",
)
def __init__(self):
super(Daemon, self).__init__()
self._tempDir = None
self._jbossRuntime = None
self._jbossVersion = None
self._jbossConfigFile = None
self._defaults = os.path.abspath(
os.path.join(
os.path.dirname(sys.argv[0]),
'ovirt-engine.conf',
)
)
def _processTemplate(self, template, dir, mode=None):
out = os.path.join(
dir,
re.sub(r'\.in$', '', os.path.basename(template)),
)
with open(out, 'w') as f:
if mode is not None:
os.chmod(out, mode)
f.write(
'%s' % (
Template(
file=template,
searchList=[
self._config,
self._jbossVersion,
{
'jboss_runtime': self._jbossRuntime.directory,
},
],
)
),
)
return out
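# Illustration (not part of the service): a template file named
# 'ovirt-engine-logging.properties.in' (used further below) is rendered into
# the target directory as 'ovirt-engine-logging.properties'; the re.sub()
# above simply strips the trailing '.in' from the basename.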
def _linkModules(self, directory, modulePath):
"""
Link all the JBoss modules into a temporary directory.
This is required because JBoss tries to automatically update
indexes based on timestamp even if there is no permission to do so.
"""
modifiedModulePath = []
for index, element in enumerate(modulePath.split(':')):
modulesTmpDir = os.path.join(
directory,
'%02d-%s' % (
index,
'-'.join(element.split(os.sep)[-2:]),
),
)
modifiedModulePath.append(modulesTmpDir)
# For each directory in the modules directory create the
# same in the temporary directory and populate with symlinks
# pointing to the original files (excluding indexes):
for parentDir, childrenDirs, childrenFiles in os.walk(element):
parentTmpDir = os.path.join(
modulesTmpDir,
os.path.relpath(
parentDir,
element
),
)
if not os.path.exists(parentTmpDir):
os.makedirs(parentTmpDir)
for childFile in childrenFiles:
if childFile.endswith('.index'):
continue
os.symlink(
os.path.join(parentDir, childFile),
os.path.join(parentTmpDir, childFile)
)
return ':'.join(modifiedModulePath)
def _checkInstallation(
self,
pidfile,
jbossModulesJar,
):
# Check the required JBoss directories and files:
self.check(
name=self._config.get('JBOSS_HOME'),
directory=True,
)
self.check(
name=jbossModulesJar,
)
# Check the required engine directories and files:
self.check(
os.path.join(
self._config.get('ENGINE_USR'),
'services',
),
directory=True,
)
self.check(
self._config.get('ENGINE_CACHE'),
directory=True,
writable=True,
)
self.check(
self._config.get('ENGINE_TMP'),
directory=True,
writable=True,
mustExist=False,
)
self.check(
self._config.get('ENGINE_LOG'),
directory=True,
writable=True,
)
self.check(
name=os.path.join(
self._config.get("ENGINE_LOG"),
'host-deploy',
),
directory=True,
writable=True,
)
for log in ('engine.log', 'console.log', 'server.log'):
self.check(
name=os.path.join(self._config.get("ENGINE_LOG"), log),
mustExist=False,
writable=True,
)
if pidfile is not None:
self.check(
name=pidfile,
writable=True,
mustExist=False,
)
def _setupEngineApps(self):
deploymentsDir = os.path.join(
self._jbossRuntime.directory,
'deployments',
)
os.mkdir(deploymentsDir)
# The list of applications to be deployed:
for engineAppDir in shlex.split(self._config.get('ENGINE_APPS')):
self.logger.debug('Deploying: %s', engineAppDir)
if not os.path.isabs(engineAppDir):
engineAppDir = os.path.join(
self._config.get('ENGINE_USR'),
engineAppDir,
)
if not os.path.exists(engineAppDir):
self.logger.warning(
_(
"Application directory '{directory}' "
"does not exist, it will be ignored"
).format(
directory=engineAppDir,
),
)
continue
engineAppLink = os.path.join(
deploymentsDir,
os.path.basename(engineAppDir),
)
os.symlink(engineAppDir, engineAppLink)
with open('%s.dodeploy' % engineAppLink, 'w'):
pass
def _detectJBossVersion(self):
proc = subprocess.Popen(
executable=self._executable,
args=['ovirt-engine-version'] + self._engineArgs + ['-v'],
env=self._engineEnv,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
stdout, stderr = proc.communicate()
stdout = stdout.decode('utf-8', 'replace').splitlines()
stderr = stderr.decode('utf-8', 'replace').splitlines()
self.logger.debug(
"Return code: %s, \nstdout: '%s, \nstderr: '%s'",
proc.returncode,
stdout,
stderr,
)
for line in stdout:
match = self._JBOSS_VERSION_REGEX.match(line)
if match is not None:
self._jbossVersion = {
'JBOSS_MAJOR': int(match.group('major')),
'JBOSS_MINOR': int(match.group('minor')),
'JBOSS_REVISION': int(match.group('revision')),
}
break
else:
raise RuntimeError(_('Cannot detect JBoss version'))
self.logger.debug(
"Detected JBoss version: %s",
self._jbossVersion,
)
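# Illustration (not part of the service): given a hypothetical stdout line
# such as 'JBoss Modules version 1.3.0.Final', the regex above captures
# major=1, minor=3, revision=0, yielding
#     {'JBOSS_MAJOR': 1, 'JBOSS_MINOR': 3, 'JBOSS_REVISION': 0}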
def daemonSetup(self):
if os.geteuid() == 0:
raise RuntimeError(
_('This service cannot be executed as root')
)
if not os.path.exists(self._defaults):
raise RuntimeError(
_(
"The configuration defaults file '{file}' "
"required but missing"
).format(
file=self._defaults,
)
)
self._config = configfile.ConfigFile(
(
self._defaults,
config.ENGINE_VARS,
),
)
#
# Resolve the java executable at the earliest so we can abort early.
#
self._executable = os.path.join(
java.Java().getJavaHome(),
'bin',
'java',
)
jbossModulesJar = os.path.join(
self._config.get('JBOSS_HOME'),
'jboss-modules.jar',
)
self._checkInstallation(
pidfile=self.pidfile,
jbossModulesJar=jbossModulesJar,
)
self._tempDir = service.TempDir(self._config.get('ENGINE_TMP'))
self._tempDir.create()
self._jbossRuntime = service.TempDir(self._config.get('JBOSS_RUNTIME'))
self._jbossRuntime.create()
self._setupEngineApps()
jbossTempDir = os.path.join(
self._jbossRuntime.directory,
'tmp',
)
jbossConfigDir = os.path.join(
self._jbossRuntime.directory,
'config',
)
javaModulePath = self._linkModules(
os.path.join(
self._jbossRuntime.directory,
'modules',
),
'%s:%s' % (
self._config.get('ENGINE_JAVA_MODULEPATH'),
os.path.join(
self._config.get('JBOSS_HOME'),
'modules',
),
),
)
os.mkdir(jbossTempDir)
os.mkdir(jbossConfigDir)
os.chmod(jbossConfigDir, 0o700)
jbossBootLoggingFile = self._processTemplate(
template=os.path.join(
os.path.dirname(sys.argv[0]),
'ovirt-engine-logging.properties.in'
),
dir=jbossConfigDir,
)
# We start with an empty list of arguments:
self._engineArgs = []
# Add arguments for the java virtual machine:
self._engineArgs.extend([
# Virtual machine options:
'-server',
'-XX:+TieredCompilation',
'-Xms%s' % self._config.get('ENGINE_HEAP_MIN'),
'-Xmx%s' % self._config.get('ENGINE_HEAP_MAX'),
'-XX:PermSize=%s' % self._config.get('ENGINE_PERM_MIN'),
'-XX:MaxPermSize=%s' % self._config.get(
'ENGINE_PERM_MAX'
),
])
# Add extra system properties provided in the configuration:
for engineProperty in shlex.split(
self._config.get('ENGINE_PROPERTIES')
):
if not engineProperty.startswith('-D'):
engineProperty = '-D' + engineProperty
self._engineArgs.append(engineProperty)
# Add extra jvm arguments provided in the configuration:
for arg in shlex.split(self._config.get('ENGINE_JVM_ARGS')):
self._engineArgs.append(arg)
# Enable verbose garbage collection if required:
if self._config.getboolean('ENGINE_VERBOSE_GC'):
self._engineArgs.extend([
'-verbose:gc',
'-XX:+PrintGCTimeStamps',
'-XX:+PrintGCDetails',
])
# Specify special krb5.conf file if required
if self._config.get('AAA_KRB5_CONF_FILE'):
self._engineArgs.append(
'-Djava.security.krb5.conf=%s' % self._config.get(
'AAA_KRB5_CONF_FILE'
)
)
# Add arguments for JBoss:
self._engineArgs.extend([
'-Djava.util.logging.manager=org.jboss.logmanager',
'-Dlogging.configuration=file://%s' % jbossBootLoggingFile,
'-Dorg.jboss.resolver.warning=true',
'-Djboss.modules.system.pkgs=org.jboss.byteman',
'-Djboss.modules.write-indexes=false',
'-Djboss.server.default.config=ovirt-engine',
'-Djboss.home.dir=%s' % self._config.get(
'JBOSS_HOME'
),
'-Djboss.server.base.dir=%s' % self._config.get(
'ENGINE_USR'
),
'-Djboss.server.data.dir=%s' % self._config.get(
'ENGINE_VAR'
),
'-Djboss.server.log.dir=%s' % self._config.get(
'ENGINE_LOG'
),
'-Djboss.server.config.dir=%s' % jbossConfigDir,
'-Djboss.server.temp.dir=%s' % jbossTempDir,
'-Djboss.controller.temp.dir=%s' % jbossTempDir,
'-jar', jbossModulesJar,
'-mp', javaModulePath,
'-jaxpmodule', 'javax.xml.jaxp-provider',
'org.jboss.as.standalone',
])
self._engineEnv = os.environ.copy()
self._engineEnv.update({
'PATH': (
'/usr/local/sbin:/usr/local/bin:'
'/usr/sbin:/usr/bin:/sbin:/bin'
),
'LANG': 'en_US.UTF-8',
'LC_ALL': 'en_US.UTF-8',
'ENGINE_DEFAULTS': self._defaults,
'ENGINE_VARS': config.ENGINE_VARS,
'ENGINE_ETC': self._config.get('ENGINE_ETC'),
'ENGINE_LOG': self._config.get('ENGINE_LOG'),
'ENGINE_TMP': self._tempDir.directory,
'ENGINE_USR': self._config.get('ENGINE_USR'),
'ENGINE_VAR': self._config.get('ENGINE_VAR'),
'ENGINE_CACHE': self._config.get('ENGINE_CACHE'),
})
self._detectJBossVersion()
self._jbossConfigFile = self._processTemplate(
template=os.path.join(
os.path.dirname(sys.argv[0]),
'ovirt-engine.xml.in',
),
dir=jbossConfigDir,
mode=0o600,
)
def daemonStdHandles(self):
consoleLog = open(
os.path.join(
self._config.get('ENGINE_LOG'),
'console.log'
),
'w+',
)
return (consoleLog, consoleLog)
def daemonContext(self):
try:
#
# create mark file to be used by notifier service
#
with open(self._config.get('ENGINE_UP_MARK'), 'w') as f:
f.write('%s\n' % os.getpid())
#
# NOTE:
# jdwp must be set only for the process we are trying
# to debug, as jvm will open it and conflict with other
# instances.
#
self.daemonAsExternalProcess(
executable=self._executable,
args=(
['ovirt-engine'] +
([(
'-Xrunjdwp:transport=dt_socket,address=%s,'
'server=y,suspend=n'
) % (
self._config.get('ENGINE_DEBUG_ADDRESS')
)] if self._config.get('ENGINE_DEBUG_ADDRESS') else []) +
self._engineArgs +
['-c', os.path.basename(self._jbossConfigFile)]
),
env=self._engineEnv,
stopTime=self._config.getinteger(
'ENGINE_STOP_TIME'
),
stopInterval=self._config.getinteger(
'ENGINE_STOP_INTERVAL'
),
)
raise self.TerminateException()
except self.TerminateException:
if os.path.exists(self._config.get('ENGINE_UP_MARK')):
os.remove(self._config.get('ENGINE_UP_MARK'))
def daemonCleanup(self):
if self._tempDir:
self._tempDir.destroy()
if self._jbossRuntime:
self._jbossRuntime.destroy()
if __name__ == '__main__':
service.setupLogger()
d = Daemon()
d.run()
# vim: expandtab tabstop=4 shiftwidth=4
|
|
import numpy as np
import scipy as sp
from itertools import product
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_less
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
solver_list = ['full', 'arpack', 'randomized', 'auto']
def test_pca():
# PCA on dense arrays
X = iris.data
for n_comp in np.arange(X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='full')
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
# test explained_variance_ratio_ == 1 with all components
pca = PCA(svd_solver='full')
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
def test_pca_arpack_solver():
# PCA on dense arrays
X = iris.data
d = X.shape[1]
# Loop excluding the extremes, invalid inputs for arpack
for n_comp in np.arange(1, d):
pca = PCA(n_components=n_comp, svd_solver='arpack', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(d), 12)
pca = PCA(n_components=0, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
pca = PCA(n_components=d, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
assert_equal(pca.n_components,
PCA(n_components=d,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
def test_pca_randomized_solver():
# PCA on dense arrays
X = iris.data
# Loop excluding the 0, invalid for randomized
for n_comp in np.arange(1, X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='randomized', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='randomized', random_state=0).svd_solver)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
n_features = n_components + 2 # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise standard deviation of the first 50 features is roughly
# 3 times that of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_greater(X.std(axis=0).std(), 43.8)
for solver, copy in product(solver_list, (True, False)):
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = PCA(n_components=n_components, whiten=True, copy=copy,
svd_solver=solver, random_state=0, iterated_power=7)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(ddof=1, axis=0),
np.ones(n_components),
decimal=6)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = PCA(n_components=n_components, whiten=False, copy=copy,
svd_solver=solver).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_explained_variance():
# Check that the explained variance is consistent across the different
# solvers and matches the empirical variance of the projected data
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full').fit(X)
apca = PCA(n_components=2, svd_solver='arpack', random_state=0).fit(X)
assert_array_almost_equal(pca.explained_variance_,
apca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
apca.explained_variance_ratio_, 3)
rpca = PCA(n_components=2, svd_solver='randomized', random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0]
expected_result = sorted(expected_result, reverse=True)[:2]
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, ddof=1, axis=0))
assert_array_almost_equal(pca.explained_variance_, expected_result)
X_pca = apca.transform(X)
assert_array_almost_equal(apca.explained_variance_,
np.var(X_pca, ddof=1, axis=0))
assert_array_almost_equal(apca.explained_variance_, expected_result)
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, ddof=1, axis=0),
decimal=1)
assert_array_almost_equal(rpca.explained_variance_,
expected_result, decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_singular_values():
# Check that the PCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full',
random_state=rng).fit(X)
apca = PCA(n_components=2, svd_solver='arpack',
random_state=rng).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.singular_values_, apca.singular_values_, 12)
assert_array_almost_equal(pca.singular_values_, rpca.singular_values_, 1)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 1)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 9)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 0)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
apca = PCA(n_components=3, svd_solver='arpack', random_state=rng)
rpca = PCA(n_components=3, svd_solver='randomized', random_state=rng)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
apca.fit(X_hat)
rpca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
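# Sketch (not part of the upstream suite): the assertions above rest on the
# identity ||T||_F^2 == sum_i s_i^2 for the score matrix T = U * S of the
# centered data. A standalone check with plain numpy; the helper is
# deliberately not named test_* so it is not collected automatically:
def check_singular_value_identity_sketch(n_samples=100, n_features=10, k=3):
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    Xc = X - X.mean(axis=0)
    U, S, Vt = np.linalg.svd(Xc, full_matrices=False)
    scores = U[:, :k] * S[:k]  # projection of Xc onto its top-k components
    assert_almost_equal(np.sum(scores ** 2), np.sum(S[:k] ** 2), decimal=6)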
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
for solver in solver_list:
Yt = PCA(n_components=2, svd_solver=solver).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='full').fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
for solver in solver_list:
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for solver in solver_list:
for n_components in [-1, 3]:
assert_raises(ValueError,
PCA(n_components, svd_solver=solver).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by randomized PCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2, svd_solver='randomized',
random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by randomized PCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = PCA(n_components=1, svd_solver='randomized',
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that randomized PCA is invertible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='randomized', random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_less(relative_max_delta, 1e-5)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle', svd_solver='full').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) +
np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5, svd_solver='full').fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separates different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives different scores if whiten=True
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
ll2 = pca.score(X)
assert_true(ll1 > ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k, svd_solver='full')
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
def test_svd_solver_auto():
rng = np.random.RandomState(0)
X = rng.uniform(size=(1000, 50))
# case: n_components in (0,1) => 'full'
pca = PCA(n_components=.5)
pca.fit(X)
pca_test = PCA(n_components=.5, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: max(X.shape) <= 500 => 'full'
pca = PCA(n_components=5, random_state=0)
Y = X[:10, :]
pca.fit(Y)
pca_test = PCA(n_components=5, svd_solver='full', random_state=0)
pca_test.fit(Y)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: n_components >= .8 * min(X.shape) => 'full'
pca = PCA(n_components=50)
pca.fit(X)
pca_test = PCA(n_components=50, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# n_components >= 1 and n_components < .8 * min(X.shape) => 'randomized'
pca = PCA(n_components=10, random_state=0)
pca.fit(X)
pca_test = PCA(n_components=10, svd_solver='randomized', random_state=0)
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
def test_deprecation_randomized_pca():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
depr_message = ("Class RandomizedPCA is deprecated; RandomizedPCA was "
"deprecated in 0.18 and will be "
"removed in 0.20. Use PCA(svd_solver='randomized') "
"instead. The new implementation DOES NOT store "
"whiten ``components_``. Apply transform to get them.")
def fit_deprecated(X):
global Y
rpca = RandomizedPCA(random_state=0)
Y = rpca.fit_transform(X)
assert_warns_message(DeprecationWarning, depr_message, fit_deprecated, X)
Y_pca = PCA(svd_solver='randomized', random_state=0).fit_transform(X)
assert_array_almost_equal(Y, Y_pca)
def test_pca_sparse_input():
X = np.random.RandomState(0).rand(5, 4)
X = sp.sparse.csr_matrix(X)
assert(sp.sparse.issparse(X))
for svd_solver in solver_list:
pca = PCA(n_components=3, svd_solver=svd_solver)
assert_raises(TypeError, pca.fit, X)
def test_pca_bad_solver():
X = np.random.RandomState(0).rand(5, 4)
pca = PCA(n_components=3, svd_solver='bad_argument')
assert_raises(ValueError, pca.fit, X)
def test_pca_dtype_preservation():
for svd_solver in solver_list:
yield check_pca_float_dtype_preservation, svd_solver
yield check_pca_int_dtype_upcast_to_double, svd_solver
def check_pca_float_dtype_preservation(svd_solver):
# Ensure that PCA does not upscale the dtype when input is float32
X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64)
X_32 = X_64.astype(np.float32)
pca_64 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_64)
pca_32 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_32)
assert pca_64.components_.dtype == np.float64
assert pca_32.components_.dtype == np.float32
assert pca_64.transform(X_64).dtype == np.float64
assert pca_32.transform(X_32).dtype == np.float32
assert_array_almost_equal(pca_64.components_, pca_32.components_,
decimal=5)
def check_pca_int_dtype_upcast_to_double(svd_solver):
# Ensure that all int types will be upcast to float64
X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4))
X_i64 = X_i64.astype(np.int64)
X_i32 = X_i64.astype(np.int32)
pca_64 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_i64)
pca_32 = PCA(n_components=3, svd_solver=svd_solver,
random_state=0).fit(X_i32)
assert pca_64.components_.dtype == np.float64
assert pca_32.components_.dtype == np.float64
assert pca_64.transform(X_i64).dtype == np.float64
assert pca_32.transform(X_i32).dtype == np.float64
assert_array_almost_equal(pca_64.components_, pca_32.components_,
decimal=5)
|
|
import datetime
import itertools
import unittest
from copy import copy
from django.db import (
DatabaseError, IntegrityError, OperationalError, connection,
)
from django.db.models import Model
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
DateTimeField, IntegerField, PositiveIntegerField, SlugField, TextField,
)
from django.db.models.fields.related import (
ForeignKey, ManyToManyField, OneToOneField,
)
from django.db.transaction import atomic
from django.test import TransactionTestCase, skipIfDBFeature
from .fields import CustomManyToManyField, InheritedManyToManyField
from .models import (
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak,
BookWithLongName, BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK,
Note, NoteRename, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing,
UniqueTest, new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests that the schema-alteration code works correctly.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Note,
Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest,
]
# Utility functions
def setUp(self):
# local_models should contain test dependent model classes that will be
# automatically removed from the app cache on test tear down.
self.local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if 'schema' in new_apps.all_models:
for model in self.local_models:
del new_apps.all_models['schema'][model._meta.model_name]
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.table_name_converter
with atomic():
connection.disable_constraint_checking()
table_names = connection.introspection.table_names()
for model in itertools.chain(SchemaTests.models, self.local_models):
tbl = converter(model._meta.db_table)
if tbl in table_names:
with connection.schema_editor() as editor:
editor.delete_model(model)
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
            # SQLite also doesn't raise an error for a missing table; the
            # pragma just comes back empty, so detect that case here.
if not columns:
raise DatabaseError("Table does not exist (empty pragma)")
return columns
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_indexes(cursor, table)
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
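    # For orientation: get_constraints() returns a dict keyed by constraint
    # name; each value is a backend-dependent dict roughly of the form
    # (a sketch, not an exact contract):
    #     {'columns': ['author_id'], 'primary_key': False, 'unique': False,
    #      'index': True, 'check': False, 'foreign_key': ('schema_author', 'id')}
    # which is what the constraint loops in the tests below inspect.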
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check that it's there
list(Author.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Author)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Author.objects.all()),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_fk(self):
"Tests that creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Check that initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Make sure the new FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_fk_db_constraint(self):
"Tests that the db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Check that initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
# Check that BookWeak doesn't have an FK constraint
constraints = self.get_constraints(BookWeak._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.fail("FK constraint for author_id found")
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Make sure no FK constraint is present
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.fail("FK constraint for tag_id found")
# Alter to one with a constraint
new_field2 = ForeignKey(Tag)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
# Make sure the new FK constraint is present
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
break
else:
self.fail("No FK constraint for tag_id found")
# Alter to one without a constraint again
new_field2 = ForeignKey(Tag)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field2, new_field, strict=True)
# Make sure no FK constraint is present
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.fail("FK constraint for tag_id found")
def _test_m2m_db_constraint(self, M2MFieldClass):
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(LocalAuthorWithM2M)
# Check that initial tables are there
list(LocalAuthorWithM2M.objects.all())
list(Tag.objects.all())
# Make a db_constraint=False FK
new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Make sure no FK constraint is present
constraints = self.get_constraints(new_field.rel.through._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.fail("FK constraint for tag_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['age'][0], "IntegerField")
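        # columns['age'][1] is the DB-API cursor description row for the
        # column; index 6 is its null_ok flag, so this checks null=True stuck.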
self.assertEqual(columns['age'][1][6], True)
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['surname'][0], "CharField")
self.assertEqual(columns['surname'][1][6],
connection.features.interprets_empty_strings_as_nulls)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
        # BooleanFields are stored as TINYINT(1) on MySQL.
        field_type = columns['awesome'][0]
        self.assertEqual(
            field_type,
            connection.features.introspected_boolean_field_type(
                new_field, created_separately=True))
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add the field with a default it needs to cast (to string in this case)
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns['thing']
self.assertEqual(field_type, 'IntegerField')
# Make sure the values were transformed correctly
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
# MySQL annoyingly uses the same backend, so it'll come back as one of
# these two types.
self.assertIn(columns['bits'][0], ("BinaryField", "TextField"))
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(columns['name'][1][6], True)
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
def test_alter_text_field(self):
# Regression for "BLOB/TEXT column 'info' can't have a default value")
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_alter_textual_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
def test_alter_numeric_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='aaa')
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='bbb')
def test_alter_null_to_not_null(self):
"""
#23609 - Tests handling of default values when altering from NULL to NOT NULL.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertTrue(columns['height'][1][6])
# Create some test data
Author.objects.create(name='Not null author', height=12)
Author.objects.create(name='Null author')
# Verify null value
self.assertEqual(Author.objects.get(name='Not null author').height, 12)
self.assertIsNone(Author.objects.get(name='Null author').height)
# Alter the height field to NOT NULL with default
old_field = Author._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertFalse(columns['height'][1][6])
# Verify default value
self.assertEqual(Author.objects.get(name='Not null author').height, 12)
self.assertEqual(Author.objects.get(name='Null author').height, 42)
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_null when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field('name')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field)
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
interprets_empty_strings_as_null when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field('info')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field)
@unittest.skipUnless(connection.features.supports_combined_alters, "No combined ALTER support")
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns['height'][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithDefaultHeight, old_field, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns['height'][1][6])
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_to_fk(self):
"""
#24447 - Tests adding a FK constraint for an existing column
"""
class LocalBook(Model):
author = IntegerField()
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBook]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBook)
# Ensure no FK constraint exists
constraints = self.get_constraints(LocalBook._meta.db_table)
for name, details in constraints.items():
if details['foreign_key']:
self.fail('Found an unexpected FK constraint to %s' % details['columns'])
old_field = LocalBook._meta.get_field("author")
new_field = ForeignKey(Author)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(LocalBook, old_field, new_field, strict=True)
constraints = self.get_constraints(LocalBook._meta.db_table)
# Ensure FK constraint exists
for name, details in constraints.items():
if details['foreign_key'] and details['columns'] == ["author_id"]:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_o2o_to_fk(self):
"""
#24163 - Tests altering of OneToOneField to ForeignKey
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
# Ensure the field is right to begin with
columns = self.column_classes(BookWithO2O)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is unique
author = Author.objects.create(name="Joe")
BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
BookWithO2O.objects.all().delete()
# Make sure the FK constraint is present
constraints = self.get_constraints(BookWithO2O._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
# Alter the OneToOneField to ForeignKey
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is not unique anymore
Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
# Make sure the FK constraint is still present
constraints = self.get_constraints(Book._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_fk_to_o2o(self):
"""
#24163 - Tests altering of ForeignKey to OneToOneField
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is not unique
author = Author.objects.create(name="Joe")
Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
Book.objects.all().delete()
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
# Alter the ForeignKey to OneToOneField
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(BookWithO2O)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is unique now
BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
# Make sure the FK constraint is present
constraints = self.get_constraints(BookWithO2O._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = IntegerField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name='Foo')
def test_alter_int_pk_to_autofield_pk(self):
"""
Should be able to rename an IntegerField(primary_key=True) to
AutoField(primary_key=True).
"""
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
old_field = IntegerPK._meta.get_field('i')
new_field = AutoField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('i')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
def test_alter_int_pk_to_int_unique(self):
"""
Should be able to rename an IntegerField(primary_key=True) to
IntegerField(unique=True).
"""
class IntegerUnique(Model):
i = IntegerField(unique=True)
j = IntegerField(primary_key=True)
class Meta:
app_label = 'schema'
apps = new_apps
db_table = 'INTEGERPK'
with connection.schema_editor() as editor:
editor.create_model(IntegerPK)
# model requires a new PK
old_field = IntegerPK._meta.get_field('j')
new_field = IntegerField(primary_key=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('j')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
old_field = IntegerPK._meta.get_field('i')
new_field = IntegerField(unique=True)
new_field.model = IntegerPK
new_field.set_attributes_from_name('i')
with connection.schema_editor() as editor:
editor.alter_field(IntegerPK, old_field, new_field, strict=True)
# Ensure unique constraint works.
IntegerUnique.objects.create(i=1, j=1)
with self.assertRaises(IntegrityError):
IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['display_name'][0], "CharField")
self.assertNotIn("name", columns)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_rename_keep_null_status(self):
"""
Renaming a field shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = TextField()
new_field.set_attributes_from_name("detail_info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns['detail_info'][0], "TextField")
self.assertNotIn("info", columns)
with self.assertRaises(IntegrityError):
NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
"""
Tests M2M fields on models during creation
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").rel.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
def test_m2m_create(self):
self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey("schema.LocalBookWithM2MThrough")
tag = ForeignKey("schema.TagM2MTest")
class Meta:
app_label = 'schema'
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalTagThrough)
self.assertEqual(columns['book_id'][0], "IntegerField")
self.assertEqual(columns['tag_id'][0], "IntegerField")
def test_m2m_create_through(self):
self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
self._test_m2m_create_through(InheritedManyToManyField)
def _test_m2m(self, M2MFieldClass):
"""
Tests adding/removing M2M fields on models
"""
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Ensure there's no m2m table there
self.assertRaises(DatabaseError, self.column_classes, new_field.rel.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.rel.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2M, new_field, new_field)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(LocalAuthorWithM2M, new_field)
# Ensure there's no m2m table there
self.assertRaises(DatabaseError, self.column_classes, new_field.rel.through)
# Need to tear down using a model without the added M2M field that's
# been removed.
class LocalAuthorWithM2M(Model):
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
def test_m2m(self):
self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
class LocalAuthorTag(Model):
author = ForeignKey("schema.LocalAuthorWithM2MThrough")
tag = ForeignKey("schema.TagM2MTest")
class Meta:
app_label = 'schema'
apps = new_apps
class LocalAuthorWithM2MThrough(Model):
name = CharField(max_length=255)
tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorTag)
editor.create_model(LocalAuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
"""
Tests repointing M2M fields
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
constraints = self.get_constraints(LocalBookWithM2M._meta.get_field("tags").rel.through._meta.db_table)
if connection.features.supports_foreign_keys:
for name, details in constraints.items():
if details['columns'] == ["tagm2mtest_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tagm2mtest', 'id'))
break
else:
self.fail("No FK constraint for tagm2mtest_id found")
# Repoint the M2M
old_field = LocalBookWithM2M._meta.get_field("tags")
new_field = M2MFieldClass(UniqueTest)
new_field.contribute_to_class(LocalBookWithM2M, "uniques")
with connection.schema_editor() as editor:
editor.alter_field(LocalBookWithM2M, old_field, new_field)
# Ensure old M2M is gone
self.assertRaises(DatabaseError, self.column_classes, LocalBookWithM2M._meta.get_field("tags").rel.through)
# This model looks like the new model and is used for teardown.
class LocalBookWithM2M(Model):
uniques = M2MFieldClass(UniqueTest)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBookWithM2M]
# Ensure the new M2M exists and points to UniqueTest
constraints = self.get_constraints(new_field.rel.through._meta.db_table)
if connection.features.supports_foreign_keys:
for name, details in constraints.items():
if details['columns'] == ["uniquetest_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_uniquetest', 'id'))
break
else:
self.fail("No FK constraint for uniquetest_id found")
def test_m2m_repoint(self):
self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
self._test_m2m_repoint(InheritedManyToManyField)
@unittest.skipUnless(connection.features.supports_column_check_constraints, "No check constraints")
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
# Alter the column to remove it
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
self.fail("Check constraint for height found")
# Alter the column to re-add it
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
old_field = Tag._meta.get_field("slug")
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field3 = SlugField(unique=True)
new_field3.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field2, new_field3, strict=True)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
self.assertRaises(IntegrityError, TagUniqueRename.objects.create, title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there is no unique_together constraint to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [['author', 'title']], [])
def test_unique_together_with_fk_with_existing_index(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key, where the foreign key is added after the model is
created.
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithoutAuthor)
new_field = ForeignKey(Author)
new_field.set_attributes_from_name('author')
editor.add_field(BookWithoutAuthor, new_field)
# Ensure the fields aren't unique to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [['author', 'title']], [])
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
        # Ensure there's no index on the slug/title columns first
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there is no index_together to begin with
        self.assertEqual(Book._meta.index_together, ())
        # Add the index_together constraint
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [['author', 'title']], [])
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
# Ensure there is an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c['columns'] == ["slug", "title"]
),
)
def test_db_table(self):
"""
Tests renaming of the table
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
# Ensure the table is there afterwards
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table again
with connection.schema_editor() as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
old_field = Book._meta.get_field("title")
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
new_field2 = Book._meta.get_field("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, new_field, new_field2, strict=True)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Add a unique column, verify that creates an implicit index
new_field3 = BookWithSlug._meta.get_field("slug")
with connection.schema_editor() as editor:
editor.add_field(Book, new_field3)
self.assertIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
# Remove the unique, check the index goes with it
new_field4 = CharField(max_length=20, unique=False)
new_field4.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
self.assertNotIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertTrue(
self.get_indexes(Tag._meta.db_table)['id']['primary_key'],
)
# Alter to change the PK
id_field = Tag._meta.get_field("id")
old_field = Tag._meta.get_field("slug")
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
with connection.schema_editor() as editor:
editor.remove_field(Tag, id_field)
editor.alter_field(Tag, old_field, new_field)
# Ensure the PK changed
self.assertNotIn(
'id',
self.get_indexes(Tag._meta.db_table),
)
self.assertTrue(
self.get_indexes(Tag._meta.db_table)['slug']['primary_key'],
)
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
        Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
        Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Add a second FK, this would fail due to long ref name before the fix
new_field = ForeignKey(AuthorWithEvenLongerName, related_name="something")
new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk")
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
def test_creation_deletion_reserved_names(self):
"""
Tries creating a model's table, and then deleting it when it has a
SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail("Errors when applying initial migration for a model "
"with a table named after a SQL reserved word: %s" % e)
# Check that it's there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Thing.objects.all()),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_remove_constraints_capital_letters(self):
"""
#23065 - Constraint names must be quoted if they contain capital letters.
"""
def get_field(*args, **kwargs):
kwargs['db_column'] = "CamelCase"
field = kwargs.pop('field_class', IntegerField)(*args, **kwargs)
field.set_attributes_from_name("CamelCase")
return field
model = Author
field = get_field()
table = model._meta.db_table
column = field.column
with connection.schema_editor() as editor:
editor.create_model(model)
editor.add_field(model, field)
editor.execute(
editor.sql_create_index % {
"table": editor.quote_name(table),
"name": editor.quote_name("CamelCaseIndex"),
"columns": editor.quote_name(column),
"extra": "",
}
)
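            # On PostgreSQL, for example, the statement above renders roughly
            # as (an illustrative sketch, not taken from the source):
            #     CREATE INDEX "CamelCaseIndex" ON "schema_author" ("CamelCase")
            # so the follow-up alter_field must drop it by its quoted name.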
editor.alter_field(model, get_field(db_index=True), field)
editor.execute(
editor.sql_create_unique % {
"table": editor.quote_name(table),
"name": editor.quote_name("CamelCaseUniqConstraint"),
"columns": editor.quote_name(field.column),
}
)
editor.alter_field(model, get_field(unique=True), field)
editor.execute(
editor.sql_create_fk % {
"table": editor.quote_name(table),
"name": editor.quote_name("CamelCaseFKConstraint"),
"column": editor.quote_name(column),
"to_table": editor.quote_name(table),
"to_column": editor.quote_name(model._meta.auto_field.column),
}
)
editor.alter_field(model, get_field(Author, field_class=ForeignKey), field)
def test_add_field_use_effective_default(self):
"""
#23987 - effective_default() should be used as the field default when
adding a new field.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Add new CharField to ensure default will be used from effective_default
new_field = CharField(max_length=15, blank=True)
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '')
def test_add_textfield_unhashable_default(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name='Anonymous1')
# Create a field that has an unhashable default
new_field = TextField(default={})
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
|
|
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import logging
from rally import consts
from rally import exceptions
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
from rally.plugins.openstack.scenarios.glance import utils as glance_utils
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.task import atomic
from rally.task import types
from rally.task import validation
LOG = logging.getLogger(__name__)
class CinderVolumes(cinder_utils.CinderScenario,
nova_utils.NovaScenario,
glance_utils.GlanceScenario):
"""Benchmark scenarios for Cinder Volumes."""
@types.convert(image={"type": "glance_image"})
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_list_volume(self, size, detailed=True,
image=None, **kwargs):
"""Create a volume and list all volumes.
Measure the "cinder volume-list" command performance.
If you have only 1 user in your context, you will
add 1 volume on every iteration. So you will have more
and more volumes and will be able to measure the
performance of the "cinder volume-list" command depending on
        the number of volumes owned by users.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param detailed: determines whether the volume listing should contain
detailed information about all of them
:param image: image to be used to create volume
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
self._create_volume(size, **kwargs)
self._list_volumes(detailed)
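    # A hedged example of how this scenario might appear in a Rally task file
    # (values are illustrative only, not taken from the source):
    #
    #     CinderVolumes.create_and_list_volume:
    #       - args: {size: 1, detailed: true}
    #         runner: {type: constant, times: 5, concurrency: 1}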
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def list_volumes(self, detailed=True):
"""List all volumes.
This simple scenario tests the cinder list command by listing
all the volumes.
:param detailed: True if detailed information about volumes
should be listed
"""
self._list_volumes(detailed)
@types.convert(image={"type": "glance_image"})
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_update_volume(self, size, image=None,
create_volume_kwargs=None,
update_volume_kwargs=None):
"""Create a volume and update its name and description.
:param size: volume size (integer, in GB)
:param image: image to be used to create volume
:param create_volume_kwargs: dict, to be used to create volume
:param update_volume_kwargs: dict, to be used to update volume
"""
create_volume_kwargs = create_volume_kwargs or {}
update_volume_kwargs = update_volume_kwargs or {}
if image:
create_volume_kwargs["imageRef"] = image
volume = self._create_volume(size, **create_volume_kwargs)
self._update_volume(volume, **update_volume_kwargs)
@types.convert(image={"type": "glance_image"})
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_delete_volume(self, size, image=None,
min_sleep=0, max_sleep=0,
**kwargs):
"""Create and then delete a volume.
        Good for testing the maximal bandwidth of a cloud. Optional 'min_sleep'
and 'max_sleep' parameters allow the scenario to simulate a pause
between volume creation and deletion (of random duration from
[min_sleep, max_sleep]).
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: image to be used to create volume
:param min_sleep: minimum sleep time between volume creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume creation and
deletion (in seconds)
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
volume = self._create_volume(size, **kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@types.convert(image={"type": "glance_image"})
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_volume(self, size, image=None, **kwargs):
"""Create a volume.
        Good test to check how the number of active volumes influences
        the performance of creating a new one.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: image to be used to create volume
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
self._create_volume(size, **kwargs)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@validation.required_contexts("volumes")
@scenario.configure(context={"cleanup": ["cinder"]})
def modify_volume_metadata(self, sets=10, set_size=3,
deletes=5, delete_size=3):
"""Modify a volume's metadata.
This requires a volume to be created with the volumes
context. Additionally, ``sets * set_size`` must be greater
than or equal to ``deletes * delete_size``.
:param sets: how many set_metadata operations to perform
:param set_size: number of metadata keys to set in each
set_metadata operation
:param deletes: how many delete_metadata operations to perform
:param delete_size: number of metadata keys to delete in each
delete_metadata operation
"""
if sets * set_size < deletes * delete_size:
raise exceptions.InvalidArgumentsException(
"Not enough metadata keys will be created: "
"Setting %(num_keys)s keys, but deleting %(num_deletes)s" %
{"num_keys": sets * set_size,
"num_deletes": deletes * delete_size})
volume = random.choice(self.context["tenant"]["volumes"])
keys = self._set_metadata(volume["id"], sets, set_size)
self._delete_metadata(volume["id"], keys, deletes, delete_size)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_extend_volume(self, size, new_size, min_sleep=0,
max_sleep=0, **kwargs):
"""Create and extend a volume and then delete it.
:param size: volume size (in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param new_size: new volume size (in GB) to extend the volume to, or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
Notice: must be larger than the original volume size
:param min_sleep: minimum sleep time between volume extension and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume extension and
deletion (in seconds)
:param kwargs: optional args to extend the volume
"""
volume = self._create_volume(size, **kwargs)
self._extend_volume(volume, new_size)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_from_volume_and_delete_volume(self, size, min_sleep=0,
max_sleep=0, **kwargs):
"""Create volume from volume and then delete it.
Scenario for testing volume clone. Optional 'min_sleep' and 'max_sleep'
parameters allow the scenario to simulate a pause between volume
creation and deletion (of random duration from [min_sleep, max_sleep]).
:param size: volume size (in GB), or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
Must be equal to or larger than the source volume size.
:param min_sleep: minimum sleep time between volume creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume creation and
deletion (in seconds)
:param kwargs: optional args to create a volume
"""
source_vol = random.choice(self.context["tenant"]["volumes"])
volume = self._create_volume(size, source_volid=source_vol["id"],
**kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_delete_snapshot(self, force=False, min_sleep=0,
max_sleep=0, **kwargs):
"""Create and then delete a volume-snapshot.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between snapshot creation and deletion
(of random duration from [min_sleep, max_sleep]).
:param force: when set to True, allows snapshot of a volume when
the volume is attached to an instance
:param min_sleep: minimum sleep time between snapshot creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between snapshot creation and
deletion (in seconds)
:param kwargs: optional args to create a snapshot
"""
volume = random.choice(self.context["tenant"]["volumes"])
snapshot = self._create_snapshot(volume["id"], force=force, **kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_snapshot(snapshot)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
@logging.log_deprecated_args(
"Use 'create_vm_params' for additional instance parameters.",
"0.2.0", ["kwargs"], once=True)
def create_and_attach_volume(self, size, image, flavor,
create_volume_params=None,
create_vm_params=None, **kwargs):
"""Create a VM and attach a volume to it.
Simple test to create a VM and attach a volume, then
detach the volume and delete volume/VM.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: Glance image name to use for the VM
:param flavor: VM flavor name
:param create_volume_params: optional arguments for volume creation
:param create_vm_params: optional arguments for VM creation
:param kwargs: (deprecated) optional arguments for VM creation
"""
create_volume_params = create_volume_params or {}
if kwargs and create_vm_params:
raise ValueError("You can not set both 'kwargs'"
"and 'create_vm_params' attributes."
"Please use 'create_vm_params'.")
create_vm_params = create_vm_params or kwargs or {}
server = self._boot_server(image, flavor, **create_vm_params)
volume = self._create_volume(size, **create_volume_params)
self._attach_volume(server, volume)
self._detach_volume(server, volume)
self._delete_volume(volume)
self._delete_server(server)
@validation.volume_type_exists("volume_type")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def create_snapshot_and_attach_volume(self, volume_type=False,
size=None, **kwargs):
"""Create volume, snapshot and attach/detach volume.
This scenario is based on the standalone qaStressTest.py
(https://github.com/WaltHP/cinder-stress).
:param volume_type: Whether or not to specify volume type when creating
volumes.
:param size: Volume size - dictionary, contains two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
default values: {"min": 1, "max": 5}
:param kwargs: Optional parameters used during volume
snapshot creation.
"""
if size is None:
size = {"min": 1, "max": 5}
selected_type = None
volume_types = [None]
if volume_type:
volume_types_list = self.clients("cinder").volume_types.list()
for s in volume_types_list:
volume_types.append(s.name)
selected_type = random.choice(volume_types)
volume = self._create_volume(size, volume_type=selected_type)
snapshot = self._create_snapshot(volume.id, False, **kwargs)
server = self.get_random_server()
self._attach_volume(server, volume)
self._detach_volume(server, volume)
self._delete_snapshot(snapshot)
self._delete_volume(volume)
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def create_nested_snapshots_and_attach_volume(self,
size=None,
nested_level=1,
**kwargs):
"""Create a volume from snapshot and attach/detach the volume
This scenario create volume, create it's snapshot, attach volume,
then create new volume from existing snapshot and so on,
with defined nested level, after all detach and delete them.
volume->snapshot->volume->snapshot->volume ...
:param size: Volume size - dictionary, contains two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
default values: {"min": 1, "max": 5}
:param nested_level: amount of nested levels
:param kwargs: Optional parameters used during volume
snapshot creation.
"""
if size is None:
size = {"min": 1, "max": 5}
# NOTE: Volume size cannot be smaller than the snapshot size, so a
# volume with a fixed size is created here to avoid a size mismatch
# between the volume and the snapshot caused by the random size
# selection in the _create_volume method.
size = random.randint(size["min"], size["max"])
source_vol = self._create_volume(size)
nes_objs = [(self.get_random_server(), source_vol,
self._create_snapshot(source_vol.id, False, **kwargs))]
self._attach_volume(nes_objs[0][0], nes_objs[0][1])
snapshot = nes_objs[0][2]
for i in range(nested_level - 1):
volume = self._create_volume(size, snapshot_id=snapshot.id)
snapshot = self._create_snapshot(volume.id, False, **kwargs)
server = self.get_random_server()
self._attach_volume(server, volume)
nes_objs.append((server, volume, snapshot))
nes_objs.reverse()
for server, volume, snapshot in nes_objs:
self._detach_volume(server, volume)
self._delete_snapshot(snapshot)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_list_snapshots(self, force=False, detailed=True, **kwargs):
"""Create and then list a volume-snapshot.
:param force: when set to True, allows snapshot of a volume when
the volume is attached to an instance
:param detailed: True if detailed information about snapshots
should be listed
:param kwargs: optional args to create a snapshot
"""
volume = random.choice(self.context["tenant"]["volumes"])
self._create_snapshot(volume["id"], force=force, **kwargs)
self._list_snapshots(detailed)
@types.convert(image={"type": "glance_image"})
@validation.required_services(consts.Service.CINDER, consts.Service.GLANCE)
@validation.required_openstack(users=True)
@validation.required_parameters("size")
@scenario.configure(context={"cleanup": ["cinder", "glance"]})
def create_and_upload_volume_to_image(self, size, image=None, force=False,
container_format="bare",
disk_format="raw",
do_delete=True,
**kwargs):
"""Create and upload a volume to image.
:param size: volume size (integer, in GB), or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: image to be used to create volume.
:param force: when set to True, a volume that is attached to an instance
can be uploaded to an image
:param container_format: image container format
:param disk_format: disk format for image
:param do_delete: deletes image and volume after uploading if True
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
volume = self._create_volume(size, **kwargs)
image = self._upload_volume_to_image(volume, force, container_format,
disk_format)
if do_delete:
self._delete_volume(volume)
self._delete_image(image)
@validation.required_cinder_services("cinder-backup")
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_volume_backup(self, size, do_delete=True,
create_volume_kwargs=None,
create_backup_kwargs=None):
"""Create a volume backup.
:param size: volume size in GB
:param do_delete: if True, a volume and a volume backup will
be deleted after creation.
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self._create_volume(size, **create_volume_kwargs)
backup = self._create_backup(volume.id, **create_backup_kwargs)
if do_delete:
self._delete_volume(volume)
self._delete_backup(backup)
@validation.required_cinder_services("cinder-backup")
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_restore_volume_backup(self, size, do_delete=True,
create_volume_kwargs=None,
create_backup_kwargs=None):
"""Restore volume backup.
:param size: volume size in GB
:param do_delete: if True, the volume and the volume backup will
be deleted after creation.
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self._create_volume(size, **create_volume_kwargs)
backup = self._create_backup(volume.id, **create_backup_kwargs)
self._restore_backup(backup.id)
if do_delete:
self._delete_volume(volume)
self._delete_backup(backup)
@validation.required_cinder_services("cinder-backup")
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_list_volume_backups(self, size, detailed=True,
do_delete=True,
create_volume_kwargs=None,
create_backup_kwargs=None):
"""Create and then list a volume backup.
:param size: volume size in GB
:param detailed: True if detailed information about backup
should be listed
:param do_delete: if True, a volume backup will be deleted
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self._create_volume(size, **create_volume_kwargs)
backup = self._create_backup(volume.id, **create_backup_kwargs)
self._list_backups(detailed)
if do_delete:
self._delete_volume(volume)
self._delete_backup(backup)
@types.convert(image={"type": "glance_image"})
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_volume_and_clone(self, size, image=None, nested_level=1,
**kwargs):
"""Create a volume, then clone it to another volume.
This creates a volume, then clones it to another volume, and then
clones the new volume to the next volume...
1. create source volume (from image)
2. clone source volume to volume1
3. clone volume1 to volume2
4. clone volume2 to volume3
5. ...
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: image to be used to create initial volume
:param nested_level: amount of nested levels
:param kwargs: optional args to create volumes
"""
if image:
kwargs["imageRef"] = image
source_vol = self._create_volume(size, **kwargs)
kwargs.pop("imageRef", None)
for i in range(nested_level):
with atomic.ActionTimer(self, "cinder.clone_volume"):
source_vol = self._create_volume(source_vol.size,
source_volid=source_vol.id,
atomic_action=False, **kwargs)
|
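The scenarios above accept ``size`` either as an integer or as a {"min": ..., "max": ...} dictionary; the NOTE in create_nested_snapshots_and_attach_volume shows the dictionary form being resolved with random.randint(). The standalone sketch below (not part of Rally; the helper name is made up for illustration) mirrors just that resolution step.

import random


def resolve_volume_size(size):
    # Accept either a plain integer (GB) or a {"min": ..., "max": ...} dict,
    # mirroring the random.randint() call used by the nested-snapshot scenario.
    if isinstance(size, dict):
        return random.randint(size["min"], size["max"])
    return size


if __name__ == "__main__":
    print(resolve_volume_size(10))                    # -> 10
    print(resolve_volume_size({"min": 1, "max": 5}))  # -> an integer between 1 and 5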
|
import pytest
import decimal
import numpy as np
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
from numpy import iinfo
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assert_raises_regex(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assert_raises_regex(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetimelike(self):
for tz in [None, 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# ToDo: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with tm.assert_raises_regex(TypeError, "Invalid object type"):
pd.to_numeric(s)
def test_downcast(self):
# see gh-13352
mixed_data = ['1', 2, 3]
int_data = [1, 2, 3]
date_data = np.array(['1970-01-02', '1970-01-03',
'1970-01-04'], dtype='datetime64[D]')
invalid_downcast = 'unsigned-integer'
msg = 'invalid downcasting method provided'
smallest_int_dtype = np.dtype(np.typecodes['Integer'][0])
smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0])
# support for float dtypes below np.float32 is rare, so float32 is the smallest expected
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
for data in (mixed_data, int_data, date_data):
with tm.assert_raises_regex(ValueError, msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
for signed_downcast in ('integer', 'signed'):
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast='float')
tm.assert_numpy_array_equal(res, expected)
# if we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter
data = ['foo', 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors='ignore',
downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an unsigned integer because
# we have a negative number
data = ['-1', 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an integer (signed or unsigned)
# because we have a float number
data = (['1.1', 2, 3],
[10000.0, 20000, 3000, 40000.36, 50000, 50000.00])
expected = (np.array([1.1, 2, 3], dtype=np.float64),
np.array([10000.0, 20000, 3000,
40000.36, 50000, 50000.00], dtype=np.float64))
for _data, _expected in zip(data, expected):
for downcast in ('integer', 'signed', 'unsigned'):
res = pd.to_numeric(_data, downcast=downcast)
tm.assert_numpy_array_equal(res, _expected)
# the smallest integer dtype need not be np.(u)int8
data = ['256', 257, 258]
for downcast, expected_dtype in zip(
['integer', 'signed', 'unsigned'],
[np.int16, np.int16, np.uint16]):
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
def test_downcast_limits(self):
# Test the limits of each downcast. Bug: #14401.
i = 'integer'
u = 'unsigned'
dtype_downcast_min_max = [
('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]),
('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]),
('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]),
('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]),
('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]),
('uint64', u, [iinfo(np.uint64).min, iinfo(np.uint64).max]),
('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1])
]
for dtype, downcast, min_max in dtype_downcast_min_max:
series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
assert series.dtype == dtype
|
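For quick reference, here is a small usage sketch, separate from the test suite above, of the two ``to_numeric()`` behaviours the tests exercise most: the ``errors`` modes and ``downcast``. The exact downcast dtype depends on the platform's smallest integer type, as test_downcast notes.

import pandas as pd

s = pd.Series(['1', '-3.14', 'apple'])
print(pd.to_numeric(s, errors='coerce'))   # 'apple' becomes NaN
print(pd.to_numeric(s, errors='ignore'))   # returned unchanged on failure (deprecated in newer pandas)

downcast = pd.to_numeric(['1', 2, 3], downcast='integer')
print(downcast.dtype)                      # smallest signed integer dtype, e.g. int8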
|
#!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# base class for weave echo between mock device and real service.
#
from __future__ import absolute_import
from __future__ import print_function
import itertools
import os
import sys
import unittest
import set_test_path
from happy.Utils import *
import WeavePing
import WeaveTunnelStart
import WeaveTunnelStop
import plugins.testrail.TestrailResultOutput
from topologies.dynamic.thread_wifi_ap_internet_configurable_topology import thread_wifi_ap_internet_configurable_topology
from six.moves import range
TEST_OPTION_QUIET = True
DEFAULT_FABRIC_SEED = "00000"
TESTRAIL_SECTION_NAME = "Weave Echo between Mock-Client and Real-Service"
TESTRAIL_SUFFIX = "_TESTRAIL.json"
ERROR_THRESHOLD_PERCENTAGE = 5
class test_weave_echo_base(unittest.TestCase):
def setUp(self):
self.tap = None
self.tap_id = None
self.quiet = TEST_OPTION_QUIET
self.options = None
self.topology_setup_required = int(os.environ.get("TOPOLOGY", "1")) == 1
self.count = os.environ.get("ECHO_COUNT", "400")
fabric_seed = os.environ.get("FABRIC_SEED", DEFAULT_FABRIC_SEED)
if "FABRIC_OFFSET" in list(os.environ.keys()):
self.fabric_id = format(int(fabric_seed, 16) + int(os.environ["FABRIC_OFFSET"]), 'x').zfill(5)
else:
self.fabric_id = fabric_seed
self.eui64_prefix = os.environ.get("EUI64_PREFIX", '18:B4:30:AA:00:')
self.customized_eui64_seed = self.eui64_prefix + self.fabric_id[0:2] + ':' + self.fabric_id[2:4] + ':' + self.fabric_id[4:]
self.device_numbers = int(os.environ.get("DEVICE_NUMBERS", 1))
self.test_timeout = int(os.environ.get("TEST_TIMEOUT", 60 * 30))
# TODO: Once the LwIP bugs for the tunnel are fixed, enable this test on LwIP
if "WEAVE_SYSTEM_CONFIG_USE_LWIP" in list(os.environ.keys()) and os.environ["WEAVE_SYSTEM_CONFIG_USE_LWIP"] == "1":
self.tap = True
self.tap_id = "wpan0"
return
else:
self.tap = False
self.case = int(os.environ.get("CASE", "0")) == 1
self.use_service_dir = int(os.environ.get("USE_SERVICE_DIR", "0")) == 1
if self.topology_setup_required:
self.topology = thread_wifi_ap_internet_configurable_topology(quiet=self.quiet,
fabric_id=self.fabric_id,
customized_eui64_seed=self.customized_eui64_seed,
tap=self.tap,
dns=None,
device_numbers=self.device_numbers)
self.topology.createTopology()
else:
print("topology set up not required")
self.weave_wdm = None
# Wait a couple of seconds to ensure that the Weave ULA addresses have
# passed DAD (duplicate address detection) and are no longer tentative
delayExecution(2)
# topology has nodes: ThreadNode, BorderRouter, onhub and NestService instance
# we run tunnel between BorderRouter and NestService
# Start tunnel
value, data = self.__start_tunnel_from("BorderRouter")
delayExecution(1)
def tearDown(self):
delayExecution(1)
# Stop tunnel
value, data = self.__stop_tunnel_from("BorderRouter")
if self.topology_setup_required:
self.topology.destroyTopology()
self.topology = None
def weave_echo_base(self, echo_args):
default_options = {'test_case_name' : [],
'timeout' : self.test_timeout,
'quiet': False,
'case' : self.case,
'case_shared' : False,
'tap' : self.tap_id
}
default_options.update(echo_args)
self.__dict__.update(default_options)
self.default_options = default_options
# topology has nodes: ThreadNode, BorderRouter, onhub and NestService instance
value, data = self.__run_ping_test_between("ThreadNode", "service")
self.__process_result("ThreadNode", "service", value, data)
def __start_tunnel_from(self, gateway):
options = WeaveTunnelStart.option()
options["quiet"] = False
options["border_gateway"] = gateway
options["tap"] = self.tap_id
options["case"] = self.case
options["service_dir"] = self.use_service_dir
weave_tunnel = WeaveTunnelStart.WeaveTunnelStart(options)
ret = weave_tunnel.run()
value = ret.Value()
data = ret.Data()
return value, data
def __run_ping_test_between(self, nodeA, nodeB):
options = WeavePing.option()
options.update(self.default_options)
options["server"] = nodeB
options["clients"] = [nodeA + str(index + 1).zfill(2) for index in range(self.device_numbers)]
options["device_numbers"] = self.device_numbers
self.weave_ping = WeavePing.WeavePing(options)
ret = self.weave_ping.run()
value = ret.Value()
data = ret.Data()
return value, data
def __process_result(self, nodeA, nodeB, value, all_data):
success = True
for data in all_data:
print("ping from " + data['client'] + " to " + nodeB + " ", end=' ')
if value > ERROR_THRESHOLD_PERCENTAGE:
success = False
print(hred("Failed"))
else:
print(hgreen("Passed"))
try:
self.assertTrue(value < ERROR_THRESHOLD_PERCENTAGE, "%s < %d %%" % (str(value), ERROR_THRESHOLD_PERCENTAGE))
except AssertionError as e:
print(str(e))
print("Captured experiment result:")
print("Client Output: ")
for line in data["client_output"].split("\n"):
print("\t" + line)
test_results = []
test_results.append({
'testName': self.test_case_name[0],
'testStatus': 'success' if success else 'failure',
'testDescription': "echo loss percentage is %f" % value ,
'success_iterations_count': (100-value)/100.0 * int(self.count),
'total_iterations_count': int(self.count)
})
# output the test result to a json file
output_data = {
'sectionName': TESTRAIL_SECTION_NAME,
'testResults': test_results
}
output_file_name = self.weave_ping.process_log_prefix + data['client'] + self.test_tag.upper() + TESTRAIL_SUFFIX
self.__output_test_result(output_file_name, output_data)
if not success:
raise ValueError('test failure')
def __output_test_result(self, file_path, output_data):
options = plugins.testrail.TestrailResultOutput.option()
options["quiet"] = TEST_OPTION_QUIET
options["file_path"] = file_path
options["output_data"] = output_data
output_test = plugins.testrail.TestrailResultOutput.TestrailResultOutput(options)
output_test.run()
def __stop_tunnel_from(self, gateway):
options = WeaveTunnelStop.option()
options["quiet"] = False
options["border_gateway"] = gateway
options["service_dir"] = self.use_service_dir
weave_tunnel = WeaveTunnelStop.WeaveTunnelStop(options)
ret = weave_tunnel.run()
value = ret.Value()
data = ret.Data()
return value, data
if __name__ == "__main__":
unittest.main()
|
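As a side note, the fabric id and EUI-64 seed used in ``setUp()`` are derived purely from environment variables; the standalone sketch below reproduces just that derivation (the seed and offset values are whatever the environment provides, the defaults here are illustrative).

import os

fabric_seed = os.environ.get("FABRIC_SEED", "00000")
if "FABRIC_OFFSET" in os.environ:
    # FABRIC_SEED is hexadecimal; the offset is added numerically and the
    # result re-padded to five hex digits, exactly as in setUp().
    fabric_id = format(int(fabric_seed, 16) + int(os.environ["FABRIC_OFFSET"]), 'x').zfill(5)
else:
    fabric_id = fabric_seed
eui64_prefix = os.environ.get("EUI64_PREFIX", '18:B4:30:AA:00:')
customized_eui64_seed = (eui64_prefix + fabric_id[0:2] + ':' +
                         fabric_id[2:4] + ':' + fabric_id[4:])
print(fabric_id, customized_eui64_seed)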
|
"""A Python API for the MiniSat_ and MiniCard_ constraint solvers.
.. _MiniSat: http://minisat.se/
.. _MiniCard: http://git.io/minicard
Classes:
Solver
An abstract base class for the other classes.
SubsetMixin
A mixin class adding 'subset' functionality to Solver subclasses.
:class:`MinisatSolver`
Solve CNF instances using MiniSat.
:class:`MinicardSolver`
Solve CNF+ (CNF plus cardinality constraints) using MiniCard.
:class:`MinisatSubsetSolver`
Solve arbitrary subsets of CNF instances and find SAT subsets / UNSAT cores.
:class:`MinicardSubsetSolver`
Solve arbitrary subsets of CNF+ instances and find SAT subsets / UNSAT cores.
"""
import array
import os
import ctypes
from abc import ABCMeta, abstractmethod
from ctypes import c_void_p, c_ubyte, c_bool, c_int
class Solver(object):
"""The Solver class is an abstract base class for MiniSat and
MiniCard solver classes. It provides the basic methods that both
contain, closely following the methods in MiniSat and MiniCard's
Solver class.
Solver should not be instantiated directly. Instead, use its
subclasses MinisatSolver, MinicardSolver, MinisatSubsetSolver, or
MinicardSubsetSolver (see below).
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, libfilename):
self._setup_lib(libfilename)
self.s = self.lib.Solver_new()
def _setup_lib(self, libfilename):
"""Load the minisat library with ctypes and create a Solver
object. Correct return types (if not int as assumed by
ctypes) and set argtypes for functions from the minisat
library.
"""
dirname = os.path.dirname(os.path.abspath(__file__))
libfile = dirname + '/' + libfilename
if not os.path.exists(libfile):
raise IOError("Specified library file not found. Did you run 'make' to build the solver libraries?\nFile not found: %s" % libfile)
self.lib = ctypes.cdll.LoadLibrary(dirname+'/'+libfilename)
l = self.lib
l.Solver_new.restype = c_void_p
l.Solver_new.argtypes = []
l.Solver_delete.argtypes = [c_void_p]
l.nVars.argtypes = [c_void_p]
l.nClauses.argtypes = [c_void_p]
l.setPhaseSaving.argtypes = [c_void_p, c_int]
l.setRndPol.argtypes = [c_void_p, c_bool]
l.newVar.argtypes = [c_void_p, c_ubyte, c_bool]
l.addClause.restype = c_bool
l.addClause.argtypes = [c_void_p, c_int, c_void_p]
l.addUnit.restype = c_bool
l.addUnit.argtypes = [c_void_p, c_int]
l.solve.restype = c_bool
l.solve.argtypes = [c_void_p]
l.solve_assumptions.restype = c_bool
l.solve_assumptions.argtypes = [c_void_p, c_int, c_void_p]
l.simplify.restype = c_bool
l.simplify.argtypes = [c_void_p]
l.unsatCore.argtypes = [c_void_p, c_int, c_void_p]
l.modelValue.argtypes = [c_void_p, c_int]
l.fillModel.argtypes = [c_void_p, c_void_p, c_int, c_int]
l.getModelTrues.restype = c_int
l.getModelTrues.argtypes = [c_void_p, c_void_p, c_int, c_int]
l.getImplies.argtypes = [c_void_p, c_void_p]
l.getImplies.restype = c_int
def __del__(self):
"""Delete the Solver object"""
self.lib.Solver_delete(self.s)
@staticmethod
def _to_intptr(a):
"""Helper function to get a ctypes POINTER(c_int) for an array"""
addr, size = a.buffer_info()
return ctypes.cast(addr, ctypes.POINTER(c_int)), size
def new_var(self, polarity=None, dvar=True):
"""Create a new variable in the solver.
Args:
polarity (bool):
The default polarity for this variable. True = variable's
default is True, etc. Note that this is the reverse of the 'user
polarity' in MiniSat, where True indicates the *sign* is True,
hence the default value is False.
dvar (bool):
Whether this variable will be used as a decision variable.
Returns:
The new variable's index (0-based counting).
"""
if polarity is None:
pol_int = 2
elif polarity is True:
pol_int = 1
elif polarity is False:
pol_int = 0
return self.lib.newVar(self.s, pol_int, dvar)
def nvars(self):
'''Get the number of variables created in the solver.'''
return self.lib.nVars(self.s)
def nclauses(self):
'''Get the number of clauses or constraints added to the solver.'''
return self.lib.nClauses(self.s)
def set_phase_saving(self, ps):
'''Set the level of phase saving (0=none, 1=limited, 2=full (default)).'''
self.lib.setPhaseSaving(self.s, ps)
def set_rnd_pol(self, val):
'''Set whether random polarities are used for decisions (overridden if vars are created with a user polarity other than None)'''
self.lib.setRndPol(self.s, val)
def add_clause(self, lits):
"""Add a clause to the solver.
Args:
lits:
A list of literals as integers. Each integer specifies a
variable with *1*-based counting and a sign via the sign of the
integer. Ex.: [-1, 2, -3] is (!x0 + x1 + !x2)
Returns:
A boolean value returned from MiniSat's ``addClause()`` function,
indicating success (True) or conflict (False).
"""
if not all(abs(x) <= self.nvars() for x in lits):
raise Exception("Not all variables in %s are created yet. Call new_var() first." % lits)
if len(lits) > 1:
a = array.array('i', lits)
a_ptr, size = self._to_intptr(a)
return self.lib.addClause(self.s, size, a_ptr)
elif len(lits) == 1:
return self.lib.addUnit(self.s, lits[0])
else:
return self.lib.addClause(self.s, 0, None)
def solve(self, assumptions=None):
"""Solve the current set of clauses, optionally with a set of assumptions.
Args:
assumptions:
An optional iterable returning literals as integers, specified as
in ``add_clause()``.
Returns:
True if the clauses (and assumptions) are satisfiable, False otherwise.
"""
if assumptions is None:
return self.lib.solve(self.s)
else:
a = array.array('i', assumptions)
a_ptr, size = self._to_intptr(a)
return self.lib.solve_assumptions(self.s, size, a_ptr)
def simplify(self):
'''Call Solver.simplify().'''
return self.lib.simplify(self.s)
def get_model(self, start=0, end=-1):
"""Get the current model from the solver, optionally retrieving only a slice.
Args:
start, end (int):
Optional start and end indices, interpreted as in ``range()``.
Returns:
An array of booleans indexed to each variable (from 0). If a start
index was given, the returned list starts at that index (i.e.,
``get_model(10)[0]`` is index 10 from the solver's model).
"""
if end == -1:
end = self.nvars()
a = array.array('i', [-1] * (end-start))
a_ptr, size = self._to_intptr(a)
self.lib.fillModel(self.s, a_ptr, start, end)
return a
def get_model_trues(self, start=0, end=-1):
"""Get variables assigned true in the current model from the solver.
Args:
start, end (int):
Optional start and end indices, interpreted as in ``range()``.
Returns:
An array of true variables in the solver's current model. If a
start index was given, the variables are indexed from that value.
"""
if end == -1:
end = self.nvars()
a = array.array('i', [-1] * (end-start))
a_ptr, size = self._to_intptr(a)
count = self.lib.getModelTrues(self.s, a_ptr, start, end)
# reduce the array down to just the valid indexes
return a[:count]
def model_value(self, i):
'''Get the value of a given variable in the current model.'''
return self.lib.modelValue(self.s, i)
def implies(self):
"""Get literals known to be implied by the current formula. (I.e., all
assignments made at level 0.)
Returns:
An array of literals.
"""
a = array.array('i', [-1] * self.nvars())
a_ptr, size = self._to_intptr(a)
count = self.lib.getImplies(self.s, a_ptr)
# reduce the array down to just the valid indexes
return a[:count]
class SubsetMixin(object):
"""A mixin for any Solver class that lets it reason about subsets of a clause set."""
_origvars = None
_relvars = None
def set_varcounts(self, vars, constraints):
"""Record how many of the solver's variables and clauses are
"original," as opposed to clause-selector variables, etc.
"""
self._origvars = vars
self._relvars = constraints
def add_clause_instrumented(self, lits, index):
"""Add a "soft" clause with a relaxation variable (the relaxation var.
is based on the index, which is assumed to be 0-based).
Args:
lits:
A list of literals specified as in ``add_clause()``.
index (int):
A 0-based index into the set of soft clauses. The clause will
be given a relaxation variable based on this index, and it will
be used to specify the clause in subsets for
``solve_subset()``, etc.
"""
if self._origvars is None:
raise Exception("SubsetSolver.set_varcounts() must be called before .add_clause_instrumented()")
instrumented_clause = [-(self._origvars+1+index)] + lits
self.add_clause(instrumented_clause)
def solve_subset(self, subset):
"""Solve a subset of the constraints equal containing all "hard"
clauses (those added with the regular ``add_clause()`` method) and the
specified subset of soft clauses.
Args:
subset:
An iterable containing the indexes of any soft clauses to be included.
Returns:
True if the given subset is satisfiable, False otherwise.
"""
if self._origvars is None:
raise Exception("SubsetSolver.set_varcounts() must be called before .solve_subset()")
# convert clause indices to clause-selector variable indices
a = array.array('i', (i+self._origvars+1 for i in subset))
a_ptr, size = self._to_intptr(a)
return self.lib.solve_assumptions(self.s, size, a_ptr)
def unsat_core(self):
"""Get an UNSAT core from the last check performed by
``solve_subset()``. Assumes the last such check was UNSAT.
"""
a = array.array('i', [-1] * self.nclauses())
a_ptr, size = self._to_intptr(a)
length = self.lib.unsatCore(self.s, self._origvars, a_ptr)
# reduce the array down to just the valid indexes
return a[:length]
def sat_subset(self):
"""Get the set of clauses satisfied in the last check performed by
``solve_subset()``. Assumes the last such check was SAT. This may
contain additional soft clauses not in the subset that was given to
``solve_subset()``, if they were also satisfied by the model found.
"""
return self.get_model_trues(start=self._origvars, end=self._origvars+self._relvars)
class MinisatSolver(Solver):
"""A Python analog to MiniSat's Solver class.
>>> S = MinisatSolver()
Create variables using ``new_var()``. Add clauses as list of literals with
``add_clause()``, analogous to MiniSat's ``add_clause()``. Literals are
specified as integers, with the magnitude indicating the variable index
(with 1-based counting) and the sign indicating True/False. For example,
to add clauses (x0), (!x1), (!x0 + x1 + !x2), and (x2 + x3):
>>> for i in range(4):
... S.new_var() # doctest: +ELLIPSIS
0
1
2
3
>>> for clause in [1], [-2], [-1, 2, -3], [3, 4]:
... S.add_clause(clause) # doctest: +ELLIPSIS
True
True
True
True
The ``solve()`` method returns True or False just like MiniSat's.
>>> S.solve()
True
Models are returned as arrays of Booleans, indexed by var.
So the following represents x0=True, x1=False, x2=False, x3=True.
>>> list(S.get_model())
[1, 0, 0, 1]
The ``add_clause()`` method may return False if a conflict is detected
when adding the clause, even without search.
>>> S.add_clause([-4])
False
>>> S.solve()
False
"""
def __init__(self):
super(MinisatSolver, self).__init__("libminisat.so")
class MinicardSolver(Solver):
"""A Python analog to MiniCard's Solver class.
>>> S = MinicardSolver()
This has the same interface as :class:`MinisatSolver`, with the addition of
the ``add_atmost()`` method.
>>> for i in range(4):
... S.new_var() # doctest: +ELLIPSIS
0
1
2
3
>>> for clause in [1], [-2], [3, 4]:
... S.add_clause(clause)
True
True
True
To add an AtMost constraint, specify the set of literals and the bound. For example, to add AtMost({x0, !x1, x2}, 2):
>>> S.add_atmost([1,-2,3], 2)
True
>>> S.solve()
True
>>> list(S.get_model())
[1, 0, 0, 1]
As with ``add_clause()``, the ``add_atmost()`` method may return False if a
conflict is detected when adding the constraint, even without search.
>>> S.add_atmost([1,-3,4], 2)
False
>>> S.solve()
False
"""
def __init__(self):
super(MinicardSolver, self).__init__("libminicard.so")
def _setup_lib(self, libfilename):
"""Correct return types (if not int as assumed by ctypes) and set argtypes for
functions from the minicard library.
"""
super(MinicardSolver, self)._setup_lib(libfilename)
# additional function for minicard
l = self.lib
l.addAtMost.restype = c_bool
l.addAtMost.argtypes = [c_void_p, c_int, c_void_p, c_int]
def add_atmost(self, lits, k):
"""Add an AtMost constraint to the solver.
Args:
lits:
A list of literals as integers. Each integer specifies a
variable with **1**-based counting and a sign via the sign of
the integer. Ex.: [-1, 2, -3] is {!x0, x1, !x2}
k (int):
The [upper] bound to place on these literals.
Returns:
A boolean value returned from MiniCard's ``addAtMost()``
function, indicating success (True) or conflict (False).
"""
if not all(abs(x) <= self.nvars() for x in lits):
raise Exception("Not all variables in %s are created yet. Call new_var() first." % lits)
if len(lits) > 1:
a = array.array('i', lits)
a_ptr, size = self._to_intptr(a)
return self.lib.addAtMost(self.s, size, a_ptr, k)
else:
return self.lib.addAtMost(self.s, 0, None, 0)
class MinisatSubsetSolver(SubsetMixin, MinisatSolver):
"""A class for reasoning about subsets of constraints within MiniSat.
>>> S = MinisatSubsetSolver()
It must be told explicitly how many of its variables are "real" and how many
are relaxation variables for constraints.
>>> S.set_varcounts(vars = 4, constraints = 5)
>>> for i in range(4+5):
... _ = S.new_var()
"Soft" clauses are added with ``add_clause_instrumented()``, which has no
return value, as it is impossible for these clauses to produce a conflict.
>>> for i, clause in enumerate([[1], [-2], [-1, 2, 3], [-3], [-1]]):
... S.add_clause_instrumented(clause, i)
Any subset of the constraints can be tested for satisfiability. Subsets
are specified as iterables containing soft clause indexes.
>>> S.solve_subset([0,1,2])
True
If a subset is found to be satisfiable, a potentially larger satisfied
subset can be found. Satisfiable subsets are returned as array objects.
>>> satset = S.sat_subset()
>>> sorted(satset)
[0, 1, 2]
If a subset is found to be unsatisfiable, an UNSAT core can be found.
Cores are returned as array objects.
>>> S.solve_subset([0,1,2,3])
False
>>> core = S.unsat_core()
>>> sorted(core)
[0, 1, 2, 3]
"""
pass
class MinicardSubsetSolver(SubsetMixin, MinicardSolver):
"""A class for reasoning about subsets of constraints within MiniCard.
This has the same interface as :class:`MinisatSubsetSolver`, with the
addition of the ``add_atmost()`` method.
>>> S = MinicardSubsetSolver()
>>> S.set_varcounts(vars = 4, constraints = 4)
>>> for i in range(4+4):
... _ = S.new_var()
>>> for i, clause in enumerate([[1], [-2], [3], [4]]):
... S.add_clause_instrumented(clause, i)
AtMost constraints cannot be instrumented -- they must be hard constraints.
>>> S.add_atmost([1,-2,3], 2)
True
>>> S.solve_subset([0,1])
True
>>> S.solve_subset([0,1,2,3])
False
>>> core = S.unsat_core()
>>> sorted(core)
[0, 1, 2]
"""
pass
|
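A short usage sketch of the assumption-based solving described in ``Solver.solve()`` above. It assumes the classes defined above are importable and that libminisat.so has been built (see ``_setup_lib()``); literals are 1-based, while variable indices returned by ``new_var()`` are 0-based.

S = MinisatSolver()
for _ in range(3):
    S.new_var()
S.add_clause([1, 2])       # (x0 + x1)
S.add_clause([-1, 3])      # (!x0 + x2)
print(S.solve())                      # True: the clauses are satisfiable
print(list(S.get_model()))            # model for the satisfiable call, e.g. [0, 1, 0]
print(S.solve(assumptions=[1, -3]))   # False: x0=True, x2=False conflicts with (!x0 + x2)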
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.request_helpers"""
import unittest
from swift.common.swob import Request, HTTPException, HeaderKeyDict
from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY
from swift.common.request_helpers import is_sys_meta, is_user_meta, \
is_sys_or_user_meta, strip_sys_meta_prefix, strip_user_meta_prefix, \
remove_items, copy_header_subset, get_name_and_placement, \
http_response_to_document_iters, is_object_transient_sysmeta, \
update_etag_is_at_header, resolve_etag_is_at_header
from test.unit import patch_policies
from test.unit.common.test_utils import FakeResponse
server_types = ['account', 'container', 'object']
class TestRequestHelpers(unittest.TestCase):
def test_is_user_meta(self):
m_type = 'meta'
for st in server_types:
self.assertTrue(is_user_meta(st, 'x-%s-%s-foo' % (st, m_type)))
self.assertFalse(is_user_meta(st, 'x-%s-%s-' % (st, m_type)))
self.assertFalse(is_user_meta(st, 'x-%s-%sfoo' % (st, m_type)))
def test_is_sys_meta(self):
m_type = 'sysmeta'
for st in server_types:
self.assertTrue(is_sys_meta(st, 'x-%s-%s-foo' % (st, m_type)))
self.assertFalse(is_sys_meta(st, 'x-%s-%s-' % (st, m_type)))
self.assertFalse(is_sys_meta(st, 'x-%s-%sfoo' % (st, m_type)))
def test_is_sys_or_user_meta(self):
m_types = ['sysmeta', 'meta']
for mt in m_types:
for st in server_types:
self.assertTrue(is_sys_or_user_meta(st, 'x-%s-%s-foo'
% (st, mt)))
self.assertFalse(is_sys_or_user_meta(st, 'x-%s-%s-'
% (st, mt)))
self.assertFalse(is_sys_or_user_meta(st, 'x-%s-%sfoo'
% (st, mt)))
def test_strip_sys_meta_prefix(self):
mt = 'sysmeta'
for st in server_types:
self.assertEqual(strip_sys_meta_prefix(st, 'x-%s-%s-a'
% (st, mt)), 'a')
def test_strip_user_meta_prefix(self):
mt = 'meta'
for st in server_types:
self.assertEqual(strip_user_meta_prefix(st, 'x-%s-%s-a'
% (st, mt)), 'a')
def test_is_object_transient_sysmeta(self):
self.assertTrue(is_object_transient_sysmeta(
'x-object-transient-sysmeta-foo'))
self.assertFalse(is_object_transient_sysmeta(
'x-object-transient-sysmeta-'))
self.assertFalse(is_object_transient_sysmeta(
'x-object-meatmeta-foo'))
def test_remove_items(self):
src = {'a': 'b',
'c': 'd'}
test = lambda x: x == 'a'
rem = remove_items(src, test)
self.assertEqual(src, {'c': 'd'})
self.assertEqual(rem, {'a': 'b'})
def test_copy_header_subset(self):
src = {'a': 'b',
'c': 'd'}
from_req = Request.blank('/path', environ={}, headers=src)
to_req = Request.blank('/path', {})
test = lambda x: x.lower() == 'a'
copy_header_subset(from_req, to_req, test)
self.assertTrue('A' in to_req.headers)
self.assertEqual(to_req.headers['A'], 'b')
self.assertFalse('c' in to_req.headers)
self.assertFalse('C' in to_req.headers)
@patch_policies(with_ec_default=True)
def test_get_name_and_placement_object_req(self):
path = '/device/part/account/container/object'
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '0'})
device, part, account, container, obj, policy = \
get_name_and_placement(req, 5, 5, True)
self.assertEqual(device, 'device')
self.assertEqual(part, 'part')
self.assertEqual(account, 'account')
self.assertEqual(container, 'container')
self.assertEqual(obj, 'object')
self.assertEqual(policy, POLICIES[0])
self.assertEqual(policy.policy_type, EC_POLICY)
req.headers['X-Backend-Storage-Policy-Index'] = 1
device, part, account, container, obj, policy = \
get_name_and_placement(req, 5, 5, True)
self.assertEqual(device, 'device')
self.assertEqual(part, 'part')
self.assertEqual(account, 'account')
self.assertEqual(container, 'container')
self.assertEqual(obj, 'object')
self.assertEqual(policy, POLICIES[1])
self.assertEqual(policy.policy_type, REPL_POLICY)
req.headers['X-Backend-Storage-Policy-Index'] = 'foo'
try:
device, part, account, container, obj, policy = \
get_name_and_placement(req, 5, 5, True)
except HTTPException as e:
self.assertEqual(e.status_int, 503)
self.assertEqual(str(e), '503 Service Unavailable')
self.assertEqual(e.body, "No policy with index foo")
else:
self.fail('get_name_and_placement did not raise error '
'for invalid storage policy index')
@patch_policies(with_ec_default=True)
def test_get_name_and_placement_object_replication(self):
# yup, suffixes are sent '-'.joined in the path
path = '/device/part/012-345-678-9ab-cde'
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '0'})
device, partition, suffix_parts, policy = \
get_name_and_placement(req, 2, 3, True)
self.assertEqual(device, 'device')
self.assertEqual(partition, 'part')
self.assertEqual(suffix_parts, '012-345-678-9ab-cde')
self.assertEqual(policy, POLICIES[0])
self.assertEqual(policy.policy_type, EC_POLICY)
path = '/device/part'
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '1'})
device, partition, suffix_parts, policy = \
get_name_and_placement(req, 2, 3, True)
self.assertEqual(device, 'device')
self.assertEqual(partition, 'part')
self.assertIsNone(suffix_parts) # false-y
self.assertEqual(policy, POLICIES[1])
self.assertEqual(policy.policy_type, REPL_POLICY)
path = '/device/part/' # with a trailing slash
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '1'})
device, partition, suffix_parts, policy = \
get_name_and_placement(req, 2, 3, True)
self.assertEqual(device, 'device')
self.assertEqual(partition, 'part')
self.assertEqual(suffix_parts, '') # still false-y
self.assertEqual(policy, POLICIES[1])
self.assertEqual(policy.policy_type, REPL_POLICY)
class TestHTTPResponseToDocumentIters(unittest.TestCase):
def test_200(self):
fr = FakeResponse(
200,
{'Content-Length': '10', 'Content-Type': 'application/lunch'},
'sandwiches')
doc_iters = http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertEqual(last_byte, 9)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Length'), '10')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), 'sandwiches')
self.assertRaises(StopIteration, next, doc_iters)
fr = FakeResponse(
200,
{'Transfer-Encoding': 'chunked',
'Content-Type': 'application/lunch'},
'sandwiches')
doc_iters = http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertIsNone(last_byte)
self.assertIsNone(length)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Transfer-Encoding'), 'chunked')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), 'sandwiches')
self.assertRaises(StopIteration, next, doc_iters)
def test_206_single_range(self):
fr = FakeResponse(
206,
{'Content-Length': '8', 'Content-Type': 'application/lunch',
'Content-Range': 'bytes 1-8/10'},
'andwiche')
doc_iters = http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 1)
self.assertEqual(last_byte, 8)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Length'), '8')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), 'andwiche')
self.assertRaises(StopIteration, next, doc_iters)
# Chunked response should be treated in the same way as non-chunked one
fr = FakeResponse(
206,
{'Transfer-Encoding': 'chunked',
'Content-Type': 'application/lunch',
'Content-Range': 'bytes 1-8/10'},
'andwiche')
doc_iters = http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 1)
self.assertEqual(last_byte, 8)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), 'andwiche')
self.assertRaises(StopIteration, next, doc_iters)
def test_206_multiple_ranges(self):
fr = FakeResponse(
206,
{'Content-Type': 'multipart/byteranges; boundary=asdfasdfasdf'},
("--asdfasdfasdf\r\n"
"Content-Type: application/lunch\r\n"
"Content-Range: bytes 0-3/10\r\n"
"\r\n"
"sand\r\n"
"--asdfasdfasdf\r\n"
"Content-Type: application/lunch\r\n"
"Content-Range: bytes 6-9/10\r\n"
"\r\n"
"ches\r\n"
"--asdfasdfasdf--"))
doc_iters = http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertEqual(last_byte, 3)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), 'sand')
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 6)
self.assertEqual(last_byte, 9)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), 'ches')
self.assertRaises(StopIteration, next, doc_iters)
def test_update_etag_is_at_header(self):
# start with no existing X-Backend-Etag-Is-At
req = Request.blank('/v/a/c/o')
update_etag_is_at_header(req, 'X-Object-Sysmeta-My-Etag')
self.assertEqual('X-Object-Sysmeta-My-Etag',
req.headers['X-Backend-Etag-Is-At'])
# add another alternate
update_etag_is_at_header(req, 'X-Object-Sysmeta-Ec-Etag')
self.assertEqual('X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag',
req.headers['X-Backend-Etag-Is-At'])
with self.assertRaises(ValueError) as cm:
update_etag_is_at_header(req, 'X-Object-Sysmeta-,-Bad')
self.assertEqual('Header name must not contain commas',
cm.exception.message)
def test_resolve_etag_is_at_header(self):
def do_test():
req = Request.blank('/v/a/c/o')
# ok to have no X-Backend-Etag-Is-At
self.assertIsNone(resolve_etag_is_at_header(req, metadata))
# ok to have no matching metadata
req.headers['X-Backend-Etag-Is-At'] = 'X-Not-There'
self.assertIsNone(resolve_etag_is_at_header(req, metadata))
# selects from metadata
req.headers['X-Backend-Etag-Is-At'] = 'X-Object-Sysmeta-Ec-Etag'
self.assertEqual('an etag value',
resolve_etag_is_at_header(req, metadata))
req.headers['X-Backend-Etag-Is-At'] = 'X-Object-Sysmeta-My-Etag'
self.assertEqual('another etag value',
resolve_etag_is_at_header(req, metadata))
# first in list takes precedence
req.headers['X-Backend-Etag-Is-At'] = \
'X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag'
self.assertEqual('another etag value',
resolve_etag_is_at_header(req, metadata))
# non-existent alternates are passed over
req.headers['X-Backend-Etag-Is-At'] = \
'X-Bogus,X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag'
self.assertEqual('another etag value',
resolve_etag_is_at_header(req, metadata))
# spaces in list are ok
alts = 'X-Foo, X-Object-Sysmeta-My-Etag , X-Object-Sysmeta-Ec-Etag'
req.headers['X-Backend-Etag-Is-At'] = alts
self.assertEqual('another etag value',
resolve_etag_is_at_header(req, metadata))
# lower case in list is ok
alts = alts.lower()
req.headers['X-Backend-Etag-Is-At'] = alts
self.assertEqual('another etag value',
resolve_etag_is_at_header(req, metadata))
# upper case in list is ok
alts = alts.upper()
req.headers['X-Backend-Etag-Is-At'] = alts
self.assertEqual('another etag value',
resolve_etag_is_at_header(req, metadata))
metadata = {'X-Object-Sysmeta-Ec-Etag': 'an etag value',
'X-Object-Sysmeta-My-Etag': 'another etag value'}
do_test()
metadata = dict((k.lower(), v) for k, v in metadata.items())
do_test()
metadata = dict((k.upper(), v) for k, v in metadata.items())
do_test()
|
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Oliver Clements <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
# !!! NOTE: Does not conform to new interfaces yet #################
from owslib.coverage.wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from owslib.ows import (
OwsCommon,
ServiceIdentification,
ServiceProvider,
OperationsMetadata,
)
from urllib.parse import urlencode
from owslib.util import openURL, testXMLValue
from owslib.etree import etree
from owslib.crs import Crs
import os
import errno
import dateutil.parser as parser
from datetime import timedelta
import logging
from owslib.util import log, datetime_from_ansi, datetime_from_iso, param_list_to_url_string
# helper functions to save writing out the OWS/WCS namespaces in full each time
def ns(tag):
return "{http://www.opengis.net/ows/2.0}" + tag
def nsWCS2(tag):
return "{http://www.opengis.net/wcs/2.0}" + tag
class WebCoverageService_2_0_1(WCSBase):
"""Abstraction for OGC Web Coverage Service (WCS), version 2.0.1
Implements IWebCoverageService.
"""
def __getitem__(self, name):
""" check contents dictionary to allow dict like access to service layers"""
if name in list(self.__getattribute__("contents").keys()):
return self.__getattribute__("contents")[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, xml, cookies, auth=None, timeout=30, headers=None):
super(WebCoverageService_2_0_1, self).__init__(auth=auth, headers=headers)
self.version = "2.0.1"
self.url = url
self.cookies = cookies
self.timeout = timeout
self.ows_common = OwsCommon(version="2.0.1")
# initialize from saved capability document or access the server
reader = WCSCapabilitiesReader(self.version, self.cookies, self.auth, headers=self.headers)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url, self.timeout)
# check for exceptions
se = self._capabilities.find("ServiceException")
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
# serviceIdentification metadata
subelem = self._capabilities.find(ns("ServiceIdentification"))
self.identification = ServiceIdentification(
subelem, namespace=self.ows_common.namespace
)
# serviceProvider metadata
serviceproviderelem = self._capabilities.find(ns("ServiceProvider"))
self.provider = ServiceProvider(
serviceproviderelem, namespace=self.ows_common.namespace
)
# serviceOperations metadata
self.operations = []
for elem in self._capabilities.find(ns("OperationsMetadata"))[:]:
if elem.tag != ns("ExtendedCapabilities"):
self.operations.append(
OperationsMetadata(elem, namespace=self.ows_common.namespace)
)
# serviceContents metadata
self.contents = {}
for elem in self._capabilities.findall(
nsWCS2("Contents/") + nsWCS2("CoverageSummary")
):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
# exceptions
self.exceptions = [
f.text for f in self._capabilities.findall("Capability/Exception/Format")
]
def items(self):
"""supports dict-like items() access"""
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def getCoverage(
self,
identifier=None,
bbox=None,
time=None,
format=None,
subsets=None,
resolutions=None,
sizes=None,
crs=None,
width=None,
height=None,
resx=None,
resy=None,
resz=None,
parameter=None,
method="Get",
timeout=30,
**kwargs
):
"""Request and return a coverage from the WCS as a file-like object
note: additional **kwargs helps with the multi-version implementation;
core keyword arguments should be supported across versions
example:
cvg=wcs.getCoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),
format='cf-netcdf')
is equivalent to:
http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIME=2792-06-01T00:00:00.0&FORMAT=cf-netcdf
example 2.0.1 URL
http://earthserver.pml.ac.uk/rasdaman/ows?&SERVICE=WCS&VERSION=2.0.1&REQUEST=GetCoverage
&COVERAGEID=V2_monthly_CCI_chlor_a_insitu_test&SUBSET=Lat(40,50)&SUBSET=Long(-10,0)&SUBSET=ansi(144883,145000)&FORMAT=application/netcdf
cvg=wcs.getCoverage(identifier=['myID'], format='application/netcdf', subsets=[('axisName',min,max),
('axisName',min,max),('axisName',min,max)])
"""
if log.isEnabledFor(logging.DEBUG):
log.debug(
"WCS 2.0.1 DEBUG: Parameters passed to GetCoverage: identifier=%s, bbox=%s, time=%s, format=%s, crs=%s, width=%s, height=%s, resx=%s, resy=%s, resz=%s, parameter=%s, method=%s, other_arguments=%s" # noqa
% (
identifier,
bbox,
time,
format,
crs,
width,
height,
resx,
resy,
resz,
parameter,
method,
str(kwargs),
)
)
try:
base_url = next(
(
m.get("url")
for m in self.getOperationByName("GetCoverage").methods
if m.get("type").lower() == method.lower()
)
)
except StopIteration:
base_url = self.url
log.debug("WCS 2.0.1 DEBUG: base url of server: %s" % base_url)
request = {"version": self.version, "request": "GetCoverage", "service": "WCS"}
assert len(identifier) > 0
request["CoverageID"] = identifier[0]
if crs:
request["crs"] = crs
request["format"] = format
if width:
request["width"] = width
if height:
request["height"] = height
# anything else e.g. vendor specific parameters must go through kwargs
if kwargs:
for kw in kwargs:
request[kw] = kwargs[kw]
# encode and request
data = urlencode(request)
if subsets:
data += param_list_to_url_string(subsets, 'subset')
if resolutions:
log.debug('Adding vendor-specific RESOLUTION parameter.')
data += param_list_to_url_string(resolutions, 'resolution')
if sizes:
log.debug('Adding vendor-specific SIZE parameter.')
data += param_list_to_url_string(sizes, 'size')
log.debug("WCS 2.0.1 DEBUG: Second part of URL: %s" % data)
u = openURL(base_url, data, method, self.cookies, auth=self.auth, timeout=timeout, headers=self.headers)
return u
def getOperationByName(self, name):
"""Return a named operation item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class ContentMetadata(object):
"""
Implements IContentMetadata
"""
def __init__(self, elem, service):
"""Initialize. service is required so that describeCoverage requests may be made"""
# TODO - examine the parent for bounding box info.
self._elem = elem
self._service = service
self.id = elem.find(nsWCS2("CoverageId")).text
self.title = testXMLValue(elem.find(ns("label")))
self.abstract = testXMLValue(elem.find(ns("description")))
self.keywords = [
f.text for f in elem.findall(ns("keywords") + "/" + ns("keyword"))
]
self.boundingBox = None # needed for iContentMetadata harmonisation
self.boundingBoxWGS84 = None
b = elem.find(ns("lonLatEnvelope"))
if b is not None:
gmlpositions = b.findall("{http://www.opengis.net/gml}pos")
lc = gmlpositions[0].text
uc = gmlpositions[1].text
self.boundingBoxWGS84 = (
float(lc.split()[0]),
float(lc.split()[1]),
float(uc.split()[0]),
float(uc.split()[1]),
)
# others not used but needed for iContentMetadata harmonisation
self.styles = None
self.crsOptions = None
self.defaulttimeposition = None
# grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
def _getGrid(self):
if not hasattr(self, "descCov"):
self.descCov = self._service.getDescribeCoverage(self.id)
gridelem = self.descCov.find(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.3/rgrid}ReferenceableGridByVectors" # noqa
)
if gridelem is not None:
grid = ReferenceableGridByVectors(gridelem)
else:
# HERE I LOOK FOR RECTIFIEDGRID
gridelem = self.descCov.find(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.2}RectifiedGrid" # noqa
)
grid = RectifiedGrid(gridelem)
return grid
grid = property(_getGrid, None)
# timelimits are the start/end times, timepositions are all timepoints. WCS servers can declare one
# or both or neither of these.
# in wcs 2.0 this can be gathered from the Envelope tag
def _getTimeLimits(self):
# timepoints, timelimits=[],[]
# b=self._elem.find(ns('lonLatEnvelope'))
# if b is not None:
# timepoints=b.findall('{http://www.opengis.net/gml}timePosition')
# else:
# #have to make a describeCoverage request...
# if not hasattr(self, 'descCov'):
# self.descCov=self._service.getDescribeCoverage(self.id)
# for pos in self.descCov.findall(
# ns('CoverageOffering/')+ns('domainSet/')+ns('temporalDomain/')+'{http://www.opengis.net/gml}timePosition'):
# timepoints.append(pos)
# if timepoints:
# timelimits=[timepoints[0].text,timepoints[1].text]
return [self.timepositions[0], self.timepositions[-1]]
timelimits = property(_getTimeLimits, None)
def _getTimePositions(self):
timepositions = []
if not hasattr(self, "descCov"):
self.descCov = self._service.getDescribeCoverage(self.id)
gridelem = self.descCov.find(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.3/rgrid}ReferenceableGridByVectors" # noqa
)
if gridelem is not None:
# irregular time axis
coefficients = []
grid_axes = gridelem.findall(
"{http://www.opengis.net/gml/3.3/rgrid}generalGridAxis"
)
for elem in grid_axes:
if elem.find(
"{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}gridAxesSpanned" # noqa
).text in ["ansi", "unix"]:
coefficients = elem.find(
"{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}coefficients" # noqa
).text.split(" ")
for x in coefficients:
x = x.replace('"', "")
t_date = datetime_from_iso(x)
timepositions.append(t_date)
else:
# regular time
if len(self.grid.origin) > 2:
t_grid = self.grid
t_date = t_grid.origin[2]
start_pos = parser.parse(t_date, fuzzy=True)
step = float(t_grid.offsetvectors[2][2])
start_pos = start_pos + timedelta(days=(step / 2))
no_steps = int(t_grid.highlimits[2])
for x in range(no_steps):
t_pos = start_pos + timedelta(days=(step * x))
# t_date = datetime_from_ansi(t_pos)
# t_date = t_pos.isoformat()
timepositions.append(t_pos)
else:
# no time axis
timepositions = None
return timepositions
timepositions = property(_getTimePositions, None)
def _getOtherBoundingBoxes(self):
""" incomplete, should return other bounding boxes not in WGS84
#TODO: find any other bounding boxes. Need to check for gml:EnvelopeWithTimePeriod."""
bboxes = []
if not hasattr(self, "descCov"):
self.descCov = self._service.getDescribeCoverage(self.id)
for envelope in self.descCov.findall(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}boundedBy/" + "{http://www.opengis.net/gml/3.2}Envelope" # noqa
):
bbox = {}
bbox["nativeSrs"] = envelope.attrib["srsName"]
lc = envelope.find("{http://www.opengis.net/gml/3.2}lowerCorner")
lc = lc.text.split()
uc = envelope.find("{http://www.opengis.net/gml/3.2}upperCorner")
uc = uc.text.split()
bbox["bbox"] = (float(lc[0]), float(lc[1]), float(uc[0]), float(uc[1]))
bboxes.append(bbox)
return bboxes
boundingboxes = property(_getOtherBoundingBoxes, None)
def _getSupportedCRSProperty(self):
# gets supported crs info
crss = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("supportedCRSs/") + ns("responseCRSs")
):
for crs in elem.text.split(" "):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("supportedCRSs/") + ns("requestResponseCRSs")
):
for crs in elem.text.split(" "):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("supportedCRSs/") + ns("nativeCRSs")
):
for crs in elem.text.split(" "):
crss.append(Crs(crs))
return crss
supportedCRS = property(_getSupportedCRSProperty, None)
def _getSupportedFormatsProperty(self):
# gets supported formats info
frmts = []
for elem in self._service._capabilities.findall(
nsWCS2("ServiceMetadata/") + nsWCS2("formatSupported")
):
frmts.append(elem.text)
return frmts
supportedFormats = property(_getSupportedFormatsProperty, None)
def _getAxisDescriptionsProperty(self):
# gets any axis descriptions contained in the rangeset (requires a DescribeCoverage call to server).
axisDescs = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("rangeSet/") + ns("RangeSet/") + ns("axisDescription/") + ns("AxisDescription")
):
axisDescs.append(
AxisDescription(elem)
) # create a 'AxisDescription' object.
return axisDescs
axisDescriptions = property(_getAxisDescriptionsProperty, None)
# Adding classes to represent gml:grid and gml:rectifiedgrid. One of these is used for the cvg.grid property
# (where cvg is a member of the contents dictionary)
# There is no simple way to convert the offset values of a rectified grid to real-world values without
# understanding the CRS; that is beyond the current scope of OWSLib, so the representation here simply
# provides access to the information in the GML.
class Grid(object):
""" Simple grid class to provide axis and value information for a gml grid """
def __init__(self, grid):
self.axislabels = []
self.dimension = None
self.lowlimits = []
self.highlimits = []
if grid is not None:
self.dimension = int(grid.get("dimension"))
self.lowlimits = grid.find(
"{http://www.opengis.net/gml/3.2}limits/{http://www.opengis.net/gml/3.2}GridEnvelope/{http://www.opengis.net/gml/3.2}low" # noqa
).text.split(" ")
self.highlimits = grid.find(
"{http://www.opengis.net/gml/3.2}limits/{http://www.opengis.net/gml/3.2}GridEnvelope/{http://www.opengis.net/gml/3.2}high" # noqa
).text.split(" ")
for axis in grid.findall("{http://www.opengis.net/gml/3.2}axisLabels")[
0
].text.split(" "):
self.axislabels.append(axis)
class RectifiedGrid(Grid):
""" RectifiedGrid class, extends Grid with additional offset vector information """
def __init__(self, rectifiedgrid):
super(RectifiedGrid, self).__init__(rectifiedgrid)
self.origin = rectifiedgrid.find(
"{http://www.opengis.net/gml/3.2}origin/{http://www.opengis.net/gml/3.2}Point/{http://www.opengis.net/gml/3.2}pos" # noqa
).text.split()
self.offsetvectors = []
for offset in rectifiedgrid.findall(
"{http://www.opengis.net/gml/3.2}offsetVector"
):
self.offsetvectors.append(offset.text.split())
class ReferenceableGridByVectors(Grid):
""" ReferenceableGridByVectors class, extends Grid with additional vector information """
def __init__(self, referenceablegridbyvectors):
super(ReferenceableGridByVectors, self).__init__(referenceablegridbyvectors)
self.origin = referenceablegridbyvectors.find(
"{http://www.opengis.net/gml/3.3/rgrid}origin/{http://www.opengis.net/gml/3.2}Point/{http://www.opengis.net/gml/3.2}pos" # noqa
).text.split()
self.offsetvectors = []
for offset in referenceablegridbyvectors.findall(
"{http://www.opengis.net/gml/3.3/rgrid}generalGridAxis/{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}offsetVector" # noqa
):
self.offsetvectors.append(offset.text.split())
class AxisDescription(object):
""" Class to represent the AxisDescription element optionally found as part of the RangeSet and used to
define ordinates of additional dimensions such as wavelength bands or pressure levels"""
def __init__(self, axisdescElem):
self.name = self.label = None
self.values = []
for elem in axisdescElem.getchildren():
if elem.tag == ns("name"):
self.name = elem.text
elif elem.tag == ns("label"):
self.label = elem.text
elif elem.tag == ns("values"):
for child in elem.getchildren():
self.values.append(child.text)
|
|
from gosubl import about
from gosubl import gs
from gosubl import gsq
from gosubl import gsshell
from gosubl import mg9
from gosubl import sh
import datetime
import json
import os
import re
import shlex
import string
import sublime
import sublime_plugin
import uuid
import webbrowser
DOMAIN = "9o"
AC_OPTS = sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
SPLIT_FN_POS_PAT = re.compile(r'(.+?)(?:[:](\d+))?(?:[:](\d+))?$')
URL_SCHEME_PAT = re.compile(r'^[\w.+-]+://')
URL_PATH_PAT = re.compile(r'^(?:[\w.+-]+://|(?:www|(?:\w+\.)*(?:golang|pkgdoc|gosublime)\.org))')
HIST_EXPAND_PAT = re.compile(r'^(\^+)\s*(\d+)$')
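# Illustrative examples (not part of the original plugin): SPLIT_FN_POS_PAT splits a
# "file:row:col" spec, e.g. it matches 'main.go:10:4' with groups ('main.go', '10', '4');
# HIST_EXPAND_PAT matches history expansions typed at the 9o prompt, e.g. '^^2' with
# groups ('^^', '2').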
HOURGLASS = u'\u231B'
DEFAULT_COMMANDS = [
'help',
'run',
'build',
'replay',
'clear',
'tskill',
'tskill replay',
'tskill go',
'go',
'go build',
'go clean',
'go doc',
'go env',
'go fix',
'go fmt',
'go get',
'go install',
'go list',
'go run',
'go test',
'go tool',
'go version',
'go vet',
'go help',
'settings',
'env',
'share',
'hist',
'hist erase',
'cd',
]
DEFAULT_CL = [(s, s+' ') for s in DEFAULT_COMMANDS]
stash = {}
tid_alias = {}
def active_wd(win=None):
_, v = gs.win_view(win=win)
return gs.basedir_or_cwd(v.file_name() if v else '')
def _hkey(wd):
name = gs.setting("9o_instance")
if name:
wd = name
return '9o.hist.%s' % wd
def _wdid(wd):
name = gs.setting("9o_instance")
if name:
return name
return '9o://%s' % wd
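# Illustrative example (not part of the original plugin): with no "9o_instance" setting
# configured, _wdid('/home/me/project') returns '9o:///home/me/project' and
# _hkey('/home/me/project') returns '9o.hist./home/me/project'; setting "9o_instance"
# makes every window share the same panel id and history key.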
class EV(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
pos = gs.sel(view).begin()
if view.score_selector(pos, 'text.9o') == 0:
return []
cl = set()
hkey = _hkey(view.settings().get('9o.wd', ''))
cl.update((k, k+' ') for k in gs.dval(gs.aso().get(hkey), []))
cl.update((k, k+' ') for k in aliases())
cl.update((k, k+' ') for k in builtins())
cl.update(DEFAULT_CL)
return ([cl_esc(e) for e in sorted(cl)], AC_OPTS)
def cl_esc(e):
return (e[0], e[1].replace('$', '\\$'))
class Gs9oBuildCommand(sublime_plugin.WindowCommand):
def is_enabled(self):
view = gs.active_valid_go_view(self.window)
return view is not None
def run(self):
view = self.window.active_view()
args = {'run': gs.setting('build_command', ['^1'])} if gs.is_pkg_view(view) else {}
view.run_command('gs9o_open', args)
class Gs9oInsertLineCommand(sublime_plugin.TextCommand):
def run(self, edit, after=True):
insln = lambda: self.view.insert(edit, gs.sel(self.view).begin(), "\n")
if after:
self.view.run_command("move_to", {"to": "hardeol"})
insln()
else:
self.view.run_command("move_to", {"to": "hardbol"})
insln()
self.view.run_command("move", {"by": "lines", "forward": False})
class Gs9oMoveHist(sublime_plugin.TextCommand):
def run(self, edit, up):
view = self.view
pos = gs.sel(view).begin()
if view.score_selector(pos, 'prompt.9o') <= 0:
return
aso = gs.aso()
vs = view.settings()
wd = vs.get('9o.wd')
hkey = _hkey(wd)
hist = [s for s in gs.dval(aso.get(hkey), []) if s.strip()]
if not hist:
return
r = view.extract_scope(pos)
cmd = view.substr(r).strip('#').strip()
try:
idx = hist.index(cmd) + (-1 if up else 1)
found = True
except Exception:
idx = -1
found = False
if cmd and not found:
hist.append(cmd)
aso.set(hkey, hist)
gs.save_aso()
if idx >= 0 and idx < len(hist):
cmd = hist[idx]
elif up:
if not found:
cmd = hist[-1]
else:
cmd = ''
view.replace(edit, r, '# %s \n' % cmd)
n = view.line(r.begin()).end()
view.sel().clear()
view.sel().add(sublime.Region(n, n))
class Gs9oInitCommand(sublime_plugin.TextCommand):
def run(self, edit, wd=None):
v = self.view
vs = v.settings()
if not wd:
wd = vs.get('9o.wd', active_wd(win=v.window()))
was_empty = v.size() == 0
s = '[ %s ] # \n' % gs.simple_fn(wd).replace('#', '~')
if was_empty:
v.insert(edit, 0, 'GoSublime %s 9o: type `help` for help and command documentation\n\n' % about.VERSION)
if was_empty or v.substr(v.size()-1) == '\n':
v.insert(edit, v.size(), s)
else:
v.insert(edit, v.size(), '\n'+s)
v.sel().clear()
n = v.size()-1
v.sel().add(sublime.Region(n, n))
opts = {
"rulers": [],
"fold_buttons": True,
"fade_fold_buttons": False,
"gutter": True,
"margin": 0,
# pad mostly so the completion menu shows on the first line
"line_padding_top": 1,
"line_padding_bottom": 1,
"tab_size": 2,
"word_wrap": True,
"indent_subsequent_lines": True,
"line_numbers": False,
"auto_complete": True,
"auto_complete_selector": "text",
"highlight_line": True,
"draw_indent_guides": True,
"scroll_past_end": True,
"indent_guide_options": ["draw_normal", "draw_active"],
"word_separators": "./\\()\"'-:,.;<>~!@#$%&*|+=[]{}`~?",
}
opts.update(gs.setting('9o_settings'))
for opt in opts:
vs.set(opt, opts[opt])
vs.set("9o", True)
vs.set("9o.wd", wd)
color_scheme = gs.setting("9o_color_scheme", "")
if color_scheme:
if color_scheme == "default":
vs.erase("color_scheme")
else:
vs.set("color_scheme", color_scheme)
else:
vs.set("color_scheme", "")
v.set_syntax_file(gs.tm_path('9o'))
if was_empty:
v.show(0)
else:
v.show(v.size()-1)
os.chdir(wd)
class Gs9oOpenCommand(sublime_plugin.TextCommand):
def run(self, edit, wd=None, run=[], save_hist=False, focus_view=True):
self.view.window().run_command('gs9o_win_open', {
'wd': wd,
'run': run,
'save_hist': save_hist,
'focus_view': focus_view,
})
class Gs9oWinOpenCommand(sublime_plugin.WindowCommand):
def run(self, wd=None, run=[], save_hist=False, focus_view=True):
win = self.window
wid = win.id()
if not wd:
wd = active_wd(win=win)
id = _wdid(wd)
st = stash.setdefault(wid, {})
v = st.get(id)
if v is None:
v = win.get_output_panel(id)
st[id] = v
win.run_command("show_panel", {"panel": ("output.%s" % id)})
if focus_view:
win.focus_view(v)
v.run_command('gs9o_init', {'wd': wd})
if run:
v.run_command('gs9o_paste_exec', {'cmd': ' '.join(run), 'save_hist': save_hist})
class Gs9oPasteExecCommand(sublime_plugin.TextCommand):
def run(self, edit, cmd, save_hist=False):
view = self.view
view.insert(edit, view.line(view.size()-1).end(), cmd)
view.sel().clear()
view.sel().add(view.line(view.size()-1).end())
view.run_command('gs9o_exec', {'save_hist': save_hist})
class Gs9oOpenSelectionCommand(sublime_plugin.TextCommand):
def is_enabled(self):
pos = gs.sel(self.view).begin()
return self.view.score_selector(pos, 'text.9o') > 0
def run(self, edit):
actions = []
v = self.view
sel = gs.sel(v)
if (sel.end() - sel.begin()) == 0:
pos = sel.begin()
inscope = lambda p: v.score_selector(p, 'path.9o') > 0
if inscope(pos):
actions.append(v.substr(v.extract_scope(pos)))
else:
pos -= 1
if inscope(pos):
actions.append(v.substr(v.extract_scope(pos)))
else:
line = v.line(pos)
for cr in v.find_by_selector('path.9o'):
if line.contains(cr):
actions.append(v.substr(cr))
else:
actions.append(v.substr(sel))
act_on(v, actions)
def act_on(view, actions):
for a in actions:
if act_on_path(view, a):
break
def act_on_path(view, path):
row = 0
col = 0
m = gs.VFN_ID_PAT.match(path)
if m:
path = 'gs.view://%s' % m.group(1)
m2 = gs.ROWCOL_PAT.match(m.group(2))
if m2:
row = int(m2.group(1))-1 if m2.group(1) else 0
col = int(m2.group(2))-1 if m2.group(2) else 0
else:
if URL_PATH_PAT.match(path):
if path.lower().startswith('gs.packages://'):
path = os.path.join(gs.packages_dir(), path[14:])
else:
try:
if not URL_SCHEME_PAT.match(path):
path = 'http://%s' % path
gs.notify(DOMAIN, 'open url: %s' % path)
webbrowser.open_new_tab(path)
return True
except Exception:
gs.error_traceback(DOMAIN)
return False
wd = view.settings().get('9o.wd') or active_wd()
m = SPLIT_FN_POS_PAT.match(path)
path = gs.apath((m.group(1) if m else path), wd)
row = max(0, int(m.group(2))-1 if (m and m.group(2)) else 0)
col = max(0, int(m.group(3))-1 if (m and m.group(3)) else 0)
if m or os.path.exists(path):
gs.focus(path, row, col, win=view.window())
return True
else:
gs.notify(DOMAIN, "Invalid path `%s'" % path)
return False
def _exparg(s, m):
s = string.Template(s).safe_substitute(m)
s = os.path.expanduser(s)
return s
class Gs9oExecCommand(sublime_plugin.TextCommand):
def is_enabled(self):
pos = gs.sel(self.view).begin()
return self.view.score_selector(pos, 'text.9o') > 0
def run(self, edit, save_hist=False):
view = self.view
pos = gs.sel(view).begin()
line = view.line(pos)
wd = view.settings().get('9o.wd')
try:
os.chdir(wd)
except Exception:
gs.error_traceback(DOMAIN)
ln = view.substr(line).split('#', 1)
if len(ln) == 2:
cmd = ln[1].strip()
if cmd:
vs = view.settings()
aso = gs.aso()
hkey = _hkey(wd)
hist = gs.dval(aso.get(hkey), [])
m = HIST_EXPAND_PAT.match(cmd)
if m:
pfx = m.group(1)
hl = len(hist)
idx = hl - int(m.group(2))
cmd = ''
if idx >= 0 and idx < hl:
cmd = hist[idx]
if pfx == '^' or not cmd:
view.replace(edit, line, ('%s# %s' % (ln[0], cmd)))
return
elif save_hist:
try:
hist.remove(cmd)
except ValueError:
pass
hist.append(cmd)
aso.set(hkey, hist)
gs.save_aso()
if not cmd:
view.run_command('gs9o_init')
return
view.replace(edit, line, (u'[ `%s` %s ]' % (cmd, HOURGLASS)))
rkey = '9o.exec.%s' % uuid.uuid4()
view.add_regions(rkey, [sublime.Region(line.begin(), view.size())], '')
view.run_command('gs9o_init')
nv = sh.env()
anv = nv.copy()
seen = {}
am = aliases()
while True:
cli = cmd.split(' ', 1)
nm = cli[0]
if not nm:
break
ag = cli[1].strip() if len(cli) == 2 else ''
alias = am.get(nm, '')
if not alias:
break
if alias in seen:
gs.error(DOMAIN, 'recursive alias detected: `%s`' % alias)
break
seen[alias] = True
anv['_args'] = ag
cmd = string.Template(alias).safe_substitute(anv)
if nm != 'sh':
f = builtins().get(nm)
if f:
args = []
if ag:
args = [_exparg(s, nv) for s in shlex.split(gs.astr(ag))]
f(view, edit, args, wd, rkey)
return
if nm == 'sh':
args = sh.cmd(ag)
else:
args = sh.cmd(cmd)
cmd_sh(view, edit, args, wd, rkey)
else:
view.insert(edit, gs.sel(view).begin(), '\n')
class Gs9oPushOutput(sublime_plugin.TextCommand):
def run(self, edit, rkey, output, hourglass_repl=''):
view = self.view
output = '\t%s' % gs.ustr(output).strip().replace('\r', '').replace('\n', '\n\t')
regions = view.get_regions(rkey)
if regions:
line = view.line(regions[0].begin())
lsrc = view.substr(line).replace(HOURGLASS, (hourglass_repl or '| done'))
view.replace(edit, line, lsrc)
r = line
if output.strip():
line = view.line(regions[0].begin())
view.insert(edit, line.end(), '\n%s' % output)
r = view.get_regions(rkey)[0]
else:
n = view.size()
view.insert(edit, n, '\n%s' % output)
r = sublime.Region(n, view.size())
if gs.setting('9o_show_end') is True:
view.show(r.end())
else:
view.show(r.begin())
class Gs9oRunManyCommand(sublime_plugin.TextCommand):
def run(self, edit, wd=None, commands=[], save_hist=False, focus_view=False):
for run in commands:
self.view.run_command("gs9o_open", {
'run': run,
'wd': wd,
'save_hist': save_hist,
'focus_view': focus_view,
})
def aliases():
return gs.setting('9o_aliases', {}).copy()
def builtins():
m = gs.gs9o.copy()
g = globals()
for k, v in g.items():
if k.startswith('cmd_'):
k = k[4:].replace('_', '-')
if k and k not in m:
m[k] = v
return m
def push_output(view, rkey, output, hourglass_repl=''):
def f():
view.run_command('gs9o_push_output', {
'rkey': rkey,
'output': output,
'hourglass_repl': hourglass_repl,
})
sublime.set_timeout(f, 0)
def _save_all(win, wd):
if gs.setting('autosave') is True and win is not None:
for v in win.views():
try:
fn = v.file_name()
if fn and v.is_dirty() and fn.endswith('.go') and os.path.dirname(fn) == wd:
v.run_command('gs_fmt_save')
except Exception:
gs.error_traceback(DOMAIN)
def _9_begin_call(name, view, edit, args, wd, rkey, cid):
dmn = '%s: 9 %s' % (DOMAIN, name)
msg = '[ %s ] # 9 %s' % (gs.simple_fn(wd), ' '.join(args))
if not cid:
cid = '9%s-%s' % (name, uuid.uuid4())
tid = gs.begin(dmn, msg, set_status=False, cancel=lambda: mg9.acall('kill', {'cid': cid}, None))
tid_alias['%s-%s' % (name, wd)] = tid
def cb(res, err):
out = '\n'.join(s for s in (res.get('out'), res.get('err'), err) if s)
tmp_fn = res.get('tmpFn')
fn = res.get('fn')
if fn and tmp_fn:
bfn = os.path.basename(tmp_fn)
repls = [
'./%s' % bfn,
'.\\%s' % bfn,
tmp_fn,
]
for s in repls:
out = out.replace(s, fn)
def f():
gs.end(tid)
push_output(view, rkey, out, hourglass_repl='| done: %s' % res.get('dur', ''))
sublime.set_timeout(f, 0)
return cid, cb
def cmd_margo_reinstall(view, edit, args, wd, rkey):
def cb():
gs.del_attr(mg9._inst_name())
out = mg9.install('', True, True)
gs.notify(DOMAIN, 'MarGo re-install done')
push_output(view, rkey, out)
gsq.launch(DOMAIN, cb)
def cmd_echo(view, edit, args, wd, rkey):
push_output(view, rkey, ' '.join(args))
def cmd_which(view, edit, args, wd, rkey):
l = []
am = aliases()
m = builtins()
if not args:
args = []
args.extend(sorted(m.keys()))
args.extend(sorted(am.keys()))
fm = '%{0}s: %s'.format(max(len(s) for s in args))
for k in args:
if k == 'sh':
v = '9o builtin: %s' % sh.cmd('${CMD}')
elif k in ('go',):
v = '9o builtin: %s' % sh.which(k)
elif k in m:
v = '9o builtin'
elif k in am:
v = '9o alias: `%s`' % am[k]
else:
v = sh.which(k)
l.append(fm % (k, v))
push_output(view, rkey, '\n'.join(l))
def cmd_cd(view, edit, args, wd, rkey):
try:
if args:
wd = args[0]
wd = string.Template(wd).safe_substitute(sh.env())
wd = os.path.expanduser(wd)
wd = os.path.abspath(wd)
else:
fn = view.window().active_view().file_name()
if fn:
wd = os.path.dirname(fn)
os.chdir(wd)
except Exception as ex:
push_output(view, rkey, 'Cannot chdir: %s' % ex)
return
push_output(view, rkey, '')
view.run_command('gs9o_init', {'wd': wd})
def cmd_reset(view, edit, args, wd, rkey):
push_output(view, rkey, '')
view.erase(edit, sublime.Region(0, view.size()))
view.run_command('gs9o_init')
def cmd_clear(view, edit, args, wd, rkey):
cmd_reset(view, edit, args, wd, rkey)
def cmd_go(view, edit, args, wd, rkey):
_save_all(view.window(), wd)
cid, cb = _9_begin_call('go', view, edit, args, wd, rkey, '9go-%s' % wd)
a = {
'cid': cid,
'env': sh.env(),
'cwd': wd,
'cmd': {
'name': 'go',
'args': args,
}
}
sublime.set_timeout(lambda: mg9.acall('sh', a, cb), 0)
def cmd_cancel_replay(view, edit, args, wd, rkey):
cid = ''
av = None
win = view.window()
if win is not None:
av = win.active_view()
if av is not None and not av.file_name():
cid = '9replayv-%s' % av.id()
if not cid:
cid = '9replay-%s' % wd
mg9.acall('kill', {'cid': cid}, None)
push_output(view, rkey, '')
def cmd_sh(view, edit, args, wd, rkey):
cid, cb = _9_begin_call('sh', view, edit, args, wd, rkey, '')
a = {
'cid': cid,
'env': sh.env(),
'cwd': wd,
'cmd': {
'name': args[0],
'args': args[1:],
}
}
sublime.set_timeout(lambda: mg9.acall('sh', a, cb), 0)
def cmd_share(view, edit, args, wd, rkey):
av = gs.active_valid_go_view(win=view.window())
if av is None:
push_output(view, rkey, 'not sharing non-go src')
return
def f(res, err):
s = '%s\n%s' % (err, res.get('url', ''))
push_output(view, rkey, s.strip())
mg9.share(gs.view_src(view.window().active_view()), f)
def cmd_help(view, edit, args, wd, rkey):
gs.focus(gs.dist_path('9o.md'))
push_output(view, rkey, '')
def cmd_run(view, edit, args, wd, rkey):
cmd_9(view, edit, gs.lst('run', args), wd, rkey)
def cmd_replay(view, edit, args, wd, rkey):
cmd_9(view, edit, gs.lst('replay', args), wd, rkey)
def cmd_build(view, edit, args, wd, rkey):
cmd_9(view, edit, gs.lst('build', args), wd, rkey)
def cmd_9(view, edit, args, wd, rkey):
if len(args) == 0 or args[0] not in ('run', 'replay', 'build'):
push_output(view, rkey, ('9: invalid args %s' % args))
return
subcmd = args[0]
cid = ''
if subcmd == 'replay':
cid = '9replay-%s' % wd
cid, cb = _9_begin_call(subcmd, view, edit, args, wd, rkey, cid)
a = {
'cid': cid,
'env': sh.env(),
'dir': wd,
'args': args[1:],
'build_only': (subcmd == 'build'),
}
win = view.window()
if win is not None:
av = win.active_view()
if av is not None:
fn = av.file_name()
if fn:
_save_all(win, wd)
else:
if gs.is_go_source_view(av, False):
a['fn'] = gs.view_fn(av)
a['src'] = av.substr(sublime.Region(0, av.size()))
sublime.set_timeout(lambda: mg9.acall('play', a, cb), 0)
def cmd_tskill(view, edit, args, wd, rkey):
if len(args) == 0:
sublime.set_timeout(lambda: sublime.active_window().run_command("gs_show_tasks"), 0)
push_output(view, rkey, '')
return
l = []
for tid in args:
tid = tid.lstrip('#')
tid = tid_alias.get('%s-%s' % (tid, wd), tid)
l.append('kill %s: %s' % (tid, ('yes' if gs.cancel_task(tid) else 'no')))
push_output(view, rkey, '\n'.join(l))
def _env_settings(d, view, edit, args, wd, rkey):
if len(args) > 0:
m = {}
for k in args:
m[k] = d.get(k)
else:
m = d
s = '\n'.join((
'Default Settings file: gs.packages://GoSublime/GoSublime.sublime-settings (do not edit this file)',
'User settings file: gs.packages://User/GoSublime.sublime-settings (add/change your settings here)',
json.dumps(m, sort_keys=True, indent=4),
))
push_output(view, rkey, s)
def cmd_settings(view, edit, args, wd, rkey):
_env_settings(gs.settings_dict(), view, edit, args, wd, rkey)
def cmd_env(view, edit, args, wd, rkey):
_env_settings(sh.env(), view, edit, args, wd, rkey)
def cmd_hist(view, edit, args, wd, rkey):
aso = gs.aso()
hkey = _hkey(wd)
s = 'hist: invalid args: %s' % args
if len(args) == 0:
hist = gs.dval(aso.get(hkey), [])
hist.reverse()
hlen = len(hist)
s = '\n'.join('^%d: %s' % (i+1, v) for i,v in enumerate(hist))
elif len(args) == 1:
if args[0] == 'erase':
aso.erase(hkey)
gs.save_aso()
s = ''
push_output(view, rkey, s)
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
import codecs
from getpass import getpass
from os.path import abspath, expanduser
import re
import socket
from .compat import input, on_win
from .path import split_filename, strip_pkg_extension
from .._vendor.auxlib.decorators import memoize
from .._vendor.urllib3.exceptions import LocationParseError
from .._vendor.urllib3.util.url import Url, parse_url
try: # pragma: py2 no cover
# Python 3
from urllib.parse import (quote, quote_plus, unquote, unquote_plus)
except ImportError: # pragma: py3 no cover
# Python 2
from urllib import (quote, quote_plus, unquote, unquote_plus) # NOQA
def hex_octal_to_int(ho):
ho = ord(ho)
o0 = ord('0')
o9 = ord('9')
oA = ord('A')
oF = ord('F')
res = ho - o0 if ho >= o0 and ho <= o9 else (ho - oA + 10) if ho >= oA and ho <= oF else None
return res
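# Example (illustrative only): hex_octal_to_int('7') == 7 and hex_octal_to_int('A') == 10;
# the function maps a single uppercase hex digit character to its integer value and
# returns None for anything else.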
@memoize
def percent_decode(path):
# This is not fast so avoid when we can.
if '%' not in path:
return path
ranges = []
for m in re.finditer(r'(%[0-9A-F]{2})', path):
ranges.append((m.start(), m.end()))
if not len(ranges):
return path
# Sorry! Correctness is more important than speed at the moment.
# Should use a map + lambda eventually.
result = b''
skips = 0
for i, c in enumerate(path):
if skips > 0:
skips -= 1
continue
c = c.encode('ascii')
emit = c
if c == b'%':
for r in ranges:
if i == r[0]:
import struct
emit = struct.pack(
"B", hex_octal_to_int(path[i+1])*16 + hex_octal_to_int(path[i+2]))
skips = 2
break
if emit:
result += emit
return codecs.utf_8_decode(result)[0]
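# Example (illustrative only): percent_decode('foo%20bar') returns 'foo bar'. Note that the
# regex above only recognises uppercase hex escapes (%[0-9A-F]{2}), so a lowercase escape
# such as 'foo%2fbar' is left untouched.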
file_scheme = 'file://'
# Keeping this around for now, need to combine with the same function in conda/common/path.py
"""
def url_to_path(url):
assert url.startswith(file_scheme), "{} is not a file-scheme URL".format(url)
decoded = percent_decode(url[len(file_scheme):])
if decoded.startswith('/') and decoded[2] == ':':
# A Windows path.
decoded.replace('/', '\\')
return decoded
"""
@memoize
def path_to_url(path):
if not path:
raise ValueError('Not allowed: %r' % path)
if path.startswith(file_scheme):
try:
path.decode('ascii')
except UnicodeDecodeError:
raise ValueError('Non-ascii not allowed for things claiming to be URLs: %r' % path)
return path
path = abspath(expanduser(path)).replace('\\', '/')
# We do not use urljoin here because we want to take our own
# *very* explicit control of how paths get encoded into URLs.
# We should not follow any RFCs on how to encode and decode
# them, we just need to make sure we can represent them in a
# way that will not cause problems for whatever amount of
# urllib processing we *do* need to do on them (which should
# be none anyway, but I doubt that is the case). I have gone
# for ASCII and % encoding of everything not alphanumeric or
# not in `!'()*-._/:`. This should be pretty safe.
#
# To avoid risking breaking the internet, this code only runs
# for `file://` URLs.
#
percent_encode_chars = "!'()*-._/\\:"
percent_encode = lambda s: "".join(["%%%02X" % ord(c), c]
[c < "{" and c.isalnum() or c in percent_encode_chars]
for c in s)
if any(ord(char) >= 128 for char in path):
path = percent_encode(path.decode('unicode-escape')
if hasattr(path, 'decode')
else bytes(path, "utf-8").decode('unicode-escape'))
# https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/
if len(path) > 1 and path[1] == ':':
path = file_scheme + '/' + path
else:
path = file_scheme + path
return path
@memoize
def urlparse(url):
if on_win and url.startswith('file:'):
url = url.replace('\\', '/')
return parse_url(url)
def url_to_s3_info(url):
"""Convert an s3 url to a tuple of bucket and key.
Examples:
>>> url_to_s3_info("s3://bucket-name.bucket/here/is/the/key")
('bucket-name.bucket', '/here/is/the/key')
"""
parsed_url = parse_url(url)
assert parsed_url.scheme == 's3', "You can only use s3: urls (not %r)" % url
bucket, key = parsed_url.host, parsed_url.path
return bucket, key
def is_url(url):
"""
Examples:
>>> is_url(None)
False
>>> is_url("s3://some/bucket")
True
"""
if not url:
return False
try:
return urlparse(url).scheme is not None
except LocationParseError:
return False
def is_ipv4_address(string_ip):
"""
Examples:
>>> [is_ipv4_address(ip) for ip in ('8.8.8.8', '192.168.10.10', '255.255.255.255')]
[True, True, True]
>>> [is_ipv4_address(ip) for ip in ('8.8.8', '192.168.10.10.20', '256.255.255.255', '::1')]
[False, False, False, False]
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return string_ip.count('.') == 3
def is_ipv6_address(string_ip):
"""
Examples:
>> [is_ipv6_address(ip) for ip in ('::1', '2001:db8:85a3::370:7334', '1234:'*7+'1234')]
[True, True, True]
>> [is_ipv6_address(ip) for ip in ('192.168.10.10', '1234:'*8+'1234')]
[False, False]
"""
try:
inet_pton = socket.inet_pton
except AttributeError:
return is_ipv6_address_win_py27(string_ip)
try:
inet_pton(socket.AF_INET6, string_ip)
except socket.error:
return False
return True
def is_ipv6_address_win_py27(string_ip):
"""
Examples:
>>> [is_ipv6_address_win_py27(ip) for ip in ('::1', '1234:'*7+'1234')]
[True, True]
>>> [is_ipv6_address_win_py27(ip) for ip in ('192.168.10.10', '1234:'*8+'1234')]
[False, False]
"""
# python 2.7 on windows does not have socket.inet_pton
return bool(re.match(r"" # lgtm [py/regex/unmatchable-dollar]
r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)"
r"([\dA-F]{1,4}(\3|:\b)|\2){5}"
r"(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|"
r"(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z",
string_ip,
flags=re.DOTALL | re.IGNORECASE))
def is_ip_address(string_ip):
"""
Examples:
>> is_ip_address('192.168.10.10')
True
>> is_ip_address('::1')
True
>> is_ip_address('www.google.com')
False
"""
return is_ipv4_address(string_ip) or is_ipv6_address(string_ip)
def join(*args):
start = '/' if not args[0] or args[0].startswith('/') else ''
return start + '/'.join(y for y in (x.strip('/') for x in args if x) if y)
join_url = join
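# Example (illustrative only):
# join_url('https://conda.io', 'pkgs', '/free/') == 'https://conda.io/pkgs/free'
# Leading/trailing slashes on the components are stripped before joining, and a leading
# '/' on the first argument is preserved.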
def has_scheme(value):
return re.match(r'[a-z][a-z0-9]{0,11}://', value)
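# Example (illustrative only): has_scheme('https://conda.io') returns a match object
# (truthy), while has_scheme('conda.io') returns None, since the pattern requires a
# scheme of 1-12 lowercase alphanumerics followed by '://'.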
def strip_scheme(url):
"""
Examples:
>>> strip_scheme("https://www.conda.io")
'www.conda.io'
>>> strip_scheme("s3://some.bucket/plus/a/path.ext")
'some.bucket/plus/a/path.ext'
"""
return url.split('://', 1)[-1]
def mask_anaconda_token(url):
_, token = split_anaconda_token(url)
return url.replace(token, "<TOKEN>", 1) if token else url
def split_anaconda_token(url):
"""
Examples:
>>> split_anaconda_token("https://1.2.3.4/t/tk-123-456/path")
(u'https://1.2.3.4/path', u'tk-123-456')
>>> split_anaconda_token("https://1.2.3.4/t//path")
(u'https://1.2.3.4/path', u'')
>>> split_anaconda_token("https://some.domain/api/t/tk-123-456/path")
(u'https://some.domain/api/path', u'tk-123-456')
>>> split_anaconda_token("https://1.2.3.4/conda/t/tk-123-456/path")
(u'https://1.2.3.4/conda/path', u'tk-123-456')
>>> split_anaconda_token("https://1.2.3.4/path")
(u'https://1.2.3.4/path', None)
>>> split_anaconda_token("https://10.2.3.4:8080/conda/t/tk-123-45")
(u'https://10.2.3.4:8080/conda', u'tk-123-45')
"""
_token_match = re.search(r'/t/([a-zA-Z0-9-]*)', url)
token = _token_match.groups()[0] if _token_match else None
cleaned_url = url.replace('/t/' + token, '', 1) if token is not None else url
return cleaned_url.rstrip('/'), token
def split_platform(url, known_subdirs):
"""
Examples:
>>> from conda.base.constants import PLATFORM_DIRECTORIES
>>> split_platform("https://1.2.3.4/t/tk-123/osx-64/path", PLATFORM_DIRECTORIES)
(u'https://1.2.3.4/t/tk-123/path', u'osx-64')
"""
_platform_match_regex = r'/(%s)/?' % r'|'.join(r'%s' % d for d in known_subdirs)
_platform_match = re.search(_platform_match_regex, url, re.IGNORECASE)
platform = _platform_match.groups()[0] if _platform_match else None
cleaned_url = url.replace('/' + platform, '', 1) if platform is not None else url
return cleaned_url.rstrip('/'), platform
def has_platform(url, known_subdirs):
url_no_package_name, _ = split_filename(url)
if not url_no_package_name:
return None
maybe_a_platform = url_no_package_name.rsplit('/', 1)[-1]
return maybe_a_platform in known_subdirs and maybe_a_platform or None
def split_scheme_auth_token(url):
"""
Examples:
>>> split_scheme_auth_token("https://u:[email protected]/t/x1029384756/more/path")
('conda.io/more/path', 'https', 'u:p', 'x1029384756')
>>> split_scheme_auth_token(None)
(None, None, None, None)
"""
if not url:
return None, None, None, None
cleaned_url, token = split_anaconda_token(url)
url_parts = urlparse(cleaned_url)
remainder_url = Url(host=url_parts.host, port=url_parts.port, path=url_parts.path,
query=url_parts.query).url
return remainder_url, url_parts.scheme, url_parts.auth, token
def split_conda_url_easy_parts(url, known_subdirs):
# scheme, auth, token, platform, package_filename, host, port, path, query
cleaned_url, token = split_anaconda_token(url)
cleaned_url, platform = split_platform(cleaned_url, known_subdirs)
_, ext = strip_pkg_extension(cleaned_url)
cleaned_url, package_filename = cleaned_url.rsplit('/', 1) if ext else (cleaned_url, None)
# TODO: split out namespace using regex
url_parts = urlparse(cleaned_url)
return (url_parts.scheme, url_parts.auth, token, platform, package_filename, url_parts.host,
url_parts.port, url_parts.path, url_parts.query)
@memoize
def get_proxy_username_and_pass(scheme):
username = input("\n%s proxy username: " % scheme)
passwd = getpass("Password: ")
return username, passwd
def add_username_and_password(url, username, password):
url_parts = parse_url(url)._asdict()
url_parts['auth'] = username + ':' + quote(password, '')
return Url(**url_parts).url
def maybe_add_auth(url, auth, force=False):
"""Add auth if the url doesn't currently have it.
By default, does not replace auth if it already exists. Setting ``force`` to ``True``
overrides this behavior.
Examples:
>>> maybe_add_auth("https://www.conda.io", "user:passwd")
'https://user:[email protected]'
>>> maybe_add_auth("https://www.conda.io", "")
'https://www.conda.io'
"""
if not auth:
return url
url_parts = urlparse(url)._asdict()
if url_parts['auth'] and not force:
return url
url_parts['auth'] = auth
return Url(**url_parts).url
def maybe_unquote(url):
return unquote_plus(url) if url else url
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
"""
Support for constructing and viewing custom "track" browsers within Galaxy.
Track browsers are currently transient -- nothing is stored to the database
when a browser is created. Building a browser consists of selecting a set
of datasets associated with the same dbkey to display. Once selected, jobs
are started to create any necessary indexes in the background, and the user
is redirected to the browser interface, which loads the appropriate datasets.
Problems
--------
- Must have a LEN file, not currently able to infer from data (not sure we
need to support that, but need to make user defined build support better)
"""
import math, re, logging
log = logging.getLogger(__name__)
from galaxy.util.json import to_json_string
from galaxy.web.base.controller import *
from galaxy.web.framework import simplejson
from galaxy.util.bunch import Bunch
from galaxy.visualization.tracks.data.array_tree import ArrayTreeDataProvider
from galaxy.visualization.tracks.data.interval_index import IntervalIndexDataProvider
# Message strings returned to browser
messages = Bunch(
PENDING = "pending",
NO_DATA = "no data",
NO_CHROMOSOME = "no chromosome",
DATA = "data",
ERROR = "error"
)
# Dataset type required for each track type. This needs to be more flexible;
# there might be multiple types of indexes that suffice for a given track type.
track_type_to_dataset_type = {
"line": "array_tree",
"feature": "interval_index"
}
# Mapping from dataset type to a class that can fetch data from a file of that
# type. This also needs to be more flexible.
dataset_type_to_data_provider = {
"array_tree": ArrayTreeDataProvider,
"interval_index": IntervalIndexDataProvider
}
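# Illustrative lookup chain (not part of the original controller): for a "line" track,
# track_type_to_dataset_type["line"] gives "array_tree", and
# dataset_type_to_data_provider["array_tree"] gives ArrayTreeDataProvider, the class used
# below in the data() method to read blocks from the converted dataset.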
# FIXME: hardcoding this for now, but it should be derived from the available
# converters
browsable_types = ( "wig", "bed" )
class TracksController( BaseController ):
"""
Controller for track browser interface. Handles building a new browser from
datasets in the current history, and display of the resulting browser.
"""
@web.expose
def index( self, trans ):
return trans.fill_template( "tracks/index.mako" )
@web.expose
@web.require_login()
def new_browser( self, trans, dbkey=None, dataset_ids=None, browse=None, title=None ):
"""
Build a new browser from datasets in the current history. Redirects
to 'browser' once datasets to browse have been selected.
"""
session = trans.sa_session
# If the user clicked the submit button explicitly, try to build the browser
if title and browse and dataset_ids:
if not isinstance( dataset_ids, list ):
dataset_ids = [ dataset_ids ]
# Build config
tracks = []
for dataset_id in dataset_ids:
tracks.append( { "dataset_id": str( dataset_id ) } )
config = { "tracks": tracks }
# Build visualization object
vis = model.Visualization()
vis.user = trans.user
vis.title = title
vis.type = "trackster"
vis_rev = model.VisualizationRevision()
vis_rev.visualization = vis
vis_rev.title = title
vis_rev.config = config
vis.latest_revision = vis_rev
session.add( vis )
session.add( vis_rev )
session.flush()
trans.response.send_redirect( web.url_for( controller='tracks', action='browser', id=trans.security.encode_id( vis.id ) ) )
else:
# Determine the set of all dbkeys that are used in the current history
dbkeys = [ d.metadata.dbkey for d in trans.get_history().datasets if not d.deleted ]
dbkey_set = set( dbkeys )
if not dbkey_set:
return trans.show_error_message( "Current history has no valid datasets to visualize." )
# If a dbkey argument was not provided, or is no longer valid, default
# to the first one
if dbkey is None or dbkey not in dbkey_set:
dbkey = dbkeys[0]
# Find all datasets in the current history that are of that dbkey
# and can be displayed
datasets = {}
for dataset in session.query( model.HistoryDatasetAssociation ).filter_by( deleted=False, history_id=trans.history.id ):
if dataset.metadata.dbkey == dbkey and dataset.extension in browsable_types:
datasets[dataset.id] = (dataset.extension, dataset.name)
# Render the template
return trans.fill_template( "tracks/new_browser.mako", converters=browsable_types, dbkey=dbkey, dbkey_set=dbkey_set, datasets=datasets )
@web.expose
def browser(self, trans, id, chrom=""):
"""
Display browser for the datasets listed in `dataset_ids`.
"""
decoded_id = trans.security.decode_id( id )
session = trans.sa_session
vis = session.query( model.Visualization ).get( decoded_id )
tracks = []
dbkey = ""
hda_query = session.query( model.HistoryDatasetAssociation )
for t in vis.latest_revision.config['tracks']:
dataset_id = t['dataset_id']
dataset = hda_query.get( dataset_id )
tracks.append( {
"type": dataset.datatype.get_track_type(),
"name": dataset.name,
"dataset_id": dataset.id
} )
dbkey = dataset.dbkey
chrom_lengths = self._chroms( trans, dbkey )
if chrom_lengths is None:
error( "No chromosome lengths file found for '%s'" % dataset.name )
return trans.fill_template( 'tracks/browser.mako',
#dataset_ids=dataset_ids,
id=id,
tracks=tracks,
chrom=chrom,
dbkey=dbkey,
LEN=chrom_lengths.get(chrom, 0) )
@web.json
def chroms(self, trans, dbkey=None ):
"""
Returns a naturally sorted list of chroms/contigs for the given dbkey
"""
def check_int(s):
if s.isdigit():
return int(s)
else:
return s
def split_by_number(s):
return [ check_int(c) for c in re.split('([0-9]+)', s) ]
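# Illustrative example (not part of the original code): split_by_number('chr10')
# returns ['chr', 10, ''], so sorting on these keys orders 'chr2' before 'chr10'
# (natural sort) instead of the plain lexicographic order.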
chroms = self._chroms( trans, dbkey )
to_sort = [{ 'chrom': chrom, 'len': length } for chrom, length in chroms.iteritems()]
to_sort.sort(lambda a,b: cmp( split_by_number(a['chrom']), split_by_number(b['chrom']) ))
return to_sort
def _chroms( self, trans, dbkey ):
"""
Called by the browser to get a list of valid chromosomes and lengths
"""
# If there is any dataset in the history of extension `len`, this will use it
db_manifest = trans.db_dataset_for( dbkey )
if not db_manifest:
db_manifest = os.path.join( trans.app.config.tool_data_path, 'shared','ucsc','chrom', "%s.len" % dbkey )
else:
db_manifest = db_manifest.file_name
manifest = {}
if not os.path.exists( db_manifest ):
return None
for line in open( db_manifest ):
if line.startswith("#"): continue
line = line.rstrip("\r\n")
fields = line.split("\t")
manifest[fields[0]] = int(fields[1])
return manifest
@web.json
def data( self, trans, dataset_id, track_type, chrom, low, high, **kwargs ):
"""
Called by the browser to request a block of data
"""
# Load the requested dataset
dataset = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dataset_id )
# No dataset for that id
if not dataset:
return messages.NO_DATA
# Dataset is in error state, can't display
if dataset.state == trans.app.model.Job.states.ERROR:
return messages.ERROR
# Dataset is still being generated
if dataset.state != trans.app.model.Job.states.OK:
return messages.PENDING
# Determine what to return based on the type of track being drawn.
converted_dataset_type = track_type_to_dataset_type[track_type]
converted_dataset = self.__dataset_as_type( trans, dataset, converted_dataset_type )
# If at this point we still don't have an `array_tree_dataset`, there
# is no way we can display this data as an array tree
if not converted_dataset:
return messages.ERROR
# Need to check states again for the converted version
if converted_dataset.state == model.Dataset.states.ERROR:
return messages.ERROR
if converted_dataset.state != model.Dataset.states.OK:
return messages.PENDING
# We have a dataset in the right format that is ready to use, wrap in
# a data provider that knows how to access it
data_provider = dataset_type_to_data_provider[ converted_dataset_type ]( converted_dataset, dataset )
# Return stats if we need them
if 'stats' in kwargs: return data_provider.get_stats( chrom )
# Get the requested chunk of data
return data_provider.get_data( chrom, low, high, **kwargs )
def __dataset_as_type( self, trans, dataset, type ):
"""
Given a dataset, try to find a way to adapt it to a different type. If the
dataset is already of that type it is returned, if it can be converted a
converted dataset (possibly new) is returned, if it cannot be converted,
None is returned.
"""
# Already of correct type
if dataset.extension == type:
return dataset
# See if we can convert the dataset
if type not in dataset.get_converter_types():
log.debug( "Conversion from '%s' to '%s' not possible", dataset.extension, type )
return None
# See if converted dataset already exists
converted_datasets = [c for c in dataset.get_converted_files_by_type( type ) if c != None]
if converted_datasets:
for d in converted_datasets:
if d.state != 'error':
return d
else:
return None
# Conversion is possible but hasn't been done yet, run converter here
# FIXME: this is largely duplicated from DefaultToolAction
assoc = model.ImplicitlyConvertedDatasetAssociation( parent = dataset, file_type = type, metadata_safe = False )
new_dataset = dataset.datatype.convert_dataset( trans, dataset, type, return_output = True, visible = False ).values()[0]
new_dataset.hid = dataset.hid # Hrrmmm....
new_dataset.name = dataset.name
trans.sa_session.add( new_dataset )
trans.sa_session.flush()
assoc.dataset = new_dataset
trans.sa_session.add( assoc )
trans.sa_session.flush()
return new_dataset
|
|
#!/usr/bin/env python
# vim: ft=python:
import sys
import os
import os.path
import argparse
import configparser
import sh
import stat
import glob
import shutil
cmdparser = None
def check_binary():
"""Ensure all needed binaries are available in the PATH"""
binaries = ["git", "svn", "svnversion"]
unavailable = []
for i in binaries:
try:
sh.Command(i)
except sh.CommandNotFound:
unavailable.append(i)
if len(unavailable) > 0:
print("The following binaries are not available: " + str(unavailable))
sys.exit(1)
def initcfg(args):
cfg = configparser.ConfigParser()
if os.path.exists(args.config_file):
cfg.read(args.config_file)
section = cfg['general']
if args.svn_server:
section['svn_server'] = args.svn_server
if args.svn_dir:
section['svn_dir'] = args.svn_dir
if args.repository:
section['repository'] = args.repository
with open(args.config_file, 'w') as f:
cfg.write(f)
def getcfg(args):
"""Complete the command line parameter with the content of the
configuration file"""
cfg = configparser.ConfigParser()
cfg.read(args.config_file)
if 'general' not in cfg.sections():
return args
for i in cfg['general']:
if not getattr(args, i):
setattr(args, i, cfg['general'][i])
return args
def get_git_svn_repositories(args):
"""Returns the list of known svn repositories"""
dirs = []
for i in glob.glob(os.path.join(args.git_svn_dir, '*')):
dirs.append(i)
return dirs
def get_svn_repositories(args):
"""Return the list of known git-svn repositories"""
dirs = []
for i in glob.glob(os.path.join(args.svn_dir, '*')):
dirs.append(i)
return dirs
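# Assumed layout (illustrative, inferred from add_branch below): args.svn_dir holds one
# plain svn checkout per tracked branch, while args.git_svn_dir holds the corresponding
# git-svn clones that are added as remotes of the main git repository.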
def update(args):
"""Update the known svn and git-svn repositories and fetch the data
back into the main git repository"""
for i in get_svn_repositories(args):
os.chdir(i)
sh.svn('update')
version = sh.svnversion()
print(str(i) + ': updated to "' + str(version).strip() + '"')
for i in get_git_svn_repositories(args):
os.chdir(i)
sh.git('svn', 'fetch')
sh.git('rebase', 'git-svn', 'master')
os.chdir(args.repository)
sh.git('fetch', '--all')
def list_branches(args):
for i in sorted(get_git_svn_repositories(args)):
print(os.path.basename(i))
def ls_remote(args):
ls_url = args.svn_server
if args.subdir:
ls_url = os.path.join(ls_url, args.subdir)
res = sh.svn('ls', ls_url)
print(res)
def add_branch(args):
    name = os.path.basename(args.branch_name)
    tracked = [os.path.basename(i) for i in get_git_svn_repositories(args)]
    if name in tracked:
        print("The branch is already tracked")
        return 1
branch_url = os.path.join(args.svn_server, args.branch_name)
repo_dir = os.path.join(args.git_svn_dir, name)
    if args.r is None:
sh.git('svn', 'clone', branch_url, repo_dir)
else:
sh.git('svn', 'clone', '-r', str(args.r) + ':HEAD', branch_url, repo_dir)
os.chdir(args.repository)
sh.git('remote', 'add', name, repo_dir)
sh.git('fetch', name)
sh.git('branch', name, name + "/master")
os.chdir(repo_dir)
sh.git('remote', 'add', 'origin', args.repository)
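    # At this point <git_svn_dir>/<name> is a git-svn clone tracking the svn
    # branch, the main repository has a remote and a local branch named <name>,
    # and the clone knows the main repository as 'origin'.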
def rm_branch(args):
branches = [os.path.basename(i) for i in get_git_svn_repositories(args)]
if args.branch_name not in branches:
print('The branch ' + args.branch_name + ' is not currently tracked',
file=sys.stderr)
sys.exit(1)
os.chdir(args.repository)
sh.git('branch', '-D', args.branch_name)
sh.git('remote', 'remove', args.branch_name)
print("Removing " + os.path.join(args.git_svn_dir, args.branch_name))
shutil.rmtree(os.path.join(args.git_svn_dir, args.branch_name))
def commit(args):
os.chdir(args.repository)
branches = [os.path.basename(i) for i in get_git_svn_repositories(args)]
if args.dstbranch not in branches:
print("Unknown svn branch: " + args.dstbranch)
sys.exit(1)
if not args.srcbranch:
args.srcbranch = str(sh.git('rev-parse', '--abbrev-ref', 'HEAD')).strip()
branches = [str(i).split()[1].split('/')[2] for i in sh.git('show-ref', '--heads')]
if args.srcbranch not in branches:
print("Unknwon git branch " + args.srcbranch)
sys.exit(1)
    # Both branches exist; check that the src branch is up to date
    # with respect to the dest branch
commits = sh.git('rev-list', args.srcbranch + '..' + args.dstbranch + '/master')
commits = str(commits).strip()
if len(commits.splitlines()) > 0:
print("The source branch is not up to date: " + str(commits.splitlines()))
sys.exit(1)
# Check there is actually something to commit
commits = sh.git('rev-list', args.dstbranch + "/master.." + args.srcbranch)
commits = str(commits).strip()
if len(commits.splitlines()) == 0:
print("There is nothing to commit on branch " + args.srcbranch)
sys.exit(1)
os.chdir(os.path.join(args.git_svn_dir, args.dstbranch))
sh.git('fetch', '--all')
cur_branch = str(sh.git('rev-parse', '--abbrev-ref', 'HEAD')).strip()
if cur_branch != "master":
print("The current branch of the git-svn repositories is not master")
sys.exit(1)
sh.git('merge', '--ff-only', 'origin/' + args.srcbranch)
if args.n:
dcommits = sh.git('svn', 'dcommit', '-n')
else:
dcommits = sh.git('svn', 'dcommit')
os.chdir(args.repository)
sh.git('fetch', '--all')
sh.git('rebase', args.dstbranch + "/master", args.srcbranch)
print("Commit on svn: " + str(dcommits))
def branch_to_git_svn_repo(branch_name):
pass
def repo2branch(reponame):
pass
def get_cmdline_parser():
global cmdparser
if cmdparser:
return cmdparser
# General options
    parser = argparse.ArgumentParser(description=
        "Svn Wrapper automates the setup of a dual git/svn repository")
parser.add_argument(
'-c', '--config-file',
default=os.path.join(os.environ["HOME"], '.swrc'))
parser.add_argument(
'--svn-server',
help="The svn url which will be used as a prefix to svn branches")
    parser.add_argument(
        '-s', '--svn-dir',
        help='The directory where the svn branch checkouts are stored')
    parser.add_argument(
        '-g', '--git-svn-dir',
        help='The directory where the git-svn repositories are stored')
parser.add_argument(
'-r', '--repository',
help='the main git repository')
subparser = parser.add_subparsers(dest='subcommand')
# Command for setting the configuration file with general options
initcfg_parser = subparser.add_parser(
'initcfg',
help='Initialize the configuration file')
initcfg_parser.add_argument(
'-f', '--force',
action='store_true',
help='Force the overwrite of the config file')
initcfg_parser.set_defaults(func=initcfg)
# Command for updating the svn repositories
update_parser = subparser.add_parser(
'update',
help='Update all svn repositories')
update_parser.set_defaults(func=update)
# Command for listing the known svn branches
list_parser = subparser.add_parser(
'list',
        help='List all known and committable branches')
list_parser.set_defaults(func=list_branches)
# List remote svn branches
list_remote_parser = subparser.add_parser(
'ls_remote',
        help='List remote svn branches from the server')
list_remote_parser.add_argument(
'subdir',
nargs='?',
default=None,
help="Optional subdirectory to browse")
list_remote_parser.set_defaults(func=ls_remote)
# Add a new svn branch to track through git-svn
add_branch_parser = subparser.add_parser(
'add_branch',
help='Add a new svn branch to track')
add_branch_parser.add_argument(
'branch_name',
help='The name of the svn branch to track')
add_branch_parser.add_argument(
'-r',
default=None,
type=int,
help='The oldest revision to fetch on the branch')
add_branch_parser.set_defaults(func=add_branch)
# Remove a svn branch
rm_branch_parser = subparser.add_parser(
'rm_branch',
help='Stop tracking a svn branch')
rm_branch_parser.add_argument(
'branch_name')
rm_branch_parser.set_defaults(func=rm_branch)
    # Command for committing the current git branch to svn
    commit_parser = subparser.add_parser(
        'commit',
        help='Commit the current branch onto the desired svn branch')
commit_parser.add_argument('-n', help='dry-run mode', action='store_true')
commit_parser.add_argument(
'dstbranch',
help="The svn branch we want to commit on")
commit_parser.add_argument(
'srcbranch',
nargs='?',
default=None,
help="The new content we want to commit on svn. By default it is the current branch")
commit_parser.set_defaults(func=commit)
cmdparser = parser
return cmdparser
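# Typical workflow with the subcommands defined above (assuming the script is
# installed as "sw"; the actual executable name is not fixed by this file):
#
#   sw initcfg --svn-server https://svn.example.com/repos
#   sw ls_remote branches        # browse remote svn branches
#   sw add_branch branches/foo   # start tracking an svn branch via git-svn
#   sw update                    # refresh svn checkouts and git-svn fetches
#   sw commit foo                # dcommit the current git branch onto svn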
def main():
args = get_cmdline_parser().parse_args()
if hasattr(args, "func"):
func = args.func
else:
func = None
    # fill in args missing from the command line with
    # the ones from the configuration file
if (not func) or (func != initcfg):
args = getcfg(args)
if func:
sys.exit(func(args))
else:
get_cmdline_parser().print_help()
sys.exit(1)
if "__main__" == __name__:
main()
|
|
import logging
from term import reserve
from term import Line, Cell
from term import TextMode
LOGGER = logging.getLogger('screen_buffer')
class ScreenBuffer(object):
def __init__(self, max_lines=1000):
super(ScreenBuffer, self).__init__()
self._max_lines = max_lines
self._lines = []
self._scrolling_region = None
self._row_count = 0
self._col_count = 0
self._line_index_fix_before_scrolling_region = 0
self._line_index_scrolling_region = 0
self._line_index_fix_after_scrolling_region = 0
self._viewing_history = False
self._line_index_view_history = 0
self._selected_lines = []
self._cursor_cell = None
def resize_buffer(self, row_count, col_count):
self._row_count, self._col_count = row_count, col_count
self._update_buffer_data()
def get_scrolling_region(self):
return self._scrolling_region \
if self._scrolling_region else (0, self._row_count - 1)
def set_scrolling_region(self, scrolling_region):
# reset old scrolling region
if self._scrolling_region:
begin, end = self._scrolling_region
# clean up saved scroll buffer
del self._lines[self._line_index_fix_before_scrolling_region + begin:self._line_index_scrolling_region]
del self._lines[self._line_index_scrolling_region + end - begin + 1:self._line_index_fix_after_scrolling_region]
self._line_index_fix_after_scrolling_region = \
self._line_index_fix_before_scrolling_region + self._row_count
self._scrolling_region = scrolling_region
if self._scrolling_region == (0, self._row_count - 1):
# reset
self._scrolling_region = None
if self._scrolling_region:
begin, end = self._scrolling_region
self._line_index_scrolling_region = \
self._line_index_fix_before_scrolling_region + begin
self._line_index_fix_after_scrolling_region = \
self._line_index_fix_before_scrolling_region + end + 1
self._update_buffer_data()
def scroll_up(self, count=1):
for i in range(count):
if self._scrolling_region:
begin, end = self._scrolling_region
if (self._line_index_scrolling_region + end - begin + 1 <
self._line_index_fix_after_scrolling_region):
                    # there is a line that can scroll up
self._line_index_scrolling_region += 1
# reset the old lines
self._lines[self._line_index_scrolling_region].reset()
else:
                    # there is no line that can scroll up
                    # add a new line at the end of the scrolling region
self._lines.insert(self._line_index_scrolling_region + end - begin + 1,
Line())
self._line_index_fix_after_scrolling_region += 1
self._line_index_scrolling_region += 1
else:
self._line_index_fix_before_scrolling_region += 1
self._update_buffer_data()
    def scroll_down(self, count=1):
for i in range(count):
if self._scrolling_region:
begin, end = self._scrolling_region
if (self._line_index_scrolling_region >
self._line_index_fix_before_scrolling_region + begin):
                    # there is a line that can scroll down
self._line_index_scrolling_region -= 1
#reset the old lines
self._lines[self._line_index_scrolling_region].reset()
else:
                    # there is no line that can scroll down
                    # add a new line at the beginning of the scrolling region
self._lines.insert(self._line_index_scrolling_region,
Line())
self._line_index_fix_after_scrolling_region += 1
else:
if self._line_index_fix_before_scrolling_region > 0:
self._line_index_fix_before_scrolling_region -= 1
else:
self._lines.insert(0, Line())
self._line_index_fix_after_scrolling_region += 1
self._update_buffer_data()
def _update_buffer_data(self):
        # make sure all lines exist
min_buffer_size = self._line_index_fix_before_scrolling_region + self._row_count
if self._scrolling_region:
begin, end = self._scrolling_region
min_buffer_size = self._line_index_fix_after_scrolling_region + self._row_count - end - 1
reserve(self._lines, min_buffer_size, Line())
        # cap the buffer at its maximum size
if len(self._lines) > self._max_lines:
delta = len(self._lines) - self._max_lines
for i in range(delta):
                # remove lines before the fixed lines first
if self._line_index_fix_before_scrolling_region > 0:
del self._lines[0]
self._line_index_fix_before_scrolling_region -= 1
self._line_index_fix_after_scrolling_region -= 1
self._line_index_scrolling_region -= 1
else:
if self._scrolling_region:
begin, end = self._scrolling_region
#remove lines between fixed lines and scrolling region
if (self._line_index_fix_before_scrolling_region + begin <
self._line_index_scrolling_region):
del self._lines[self._line_index_fix_before_scrolling_region + begin]
self._line_index_fix_after_scrolling_region -= 1
self._line_index_scrolling_region -= 1
elif (self._line_index_scrolling_region + end - begin + 1 <
self._line_index_fix_after_scrolling_region):
#remove lines between scrolling region and fixed lines
del self._lines[self._line_index_scrolling_region + end - begin + 1]
self._line_index_fix_after_scrolling_region -= 1
elif (self._line_index_fix_after_scrolling_region + self._row_count - end - 1
< len(self._lines)):
#remove lines after fixed lines
del self._lines[self._line_index_fix_after_scrolling_region + self._row_count - end - 1]
elif (self._line_index_fix_before_scrolling_region + self._row_count < len(self._lines)):
#remove lines after fixed lines
del self._lines[self._line_index_fix_before_scrolling_region + self._row_count]
def get_line(self, row):
lines = self.get_visible_lines()
if row >= len(lines):
LOGGER.error('get line out of range:{}, {}'.format(row, len(lines)))
return lines[row]
def get_visible_lines(self):
self._update_buffer_data()
if self._viewing_history:
return self._lines[self._line_index_view_history : self._line_index_view_history + self._row_count]
if self._scrolling_region:
begin, end = self._scrolling_region
return self._lines[self._line_index_fix_before_scrolling_region : self._line_index_fix_before_scrolling_region + begin] \
+ self._lines[self._line_index_scrolling_region : self._line_index_scrolling_region + end - begin + 1] \
+ self._lines[self._line_index_fix_after_scrolling_region : self._line_index_fix_after_scrolling_region + self._row_count - end - 1]
else:
return self._lines[self._line_index_fix_before_scrolling_region : self._line_index_fix_before_scrolling_region + self._row_count]
def delete_lines(self, start, count):
if start < 0 or start >= self._row_count:
LOGGER.warning('delete lines, start:{} out of range:({}, {})'.format(start, 0, self._row_count))
return
self._update_buffer_data()
begin = self._line_index_fix_before_scrolling_region
end = self._line_index_fix_before_scrolling_region + self._row_count
start_row = self._line_index_fix_before_scrolling_region + start
if self._scrolling_region:
begin, end = self._scrolling_region
if start < begin or start > end:
LOGGER.warning('delete lines, start:{} out of range:({}, {})'.format(start, begin, end))
return
begin = self._line_index_scrolling_region
end += self._line_index_scrolling_region - begin
end += 1
start_row = self._line_index_scrolling_region
start_row += start - self._scrolling_region[0]
for i in range(count):
self._lines.insert(end, Line())
del self._lines[start_row]
def insert_lines(self, start, count):
if start < 0 or start >= self._row_count:
LOGGER.warning('insert lines, start:{} out of range:({}, {})'.format(start, 0, self._row_count))
return
self._update_buffer_data()
begin = self._line_index_fix_before_scrolling_region
end = self._line_index_fix_before_scrolling_region + self._row_count - 1
start_row = self._line_index_fix_before_scrolling_region + start
if self._scrolling_region:
begin, end = self._scrolling_region
if start < begin or start > end:
LOGGER.warning('insert lines, start:{} out of range:({}, {})'.format(start, begin, end))
return
begin = self._line_index_scrolling_region
end += self._line_index_scrolling_region - begin
start_row = self._line_index_scrolling_region
start_row += start - self._scrolling_region[0]
for i in range(count):
del self._lines[end]
self._lines.insert(start_row, Line())
def view_history(self, view_history):
self._viewing_history = view_history
if view_history:
if self._scrolling_region:
begin, end = self._scrolling_region
self._line_index_view_history = self._line_index_scrolling_region - begin
else:
self._line_index_view_history = self._line_index_fix_before_scrolling_region
def is_view_history(self):
return self._viewing_history
def view_history_pageup(self):
self._view_history_update(-1 * self._row_count)
def view_history_pagedown(self):
self._view_history_update(self._row_count)
def view_history_lineup(self):
self._view_history_update(-1)
def view_history_linedown(self):
self._view_history_update(1)
def _view_history_update(self, delta):
self._line_index_view_history += delta
if self._line_index_view_history < 0:
self._line_index_view_history = 0
if self._line_index_view_history > len(self._lines) - self._row_count:
self._line_index_view_history = len(self._lines) - self._row_count
def set_selection(self, s_from, s_to):
self.clear_selection()
if s_from == s_to:
return
s_f_col, s_f_row = s_from
s_t_col, s_t_row = s_to
lines = self.get_visible_lines()
for i in range(s_f_row, s_t_row + 1):
line = lines[i]
line.select_cells(s_f_col if i == s_f_row else 0,
s_t_col if i == s_t_row else self._col_count)
self._selected_lines.append(line)
def clear_selection(self):
for line in self._selected_lines:
line.clear_selection()
self._selected_lines = []
def get_selection_text(self):
return self._selected_lines
def has_selection(self):
return len(self._selected_lines) > 0
def set_cursor(self, cursor):
if self._cursor_cell:
self._cursor_cell.get_attr().unset_mode(TextMode.CURSOR)
if cursor:
col, row = cursor
line = self.get_line(row)
cell = line.get_cell(col)
cell.get_attr().set_mode(TextMode.CURSOR)
self._cursor_cell = cell
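# Minimal usage sketch of ScreenBuffer (illustrative only; it relies on the
# Line/Cell/reserve helpers imported from the term module above):
#
#   buf = ScreenBuffer(max_lines=1000)
#   buf.resize_buffer(24, 80)            # 24 rows by 80 columns
#   buf.set_scrolling_region((1, 22))    # keep rows 0 and 23 fixed
#   buf.scroll_up()                      # advance the scrolling region one line
#   visible = buf.get_visible_lines()    # list of row_count Line objects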
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core import validators
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import fields
from horizon.utils.validators import validate_port_range
from openstack_dashboard import api
from ..floating_ips.utils import get_int_or_uuid
class CreateGroup(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"),
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" ASCII characters and numbers.")},
validators=[validators.validate_slug])
description = forms.CharField(label=_("Description"))
def handle(self, request, data):
try:
sg = api.nova.security_group_create(request,
data['name'],
data['description'])
messages.success(request,
_('Successfully created security group: %s')
% data['name'])
return sg
        except Exception:
redirect = reverse("horizon:project:access_and_security:index")
exceptions.handle(request,
_('Unable to create security group.'),
redirect=redirect)
class AddRule(forms.SelfHandlingForm):
id = forms.CharField(widget=forms.HiddenInput())
ip_protocol = forms.ChoiceField(label=_('Rule'),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'protocol'}))
port_or_range = forms.ChoiceField(label=_('Open'),
choices=[('port', _('Port')),
('range', _('Port Range'))],
widget=forms.Select(attrs={
'class': 'switchable switched',
'data-slug': 'range',
'data-switch-on': 'protocol',
'data-protocol-tcp': _('Open'),
'data-protocol-udp': _('Open')}))
port = forms.IntegerField(label=_("Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-port': _('Port')}),
validators=[validate_port_range])
from_port = forms.IntegerField(label=_("From Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('From Port')}),
validators=[validate_port_range])
to_port = forms.IntegerField(label=_("To Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('To Port')}),
validators=[validate_port_range])
icmp_type = forms.IntegerField(label=_("Type"),
required=False,
help_text=_("Enter a value for ICMP type "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'protocol',
'data-protocol-icmp': _('Type')}),
validators=[validate_port_range])
icmp_code = forms.IntegerField(label=_("Code"),
required=False,
help_text=_("Enter a value for ICMP code "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'protocol',
'data-protocol-icmp': _('Code')}),
validators=[validate_port_range])
source = forms.ChoiceField(label=_('Source'),
choices=[('cidr', _('CIDR')),
('sg', _('Security Group'))],
help_text=_('To specify an allowed IP '
'range, select "CIDR". To '
'allow access from all '
'members of another security '
'group select "Security '
'Group".'),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'source'}))
cidr = fields.IPField(label=_("CIDR"),
required=False,
initial="0.0.0.0/0",
help_text=_("Classless Inter-Domain Routing "
"(e.g. 192.168.0.0/24)"),
version=fields.IPv4 | fields.IPv6,
mask=True,
widget=forms.TextInput(
attrs={'class': 'switched',
'data-switch-on': 'source',
'data-source-cidr': _('CIDR')}))
security_group = forms.ChoiceField(label=_('Security Group'),
required=False,
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-sg': _('Security '
'Group')}))
def __init__(self, *args, **kwargs):
sg_list = kwargs.pop('sg_list', [])
super(AddRule, self).__init__(*args, **kwargs)
# Determine if there are security groups available for the
# source group option; add the choices and enable the option if so.
if sg_list:
security_groups_choices = sg_list
else:
security_groups_choices = [("", _("No security groups available"))]
self.fields['security_group'].choices = security_groups_choices
rules_dict = getattr(settings, 'SECURITY_GROUP_RULES', {})
common_rules = [(k, _(rules_dict[k]['name']))
for k in rules_dict]
common_rules.sort()
custom_rules = [('tcp', _('Custom TCP Rule')),
('udp', _('Custom UDP Rule')),
('icmp', _('Custom ICMP Rule'))]
self.fields['ip_protocol'].choices = custom_rules + common_rules
self.rules = rules_dict
def clean(self):
cleaned_data = super(AddRule, self).clean()
ip_proto = cleaned_data.get('ip_protocol')
port_or_range = cleaned_data.get("port_or_range")
source = cleaned_data.get("source")
icmp_type = cleaned_data.get("icmp_type", None)
icmp_code = cleaned_data.get("icmp_code", None)
from_port = cleaned_data.get("from_port", None)
to_port = cleaned_data.get("to_port", None)
port = cleaned_data.get("port", None)
if ip_proto == 'icmp':
if icmp_type is None:
msg = _('The ICMP type is invalid.')
raise ValidationError(msg)
if icmp_code is None:
msg = _('The ICMP code is invalid.')
raise ValidationError(msg)
            if icmp_type not in xrange(-1, 256):
                msg = _('The ICMP type is not in range (-1, 255)')
                raise ValidationError(msg)
            if icmp_code not in xrange(-1, 256):
                msg = _('The ICMP code is not in range (-1, 255)')
                raise ValidationError(msg)
cleaned_data['from_port'] = icmp_type
cleaned_data['to_port'] = icmp_code
elif ip_proto == 'tcp' or ip_proto == 'udp':
if port_or_range == "port":
cleaned_data["from_port"] = port
cleaned_data["to_port"] = port
if port is None:
msg = _('The specified port is invalid.')
raise ValidationError(msg)
else:
if from_port is None:
msg = _('The "from" port number is invalid.')
raise ValidationError(msg)
if to_port is None:
msg = _('The "to" port number is invalid.')
raise ValidationError(msg)
if to_port < from_port:
msg = _('The "to" port number must be greater than '
'or equal to the "from" port number.')
raise ValidationError(msg)
else:
cleaned_data['ip_protocol'] = self.rules[ip_proto]['ip_protocol']
cleaned_data['from_port'] = int(self.rules[ip_proto]['from_port'])
cleaned_data['to_port'] = int(self.rules[ip_proto]['to_port'])
if source == "cidr":
cleaned_data['security_group'] = None
else:
cleaned_data['cidr'] = None
return cleaned_data
def handle(self, request, data):
try:
rule = api.nova.security_group_rule_create(
request,
get_int_or_uuid(data['id']),
data['ip_protocol'],
data['from_port'],
data['to_port'],
data['cidr'],
data['security_group'])
messages.success(request,
_('Successfully added rule: %s') % unicode(rule))
return rule
        except Exception:
redirect = reverse("horizon:project:access_and_security:"
"security_groups:detail", args=[data['id']])
exceptions.handle(request,
_('Unable to add rule to security group.'),
redirect=redirect)
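# Note on the clean() mapping above: for ICMP rules the type/code pair is
# carried in the from_port/to_port fields, e.g. icmp_type=8, icmp_code=0
# (echo request) ends up as from_port=8, to_port=0 in the API call.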
|
|
# -*- coding: utf-8 -*-
"""
Plotting utility for the production of Keepout Map Related Products
Generalized from makeKeepoutMap.py (authored by Gabriel Soto)
Written by: Dean Keithly
Written on: 3/6/2019
"""
import os
from EXOSIMS.util.vprint import vprint
import random as myRand
import sys, os.path, EXOSIMS, EXOSIMS.MissionSim
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
from numpy import nan
if 'DISPLAY' not in os.environ:  # use a non-interactive backend when no display is available
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
else:
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import colors
import argparse
import json
from EXOSIMS.util.read_ipcluster_ensemble import gen_summary
from EXOSIMS.util.read_ipcluster_ensemble import read_all
from numpy import linspace
from matplotlib.ticker import NullFormatter, MaxNLocator
from matplotlib import ticker
import astropy.units as u
import matplotlib.patheffects as PathEffects
import datetime
import re
from astropy.time import Time
import time
class plotKeepoutMap(object):
""" This plotting utility plots anything pertaining to keepout maps
"""
_modtype = 'util'
def __init__(self, args=None):
vprint(args)
vprint('initialize plotKeepoutMap done')
pass
def singleRunPostProcessing(self, PPoutpath=None, folder=None):
"""This is called by runPostProcessing
Args:
PPoutpath (string) - output path to place data in
folder (string) - full filepath to folder containing runs
"""
if not os.path.exists(folder):#Folder must exist
raise ValueError('%s not found'%folder)
if not os.path.exists(PPoutpath):#PPoutpath must exist
raise ValueError('%s not found'%PPoutpath)
outspecfile = os.path.join(folder,'outspec.json')
if not os.path.exists(outspecfile):#outspec file not found
raise ValueError('%s not found'%outspecfile)
#Create Mission Object To Extract Some Plotting Limits
sim = EXOSIMS.MissionSim.MissionSim(outspecfile, nopar=True)
obs = sim.Observatory
TL = sim.TargetList #target list
missionStart = sim.TimeKeeping.missionStart #Time Object
TK = sim.TimeKeeping
##########################################################################################
        #### Generate Keepout map
        N = np.arange(0, TL.nStars)  # array of Target List star indices
#Generate Keepout over Time
koEvaltimes = np.arange(TK.missionStart.value, TK.missionStart.value+TK.missionLife.to('day').value,1) #2year mission, I guess
koEvaltimes = Time(koEvaltimes,format='mjd')
#initial arrays
koGood = np.zeros([TL.nStars,len(koEvaltimes)]) #keeps track of when a star is in keepout or not (True = observable)
culprit = np.zeros([TL.nStars,len(koEvaltimes),11]) #keeps track of whose keepout the star is under
#calculating keepout angles for all stars
tic = time.clock()
for n in np.arange(TL.nStars):
koGood[n,:],r_body, r_targ, culprit[n,:,:], koangles = obs.keepout(TL,n,koEvaltimes,True)
toc = time.clock()
print('This took %s seconds' %(str(toc-tic)))
# Define Colors
#green:#00802b
#purplish:7F7FFF
#crap taupe:DEDE7F
#GOLD: FFD500
#GREY:747783
        cmap = colors.ListedColormap(['white','#FFD500', 'blue', '#747783','red','m','red']) # colors used to indicate the culprit behind keepout
bounds=[0,1,2,3,4,5,6,7]
norm = colors.BoundaryNorm(bounds, cmap.N)
#creating an array of colors based on culprit
koColor = np.zeros([TL.nStars,len(koEvaltimes)])
for t in np.arange(0,len(koEvaltimes)):
sunFault = [bool(culprit[x,t,0]) for x in np.arange(TL.nStars)]
earthFault = [bool(culprit[x,t,2]) for x in np.arange(TL.nStars)]
moonFault = [bool(culprit[x,t,1]) for x in np.arange(TL.nStars)]
mercFault = [bool(culprit[x,t,3]) for x in np.arange(TL.nStars)]
venFault = [bool(culprit[x,t,4]) for x in np.arange(TL.nStars)]
marsFault = [bool(culprit[x,t,5]) for x in np.arange(TL.nStars)]
koColor[marsFault ,t] = 4#red
koColor[venFault ,t] = 5#m
koColor[mercFault ,t] = 6#red
koColor[moonFault ,t] = 3#747783
koColor[earthFault,t] = 2#blue
koColor[sunFault ,t] = 1#FFD500
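            # later assignments overwrite earlier ones, so when several bodies
            # block a star at the same time the Sun takes precedence, then
            # Earth, then the Moon, then Mercury/Venus/Mars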
#plotting colors on a 2d map
plt.close(546832183)
fig = plt.figure(546832183, figsize=(10,5))
fig.subplots_adjust(bottom=0.15)
gs = gridspec.GridSpec(1,2, width_ratios=[6,1], height_ratios=[1])
gs.update(wspace=0.06, hspace=0.06) # set the spacing between axes.
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ax2 = plt.subplot(gs[1])
ax = plt.subplot(gs[0])
#I'm plotting a subset of koColor here that looked good to me for half the mission time (1yr)
if koColor.shape[0] > 100: #Determine maximum number of stars to track keepouts for
NUMBER_Y = 100
else:
NUMBER_Y = koColor.shape[0]
sInds = np.linspace(0, koColor.shape[0], num=NUMBER_Y, endpoint=False, dtype=int).tolist()
img = plt.imshow(koColor[sInds,0:int(np.floor(len(koEvaltimes)))], aspect='auto',#4,
cmap=cmap,interpolation='none',origin='lower',norm=norm)
ax.set_xlabel('Mission Elapsed Time (d), Mission Start %s UTC MJD' %(str(TK.missionStart.value)), weight='bold')
ax.set_ylabel(r'Target Star, $i$', weight='bold')
ax.set_xlim(left=0.,right=np.max(koEvaltimes).value-TK.missionStart.value)
ax.set_ylim(bottom=0.,top=NUMBER_Y)
outline=PathEffects.withStroke(linewidth=5, foreground='black')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[0],label='Visible',path_effects=[outline])
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[1],label=ur"$\u2609$")
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[2],label=ur'$\oplus$')##\u2641$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[3],label=ur'$\u263D$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[4],label=ur'$\u2642\u263F$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[5],label=ur'$\u2640$')
#plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[6],label=ur'$\u263F$') duplicate color so appended above
leg = plt.legend(framealpha=1.0)
# get the lines and texts inside legend box
leg_lines = leg.get_lines()
leg_texts = leg.get_texts()
# bulk-set the properties of all lines and texts
plt.setp(leg_lines, linewidth=4)
plt.setp(leg_texts, fontsize='x-large')
nullfmt = NullFormatter()
ax2.yaxis.set_major_formatter(nullfmt)
#Plot horizontal histogram
tTotal = np.max(koEvaltimes).value-TK.missionStart.value # Calculate total time width of koMap
tVis = list() # stores time visible of each star
for i in np.arange(len(sInds)):#iterate over all stars and append amount of time each star is visible
tVis.append(len(np.where(koColor[sInds[i],:]==0)[0]))
width = np.zeros(len(tVis))+1.
ax2.barh(np.arange(len(sInds))+0.5,np.asarray(tVis,dtype=float)/tTotal*100., width, align='center', color='black')
ax2.set_xlim(left=0.,right=100.)
ax2.set_ylim(bottom=0.,top=NUMBER_Y)
ax2.set_xlabel('% Time\n Visible', weight='bold')
plt.show(block=False)
date = unicode(datetime.datetime.now())
date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date
fname = 'koMap_' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'))
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'))
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'))
#### Plot a koMap scaled down to 1 year
if TK.missionLife.to('year').value > 1.0:# years
plt.close(56846512161)
fig = plt.figure(56846512161)
fig.subplots_adjust(bottom=0.15)
gs = gridspec.GridSpec(1,2, width_ratios=[6,1], height_ratios=[1])
gs.update(wspace=0.06, hspace=0.06) # set the spacing between axes.
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ax2 = plt.subplot(gs[1])
ax = plt.subplot(gs[0])
#I'm plotting a subset of koColor here that looked good to me for half the mission time (1yr)
if koColor.shape[0] > 100: #Determine maximum number of stars to track keepouts for
NUMBER_Y = 100
else:
NUMBER_Y = koColor.shape[0]
sInds = np.linspace(0, koColor.shape[0], num=NUMBER_Y, endpoint=False, dtype=int).tolist()
img = plt.imshow(koColor[sInds,0:365], aspect='auto',#4,
cmap=cmap,interpolation='none',origin='lower',norm=norm)
ax.set_xlabel('Mission Elapsed Time (d)\nMission Start %s UTC MJD' %(str(TK.missionStart.value)), weight='bold')
ax.set_ylabel(r'Target Star, $i$', weight='bold')
ax.set_xlim(left=0.,right=365.)
ax.set_ylim(bottom=0.,top=NUMBER_Y)
outline=PathEffects.withStroke(linewidth=5, foreground='black')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[0],label='Visible',path_effects=[outline])
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[1],label=ur'$\u2609$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[2],label=ur'$\oplus$')#\u2641$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[3],label=ur'$\u263D$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[4],label=ur'$\u2642\u263F$')
plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[5],label=ur'$\u2640$')
#plt.plot([-1.,-1.],[-1.,-1.],color=cmap.colors[6],label=ur'$\u263F$')
leg = plt.legend(framealpha=1.0)
# get the lines and texts inside legend box
leg_lines = leg.get_lines()
leg_texts = leg.get_texts()
# bulk-set the properties of all lines and texts
plt.setp(leg_lines, linewidth=4)
plt.setp(leg_texts, fontsize='x-large')
nullfmt = NullFormatter()
ax2.yaxis.set_major_formatter(nullfmt)
#Plot horizontal histogram
tTotal = np.max(koEvaltimes).value-TK.missionStart.value # Calculate total time width of koMap
tVis = list() # stores time visible of each star
for i in np.arange(len(sInds)):#iterate over all stars and append amount of time each star is visible
tVis.append(len(np.where(koColor[sInds[i],:]==0)[0]))
width = np.zeros(len(tVis))+1.
ax2.barh(np.arange(len(sInds))+0.5,np.asarray(tVis,dtype=float)/tTotal*100., width, align='center', color='black')
ax2.set_xlim(left=0.,right=100.)
ax2.set_ylim(bottom=0.,top=NUMBER_Y)
ax2.set_xlabel('% Time\n Visible', weight='bold')
plt.show(block=False)
date = unicode(datetime.datetime.now())
date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date
fname = 'koMapScaled_' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'))
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'))
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'))
#### Plot a Histogram of Percent Time Visible 10 bins
plt.close(65685621)
fig = plt.figure(65685621)
bins = np.linspace(start=0,stop=100,num=11)
plt.hist(np.asarray(tVis)/tTotal*100., bins=bins, color='black', alpha=1., histtype='bar', ec='black')
plt.ylabel('Target Count', weight='bold')
plt.xlabel('Time Visible (%)', weight='bold')
plt.xlim((0,100))
plt.show(block=False)
date = unicode(datetime.datetime.now())
date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date
fname = 'koMapHist10_' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'))
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'))
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'))
#### Plot as Histogram of Percent Time Visible Many Bins
plt.close(98735654)
fig = plt.figure(98735654)
bins = np.linspace(start=0,stop=np.round(np.max(tVis)/tTotal*100.),num=np.round(np.max(tVis)/tTotal*100.)+1)
plt.hist(np.asarray(tVis)/tTotal*100., bins=bins, color='black', alpha=1., histtype='bar', ec='black')
plt.ylabel('Target Count', weight='bold')
plt.xlabel('Time Visible (%)', weight='bold')
plt.xlim((0,np.ceil(np.max(tVis)/tTotal*100.)))
plt.show(block=False)
date = unicode(datetime.datetime.now())
date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date
fname = 'koMapHistDetail_' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'))
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'))
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'))
#### Plot Hist and CDF of % time ######################################
tVis2 = list() # stores time visible of each star
for i in np.arange(len(sInds)):#iterate over all stars and append amount of time each star is visible
tVis2.append(len(np.where(koColor[i,:]==0)[0]))
if tVis2[-1] > tTotal:
tVis2[-1] = tTotal
bins = np.linspace(start=0,stop=np.round(np.max(tVis2)/tTotal*100.),num=np.round(np.max(tVis2)/tTotal*100.)+1)
n, bins, patches = plt.figure(665465461286584).add_subplot(1,1,1).hist(np.asarray(tVis2)/tTotal*100., bins=bins)
plt.show(block=False)
        plt.close(665465461286584) # doing this just to destroy the above plot; replace with numpy.histogram in the future
cdf = np.cumsum(n)#cumtrapz(n, bins[:-1], initial=0.)
cdf = cdf/np.max(cdf)
plt.close(23623)
fig2 = plt.figure(23623)
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ax2 = fig2.add_subplot(1,1,1)
bins = np.linspace(start=0,stop=np.round(np.max(tVis2)/tTotal*100.),num=np.round(np.max(tVis2)/tTotal*100.)+1)
n2, bins2, patches2 = ax2.hist(np.asarray(tVis2)/tTotal*100.,zorder=8,color='black', bins=bins[1:])
ax2.set_xlabel('Percent Time Visible (%)', weight='bold')
ax3 = ax2.twinx()
ax3.plot(bins[:-1],cdf*100.,zorder=10, color='red')
#DELETEax2.spines['right'].set_color('red') # setting the right side axis to red
ax3.spines['right'].set_color('red') # setting the right side axis to red
ax3.xaxis.label.set_color('red')
ax3.tick_params(axis='y', colors='red')
ax2.set_ylabel('Target Count', weight='bold')
ax3.set_ylabel('CDF (%)', weight='bold', color='red')
ax2.set_xlim(left=0.,right=100.)
ax2.set_ylim(bottom=0.,top=1.1*np.max(n2))
ax3.set_ylim(bottom=0.,top=100.)
plt.show(block=False)
fname = 'koMapHIST_CDF_' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'))
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'))
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'))
###########################################################
|
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run memtier_benchmark against Redis.
memtier_benchmark is a load generator created by RedisLabs to benchmark
Redis.
Redis homepage: http://redis.io/
memtier_benchmark homepage: https://github.com/RedisLabs/memtier_benchmark
"""
import logging
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.packages import redis_server
flags.DEFINE_integer('redis_numprocesses', 1, 'Number of Redis processes to '
'spawn per processor.')
flags.DEFINE_integer('redis_clients', 5, 'Number of redis loadgen clients')
flags.DEFINE_string('redis_setgetratio', '1:0', 'Ratio of writes to reads '
                    '(SET:GET) performed by the memtier benchmark, default is '
                    '\'1:0\', i.e. writes only.')
MEMTIER_COMMIT = '1.2.0'
FIRST_PORT = 6379
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'redis'
BENCHMARK_CONFIG = """
redis:
description: >
Run memtier_benchmark against Redis.
Specify the number of client VMs with --redis_clients.
vm_groups:
default:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
config['vm_groups']['default']['vm_count'] = 1 + FLAGS.redis_clients
return config
def PrepareLoadgen(load_vm):
load_vm.Install('memtier')
def Prepare(benchmark_spec):
"""Install Redis on one VM and memtier_benchmark on another.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
redis_vm = vms[0]
# Install latest redis on the 1st machine.
redis_vm.Install('redis_server')
sed_cmd = (r"sed -i -e '/save 900/d' -e '/save 300/d' -e '/save 60/d' -e 's/#"
" save \"\"/save \"\"/g' %s/redis.conf")
redis_vm.RemoteCommand(sed_cmd % redis_server.REDIS_DIR)
for i in range(redis_vm.num_cpus * FLAGS.redis_numprocesses):
port = FIRST_PORT + i
redis_vm.RemoteCommand(
'cp %s/redis.conf %s/redis-%d.conf' %
(redis_server.REDIS_DIR, redis_server.REDIS_DIR, port))
redis_vm.RemoteCommand(
r'sed -i -e "s/port 6379/port %d/g" %s/redis-%d.conf' %
(port, redis_server.REDIS_DIR, port))
redis_vm.RemoteCommand(
'nohup sudo %s/src/redis-server %s/redis-%d.conf &> /dev/null &' %
(redis_server.REDIS_DIR, redis_server.REDIS_DIR, port))
args = [((vm,), {}) for vm in vms[1:]]
vm_util.RunThreaded(PrepareLoadgen, args)
def RunLoad(redis_vm, load_vm, threads, port, test_id, results):
"""Spawn a memteir_benchmark on the load_vm against the redis_vm:port.
Args:
redis_vm: The target of the memtier_benchmark
load_vm: The vm that will run the memtier_benchmark.
threads: The number of threads to run in this memtier_benchmark process.
port: the port to target on the redis_vm.
    test_id: a number unique to this iteration and load_vm.
    results: a dictionary within which the results of the run will be stored.
        The format of the results will be id : a tuple containing
        throughput achieved and average latency.
"""
if threads == 0:
return
base_cmd = ('memtier_benchmark -s %s -p %d -d 128 '
'--ratio %s --key-pattern S:S -x 1 -c 1 -t %d '
'--test-time=%d --random-data > %s ;')
final_cmd = (base_cmd % (redis_vm.internal_ip, port,
FLAGS.redis_setgetratio, threads, 10,
'/dev/null') +
base_cmd % (redis_vm.internal_ip, port,
FLAGS.redis_setgetratio, threads, 20,
'outfile-%d' % test_id) +
base_cmd % (redis_vm.internal_ip, port,
FLAGS.redis_setgetratio, threads, 10,
'/dev/null'))
load_vm.RemoteCommand(final_cmd)
output, _ = load_vm.RemoteCommand('cat outfile-%d | grep Totals | '
'tr -s \' \' | cut -d \' \' -f 2' % test_id)
throughput = float(output)
output, _ = load_vm.RemoteCommand('cat outfile-%d | grep Totals | '
'tr -s \' \' | cut -d \' \' -f 5' % test_id)
latency = float(output)
output, _ = load_vm.RemoteCommand('cat outfile-%d' % test_id)
logging.info(output)
results[test_id] = (throughput, latency)
def Run(benchmark_spec):
"""Run memtier_benchmark against Redis.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
redis_vm = vms[0]
load_vms = vms[1:]
latency = 0.0
latency_threshold = 1000000.0
threads = 0
results = []
num_servers = redis_vm.num_cpus * FLAGS.redis_numprocesses
max_throughput_for_completion_latency_under_1ms = 0.0
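  # Ramp-up loop: grow the total thread count by ~15% per iteration and stop
  # once the throughput-weighted average latency exceeds the threshold, which
  # is set below to 20x the latency measured with a single thread.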
while latency < latency_threshold:
iteration_results = {}
threads += max(1, int(threads * .15))
num_loaders = len(load_vms) * num_servers
args = [((redis_vm, load_vms[i % len(load_vms)], threads / num_loaders +
(0 if (i + 1) > threads % num_loaders else 1),
FIRST_PORT + i % num_servers, i, iteration_results),
{}) for i in range(num_loaders)]
logging.error('BEFORE: %s', args)
vm_util.RunThreaded(RunLoad, args)
throughput = 0.0
latency = 0.0
logging.error('%s', iteration_results)
for result in iteration_results.values():
throughput += result[0]
for result in iteration_results.values():
latency += result[1] * result[0] / throughput
if latency < 1.0:
max_throughput_for_completion_latency_under_1ms = max(
max_throughput_for_completion_latency_under_1ms,
throughput)
results.append(sample.Sample('throughput', throughput, 'req/s',
{'latency': latency, 'threads': threads}))
logging.info('Threads : %d (%f, %f) < %f', threads, throughput, latency,
latency_threshold)
if threads == 1:
latency_threshold = latency * 20
results.append(sample.Sample(
'max_throughput_for_completion_latency_under_1ms',
max_throughput_for_completion_latency_under_1ms,
'req/s'))
return results
def Cleanup(benchmark_spec):
"""Remove Redis and YCSB.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
pass
|
|
from __future__ import print_function
import glob,sys,os
from collections import Counter
import dresher_LSA as d
"""This script is adapted from dresherClean_run in order to address fully
iterated efficient trees.
next tasks: verify that novel outputs of total trees are viable trees. REMINDER, the FOR LOOP NEEDS TO BE A SUBSET OF 720 to not run forever.
Updates 12.30.2016
Wrote runtime vars to make modifications easier, cleaned up print, write, and runtime functions and loops to remove unnecessary features.
This version (vs. 1.8) uses a dictionary and dictionary keys to identify unique sets after analyzing, on the assumption that order functions are applied later in analysis.
Updates 09.30.2016
Built an optional argument for .csv
command line: python KDM_LSA.py filename.csv
default reads all CSV files in folder.
Wrote readme file.
Wrote a readIn.py file (fixed encoding file), which reads output.txt into a dictionary of objects; writes 'analyses.txt'
readIn.py: started working on deleting empty nodes in the trees.
TODO:
unicode character compliant
**if in tree [], delete.: see readIn.py for update.
**spell out vowel and the features used..
*update readme to include post-kdm_lsa.py analysis, and pre- inventory generator.
*write "look in directory x for inventories, write in directory y for analyses" functions to keep folders clean.
"""
################
# Runtime Vars #
################
def runTimeVars(mover_listofFiles,vowel_inventory_size,inventory_range=1,randomGenerator = True,writeFile=True):
#Inventory settings
dirFolder='1_input_inventories/'
#GUI settings
text_analysisInit="\n\n-------------------------\nDRESHER PARSING INITIATED\n-------------------------\n"
header='*Vowel inventory size, inventory number, language used, unique efficient permutations found, current iteration count, total iterations to parse.\n\nV-Ct\tname\tperms\tcurSet\tsets\tunique\tcurIt\titers'
GUI_update_freq = 500 #Set higher for quicker analyses, fewer updates
#Permutations settings
permOrder = True #If order matters, change here. If not, set to false (e.g. only unique sets). Needs to be set to true if looking for all unique efficient trees.
permLength_buffer = 0 #This number is subtracted from inventory size to give feature permutation size.
# randomGenerator = True #Turn on random sampling to reduce number of permutations? Set to True, and
randomGenSize = 25 # set integer between 1-100 (percent) of permutations to be sampled.
treeGenerator_on = False #build strings usable for a visual syntax maker.
#write file settings
    endBuffer=2 #blank lines appended after each block to make the output easier to read
wFilename='1_KDM_parsed'
writeMethod='w'
#############
# Functions #
#############
def inventoryRange(mover_listofFiles,inventory_range):
if inventory_range==0:
listofFiles=mover_listofFiles
elif inventory_range==1:
listofFiles=mover_listofFiles[:1]
else:
listofFiles=mover_listofFiles[0:2]
return listofFiles
def permLengthGen(vowel_inventory_size,curUniqueFeatures):
"""Sets a smaller size of feature sets searched, as a function of vowel inventory size to avoid combo explosion. It's a hack which eventually could be replaced."""
if vowel_inventory_size>6:
permLength=vowel_inventory_size-permLength_buffer
else:
permLength=6
"""below sets an upper bound to all possible features (in cases where vowel count>feature count."""
curLength=len(curUniqueFeatures)
# if permLength>curLength:
# permLength=curLength-(permLength_buffer*2)
return permLength
def parseBlock(curPerm,inventory):
phoneFeatArray=d.arrayBuilder(inventory,inventory.keys(),curPerm,binary='n')
eTrees=d.findDiscriminatingPhonemes(phoneFeatArray,columnLabels=[])
eTrees=d.efficientWrapper(curPerm,eTrees)
return eTrees
def uniqueSetWrapper(uniqueSets):
#preps efficient algorithm output for re-input.
fullPerms=[]
curLength=len(uniqueSets[0])
for each in uniqueSets:
j=d.permGenerator(each, curLength, True) #See Runtime Vars to configure.
for curItm in j:
fullPerms.append(curItm)
return fullPerms
###########
# RUNTIME #
###########
def runtime(inventory):
curUniqueFeatures=d.uniqueFeatures(inventory,inventory.keys())
"""List of all iterations of current unique features."""
permLength=permLengthGen(vowel_inventory_size,curUniqueFeatures)
fullPerms=d.permGenerator(curUniqueFeatures, 8, False) #See Runtime Vars to configure.
if randomGenerator: #See Runtime Vars to configure random sampling.
fullPerms=d.randomSampler(fullPerms,randomGenSize)
"""Efficient algorithm"""
totalTrees={}
counterTotal=0
fullPerm_len=len(fullPerms)
txt_fullPerms_len=str(fullPerm_len)
gui_update= txt_vowel_length+'-'+txt_invNum+"\t"+txt_curInventory+'\t'
def screenUpdate1(gui_update=gui_update): #doesn't work, don't know why.
if (counterTotal % GUI_update_freq == 0) or counterTotal==fullPerm_len:
gui_update+=str(totalTrees_len)+"\t"+str(counterTotal)+"\t"+txt_fullPerms_len
print(gui_update, end='\r')
for curPerm in fullPerms:
eTrees=parseBlock(curPerm,inventory)
if len(eTrees)>0:
totalTrees[tuple(sorted(eTrees[0]))] = True
counterTotal+=1
totalTrees_len=len(totalTrees.keys())
if counterTotal % GUI_update_freq == 0:
gui_update+=str(totalTrees_len)+"\t"+str(counterTotal)+"\t"+txt_fullPerms_len
print(gui_update, end='\r')
gui_update+=str(totalTrees_len)+"\t"+str(counterTotal)+"\t"+txt_fullPerms_len
print(gui_update, end='\r')
fullPerms=uniqueSetWrapper(totalTrees.keys()) #preps efficient algorithm output for re-input.
# print(len(fullPerms))
# print(len(fullPerms[0]))
"""Efficient algorithm"""
totalTrees={}
counterTotal2=0
txt_fullPerms_len2=str(len(fullPerms))
for curPerm in fullPerms:
eTrees=parseBlock(curPerm,inventory)
if len(eTrees)>0:
totalTrees[tuple(sorted(eTrees[0]))] = True
counterTotal2+=1
totalTrees_len2=len(totalTrees.keys())
"""Print updates to screen"""
if counterTotal2 % GUI_update_freq == 0:
gui2_update=gui_update+'\t'+str(totalTrees_len2)+"\t"+str(counterTotal2)+"\t"+txt_fullPerms_len2
print(gui2_update, end='\r')
gui2_update=gui_update+'\t'+str(totalTrees_len2)+"\t"+str(counterTotal2)+"\t"+txt_fullPerms_len2
print (gui2_update)
"""Error check."""
#If 'Saturated list' appears in output file, change length threshold on unique perms.
if counterTotal==totalTrees_len:
totalTrees={("saturated list"):True}
"""write to file"""
wRows.append([curInventory,inventory.keys(),curUniqueFeatures,totalTrees.keys()])
"""Tree Generator"""
if treeGenerator_on:
fullPermOutput=d.dresherGenerate(inventory,totalTrees.keys(),inventory.keys(),curInventory) #this is what makes the trees.
writeRow.append(fullPermOutput)
return totalTrees.keys()
#############
## Runtime ##
#############
listofFiles=[]
collection={}
invNum=1
wRows=[] #wrapper object for write to file
"""Main loop over CSV files"""
listofFiles=inventoryRange(mover_listofFiles,inventory_range) #see fn. above: creates subset for testing.
print (text_analysisInit)
print (header)
for curInventory in listofFiles:
inventory=d.inventoryImport(curInventory)
#printBlock variables, see runtime for printBlock
txt_vowel_length=str(len(inventory.keys()))
txt_invNum=str(invNum)
txt_curInventory=str(curInventory[22:28])
collection[curInventory]=runtime(inventory)
invNum+=1
"""Write to file"""
if writeFile:
if treeGenerator_on:
endBuffer+=-1
d.writeBlock(wRows, wFilename, ext='.txt', method=writeMethod, delim='\n',endBuffer=endBuffer)
return wRows
|
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import math
import multiprocessing
import optparse
import os
from os.path import join
import random
import shlex
import subprocess
import sys
import time
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.objects import context
ARCH_GUESS = utils.DefaultArch()
DEFAULT_TESTS = ["mjsunit", "webkit"]
TIMEOUT_DEFAULT = 60
TIMEOUT_SCALEFACTOR = {"debug" : 4,
"release" : 1 }
MODE_FLAGS = {
"debug" : ["--nohard-abort", "--nodead-code-elimination",
"--nofold-constants", "--enable-slow-asserts",
"--debug-code", "--verify-heap",
"--noconcurrent-recompilation"],
"release" : ["--nohard-abort", "--nodead-code-elimination",
"--nofold-constants", "--noconcurrent-recompilation"]}
SUPPORTED_ARCHS = ["android_arm",
"android_ia32",
"arm",
"ia32",
"mipsel",
"nacl_ia32",
"nacl_x64",
"x64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
"android_ia32",
"arm",
"mipsel",
"nacl_ia32",
"nacl_x64"]
MAX_DEOPT = 1000000000
DISTRIBUTION_MODES = ["smooth", "random"]
class RandomDistribution:
def __init__(self, seed=None):
seed = seed or random.randint(1, sys.maxint)
print "Using random distribution with seed %d" % seed
self._random = random.Random(seed)
def Distribute(self, n, m):
if n > m:
n = m
return self._random.sample(xrange(1, m + 1), n)
class SmoothDistribution:
"""Distribute n numbers into the interval [1:m].
  F1: Factor of the first derivative of the distribution function.
  F2: Factor of the second derivative of the distribution function.
With F1 and F2 set to 0, the distribution will be equal.
"""
def __init__(self, factor1=2.0, factor2=0.2):
self._factor1 = factor1
self._factor2 = factor2
def Distribute(self, n, m):
if n > m:
n = m
if n <= 1:
return [ 1 ]
result = []
x = 0.0
dx = 1.0
ddx = self._factor1
dddx = self._factor2
for i in range(0, n):
result += [ x ]
x += dx
dx += ddx
ddx += dddx
# Project the distribution into the interval [0:M].
result = [ x * m / result[-1] for x in result ]
# Equalize by n. The closer n is to m, the more equal will be the
# distribution.
for (i, x) in enumerate(result):
# The value of x if it was equally distributed.
equal_x = i / float(n - 1) * float(m - 1) + 1
# Difference factor between actual and equal distribution.
diff = 1 - (x / equal_x)
# Equalize x dependent on the number of values to distribute.
result[i] = int(x + (i + 1) * diff)
return result
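  # Worked example (hand-computed, constructor defaults 2.0/0.2): Distribute(3, 10)
  # builds the raw curve [0, 1, 4], projects it to [0, 2.5, 10], and the
  # equalization step then yields [1, 3, 10].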
def Distribution(options):
if options.distribution_mode == "random":
return RandomDistribution(options.seed)
if options.distribution_mode == "smooth":
return SmoothDistribution(options.distribution_factor1,
options.distribution_factor2)
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--arch",
help=("The architecture to run tests for, "
"'auto' or 'native' for auto-detect"),
default="ia32,x64,arm")
result.add_option("--arch-and-mode",
help="Architecture and mode in the format 'arch.mode'",
default=None)
result.add_option("--asan",
help="Regard test expectations for ASAN",
default=False, action="store_true")
result.add_option("--buildbot",
help="Adapt to path structure used on buildbots",
default=False, action="store_true")
result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
result.add_option("--coverage", help=("Exponential test coverage "
"(range 0.0, 1.0) -- 0.0: one test, 1.0 all tests (slow)"),
default=0.4, type="float")
result.add_option("--coverage-lift", help=("Lifts test coverage for tests "
"with a small number of deopt points (range 0, inf)"),
default=20, type="int")
result.add_option("--download-data", help="Download missing test suite data",
default=False, action="store_true")
result.add_option("--distribution-factor1", help=("Factor of the first "
"derivation of the distribution function"), default=2.0,
type="float")
result.add_option("--distribution-factor2", help=("Factor of the second "
"derivation of the distribution function"), default=0.7,
type="float")
result.add_option("--distribution-mode", help=("How to select deopt points "
"for a given test (smooth|random)"),
default="smooth")
result.add_option("--dump-results-file", help=("Dump maximum number of "
"deopt points per test to a file"))
result.add_option("--extra-flags",
help="Additional flags to pass to each test command",
default="")
result.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
result.add_option("-m", "--mode",
help="The test modes in which to run (comma-separated)",
default="release,debug")
result.add_option("--outdir", help="Base directory with compile output",
default="out")
result.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
result.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
result.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
result.add_option("--shell-dir", help="Directory containing executables",
default="")
result.add_option("--seed", help="The seed for the random distribution",
type="int")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default= -1, type="int")
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("--random-seed", default=0, dest="random_seed",
help="Default seed for initializing random generator")
return result
def ProcessOptions(options):
global VARIANT_FLAGS
# Architecture and mode related stuff.
if options.arch_and_mode:
tokens = options.arch_and_mode.split(".")
options.arch = tokens[0]
options.mode = tokens[1]
options.mode = options.mode.split(",")
for mode in options.mode:
if not mode.lower() in ["debug", "release"]:
print "Unknown mode %s" % mode
return False
if options.arch in ["auto", "native"]:
options.arch = ARCH_GUESS
options.arch = options.arch.split(",")
for arch in options.arch:
if not arch in SUPPORTED_ARCHS:
print "Unknown architecture %s" % arch
return False
# Special processing of other options, sorted alphabetically.
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = shlex.split(options.extra_flags)
if options.j == 0:
options.j = multiprocessing.cpu_count()
while options.random_seed == 0:
options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647)
if not options.distribution_mode in DISTRIBUTION_MODES:
print "Unknown distribution mode %s" % options.distribution_mode
return False
if options.distribution_factor1 < 0.0:
print ("Distribution factor1 %s is out of range. Defaulting to 0.0"
% options.distribution_factor1)
options.distribution_factor1 = 0.0
if options.distribution_factor2 < 0.0:
print ("Distribution factor2 %s is out of range. Defaulting to 0.0"
% options.distribution_factor2)
options.distribution_factor2 = 0.0
if options.coverage < 0.0 or options.coverage > 1.0:
print ("Coverage %s is out of range. Defaulting to 0.4"
% options.coverage)
options.coverage = 0.4
if options.coverage_lift < 0:
print ("Coverage lift %s is out of range. Defaulting to 0"
% options.coverage_lift)
options.coverage_lift = 0
return True
def ShardTests(tests, shard_count, shard_run):
if shard_count < 2:
return tests
if shard_run < 1 or shard_run > shard_count:
print "shard-run not a valid number, should be in [1:shard-count]"
print "defaulting back to running all tests"
return tests
count = 0
shard = []
for test in tests:
if count % shard_count == shard_run - 1:
shard.append(test)
count += 1
return shard
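# Illustrative note (not part of the original script): with shard_count=3
# every third test (by position) lands in a shard, so shard_run=1 gets
# tests 0, 3, 6, ..., shard_run=2 gets tests 1, 4, 7, ..., and shard_run=3
# gets tests 2, 5, 8, ...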
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
exit_code = 0
workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
suite_paths = utils.GetSuitePaths(join(workspace, "test"))
if len(args) == 0:
suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
else:
args_suites = set()
for arg in args:
suite = arg.split(os.path.sep)[0]
if not suite in args_suites:
args_suites.add(suite)
suite_paths = [ s for s in suite_paths if s in args_suites ]
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
suites.append(suite)
if options.download_data:
for s in suites:
s.DownloadData()
for mode in options.mode:
for arch in options.arch:
try:
code = Execute(arch, mode, args, options, suites, workspace)
exit_code = exit_code or code
except KeyboardInterrupt:
return 2
return exit_code
def CalculateNTests(m, options):
"""Calculates the number of tests from m deopt points with exponential
coverage.
The coverage is expected to be between 0.0 and 1.0.
The 'coverage lift' lifts the coverage for tests with smaller m values.
"""
c = float(options.coverage)
l = float(options.coverage_lift)
return int(math.pow(m, (m * c + l) / (m + l)))
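# Illustrative note (not part of the original script): with the default
# coverage c=0.4 and coverage lift l=20, a test with m=100 deopt points gets
# int(100 ** ((100*0.4 + 20) / (100 + 20))) = int(100 ** 0.5) = 10 fuzzing
# runs, while a test with m=10 gets int(10 ** 0.8) = 6 -- the lift keeps
# coverage comparatively high for tests with few deopt points.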
def Execute(arch, mode, args, options, suites, workspace):
print(">>> Running tests for %s.%s" % (arch, mode))
dist = Distribution(options)
shell_dir = options.shell_dir
if not shell_dir:
if options.buildbot:
shell_dir = os.path.join(workspace, options.outdir, mode)
mode = mode.lower()
else:
shell_dir = os.path.join(workspace, options.outdir,
"%s.%s" % (arch, mode))
shell_dir = os.path.relpath(shell_dir)
# Populate context object.
mode_flags = MODE_FLAGS[mode]
timeout = options.timeout
if timeout == -1:
# Simulators are slow, therefore allow a longer default timeout.
if arch in SLOW_ARCHS:
timeout = 2 * TIMEOUT_DEFAULT
else:
timeout = TIMEOUT_DEFAULT
timeout *= TIMEOUT_SCALEFACTOR[mode]
ctx = context.Context(arch, mode, shell_dir,
mode_flags, options.verbose,
timeout, options.isolates,
options.command_prefix,
options.extra_flags,
False,
options.random_seed,
True)
# Find available test suites and read test cases from them.
variables = {
"arch": arch,
"asan": options.asan,
"deopt_fuzzer": True,
"gc_stress": False,
"isolates": options.isolates,
"mode": mode,
"no_i18n": False,
"no_snap": False,
"simulator": utils.UseSimulator(arch),
"system": utils.GuessOS(),
}
all_tests = []
num_tests = 0
test_id = 0
# Remember test case prototypes for the fuzzing phase.
test_backup = dict((s, []) for s in suites)
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases(ctx)
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
s.FilterTestCasesByStatus(False)
test_backup[s] = s.tests
analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
"--print-deopt-stress"]
s.tests = [ t.CopyAddingFlags(analysis_flags) for t in s.tests ]
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
test_id += 1
if num_tests == 0:
print "No tests to run."
return 0
print(">>> Collection phase")
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
runner = execution.Runner(suites, progress_indicator, ctx)
exit_code = runner.Run(options.j)
print(">>> Analysis phase")
num_tests = 0
test_id = 0
for s in suites:
test_results = {}
for t in s.tests:
for line in t.output.stdout.splitlines():
if line.startswith("=== Stress deopt counter: "):
test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
for t in s.tests:
if t.path not in test_results:
print "Missing results for %s" % t.path
if options.dump_results_file:
results_dict = dict((path, n) for (path, n) in test_results.iteritems())
with open("%s.%d.txt" % (options.dump_results_file, time.time()), "w") as f:
f.write(json.dumps(results_dict))
# Reset tests and redistribute the prototypes from the collection phase.
s.tests = []
if options.verbose:
print "Test distributions:"
for t in test_backup[s]:
max_deopt = test_results.get(t.path, 0)
if max_deopt == 0:
continue
n_deopt = CalculateNTests(max_deopt, options)
distribution = dist.Distribute(n_deopt, max_deopt)
if options.verbose:
print "%s %s" % (t.path, distribution)
for i in distribution:
fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
s.tests.append(t.CopyAddingFlags(fuzzing_flags))
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
test_id += 1
if num_tests == 0:
print "No tests to run."
return 0
print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
runner = execution.Runner(suites, progress_indicator, ctx)
code = runner.Run(options.j)
return exit_code or code
if __name__ == "__main__":
sys.exit(Main())
|
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import os
import time
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import six
from six.moves.urllib import parse as urllib
from tempest.common import glance_http
from tempest import exceptions
from tempest.lib.common import rest_client
from tempest.lib.common.utils import misc as misc_utils
from tempest.lib import exceptions as lib_exc
LOG = logging.getLogger(__name__)
class ImagesClient(rest_client.RestClient):
def __init__(self, auth_provider, catalog_type, region, **kwargs):
super(ImagesClient, self).__init__(
auth_provider, catalog_type, region, **kwargs)
self._http = None
self.dscv = kwargs.get("disable_ssl_certificate_validation")
self.ca_certs = kwargs.get("ca_certs")
def _image_meta_from_headers(self, headers):
meta = {'properties': {}}
for key, value in six.iteritems(headers):
if key.startswith('x-image-meta-property-'):
_key = key[22:]
meta['properties'][_key] = value
elif key.startswith('x-image-meta-'):
_key = key[13:]
meta[_key] = value
for key in ['is_public', 'protected', 'deleted']:
if key in meta:
meta[key] = meta[key].strip().lower() in ('t', 'true', 'yes',
'1')
for key in ['size', 'min_ram', 'min_disk']:
if key in meta:
try:
meta[key] = int(meta[key])
except ValueError:
pass
return meta
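# Illustrative note (not part of the original client): a response header
# 'x-image-meta-property-kernel_id: abc' ends up as
# meta['properties']['kernel_id'] = 'abc', 'x-image-meta-is_public: True'
# becomes meta['is_public'] = True, and 'x-image-meta-size: 1024' becomes
# meta['size'] = 1024.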
def _image_meta_to_headers(self, fields):
headers = {}
fields_copy = copy.deepcopy(fields)
copy_from = fields_copy.pop('copy_from', None)
if copy_from is not None:
headers['x-glance-api-copy-from'] = copy_from
for key, value in six.iteritems(fields_copy.pop('properties', {})):
headers['x-image-meta-property-%s' % key] = str(value)
for key, value in six.iteritems(fields_copy.pop('api', {})):
headers['x-glance-api-property-%s' % key] = str(value)
for key, value in six.iteritems(fields_copy):
headers['x-image-meta-%s' % key] = str(value)
return headers
def _get_file_size(self, obj):
"""Analyze file-like object and attempt to determine its size.
:param obj: file-like object, typically redirected from stdin.
:retval The file's size or None if it cannot be determined.
"""
# For large images, we need to supply the size of the
# image file. See LP Bugs #827660 and #845788.
if hasattr(obj, 'seek') and hasattr(obj, 'tell'):
try:
obj.seek(0, os.SEEK_END)
obj_size = obj.tell()
obj.seek(0)
return obj_size
except IOError as e:
if e.errno == errno.ESPIPE:
# Illegal seek. This means the user is trying
# to pipe image data to the client, e.g.
# echo testdata | bin/glance add blah..., or
# that stdin is empty, or that a file-like
# object which doesn't support 'seek/tell' has
# been supplied.
return None
else:
raise
else:
# Cannot determine size of input image
return None
def _get_http(self):
return glance_http.HTTPClient(auth_provider=self.auth_provider,
filters=self.filters,
insecure=self.dscv,
ca_certs=self.ca_certs)
def _create_with_data(self, headers, data):
resp, body_iter = self.http.raw_request('POST', '/v1/images',
headers=headers, body=data)
self._error_checker('POST', '/v1/images', headers, data, resp,
body_iter)
body = json.loads(''.join([c for c in body_iter]))
return rest_client.ResponseBody(resp, body)
def _update_with_data(self, image_id, headers, data):
url = '/v1/images/%s' % image_id
resp, body_iter = self.http.raw_request('PUT', url, headers=headers,
body=data)
self._error_checker('PUT', url, headers, data,
resp, body_iter)
body = json.loads(''.join([c for c in body_iter]))
return rest_client.ResponseBody(resp, body)
@property
def http(self):
if self._http is None:
self._http = self._get_http()
return self._http
def create_image(self, **kwargs):
headers = {}
data = kwargs.pop('data', None)
headers.update(self._image_meta_to_headers(kwargs))
if data is not None:
return self._create_with_data(headers, data)
resp, body = self.post('v1/images', None, headers)
self.expected_success(201, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def update_image(self, image_id, **kwargs):
headers = {}
data = kwargs.pop('data', None)
headers.update(self._image_meta_to_headers(kwargs))
if data is not None:
return self._update_with_data(image_id, headers, data)
url = 'v1/images/%s' % image_id
resp, body = self.put(url, None, headers)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_image(self, image_id):
url = 'v1/images/%s' % image_id
resp, body = self.delete(url)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def list_images(self, detail=False, **kwargs):
"""Return a list of all images filtered by input parameters.
Available params: see http://developer.openstack.org/
api-ref-image-v1.html#listImage-v1
Most parameters are passed to the API unchanged; the exception is:
:param changes_since: The name is changed to changes-since
"""
url = 'v1/images'
if detail:
url += '/detail'
properties = kwargs.pop('properties', {})
for key, value in six.iteritems(properties):
kwargs['property-%s' % key] = value
if kwargs.get('changes_since'):
kwargs['changes-since'] = kwargs.pop('changes_since')
if len(kwargs) > 0:
url += '?%s' % urllib.urlencode(kwargs)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
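# Illustrative note (not part of the original client): a call such as
# list_images(detail=True, properties={'os': 'linux'}, changes_since='2016-01-01')
# is sent as GET 'v1/images/detail?property-os=linux&changes-since=2016-01-01'
# (query parameter order depends on urllib.urlencode).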
def get_image_meta(self, image_id):
url = 'v1/images/%s' % image_id
resp, __ = self.head(url)
self.expected_success(200, resp.status)
body = self._image_meta_from_headers(resp)
return rest_client.ResponseBody(resp, body)
def show_image(self, image_id):
url = 'v1/images/%s' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
return rest_client.ResponseBodyData(resp, body)
def is_resource_deleted(self, id):
try:
self.get_image_meta(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'image_meta'
def list_image_members(self, image_id):
url = 'v1/images/%s/members' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_shared_images(self, tenant_id):
"""List shared images with the specified tenant"""
url = 'v1/shared-images/%s' % tenant_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def add_member(self, member_id, image_id, **kwargs):
"""Add a member to an image.
Available params: see http://developer.openstack.org/
api-ref-image-v1.html#addMember-v1
"""
url = 'v1/images/%s/members/%s' % (image_id, member_id)
body = json.dumps({'member': kwargs})
resp, __ = self.put(url, body)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def delete_member(self, member_id, image_id):
url = 'v1/images/%s/members/%s' % (image_id, member_id)
resp, __ = self.delete(url)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
# NOTE(afazekas): just for the wait function
def _get_image_status(self, image_id):
meta = self.get_image_meta(image_id)
status = meta['status']
return status
# NOTE(afazekas): Wait reinvented again. It is not in the correct layer
def wait_for_image_status(self, image_id, status):
"""Waits for a Image to reach a given status."""
start_time = time.time()
old_value = value = self._get_image_status(image_id)
while True:
dtime = time.time() - start_time
time.sleep(self.build_interval)
if value != old_value:
LOG.info('Value transition from "%s" to "%s" '
'in %d second(s).', old_value,
value, dtime)
if value == status:
return value
if value == 'killed':
raise exceptions.ImageKilledException(image_id=image_id,
status=status)
if dtime > self.build_timeout:
message = ('Time Limit Exceeded! (%ds) '
'while waiting for %s, '
'but we got %s.' %
(self.build_timeout, status, value))
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
old_value = value
value = self._get_image_status(image_id)
|
|
import datetime
import hypothesis.strategies as st
import pytest
import pytz
from dateutil.tz import tzlocal
from freezegun import freeze_time
from hypothesis import given
from todoman.cli import cli
from todoman.model import Database, FileTodo
def test_basic(tmpdir, runner, create):
result = runner.invoke(cli, ['list'], catch_exceptions=False)
assert not result.exception
assert result.output == ''
create(
'test.ics',
'SUMMARY:harhar\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'harhar' in result.output
def test_percent(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
'PERCENT-COMPLETE:78\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert '78%' in result.output
def test_list_inexistant(tmpdir, runner, create):
result = runner.invoke(cli, ['list', 'nonexistant'])
assert result.exception
assert 'Error: Invalid value for "lists":' in result.output
def test_show_existing(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
'DESCRIPTION:Lots of text. Yum!\n'
)
result = runner.invoke(cli, ['list'])
result = runner.invoke(cli, ['show', '1'])
assert not result.exception
assert 'harhar' in result.output
assert 'Lots of text. Yum!' in result.output
def test_show_inexistant(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
)
result = runner.invoke(cli, ['list'])
result = runner.invoke(cli, ['show', '2'])
assert result.exit_code == -2
assert result.output == 'No todo with id 2.\n'
def test_human(runner):
result = runner.invoke(cli, [
'new', '-l', 'default', '-d', 'tomorrow', 'hail belzebub'
])
assert not result.exception
assert 'belzebub' in result.output
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'belzebub' in result.output
@pytest.mark.xfail(reason='issue#9')
def test_two_events(tmpdir, runner):
tmpdir.join('default/test.ics').write(
'BEGIN:VCALENDAR\n'
'BEGIN:VTODO\n'
'SUMMARY:task one\n'
'END:VTODO\n'
'BEGIN:VTODO\n'
'SUMMARY:task two\n'
'END:VTODO\n'
'END:VCALENDAR'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert len(result.output.splitlines()) == 2
assert 'task one' in result.output
assert 'task two' in result.output
def test_default_command(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
)
result = runner.invoke(cli)
assert not result.exception
assert 'harhar' in result.output
def test_delete(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
result = runner.invoke(cli, ['delete', '1', '--yes'])
assert not result.exception
result = runner.invoke(cli, ['list'])
assert not result.exception
assert len(result.output.splitlines()) == 0
def test_copy(tmpdir, runner, create):
tmpdir.mkdir('other_list')
create(
'test.ics',
'SUMMARY:test_copy\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'test_copy' in result.output
assert 'default' in result.output
assert 'other_list' not in result.output
result = runner.invoke(cli, ['copy', '-l', 'other_list', '1'])
assert not result.exception
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'test_copy' in result.output
assert 'default' in result.output
assert 'other_list' in result.output
def test_move(tmpdir, runner, create):
tmpdir.mkdir('other_list')
create(
'test.ics',
'SUMMARY:test_move\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'test_move' in result.output
assert 'default' in result.output
assert 'other_list' not in result.output
result = runner.invoke(cli, ['move', '-l', 'other_list', '1'])
assert not result.exception
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'test_move' in result.output
assert 'default' not in result.output
assert 'other_list' in result.output
def test_dtstamp(tmpdir, runner, create):
"""
Test that we add the DTSTAMP to new entries as per RFC5545.
"""
result = runner.invoke(cli, ['new', '-l', 'default', 'test event'])
assert not result.exception
db = Database([tmpdir.join('default')],
tmpdir.join('/dtstamp_cache'))
todo = list(db.todos())[0]
assert todo.dtstamp is not None
assert todo.dtstamp.tzinfo is pytz.utc
def test_default_list(tmpdir, runner, create):
"""Test the default_list config parameter"""
result = runner.invoke(cli, ['new', 'test default list'])
assert result.exception
path = tmpdir.join('config')
path.write('default_list = default\n', 'a')
result = runner.invoke(cli, ['new', 'test default list'])
assert not result.exception
db = Database([tmpdir.join('default')],
tmpdir.join('/default_list'))
todo = list(db.todos())[0]
assert todo.summary == 'test default list'
@pytest.mark.parametrize(
'default_due, expected_due_hours', [(None, 24), (1, 1), (0, None)],
ids=['not specified', 'greater than 0', '0']
)
def test_default_due(
tmpdir, runner, create, default_due, expected_due_hours
):
"""Test setting the due date using the default_due config parameter"""
if default_due is not None:
path = tmpdir.join('config')
path.write('default_due = {}\n'.format(default_due), 'a')
runner.invoke(cli, ['new', '-l', 'default', 'aaa'])
db = Database([tmpdir.join('default')], tmpdir.join('/default_list'))
todo = list(db.todos())[0]
if expected_due_hours is None:
assert todo.due is None
else:
assert (todo.due - todo.created_at) == datetime.timedelta(
hours=expected_due_hours
)
@freeze_time(datetime.datetime.now())
def test_default_due2(tmpdir, runner, create, default_database):
cfg = tmpdir.join('config')
cfg.write('default_due = 24\n', 'a')
r = runner.invoke(cli, ['new', '-ldefault', '-dtomorrow', 'aaa'])
assert not r.exception
r = runner.invoke(cli, ['new', '-ldefault', 'bbb'])
assert not r.exception
r = runner.invoke(cli, ['new', '-ldefault', '-d', 'one hour', 'ccc'])
assert not r.exception
default_database.update_cache()
todos = {t.summary: t for t in default_database.todos(all=True)}
assert todos['aaa'].due.date() == todos['bbb'].due.date()
assert todos['ccc'].due == todos['bbb'].due - datetime.timedelta(hours=23)
def test_sorting_fields(tmpdir, runner, default_database):
tasks = []
for i in range(1, 10):
days = datetime.timedelta(days=i)
todo = FileTodo(new=True)
todo.list = next(default_database.lists())
todo.due = datetime.datetime.now() + days
todo.created_at = datetime.datetime.now() - days
todo.summary = 'harhar{}'.format(i)
tasks.append(todo)
todo.save()
fields = (
'id',
'uid',
'summary',
'due',
'priority',
'created_at',
'completed_at',
'dtstamp',
'status',
'description',
'location',
'categories',
)
@given(sort_key=st.lists(
st.sampled_from(fields + tuple('-' + x for x in fields)),
unique=True
))
def run_test(sort_key):
sort_key = ','.join(sort_key)
result = runner.invoke(cli, ['list', '--sort', sort_key])
assert not result.exception
assert result.exit_code == 0
assert len(result.output.strip().splitlines()) == len(tasks)
run_test()
def test_sorting_output(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:aaa\n'
'DUE;VALUE=DATE-TIME;TZID=ART:20160102T000000\n'
)
create(
'test2.ics',
'SUMMARY:bbb\n'
'DUE;VALUE=DATE-TIME;TZID=ART:20160101T000000\n'
)
examples = [
('-summary', ['aaa', 'bbb']),
('due', ['aaa', 'bbb'])
]
# Normal sorting, reversed by default
all_examples = [(['--sort', key], order) for key, order in examples]
# Testing --reverse, same exact output
all_examples.extend((['--reverse', '--sort', key], order)
for key, order in examples)
# Testing --no-reverse
all_examples.extend((['--no-reverse', '--sort', key], reversed(order))
for key, order in examples)
for args, order in all_examples:
result = runner.invoke(cli, ['list'] + args)
assert not result.exception
lines = result.output.splitlines()
for i, task in enumerate(order):
assert task in lines[i]
def test_sorting_null_values(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:aaa\n'
'PRIORITY:9\n'
)
create(
'test2.ics',
'SUMMARY:bbb\n'
'DUE;VALUE=DATE-TIME;TZID=ART:20160101T000000\n'
)
result = runner.invoke(cli)
assert not result.exception
assert 'bbb' in result.output.splitlines()[0]
assert 'aaa' in result.output.splitlines()[1]
@pytest.mark.parametrize('hours', [72, -72])
def test_color_due_dates(tmpdir, runner, create, hours):
due = datetime.datetime.now() + datetime.timedelta(hours=hours)
create(
'test.ics',
'SUMMARY:aaa\n'
'STATUS:IN-PROGRESS\n'
'DUE;VALUE=DATE-TIME;TZID=ART:{}\n'
.format(due.strftime('%Y%m%dT%H%M%S'))
)
result = runner.invoke(cli, ['--color', 'always'])
assert not result.exception
due_str = due.strftime('%Y-%m-%d')
if hours == 72:
assert result.output == \
' 1 [ ] {} aaa @default\x1b[0m\n'.format(due_str)
else:
assert result.output == \
' 1 [ ] \x1b[31m{}\x1b[0m aaa @default\x1b[0m\n'.format(due_str)
def test_flush(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:aaa\n'
'STATUS:COMPLETED\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
create(
'test2.ics',
'SUMMARY:bbb\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert ' 2 [ ] bbb @default' in result.output
result = runner.invoke(cli, ['flush'], input='y\n', catch_exceptions=False)
assert not result.exception
create(
'test2.ics',
'SUMMARY:bbb\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert ' 1 [ ] bbb @default' in result.output
def test_edit(runner, default_database):
todo = FileTodo()
todo.list = next(default_database.lists())
todo.summary = 'Eat paint'
todo.due = datetime.datetime(2016, 10, 3)
todo.save()
result = runner.invoke(cli, ['edit', '1', '--due', '2017-02-01'])
assert not result.exception
assert '2017-02-01' in result.output
default_database.update_cache()
todo = next(default_database.todos(all=True))
assert todo.due == datetime.datetime(2017, 2, 1, tzinfo=tzlocal())
assert todo.summary == 'Eat paint'
# TODO: test aware/naive datetime sorting
# TODO: test --grep
|
|
__author__ = 'frank'
import os.path
import traceback
from kvmagent import kvmagent
from zstacklib.utils import jsonobject
from zstacklib.utils import http
from zstacklib.utils import log
from zstacklib.utils import shell
from zstacklib.utils import linux
import zstacklib.utils.uuidhelper as uuidhelper
logger = log.get_logger(__name__)
class AgentResponse(object):
def __init__(self):
self.totalCapacity = None
self.availableCapacity = None
self.success = None
self.error = None
class RevertVolumeFromSnapshotRsp(AgentResponse):
def __init__(self):
super(RevertVolumeFromSnapshotRsp, self).__init__()
self.newVolumeInstallPath = None
class MergeSnapshotRsp(AgentResponse):
def __init__(self):
super(MergeSnapshotRsp, self).__init__()
self.size = None
class RebaseAndMergeSnapshotsRsp(AgentResponse):
def __init__(self):
super(RebaseAndMergeSnapshotsRsp, self).__init__()
self.size = None
class CheckBitsRsp(AgentResponse):
def __init__(self):
super(CheckBitsRsp, self).__init__()
self.existing = False
class GetMd5Rsp(AgentResponse):
def __init__(self):
super(GetMd5Rsp, self).__init__()
self.md5s = None
class GetBackingFileRsp(AgentResponse):
def __init__(self):
super(GetBackingFileRsp, self).__init__()
self.size = None
self.backingFilePath = None
class LocalStoragePlugin(kvmagent.KvmAgent):
INIT_PATH = "/localstorage/init"
GET_PHYSICAL_CAPACITY_PATH = "/localstorage/getphysicalcapacity"
CREATE_EMPTY_VOLUME_PATH = "/localstorage/volume/createempty"
CREATE_VOLUME_FROM_CACHE_PATH = "/localstorage/volume/createvolumefromcache"
DELETE_BITS_PATH = "/localstorage/delete"
UPLOAD_BIT_PATH = "/localstorage/sftp/upload"
DOWNLOAD_BIT_PATH = "/localstorage/sftp/download"
REVERT_SNAPSHOT_PATH = "/localstorage/snapshot/revert"
MERGE_SNAPSHOT_PATH = "/localstorage/snapshot/merge"
MERGE_AND_REBASE_SNAPSHOT_PATH = "/localstorage/snapshot/mergeandrebase"
OFFLINE_MERGE_PATH = "/localstorage/snapshot/offlinemerge"
CREATE_TEMPLATE_FROM_VOLUME = "/localstorage/volume/createtemplate"
CHECK_BITS_PATH = "/localstorage/checkbits"
REBASE_ROOT_VOLUME_TO_BACKING_FILE_PATH = "/localstorage/volume/rebaserootvolumetobackingfile"
VERIFY_SNAPSHOT_CHAIN_PATH = "/localstorage/snapshot/verifychain"
REBASE_SNAPSHOT_BACKING_FILES_PATH = "/localstorage/snapshot/rebasebackingfiles"
COPY_TO_REMOTE_BITS_PATH = "/localstorage/copytoremote"
GET_MD5_PATH = "/localstorage/getmd5"
CHECK_MD5_PATH = "/localstorage/checkmd5"
GET_BACKING_FILE_PATH = "/localstorage/volume/getbackingfile"
def start(self):
http_server = kvmagent.get_http_server()
http_server.register_async_uri(self.INIT_PATH, self.init)
http_server.register_async_uri(self.GET_PHYSICAL_CAPACITY_PATH, self.get_physical_capacity)
http_server.register_async_uri(self.CREATE_EMPTY_VOLUME_PATH, self.create_empty_volume)
http_server.register_async_uri(self.CREATE_VOLUME_FROM_CACHE_PATH, self.create_root_volume_from_template)
http_server.register_async_uri(self.DELETE_BITS_PATH, self.delete)
http_server.register_async_uri(self.DOWNLOAD_BIT_PATH, self.download_from_sftp)
http_server.register_async_uri(self.UPLOAD_BIT_PATH, self.upload_to_sftp)
http_server.register_async_uri(self.REVERT_SNAPSHOT_PATH, self.revert_snapshot)
http_server.register_async_uri(self.MERGE_SNAPSHOT_PATH, self.merge_snapshot)
http_server.register_async_uri(self.MERGE_AND_REBASE_SNAPSHOT_PATH, self.merge_and_rebase_snapshot)
http_server.register_async_uri(self.OFFLINE_MERGE_PATH, self.offline_merge_snapshot)
http_server.register_async_uri(self.CREATE_TEMPLATE_FROM_VOLUME, self.create_template_from_volume)
http_server.register_async_uri(self.CHECK_BITS_PATH, self.check_bits)
http_server.register_async_uri(self.REBASE_ROOT_VOLUME_TO_BACKING_FILE_PATH, self.rebase_root_volume_to_backing_file)
http_server.register_async_uri(self.VERIFY_SNAPSHOT_CHAIN_PATH, self.verify_backing_file_chain)
http_server.register_async_uri(self.REBASE_SNAPSHOT_BACKING_FILES_PATH, self.rebase_backing_files)
http_server.register_async_uri(self.COPY_TO_REMOTE_BITS_PATH, self.copy_bits_to_remote)
http_server.register_async_uri(self.GET_MD5_PATH, self.get_md5)
http_server.register_async_uri(self.CHECK_MD5_PATH, self.check_md5)
http_server.register_async_uri(self.GET_BACKING_FILE_PATH, self.get_backing_file_path)
self.path = None
def stop(self):
pass
@kvmagent.replyerror
def get_backing_file_path(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
out = shell.call("qemu-img info %s | grep 'backing file' | cut -d ':' -f 2" % cmd.path)
out = out.strip(' \t\r\n')
rsp = GetBackingFileRsp()
if out:
rsp.backingFilePath = out
rsp.size = os.path.getsize(out)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_md5(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetMd5Rsp()
rsp.md5s = []
for to in cmd.md5s:
md5 = shell.call("md5sum %s | cut -d ' ' -f 1" % to.path)
rsp.md5s.append({
'resourceUuid': to.resourceUuid,
'path': to.path,
'md5': md5
})
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def check_md5(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
for to in cmd.md5s:
dst_md5 = shell.call("md5sum %s | cut -d ' ' -f 1" % to.path)
if dst_md5 != to.md5:
raise Exception("MD5 unmatch. The file[uuid:%s, path:%s]'s md5 (src host:%s, dst host:%s)" %
(to.resourceUuid, to.path, to.md5, dst_md5))
rsp = AgentResponse()
return jsonobject.dumps(rsp)
def _get_disk_capacity(self):
return linux.get_disk_capacity_by_df(self.path)
@kvmagent.replyerror
def copy_bits_to_remote(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
for path in cmd.paths:
shell.call('rsync -a --relative %s --rsh="/usr/bin/sshpass -p %s ssh -o StrictHostKeyChecking=no -l %s" %s:/' %
(path, cmd.dstPassword, cmd.dstUsername, cmd.dstIp))
rsp = AgentResponse()
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def verify_backing_file_chain(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
for sp in cmd.snapshots:
if not os.path.exists(sp.path):
raise Exception('cannot find the file[%s]' % sp.path)
if sp.parentPath and not os.path.exists(sp.parentPath):
raise Exception('cannot find the backing file[%s]' % sp.parentPath)
if sp.parentPath:
out = shell.call("qemu-img info %s | grep 'backing file' | cut -d ':' -f 2" % sp.path)
out = out.strip(' \t\r\n')
if sp.parentPath != out:
raise Exception("resource[Snapshot or Volume, uuid:%s, path:%s]'s backing file[%s] is not equal to %s" %
(sp.snapshotUuid, sp.path, out, sp.parentPath))
return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def rebase_backing_files(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
for sp in cmd.snapshots:
if sp.parentPath:
linux.qcow2_rebase_no_check(sp.parentPath, sp.path)
return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def check_bits(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = CheckBitsRsp()
rsp.existing = os.path.exists(cmd.path)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_template_from_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
dirname = os.path.dirname(cmd.installPath)
if not os.path.exists(dirname):
os.makedirs(dirname, 0755)
linux.qcow2_create_template(cmd.volumePath, cmd.installPath)
logger.debug('successfully created template[%s] from volume[%s]' % (cmd.installPath, cmd.volumePath))
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def revert_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = RevertVolumeFromSnapshotRsp()
install_path = cmd.snapshotInstallPath
new_volume_path = os.path.join(os.path.dirname(install_path), '{0}.qcow2'.format(uuidhelper.uuid()))
linux.qcow2_clone(install_path, new_volume_path)
rsp.newVolumeInstallPath = new_volume_path
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def merge_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = MergeSnapshotRsp()
workspace_dir = os.path.dirname(cmd.workspaceInstallPath)
if not os.path.exists(workspace_dir):
os.makedirs(workspace_dir)
linux.qcow2_create_template(cmd.snapshotInstallPath, cmd.workspaceInstallPath)
rsp.size = os.path.getsize(cmd.workspaceInstallPath)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def merge_and_rebase_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
snapshots = cmd.snapshotInstallPaths
count = len(snapshots)
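# Illustrative note (not part of the original agent): snapshotInstallPaths
# appears to be ordered newest-first (snapshots[0] is taken as the latest
# below), so each snapshot is rebased onto the next older one
# (s[0] -> s[1] -> ... -> s[n-1]) before the newest snapshot is committed
# into workspaceInstallPath as a standalone template.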
for i in range(count):
if i+1 < count:
target = snapshots[i]
backing_file = snapshots[i+1]
linux.qcow2_rebase_no_check(backing_file, target)
latest = snapshots[0]
rsp = RebaseAndMergeSnapshotsRsp()
workspace_dir = os.path.dirname(cmd.workspaceInstallPath)
if not os.path.exists(workspace_dir):
os.makedirs(workspace_dir)
linux.qcow2_create_template(latest, cmd.workspaceInstallPath)
rsp.size = os.path.getsize(cmd.workspaceInstallPath)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def offline_merge_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
if not cmd.fullRebase:
linux.qcow2_rebase(cmd.srcPath, cmd.destPath)
else:
tmp = os.path.join(os.path.dirname(cmd.destPath), '%s.qcow2' % uuidhelper.uuid())
linux.qcow2_create_template(cmd.destPath, tmp)
shell.call("mv %s %s" % (tmp, cmd.destPath))
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_physical_capacity(self, req):
rsp = AgentResponse()
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def rebase_root_volume_to_backing_file(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
linux.qcow2_rebase_no_check(cmd.backingFilePath, cmd.rootVolumePath)
return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def init(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
self.path = cmd.path
if not os.path.exists(self.path):
os.makedirs(self.path, 0755)
rsp = AgentResponse()
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_empty_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
try:
dirname = os.path.dirname(cmd.installUrl)
if not os.path.exists(dirname):
os.makedirs(dirname)
if cmd.backingFile:
linux.qcow2_create_with_backing_file(cmd.backingFile, cmd.installUrl)
else:
linux.qcow2_create(cmd.installUrl, cmd.size)
except Exception as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = 'unable to create empty volume[uuid:%s, name:%s], %s' % (cmd.uuid, cmd.name, str(e))
rsp.success = False
return jsonobject.dumps(rsp)
logger.debug('successfully created empty volume[uuid:%s, size:%s] at %s' % (cmd.volumeUuid, cmd.size, cmd.installUrl))
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_root_volume_from_template(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
if not os.path.exists(cmd.templatePathInCache):
rsp.error = "UNABLE_TO_FIND_IMAGE_IN_CACHE"
rsp.success = False
return jsonobject.dumps(rsp)
dirname = os.path.dirname(cmd.installUrl)
if not os.path.exists(dirname):
os.makedirs(dirname, 0775)
linux.qcow2_clone(cmd.templatePathInCache, cmd.installUrl)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def delete(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
shell.call('rm -f %s' % cmd.path)
pdir = os.path.dirname(cmd.path)
linux.rmdir_if_empty(pdir)
logger.debug('successfully deleted %s' % cmd.path)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def upload_to_sftp(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
def upload():
if not os.path.exists(cmd.primaryStorageInstallPath):
raise kvmagent.KvmError('cannot find %s' % cmd.primaryStorageInstallPath)
linux.scp_upload(cmd.hostname, cmd.sshKey, cmd.primaryStorageInstallPath, cmd.backupStorageInstallPath)
try:
upload()
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def download_from_sftp(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
try:
linux.scp_download(cmd.hostname, cmd.sshKey, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath)
logger.debug('successfully downloaded %s/%s to %s' % (cmd.hostname, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath))
except Exception as e:
content = traceback.format_exc()
logger.warn(content)
err = "unable to download %s/%s, because %s" % (cmd.hostname, cmd.backupStorageInstallPath, str(e))
rsp.error = err
rsp.success = False
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
return jsonobject.dumps(rsp)
|
|
from __future__ import absolute_import, division, unicode_literals
from . import base
class Filter(base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
if previous1 is not None:
yield previous2, previous1, None
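# Illustrative note (not part of the original filter): for a token stream
# [t1, t2, t3], slider() yields (None, t1, t2), (t1, t2, t3) and
# (t2, t3, None), i.e. each token together with its predecessor and
# successor, so __iter__ below can decide from the neighbours whether a
# tag is optional.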
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
# is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
# not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
|
|
# Natural Language Toolkit: Interface to MaltParser
#
# Author: Dan Garrette <[email protected]>
#
# Copyright (C) 2001-2015 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
import os
import tempfile
import glob
from operator import add
from functools import reduce
import subprocess
from nltk.data import ZipFilePathPointer
from nltk.tag import RegexpTagger
from nltk.tokenize import word_tokenize
from nltk.internals import find_binary
from nltk.parse.api import ParserI
from nltk.parse.dependencygraph import DependencyGraph
class MaltParser(ParserI):
def __init__(self, tagger=None, mco=None, working_dir=None, additional_java_args=None):
"""
An interface for parsing with the Malt Parser.
:param mco: The name of the pre-trained model. If provided, training
will not be required, and MaltParser will use the model file in
${working_dir}/${mco}.mco.
:type mco: str
"""
self.config_malt()
self.mco = 'malt_temp' if mco is None else mco
self.working_dir = tempfile.gettempdir() if working_dir is None\
else working_dir
self.additional_java_args = [] if additional_java_args is None else additional_java_args
self._trained = mco is not None
if tagger is not None:
self.tagger = tagger
else:
self.tagger = RegexpTagger(
[(r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), # cardinal numbers
(r'(The|the|A|a|An|an)$', 'AT'), # articles
(r'.*able$', 'JJ'), # adjectives
(r'.*ness$', 'NN'), # nouns formed from adjectives
(r'.*ly$', 'RB'), # adverbs
(r'.*s$', 'NNS'), # plural nouns
(r'.*ing$', 'VBG'), # gerunds
(r'.*ed$', 'VBD'), # past tense verbs
(r'.*', 'NN') # nouns (default)
])
def config_malt(self, bin=None, verbose=False):
"""
Configure NLTK's interface to the ``malt`` package. This
searches for a directory containing the malt jar.
:param bin: The full path to the ``malt`` binary. If not
specified, then nltk will search the system for a ``malt``
binary; and if one is not found, it will raise a
``LookupError`` exception.
:type bin: str
"""
#: A list of directories that should be searched for the malt
#: executables. This list is used by ``config_malt`` when searching
#: for the malt executables.
_malt_path = ['.',
'/usr/lib/malt-1*',
'/usr/share/malt-1*',
'/usr/local/bin',
'/usr/local/malt-1*',
'/usr/local/bin/malt-1*',
'/usr/local/malt-1*',
'/usr/local/share/malt-1*']
# Expand wildcards in _malt_path:
malt_path = reduce(add, map(glob.glob, _malt_path))
# Find the malt binary.
self._malt_bin = find_binary('malt.jar', bin,
searchpath=malt_path, env_vars=['MALT_PARSER'],
url='http://www.maltparser.org/',
verbose=verbose)
def parse_sents(self, sentences, verbose=False):
"""
Use MaltParser to parse multiple sentences. Takes multiple sentences as a
list where each sentence is a list of words.
Each sentence will be automatically tagged with this MaltParser instance's
tagger.
:param sentences: Input sentences to parse
:type sentences: list(list(str))
:return: iter(DependencyGraph)
"""
tagged_sentences = [self.tagger.tag(sentence) for sentence in sentences]
return iter(self.tagged_parse_sents(tagged_sentences, verbose))
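# Illustrative usage sketch (not part of the original module; the model
# name below is hypothetical and this requires malt.jar plus a trained
# .mco file in working_dir):
#   mp = MaltParser(mco='engmalt.linear', working_dir='/tmp')
#   for graph_iter in mp.parse_sents([['John', 'sees', 'Mary']]):
#       print(next(graph_iter).tree())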
def tagged_parse(self, sentence, verbose=False):
"""
Use MaltParser to parse a sentence. Takes a sentence as a list of
(word, tag) tuples; the sentence must have already been tokenized and
tagged.
:param sentence: Input sentence to parse
:type sentence: list(tuple(str, str))
:return: iter(DependencyGraph) the possible dependency graph representations of the sentence
"""
return next(self.tagged_parse_sents([sentence], verbose))
def tagged_parse_sents(self, sentences, verbose=False):
"""
Use MaltParser to parse multiple sentences. Takes multiple sentences
where each sentence is a list of (word, tag) tuples.
The sentences must have already been tokenized and tagged.
:param sentences: Input sentences to parse
:type sentences: list(list(tuple(str, str)))
:return: iter(iter(``DependencyGraph``)) the dependency graph representation
of each sentence
"""
if not self._malt_bin:
raise Exception("MaltParser location is not configured. Call config_malt() first.")
if not self._trained:
raise Exception("Parser has not been trained. Call train() first.")
input_file = tempfile.NamedTemporaryFile(prefix='malt_input.conll',
dir=self.working_dir,
delete=False)
output_file = tempfile.NamedTemporaryFile(prefix='malt_output.conll',
dir=self.working_dir,
delete=False)
try:
for sentence in sentences:
for (i, (word, tag)) in enumerate(sentence, start=1):
input_str = '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' %\
(i, word, '_', tag, tag, '_', '0', 'a', '_', '_')
input_file.write(input_str.encode("utf8"))
input_file.write(b'\n\n')
input_file.close()
cmd = ['java'] + self.additional_java_args + ['-jar', self._malt_bin,
'-w', self.working_dir,
'-c', self.mco, '-i', input_file.name,
'-o', output_file.name, '-m', 'parse']
ret = self._execute(cmd, verbose)
if ret != 0:
raise Exception("MaltParser parsing (%s) failed with exit "
"code %d" % (' '.join(cmd), ret))
# Must return iter(iter(Tree))
return (iter([dep_graph]) for dep_graph in DependencyGraph.load(output_file.name))
finally:
input_file.close()
os.remove(input_file.name)
output_file.close()
os.remove(output_file.name)
def train(self, depgraphs, verbose=False):
"""
Train MaltParser from a list of ``DependencyGraph`` objects
:param depgraphs: list of ``DependencyGraph`` objects for training input data
"""
input_file = tempfile.NamedTemporaryFile(prefix='malt_train.conll',
dir=self.working_dir,
delete=False)
try:
input_str = ('\n'.join(dg.to_conll(10) for dg in depgraphs))
input_file.write(input_str.encode("utf8"))
input_file.close()
self.train_from_file(input_file.name, verbose=verbose)
finally:
input_file.close()
os.remove(input_file.name)
def train_from_file(self, conll_file, verbose=False):
"""
Train MaltParser from a file
:param conll_file: str for the filename of the training input data
"""
if not self._malt_bin:
raise Exception("MaltParser location is not configured. Call config_malt() first.")
# If conll_file is a ZipFilePathPointer, then we need to do some extra
# massaging
if isinstance(conll_file, ZipFilePathPointer):
input_file = tempfile.NamedTemporaryFile(prefix='malt_train.conll',
dir=self.working_dir,
delete=False)
try:
conll_str = conll_file.open().read()
conll_file.close()
input_file.write(conll_str)
input_file.close()
return self.train_from_file(input_file.name, verbose=verbose)
finally:
input_file.close()
os.remove(input_file.name)
cmd = ['java', '-jar', self._malt_bin, '-w', self.working_dir,
'-c', self.mco, '-i', conll_file, '-m', 'learn']
ret = self._execute(cmd, verbose)
if ret != 0:
raise Exception("MaltParser training (%s) "
"failed with exit code %d" %
(' '.join(cmd), ret))
self._trained = True
@staticmethod
def _execute(cmd, verbose=False):
output = None if verbose else subprocess.PIPE
p = subprocess.Popen(cmd, stdout=output, stderr=output)
return p.wait()
def demo():
dg1 = DependencyGraph("""1 John _ NNP _ _ 2 SUBJ _ _
2 sees _ VB _ _ 0 ROOT _ _
3 a _ DT _ _ 4 SPEC _ _
4 dog _ NN _ _ 2 OBJ _ _
""")
dg2 = DependencyGraph("""1 John _ NNP _ _ 2 SUBJ _ _
2 walks _ VB _ _ 0 ROOT _ _
""")
verbose = False
maltParser = MaltParser()
maltParser.train([dg1,dg2], verbose=verbose)
maltParser.parse_one(['John','sees','Mary'], verbose=verbose).tree().pprint()
maltParser.parse_one(['a','man','runs'], verbose=verbose).tree().pprint()
next(maltParser.tagged_parse([('John','NNP'),('sees','VB'),('Mary','NNP')], verbose)).tree().pprint()
if __name__ == '__main__':
demo()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import datetime
import os.path
import sys
import code
import cpp_util
import model
try:
import jinja2
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party'))
import jinja2
class _PpapiGeneratorBase(object):
"""A base class for ppapi generators.
Implementations should set TEMPLATE_NAME to a string containing the name of
the template file without its extension. The template will be rendered with
the following symbols available:
name: A string containing the name of the namespace.
enums: A list of enums within the namespace.
types: A list of types within the namespace, sorted such that no element
depends on an earlier element.
events: A dict of events within the namespace.
functions: A dict of functions within the namespace.
year: An int containing the current year.
source_file: The name of the input file.
"""
def __init__(self, namespace):
self._namespace = namespace
self._required_types = {}
self._array_types = set()
self._optional_types = set()
self._optional_array_types = set()
self._dependencies = collections.OrderedDict()
self._types = []
self._enums = []
self.jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),
'templates', 'ppapi')))
self._SetupFilters()
self._ResolveTypeDependencies()
def _SetupFilters(self):
self.jinja_environment.filters.update({
'ppapi_type': self.ToPpapiType,
'classname': cpp_util.Classname,
'enum_value': self.EnumValueName,
'return_type': self.GetFunctionReturnType,
'format_param_type': self.FormatParamType,
'needs_optional': self.NeedsOptional,
'needs_array': self.NeedsArray,
'needs_optional_array': self.NeedsOptionalArray,
'has_array_outs': self.HasArrayOuts,
})
def Render(self, template_name, values):
generated_code = code.Code()
template = self.jinja_environment.get_template(
'%s.template' % template_name)
generated_code.Append(template.render(values))
return generated_code
def Generate(self):
"""Generates a Code object for a single namespace."""
return self.Render(self.TEMPLATE_NAME, {
'name': self._namespace.name,
'enums': self._enums,
'types': self._types,
'events': self._namespace.events,
'functions': self._namespace.functions,
# TODO(sammc): Don't change years when regenerating existing output files.
'year': datetime.date.today().year,
'source_file': self._namespace.source_file,
})
def _ResolveTypeDependencies(self):
"""Calculates the transitive closure of the types in _required_types.
    Partitions the required types into self._enums and self._types. The list
    of struct types is ordered such that no type depends on a type later in
    the list.
"""
if self._namespace.functions:
for function in self._namespace.functions.itervalues():
self._FindFunctionDependencies(function)
if self._namespace.events:
for event in self._namespace.events.itervalues():
self._FindFunctionDependencies(event)
resolved_types = set()
while resolved_types < set(self._required_types):
for typename in sorted(set(self._required_types) - resolved_types):
type_ = self._required_types[typename]
self._dependencies.setdefault(typename, set())
for member in type_.properties.itervalues():
self._RegisterDependency(member, self._NameComponents(type_))
resolved_types.add(typename)
while self._dependencies:
for name, deps in self._dependencies.items():
if not deps:
if (self._required_types[name].property_type ==
model.PropertyType.ENUM):
self._enums.append(self._required_types[name])
else:
self._types.append(self._required_types[name])
for deps in self._dependencies.itervalues():
deps.discard(name)
del self._dependencies[name]
break
else:
raise ValueError('Circular dependency %s' % self._dependencies)
def _FindFunctionDependencies(self, function):
for param in function.params:
self._RegisterDependency(param, None)
if function.callback:
for param in function.callback.params:
self._RegisterDependency(param, None)
if function.returns:
self._RegisterTypeDependency(function.returns, None, False, False)
def _RegisterDependency(self, member, depender):
self._RegisterTypeDependency(member.type_, depender, member.optional, False)
def _RegisterTypeDependency(self, type_, depender, optional, array):
if type_.property_type == model.PropertyType.ARRAY:
self._RegisterTypeDependency(type_.item_type, depender, optional, True)
elif type_.property_type == model.PropertyType.REF:
self._RegisterTypeDependency(self._namespace.types[type_.ref_type],
depender, optional, array)
elif type_.property_type in (model.PropertyType.OBJECT,
model.PropertyType.ENUM):
name_components = self._NameComponents(type_)
self._required_types[name_components] = type_
if depender:
self._dependencies.setdefault(depender, set()).add(
name_components)
if array:
self._array_types.add(name_components)
if optional:
self._optional_array_types.add(name_components)
elif optional:
self._optional_types.add(name_components)
@staticmethod
def _NameComponents(entity):
"""Returns a tuple of the fully-qualified name of an entity."""
names = []
while entity:
if (not isinstance(entity, model.Type) or
entity.property_type != model.PropertyType.ARRAY):
names.append(entity.name)
entity = entity.parent
return tuple(reversed(names[:-1]))
def ToPpapiType(self, type_, array=False, optional=False):
"""Returns a string containing the name of the Pepper C type for |type_|.
If array is True, returns the name of an array of |type_|. If optional is
True, returns the name of an optional |type_|. If both array and optional
are True, returns the name of an optional array of |type_|.
"""
if isinstance(type_, model.Function) or type_.property_type in (
model.PropertyType.OBJECT, model.PropertyType.ENUM):
return self._FormatPpapiTypeName(
array, optional, '_'.join(
cpp_util.Classname(s) for s in self._NameComponents(type_)),
namespace=cpp_util.Classname(self._namespace.name))
elif type_.property_type == model.PropertyType.REF:
return self.ToPpapiType(self._namespace.types[type_.ref_type],
optional=optional, array=array)
elif type_.property_type == model.PropertyType.ARRAY:
return self.ToPpapiType(type_.item_type, array=True,
optional=optional)
elif type_.property_type == model.PropertyType.STRING and not array:
return 'PP_Var'
elif array or optional:
if type_.property_type in self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP:
return self._FormatPpapiTypeName(
array, optional,
self._PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP[type_.property_type], '')
return self._PPAPI_PRIMITIVE_TYPE_MAP.get(type_.property_type, 'PP_Var')
_PPAPI_PRIMITIVE_TYPE_MAP = {
model.PropertyType.BOOLEAN: 'PP_Bool',
model.PropertyType.DOUBLE: 'double_t',
model.PropertyType.INT64: 'int64_t',
model.PropertyType.INTEGER: 'int32_t',
}
_PPAPI_COMPOUND_PRIMITIVE_TYPE_MAP = {
model.PropertyType.BOOLEAN: 'Bool',
model.PropertyType.DOUBLE: 'Double',
model.PropertyType.INT64: 'Int64',
model.PropertyType.INTEGER: 'Int32',
model.PropertyType.STRING: 'String',
}
@staticmethod
def _FormatPpapiTypeName(array, optional, name, namespace=''):
if namespace:
namespace = '%s_' % namespace
if array:
if optional:
return 'PP_%sOptional_%s_Array' % (namespace, name)
return 'PP_%s%s_Array' % (namespace, name)
if optional:
return 'PP_%sOptional_%s' % (namespace, name)
return 'PP_%s%s' % (namespace, name)
def NeedsOptional(self, type_):
"""Returns True if an optional |type_| is required."""
return self._NameComponents(type_) in self._optional_types
def NeedsArray(self, type_):
"""Returns True if an array of |type_| is required."""
return self._NameComponents(type_) in self._array_types
def NeedsOptionalArray(self, type_):
"""Returns True if an optional array of |type_| is required."""
return self._NameComponents(type_) in self._optional_array_types
def FormatParamType(self, param):
"""Formats the type of a parameter or property."""
return self.ToPpapiType(param.type_, optional=param.optional)
@staticmethod
def GetFunctionReturnType(function):
return 'int32_t' if function.callback or function.returns else 'void'
def EnumValueName(self, enum_value, enum_type):
"""Returns a string containing the name for an enum value."""
return '%s_%s' % (self.ToPpapiType(enum_type).upper(),
enum_value.name.upper())
def _ResolveType(self, type_):
if type_.property_type == model.PropertyType.REF:
return self._ResolveType(self._namespace.types[type_.ref_type])
if type_.property_type == model.PropertyType.ARRAY:
return self._ResolveType(type_.item_type)
return type_
def _IsOrContainsArray(self, type_):
if type_.property_type == model.PropertyType.ARRAY:
return True
type_ = self._ResolveType(type_)
if type_.property_type == model.PropertyType.OBJECT:
return any(self._IsOrContainsArray(param.type_)
for param in type_.properties.itervalues())
return False
def HasArrayOuts(self, function):
"""Returns True if the function produces any arrays as outputs.
This includes arrays that are properties of other objects.
"""
if function.callback:
for param in function.callback.params:
if self._IsOrContainsArray(param.type_):
return True
return function.returns and self._IsOrContainsArray(function.returns)
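# -----------------------------------------------------------------------------
# Illustrative helper (not part of the Chromium generator): example PP_* names
# produced by _FormatPpapiTypeName above for a hypothetical type 'Foo' in a
# hypothetical namespace 'Bar'. The expected values are noted in the comments.
# -----------------------------------------------------------------------------
def _ppapi_type_name_examples_sketch():
  """Returns example names produced by _FormatPpapiTypeName, for reference."""
  return [
      # -> 'PP_Bar_Foo'
      _PpapiGeneratorBase._FormatPpapiTypeName(False, False, 'Foo',
                                               namespace='Bar'),
      # -> 'PP_Bar_Foo_Array'
      _PpapiGeneratorBase._FormatPpapiTypeName(True, False, 'Foo',
                                               namespace='Bar'),
      # -> 'PP_Bar_Optional_Foo_Array'
      _PpapiGeneratorBase._FormatPpapiTypeName(True, True, 'Foo',
                                               namespace='Bar'),
  ]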
class _IdlGenerator(_PpapiGeneratorBase):
TEMPLATE_NAME = 'idl'
class _GeneratorWrapper(object):
def __init__(self, generator_factory):
self._generator_factory = generator_factory
def Generate(self, namespace):
return self._generator_factory(namespace).Generate()
class PpapiGenerator(object):
def __init__(self):
self.idl_generator = _GeneratorWrapper(_IdlGenerator)
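# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the Chromium generator): the dependency-
# resolution loop used by _PpapiGeneratorBase._ResolveTypeDependencies above,
# reduced to plain dicts and sets. The mapping passed in is made up by the
# caller for demonstration.
# -----------------------------------------------------------------------------
import collections

def _order_by_dependencies_sketch(dependencies):
  """Orders names so that no name depends on a later one.
  |dependencies| maps each name to the set of names it depends on. Raises
  ValueError if a cycle prevents a complete ordering.
  Example: _order_by_dependencies_sketch({'B': {'A'}, 'A': set()}) -> ['A', 'B']
  """
  remaining = collections.OrderedDict(
      (name, set(deps)) for name, deps in sorted(dependencies.items()))
  ordered = []
  while remaining:
    for name, deps in remaining.items():
      if not deps:
        ordered.append(name)
        del remaining[name]
        for other_deps in remaining.values():
          other_deps.discard(name)
        break
    else:
      raise ValueError('Circular dependency %s' % remaining)
  return ordered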
|
|
"""
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Kitchen environment for long horizon manipulation """
import collections
from typing import Dict, Sequence
from dm_control.mujoco import engine
from gym import spaces
import numpy as np
from adept_envs.components.robot import RobotComponentBuilder, RobotState
from adept_envs.franka.base_env import BaseFrankaEnv
from adept_envs.utils.resources import get_asset_path
from adept_envs.simulation.sim_scene import SimBackend
import pickle
ASSET_PATH = 'adept_envs/franka/assets/franka_microwave_cabinet_slider.xml'
import gym
DEFAULT_OBSERVATION_KEYS = (
'qp',
'obj_qp',
'mocap_pos',
# 'mocap_quat',
'goal'
)
import sys
sys.path.append(".")
from rlkit.torch.networks import ConcatMlp, Mlp
import torch
import torch.nn as nn
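# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original environment): the discrete goal
# indexing used by FrankaMicrowaveCabinetSlider.check_goal_completion below.
# Object joint positions are thresholded into open/closed bits and two of those
# bits are packed into an index in {0, 1, 2, 3}. Thresholds are copied from the
# method below; the input positions are made up by the caller.
# ------------------------------------------------------------------------------
def _goal_index_sketch(obj_qp):
    """Map the 4 object joint positions to a discrete goal index in {0..3}."""
    obj_qp = np.asarray(obj_qp)
    max_objs = np.array([0.17, 1, 0.6, -0.05])
    min_objs = np.array([0.08, 0.1, 0.2, -0.2])
    bits = np.array([0, 0, 0, 1])
    bits = np.where(obj_qp > max_objs, 1, np.where(obj_qp < min_objs, 0, bits))
    return int(2 * bits[0] + bits[2])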
class FrankaMicrowaveCabinetSlider(BaseFrankaEnv):
# Number of degrees of freedom of all objects.
N_DOF_OBJ = 4
def __init__(self,
asset_path: str = ASSET_PATH,
observation_keys: Sequence[str] = DEFAULT_OBSERVATION_KEYS,
frame_skip: int = 40,
use_raw_actions: bool = False,
camera_settings=dict(
distance=2.5,
azimuth=66,
elevation=-35,),
eval_mode=False,
attempt_limit=50,
reset_frequency=-1,
idx_completion=False,
learned_model=False,
learned_model_path=None,
counts_enabled=False,
**kwargs):
"""Initializes the environment.
Args:
asset_path: The XML model file to load.
observation_keys: The keys in `get_obs_dict` to concatenate as the
observations returned by `step` and `reset`.
frame_skip: The number of simulation steps per environment step.
"""
self._eval_mode = eval_mode
self.reset_counter = 0
self._reset_frequency = reset_frequency
self._idx_completion = idx_completion
self.current_idx = 0
self._counts_enabled = counts_enabled
super().__init__(
sim_model=get_asset_path(asset_path),
observation_keys=observation_keys,
frame_skip=frame_skip,
camera_settings=camera_settings,
sim_backend=SimBackend.DM_CONTROL,
**kwargs)
self.commanded_start = -1
self.commanded_goal = -1
self.goal = np.zeros(10)
self.use_raw_actions = use_raw_actions
self.init_qpos = self.sim.model.key_qpos[0].copy()
self.init_qvel = self.sim.model.key_qvel[0].copy()
self.labeled_goals = pickle.load(open('sim_slider_cabinet_labeled_goals.pkl', 'rb'))
self.adjacency_matrix = pickle.load(open('sim_slider_cabinet_adjacency_matrix.pkl', 'rb'))
self._counts = np.zeros(self.adjacency_matrix.shape[0])
self.midpoint_pos = np.array([-0.440, 0.152, 2.226])
self.range = np.array([0.035, 0.035, 0.02])
self.attempt_counter = 0
self.attempt_limit = attempt_limit
self.mocap_pos_clip_lower = np.array([-0.85, 0., 1.8])
self.mocap_pos_clip_upper = np.array([0.55, 0.5, 2.7])
# TODO: Configure robot
self.learned_model = learned_model
self.learned_model_path = learned_model_path
self.model = None
if self.learned_model:
self.model = Mlp(input_size=4,
output_size=4,
hidden_sizes=(256, 256, 256))
dat = torch.load(self.learned_model_path)
state_dict = dat.state_dict()
self.model.load_state_dict(state_dict)
@property
def action_space(self):
return gym.spaces.Box(-1, 1, shape=(5,))
def _configure_robot(self, builder: RobotComponentBuilder):
"""Configures the robot component."""
super()._configure_robot(builder)
def _preprocess_action(self, action: np.ndarray) -> np.ndarray:
""" If using raw actions, there is no need to do any processing to the action array."""
if self.use_raw_actions:
return action
else:
return super()._preprocess_action(action)
def _reset(self):
pass
def reset(self):
"""Resets the environment.
Returns:
The initial observation of the environment after resetting.
"""
if self.attempt_counter >= self.attempt_limit:
self.reset_counter = 0
self.last_action = None
# self.sim.reset()
# self.sim.forward()
# """Resets the environment."""
# self.robot.set_state({
# 'arm': RobotState(
# qpos=self.init_qpos[0:self.N_DOF_ARM],
# qvel=np.zeros(self.N_DOF_ARM)),
# 'gripper': RobotState(
# qpos=self.init_qpos[self.N_DOF_ARM:self.N_DOF_ARM +
# self.N_DOF_GRIPPER],
# qvel=np.zeros(self.N_DOF_GRIPPER))
# })
# Choose a random state from labeled goals as the reset state
if self._eval_mode or self.reset_counter == 0 or \
(self._reset_frequency != -1 and self.reset_counter % self._reset_frequency == 0):
print("Resetting the environment fully")
if self.commanded_start == -1:
curr_goal_idx = np.random.randint(4)
else:
curr_goal_idx = self.commanded_start
print("RESET TO GOAL POSITION", curr_goal_idx)
li = 0 #np.random.choice(np.arange(len(self.labeled_goals[curr_goal_idx])))
curr_goal = self.labeled_goals[curr_goal_idx][li]
# Choose a random state from next state in relabeled goals as the goal
# Forward
new_qpos = np.zeros(13)
new_qpos[:7] = np.array([-2.64311209, -1.76372997, -0.23182923, -2.1470029 , 2.55216266, -0.44102682, -0.01343831])
new_qpos[7:9] = np.array([0.1, 0.1])
new_qpos[9:] = curr_goal[2:6]
self.sim.data.qpos[:] = new_qpos.copy()
self.sim.data.mocap_pos[:] = curr_goal[6:9]
for _ in range(100):
self.sim.step()
self.robot.step({
'gripper': 1*np.ones(2)
}, True)
if self.commanded_goal == -1:
next_index = np.random.choice(np.where(self.adjacency_matrix[curr_goal_idx] > 0)[0])
else:
next_index = self.commanded_goal
next_li = 0 #np.random.choice(np.arange(len(self.labeled_goals[next_index])))
self.goal = np.ones((10,))*next_index #self.labeled_goals[next_index][next_li][:13]
self.goal_idx = next_index
self.attempt_counter = 0
print("NEXT_GOAL", next_index)
else:
print("Not resetting")
# TODO: Check if current goal is accomplished
if self.check_goal_completion(self.get_obs_dict()['obj_qp']) == self.goal_idx:
curr_goal_idx = self.goal_idx
next_index = np.random.choice(np.where(self.adjacency_matrix[curr_goal_idx] > 0)[0])
next_index = self.learned_goal_select(next_index)
next_li = np.random.choice(np.arange(len(self.labeled_goals[next_index])))
self.goal = np.ones((10,))*next_index #self.labeled_goals[next_index][next_li][:13]
self.goal_idx = next_index
self.attempt_counter = 0
print("GOING TO GOAL %d"%self.goal_idx)
else:
self.attempt_counter += 1
# Move arm back to the middle
obj_qp = self.get_obs_dict()['obj_qp'].copy()
curr_goal_idx = self.goal_idx
li = np.random.choice(np.arange(len(self.labeled_goals[curr_goal_idx])))
curr_goal = self.labeled_goals[curr_goal_idx][li]
# Choose a random state from next state in relabeled goals as the goal
# Forward
new_qpos = np.zeros(13)
new_qpos[:7] = np.array([-2.64311209, -1.76372997, -0.23182923, -2.1470029 , 2.55216266, -0.44102682, -0.01343831])
new_qpos[7:9] = np.array([0.1, 0.1])
new_qpos[9:] = obj_qp
self.sim.data.qpos[:] = new_qpos.copy()
self.sim.data.mocap_pos[:] = curr_goal[6:9]
for _ in range(100):
self.sim.step()
self.robot.step({
'gripper': 1*np.ones(2)
}, True)
# else keep going with current goal
obs_dict = self.get_obs_dict()
self.last_obs_dict = obs_dict
self.last_reward_dict = None
self.last_score_dict = None
self.is_done = False
self.step_count = 0
self.reset_counter += 1
self.current_idx = self.check_goal_completion(self.get_obs_dict()['obj_qp'][None,:].squeeze(axis=0))
return self._get_obs(obs_dict)
def learned_goal_select(self, goal_selected):
if self.learned_model:
print("IN LEARNED MODEL")
o = self._get_obs(self.get_obs_dict())[2:6]
input_x = torch.Tensor(o)[None, :]
output_x = torch.nn.Softmax()(self.model(input_x)*np.exp(-self._counts)).detach().numpy()[0]
goal_selected = np.random.choice(range(4), p=output_x)
print("LEARNED LIKELIHOOD PREDICTIONS " + str(output_x))
# Updating counts
curr_count = np.zeros((self._counts.shape[0],))
curr_count[goal_selected] += 1
self.update_counts(curr_count)
return goal_selected
def update_counts(self, new_counts):
if self._counts_enabled:
self._counts += new_counts
def check_goal_completion(self, curr_pos):
max_objs = np.array([0.17, 1, 0.6, -0.05])
min_objs = np.array([0.08, 0.1, 0.2, -0.2])
init_bitflips = np.array([0, 0, 0, 1])
curr_bitflips = init_bitflips.copy()
for j in range(4):
if curr_pos[j] > max_objs[j]:
curr_bitflips[j] = 1
elif curr_pos[j] < min_objs[j]:
curr_bitflips[j] = 0
new_idx = 2 * curr_bitflips[0] + curr_bitflips[2]
return new_idx
def _step(self, action: np.ndarray):
"""Applies an action to the robot."""
# TODO: How do deal with goal changing?
        denormalize = not self.use_raw_actions
current_pos = self.sim.data.mocap_pos.copy()
meanval = (self.mocap_pos_clip_upper + self.mocap_pos_clip_lower)/2.0
rng = (self.mocap_pos_clip_upper - self.mocap_pos_clip_lower)/2.0
# new_pos = action[:3]*rng + meanval #current_pos + action[:3]*self.range
new_pos = current_pos + action[:3]*self.range
new_pos = np.clip(new_pos, self.mocap_pos_clip_lower, self.mocap_pos_clip_upper)
self.sim.data.mocap_pos[:] = new_pos.copy()
self.robot.step({
'gripper': action[-2:]
}, denormalize)
def get_obs_dict(self):
"""Returns the current observation of the environment.
Returns:
A dictionary of observation values. This should be an ordered
dictionary if `observation_keys` isn't set.
"""
arm_state = self.robot.get_state('arm')
gripper_state = self.robot.get_state('gripper')
# obj_state = self.robot.get_state('object')
obs_dict = collections.OrderedDict((
('t', self.robot.time),
('qp', np.concatenate([gripper_state.qpos])),
('qv', np.concatenate([gripper_state.qvel])),
('obj_qp', self.sim.data.qpos[-self.N_DOF_OBJ:]),
('mocap_pos', self.sim.data.mocap_pos.copy()),
('mocap_quat', self.sim.data.mocap_quat.copy()),
('goal', self.goal),
))
return obs_dict
def set_goal(self, goal):
self.goal = goal
def get_goal(self):
return self.goal
def get_score_dict(
self,
obs_dict: Dict[str, np.ndarray],
reward_dict: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
"""Returns a standardized measure of success for the environment."""
score_dict = collections.OrderedDict()
return score_dict
# Only include goal
@property
def goal_space(self):
len_obs = self.observation_space.low.shape[0]
env_lim = np.abs(self.observation_space.low[0])
return spaces.Box(low=-env_lim, high=env_lim, shape=(len_obs // 2,))
def render(self, mode='human'):
if mode == 'rgb_array':
camera = engine.MovableCamera(self.sim, 84, 84)
camera.set_pose(
distance=2.2, lookat=[-0.2, .5, 2.1], azimuth=70, elevation=-35)
img = camera.render()
return img
else:
super().render()
def get_reward_dict(self,
action: np.ndarray,
obs_dict: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
"""Returns the reward for the given action and observation."""
# TODO: Check if the goal index is satisfied.
max_delta_slider = 0.22874171
max_delta_cabinet = 1.01982685
if not self._idx_completion:
g = self.labeled_goals[self.goal_idx][0]
if self.goal_idx == 0:
if self.current_idx == 2 or self.current_idx == 0:
target_pos = self.sim.data.site_xpos[self.sim.model.site_name2id('slide_site')]
arm_error = obs_dict['mocap_pos'] - target_pos
slider_error = (obs_dict['obj_qp'][:, 0:1] - g[2:3])/max_delta_slider
reward_dict = collections.OrderedDict((
('ee_slider', np.array([-20*np.float(np.linalg.norm(slider_error))])),
('arm_dist', np.array([-np.float(np.linalg.norm(arm_error))])),
))
elif self.current_idx == 1 or self.current_idx == 3:
target_pos = self.sim.data.site_xpos[self.sim.model.site_name2id('hinge_site2')]
arm_error = obs_dict['mocap_pos'] - target_pos
slider_error = (obs_dict['obj_qp'][:, 2:3] - g[4:5])/max_delta_cabinet
reward_dict = collections.OrderedDict((
('ee_slider', np.array([-20*np.float(np.linalg.norm(slider_error))])),
('arm_dist', np.array([-np.float(np.linalg.norm(arm_error))])),
))
return reward_dict
elif self.goal_idx == 1:
if self.current_idx == 1 or self.current_idx == 3:
target_pos = self.sim.data.site_xpos[self.sim.model.site_name2id('slide_site')]
arm_error = obs_dict['mocap_pos'] - target_pos
slider_error = (obs_dict['obj_qp'][:, 0:1] - g[2:3])/max_delta_slider
reward_dict = collections.OrderedDict((
('ee_slider', np.array([-20*np.float(np.linalg.norm(slider_error))])),
('arm_dist', np.array([-np.float(np.linalg.norm(arm_error))])),
))
elif self.current_idx == 2 or self.current_idx == 0:
target_pos = self.sim.data.site_xpos[self.sim.model.site_name2id('hinge_site2')]
arm_error = obs_dict['mocap_pos'] - target_pos
slider_error = (obs_dict['obj_qp'][:, 2:3] - g[4:5])/max_delta_cabinet
reward_dict = collections.OrderedDict((
('ee_slider', np.array([-20*np.float(np.linalg.norm(slider_error))])),
('arm_dist', np.array([-np.float(np.linalg.norm(arm_error))])),
))
return reward_dict
elif self.goal_idx == 2:
if self.current_idx == 0 or self.current_idx == 2:
target_pos = self.sim.data.site_xpos[self.sim.model.site_name2id('slide_site')]
arm_error = obs_dict['mocap_pos'] - target_pos
slider_error = (obs_dict['obj_qp'][:, 0:1] - g[2:3])/max_delta_slider
reward_dict = collections.OrderedDict((
('ee_slider', np.array([-20*np.float(np.linalg.norm(slider_error))])),
('arm_dist', np.array([-np.float(np.linalg.norm(arm_error))])),
))
elif self.current_idx == 1 or self.current_idx == 3:
target_pos = self.sim.data.site_xpos[self.sim.model.site_name2id('hinge_site2')]
arm_error = obs_dict['mocap_pos'] - target_pos
slider_error = (obs_dict['obj_qp'][:, 2:3] - g[4:5])/max_delta_cabinet
reward_dict = collections.OrderedDict((
('ee_slider', np.array([-20*np.float(np.linalg.norm(slider_error))])),
('arm_dist', np.array([-np.float(np.linalg.norm(arm_error))])),
))
return reward_dict
elif self.goal_idx == 3:
if self.current_idx == 1 or self.current_idx == 3:
target_pos = self.sim.data.site_xpos[self.sim.model.site_name2id('slide_site')]
arm_error = obs_dict['mocap_pos'] - target_pos
slider_error = (obs_dict['obj_qp'][:, 0:1] - g[2:3])/max_delta_slider
reward_dict = collections.OrderedDict((
('ee_slider', np.array([-20*np.float(np.linalg.norm(slider_error))])),
('arm_dist', np.array([-np.float(np.linalg.norm(arm_error))])),
))
elif self.current_idx == 0 or self.current_idx == 2:
target_pos = self.sim.data.site_xpos[self.sim.model.site_name2id('hinge_site2')]
arm_error = obs_dict['mocap_pos'] - target_pos
slider_error = (obs_dict['obj_qp'][:, 2:3] - g[4:5])/max_delta_cabinet
reward_dict = collections.OrderedDict((
('ee_slider', np.array([-20*np.float(np.linalg.norm(slider_error))])),
('arm_dist', np.array([-np.float(np.linalg.norm(arm_error))])),
))
return reward_dict
else:
raise Exception("Wrong index")
else:
current_idx = self.check_goal_completion(obs_dict['obj_qp'].squeeze(axis=0))
reward_dict = collections.OrderedDict((
('completion', np.array([np.float(current_idx == self.goal_idx)])),
))
return reward_dict
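# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original environment): the count-
# weighted goal selection used in learned_goal_select above, written with plain
# numpy so it can be read without the torch model. The logits stand in for the
# model output and are made up by the caller.
# ------------------------------------------------------------------------------
def _select_goal_sketch(logits, counts):
    """Sample a goal index, down-weighting goals that were selected often."""
    logits = np.asarray(logits, dtype=np.float64)
    counts = np.asarray(counts, dtype=np.float64)
    weighted = logits * np.exp(-counts)
    probs = np.exp(weighted - weighted.max())
    probs = probs / probs.sum()
    return int(np.random.choice(len(probs), p=probs))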
|
|
# -----------------------------------------------------------------------------
# A Three-Pronged Approach to Exploring the Limits of Static Malware Analyses:
# Callsite Parameter Cardinality (CPC) Counting: callee_context.py
#
# This keeps track of argument registers at the callee level and performs
# the cpc calculation based on the registers used as sources
#
# Luke Jones ([email protected])
#
# The MIT License (MIT)
# Copyright (c) 2016 Chthonian Cyber Services
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
from asm_helper import *
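# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the read-before-write
# rule encoded by CalleeContext.add_set_arg/add_src_arg below, reduced to a
# single register. A register counts toward the cpc only if it is read before
# any instruction in the callee writes to it. The access list is made up.
# ------------------------------------------------------------------------------
def _is_argument_register_sketch(accesses):
    """Return True if the register is read before it is written.
    Example: ['read', 'write'] -> True, ['write', 'read'] -> False.
    """
    for access in accesses:
        if access == 'write':
            return False  # defined locally before any use: not an argument
        if access == 'read':
            return True   # used as a source before being set: an argument
    return False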
class CalleeContext(object):
def __init__(self):
self.stack_arg_count = 0
        self.def_chain = list()  # for debugging purposes: the functions for each cpc
self.init_regs()
def init_regs(self):
self.rdi_set = False
self.rsi_set = False
self.rdx_set = False
self.rcx_set = False
self.r10_set = False
self.r8_set = False
self.r9_set = False
self.xmm0_set = False
self.xmm1_set = False
self.xmm2_set = False
self.xmm3_set = False
self.xmm4_set = False
self.xmm5_set = False
self.xmm6_set = False
self.xmm7_set = False
self.rdi_src = False
self.rsi_src = False
self.rdx_src = False
self.rcx_src = False
self.r10_src = False
self.r8_src = False
self.r9_src = False
self.xmm0_src = False
self.xmm1_src = False
self.xmm2_src = False
self.xmm3_src = False
self.xmm4_src = False
self.xmm5_src = False
self.xmm6_src = False
self.xmm7_src = False
def reset(self):
self.init_regs()
def print_arg_regs(self):
if self.rdi_src is True:
print("rdi,")
if self.rsi_src is True:
print("rsi,")
if self.rdx_src is True:
print("rdx,")
if self.rcx_src is True:
print("rcx,")
if self.r10_src is True:
print("r10,")
if self.r8_src is True:
print("r8,")
if self.r9_src is True:
print("r9,")
if self.xmm0_src is True:
print("xmm0,")
if self.xmm1_src is True:
print("xmm1,")
if self.xmm2_src is True:
print("xmm2,")
if self.xmm3_src is True:
print("xmm3,")
if self.xmm4_src is True:
print("xmm4,")
if self.xmm5_src is True:
print("xmm5,")
if self.xmm6_src is True:
print("xmm6,")
if self.xmm7_src is True:
print("xmm7,")
def add_set_arg(self,operand):
""" Adds a possible argument to args
"""
if operand in arg_reg_rdi and not self.rdi_src:
self.rdi_set = True
return True
elif operand in arg_reg_rsi and not self.rsi_src:
self.rsi_set = True
return True
elif operand in arg_reg_rdx and not self.rdx_src:
self.rdx_set = True
return True
elif operand in arg_reg_rcx and not self.rcx_src:
self.rcx_set = True
return True
elif operand in arg_reg_r10 and not self.r10_src:
self.r10_set = True
return True
elif operand in arg_reg_r8 and not self.r8_src:
self.r8_set = True
return True
elif operand in arg_reg_r9 and not self.r9_src:
self.r9_set = True
return True
elif operand in arg_reg_xmm0 and not self.xmm0_src:
self.xmm0_set = True
return True
elif operand in arg_reg_xmm1 and not self.xmm1_src:
self.xmm1_set = True
return True
elif operand in arg_reg_xmm2 and not self.xmm2_src:
self.xmm2_set = True
return True
elif operand in arg_reg_xmm3 and not self.xmm3_src:
self.xmm3_set = True
return True
elif operand in arg_reg_xmm4 and not self.xmm4_src:
self.xmm4_set = True
return True
elif operand in arg_reg_xmm5 and not self.xmm5_src:
self.xmm5_set = True
return True
elif operand in arg_reg_xmm6 and not self.xmm6_src:
self.xmm6_set = True
return True
elif operand in arg_reg_xmm7 and not self.xmm7_src:
self.xmm7_set = True
return True
return False
def add_src_arg(self,operand):
""" Adds a possible argument to args
"""
if operand in arg_reg_rdi and not self.rdi_set:
self.rdi_src = True
return True
elif operand in arg_reg_rsi and not self.rsi_set:
self.rsi_src = True
return True
elif operand in arg_reg_rdx and not self.rdx_set:
self.rdx_src = True
return True
elif operand in arg_reg_rcx and not self.rcx_set:
self.rcx_src = True
return True
elif operand in arg_reg_r10 and not self.r10_set:
self.r10_src = True
return True
elif operand in arg_reg_r8 and not self.r8_set:
self.r8_src = True
return True
elif operand in arg_reg_r9 and not self.r9_set:
self.r9_src = True
return True
elif operand in arg_reg_xmm0 and not self.xmm0_set:
self.xmm0_src = True
return True
elif operand in arg_reg_xmm1 and not self.xmm1_set:
self.xmm1_src = True
return True
elif operand in arg_reg_xmm2 and not self.xmm2_set:
self.xmm2_src = True
return True
elif operand in arg_reg_xmm3 and not self.xmm3_set:
self.xmm3_src = True
return True
elif operand in arg_reg_xmm4 and not self.xmm4_set:
self.xmm4_src = True
return True
elif operand in arg_reg_xmm5 and not self.xmm5_set:
self.xmm5_src = True
return True
elif operand in arg_reg_xmm6 and not self.xmm6_set:
self.xmm6_src = True
return True
elif operand in arg_reg_xmm7 and not self.xmm7_set:
self.xmm7_src = True
return True
return False
def add_child_context(self, child):
if child.rdi_src and not self.rdi_set:
self.rdi_src = True
if child.rsi_src and not self.rsi_set:
self.rsi_src = True
if child.rdx_src and not self.rdx_set:
self.rdx_src = True
if child.rcx_src and not self.rcx_set:
self.rcx_src = True
if child.r10_src and not self.r10_set:
self.r10_src = True
if child.r8_src and not self.r8_set:
self.r8_src = True
if child.r9_src and not self.r9_set:
self.r9_src = True
if child.xmm0_src and not self.xmm0_set:
self.xmm0_src = True
if child.xmm1_src and not self.xmm1_set:
self.xmm1_src = True
if child.xmm2_src and not self.xmm2_set:
self.xmm2_src = True
if child.xmm3_src and not self.xmm3_set:
self.xmm3_src = True
if child.xmm4_src and not self.xmm4_set:
self.xmm4_src = True
if child.xmm5_src and not self.xmm5_set:
self.xmm5_src = True
if child.xmm6_src and not self.xmm6_set:
self.xmm6_src = True
if child.xmm7_src and not self.xmm7_set:
self.xmm7_src = True
def calculate_cpc(self):
""" Determine callsite parameter cardinality based on argument
registers seen in assignment commands and their order
"""
int_regs = 0
fp_regs = 0
#Calculate number of int-ptr arguments used in context
if self.rdi_src is False:
int_regs = 0
elif self.rdi_src is True and self.rsi_src is False:
int_regs = 1
elif self.rsi_src is True and self.rdx_src is False:
int_regs = 2
#special handling for syscalls where r10 is used
elif self.rdx_src is True and self.rcx_src is False and self.r10_src is False:
int_regs = 3
elif (self.rcx_src is True or self.r10_src is True) and self.r8_src is False:
int_regs = 4
elif self.r8_src is True and self.r9_src is False:
int_regs = 5
elif self.r9_src is True:
int_regs = 6
#Calculate number of fp arguments used in context
if self.xmm0_src is False:
fp_regs = 0
elif self.xmm0_src is True and self.xmm1_src is False:
fp_regs = 1
elif self.xmm1_src is True and self.xmm2_src is False:
fp_regs = 2
elif self.xmm2_src is True and self.xmm3_src is False:
fp_regs = 3
elif self.xmm3_src is True and self.xmm4_src is False:
fp_regs = 4
elif self.xmm4_src is True and self.xmm5_src is False:
fp_regs = 5
elif self.xmm5_src is True and self.xmm6_src is False:
fp_regs = 6
elif self.xmm6_src is True and self.xmm7_src is False:
fp_regs = 7
elif self.xmm7_src is True:
fp_regs = 8
return int_regs + fp_regs + self.stack_arg_count
def calculate_cpc_split(self):
""" Determine callsite parameter cardinality based on argument
registers seen in assignment commands and their order
"""
int_regs = 0
fp_regs = 0
#Calculate number of int-ptr arguments used in context
if self.rdi_src is False:
int_regs = 0
elif self.rdi_src is True and self.rsi_src is False:
int_regs = 1
elif self.rsi_src is True and self.rdx_src is False:
int_regs = 2
#special handling for syscalls where r10 is used
elif self.rdx_src is True and self.rcx_src is False and self.r10_src is False:
int_regs = 3
elif (self.rcx_src is True or self.r10_src is True) and self.r8_src is False:
int_regs = 4
elif self.r8_src is True and self.r9_src is False:
int_regs = 5
elif self.r9_src is True:
int_regs = 6
#Calculate number of fp arguments used in context
if self.xmm0_src is False:
fp_regs = 0
elif self.xmm0_src is True and self.xmm1_src is False:
fp_regs = 1
elif self.xmm1_src is True and self.xmm2_src is False:
fp_regs = 2
elif self.xmm2_src is True and self.xmm3_src is False:
fp_regs = 3
elif self.xmm3_src is True and self.xmm4_src is False:
fp_regs = 4
elif self.xmm4_src is True and self.xmm5_src is False:
fp_regs = 5
elif self.xmm5_src is True and self.xmm6_src is False:
fp_regs = 6
elif self.xmm6_src is True and self.xmm7_src is False:
fp_regs = 7
elif self.xmm7_src is True:
fp_regs = 8
return str(int_regs) + "i" + str(fp_regs) + "f."
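# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the counting rule
# behind calculate_cpc above, assuming argument registers are used as a
# contiguous prefix of the System V AMD64 order (the original cascade also
# special-cases r10 for syscalls). The flag lists passed in are made up by
# the caller.
# ------------------------------------------------------------------------------
def _count_args_sketch(int_srcs, fp_srcs, stack_arg_count=0):
    """Return an estimated cpc from ordered source flags for the integer
    registers (rdi, rsi, rdx, rcx, r8, r9) and the xmm registers (xmm0-xmm7).
    Example: rdi and rsi read, xmm0 read -> 2 + 1 = cpc of 3.
    """
    int_regs = 0
    for i, used in enumerate(int_srcs):
        if used:
            int_regs = i + 1
    fp_regs = 0
    for i, used in enumerate(fp_srcs):
        if used:
            fp_regs = i + 1
    return int_regs + fp_regs + stack_arg_count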
|
|
from typing import Iterator, Tuple, List
import os
import json
import urllib
import urllib.parse
import logging
import warnings
import shutil
import glob
import git
import yaml
from .build import BuildManager
from ..exceptions import NameInUseError, BadManifestFile
from ..compiler import Compiler
from ..core import TestSuite, Bug, Language, BuildInstructions, \
CoverageInstructions, Tool, Source, SourceContents, RemoteSource, \
LocalSource
logger = logging.getLogger(__name__) # type: logging.Logger
logger.setLevel(logging.DEBUG)
__all__ = ['SourceManager']
class SourceManager(object):
"""
TODO: we *could* cache the contents of all of the sources to disk, avoiding
the need to scan for them at startup. Although that might be cool, it
seems like overengineering and may create compatibility headaches in
the future.
"""
def __init__(self, installation: 'BugZoo') -> None:
self.__installation = installation
self.__path = os.path.join(installation.path, 'sources')
# TODO
self.__registry_fn = os.path.join(self.__path, 'registry.yml')
self.__sources = {}
self.__contents = {}
self.refresh()
def __iter__(self) -> Iterator[Source]:
"""
Returns an iterator over the sources registered with this server.
"""
return self.__sources.values().__iter__()
def __getitem__(self, name: str) -> Source:
"""
Attempts to fetch the description of a given source.
Parameters:
name: the name of the source.
Returns:
a description of the source.
Raises:
KeyError: if no source is found with the given name.
"""
return self.__sources[name]
def __delitem__(self, name: str) -> None:
"""
See `remove`.
"""
return self.remove(self[name])
def refresh(self) -> None:
"""
Reloads all sources that are registered with this server.
"""
logger.info('refreshing sources')
for source in list(self):
self.unload(source)
if not os.path.exists(self.__registry_fn):
return
# TODO add version
with open(self.__registry_fn, 'r') as f:
registry = yaml.safe_load(f)
assert isinstance(registry, list)
for source_description in registry:
source = Source.from_dict(source_description)
self.load(source)
logger.info('refreshed sources')
def update(self) -> None:
"""
Ensures that all remote sources are up-to-date.
"""
for source_old in self:
if isinstance(source_old, RemoteSource):
repo = git.Repo(source_old.location)
origin = repo.remotes.origin
origin.pull()
sha = repo.head.object.hexsha
version = repo.git.rev_parse(sha, short=8)
if version != source_old.version:
source_new = RemoteSource(source_old.name,
source_old.location,
source_old.url,
version)
logger.info("updated source: %s [%s -> %s]", source_old.name,
source_old.version,
source_new.version)
self.load(source_new)
else:
logger.debug("no updates for source: %s", source_old.name)
# write to disk
# TODO local directory may be corrupted if program terminates between
# repo being updated and registry being saved; could add a "fix"
# command to recalculate versions for remote sources
self.save()
def save(self) -> None:
"""
Saves the contents of the source manager to disk.
"""
logger.info('saving registry to: %s', self.__registry_fn)
d = [s.to_dict() for s in self]
os.makedirs(self.__path, exist_ok=True)
with open(self.__registry_fn, 'w') as f:
yaml.dump(d, f, indent=2, default_flow_style=False)
logger.info('saved registry to: %s', self.__registry_fn)
def unload(self, source: Source) -> None:
"""
Unloads a registered source, causing all of its associated bugs, tools,
and blueprints to also be unloaded. If the given source is not loaded,
this function will do nothing.
"""
logger.info('unloading source: %s', source.name)
try:
contents = self.contents(source)
del self.__contents[source.name]
del self.__sources[source.name]
for name in contents.bugs:
bug = self.__installation.bugs[name]
self.__installation.bugs.remove(bug)
for name in contents.blueprints:
blueprint = self.__installation.build[name]
self.__installation.build.remove(blueprint)
for name in contents.tools:
tool = self.__installation.tools[name]
self.__installation.tools.remove(tool)
except KeyError:
pass
logger.info('unloaded source: %s', source.name)
def __parse_blueprint(self, source: Source, fn: str, d: dict) -> BuildInstructions:
return BuildInstructions(root=os.path.dirname(fn),
tag=d['tag'],
context=d.get('context', '.'),
filename=d.get('file', 'Dockerfile'),
arguments=d.get('arguments', {}),
source=source.name,
build_stage=d.get('build-stage', None),
depends_on=d.get('depends-on', None))
def __parse_bug(self, source: Source, fn: str, d: dict) -> Bug:
d_ = d.copy()
d_['dataset'] = d.get('dataset', None)
d_['program'] = d.get('program', None)
d_['source'] = source.name
return Bug.from_dict(d_)
def __parse_tool(self, source: Source, fn: str, d: dict) -> Tool:
return Tool(d['name'],
d['image'],
d.get('environment', {}),
source.name)
def __parse_file(self,
source: Source,
fn: str,
bugs: List[Bug],
blueprints: List[BuildInstructions],
tools: List[Tool]
) -> None:
with open(fn, 'r') as f:
yml = yaml.safe_load(f)
# TODO check version
if 'version' not in yml:
logger.warning("no version specified in manifest file: %s", fn)
for description in yml.get('bugs', []):
logger.debug("parsing bug: %s", json.dumps(description))
try:
bug = self.__parse_bug(source, fn, description)
logger.debug("parsed bug: %s", bug.name)
bugs.append(bug)
except KeyError as e:
logger.exception("missing property in bug description: %s",
str(e))
for description in yml.get('blueprints', []):
logger.debug("parsing blueprint: %s", json.dumps(description))
try:
blueprint = self.__parse_blueprint(source, fn, description)
logger.debug("parsed blueprint for image: %s",
blueprint.name)
blueprints.append(blueprint)
except KeyError as e:
logger.exception("missing property in blueprint description: %s",
str(e))
for description in yml.get('tools', []):
logger.debug("parsing tool: %s", json.dumps(description))
try:
tool = self.__parse_tool(source, fn, description)
logger.debug("parsed tool: %s", tool.name)
tools.append(tool)
except KeyError as e:
logger.exception("missing property in tool description: %s",
str(e))
def load(self, source: Source) -> None:
"""
Attempts to load all resources (i.e., bugs, tools, and blueprints)
provided by a given source. If the given source has already been
loaded, then that resources for that source are unloaded and
reloaded.
"""
logger.info('loading source %s at %s', source.name, source.location)
if source.name in self.__sources:
self.unload(source)
bugs = []
blueprints = []
tools = []
# find and parse all bugzoo files
glob_pattern = '{}/**/*.bugzoo.y*ml'.format(source.location)
for fn in glob.iglob(glob_pattern, recursive=True):
if fn.endswith('.yml') or fn.endswith('.yaml'):
logger.debug('found manifest file: %s', fn)
self.__parse_file(source, fn, bugs, blueprints, tools)
logger.debug('parsed manifest file: %s', fn)
# register contents
for bug in bugs:
self.__installation.bugs.add(bug)
for blueprint in blueprints:
self.__installation.build.add(blueprint)
for tool in tools:
self.__installation.tools.add(tool)
# record contents of source
contents = SourceContents([b.name for b in blueprints],
[b.name for b in bugs],
[t.name for t in tools])
self.__sources[source.name] = source
self.__contents[source.name] = contents
logger.info("loaded source: %s", source.name)
def contents(self, source: Source) -> SourceContents:
"""
Returns a summary of the bugs, tools, and blueprints provided by a
given source.
"""
return self.__contents[source.name]
def add(self, name: str, path_or_url: str) -> Source:
"""
Attempts to register a source provided by a given URL or local path
under a given name.
Returns:
a description of the registered source.
Raises:
NameInUseError: if an existing source is already registered under
the given name.
IOError: if no directory exists at the given path.
IOError: if downloading the remote source failed. (FIXME)
"""
logger.info("adding source: %s -> %s", name, path_or_url)
if name in self.__sources:
logger.info("name already used by existing source: %s", name)
raise NameInUseError(name)
        try:
            scheme = urllib.parse.urlparse(path_or_url).scheme
            is_url = scheme in ['http', 'https']
        except ValueError:
            is_url = False
        if is_url:
            logger.debug("source determined to be remote: %s", path_or_url)
        else:
            logger.debug("source determined to be local: %s", path_or_url)
if is_url:
url = path_or_url
# convert url to a local path
path = url.replace('https://', '')
path = path.replace('/', '_')
path = path.replace('.', '_')
path = os.path.join(self.__path, path)
# download from remote to local
shutil.rmtree(path, ignore_errors=True)
try:
# TODO shallow clone
logger.debug("cloning repository %s to %s", url, path)
repo = git.Repo.clone_from(url, path)
logger.debug("cloned repository %s to %s", url, path)
sha = repo.head.object.hexsha
version = repo.git.rev_parse(sha, short=8)
except: # TODO determine error type
shutil.rmtree(path, ignore_errors=True)
logger.error("failed to download remote source to local: %s -> %s", url, path)
raise IOError("failed to download remote source to local installation: '{}' -> '{}'".format(url, path))
source = RemoteSource(name, path, url, version)
else:
path = os.path.abspath(path_or_url)
if not os.path.isdir(path):
raise IOError("no directory found at path: {}".format(path))
source = LocalSource(name, path)
self.load(source)
self.save()
        logger.info('added source: %s', name)
        return source
def remove(self, source: Source) -> None:
"""
Unregisters a given source with this server. If the given source is a
remote source, then its local copy will be removed from disk.
Raises:
KeyError: if the given source is not registered with this server.
"""
self.unload(source)
if isinstance(source, RemoteSource):
shutil.rmtree(source.location, ignore_errors=True)
self.save()
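# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how SourceManager.add
# above distinguishes remote from local sources and derives a local directory
# name for a remote URL. The base directory here is a placeholder.
# ------------------------------------------------------------------------------
def _describe_source_sketch(path_or_url: str,
                            base_dir: str = '/tmp/bugzoo-sources'
                            ) -> Tuple[str, str]:
    """Return ('remote', local_clone_path) or ('local', absolute_path)."""
    try:
        is_url = urllib.parse.urlparse(path_or_url).scheme in ('http', 'https')
    except ValueError:
        is_url = False
    if is_url:
        name = path_or_url.replace('https://', '')
        name = name.replace('/', '_').replace('.', '_')
        return 'remote', os.path.join(base_dir, name)
    return 'local', os.path.abspath(path_or_url)
# Example:
#   _describe_source_sketch('https://github.com/example/bugs')
#   -> ('remote', '/tmp/bugzoo-sources/github_com_example_bugs')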
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.core import mail
from django.utils import timezone
from sentry.models import (
Activity, Commit, CommitAuthor, Deploy, Environment, GroupSubscriptionReason, Release,
ReleaseCommit, Repository, UserEmail, UserOption, UserOptionValue
)
from sentry.plugins.sentry_mail.activity.release import ReleaseActivityEmail
from sentry.testutils import TestCase
class ReleaseTestCase(TestCase):
def setUp(self):
super(ReleaseTestCase, self).setUp()
self.user = self.create_user('[email protected]')
assert UserEmail.objects.filter(
user=self.user,
email=self.user.email,
).update(
is_verified=True,
)
self.user2 = self.create_user('[email protected]')
assert UserEmail.objects.filter(
user=self.user2,
email=self.user2.email,
).update(
is_verified=True,
)
self.user3 = self.create_user('[email protected]')
assert UserEmail.objects.filter(
user=self.user3,
email=self.user3.email,
).update(
is_verified=True,
)
self.user4 = self.create_user('[email protected]')
assert UserEmail.objects.filter(
user=self.user4,
email=self.user4.email,
).update(
is_verified=True,
)
self.user5 = self.create_user('[email protected]')
user5_alt_email = '[email protected]'
UserEmail.objects.create(email=user5_alt_email, user=self.user5)
assert UserEmail.objects.filter(
user=self.user5,
email=self.user5.email,
).update(
is_verified=True,
)
assert UserEmail.objects.filter(
user=self.user5,
email=user5_alt_email,
).update(
is_verified=True,
)
self.org = self.create_organization(owner=None)
self.org.flags.allow_joinleave = False
self.org.save()
self.team = self.create_team(organization=self.org)
self.team2 = self.create_team(organization=self.org)
self.create_member(user=self.user, organization=self.org, teams=[self.team])
self.create_member(user=self.user2, organization=self.org)
self.create_member(user=self.user3, organization=self.org, teams=[self.team])
self.create_member(user=self.user4, organization=self.org, teams=[self.team])
self.create_member(user=self.user5, organization=self.org, teams=[self.team])
self.project = self.create_project(
organization=self.org,
team=self.team,
)
self.project2 = self.create_project(
organization=self.org,
team=self.team2,
)
self.release = Release.objects.create(
version='a' * 40,
organization_id=self.project.organization_id,
date_released=timezone.now(),
)
self.release.add_project(self.project)
self.release.add_project(self.project2)
self.deploy = Deploy.objects.create(
release=self.release,
organization_id=self.org.id,
environment_id=Environment.objects.create(
name='production', organization_id=self.org.id
).id
)
repository = Repository.objects.create(
organization_id=self.org.id,
name=self.project.name,
)
self.commit = Commit.objects.create(
key='a' * 40,
repository_id=repository.id,
organization_id=self.org.id,
author=CommitAuthor.objects.create(
organization_id=self.org.id,
name=self.user.name,
email=self.user.email,
),
)
self.commit2 = Commit.objects.create(
key='b' * 40,
repository_id=repository.id,
organization_id=self.org.id,
author=CommitAuthor.objects.create(
organization_id=self.org.id,
name=self.user2.name,
email=self.user2.email,
)
)
self.commit3 = Commit.objects.create(
key='c' * 40,
repository_id=repository.id,
organization_id=self.org.id,
author=CommitAuthor.objects.create(
organization_id=self.org.id,
name=self.user4.name,
email=self.user4.email,
)
)
self.commit4 = Commit.objects.create(
key='e' * 40,
repository_id=repository.id,
organization_id=self.org.id,
author=CommitAuthor.objects.create(
organization_id=self.org.id,
name=self.user5.name,
email=user5_alt_email,
)
)
ReleaseCommit.objects.create(
organization_id=self.project.organization_id,
release=self.release,
commit=self.commit,
order=0,
)
ReleaseCommit.objects.create(
organization_id=self.project.organization_id,
release=self.release,
commit=self.commit2,
order=1,
)
ReleaseCommit.objects.create(
organization_id=self.project.organization_id,
release=self.release,
commit=self.commit3,
order=2,
)
ReleaseCommit.objects.create(
organization_id=self.project.organization_id,
release=self.release,
commit=self.commit4,
order=3,
)
UserOption.objects.set_value(
user=self.user3,
organization=self.org,
key='deploy-emails',
value=UserOptionValue.all_deploys,
)
UserOption.objects.set_value(
user=self.user4,
organization=self.org,
key='deploy-emails',
value=UserOptionValue.no_deploys,
)
def test_simple(self):
email = ReleaseActivityEmail(
Activity(
project=self.project,
user=self.user,
type=Activity.RELEASE,
data={
'version': self.release.version,
'deploy_id': self.deploy.id,
},
)
)
# user is included because they committed
# user2 committed but isn't in a team associated with the project.
        # user3 is included because they opted into all deploy emails
# user4 committed but isn't included because they opted out of all deploy emails
# user5 committed with another email address and is still included.
assert len(email.get_participants()) == 3
assert email.get_participants() == {
self.user: GroupSubscriptionReason.committed,
self.user3: GroupSubscriptionReason.deploy_setting,
self.user5: GroupSubscriptionReason.committed,
}
context = email.get_context()
assert context['environment'] == 'production'
assert context['repos'][0]['commits'] == [
(self.commit, self.user),
(self.commit2, self.user2),
(self.commit3, self.user4),
(self.commit4, self.user5),
]
user_context = email.get_user_context(self.user)
# make sure this only includes projects user has access to
assert len(user_context['projects']) == 1
assert user_context['projects'][0][0] == self.project
with self.tasks():
email.send()
assert len(mail.outbox) == 3
sent_email_addresses = {msg.to[0] for msg in mail.outbox}
assert sent_email_addresses == {self.user.email, self.user3.email, self.user5.email}
def test_doesnt_generate_on_no_release(self):
email = ReleaseActivityEmail(
Activity(
project=self.project,
user=self.user,
type=Activity.RELEASE,
data={'version': 'a',
'deploy_id': 5},
)
)
assert email.release is None
assert not email.should_email()
|
|
"""Active Directory authentication backend."""
from __future__ import absolute_import, unicode_literals
import itertools
import logging
import dns
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
try:
import ldap
from ldap.dn import dn2str, str2dn
from ldap.filter import filter_format
except ImportError:
ldap = None
from reviewboard.accounts.backends.base import BaseAuthBackend
from reviewboard.accounts.forms.auth import ActiveDirectorySettingsForm
logger = logging.getLogger(__name__)
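# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original backend): the search-root
# construction performed by ActiveDirectoryBackend.get_ldap_search_root below,
# written with plain string handling instead of python-ldap's dn2str. The
# domain and OU values are placeholders.
# ------------------------------------------------------------------------------
def _build_search_root_sketch(domain, ou_name=None):
    """Return an LDAP search root such as 'ou=Staff,dc=ad,dc=example,dc=com'."""
    parts = []
    if ou_name:
        parts.append('ou=%s' % ou_name)
    parts.extend('dc=%s' % dc for dc in domain.split('.'))
    return ','.join(parts)
# Example: _build_search_root_sketch('ad.example.com', 'Staff')
#   -> 'ou=Staff,dc=ad,dc=example,dc=com'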
class ActiveDirectoryBackend(BaseAuthBackend):
"""Authenticate a user against an Active Directory server.
This is controlled by the following Django settings:
.. setting:: AD_DOMAIN_CONTROLLER
``AD_DOMAIN_CONTROLLER``:
The domain controller (or controllers) to connect to. This must be
a string, but multiple controllers can be specified by separating
each with a space.
This is ``auth_ad_domain_controller`` in the site configuration.
.. setting:: AD_DOMAIN_NAME
``AD_DOMAIN_NAME``:
The Active Directory domain name. This must be a string.
This is ``auth_ad_domain_name`` in the site configuration.
.. setting:: AD_FIND_DC_FROM_DNS
``AD_FIND_DC_FROM_DNS``:
Whether domain controllers should be found by using DNS. This must be
a boolean.
This is ``auth_ad_find_dc_from_dns`` in the site configuration.
.. setting:: AD_GROUP_NAME
``AD_GROUP_NAME``:
The optional name of the group to restrict available users to. This
must be a string.
This is ``auth_ad_group_name`` in the site configuration.
.. setting:: AD_OU_NAME
``AD_OU_NAME``:
The optional name of the Organizational Unit to restrict available users
to. This must be a string.
This is ``auth_ad_ou_name`` in the site configuration.
.. setting:: AD_RECURSION_DEPTH
``AD_RECURSION_DEPTH``:
Maximum depth to recurse when checking group membership. A value of
-1 means infinite depth is supported. A value of 0 turns off recursive
checks.
This is ``auth_ad_recursion_depth`` in the site configuration.
.. setting:: AD_SEARCH_ROOT
``AD_SEARCH_ROOT``:
A custom search root for entries in Active Directory. This must be a
string.
This is ``auth_ad_search_root`` in the site configuration.
.. setting:: AD_USE_TLS
``AD_USE_TLS``:
Whether to use TLS when communicating over LDAP. This must be a
boolean.
This is ``auth_ad_use_tls`` in the site configuration.
"""
backend_id = 'ad'
name = _('Active Directory')
settings_form = ActiveDirectorySettingsForm
login_instructions = \
_('Use your standard Active Directory username and password.')
def get_domain_name(self):
"""Return the current Active Directory domain name.
This returns the domain name as set in :setting:`AD_DOMAIN_NAME`.
Returns:
unicode:
The Active Directory domain name.
"""
return settings.AD_DOMAIN_NAME
def get_ldap_search_root(self, user_domain=None):
"""Return the search root(s) for users in the LDAP server.
If :setting:`AD_SEARCH_ROOT` is set, then it will be used. Otherwise,
a suitable search root will be computed based on the domain name
(either the provided ``user_domain`` or the result of
:py:meth:`get_domain_name`) and any configured Organizational Unit
name (:setting:`AD_OU_NAME`).
Args:
user_domain (unicode, optional):
An explicit Active Directory domain to use for the search root.
Returns:
unicode:
The search root used to locate users.
"""
if getattr(settings, 'AD_SEARCH_ROOT', None):
return settings.AD_SEARCH_ROOT
dn = []
if settings.AD_OU_NAME:
dn.append([('ou', settings.AD_OU_NAME, None)])
if user_domain is None:
user_domain = self.get_domain_name()
if user_domain:
dn += [
[('dc', dc, None)]
for dc in user_domain.split('.')
]
return dn2str(dn)
def search_ad(self, con, filterstr, user_domain=None):
"""Search the given LDAP server based on the provided filter.
Args:
con (ldap.LDAPObject):
The LDAP connection to search.
filterstr (unicode):
The filter string used to locate objects in Active Directory.
user_domain (unicode, optional):
An explicit domain used for the search. If not provided,
:py:meth:`get_domain_name` will be used.
Returns:
list of tuple:
The list of search results. Each tuple in the list is in the form
of ``(dn, attrs)``, where ``dn`` is the Distinguished Name of the
entry and ``attrs`` is a dictionary of attributes for that entry.
"""
search_root = self.get_ldap_search_root(user_domain)
logger.debug('Search root "%s" for filter "%s"',
search_root, filterstr)
return con.search_s(search_root,
scope=ldap.SCOPE_SUBTREE,
filterstr=filterstr)
def find_domain_controllers_from_dns(self, user_domain=None):
"""Find and return the active domain controllers using DNS.
Args:
user_domain (unicode, optional):
An explicit domain used for the search. If not provided,
:py:meth:`get_domain_name` will be used.
Returns:
            list of tuple:
                The list of ``(port, host)`` pairs for the discovered domain
                controllers, ordered by SRV priority and descending weight.
"""
record_name = '_ldap._tcp.%s' % (user_domain or self.get_domain_name())
try:
answer = dns.resolver.query(record_name, 'SRV')
return [
(rdata.port, rdata.target.to_unicode(omit_final_dot=True))
for rdata in sorted(answer,
key=lambda rdata: (rdata.priority,
-rdata.weight))
]
except dns.resolver.NXDOMAIN:
# The domain could not be found. Skip it.
pass
except Exception as e:
logger.error('Unable to query for Active Directory domain '
'controllers using DNS record "%s": %s',
record_name,
e)
return []
def can_recurse(self, depth):
"""Return whether the given recursion depth is too deep.
Args:
depth (int):
The current depth to check.
Returns:
bool:
``True`` if the provided depth can be recursed into. ``False``
if it's too deep.
"""
return (settings.AD_RECURSION_DEPTH == -1 or
depth <= settings.AD_RECURSION_DEPTH)
def get_member_of(self, con, search_results, seen=None, depth=0):
"""Return the LDAP groups for the given users.
This iterates over the users specified in ``search_results`` and
returns a set of groups of which those users are members.
Args:
con (ldap.LDAPObject):
The LDAP connection used for checking groups memberships.
search_results (list of tuple):
The list of search results to check. This expects a result
from :py:meth:`search_ad`.
seen (set, optional):
The set of groups that have already been seen when recursing.
This is used internally by this method and should not be
provided by the caller.
depth (int, optional):
The current recursion depth. This is used internally by this
method and should not be provided by the caller.
Returns:
set:
The group memberships found for the given users.
"""
depth += 1
if seen is None:
seen = set()
can_recurse = self.can_recurse(depth)
for name, data in search_results:
if name is None:
continue
new_groups = []
for group_dn in data.get('memberOf', []):
parts = itertools.chain.from_iterable(str2dn(group_dn))
for attr, value, flags in parts:
if attr.lower() == 'cn':
new_groups.append(value)
break
old_seen = seen.copy()
seen.update(new_groups)
# Collect groups recursively.
if not can_recurse:
logger.warning('Recursive group check reached maximum '
'recursion depth (%s)',
depth)
continue
for group in new_groups:
if group in old_seen:
continue
# Search for groups with the specified CN. Use the CN rather
# than the sAMAccountName so that behavior is correct when
# the values differ (e.g. if a "pre-Windows 2000" group name
# is set in AD).
group_data = self.search_ad(
con,
filter_format('(&(objectClass=group)(cn=%s))', [group]))
seen.update(self.get_member_of(con, group_data,
seen=seen, depth=depth))
return seen
def get_ldap_connections(self, user_domain, request=None):
"""Return all LDAP connections used for Active Directory.
This returns an iterable of connections to the LDAP servers specified
in :setting:`AD_DOMAIN_CONTROLLER`.
Args:
user_domain (unicode, optional):
The domain for the user.
request (django.http.HttpRequest, optional):
The HTTP request from the client. This is used only for logging
purposes.
Yields:
tuple of (unicode, ldap.LDAPObject):
The connections to the configured LDAP servers.
"""
if settings.AD_FIND_DC_FROM_DNS:
dcs = self.find_domain_controllers_from_dns(user_domain)
else:
dcs = []
for dc_entry in settings.AD_DOMAIN_CONTROLLER.split():
if ':' in dc_entry:
host, port = dc_entry.split(':')
else:
host = dc_entry
port = '389'
dcs.append((port, host))
for port, host in dcs:
ldap_uri = 'ldap://%s:%s' % (host, port)
connection = ldap.initialize(ldap_uri,
bytes_mode=False)
if settings.AD_USE_TLS:
try:
connection.start_tls_s()
except ldap.UNAVAILABLE:
logger.warning('Domain controller "%s:%d" for domain "%s" '
'unavailable',
host, int(port), user_domain,
request=request)
continue
except ldap.CONNECT_ERROR:
logger.warning('Could not connect to domain controller '
'"%s:%d" for domain "%s". The certificate '
'may not be verifiable.',
host, int(port), user_domain,
request=request)
continue
connection.set_option(ldap.OPT_REFERRALS, 0)
yield ldap_uri, connection
def authenticate(self, request, username, password, **kwargs):
"""Authenticate a user against Active Directory.
This will attempt to authenticate the user against Active Directory.
If the username and password are valid, a user will be returned, and
added to the database if it doesn't already exist.
Version Changed:
4.0:
The ``request`` argument is now mandatory as the first positional
argument, as per requirements in Django.
Args:
request (django.http.HttpRequest):
The HTTP request from the caller. This may be ``None``.
username (unicode):
The username to authenticate.
password (unicode):
The user's password.
**kwargs (dict, unused):
Additional keyword arguments passed by the caller.
Returns:
django.contrib.auth.models.User:
The authenticated user, or ``None`` if the user could not be
authenticated for any reason.
"""
username = username.strip()
if ldap is None:
logger.error('Attempted to authenticate user "%s" in LDAP, but '
'the python-ldap package is not installed!',
username,
request=request)
return None
user_subdomain = ''
if '@' in username:
username, user_subdomain = username.split('@', 1)
elif '\\' in username:
user_subdomain, username = username.split('\\', 1)
user_domain = self.get_domain_name()
if user_subdomain:
user_domain = '%s.%s' % (user_subdomain, user_domain)
required_group = settings.AD_GROUP_NAME
for uri, connection in self.get_ldap_connections(user_domain,
request=request):
try:
bind_username = '%s@%s' % (username, user_domain)
connection.simple_bind_s(bind_username, password)
user_data = self.search_ad(
connection,
filter_format('(&(objectClass=user)(sAMAccountName=%s))',
[username]),
user_domain)
if not user_data:
return None
if required_group:
try:
group_names = self.get_member_of(connection, user_data)
except Exception as e:
logger.error('Unable to retrieve groups for user '
'"%s" from controller "%s": %s',
username, uri, e,
request=request,
exc_info=1)
return None
if required_group not in group_names:
logger.warning('User %s is not in required group "%s" '
'on controller "%s"',
username, required_group, uri,
request=request)
return None
return self.get_or_create_user(username=username,
request=request,
ad_user_data=user_data)
except ldap.SERVER_DOWN:
logger.warning('Unable to authenticate with the domain '
'controller "%s". It is down.',
uri,
request=request)
continue
except ldap.INVALID_CREDENTIALS:
logger.warning('Unable to authenticate user "%s" on '
'domain controller "%s". The user credentials '
'are invalid.',
username, uri,
request=request)
return None
except Exception as e:
logger.exception('Unexpected error occurred while '
'authenticating with Active Directory: %s',
e,
request=request)
continue
logger.error('Could not contact any domain controller servers when '
'authenticating for user "%s".',
username,
request=request)
return None
def get_or_create_user(self, username, request=None, ad_user_data=None):
"""Return an existing user or create one if it doesn't exist.
This does not authenticate the user.
If the user does not exist in the database, but does in Active
Directory, its information will be stored in the database for later
lookup. However, this will only happen if ``ad_user_data`` is provided.
Args:
username (unicode):
The name of the user to look up or create.
request (django.http.HttpRequest, unused):
The HTTP request from the client. This is unused.
ad_user_data (list of tuple, optional):
Data about the user to create. This is generally provided by
:py:meth:`authenticate`.
Returns:
django.contrib.auth.models.User:
The resulting user, or ``None`` if one could not be found.
"""
username = self.INVALID_USERNAME_CHAR_REGEX.sub('', username).lower()
try:
return User.objects.get(username=username)
except User.DoesNotExist:
if ad_user_data is None:
return None
try:
user_info = ad_user_data[0][1]
first_name = force_text(
user_info.get('givenName', [username])[0])
last_name = force_text(user_info.get('sn', [''])[0])
email = force_text(user_info.get(
'mail',
['%s@%s' % (username, settings.AD_DOMAIN_NAME)])[0])
user = User(username=username,
password='',
first_name=first_name,
last_name=last_name,
email=email)
user.is_staff = False
user.is_superuser = False
user.set_unusable_password()
user.save()
return user
except Exception:
return None
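# A minimal, self-contained sketch of the depth-limited group expansion that
# can_recurse()/get_member_of() above perform against LDAP. The directory here
# is a toy dict and every name below is illustrative; the real backend issues
# an LDAP search per group instead.
def _expand_groups_sketch(directory, groups, max_depth, seen=None, depth=0):
    """Recursively collect nested group names, up to ``max_depth`` levels."""
    depth += 1
    if seen is None:
        seen = set()
    can_recurse = (max_depth == -1 or depth <= max_depth)
    new_groups = [group for group in groups if group not in seen]
    seen.update(new_groups)
    if can_recurse:
        for group in new_groups:
            _expand_groups_sketch(directory, directory.get(group, []),
                                  max_depth, seen=seen, depth=depth)
    return seen
# For example, _expand_groups_sketch({'devs': ['eng'], 'eng': ['staff']},
# ['devs'], max_depth=-1) returns {'devs', 'eng', 'staff'}.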
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpacetoDepth op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class SpaceToDepthTest(tf.test.TestCase):
def testBasic(self):
x_np = [[[[1], [2]],
[[3], [4]]]]
with self.test_session(use_gpu=False):
block_size = 2
out_tf = tf.space_to_depth(x_np, block_size)
self.assertAllEqual(out_tf.eval(), [[[[1, 2, 3, 4]]]])
# Tests for larger input dimensions. To make sure elements are
# correctly ordered spatially.
def testLargerInput2x2(self):
x_np = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
with self.test_session(use_gpu=False):
block_size = 2
out_tf = tf.space_to_depth(x_np, block_size)
self.assertAllEqual(out_tf.eval(), [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]])
# Tests for larger input dimensions. To make sure elements are
# correctly ordered in depth. Here, larger block size.
def testLargerInput4x4(self):
x_np = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
with self.test_session(use_gpu=False):
block_size = 4
out_tf = tf.space_to_depth(x_np, block_size)
self.assertAllEqual(
out_tf.eval(),
[[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]])
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
def testDepthInterleaved(self):
x_np = [[[[1, 10], [2, 20]],
[[3, 30], [4, 40]]]]
with self.test_session(use_gpu=False):
block_size = 2
out_tf = tf.space_to_depth(x_np, block_size)
self.assertAllEqual(out_tf.eval(), [[[[1, 10, 2, 20, 3, 30, 4, 40]]]])
# Tests for larger input depths. Here an odd depth.
# To make sure elements are properly interleaved in depth.
def testDepthInterleavedDepth3(self):
x_np = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
with self.test_session(use_gpu=False):
block_size = 2
out_tf = tf.space_to_depth(x_np, block_size)
self.assertAllEqual(out_tf.eval(),
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]])
# Tests for larger input dimensions AND for larger input depths.
# To make sure elements are properly interleaved in depth and ordered
# spatially.
def testDepthInterleavedLarge(self):
x_np = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
[[3, 30], [4, 40], [7, 70], [8, 80]],
[[9, 90], [10, 100], [13, 130], [14, 140]],
[[11, 110], [12, 120], [15, 150], [16, 160]]]]
with self.test_session(use_gpu=False):
block_size = 2
out_tf = tf.space_to_depth(x_np, block_size)
self.assertAllEqual(out_tf.eval(),
[[[[1, 10, 2, 20, 3, 30, 4, 40],
[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120],
[13, 130, 14, 140, 15, 150, 16, 160]]]])
# Tests for different width and height.
def testNonSquare(self):
x_np = [[[[1, 10], [2, 20]],
[[3, 30], [4, 40]],
[[5, 50], [6, 60]],
[[7, 70], [8, 80]],
[[9, 90], [10, 100]],
[[11, 110], [12, 120]]]]
with self.test_session(use_gpu=False):
block_size = 2
out_tf = tf.space_to_depth(x_np, block_size)
self.assertAllEqual(out_tf.eval(),
[[[[1, 10, 2, 20, 3, 30, 4, 40]],
[[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120]]]])
# Error handling:
def testInputWrongDimMissingDepth(self):
# The input is missing the last dimension ("depth")
x_np = [[[1, 2],
[3, 4]]]
block_size = 2
with self.assertRaises(ValueError):
out_tf = tf.space_to_depth(x_np, block_size)
out_tf.eval()
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]],
[[3], [4]]]
block_size = 2
with self.assertRaises(ValueError):
_ = tf.space_to_depth(x_np, block_size)
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
out_tf = tf.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeOne(self):
# The block size is 1. The block size needs to be > 1.
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 1
with self.assertRaises(ValueError):
out_tf = tf.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 10
with self.assertRaises(IndexError):
out_tf = tf.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeNotDivisibleWidth(self):
# The block size divides width but not height.
x_np = [[[[1], [2], [3]],
[[3], [4], [7]]]]
block_size = 3
with self.assertRaises(IndexError):
_ = tf.space_to_depth(x_np, block_size)
def testBlockSizeNotDivisibleHeight(self):
# The block size divides height but not width.
x_np = [[[[1], [2]],
[[3], [4]],
[[5], [6]]]]
block_size = 3
with self.assertRaises(IndexError):
_ = tf.space_to_depth(x_np, block_size)
def testBlockSizeNotDivisibleBoth(self):
    # The block size divides neither width nor height.
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 3
with self.assertRaises(IndexError):
_ = tf.space_to_depth(x_np, block_size)
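# A minimal NumPy sketch (illustrative only, separate from the op under test)
# of the NHWC ordering the assertions above expect: each block_size x
# block_size spatial block is folded into the depth dimension in row-major
# order, with each pixel's depth values kept contiguous.
def _space_to_depth_reference(x, block_size):
  x = np.asarray(x)
  b, h, w, d = x.shape
  x = x.reshape(b, h // block_size, block_size, w // block_size, block_size, d)
  x = x.transpose(0, 1, 3, 2, 4, 5)
  return x.reshape(b, h // block_size, w // block_size,
                   block_size * block_size * d)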
class SpaceToDepthGradientTest(tf.test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_size):
assert 4 == x.ndim
with self.test_session():
tf_x = tf.convert_to_tensor(x)
tf_y = tf.space_to_depth(tf_x, block_size)
epsilon = 1e-2
      (x_jacob_t, x_jacob_n) = tf.test.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for space_to_depth of x which is a four dimensional
# tensor of shape [b, h * block_size, w * block_size, d].
def _compare(self, b, h, w, d, block_size):
block_size_sq = block_size * block_size
x = np.random.normal(
0, 1, b * h * w * d * block_size_sq).astype(np.float32).reshape(
[b, h * block_size, w * block_size, d])
self._checkGrad(x, block_size)
  # Don't use very large numbers as dimensions here, as the resulting tensor's
  # size is the Cartesian product of the dimensions.
def testSmall(self):
block_size = 2
self._compare(1, 2, 3, 5, block_size)
def testSmall2(self):
block_size = 2
self._compare(2, 4, 3, 2, block_size)
if __name__ == "__main__":
tf.test.main()
|
|
#!/usr/bin/env python
#
#===- exploded-graph-rewriter.py - ExplodedGraph dump tool -----*- python -*--#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===-----------------------------------------------------------------------===#
from __future__ import print_function
import argparse
import collections
import difflib
import json
import logging
import os
import re
#===-----------------------------------------------------------------------===#
# These data structures represent a deserialized ExplodedGraph.
#===-----------------------------------------------------------------------===#
# A helper function for finding the difference between two dictionaries.
def diff_dicts(curr, prev):
removed = [k for k in prev if k not in curr or curr[k] != prev[k]]
added = [k for k in curr if k not in prev or curr[k] != prev[k]]
return (removed, added)
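# For example, diff_dicts({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4}) returns
# (['b', 'c'], ['b']); a key whose value changed appears in both lists.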
# Represents any program state trait that is a dictionary of key-value pairs.
class GenericMap(object):
def __init__(self, items):
self.generic_map = collections.OrderedDict(items)
def diff(self, prev):
return diff_dicts(self.generic_map, prev.generic_map)
def is_different(self, prev):
removed, added = self.diff(prev)
return len(removed) != 0 or len(added) != 0
# A deserialized source location.
class SourceLocation(object):
def __init__(self, json_loc):
super(SourceLocation, self).__init__()
logging.debug('json: %s' % json_loc)
self.line = json_loc['line']
self.col = json_loc['column']
self.filename = os.path.basename(json_loc['file']) \
if 'file' in json_loc else '(main file)'
self.spelling = SourceLocation(json_loc['spelling']) \
if 'spelling' in json_loc else None
def is_macro(self):
return self.spelling is not None
# A deserialized program point.
class ProgramPoint(object):
def __init__(self, json_pp):
super(ProgramPoint, self).__init__()
self.kind = json_pp['kind']
self.tag = json_pp['tag']
self.node_id = json_pp['node_id']
self.is_sink = bool(json_pp['is_sink'])
self.has_report = bool(json_pp['has_report'])
if self.kind == 'Edge':
self.src_id = json_pp['src_id']
self.dst_id = json_pp['dst_id']
elif self.kind == 'Statement':
logging.debug(json_pp)
self.stmt_kind = json_pp['stmt_kind']
self.cast_kind = json_pp['cast_kind'] \
if 'cast_kind' in json_pp else None
self.stmt_point_kind = json_pp['stmt_point_kind']
self.stmt_id = json_pp['stmt_id']
self.pointer = json_pp['pointer']
self.pretty = json_pp['pretty']
self.loc = SourceLocation(json_pp['location']) \
if json_pp['location'] is not None else None
elif self.kind == 'BlockEntrance':
self.block_id = json_pp['block_id']
# A single expression acting as a key in a deserialized Environment.
class EnvironmentBindingKey(object):
def __init__(self, json_ek):
super(EnvironmentBindingKey, self).__init__()
# CXXCtorInitializer is not a Stmt!
self.stmt_id = json_ek['stmt_id'] if 'stmt_id' in json_ek \
else json_ek['init_id']
self.pretty = json_ek['pretty']
self.kind = json_ek['kind'] if 'kind' in json_ek else None
def _key(self):
return self.stmt_id
def __eq__(self, other):
return self._key() == other._key()
def __hash__(self):
return hash(self._key())
# Deserialized description of a location context.
class LocationContext(object):
def __init__(self, json_frame):
super(LocationContext, self).__init__()
self.lctx_id = json_frame['lctx_id']
self.caption = json_frame['location_context']
self.decl = json_frame['calling']
self.loc = SourceLocation(json_frame['location']) \
if json_frame['location'] is not None else None
def _key(self):
return self.lctx_id
def __eq__(self, other):
return self._key() == other._key()
def __hash__(self):
return hash(self._key())
# A group of deserialized Environment bindings that correspond to a specific
# location context.
class EnvironmentFrame(object):
def __init__(self, json_frame):
super(EnvironmentFrame, self).__init__()
self.location_context = LocationContext(json_frame)
self.bindings = collections.OrderedDict(
[(EnvironmentBindingKey(b),
b['value']) for b in json_frame['items']]
if json_frame['items'] is not None else [])
def diff_bindings(self, prev):
return diff_dicts(self.bindings, prev.bindings)
def is_different(self, prev):
removed, added = self.diff_bindings(prev)
return len(removed) != 0 or len(added) != 0
# A deserialized Environment. This class can also hold other entities that
# are similar to Environment, such as Objects Under Construction.
class GenericEnvironment(object):
def __init__(self, json_e):
super(GenericEnvironment, self).__init__()
self.frames = [EnvironmentFrame(f) for f in json_e]
def diff_frames(self, prev):
# TODO: It's difficult to display a good diff when frame numbers shift.
if len(self.frames) != len(prev.frames):
return None
updated = []
for i in range(len(self.frames)):
f = self.frames[i]
prev_f = prev.frames[i]
if f.location_context == prev_f.location_context:
if f.is_different(prev_f):
updated.append(i)
else:
# We have the whole frame replaced with another frame.
# TODO: Produce a nice diff.
return None
# TODO: Add support for added/removed.
return updated
def is_different(self, prev):
updated = self.diff_frames(prev)
return updated is None or len(updated) > 0
# A single binding key in a deserialized RegionStore cluster.
class StoreBindingKey(object):
def __init__(self, json_sk):
super(StoreBindingKey, self).__init__()
self.kind = json_sk['kind']
self.offset = json_sk['offset']
def _key(self):
return (self.kind, self.offset)
def __eq__(self, other):
return self._key() == other._key()
def __hash__(self):
return hash(self._key())
# A single cluster of the deserialized RegionStore.
class StoreCluster(object):
def __init__(self, json_sc):
super(StoreCluster, self).__init__()
self.base_region = json_sc['cluster']
self.bindings = collections.OrderedDict(
[(StoreBindingKey(b), b['value']) for b in json_sc['items']])
def diff_bindings(self, prev):
return diff_dicts(self.bindings, prev.bindings)
def is_different(self, prev):
removed, added = self.diff_bindings(prev)
return len(removed) != 0 or len(added) != 0
# A deserialized RegionStore.
class Store(object):
def __init__(self, json_s):
super(Store, self).__init__()
self.ptr = json_s['pointer']
self.clusters = collections.OrderedDict(
[(c['pointer'], StoreCluster(c)) for c in json_s['items']])
def diff_clusters(self, prev):
removed = [k for k in prev.clusters if k not in self.clusters]
added = [k for k in self.clusters if k not in prev.clusters]
updated = [k for k in prev.clusters if k in self.clusters
and prev.clusters[k].is_different(self.clusters[k])]
return (removed, added, updated)
def is_different(self, prev):
removed, added, updated = self.diff_clusters(prev)
return len(removed) != 0 or len(added) != 0 or len(updated) != 0
# Deserialized messages from a single checker in a single program state.
# Basically a list of raw strings.
class CheckerLines(object):
def __init__(self, json_lines):
super(CheckerLines, self).__init__()
self.lines = json_lines
def diff_lines(self, prev):
lines = difflib.ndiff(prev.lines, self.lines)
return [l.strip() for l in lines
if l.startswith('+') or l.startswith('-')]
def is_different(self, prev):
return len(self.diff_lines(prev)) > 0
# Deserialized messages of all checkers, separated by checker.
class CheckerMessages(object):
def __init__(self, json_m):
super(CheckerMessages, self).__init__()
self.items = collections.OrderedDict(
[(m['checker'], CheckerLines(m['messages'])) for m in json_m])
def diff_messages(self, prev):
removed = [k for k in prev.items if k not in self.items]
added = [k for k in self.items if k not in prev.items]
updated = [k for k in prev.items if k in self.items
and prev.items[k].is_different(self.items[k])]
return (removed, added, updated)
def is_different(self, prev):
removed, added, updated = self.diff_messages(prev)
return len(removed) != 0 or len(added) != 0 or len(updated) != 0
# A deserialized program state.
class ProgramState(object):
def __init__(self, state_id, json_ps):
super(ProgramState, self).__init__()
logging.debug('Adding ProgramState ' + str(state_id))
if json_ps is None:
json_ps = {
'store': None,
'environment': None,
'constraints': None,
'dynamic_types': None,
'constructing_objects': None,
'checker_messages': None
}
self.state_id = state_id
self.store = Store(json_ps['store']) \
if json_ps['store'] is not None else None
self.environment = \
GenericEnvironment(json_ps['environment']['items']) \
if json_ps['environment'] is not None else None
self.constraints = GenericMap([
(c['symbol'], c['range']) for c in json_ps['constraints']
]) if json_ps['constraints'] is not None else None
self.dynamic_types = GenericMap([
(t['region'], '%s%s' % (t['dyn_type'],
' (or a sub-class)'
if t['sub_classable'] else ''))
for t in json_ps['dynamic_types']]) \
if json_ps['dynamic_types'] is not None else None
self.constructing_objects = \
GenericEnvironment(json_ps['constructing_objects']) \
if json_ps['constructing_objects'] is not None else None
self.checker_messages = CheckerMessages(json_ps['checker_messages']) \
if json_ps['checker_messages'] is not None else None
# A deserialized exploded graph node. Has a default constructor because it
# may be referenced as part of an edge before its contents are deserialized,
# and at that point we already need room for predecessors and successors.
class ExplodedNode(object):
def __init__(self):
super(ExplodedNode, self).__init__()
self.predecessors = []
self.successors = []
def construct(self, node_id, json_node):
logging.debug('Adding ' + node_id)
self.ptr = node_id[4:]
self.points = [ProgramPoint(p) for p in json_node['program_points']]
self.node_id = self.points[-1].node_id
self.state = ProgramState(json_node['state_id'],
json_node['program_state']
if json_node['program_state'] is not None else None);
assert self.node_name() == node_id
def node_name(self):
return 'Node' + self.ptr
# A deserialized ExplodedGraph. Constructed by consuming a .dot file
# line-by-line.
class ExplodedGraph(object):
# Parse .dot files with regular expressions.
node_re = re.compile(
'^(Node0x[0-9a-f]*) \\[shape=record,.*label="{(.*)\\\\l}"\\];$')
edge_re = re.compile(
'^(Node0x[0-9a-f]*) -> (Node0x[0-9a-f]*);$')
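    # For example (pointer values are illustrative), a node line looks like
    #     Node0x12345 [shape=record,label="{ ... \l}"];
    # and an edge line looks like
    #     Node0x12345 -> Node0x67890;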
def __init__(self):
super(ExplodedGraph, self).__init__()
self.nodes = collections.defaultdict(ExplodedNode)
self.root_id = None
self.incomplete_line = ''
def add_raw_line(self, raw_line):
if raw_line.startswith('//'):
return
# Allow line breaks by waiting for ';'. This is not valid in
# a .dot file, but it is useful for writing tests.
if len(raw_line) > 0 and raw_line[-1] != ';':
self.incomplete_line += raw_line
return
raw_line = self.incomplete_line + raw_line
self.incomplete_line = ''
# Apply regexps one by one to see if it's a node or an edge
# and extract contents if necessary.
logging.debug('Line: ' + raw_line)
result = self.edge_re.match(raw_line)
if result is not None:
logging.debug('Classified as edge line.')
pred = result.group(1)
succ = result.group(2)
self.nodes[pred].successors.append(succ)
self.nodes[succ].predecessors.append(pred)
return
result = self.node_re.match(raw_line)
if result is not None:
logging.debug('Classified as node line.')
node_id = result.group(1)
if len(self.nodes) == 0:
self.root_id = node_id
# Note: when writing tests you don't need to escape everything,
# even though in a valid dot file everything is escaped.
node_label = result.group(2).replace('\\l', '') \
.replace(' ', '') \
.replace('\\"', '"') \
.replace('\\{', '{') \
.replace('\\}', '}') \
.replace('\\\\', '\\') \
.replace('\\|', '|') \
.replace('\\<', '\\\\<') \
.replace('\\>', '\\\\>') \
.rstrip(',')
logging.debug(node_label)
json_node = json.loads(node_label)
self.nodes[node_id].construct(node_id, json_node)
return
logging.debug('Skipping.')
#===-----------------------------------------------------------------------===#
# Visitors traverse a deserialized ExplodedGraph and do different things
# with every node and edge.
#===-----------------------------------------------------------------------===#
# A visitor that dumps the ExplodedGraph into a DOT file with fancy HTML-based
# syntax highlighting.
class DotDumpVisitor(object):
def __init__(self, do_diffs, dark_mode, gray_mode,
topo_mode, dump_dot_only):
super(DotDumpVisitor, self).__init__()
self._do_diffs = do_diffs
self._dark_mode = dark_mode
self._gray_mode = gray_mode
self._topo_mode = topo_mode
self._dump_dot_only = dump_dot_only
self._output = []
def _dump_raw(self, s):
if self._dump_dot_only:
print(s, end='')
else:
self._output.append(s)
def output(self):
assert not self._dump_dot_only
return ''.join(self._output)
def _dump(self, s):
s = s.replace('&', '&') \
.replace('{', '\\{') \
.replace('}', '\\}') \
.replace('\\<', '<') \
.replace('\\>', '>') \
.replace('\\l', '<br />') \
.replace('|', '\\|')
if self._gray_mode:
s = re.sub(r'<font color="[a-z0-9]*">', '', s)
s = re.sub(r'</font>', '', s)
self._dump_raw(s)
@staticmethod
def _diff_plus_minus(is_added):
if is_added is None:
return ''
if is_added:
return '<font color="forestgreen">+</font>'
return '<font color="red">-</font>'
@staticmethod
def _short_pretty(s):
if s is None:
return None
if len(s) < 20:
return s
left = s.find('{')
right = s.rfind('}')
if left == -1 or right == -1 or left >= right:
return s
candidate = s[0:left + 1] + ' ... ' + s[right:]
if len(candidate) >= len(s):
return s
return candidate
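    # For instance (illustrative input), a long pretty-print such as
    #     'operator()(int) { return some * very + long - expression; }'
    # is shortened to 'operator()(int) { ... }'.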
@staticmethod
def _make_sloc(loc):
if loc is None:
return '<i>Invalid Source Location</i>'
def make_plain_loc(loc):
return '%s:<b>%s</b>:<b>%s</b>' \
% (loc.filename, loc.line, loc.col)
if loc.is_macro():
return '%s <font color="royalblue1">' \
'(<i>spelling at </i> %s)</font>' \
% (make_plain_loc(loc), make_plain_loc(loc.spelling))
return make_plain_loc(loc)
def visit_begin_graph(self, graph):
self._graph = graph
self._dump_raw('digraph "ExplodedGraph" {\n')
if self._dark_mode:
self._dump_raw('bgcolor="gray10";\n')
self._dump_raw('label="";\n')
def visit_program_point(self, p):
if p.kind in ['Edge', 'BlockEntrance', 'BlockExit']:
color = 'gold3'
elif p.kind in ['PreStmtPurgeDeadSymbols',
'PostStmtPurgeDeadSymbols']:
color = 'red'
elif p.kind in ['CallEnter', 'CallExitBegin', 'CallExitEnd']:
color = 'dodgerblue' if self._dark_mode else 'blue'
elif p.kind in ['Statement']:
color = 'cyan4'
else:
color = 'forestgreen'
self._dump('<tr><td align="left">%s.</td>' % p.node_id)
if p.kind == 'Statement':
# This avoids pretty-printing huge statements such as CompoundStmt.
# Such statements show up only at [Pre|Post]StmtPurgeDeadSymbols
skip_pretty = 'PurgeDeadSymbols' in p.stmt_point_kind
stmt_color = 'cyan3'
self._dump('<td align="left" width="0">%s:</td>'
'<td align="left" width="0"><font color="%s">'
'%s</font> </td>'
'<td align="left"><i>S%s</i></td>'
'<td align="left"><font color="%s">%s</font></td>'
'<td align="left">%s</td></tr>'
% (self._make_sloc(p.loc), color,
'%s (%s)' % (p.stmt_kind, p.cast_kind)
if p.cast_kind is not None else p.stmt_kind,
p.stmt_id, stmt_color, p.stmt_point_kind,
self._short_pretty(p.pretty)
if not skip_pretty else ''))
elif p.kind == 'Edge':
self._dump('<td width="0"></td>'
'<td align="left" width="0">'
'<font color="%s">%s</font></td><td align="left">'
'[B%d] -\\> [B%d]</td></tr>'
% (color, 'BlockEdge', p.src_id, p.dst_id))
elif p.kind == 'BlockEntrance':
self._dump('<td width="0"></td>'
'<td align="left" width="0">'
'<font color="%s">%s</font></td>'
'<td align="left">[B%d]</td></tr>'
% (color, p.kind, p.block_id))
else:
# TODO: Print more stuff for other kinds of points.
self._dump('<td width="0"></td>'
'<td align="left" width="0" colspan="2">'
'<font color="%s">%s</font></td></tr>'
% (color, p.kind))
if p.tag is not None:
self._dump('<tr><td width="0"></td><td width="0"></td>'
'<td colspan="3" align="left">'
'<b>Tag: </b> <font color="crimson">'
'%s</font></td></tr>' % p.tag)
if p.has_report:
self._dump('<tr><td width="0"></td><td width="0"></td>'
'<td colspan="3" align="left">'
'<font color="red"><b>Bug Report Attached'
'</b></font></td></tr>')
if p.is_sink:
self._dump('<tr><td width="0"></td><td width="0"></td>'
'<td colspan="3" align="left">'
'<font color="cornflowerblue"><b>Sink Node'
'</b></font></td></tr>')
def visit_environment(self, e, prev_e=None):
self._dump('<table border="0">')
def dump_location_context(lc, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left"><b>%s</b></td>'
'<td align="left" colspan="2">'
'<font color="gray60">%s </font>'
'%s</td></tr>'
% (self._diff_plus_minus(is_added),
lc.caption, lc.decl,
('(%s)' % self._make_sloc(lc.loc))
if lc.loc is not None else ''))
def dump_binding(f, b, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left"><i>S%s</i></td>'
'%s'
'<td align="left">%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added),
b.stmt_id,
'<td align="left"><font color="%s"><i>'
'%s</i></font></td>' % (
'lavender' if self._dark_mode else 'darkgreen',
('(%s)' % b.kind) if b.kind is not None else ' '
),
self._short_pretty(b.pretty), f.bindings[b]))
frames_updated = e.diff_frames(prev_e) if prev_e is not None else None
if frames_updated:
for i in frames_updated:
f = e.frames[i]
prev_f = prev_e.frames[i]
dump_location_context(f.location_context)
bindings_removed, bindings_added = f.diff_bindings(prev_f)
for b in bindings_removed:
dump_binding(prev_f, b, False)
for b in bindings_added:
dump_binding(f, b, True)
else:
for f in e.frames:
dump_location_context(f.location_context)
for b in f.bindings:
dump_binding(f, b)
self._dump('</table>')
def visit_environment_in_state(self, selector, title, s, prev_s=None):
e = getattr(s, selector)
prev_e = getattr(prev_s, selector) if prev_s is not None else None
if e is None and prev_e is None:
return
self._dump('<hr /><tr><td align="left"><b>%s: </b>' % title)
if e is None:
self._dump('<i> Nothing!</i>')
else:
if prev_e is not None:
if e.is_different(prev_e):
self._dump('</td></tr><tr><td align="left">')
self.visit_environment(e, prev_e)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_environment(e)
self._dump('</td></tr>')
def visit_store(self, s, prev_s=None):
self._dump('<table border="0">')
def dump_binding(s, c, b, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added),
s.clusters[c].base_region, b.offset,
'(<i>Default</i>)' if b.kind == 'Default'
else '',
s.clusters[c].bindings[b]))
if prev_s is not None:
clusters_removed, clusters_added, clusters_updated = \
s.diff_clusters(prev_s)
for c in clusters_removed:
for b in prev_s.clusters[c].bindings:
dump_binding(prev_s, c, b, False)
for c in clusters_updated:
bindings_removed, bindings_added = \
s.clusters[c].diff_bindings(prev_s.clusters[c])
for b in bindings_removed:
dump_binding(prev_s, c, b, False)
for b in bindings_added:
dump_binding(s, c, b, True)
for c in clusters_added:
for b in s.clusters[c].bindings:
dump_binding(s, c, b, True)
else:
for c in s.clusters:
for b in s.clusters[c].bindings:
dump_binding(s, c, b)
self._dump('</table>')
def visit_store_in_state(self, s, prev_s=None):
st = s.store
prev_st = prev_s.store if prev_s is not None else None
if st is None and prev_st is None:
return
self._dump('<hr /><tr><td align="left"><b>Store: </b>')
if st is None:
self._dump('<i> Nothing!</i>')
else:
if self._dark_mode:
self._dump(' <font color="gray30">(%s)</font>' % st.ptr)
else:
self._dump(' <font color="gray">(%s)</font>' % st.ptr)
if prev_st is not None:
if s.store.is_different(prev_st):
self._dump('</td></tr><tr><td align="left">')
self.visit_store(st, prev_st)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_store(st)
self._dump('</td></tr>')
def visit_generic_map(self, m, prev_m=None):
self._dump('<table border="0">')
def dump_pair(m, k, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added),
k, m.generic_map[k]))
if prev_m is not None:
removed, added = m.diff(prev_m)
for k in removed:
dump_pair(prev_m, k, False)
for k in added:
dump_pair(m, k, True)
else:
for k in m.generic_map:
dump_pair(m, k, None)
self._dump('</table>')
def visit_generic_map_in_state(self, selector, title, s, prev_s=None):
m = getattr(s, selector)
prev_m = getattr(prev_s, selector) if prev_s is not None else None
if m is None and prev_m is None:
return
self._dump('<hr />')
self._dump('<tr><td align="left">'
'<b>%s: </b>' % title)
if m is None:
self._dump('<i> Nothing!</i>')
else:
if prev_m is not None:
if m.is_different(prev_m):
self._dump('</td></tr><tr><td align="left">')
self.visit_generic_map(m, prev_m)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_generic_map(m)
self._dump('</td></tr>')
def visit_checker_messages(self, m, prev_m=None):
self._dump('<table border="0">')
def dump_line(l, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added), l))
def dump_chk(chk, is_added=None):
dump_line('<i>%s</i>:' % chk, is_added)
if prev_m is not None:
removed, added, updated = m.diff_messages(prev_m)
for chk in removed:
dump_chk(chk, False)
for l in prev_m.items[chk].lines:
dump_line(l, False)
for chk in updated:
dump_chk(chk)
for l in m.items[chk].diff_lines(prev_m.items[chk]):
dump_line(l[1:], l.startswith('+'))
for chk in added:
dump_chk(chk, True)
for l in m.items[chk].lines:
dump_line(l, True)
else:
for chk in m.items:
dump_chk(chk)
for l in m.items[chk].lines:
dump_line(l)
self._dump('</table>')
def visit_checker_messages_in_state(self, s, prev_s=None):
m = s.checker_messages
prev_m = prev_s.checker_messages if prev_s is not None else None
if m is None and prev_m is None:
return
self._dump('<hr />')
self._dump('<tr><td align="left">'
'<b>Checker State: </b>')
if m is None:
self._dump('<i> Nothing!</i>')
else:
if prev_m is not None:
if m.is_different(prev_m):
self._dump('</td></tr><tr><td align="left">')
self.visit_checker_messages(m, prev_m)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_checker_messages(m)
self._dump('</td></tr>')
def visit_state(self, s, prev_s):
self.visit_store_in_state(s, prev_s)
self.visit_environment_in_state('environment', 'Expressions',
s, prev_s)
self.visit_generic_map_in_state('constraints', 'Ranges',
s, prev_s)
self.visit_generic_map_in_state('dynamic_types', 'Dynamic Types',
s, prev_s)
self.visit_environment_in_state('constructing_objects',
'Objects Under Construction',
s, prev_s)
self.visit_checker_messages_in_state(s, prev_s)
def visit_node(self, node):
self._dump('%s [shape=record,'
% (node.node_name()))
if self._dark_mode:
self._dump('color="white",fontcolor="gray80",')
self._dump('label=<<table border="0">')
self._dump('<tr><td bgcolor="%s"><b>State %s</b></td></tr>'
% ("gray20" if self._dark_mode else "gray70",
node.state.state_id
if node.state is not None else 'Unspecified'))
if not self._topo_mode:
self._dump('<tr><td align="left" width="0">')
if len(node.points) > 1:
self._dump('<b>Program points:</b></td></tr>')
else:
self._dump('<b>Program point:</b></td></tr>')
self._dump('<tr><td align="left" width="0">'
'<table border="0" align="left" width="0">')
for p in node.points:
self.visit_program_point(p)
self._dump('</table></td></tr>')
if node.state is not None and not self._topo_mode:
prev_s = None
# Do diffs only when we have a unique predecessor.
# Don't do diffs on the leaf nodes because they're
# the important ones.
if self._do_diffs and len(node.predecessors) == 1 \
and len(node.successors) > 0:
prev_s = self._graph.nodes[node.predecessors[0]].state
self.visit_state(node.state, prev_s)
self._dump_raw('</table>>];\n')
def visit_edge(self, pred, succ):
self._dump_raw('%s -> %s%s;\n' % (
pred.node_name(), succ.node_name(),
' [color="white"]' if self._dark_mode else ''
))
def visit_end_of_graph(self):
self._dump_raw('}\n')
if not self._dump_dot_only:
import sys
import tempfile
def write_temp_file(suffix, data):
fd, filename = tempfile.mkstemp(suffix=suffix)
print('Writing "%s"...' % filename)
with os.fdopen(fd, 'w') as fp:
fp.write(data)
print('Done! Please remember to remove the file.')
return filename
try:
import graphviz
except ImportError:
# The fallback behavior if graphviz is not installed!
print('Python graphviz not found. Please invoke')
print(' $ pip install graphviz')
print('in order to enable automatic conversion to HTML.')
print()
print('You may also convert DOT to SVG manually via')
print(' $ dot -Tsvg input.dot -o output.svg')
print()
write_temp_file('.dot', self.output())
return
svg = graphviz.pipe('dot', 'svg', self.output())
filename = write_temp_file(
'.html', '<html><body bgcolor="%s">%s</body></html>' % (
'#1a1a1a' if self._dark_mode else 'white', svg))
if sys.platform == 'win32':
os.startfile(filename)
elif sys.platform == 'darwin':
os.system('open "%s"' % filename)
else:
os.system('xdg-open "%s"' % filename)
#===-----------------------------------------------------------------------===#
# Explorers know how to traverse the ExplodedGraph in a certain order.
# They would invoke a Visitor on every node or edge they encounter.
#===-----------------------------------------------------------------------===#
# BasicExplorer explores the whole graph in no particular order.
class BasicExplorer(object):
def __init__(self):
super(BasicExplorer, self).__init__()
def explore(self, graph, visitor):
visitor.visit_begin_graph(graph)
for node in sorted(graph.nodes):
logging.debug('Visiting ' + node)
visitor.visit_node(graph.nodes[node])
for succ in sorted(graph.nodes[node].successors):
logging.debug('Visiting edge: %s -> %s ' % (node, succ))
visitor.visit_edge(graph.nodes[node], graph.nodes[succ])
visitor.visit_end_of_graph()
#===-----------------------------------------------------------------------===#
# Trimmers cut out parts of the ExplodedGraph to let us focus on what remains.
# Trimmers can be combined together by applying them sequentially.
#===-----------------------------------------------------------------------===#
# SinglePathTrimmer keeps only a single path - the leftmost path from the root.
# Useful when the trimmed graph is still too large.
class SinglePathTrimmer(object):
def __init__(self):
super(SinglePathTrimmer, self).__init__()
def trim(self, graph):
visited_nodes = set()
node_id = graph.root_id
while True:
visited_nodes.add(node_id)
node = graph.nodes[node_id]
if len(node.successors) > 0:
succ_id = node.successors[0]
succ = graph.nodes[succ_id]
node.successors = [succ_id]
succ.predecessors = [node_id]
if succ_id in visited_nodes:
break
node_id = succ_id
else:
break
graph.nodes = {node_id: graph.nodes[node_id]
for node_id in visited_nodes}
# TargetedTrimmer keeps paths that lead to specific nodes and discards all
# other paths. Useful when you cannot use -trim-egraph (e.g. when debugging
# a crash).
class TargetedTrimmer(object):
def __init__(self, target_nodes):
super(TargetedTrimmer, self).__init__()
self._target_nodes = target_nodes
@staticmethod
def parse_target_node(node, graph):
if node.startswith('0x'):
ret = 'Node' + node
assert ret in graph.nodes
return ret
else:
for other_id in graph.nodes:
other = graph.nodes[other_id]
if other.node_id == int(node):
return other_id
@staticmethod
def parse_target_nodes(target_nodes, graph):
return [TargetedTrimmer.parse_target_node(node, graph)
for node in target_nodes.split(',')]
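    # For example (illustrative values), '--to 0x2e64c30,5' selects the node
    # whose pointer is 0x2e64c30 and the node whose stable node_id is 5.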
def trim(self, graph):
queue = self._target_nodes
visited_nodes = set()
while len(queue) > 0:
node_id = queue.pop()
visited_nodes.add(node_id)
node = graph.nodes[node_id]
for pred_id in node.predecessors:
if pred_id not in visited_nodes:
queue.append(pred_id)
graph.nodes = {node_id: graph.nodes[node_id]
for node_id in visited_nodes}
for node_id in graph.nodes:
node = graph.nodes[node_id]
node.successors = [succ_id for succ_id in node.successors
if succ_id in visited_nodes]
node.predecessors = [succ_id for succ_id in node.predecessors
if succ_id in visited_nodes]
#===-----------------------------------------------------------------------===#
# The entry point to the script.
#===-----------------------------------------------------------------------===#
def main():
parser = argparse.ArgumentParser(
description='Display and manipulate Exploded Graph dumps.')
parser.add_argument('filename', type=str,
help='the .dot file produced by the Static Analyzer')
parser.add_argument('-v', '--verbose', action='store_const',
dest='loglevel', const=logging.DEBUG,
default=logging.WARNING,
help='enable info prints')
parser.add_argument('-d', '--diff', action='store_const', dest='diff',
const=True, default=False,
help='display differences between states')
parser.add_argument('-t', '--topology', action='store_const',
dest='topology', const=True, default=False,
help='only display program points, omit states')
parser.add_argument('-s', '--single-path', action='store_const',
dest='single_path', const=True, default=False,
help='only display the leftmost path in the graph '
'(useful for trimmed graphs that still '
'branch too much)')
parser.add_argument('--to', type=str, default=None,
help='only display execution paths from the root '
'to the given comma-separated list of nodes '
'identified by a pointer or a stable ID; '
'compatible with --single-path')
parser.add_argument('--dark', action='store_const', dest='dark',
const=True, default=False,
help='dark mode')
parser.add_argument('--gray', action='store_const', dest='gray',
const=True, default=False,
help='black-and-white mode')
parser.add_argument('--dump-dot-only', action='store_const',
dest='dump_dot_only', const=True, default=False,
help='instead of writing an HTML file and immediately '
'displaying it, dump the rewritten dot file '
'to stdout')
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
graph = ExplodedGraph()
with open(args.filename) as fd:
for raw_line in fd:
raw_line = raw_line.strip()
graph.add_raw_line(raw_line)
trimmers = []
if args.to is not None:
trimmers.append(TargetedTrimmer(
TargetedTrimmer.parse_target_nodes(args.to, graph)))
if args.single_path:
trimmers.append(SinglePathTrimmer())
explorer = BasicExplorer()
visitor = DotDumpVisitor(args.diff, args.dark, args.gray, args.topology,
args.dump_dot_only)
for trimmer in trimmers:
trimmer.trim(graph)
explorer.explore(graph, visitor)
if __name__ == '__main__':
main()
|
|
"""
Web socket API for Zigbee Home Automation devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import logging
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.const import ATTR_ENTITY_ID
import homeassistant.helpers.config_validation as cv
from .core.const import (
DOMAIN, ATTR_CLUSTER_ID, ATTR_CLUSTER_TYPE, ATTR_ATTRIBUTE, ATTR_VALUE,
ATTR_MANUFACTURER, ATTR_COMMAND, ATTR_COMMAND_TYPE, ATTR_ARGS, IN, OUT,
CLIENT_COMMANDS, SERVER_COMMANDS, SERVER)
_LOGGER = logging.getLogger(__name__)
TYPE = 'type'
CLIENT = 'client'
ID = 'id'
NAME = 'name'
RESPONSE = 'response'
DEVICE_INFO = 'device_info'
ATTR_DURATION = 'duration'
ATTR_IEEE_ADDRESS = 'ieee_address'
ATTR_IEEE = 'ieee'
SERVICE_PERMIT = 'permit'
SERVICE_REMOVE = 'remove'
SERVICE_SET_ZIGBEE_CLUSTER_ATTRIBUTE = 'set_zigbee_cluster_attribute'
SERVICE_ISSUE_ZIGBEE_CLUSTER_COMMAND = 'issue_zigbee_cluster_command'
ZIGBEE_CLUSTER_SERVICE = 'zigbee_cluster_service'
IEEE_SERVICE = 'ieee_based_service'
SERVICE_SCHEMAS = {
SERVICE_PERMIT: vol.Schema({
vol.Optional(ATTR_DURATION, default=60):
vol.All(vol.Coerce(int), vol.Range(1, 254)),
}),
IEEE_SERVICE: vol.Schema({
vol.Required(ATTR_IEEE_ADDRESS): cv.string,
}),
ZIGBEE_CLUSTER_SERVICE: vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_CLUSTER_ID): cv.positive_int,
vol.Optional(ATTR_CLUSTER_TYPE, default=IN): cv.string
}),
SERVICE_SET_ZIGBEE_CLUSTER_ATTRIBUTE: vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_CLUSTER_ID): cv.positive_int,
vol.Optional(ATTR_CLUSTER_TYPE, default=IN): cv.string,
vol.Required(ATTR_ATTRIBUTE): cv.positive_int,
vol.Required(ATTR_VALUE): cv.string,
vol.Optional(ATTR_MANUFACTURER): cv.positive_int,
}),
SERVICE_ISSUE_ZIGBEE_CLUSTER_COMMAND: vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_CLUSTER_ID): cv.positive_int,
vol.Optional(ATTR_CLUSTER_TYPE, default=IN): cv.string,
vol.Required(ATTR_COMMAND): cv.positive_int,
vol.Required(ATTR_COMMAND_TYPE): cv.string,
vol.Optional(ATTR_ARGS, default=''): cv.string,
vol.Optional(ATTR_MANUFACTURER): cv.positive_int,
}),
}
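# Example service data (illustrative values) accepted by
# SERVICE_SCHEMAS[SERVICE_SET_ZIGBEE_CLUSTER_ATTRIBUTE]:
#
#     {
#         ATTR_ENTITY_ID: 'light.office_lamp',
#         ATTR_CLUSTER_ID: 6,
#         ATTR_ATTRIBUTE: 0,
#         ATTR_VALUE: '1',
#     }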
WS_RECONFIGURE_NODE = 'zha/nodes/reconfigure'
SCHEMA_WS_RECONFIGURE_NODE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required(TYPE): WS_RECONFIGURE_NODE,
vol.Required(ATTR_IEEE): str
})
WS_ENTITIES_BY_IEEE = 'zha/entities'
SCHEMA_WS_LIST = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required(TYPE): WS_ENTITIES_BY_IEEE,
})
WS_ENTITY_CLUSTERS = 'zha/entities/clusters'
SCHEMA_WS_CLUSTERS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required(TYPE): WS_ENTITY_CLUSTERS,
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_IEEE): str
})
WS_ENTITY_CLUSTER_ATTRIBUTES = 'zha/entities/clusters/attributes'
SCHEMA_WS_CLUSTER_ATTRIBUTES = \
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required(TYPE): WS_ENTITY_CLUSTER_ATTRIBUTES,
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_IEEE): str,
vol.Required(ATTR_CLUSTER_ID): int,
vol.Required(ATTR_CLUSTER_TYPE): str
})
WS_READ_CLUSTER_ATTRIBUTE = 'zha/entities/clusters/attributes/value'
SCHEMA_WS_READ_CLUSTER_ATTRIBUTE = \
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required(TYPE): WS_READ_CLUSTER_ATTRIBUTE,
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_CLUSTER_ID): int,
vol.Required(ATTR_CLUSTER_TYPE): str,
vol.Required(ATTR_ATTRIBUTE): int,
vol.Optional(ATTR_MANUFACTURER): object,
})
WS_ENTITY_CLUSTER_COMMANDS = 'zha/entities/clusters/commands'
SCHEMA_WS_CLUSTER_COMMANDS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required(TYPE): WS_ENTITY_CLUSTER_COMMANDS,
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_IEEE): str,
vol.Required(ATTR_CLUSTER_ID): int,
vol.Required(ATTR_CLUSTER_TYPE): str
})
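# Example websocket command (illustrative values) matching SCHEMA_WS_CLUSTERS
# defined above; 'id' and 'type' come from BASE_COMMAND_MESSAGE_SCHEMA:
#
#     {
#         'id': 18,
#         'type': WS_ENTITY_CLUSTERS,
#         ATTR_ENTITY_ID: 'switch.plug_1',
#         ATTR_IEEE: '00:0d:6f:00:0a:90:69:e7',
#     }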
def async_load_api(hass, application_controller, zha_gateway):
"""Set up the web socket API."""
async def permit(service):
"""Allow devices to join this network."""
duration = service.data.get(ATTR_DURATION)
_LOGGER.info("Permitting joins for %ss", duration)
await application_controller.permit(duration)
hass.services.async_register(DOMAIN, SERVICE_PERMIT, permit,
schema=SERVICE_SCHEMAS[SERVICE_PERMIT])
async def remove(service):
"""Remove a node from the network."""
from bellows.types import EmberEUI64, uint8_t
ieee = service.data.get(ATTR_IEEE_ADDRESS)
ieee = EmberEUI64([uint8_t(p, base=16) for p in ieee.split(':')])
_LOGGER.info("Removing node %s", ieee)
await application_controller.remove(ieee)
hass.services.async_register(DOMAIN, SERVICE_REMOVE, remove,
schema=SERVICE_SCHEMAS[IEEE_SERVICE])
async def set_zigbee_cluster_attributes(service):
"""Set zigbee attribute for cluster on zha entity."""
entity_id = service.data.get(ATTR_ENTITY_ID)
cluster_id = service.data.get(ATTR_CLUSTER_ID)
cluster_type = service.data.get(ATTR_CLUSTER_TYPE)
attribute = service.data.get(ATTR_ATTRIBUTE)
value = service.data.get(ATTR_VALUE)
manufacturer = service.data.get(ATTR_MANUFACTURER) or None
entity_ref = zha_gateway.get_entity_reference(entity_id)
response = None
if entity_ref is not None:
response = await entity_ref.zha_device.write_zigbee_attribute(
list(entity_ref.cluster_listeners.values())[
0].cluster.endpoint.endpoint_id,
cluster_id,
attribute,
value,
cluster_type=cluster_type,
manufacturer=manufacturer
)
_LOGGER.debug("Set attribute for: %s %s %s %s %s %s %s",
"{}: [{}]".format(ATTR_CLUSTER_ID, cluster_id),
"{}: [{}]".format(ATTR_CLUSTER_TYPE, cluster_type),
"{}: [{}]".format(ATTR_ENTITY_ID, entity_id),
"{}: [{}]".format(ATTR_ATTRIBUTE, attribute),
"{}: [{}]".format(ATTR_VALUE, value),
"{}: [{}]".format(ATTR_MANUFACTURER, manufacturer),
"{}: [{}]".format(RESPONSE, response)
)
hass.services.async_register(DOMAIN, SERVICE_SET_ZIGBEE_CLUSTER_ATTRIBUTE,
set_zigbee_cluster_attributes,
schema=SERVICE_SCHEMAS[
SERVICE_SET_ZIGBEE_CLUSTER_ATTRIBUTE
])
async def issue_zigbee_cluster_command(service):
"""Issue command on zigbee cluster on zha entity."""
entity_id = service.data.get(ATTR_ENTITY_ID)
cluster_id = service.data.get(ATTR_CLUSTER_ID)
cluster_type = service.data.get(ATTR_CLUSTER_TYPE)
command = service.data.get(ATTR_COMMAND)
command_type = service.data.get(ATTR_COMMAND_TYPE)
args = service.data.get(ATTR_ARGS)
manufacturer = service.data.get(ATTR_MANUFACTURER) or None
entity_ref = zha_gateway.get_entity_reference(entity_id)
        response = None
        if entity_ref is not None:
            zha_device = entity_ref.zha_device
            response = await zha_device.issue_cluster_command(
list(entity_ref.cluster_listeners.values())[
0].cluster.endpoint.endpoint_id,
cluster_id,
command,
command_type,
args,
cluster_type=cluster_type,
manufacturer=manufacturer
)
_LOGGER.debug("Issue command for: %s %s %s %s %s %s %s %s",
"{}: [{}]".format(ATTR_CLUSTER_ID, cluster_id),
"{}: [{}]".format(ATTR_CLUSTER_TYPE, cluster_type),
"{}: [{}]".format(ATTR_ENTITY_ID, entity_id),
"{}: [{}]".format(ATTR_COMMAND, command),
"{}: [{}]".format(ATTR_COMMAND_TYPE, command_type),
"{}: [{}]".format(ATTR_ARGS, args),
"{}: [{}]".format(ATTR_MANUFACTURER, manufacturer),
"{}: [{}]".format(RESPONSE, response)
)
hass.services.async_register(DOMAIN, SERVICE_ISSUE_ZIGBEE_CLUSTER_COMMAND,
issue_zigbee_cluster_command,
schema=SERVICE_SCHEMAS[
SERVICE_ISSUE_ZIGBEE_CLUSTER_COMMAND
])
@websocket_api.async_response
async def websocket_reconfigure_node(hass, connection, msg):
"""Reconfigure a ZHA nodes entities by its ieee address."""
ieee = msg[ATTR_IEEE]
device = zha_gateway.get_device(ieee)
_LOGGER.debug("Reconfiguring node with ieee_address: %s", ieee)
hass.async_create_task(device.async_configure())
hass.components.websocket_api.async_register_command(
WS_RECONFIGURE_NODE, websocket_reconfigure_node,
SCHEMA_WS_RECONFIGURE_NODE
)
@websocket_api.async_response
async def websocket_entities_by_ieee(hass, connection, msg):
"""Return a dict of all zha entities grouped by ieee."""
entities_by_ieee = {}
for ieee, entities in zha_gateway.device_registry.items():
ieee_string = str(ieee)
entities_by_ieee[ieee_string] = []
for entity in entities:
entities_by_ieee[ieee_string].append({
ATTR_ENTITY_ID: entity.reference_id,
DEVICE_INFO: entity.device_info
})
connection.send_message(websocket_api.result_message(
msg[ID],
entities_by_ieee
))
hass.components.websocket_api.async_register_command(
WS_ENTITIES_BY_IEEE, websocket_entities_by_ieee,
SCHEMA_WS_LIST
)
@websocket_api.async_response
async def websocket_entity_clusters(hass, connection, msg):
"""Return a list of entity clusters."""
entity_id = msg[ATTR_ENTITY_ID]
entity_ref = zha_gateway.get_entity_reference(entity_id)
clusters = []
if entity_ref is not None:
for listener in entity_ref.cluster_listeners.values():
cluster = listener.cluster
in_clusters = cluster.endpoint.in_clusters.values()
out_clusters = cluster.endpoint.out_clusters.values()
if cluster in in_clusters:
clusters.append({
TYPE: IN,
ID: cluster.cluster_id,
NAME: cluster.__class__.__name__
})
elif cluster in out_clusters:
clusters.append({
TYPE: OUT,
ID: cluster.cluster_id,
NAME: cluster.__class__.__name__
})
connection.send_message(websocket_api.result_message(
msg[ID],
clusters
))
hass.components.websocket_api.async_register_command(
WS_ENTITY_CLUSTERS, websocket_entity_clusters,
SCHEMA_WS_CLUSTERS
)
@websocket_api.async_response
async def websocket_entity_cluster_attributes(hass, connection, msg):
"""Return a list of cluster attributes."""
entity_id = msg[ATTR_ENTITY_ID]
cluster_id = msg[ATTR_CLUSTER_ID]
cluster_type = msg[ATTR_CLUSTER_TYPE]
ieee = msg[ATTR_IEEE]
cluster_attributes = []
entity_ref = zha_gateway.get_entity_reference(entity_id)
device = zha_gateway.get_device(ieee)
attributes = None
if entity_ref is not None:
attributes = await device.get_cluster_attributes(
list(entity_ref.cluster_listeners.values())[
0].cluster.endpoint.endpoint_id,
cluster_id,
cluster_type)
if attributes is not None:
for attr_id in attributes:
cluster_attributes.append(
{
ID: attr_id,
NAME: attributes[attr_id][0]
}
)
_LOGGER.debug("Requested attributes for: %s %s %s %s",
"{}: [{}]".format(ATTR_CLUSTER_ID, cluster_id),
"{}: [{}]".format(ATTR_CLUSTER_TYPE, cluster_type),
"{}: [{}]".format(ATTR_ENTITY_ID, entity_id),
"{}: [{}]".format(RESPONSE, cluster_attributes)
)
connection.send_message(websocket_api.result_message(
msg[ID],
cluster_attributes
))
hass.components.websocket_api.async_register_command(
WS_ENTITY_CLUSTER_ATTRIBUTES, websocket_entity_cluster_attributes,
SCHEMA_WS_CLUSTER_ATTRIBUTES
)
@websocket_api.async_response
async def websocket_entity_cluster_commands(hass, connection, msg):
"""Return a list of cluster commands."""
entity_id = msg[ATTR_ENTITY_ID]
cluster_id = msg[ATTR_CLUSTER_ID]
cluster_type = msg[ATTR_CLUSTER_TYPE]
ieee = msg[ATTR_IEEE]
entity_ref = zha_gateway.get_entity_reference(entity_id)
device = zha_gateway.get_device(ieee)
cluster_commands = []
commands = None
if entity_ref is not None:
commands = await device.get_cluster_commands(
list(entity_ref.cluster_listeners.values())[
0].cluster.endpoint.endpoint_id,
cluster_id,
cluster_type)
if commands is not None:
for cmd_id in commands[CLIENT_COMMANDS]:
cluster_commands.append(
{
TYPE: CLIENT,
ID: cmd_id,
NAME: commands[CLIENT_COMMANDS][cmd_id][0]
}
)
for cmd_id in commands[SERVER_COMMANDS]:
cluster_commands.append(
{
TYPE: SERVER,
ID: cmd_id,
NAME: commands[SERVER_COMMANDS][cmd_id][0]
}
)
_LOGGER.debug("Requested commands for: %s %s %s %s",
"{}: [{}]".format(ATTR_CLUSTER_ID, cluster_id),
"{}: [{}]".format(ATTR_CLUSTER_TYPE, cluster_type),
"{}: [{}]".format(ATTR_ENTITY_ID, entity_id),
"{}: [{}]".format(RESPONSE, cluster_commands)
)
connection.send_message(websocket_api.result_message(
msg[ID],
cluster_commands
))
hass.components.websocket_api.async_register_command(
WS_ENTITY_CLUSTER_COMMANDS, websocket_entity_cluster_commands,
SCHEMA_WS_CLUSTER_COMMANDS
)
@websocket_api.async_response
async def websocket_read_zigbee_cluster_attributes(hass, connection, msg):
"""Read zigbee attribute for cluster on zha entity."""
entity_id = msg[ATTR_ENTITY_ID]
cluster_id = msg[ATTR_CLUSTER_ID]
cluster_type = msg[ATTR_CLUSTER_TYPE]
attribute = msg[ATTR_ATTRIBUTE]
entity_ref = zha_gateway.get_entity_reference(entity_id)
manufacturer = msg.get(ATTR_MANUFACTURER) or None
success = failure = None
        if entity_ref is not None:
            endpoint = list(entity_ref.cluster_listeners.values())[
                0].cluster.endpoint
            if cluster_type == IN:
                clusters = endpoint.in_clusters
            else:
                clusters = endpoint.out_clusters
            cluster = clusters[cluster_id]
            success, failure = await cluster.read_attributes(
[attribute],
allow_cache=False,
only_cache=False,
manufacturer=manufacturer
)
_LOGGER.debug("Read attribute for: %s %s %s %s %s %s %s",
"{}: [{}]".format(ATTR_CLUSTER_ID, cluster_id),
"{}: [{}]".format(ATTR_CLUSTER_TYPE, cluster_type),
"{}: [{}]".format(ATTR_ENTITY_ID, entity_id),
"{}: [{}]".format(ATTR_ATTRIBUTE, attribute),
"{}: [{}]".format(ATTR_MANUFACTURER, manufacturer),
"{}: [{}]".format(RESPONSE, str(success.get(attribute))),
"{}: [{}]".format('failure', failure)
)
connection.send_message(websocket_api.result_message(
msg[ID],
str(success.get(attribute))
))
hass.components.websocket_api.async_register_command(
WS_READ_CLUSTER_ATTRIBUTE, websocket_read_zigbee_cluster_attributes,
SCHEMA_WS_READ_CLUSTER_ATTRIBUTE
)
def async_unload_api(hass):
"""Unload the ZHA API."""
hass.services.async_remove(DOMAIN, SERVICE_PERMIT)
hass.services.async_remove(DOMAIN, SERVICE_REMOVE)
hass.services.async_remove(DOMAIN, SERVICE_SET_ZIGBEE_CLUSTER_ATTRIBUTE)
hass.services.async_remove(DOMAIN, SERVICE_ISSUE_ZIGBEE_CLUSTER_COMMAND)
|
|
# Copyright (C) 2020 Atsushi Togo
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import logging
import numpy as np
from phonopy.harmonic.force_constants import (
get_fc2,
similarity_transformation,
distribute_force_constants,
solve_force_constants,
get_rotated_displacement,
get_positions_sent_by_rot_inv,
get_nsym_list_and_s2pp)
from phono3py.phonon3.displacement_fc3 import (
get_reduced_site_symmetry,
get_bond_symmetry,
get_equivalent_smallest_vectors)
from phonopy.structure.cells import compute_all_sg_permutations
logger = logging.getLogger(__name__)
def get_fc3(supercell,
primitive,
disp_dataset,
symmetry,
is_compact_fc=False,
verbose=False):
# fc2 has to be full matrix to compute delta-fc2
# p2s_map elements are extracted if is_compact_fc=True at the last part.
fc2 = get_fc2(supercell, symmetry, disp_dataset)
fc3 = _get_fc3_least_atoms(supercell,
primitive,
disp_dataset,
fc2,
symmetry,
is_compact_fc=is_compact_fc,
verbose=verbose)
if verbose:
print("Expanding fc3.")
first_disp_atoms = np.unique(
[x['number'] for x in disp_dataset['first_atoms']])
rotations = symmetry.get_symmetry_operations()['rotations']
lattice = supercell.cell.T
permutations = symmetry.get_atomic_permutations()
if is_compact_fc:
s2p_map = primitive.s2p_map
p2s_map = primitive.p2s_map
p2p_map = primitive.p2p_map
s2compact = np.array([p2p_map[i] for i in s2p_map], dtype='intc')
for i in first_disp_atoms:
assert i in p2s_map
target_atoms = [i for i in p2s_map if i not in first_disp_atoms]
else:
s2compact = np.arange(supercell.get_number_of_atoms(), dtype='intc')
target_atoms = [i for i in s2compact if i not in first_disp_atoms]
distribute_fc3(fc3,
first_disp_atoms,
target_atoms,
lattice,
rotations,
permutations,
s2compact,
verbose=verbose)
if 'cutoff_distance' in disp_dataset:
if verbose:
print("Cutting-off fc3 (cut-off distance: %f)" %
disp_dataset['cutoff_distance'])
if is_compact_fc:
print("cutoff_fc3 doesn't support compact-fc3 yet.")
raise ValueError
cutoff_fc3(fc3,
supercell,
disp_dataset,
symmetry,
verbose=verbose)
if is_compact_fc:
p2s_map = primitive.get_primitive_to_supercell_map()
fc2 = np.array(fc2[p2s_map], dtype='double', order='C')
return fc2, fc3
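# Typical use (sketch only; `supercell`, `primitive`, `disp_dataset` and
# `symmetry` are assumed to come from the usual phono3py preparation step):
#
#     fc2, fc3 = get_fc3(supercell, primitive, disp_dataset, symmetry,
#                        is_compact_fc=False, verbose=True)
#     show_drift_fc3(fc3)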
def distribute_fc3(fc3,
first_disp_atoms,
target_atoms,
lattice,
rotations,
permutations,
s2compact,
verbose=False):
"""Distribute fc3
    fc3[i, :, :, 0:3, 0:3, 0:3], where i runs over the atoms already
    computed (first_disp_atoms), is distributed to symmetrically
    equivalent fc3 elements by tensor rotations.
    A symmetry operation (R, t) is searched that performs
        i_target -> i_done
    with
        atom_mapping[i_target] = i_done
    and the rotated tensor is then copied as
        fc3[i_target, j, k] = R_inv . fc3[i_done, atom_mapping[j], atom_mapping[k]]
    Parameters
    ----------
    target_atoms: list or ndarray
        Supercell atom indices to which fc3 are distributed.
    s2compact: ndarray
        Maps supercell index to compact index. For full-fc3,
        s2compact=np.arange(n_satom).
        shape=(n_satom,)
        dtype=intc
    """
n_satom = fc3.shape[1]
for i_target in target_atoms:
for i_done in first_disp_atoms:
rot_indices = np.where(permutations[:, i_target] == i_done)[0]
if len(rot_indices) > 0:
atom_mapping = np.array(permutations[rot_indices[0]],
dtype='intc')
rot = rotations[rot_indices[0]]
rot_cart_inv = np.array(
similarity_transformation(lattice, rot).T,
dtype='double', order='C')
break
if len(rot_indices) == 0:
print("Position or symmetry may be wrong.")
raise RuntimeError
if verbose > 2:
print(" [ %d, x, x ] to [ %d, x, x ]" %
(i_done + 1, i_target + 1))
sys.stdout.flush()
try:
import phono3py._phono3py as phono3c
phono3c.distribute_fc3(fc3,
int(s2compact[i_target]),
int(s2compact[i_done]),
atom_mapping,
rot_cart_inv)
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
for j in range(n_satom):
j_rot = atom_mapping[j]
for k in range(n_satom):
k_rot = atom_mapping[k]
fc3[i_target, j, k] = third_rank_tensor_rotation(
rot_cart_inv, fc3[i_done, j_rot, k_rot])
def set_permutation_symmetry_fc3(fc3):
try:
import phono3py._phono3py as phono3c
phono3c.permutation_symmetry_fc3(fc3)
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
num_atom = fc3.shape[0]
for i in range(num_atom):
for j in range(i, num_atom):
for k in range(j, num_atom):
fc3_elem = set_permutation_symmetry_fc3_elem(fc3, i, j, k)
copy_permutation_symmetry_fc3_elem(fc3, fc3_elem, i, j, k)
def set_permutation_symmetry_compact_fc3(fc3, primitive):
try:
import phono3py._phono3py as phono3c
s2p_map = primitive.get_supercell_to_primitive_map()
p2s_map = primitive.get_primitive_to_supercell_map()
p2p_map = primitive.get_primitive_to_primitive_map()
permutations = primitive.get_atomic_permutations()
s2pp_map, nsym_list = get_nsym_list_and_s2pp(s2p_map,
p2p_map,
permutations)
phono3c.permutation_symmetry_compact_fc3(fc3,
permutations,
s2pp_map,
p2s_map,
nsym_list)
except ImportError:
text = ("Import error at phono3c.permutation_symmetry_compact_fc3. "
"Corresponding python code is not implemented.")
raise RuntimeError(text)
def copy_permutation_symmetry_fc3_elem(fc3, fc3_elem, a, b, c):
for (i, j, k) in list(np.ndindex(3, 3, 3)):
fc3[a, b, c, i, j, k] = fc3_elem[i, j, k]
fc3[c, a, b, k, i, j] = fc3_elem[i, j, k]
fc3[b, c, a, j, k, i] = fc3_elem[i, j, k]
fc3[a, c, b, i, k, j] = fc3_elem[i, j, k]
fc3[b, a, c, j, i, k] = fc3_elem[i, j, k]
fc3[c, b, a, k, j, i] = fc3_elem[i, j, k]
def set_permutation_symmetry_fc3_elem(fc3, a, b, c, divisor=6):
tensor3 = np.zeros((3, 3, 3), dtype='double')
for (i, j, k) in list(np.ndindex(3, 3, 3)):
tensor3[i, j, k] = (fc3[a, b, c, i, j, k] +
fc3[c, a, b, k, i, j] +
fc3[b, c, a, j, k, i] +
fc3[a, c, b, i, k, j] +
fc3[b, a, c, j, i, k] +
fc3[c, b, a, k, j, i]) / divisor
return tensor3
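# Illustrative helper (not part of phono3py): after set_permutation_symmetry_fc3
# has been applied, every element should be invariant under simultaneous
# permutation of its atom and Cartesian indices.  This checks one (a, b, c)
# triplet of a full (non-compact) fc3 array.
def _check_permutation_symmetry_fc3_elem(fc3, a, b, c, tol=1e-8):
    for (i, j, k) in np.ndindex(3, 3, 3):
        ref = fc3[a, b, c, i, j, k]
        perms = (fc3[c, a, b, k, i, j],
                 fc3[b, c, a, j, k, i],
                 fc3[a, c, b, i, k, j],
                 fc3[b, a, c, j, i, k],
                 fc3[c, b, a, k, j, i])
        if any(abs(ref - p) > tol for p in perms):
            return False
    return True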
def set_translational_invariance_fc3(fc3):
for i in range(3):
set_translational_invariance_fc3_per_index(fc3, index=i)
def set_translational_invariance_compact_fc3(fc3, primitive):
try:
import phono3py._phono3py as phono3c
s2p_map = primitive.get_supercell_to_primitive_map()
p2s_map = primitive.get_primitive_to_supercell_map()
p2p_map = primitive.get_primitive_to_primitive_map()
permutations = primitive.get_atomic_permutations()
s2pp_map, nsym_list = get_nsym_list_and_s2pp(s2p_map,
p2p_map,
permutations)
phono3c.transpose_compact_fc3(fc3,
permutations,
s2pp_map,
p2s_map,
nsym_list,
0) # dim[0] <--> dim[1]
set_translational_invariance_fc3_per_index(fc3, index=1)
phono3c.transpose_compact_fc3(fc3,
permutations,
s2pp_map,
p2s_map,
nsym_list,
0) # dim[0] <--> dim[1]
set_translational_invariance_fc3_per_index(fc3, index=1)
set_translational_invariance_fc3_per_index(fc3, index=2)
except ImportError:
        text = ("Import error at phono3c.transpose_compact_fc3. "
                "Corresponding python code is not implemented.")
raise RuntimeError(text)
def set_translational_invariance_fc3_per_index(fc3, index=0):
for i in range(fc3.shape[(1 + index) % 3]):
for j in range(fc3.shape[(2 + index) % 3]):
for k, l, m in list(np.ndindex(3, 3, 3)):
if index == 0:
fc3[:, i, j, k, l, m] -= np.sum(
fc3[:, i, j, k, l, m]) / fc3.shape[0]
elif index == 1:
fc3[j, :, i, k, l, m] -= np.sum(
fc3[j, :, i, k, l, m]) / fc3.shape[1]
elif index == 2:
fc3[i, j, :, k, l, m] -= np.sum(
fc3[i, j, :, k, l, m]) / fc3.shape[2]
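# Illustrative helper (not part of phono3py): once
# set_translational_invariance_fc3_per_index(fc3, index) has been applied to a
# full (non-compact) fc3, the sum over that atomic index should vanish for all
# remaining indices (the acoustic sum rule).
def _check_acoustic_sum_rule_fc3(fc3, index=0, tol=1e-8):
    return np.abs(fc3.sum(axis=index)).max() < tol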
def third_rank_tensor_rotation(rot_cart, tensor):
rot_tensor = np.zeros((3, 3, 3), dtype='double')
for i in (0, 1, 2):
for j in (0, 1, 2):
for k in (0, 1, 2):
rot_tensor[i, j, k] = _third_rank_tensor_rotation_elem(
rot_cart, tensor, i, j, k)
return rot_tensor
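# Equivalent one-liner (sketch, not used above): the triple loop in
# third_rank_tensor_rotation contracts the rotation matrix with each index of
# the tensor, i.e. F'_lmn = R_li R_mj R_nk F_ijk.
def _third_rank_tensor_rotation_einsum(rot_cart, tensor):
    return np.einsum('li,mj,nk,ijk->lmn', rot_cart, rot_cart, rot_cart, tensor)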
def get_delta_fc2(dataset_second_atoms,
atom1,
forces1,
fc2,
supercell,
reduced_site_sym,
symprec):
logger.debug("get_delta_fc2")
disp_fc2 = get_constrained_fc2(supercell,
dataset_second_atoms,
atom1,
forces1,
reduced_site_sym,
symprec)
return disp_fc2 - fc2
def get_constrained_fc2(supercell,
dataset_second_atoms,
atom1,
forces1,
reduced_site_sym,
symprec):
"""
dataset_second_atoms: [{'number': 7,
'displacement': [],
'forces': []}, ...]
"""
lattice = supercell.get_cell().T
positions = supercell.get_scaled_positions()
num_atom = supercell.get_number_of_atoms()
fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
atom_list = np.unique([x['number'] for x in dataset_second_atoms])
for atom2 in atom_list:
disps2 = []
sets_of_forces = []
for disps_second in dataset_second_atoms:
if atom2 != disps_second['number']:
continue
bond_sym = get_bond_symmetry(
reduced_site_sym,
lattice,
positions,
atom1,
atom2,
symprec)
disps2.append(disps_second['displacement'])
sets_of_forces.append(disps_second['forces'] - forces1)
solve_force_constants(fc2,
atom2,
disps2,
sets_of_forces,
supercell,
bond_sym,
symprec)
    # Shift positions so that atom1 is at the origin
pos_center = positions[atom1].copy()
positions -= pos_center
rotations = np.array(reduced_site_sym, dtype='intc', order='C')
translations = np.zeros((len(reduced_site_sym), 3),
dtype='double', order='C')
permutations = compute_all_sg_permutations(positions,
rotations,
translations,
lattice,
symprec)
distribute_force_constants(fc2,
atom_list,
lattice,
rotations,
permutations)
return fc2
def solve_fc3(first_atom_num,
supercell,
site_symmetry,
displacements_first,
delta_fc2s,
symprec,
pinv_solver="numpy",
verbose=False):
logger.debug("solve_fc3")
if pinv_solver == "numpy":
solver = "numpy.linalg.pinv"
else:
try:
import phono3py._lapackepy as lapackepy
solver = "lapacke-dgesvd"
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
solver = "numpy.linalg.pinv"
if verbose:
text = ("Computing fc3[ %d, x, x ] using %s with " %
(first_atom_num + 1, solver))
if len(displacements_first) > 1:
text += "displacements:"
else:
text += "a displacement:"
print(text)
for i, v in enumerate(displacements_first):
print(" [%7.4f %7.4f %7.4f]" % tuple(v))
sys.stdout.flush()
if verbose > 2:
print(" Site symmetry:")
for i, v in enumerate(site_symmetry):
print(" [%2d %2d %2d] #%2d" % tuple(list(v[0])+[i + 1]))
print(" [%2d %2d %2d]" % tuple(v[1]))
print(" [%2d %2d %2d]\n" % tuple(v[2]))
sys.stdout.flush()
lattice = supercell.get_cell().T
site_sym_cart = np.array([similarity_transformation(lattice, sym)
for sym in site_symmetry],
dtype='double', order='C')
num_atom = supercell.get_number_of_atoms()
positions = supercell.get_scaled_positions()
pos_center = positions[first_atom_num].copy()
positions -= pos_center
logger.debug("get_positions_sent_by_rot_inv")
rot_map_syms = get_positions_sent_by_rot_inv(lattice,
positions,
site_symmetry,
symprec)
rot_disps = get_rotated_displacement(displacements_first, site_sym_cart)
logger.debug("pinv")
if "numpy" in solver:
inv_U = np.array(np.linalg.pinv(rot_disps), dtype='double', order='C')
else:
inv_U = np.zeros((rot_disps.shape[1], rot_disps.shape[0]),
dtype='double', order='C')
lapackepy.pinv(inv_U, rot_disps, 1e-13)
fc3 = np.zeros((num_atom, num_atom, 3, 3, 3), dtype='double', order='C')
logger.debug("rotate_delta_fc2s")
try:
import phono3py._phono3py as phono3c
phono3c.rotate_delta_fc2s(fc3,
delta_fc2s,
inv_U,
site_sym_cart,
rot_map_syms)
except ImportError:
for i, j in np.ndindex(num_atom, num_atom):
fc3[i, j] = np.dot(inv_U, _get_rotated_fc2s(
i, j, delta_fc2s, rot_map_syms, site_sym_cart)
).reshape(3, 3, 3)
return fc3
def cutoff_fc3(fc3,
supercell,
disp_dataset,
symmetry,
verbose=False):
if verbose:
print("Building atom mapping table...")
fc3_done = _get_fc3_done(supercell, disp_dataset, symmetry, fc3.shape[:3])
if verbose:
print("Creating contracted fc3...")
num_atom = len(supercell)
for i in range(num_atom):
for j in range(i, num_atom):
for k in range(j, num_atom):
ave_fc3 = _set_permutation_symmetry_fc3_elem_with_cutoff(
fc3, fc3_done, i, j, k)
copy_permutation_symmetry_fc3_elem(fc3, ave_fc3, i, j, k)
def cutoff_fc3_by_zero(fc3, supercell, cutoff_distance, symprec=1e-5):
num_atom = supercell.get_number_of_atoms()
lattice = supercell.get_cell().T
min_distances = np.zeros((num_atom, num_atom), dtype='double')
for i in range(num_atom): # run in supercell
for j in range(num_atom): # run in primitive
min_distances[i, j] = np.linalg.norm(
np.dot(lattice,
get_equivalent_smallest_vectors(
i, j, supercell, symprec)[0]))
for i, j, k in np.ndindex(num_atom, num_atom, num_atom):
for pair in ((i, j), (j, k), (k, i)):
if min_distances[pair] > cutoff_distance:
fc3[i, j, k] = 0
break
def show_drift_fc3(fc3,
primitive=None,
name="fc3"):
if fc3.shape[0] == fc3.shape[1]:
num_atom = fc3.shape[0]
maxval1 = 0
maxval2 = 0
maxval3 = 0
klm1 = [0, 0, 0]
klm2 = [0, 0, 0]
klm3 = [0, 0, 0]
for i, j, k, l, m in list(np.ndindex((num_atom, num_atom, 3, 3, 3))):
val1 = fc3[:, i, j, k, l, m].sum()
val2 = fc3[i, :, j, k, l, m].sum()
val3 = fc3[i, j, :, k, l, m].sum()
if abs(val1) > abs(maxval1):
maxval1 = val1
klm1 = [k, l, m]
if abs(val2) > abs(maxval2):
maxval2 = val2
klm2 = [k, l, m]
if abs(val3) > abs(maxval3):
maxval3 = val3
klm3 = [k, l, m]
else:
try:
import phono3py._phono3py as phono3c
s2p_map = primitive.get_supercell_to_primitive_map()
p2s_map = primitive.get_primitive_to_supercell_map()
p2p_map = primitive.get_primitive_to_primitive_map()
permutations = primitive.get_atomic_permutations()
s2pp_map, nsym_list = get_nsym_list_and_s2pp(s2p_map,
p2p_map,
permutations)
num_patom = fc3.shape[0]
num_satom = fc3.shape[1]
maxval1 = 0
maxval2 = 0
maxval3 = 0
klm1 = [0, 0, 0]
klm2 = [0, 0, 0]
klm3 = [0, 0, 0]
phono3c.transpose_compact_fc3(fc3,
permutations,
s2pp_map,
p2s_map,
nsym_list,
0) # dim[0] <--> dim[1]
for i, j, k, l, m in np.ndindex((num_patom, num_satom, 3, 3, 3)):
val1 = fc3[i, :, j, k, l, m].sum()
if abs(val1) > abs(maxval1):
maxval1 = val1
klm1 = [k, l, m]
phono3c.transpose_compact_fc3(fc3,
permutations,
s2pp_map,
p2s_map,
nsym_list,
0) # dim[0] <--> dim[1]
for i, j, k, l, m in np.ndindex((num_patom, num_satom, 3, 3, 3)):
val2 = fc3[i, :, j, k, l, m].sum()
val3 = fc3[i, j, :, k, l, m].sum()
if abs(val2) > abs(maxval2):
maxval2 = val2
klm2 = [k, l, m]
if abs(val3) > abs(maxval3):
maxval3 = val3
klm3 = [k, l, m]
except ImportError:
        text = ("Import error at phono3c.transpose_compact_fc3. "
                "Corresponding python code is not implemented.")
raise RuntimeError(text)
text = "Max drift of %s: " % name
text += "%f (%s%s%s) " % (maxval1,
"xyz"[klm1[0]], "xyz"[klm1[1]], "xyz"[klm1[2]])
text += "%f (%s%s%s) " % (maxval2,
"xyz"[klm2[0]], "xyz"[klm2[1]], "xyz"[klm2[2]])
text += "%f (%s%s%s)" % (maxval3,
"xyz"[klm3[0]], "xyz"[klm3[1]], "xyz"[klm3[2]])
print(text)
def _set_permutation_symmetry_fc3_elem_with_cutoff(fc3, fc3_done, a, b, c):
sum_done = (fc3_done[a, b, c] +
fc3_done[c, a, b] +
fc3_done[b, c, a] +
fc3_done[b, a, c] +
fc3_done[c, b, a] +
fc3_done[a, c, b])
tensor3 = np.zeros((3, 3, 3), dtype='double')
if sum_done > 0:
for (i, j, k) in list(np.ndindex(3, 3, 3)):
tensor3[i, j, k] = (fc3[a, b, c, i, j, k] * fc3_done[a, b, c] +
fc3[c, a, b, k, i, j] * fc3_done[c, a, b] +
fc3[b, c, a, j, k, i] * fc3_done[b, c, a] +
fc3[a, c, b, i, k, j] * fc3_done[a, c, b] +
fc3[b, a, c, j, i, k] * fc3_done[b, a, c] +
fc3[c, b, a, k, j, i] * fc3_done[c, b, a])
tensor3[i, j, k] /= sum_done
return tensor3
def _get_fc3_least_atoms(supercell,
primitive,
disp_dataset,
fc2,
symmetry,
is_compact_fc=False,
verbose=True):
symprec = symmetry.get_symmetry_tolerance()
num_satom = supercell.get_number_of_atoms()
unique_first_atom_nums = np.unique(
[x['number'] for x in disp_dataset['first_atoms']])
if is_compact_fc:
num_patom = primitive.get_number_of_atoms()
s2p_map = primitive.get_supercell_to_primitive_map()
p2p_map = primitive.get_primitive_to_primitive_map()
first_atom_nums = []
for i in unique_first_atom_nums:
if i != s2p_map[i]:
print("Something wrong in disp_fc3.yaml")
raise RuntimeError
else:
first_atom_nums.append(i)
fc3 = np.zeros((num_patom, num_satom, num_satom, 3, 3, 3),
dtype='double', order='C')
else:
first_atom_nums = unique_first_atom_nums
fc3 = np.zeros((num_satom, num_satom, num_satom, 3, 3, 3),
dtype='double', order='C')
for first_atom_num in first_atom_nums:
site_symmetry = symmetry.get_site_symmetry(first_atom_num)
displacements_first = []
delta_fc2s = []
for dataset_first_atom in disp_dataset['first_atoms']:
if first_atom_num != dataset_first_atom['number']:
continue
displacements_first.append(dataset_first_atom['displacement'])
if 'delta_fc2' in dataset_first_atom:
delta_fc2s.append(dataset_first_atom['delta_fc2'])
else:
direction = np.dot(dataset_first_atom['displacement'],
np.linalg.inv(supercell.get_cell()))
reduced_site_sym = get_reduced_site_symmetry(
site_symmetry, direction, symprec)
delta_fc2s.append(get_delta_fc2(
dataset_first_atom['second_atoms'],
dataset_first_atom['number'],
dataset_first_atom['forces'],
fc2,
supercell,
reduced_site_sym,
symprec))
fc3_first = solve_fc3(first_atom_num,
supercell,
site_symmetry,
displacements_first,
np.array(delta_fc2s, dtype='double', order='C'),
symprec,
verbose=verbose)
if is_compact_fc:
fc3[p2p_map[s2p_map[first_atom_num]]] = fc3_first
else:
fc3[first_atom_num] = fc3_first
return fc3
def _get_rotated_fc2s(i, j, fc2s, rot_map_syms, site_sym_cart):
rotated_fc2s = []
for fc2 in fc2s:
for sym, map_sym in zip(site_sym_cart, rot_map_syms):
fc2_rot = fc2[map_sym[i], map_sym[j]]
rotated_fc2s.append(similarity_transformation(sym, fc2_rot))
return np.reshape(rotated_fc2s, (-1, 9))
def _third_rank_tensor_rotation_elem(rot, tensor, l, m, n):
sum_elems = 0.
for i in (0, 1, 2):
for j in (0, 1, 2):
for k in (0, 1, 2):
sum_elems += (rot[l, i] * rot[m, j] * rot[n, k]
* tensor[i, j, k])
return sum_elems
def _get_fc3_done(supercell, disp_dataset, symmetry, array_shape):
num_atom = len(supercell)
fc3_done = np.zeros(array_shape, dtype='byte')
symprec = symmetry.tolerance
lattice = supercell.cell.T
positions = supercell.scaled_positions
rotations = symmetry.get_symmetry_operations()['rotations']
translations = symmetry.get_symmetry_operations()['translations']
atom_mapping = []
for rot, trans in zip(rotations, translations):
atom_indices = [
_get_atom_by_symmetry(lattice,
positions,
rot,
trans,
i,
symprec) for i in range(num_atom)]
atom_mapping.append(atom_indices)
for dataset_first_atom in disp_dataset['first_atoms']:
first_atom_num = dataset_first_atom['number']
site_symmetry = symmetry.get_site_symmetry(first_atom_num)
direction = np.dot(dataset_first_atom['displacement'],
np.linalg.inv(supercell.get_cell()))
reduced_site_sym = get_reduced_site_symmetry(
site_symmetry, direction, symprec)
least_second_atom_nums = []
for second_atoms in dataset_first_atom['second_atoms']:
if 'included' in second_atoms:
if second_atoms['included']:
least_second_atom_nums.append(second_atoms['number'])
elif 'cutoff_distance' in disp_dataset:
min_vec = get_equivalent_smallest_vectors(
first_atom_num,
second_atoms['number'],
supercell,
symprec)[0]
min_distance = np.linalg.norm(np.dot(lattice, min_vec))
if 'pair_distance' in second_atoms:
assert (abs(min_distance - second_atoms['pair_distance'])
< 1e-4)
if min_distance < disp_dataset['cutoff_distance']:
least_second_atom_nums.append(second_atoms['number'])
positions_shifted = positions - positions[first_atom_num]
least_second_atom_nums = np.unique(least_second_atom_nums)
for red_rot in reduced_site_sym:
second_atom_nums = [
_get_atom_by_symmetry(lattice,
positions_shifted,
red_rot,
np.zeros(3, dtype='double'),
i,
symprec) for i in least_second_atom_nums]
second_atom_nums = np.unique(second_atom_nums)
for i in range(len(rotations)):
rotated_atom1 = atom_mapping[i][first_atom_num]
for j in second_atom_nums:
fc3_done[rotated_atom1, atom_mapping[i][j]] = 1
return fc3_done
def _get_atom_by_symmetry(lattice,
positions,
rotation,
trans,
atom_number,
symprec):
rot_pos = np.dot(positions[atom_number], rotation.T) + trans
diffs = positions - rot_pos
diffs -= np.rint(diffs)
dists = np.sqrt((np.dot(diffs, lattice.T) ** 2).sum(axis=1))
rot_atoms = np.where(dists < symprec)[0] # only one should be found
if len(rot_atoms) > 0:
return rot_atoms[0]
else:
print("Position or symmetry is wrong.")
raise ValueError
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A basic command line interface for a local version of The Cannon.
"""
import argparse
import logging
import multiprocessing as mp
import numpy as np
import os
from random import shuffle
from six.moves import cPickle as pickle
from six import string_types
from astropy.table import Table
import AnniesLasso_2 as tc
logger = logging.getLogger("AnniesLasso_2")
_DEFAULT_FILENAME_COLUMN = "FILENAME"
_DEFAULT_OUTPUT_SUFFIX = "result"
def get_neighbours(labelled_set, star, label_names, K, exclude=None):
"""
Return `K` indices of the nearest neighbours of `star` in the `labelled_set`.
:param labelled_set:
A table containing high fidelity labels for all stars.
:param star:
The star to determine neighbours to.
:param label_names:
A list of label names that will be used in the model, and which will be
used to gauge proximity in label space.
:param K:
The number of neighbours to return.
:param exclude: [optional]
A list-like of indices to exclude from the list of neighbouring indices.
For example, this may be the index that corresponds to `star`.
:returns:
An array of indices of length `K`.
"""
# Pivot and rescale the labels.
D = np.sum(np.abs([(labelled_set[_] - star[_])/np.ptp(labelled_set[_]) \
for _ in label_names]), axis=0)
assert np.all(np.isfinite(D))
if exclude is not None:
if isinstance(exclude, int): exclude = [exclude]
max_D = 1 + np.max(D)
for index in exclude:
D[index] = max_D
return np.argsort(D)[:K]
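# Minimal usage sketch (not part of the CLI): build a toy labelled set and ask
# for the two nearest neighbours of its first star in rescaled label space,
# excluding the star itself.  The column names here are purely illustrative.
def _demo_neighbours():
    demo = Table({"TEFF": [5000.0, 5100.0, 6000.0, 4800.0],
                  "LOGG": [4.4, 4.5, 4.0, 4.6]})
    return get_neighbours(demo, demo[0], ["TEFF", "LOGG"], K=2, exclude=(0, ))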
def loocv(labelled_set, label_names, K=None, model_order=1,
filename_column=None, output_suffix=None, overwrite=False, **kwargs):
"""
Perform leave-one-out cross-validation using a local Cannon model at every
point in the labelled set.
:param labelled_set:
The path of a table containing labels of stars, and a column including
the path of a spectrum for that star.
:param label_names:
A list of label names to include in the model.
:param K: [optional]
The number of K nearby training set stars to train a model with. If
`None` is specified, then `K = 2 * N_labels`
:param model_order: [optional]
The polynomial order of the model to use. If the `model_order` is given
as 3, and A is a label, then `A^3` is a term in the model.
:param filename_column: [optional]
The name of a column in the `labelled_set` filename that refers to the
path of a spectrum for that star. If `None` is given, it defaults to
{_DEFAULT_FILENAME_COLUMN}
:param output_suffix: [optional]
A string suffix that will be appended to the path of every spectrum
path. If `None` is given, it defaults to {_DEFAULT_OUTPUT_SUFFIX}
:param overwrite: [optional]
Overwrite paths of existing result files.
"""
filename_column = filename_column or _DEFAULT_FILENAME_COLUMN
output_suffix = output_suffix or _DEFAULT_OUTPUT_SUFFIX
    K = K or 2 * len(label_names)
if 1 > model_order:
raise ValueError("model order must be greater than zero")
if 2 > K:
raise ValueError("K must be greater than 1")
    if kwargs.get("shuffle", False):
        labelled_set = labelled_set[np.random.permutation(len(labelled_set))]
results = []
failed, N = (0, len(labelled_set))
for i, star in enumerate(labelled_set):
spectrum_filename = star[filename_column]
basename, _ = os.path.splitext(spectrum_filename)
output_filename = "{}.pkl".format("-".join([basename, output_suffix]))
logger.info("At star {0}/{1}: {2}".format(i + 1, N, spectrum_filename))
if os.path.exists(output_filename) and not overwrite:
logger.info("Output filename {} already exists and not overwriting."\
.format(output_filename))
continue
# [1] Load the spectrum.
try:
with open(spectrum_filename, "rb") as fp:
test_flux, test_ivar = pickle.load(fp)
        except Exception:
logger.exception("Error when loading {}".format(spectrum_filename))
failed += 1
continue
# [2] What are the K closest neighbours?
indices = get_neighbours(labelled_set, star, label_names, K, exclude=(i, ))
# [3] Load those K stars and train a model.
train_flux = np.ones((K, test_flux.size))
train_ivar = np.zeros_like(train_flux)
for j, index in enumerate(indices):
            with open(labelled_set[filename_column][index], "rb") as fp:
flux, ivar = pickle.load(fp)
train_flux[j, :] = flux
train_ivar[j, :] = ivar
# [4] Train a model using those K nearest neighbours.
model = tc.L1RegularizedCannonModel(labelled_set[indices], train_flux,
train_ivar, threads=kwargs.get("threads", 1))
# TODO: Revisit this. Should these default to zero?
model.s2 = 0
model.regularization = 0
model.vectorizer = tc.vectorizer.NormalizedPolynomialVectorizer(
labelled_set[indices],
tc.vectorizer.polynomial.terminator(label_names, model_order))
model.train()
model._set_s2_by_hogg_heuristic()
# [5] Test on that star, using the initial labels.
result, cov, meta = model.fit(test_flux, test_ivar,
initial_labels=[star[label_name] for label_name in label_names],
full_output=True)
results.append([spectrum_filename] + list(result.flatten()))
# Insert a flag as to whether the result is within a convex hull of the
# labelled set.
meta = meta[0] # The first (and only) star we tested against.
meta["in_convex_hull"] = model.in_convex_hull(result)[0]
with open(output_filename, "wb") as fp:
pickle.dump((result, cov, meta), fp, 2) # For legacy.
logger.info("Saved output to {}".format(output_filename))
# Close the pool
if model.pool is not None:
model.pool.close()
model.pool.join()
del model
logger.info("Number of failures: {}".format(failed))
logger.info("Number of successes: {}".format(N - failed))
# Make the comparisons to the original set!
t = Table(rows=results, names=["FILENAME"] + list(label_names))
t.write("cannon-local-loocv-{}.fits".format(output_suffix),
overwrite=overwrite)
return None
def _loocv_wrapper(labelled_set, label_names, **kwargs):
if isinstance(label_names, string_types):
label_names = label_names.split(",")
if isinstance(labelled_set, string_types):
labelled_set = Table.read(labelled_set)
return loocv(labelled_set, label_names, **kwargs)
def _train_and_test(labelled_set, train_flux, train_ivar, label_names,
model_order, test_flux, test_ivar, initial_labels, output_filename,
**kwargs):
print("Doing {} in parallel".format(output_filename))
# [4] Train a model using those K nearest neighbours.
model = tc.L1RegularizedCannonModel(labelled_set, train_flux, train_ivar,
**kwargs)
# TODO: Revisit this. Should these default to zero?
model.s2 = 0
model.regularization = 0
model.vectorizer = tc.vectorizer.NormalizedPolynomialVectorizer(
labelled_set,
tc.vectorizer.polynomial.terminator(label_names, model_order))
model.train(progressbar=False)
model._set_s2_by_hogg_heuristic()
# [5] Test on that star, using the initial labels.
result, cov, meta = model.fit(test_flux, test_ivar,
initial_labels=initial_labels, full_output=True)
with open(output_filename, "wb") as fp:
pickle.dump((result, cov, meta), fp, 2) # For legacy.
logger.info("Saved output to {}".format(output_filename))
if model.pool is not None:
model.pool.close()
model.pool.join()
del model
return None
def test(labelled_set, test_set, label_names, K=None, model_order=1,
filename_column=None, output_suffix=None, overwrite=False, **kwargs):
"""
Perform the test step on stars in the test set, by building local Cannon
models from stars in the labelled set.
:param labelled_set:
The path of a table containing labels of stars, and a column including
the path of a spectrum for that star.
:param test_set:
The path of a table containing initial labels of stars to test, as well
as a column including the path of a spectrum for that star.
:param label_names:
A list of label names to include in the model.
:param K: [optional]
The number of K nearby training set stars to train a model with. If
`None` is specified, then `K = 2 * N_labels`
:param model_order: [optional]
The polynomial order of the model to use. If the `model_order` is given
as 3, and A is a label, then `A^3` is a term in the model.
:param filename_column: [optional]
The name of a column in the `labelled_set` filename that refers to the
path of a spectrum for that star. If `None` is given, it defaults to
{_DEFAULT_FILENAME_COLUMN}
:param output_suffix: [optional]
A string suffix that will be appended to the path of every spectrum
path. If `None` is given, it defaults to {_DEFAULT_OUTPUT_SUFFIX}
:param overwrite: [optional]
Overwrite paths of existing result files.
"""
filename_column = filename_column or _DEFAULT_FILENAME_COLUMN
output_suffix = output_suffix or _DEFAULT_OUTPUT_SUFFIX
    K = K or 2 * len(label_names)
if 1 > model_order:
raise ValueError("model order must be greater than zero")
if 2 > K:
raise ValueError("K must be greater than 1")
    if kwargs.get("shuffle", False):
        test_set = test_set[np.random.permutation(len(test_set))]
threads = kwargs.get("threads", 1)
threads = threads if threads > 0 else mp.cpu_count()
pool = None if threads < 2 else mp.Pool(threads)
processes = []
failed, N = (0, len(test_set))
for i, star in enumerate(test_set):
spectrum_filename = star[filename_column]
basename, _ = os.path.splitext(spectrum_filename)
output_filename = "{}.pkl".format("-".join([basename, output_suffix]))
logger.info("At star {0}/{1}: {2}".format(i + 1, N, spectrum_filename))
if os.path.exists(output_filename) and not overwrite:
logger.info("Output filename {} already exists and not overwriting."\
.format(output_filename))
continue
# [1] Load the spectrum.
try:
with open(spectrum_filename, "rb") as fp:
test_flux, test_ivar = pickle.load(fp)
        except Exception:
logger.exception("Error when loading {}".format(spectrum_filename))
failed += 1
continue
# [2] What are the K closest neighbours?
indices = get_neighbours(labelled_set, star, label_names, K)
# [3] Load those K stars and train a model.
train_flux = np.ones((K, test_flux.size))
train_ivar = np.zeros_like(train_flux)
for j, index in enumerate(indices):
            with open(labelled_set[filename_column][index], "rb") as fp:
flux, ivar = pickle.load(fp)
train_flux[j, :] = flux
train_ivar[j, :] = ivar
# --- parallelism can begin here
initial_labels = [star[label_name] for label_name in label_names]
args = (labelled_set[indices], train_flux, train_ivar, label_names,
model_order, test_flux, test_ivar, initial_labels, output_filename)
if pool is None:
_train_and_test(*args)
else:
processes.append(pool.apply_async(_train_and_test, args))
while len(processes) >= threads:
processes.pop(0).get()
# --- parallelism can end here
if pool is not None:
logger.info("Cleaning up the pool..")
pool.close()
pool.join()
logger.info("Number of failures: {}".format(failed))
logger.info("Number of successes: {}".format(N - failed))
return None
def _test_wrapper(labelled_set, test_set, label_names, **kwargs):
if isinstance(label_names, string_types):
label_names = label_names.split(",")
if isinstance(labelled_set, string_types):
labelled_set = Table.read(labelled_set)
if isinstance(test_set, string_types):
test_set = Table.read(test_set)
return test(labelled_set, test_set, label_names, **kwargs)
def main():
""" A command line tool parser for the Mini Cannon. """
# Create the main parser.
parser = argparse.ArgumentParser(
description="The Cannon", epilog="http://TheCannon.io")
# Create parent parser.
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
"-v", "--verbose", dest="verbose", action="store_true", default=False,
help="Verbose logging mode")
parent_parser.add_argument(
"-t", "--threads", dest="threads", type=int, default=1,
help="The number of threads to use")
parent_parser.add_argument(
"--condor", dest="condor", action="store_true", default=False,
help="Distribute action using Condor")
parent_parser.add_argument(
"--condor-chunks", dest="condor_chunks", type=int, default=100,
help="The number of chunks to distribute across Condor. "\
"This argument is ignored if --condor is not used")
parent_parser.add_argument(
"--condor-memory", dest="memory", type=int, default=2000,
help="The amount of memory (MB) to request for each Condor job. "\
"This argument is ignored if --condor is not used")
parent_parser.add_argument(
"--condor-check-frequency", dest="condor_check_frequency", default=1,
help="The number of seconds to wait before checking Condor jobs")
subparsers = parser.add_subparsers(title="action", dest="action",
description="Specify the action to perform")
loocv_parser = subparsers.add_parser("loocv", parents=[parent_parser],
help="Perform leave-one-out cross-validation")
loocv_parser.add_argument(
"labelled_set", type=str,
help="Path of a file containing labels for stars, as well as a column "\
"that refers to the path for a spectrum of that star")
loocv_parser.add_argument(
"label_names", type=str,
help="List of label names (in the `labelled_set` file) to include in "\
"the model. These should be separated by a comma")
loocv_parser.add_argument(
"--K", dest="K", type=int, default=None,
help="The number of nearest labelled set neighbours to train on. "\
"By default, this will be set to 2 * N_labels")
loocv_parser.add_argument(
"--model-order", dest="model_order", type=int, default=1,
help="The maximum order of the label. For example, if A is a label, "\
"and `model_order` is 3, then that implies `A^3` is a model term")
loocv_parser.add_argument(
"--filename-column", dest="filename_column", type=str,
help="Name of the column in the `labelled_set` that refers to the "\
"path location of the spectrum for that star")
loocv_parser.add_argument(
"--output-suffix", dest="output_suffix", type=str,
help="A string suffix that will be added to the spectrum filenames "\
"when creating the result filename")
loocv_parser.add_argument(
"--overwrite", action="store_true", default=False,
help="Overwrite existing result files")
loocv_parser.add_argument(
"--shuffle", action="store_true", default=False,
help="Shuffle the input spectra (useful for running multiple jobs "\
"in parallel)")
loocv_parser.set_defaults(func=_loocv_wrapper)
test_parser = subparsers.add_parser("test", parents=[parent_parser],
help="Run the test step (infer labels for stars) from spectra")
test_parser.add_argument(
"labelled_set", type=str,
help="Path of a file containing labels for stars, as well as a column "\
"that refers to the path for a spectrum of that star")
test_parser.add_argument(
"test_set", type=str,
help="Path of a file containing initial labels for stars, as well as a"\
" column that refers to the path for a spectrum of that star")
test_parser.add_argument(
"label_names", type=str,
help="List of label names (in the `labelled_set` file) to include in "\
"the model. These should be separated by a comma")
test_parser.add_argument(
"--K", dest="K", type=int, default=None,
help="The number of nearest labelled set neighbours to train on. "\
"By default, this will be set to 2 * N_labels")
test_parser.add_argument(
"--model-order", dest="model_order", type=int, default=1,
help="The maximum order of the label. For example, if A is a label, "\
"and `model_order` is 3, then that implies `A^3` is a model term")
test_parser.add_argument(
"--filename-column", dest="filename_column", type=str,
help="Name of the column in the `labelled_set` that refers to the "\
"path location of the spectrum for that star")
test_parser.add_argument(
"--output-suffix", dest="output_suffix", type=str,
help="A string suffix that will be added to the spectrum filenames "\
"when creating the result filename")
test_parser.add_argument(
"--overwrite", action="store_true", default=False,
help="Overwrite existing result files")
test_parser.add_argument(
"--shuffle", action="store_true", default=False,
help="Shuffle the input list of spectra (useful for running parallel "\
"jobs)")
test_parser.set_defaults(func=_test_wrapper)
args = parser.parse_args()
if args.action is None: return
if args.verbose:
logger.setLevel(logging.DEBUG)
return args.func(**args.__dict__)
if __name__ == "__main__":
main()
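# Example command lines (sketch; the script and table file names below are
# hypothetical and shown only for illustration):
#
#   python cannon_local.py loocv labelled_set.fits TEFF,LOGG,FE_H --K 30
#   python cannon_local.py test labelled_set.fits test_set.fits TEFF,LOGG,FE_H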
|
|
import dilap.core.vector as dpv
import dilap.core.model as dmo
import dilap.core.tools as dpr
import dilap.mesh.pointset as dps
import dilap.mesh.tools as dtl
import dilap.mesh.pointset as dps
import matplotlib.pyplot as plt
import math
import pdb
sqrt3 = math.sqrt(3)
class triangulation:
def plot(self,ax = None):
if ax is None:ax = dtl.plot_axes()
for tdx in range(self.tricnt):
tri = self.triangles[tdx]
if tri is None:continue
vtri = self.points.get_points(*tri)
dtl.plot_polygon(vtri,ax,center = True)
return ax
def plot_xy(self,ax = None):
if ax is None:ax = dtl.plot_axes_xy()
for tdx in range(self.tricnt):
tri = self.triangles[tdx]
if tri is None:continue
vtri = self.points.get_points(*tri)
dtl.plot_polygon_xy(vtri,ax,center = True)
for gdx in range(self.ghostcnt):
gst = self.ghosts[gdx]
if gst is None:continue
gpair = self.points.get_points(gst[0],gst[1])
dtl.plot_edges_xy(gpair,ax,lw = 5.0)
return ax
# add a positively oriented ghost triangle u,v,g
def add_ghost(self,u,v):
self.ghosts.append((u,v,'g'))
self.eg_ghost_lookup[(u,v)] = self.ghostcnt
self.ghostcnt += 1
# delete a positively oriented ghost triangle u,v,g
def delete_ghost(self,u,v):
ghost = self.eg_ghost_lookup[(u,v)]
if not ghost is None:self.ghosts[ghost] = None
self.eg_ghost_lookup[(u,v)] = None
# add a positively oriented triangle u,v,w
def add_triangle(self,u,v,w):
self.triangles.append((u,v,w))
self.eg_tri_lookup[(u,v)] = self.tricnt
self.eg_tri_lookup[(v,w)] = self.tricnt
self.eg_tri_lookup[(w,u)] = self.tricnt
self.tricnt += 1
# delete a positively oriented triangle u,v,w
def delete_triangle(self,u,v,w):
tri = self.eg_tri_lookup[(u,v)]
if not tri is None:self.triangles[tri] = None
self.eg_tri_lookup[(u,v)] = None
self.eg_tri_lookup[(v,w)] = None
self.eg_tri_lookup[(w,u)] = None
    # return a vertex x such that u,v,x
    # is a positively oriented triangle
def adjacent(self,u,v):
ekey = (u,v)
if ekey in self.eg_tri_lookup:
tri = self.eg_tri_lookup[(u,v)]
if not tri is None:
triv = [x for x in self.triangles[tri] if not x in ekey][0]
return triv
if ekey in self.eg_ghost_lookup:
tri = self.eg_ghost_lookup[(u,v)]
if not tri is None:return self.ghosts[tri][2]
# return vertices v,w such that uvw
# is a positively oriented triangle
    def adjacent_one(self,u):raise NotImplementedError
    # plc is a piecewise linear complex to be triangulated
def __init__(self,plc):
self.points = dps.pointset()
self.triangles = []
self.tricnt = 0
self.eg_tri_lookup = {}
self.ghosts = []
self.ghostcnt = 0
self.eg_ghost_lookup = {}
self.plc = plc
self.cover(plc)
def initial_cover(self,plc):
convexbnd = dpr.pts_to_convex_xy(plc.points.get_points())
convexcom = dpv.center_of_mass(convexbnd)
convexrad = max([dpv.distance(cx,convexcom) for cx in convexbnd])+1000.0
#convexrad = max([dpv.distance(cx,convexcom) for cx in convexbnd])+10
c01delta = dpv.vector(-1,-1,0).normalize().scale_u(convexrad)
c02delta = dpv.vector( 1,-1,0).normalize().scale_u(convexrad)
c03delta = dpv.vector( 0, 1,0).normalize().scale_u(convexrad)
c01 = convexcom.copy().translate(c01delta)
c02 = convexcom.copy().translate(c02delta)
c03 = convexcom.copy().translate(c03delta)
c0psx = self.points.add_points(c01,c02,c03)
self.add_triangle(*c0psx)
ghost1 = (c0psx[2],c0psx[1])
self.add_ghost(*ghost1)
ghost2 = (c0psx[1],c0psx[0])
self.add_ghost(*ghost2)
ghost3 = (c0psx[0],c0psx[2])
self.add_ghost(*ghost3)
self.initial_cover_extras = c0psx
#ax = self.plc.plot_xy()
#self.plot_xy(ax)
#plt.show()
    # generate a triangulation of the plc
def cover(self,plc):
hmin = plc.chew1_subdivide_edges()
plc.subdivide_edges()
self.initial_cover(plc)
self.cover_points(plc)
#self.cover_edges(plc)
self.cover_polygons(plc)
self.cover_edges(plc)
self.chew1_refine(plc,hmin)
#self.ruppert_refine(plc)
def cover_points(self,plc):
for plcx in range(plc.points.pcnt):
plcp = plc.points.ps[plcx].copy()
self.point_location(plcp)
#ax = self.plc.plot_xy()
#self.plot_xy(ax)
#plt.show()
# given v1,v2, the positions of the endpoints of an edge,
# return True if p encroaches upon the edge
def encroaches_edge(self,v1,v2,p):
cc = dpv.midpoint(v1,v2)
cr = dpv.distance(cc,v1)
if p.near(v1) or p.near(v2):return False
if dpr.inside_circle(p,cc,cr,(dpv.zero(),dpv.zhat)):return True
else:return False
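    # Minimal stand-alone sketch (not used by the class) of the diametral
    # circle test above, written for plain (x, y) tuples: p encroaches the
    # edge v1-v2 when it lies strictly inside the circle whose diameter is
    # that edge.
    @staticmethod
    def _encroaches_edge_sketch(v1, v2, p):
        cx, cy = (v1[0] + v2[0])/2.0, (v1[1] + v2[1])/2.0
        cr2 = (v1[0] - cx)**2 + (v1[1] - cy)**2
        return (p[0] - cx)**2 + (p[1] - cy)**2 < cr2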
# given the edge u,v which bounds two non-ghost triangles
# remove those triangles and replace with the alternative two that are
# bounded by the same 4 vertices
def flip_edge(self,u,v):
print('flipping an edge')
o1 = self.adjacent(u,v)
o2 = self.adjacent(v,u)
vs = self.points.get_points(u,v,o1,o2)
tcp1,tcr1 = dpr.circumscribe_tri(vs[0],vs[1],vs[2])
tcp2,tcr2 = dpr.circumscribe_tri(vs[1],vs[0],vs[3])
if tcp1.near(tcp2) and dtl.isnear(tcr1,tcr2):
print('4way!',tcp1,tcp2,tcr1,tcr2,u,v,o1,o2)
return ()
'''#
ax = dtl.plot_axes_xy()
dtl.plot_polygon_xy(self.points.get_points(u,v,o1),ax)
vs = self.points.get_points(u,v,o1)
tcp,tcr = dpr.circumscribe_tri(*vs)
dtl.plot_circle_xy(tcp,tcr,ax,True)
dtl.plot_polygon_xy(self.points.get_points(v,u,o2),ax)
vs = self.points.get_points(v,u,o2)
tcp,tcr = dpr.circumscribe_tri(*vs)
dtl.plot_circle_xy(tcp,tcr,ax,True)
self.plc.plot_xy(ax)
self.plot_xy(ax)
#plt.show()
'''#
self.delete_triangle(u,v,o1)
self.delete_triangle(v,u,o2)
self.add_triangle(o1,o2,v)
self.add_triangle(o2,o1,u)
#self.dig_cavity(o1,o2,v)
#self.dig_cavity(o2,o1,u)
ax = dtl.plot_axes_xy()
dtl.plot_polygon_xy(self.points.get_points(o1,o2,v),ax,lw = 4.0)
vs = self.points.get_points(o1,o2,v)
tcp,tcr = dpr.circumscribe_tri(*vs)
dtl.plot_circle_xy(tcp,tcr,ax,True)
dtl.plot_polygon_xy(self.points.get_points(o2,o1,u),ax,lw = 4.0)
vs = self.points.get_points(o2,o1,u)
tcp,tcr = dpr.circumscribe_tri(*vs)
dtl.plot_circle_xy(tcp,tcr,ax,True)
self.plc.plot_xy(ax)
self.plot_xy(ax)
plt.show()
return (o2,v),(v,o1),(o1,u),(u,o2)
# given v1,v2, the positions of the endpoints of an edge,
# return True if locally delaunay
def locally_delaunay_edge(self,u,v):
plcu,plcv = self.plc.points.find_points(*self.points.get_points(u,v))
if self.plc.segment(plcu,plcv):return True
o1 = self.adjacent(u,v)
o2 = self.adjacent(v,u)
if o1 is None or o2 is None:return True
if o1 == 'g' or o2 == 'g':return True
up,vp,op1,op2 = self.points.get_points(u,v,o1,o2)
if dtl.segments_intersect((up,vp),(op1,op2)):
if dtl.incircle(up,vp,op1,op2) > 0:return False
if dtl.incircle(vp,up,op2,op1) > 0:return False
return True
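    # Minimal stand-alone sketch (not used by the class) of the incircle
    # predicate relied on above, written for plain (x, y) tuples: the
    # determinant is positive exactly when d lies strictly inside the
    # circumcircle of the counter-clockwise triangle a,b,c.
    @staticmethod
    def _incircle_sketch(a, b, c, d):
        ax, ay = a[0] - d[0], a[1] - d[1]
        bx, by = b[0] - d[0], b[1] - d[1]
        cx, cy = c[0] - d[0], c[1] - d[1]
        return ((ax*ax + ay*ay)*(bx*cy - cx*by) -
                (bx*bx + by*by)*(ax*cy - cx*ay) +
                (cx*cx + cy*cy)*(ax*by - bx*ay))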
# apply the flip algorithm until all edges are locally delaunay
def cover_edges(self,plc):
unfinished = [e for e in self.eg_tri_lookup]
while unfinished:
pre = unfinished[:]
unfin = unfinished.pop(0)
if not self.locally_delaunay_edge(*unfin):
nedges = self.flip_edge(*unfin)
unfinished.extend(nedges)
def cover_polygons(self,plc):
extras = []
for tdx in range(self.tricnt):
tri = self.triangles[tdx]
if tri is None:continue
else:u,v,w = tri
ptri = self.points.get_points(u,v,w)
extras.append(tdx)
for p in plc.polygons:
eb,ibs = p
ebnd = plc.points.get_points(*[plc.edges[x][0] for x in eb])
if dtl.concaves_contains(ebnd,ptri):
extras.remove(tdx)
for ib in ibs:
ibndxs = [plc.edges[x][0] for x in ib]
ibnd = plc.points.get_points(*ibndxs)
if dtl.concaves_contains(ibnd,ptri):
extras.append(tdx)
break
for x in extras:
xtri = self.triangles[x]
if xtri is None:continue
x1,x2,x3 = xtri
self.delete_triangle(x1,x2,x3)
self.ghost_border(plc)
# delete all ghosts and add new ghosts according to where they
# should be after covering the plc
def ghost_border(self,plc):
for gst in self.ghosts:
if gst is None:continue
g1,g2,g = gst
self.delete_ghost(g1,g2)
for plce in plc.edges:
if plce is None:continue
plce1,plce2 = plc.points.get_points(*plce)
e1,e2 = self.points.find_points(plce1,plce2)
eadj = self.adjacent(e1,e2)
if eadj is None:self.add_ghost(e1,e2)
eadj = self.adjacent(e2,e1)
if eadj is None:self.add_ghost(e2,e1)
# return a dictionary of the length of
# every edge currently in the mesh
def edge_lengths(self):
elengths = {}
for tdx in range(self.tricnt):
tri = self.triangles[tdx]
if tri is None:continue
t1,t2,t3 = tri
tp1,tp2,tp3 = self.points.get_points(t1,t2,t3)
if not (t1,t2) in elengths:
d12 = dpv.distance(tp1,tp2)
elengths[(t1,t2)] = d12
elengths[(t2,t1)] = d12
if not (t2,t3) in elengths:
d23 = dpv.distance(tp2,tp3)
elengths[(t2,t3)] = d23
elengths[(t3,t2)] = d23
if not (t3,t1) in elengths:
d31 = dpv.distance(tp3,tp1)
elengths[(t3,t1)] = d31
elengths[(t1,t3)] = d31
return elengths
def chew1_refine(self,plc,h):
unfinished = [t for t in self.triangles]
while unfinished:
unfin = unfinished.pop(0)
if unfin is None:continue
if not unfin in self.triangles:continue
ufx1,ufx2,ufx3 = unfin
tcp = self.chew1_skinny_triangle(ufx1,ufx2,ufx3,h)
if not tcp is None:
trip = self.points.get_points(ufx1)
tcppoly = plc.find_polygon(tcp)
tripoly = plc.find_polygon(*trip)
#print('chewref',tcppoly,tripoly)
if not tcppoly is None and tcppoly == tripoly:
ntxs = self.point_location(tcp)
for ntx in ntxs:unfinished.append(self.triangles[ntx])
'''#
if ntxs:
ax = self.plot_xy()
for ntx in ntxs:
tvs = self.points.get_points(*self.triangles[ntx])
dtl.plot_polygon_xy(tvs,ax,False,10.0)
tvs = self.points.get_points(ufx1,ufx2,ufx3)
tcp,tcr = dpr.circumscribe_tri(*tvs)
dtl.plot_circle_xy(tcp,tcr,ax,True)
dtl.plot_polygon_xy(tvs,ax,True,5.0)
plt.show()
'''#
# if triangle uvw is skinny by chew1 standards,
# return the center of its circumcircle otherwise return None
def chew1_skinny_triangle(self,u,v,w,h):
vs = self.points.get_points(u,v,w)
tcp,tcr = dpr.circumscribe_tri(*vs)
if tcr/h > 1.0:return tcp
def chew2_refine(self,plc,b = 2.0):
        raise NotImplementedError
# if triangle uvw is skinny by ruppert standards,
# return the center of its circumcircle otherwise return None
def ruppert_skinny_triangle(self,u,v,w,b = 2.0):
vs = self.points.get_points(u,v,w)
tcp,tcr = dpr.circumscribe_tri(*vs)
if tcr/dtl.shortest_edge_tri(*vs) > b:return tcp
def ruppert_refine(self,plc,b = 2.0):
unfinished = [e for e in plc.edges]
while unfinished:
unfin = unfinished.pop(0)
if unfin is None:continue
v1,v2 = plc.points.get_points(*unfin)
isld = self.locally_delaunay_edge(v1,v2)
if not isld:
ne1,ne2 = plc.split_edge(*unfin)
unfinished.append(ne1)
unfinished.append(ne2)
newp = plc.points.ps[ne1[1]]
self.point_location(newp)
unfinished = [t for t in self.triangles]
while unfinished:
unfin = unfinished.pop(0)
if unfin is None:continue
tcp = self.ruppert_skinny_triangle(*unfin)
if not tcp is None:
print('refinnnning skinny guy!',tcp)
dodig = True
for e in plc.edges:
if e is None:continue
v1,v2 = plc.points.get_points(*e)
ench = self.encroaches_edge(v1,v2,tcp)
if ench:
dodig = False
ne1,ne2 = plc.split_edge(*e)
newp = plc.points.ps[ne1[1]]
self.point_location(newp)
if dodig:
ntxs = self.point_location(tcp)
for ntx in ntxs:unfinished.append(self.triangles[ntx])
print('ruppert refined the mesh!')
ax = self.plot_xy()
plt.show()
def point_location(self,y):
pretricnt = self.tricnt
for pdx in range(self.points.pcnt):
if self.points.ps[pdx].near(y):
return ()
nv = self.points.add_point(y)
onb = self.point_on_boundary(nv)
if onb:
v,w,x = self.ghosts[onb]
self.delete_ghost(v,w)
self.add_ghost(v,nv)
self.add_ghost(nv,w)
tu = self.adjacent(w,v)
self.delete_triangle(w,v,tu)
self.add_triangle(tu,nv,v)
self.add_triangle(tu,w,nv)
#self.dig_cavity(tu,nv,v)
#self.dig_cavity(tu,w,nv)
return [x for x in range(pretricnt,self.tricnt)]
for tdx in range(self.tricnt):
tri = self.triangles[tdx]
if tri is None:continue
else:u,v,w = tri
vu,vv,vw = self.points.get_points(u,v,w)
if dpv.inside(y,[vu,vv,vw]):
self.insert_vertex(nv,*self.triangles[tdx])
return [x for x in range(pretricnt,self.tricnt)]
for gdx in range(self.ghostcnt):
ghost = self.ghosts[gdx]
if ghost is None:continue
else:u,v,w = ghost
vu,vv = self.points.get_points(u,v)
if not dtl.orient2d(vu,vv,y) < 0:
self.insert_ghost_vertex(nv,u,v,w)
return [x for x in range(pretricnt,self.tricnt)]
def point_on_boundary(self,u):
up = self.points.ps[u]
for gdx in range(self.ghostcnt):
gst = self.ghosts[gdx]
if gst is None:continue
g1,g2 = self.points.get_points(gst[0],gst[1])
dx = g2.x - g1.x
dy = g2.y - g1.y
dv = math.sqrt(dx**2 + dy**2)
norm = dpv.vector(dy/dv,-dx/dv,0)
nrmd = dpv.distance_to_edge(up,g1,g2,norm)
if dtl.isnear(nrmd,0):
linkd = dpv.distance(up,g1)+dpv.distance(up,g2)
if dtl.isnear(linkd,dpv.distance(g1,g2)):
return gdx
# u is the vertex to insert. vwx is a positively oriented triangle whose
# circumcircle encloses u
def insert_vertex(self,u,v,w,x):
self.delete_triangle(v,w,x)
self.dig_cavity(u,v,w)
self.dig_cavity(u,w,x)
self.dig_cavity(u,x,v)
# u is a new vertex; is the oriented triangle u,v,w delaunay?
def dig_cavity(self,u,v,w):
# find triangle wvx opposite the facet vw from u
x = self.adjacent(w,v)
if x is None:return
elif x == 'g':self.add_triangle(u,v,w)
else:
vu,vv,vw,vx = self.points.get_points(u,v,w,x)
if dtl.incircle(vu,vv,vw,vx) > 0:
self.delete_triangle(w,v,x)
self.dig_cavity(u,v,x)
self.dig_cavity(u,x,w)
else:
# w,v is a facet of the cavity and uvw is delaunay
self.add_triangle(u,v,w)
# u is the vertex to insert. vwg is a positively oriented ghost triangle whose
# circumcircle encloses u
def insert_ghost_vertex(self,u,v,w,x):
if not x == 'g':raise ValueError
self.delete_ghost(v,w)
self.add_ghost(v,u)
self.add_ghost(u,w)
onb = self.point_on_boundary(u)
if onb is None:self.add_triangle(u,v,w)
def pelt(self):
s = dmo.model()
for f in self.triangles:
if f is None:continue
v1,v2,v3 = self.points.get_points(*f)
s._triangle(v1,v2,v3)
return s
|
|
import PythonQt
from PythonQt import QtCore, QtGui
import re
import ddapp.objectmodel as om
import ddapp.visualization as vis
from ddapp.timercallback import TimerCallback
from ddapp import affordanceitems
#from ddapp import lcmUtils
from ddapp import callbacks
from ddapp import cameracontrol
#from ddapp import midi
from ddapp import propertyset
from ddapp import splinewidget
from ddapp import transformUtils
#from ddapp import teleoppanel
#from ddapp import footstepsdriverpanel
from ddapp import applogic as app
from ddapp import vtkAll as vtk
from ddapp import filterUtils
from ddapp.shallowCopy import shallowCopy
#from ddapp import segmentationpanel
from ddapp import segmentation
from ddapp import segmentationroutines
from ddapp import frameupdater
import numpy as np
import ioUtils
import os
import random
import colorsys
# todo: refactor these global variables
# several functions in this module depend on these global variables
# which are set by calling ViewBehaviors.addRobotBehaviors().
# These could be refactored to be members of a new behaviors class.
robotModel = None
handFactory = None
neckDriver = None
footstepsDriver = None
robotLinkSelector = None
lastRandomColor = 0.0
class MidiBehaviorControl(object):
def __init__(self):
self.timer = TimerCallback(targetFps=10)
self.timer.callback = self.tick
self.stop = self.timer.stop
self.reader = None
self.initReader()
self.inputs = {
'slider' : [0, 8, True],
'dial' : [16, 8, True],
'r_button' : [64, 8, False],
'm_button' : [48, 8, False],
's_button' : [32, 8, False],
'track_left' : [58, 1, False],
'track_right' : [59, 1, False],
'cycle' : [46, 1, False],
'marker_set' : [60, 1, False],
'marker_left' : [61, 1, False],
'marker_right' : [62, 1, False],
'rewind' : [43, 1, False],
'fastforward' : [44, 1, False],
'stop' : [42, 1, False],
'play' : [41, 1, False],
'record' : [45, 1, False],
}
signalNames = []
for inputName, inputDescription in self.inputs.iteritems():
channelStart, numChannels, isContinuous = inputDescription
for i in xrange(numChannels):
channelId = '' if numChannels == 1 else '_%d' % i
if isContinuous:
signalNames.append('%s%s_value_changed' % (inputName, channelId))
else:
signalNames.append('%s%s_pressed' % (inputName, channelId))
signalNames.append('%s%s_released' % (inputName, channelId))
self.callbacks = callbacks.CallbackRegistry(signalNames)
def start(self):
self.initReader()
if self.reader is not None:
self.timer.start()
def initReader(self):
if self.reader:
return
try:
self.reader = midi.MidiReader(midi.findKorgNanoKontrol2())
except:
print 'midi controller not found.'
self.reader = None
def onMidiCommand(self, channel, value):
#print channel, '%.2f' % value
inputs = self.inputs
for inputName, inputDescription in inputs.iteritems():
channelStart, numChannels, isContinuous = inputDescription
if channelStart <= channel < (channelStart + numChannels):
if numChannels > 1:
inputName = '%s_%d' % (inputName, channel - channelStart)
if isContinuous:
self.onContinuousInput(inputName, value)
elif value == 1:
self.onButtonDown(inputName)
elif value == 0:
self.onButtonUp(inputName)
def onContinuousInput(self, name, value):
#print name, '%.2f' % value
self.callbacks.process(name + '_value_changed', value)
def onButtonDown(self, name):
#print name, 'down'
self.callbacks.process(name + '_pressed')
def onButtonUp(self, name):
#print name, 'up'
self.callbacks.process(name + '_released')
def tick(self):
try:
messages = self.reader.getMessages()
except:
messages = []
if not messages:
return
targets = {}
for message in messages:
channel = message[2]
value = message[3]
targets[channel] = value
for channel, value in targets.iteritems():
position = value/127.0
self.onMidiCommand(channel, position)
def resetCameraToRobot(view):
t = robotModel.getLinkFrame('utorso')
focalPoint = [0.0, 0.0, 0.0]
position = [-4.0, -2.0, 2.0]
t.TransformPoint(focalPoint, focalPoint)
t.TransformPoint(position, position)
flyer = cameracontrol.Flyer(view)
flyer.zoomTo(focalPoint, position)
def resetCameraToHeadView():
head = robotModel.getLinkFrame('head')
utorso = robotModel.getLinkFrame('utorso')
viewDirection = np.array([1.0, 0.0, 0.0])
utorso.TransformVector(viewDirection, viewDirection)
cameraPosition = np.array(head.GetPosition()) + 0.10 * viewDirection
camera = view.camera()
focalOffset = np.array(camera.GetFocalPoint()) - np.array(camera.GetPosition())
focalOffset /= np.linalg.norm(focalOffset)
camera.SetPosition(cameraPosition)
camera.SetFocalPoint(cameraPosition + focalOffset*0.03)
camera.SetViewUp([0, 0, 1])
camera.SetViewAngle(90)
view.render()
def zoomToPick(displayPoint, view):
pickedPoint, prop, _ = vis.pickProp(displayPoint, view)
if not prop:
return
flyer = cameracontrol.Flyer(view)
flyer.zoomTo(pickedPoint)
def getChildFrame(obj):
if hasattr(obj, 'getChildFrame'):
return obj.getChildFrame()
def placeHandModel(displayPoint, view, side='left'):
obj, _ = vis.findPickedObject(displayPoint, view)
if isinstance(obj, vis.FrameItem):
_, handFrame = handFactory.placeHandModelWithTransform(obj.transform, view, side=side, parent=obj.parent())
handFrame.frameSync = vis.FrameSync()
handFrame.frameSync.addFrame(obj)
handFrame.frameSync.addFrame(handFrame, ignoreIncoming=True)
return
pickedPoint, prop, _, normal = vis.pickPoint(displayPoint, view, pickType='cells', tolerance=0.0, returnNormal=True)
obj = vis.getObjectByProp(prop)
if not obj:
return
yaxis = -normal
zaxis = [0,0,1]
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
zaxis = np.cross(xaxis, yaxis)
zaxis /= np.linalg.norm(zaxis)
t = transformUtils.getTransformFromAxes(-zaxis, yaxis, xaxis)
t.PostMultiply()
t.Translate(pickedPoint)
handObj, handFrame = handFactory.placeHandModelWithTransform(t, view, side=side, parent=obj)
syncFrame = getChildFrame(obj)
if syncFrame:
handFrame.frameSync = vis.FrameSync()
handFrame.frameSync.addFrame(handFrame, ignoreIncoming=True)
handFrame.frameSync.addFrame(syncFrame)
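def _frameAxesFromNormal(normal):
    '''
    Illustrative sketch (not used above): reproduce, with plain numpy, the
    right-handed axes that placeHandModel derives from a picked surface
    normal.  Degenerate when the normal is parallel to the world z axis,
    just like the inline computation it mirrors.
    '''
    yaxis = -np.asarray(normal, dtype=float)
    zaxis = np.array([0.0, 0.0, 1.0])
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    zaxis = np.cross(xaxis, yaxis)
    zaxis /= np.linalg.norm(zaxis)
    return xaxis, yaxis, zaxis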
class RobotLinkSelector(object):
def __init__(self):
self.selectedLink = None
self.setupMenuAction()
def setupMenuAction(self):
self.action = app.addMenuAction('Tools', 'Robot Link Selector')
self.action.setCheckable(True)
self.action.checked = False
def enabled(self):
return self.action.checked == True
def selectLink(self, displayPoint, view):
if not self.enabled():
return False
robotModel, _ = vis.findPickedObject(displayPoint, view)
try:
robotModel.model.getLinkNameForMesh
except AttributeError:
return False
model = robotModel.model
pickedPoint, _, polyData = vis.pickProp(displayPoint, view)
linkName = model.getLinkNameForMesh(polyData)
if not linkName:
return False
fadeValue = 1.0 if linkName == self.selectedLink else 0.05
for name in model.getLinkNames():
linkColor = model.getLinkColor(name)
linkColor.setAlphaF(fadeValue)
model.setLinkColor(name, linkColor)
if linkName == self.selectedLink:
self.selectedLink = None
vis.hideCaptionWidget()
om.removeFromObjectModel(om.findObjectByName('selected link frame'))
else:
self.selectedLink = linkName
linkColor = model.getLinkColor(self.selectedLink)
linkColor.setAlphaF(1.0)
model.setLinkColor(self.selectedLink, linkColor)
vis.showCaptionWidget(robotModel.getLinkFrame(self.selectedLink).GetPosition(), self.selectedLink, view=view)
vis.updateFrame(robotModel.getLinkFrame(self.selectedLink), 'selected link frame', scale=0.2, parent=robotModel)
return True
def toggleFrameWidget(displayPoint, view):
obj, _ = vis.findPickedObject(displayPoint, view)
if not isinstance(obj, vis.FrameItem):
obj = getChildFrame(obj)
if not obj:
return False
edit = not obj.getProperty('Edit')
obj.setProperty('Edit', edit)
parent = obj.parent()
if getChildFrame(parent) == obj:
parent.setProperty('Alpha', 0.5 if edit else 1.0)
return True
def newWalkingGoal(displayPoint, view):
footFrame = footstepsDriver.getFeetMidPoint(robotModel)
worldPt1, worldPt2 = vis.getRayFromDisplayPoint(view, displayPoint)
groundOrigin = footFrame.GetPosition()
groundNormal = [0.0, 0.0, 1.0]
selectedGroundPoint = [0.0, 0.0, 0.0]
t = vtk.mutable(0.0)
vtk.vtkPlane.IntersectWithLine(worldPt1, worldPt2, groundNormal, groundOrigin, t, selectedGroundPoint)
footFrame.Translate(np.array(selectedGroundPoint) - np.array(footFrame.GetPosition()))
footstepsdriverpanel.panel.onNewWalkingGoal(footFrame)
def toggleFootstepWidget(displayPoint, view, useHorizontalWidget=False):
obj, _ = vis.findPickedObject(displayPoint, view)
if not obj:
return False
name = obj.getProperty('Name')
if name in ('footstep widget', 'footstep widget frame'):
om.removeFromObjectModel(om.findObjectByName('footstep widget'))
return True
match = re.match('^step (\d+)$', name)
if not match:
return False
stepIndex = int(match.group(1))
existingWidget = om.findObjectByName('footstep widget')
if existingWidget:
previousStep = existingWidget.stepIndex
print 'have existing widget for step:', stepIndex
om.removeFromObjectModel(existingWidget)
if previousStep == stepIndex:
            print('returning because widget was for selected step')
return True
footMesh = shallowCopy(obj.polyData)
footFrame = transformUtils.copyFrame(obj.getChildFrame().transform)
if useHorizontalWidget:
rpy = [0.0, 0.0, transformUtils.rollPitchYawFromTransform(footFrame)[2]]
footFrame = transformUtils.frameFromPositionAndRPY(footFrame.GetPosition(), np.degrees(rpy))
footObj = vis.showPolyData(footMesh, 'footstep widget', parent='planning', alpha=0.2)
footObj.stepIndex = stepIndex
frameObj = vis.showFrame(footFrame, 'footstep widget frame', parent=footObj, scale=0.2)
footObj.actor.SetUserTransform(frameObj.transform)
footObj.setProperty('Color', obj.getProperty('Color'))
frameObj.setProperty('Edit', True)
rep = frameObj.widget.GetRepresentation()
rep.SetTranslateAxisEnabled(2, False)
rep.SetRotateAxisEnabled(0, False)
rep.SetRotateAxisEnabled(1, False)
frameObj.widget.HandleRotationEnabledOff()
walkGoal = om.findObjectByName('walking goal')
if walkGoal:
walkGoal.setProperty('Edit', False)
def onFootWidgetChanged(frame):
footstepsDriver.onStepModified(stepIndex - 1, frame)
frameObj.connectFrameModified(onFootWidgetChanged)
return True
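# Starts an end-effector reach teleop towards the given frame and keeps the
# teleop goal frame synchronized with the source frame.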
def reachToFrame(frameObj, side, collisionObj):
goalFrame = teleoppanel.panel.endEffectorTeleop.newReachTeleop(frameObj.transform, side, collisionObj)
goalFrame.frameSync = vis.FrameSync()
goalFrame.frameSync.addFrame(goalFrame, ignoreIncoming=True)
goalFrame.frameSync.addFrame(frameObj)
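# Small helpers used by the right-click menu to resolve the picked object
# into a frame, a grasp seed, or the parent object used for collisions.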
def getAsFrame(obj):
if isinstance(obj, vis.FrameItem):
return obj
elif hasattr(obj, 'getChildFrame'):
return obj.getChildFrame()
def isGraspSeed(obj):
return hasattr(obj, 'side')
def getCollisionParent(obj):
'''
If obj is an affordance, return obj
If obj is a frame or a grasp seed, return first parent.
'''
if isinstance(obj, vis.FrameItem):
return obj.parent()
if isGraspSeed(obj):
return obj.parent()
else:
return obj
# The most recently cached PickedPoint - available as input to any other algorithm
lastCachedPickedPoint = np.array([0,0,0])
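# Builds and shows the context menu for the picked object.  The menu always
# offers hide/delete/select and an embedded properties panel; further actions
# are added depending on whether the picked object is an affordance, a grasp
# seed, a frame, or a point cloud.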
def showRightClickMenu(displayPoint, view):
pickedObj, pickedPoint = vis.findPickedObject(displayPoint, view)
if not pickedObj:
return
objectName = pickedObj.getProperty('Name')
if objectName == 'grid':
return
displayPoint = displayPoint[0], view.height - displayPoint[1]
globalPos = view.mapToGlobal(QtCore.QPoint(*displayPoint))
menu = QtGui.QMenu(view)
widgetAction = QtGui.QWidgetAction(menu)
label = QtGui.QLabel('<b>%s</b>' % objectName)
label.setContentsMargins(9,9,6,6)
widgetAction.setDefaultWidget(label)
menu.addAction(widgetAction)
menu.addSeparator()
propertiesPanel = PythonQt.dd.ddPropertiesPanel()
propertiesPanel.setBrowserModeToWidget()
propertyset.PropertyPanelHelper.addPropertiesToPanel(pickedObj.properties, propertiesPanel)
def onPropertyChanged(prop):
om.PropertyPanelHelper.setPropertyFromPanel(prop, propertiesPanel, pickedObj.properties)
propertiesPanel.connect('propertyValueChanged(QtVariantProperty*)', onPropertyChanged)
propertiesMenu = menu.addMenu('Properties')
propertiesWidgetAction = QtGui.QWidgetAction(propertiesMenu)
propertiesWidgetAction.setDefaultWidget(propertiesPanel)
propertiesMenu.addAction(propertiesWidgetAction)
def onDelete():
om.removeFromObjectModel(pickedObj)
def onHide():
pickedObj.setProperty('Visible', False)
def onSelect():
om.setActiveObject(pickedObj)
reachFrame = getAsFrame(pickedObj)
collisionParent = getCollisionParent(pickedObj)
def onReachLeft():
reachToFrame(reachFrame, 'left', collisionParent)
def onReachRight():
reachToFrame(reachFrame, 'right', collisionParent)
def flipHandSide():
for obj in [pickedObj] + pickedObj.children():
if not isGraspSeed(obj):
continue
side = 'right' if obj.side == 'left' else 'left'
obj.side = side
color = [1.0, 1.0, 0.0]
if side == 'right':
color = [0.33, 1.0, 0.0]
obj.setProperty('Color', color)
def flipHandThumb():
handFrame = pickedObj.children()[0]
t = transformUtils.copyFrame(handFrame.transform)
t.PreMultiply()
t.RotateY(180)
handFrame.copyFrame(t)
pickedObj._renderAllViews()
def onSplineLeft():
splinewidget.planner.newSpline(pickedObj, 'left')
def onSplineRight():
splinewidget.planner.newSpline(pickedObj, 'right')
def getPointCloud(obj):
try:
obj = obj.model.polyDataObj
except AttributeError:
pass
try:
obj.polyData
except AttributeError:
return None
    if obj and obj.polyData.GetNumberOfPoints():  # and (obj.polyData.GetNumberOfCells() == obj.polyData.GetNumberOfVerts()):
return obj
pointCloudObj = getPointCloud(pickedObj)
affordanceObj = pickedObj if isinstance(pickedObj, affordanceitems.AffordanceItem) else None
def onSegmentGround():
groundPoints, scenePoints = segmentation.removeGround(pointCloudObj.polyData)
vis.showPolyData(groundPoints, 'ground points', color=[0,1,0], parent='segmentation')
vis.showPolyData(scenePoints, 'scene points', color=[1,0,1], parent='segmentation')
pickedObj.setProperty('Visible', False)
def onCopyPointCloud():
global lastRandomColor
polyData = vtk.vtkPolyData()
polyData.DeepCopy(pointCloudObj.polyData)
if pointCloudObj.getChildFrame():
polyData = segmentation.transformPolyData(polyData, pointCloudObj.getChildFrame().transform)
polyData = segmentation.addCoordArraysToPolyData(polyData)
# generate random color, and average with a common color to make them generally similar
lastRandomColor = lastRandomColor + 0.1 + 0.1*random.random()
rgb = colorsys.hls_to_rgb(lastRandomColor, 0.7, 1.0)
obj = vis.showPolyData(polyData, pointCloudObj.getProperty('Name') + ' copy', color=rgb, parent='point clouds')
t = vtk.vtkTransform()
t.PostMultiply()
t.Translate(filterUtils.computeCentroid(polyData))
segmentation.makeMovable(obj, t)
om.setActiveObject(obj)
pickedObj.setProperty('Visible', False)
def onMergeIntoPointCloud():
allPointClouds = om.findObjectByName('point clouds')
if allPointClouds:
allPointClouds = [i.getProperty('Name') for i in allPointClouds.children()]
sel = QtGui.QInputDialog.getItem(None, "Point Cloud Merging", "Pick point cloud to merge into:", allPointClouds, current=0, editable=False)
sel = om.findObjectByName(sel)
# Make a copy of each in same frame
polyDataInto = vtk.vtkPolyData()
polyDataInto.ShallowCopy(sel.polyData)
if sel.getChildFrame():
polyDataInto = segmentation.transformPolyData(polyDataInto, sel.getChildFrame().transform)
polyDataFrom = vtk.vtkPolyData()
polyDataFrom.DeepCopy(pointCloudObj.polyData)
if pointCloudObj.getChildFrame():
polyDataFrom = segmentation.transformPolyData(polyDataFrom, pointCloudObj.getChildFrame().transform)
# Actual merge
append = filterUtils.appendPolyData([polyDataFrom, polyDataInto])
if sel.getChildFrame():
polyDataInto = segmentation.transformPolyData(polyDataInto, sel.getChildFrame().transform.GetInverse())
# resample
append = segmentationroutines.applyVoxelGrid(append, 0.01)
append = segmentation.addCoordArraysToPolyData(append)
# Recenter the frame
sel.setPolyData(append)
t = vtk.vtkTransform()
t.PostMultiply()
t.Translate(filterUtils.computeCentroid(append))
segmentation.makeMovable(sel, t)
# Hide the old one
if pointCloudObj.getProperty('Name') in allPointClouds:
pointCloudObj.setProperty('Visible', False)
def onSegmentTableScene():
data = segmentation.segmentTableScene(pointCloudObj.polyData, pickedPoint)
vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')
def onSegmentDrillAlignedWithTable():
segmentation.segmentDrillAlignedWithTable(pickedPoint, pointCloudObj.polyData)
def onCachePickedPoint():
        '''Cache the picked point for general-purpose use.'''
global lastCachedPickedPoint
lastCachedPickedPoint = pickedPoint
#data = segmentation.segmentTableScene(pointCloudObj.polyData, pickedPoint)
#vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')
def onLocalPlaneFit():
planePoints, normal = segmentation.applyLocalPlaneFit(pointCloudObj.polyData, pickedPoint, searchRadius=0.1, searchRadiusEnd=0.2)
obj = vis.showPolyData(planePoints, 'local plane fit', color=[0,1,0])
obj.setProperty('Point Size', 7)
fields = segmentation.makePolyDataFields(obj.polyData)
pose = transformUtils.poseFromTransform(fields.frame)
desc = dict(classname='BoxAffordanceItem', Name='local plane', Dimensions=list(fields.dims), pose=pose)
box = segmentation.affordanceManager.newAffordanceFromDescription(desc)
def onOrientToMajorPlane():
polyData, planeFrame = segmentation.orientToMajorPlane(pointCloudObj.polyData, pickedPoint=pickedPoint)
pointCloudObj.setPolyData(polyData)
def onDiskGlyph():
result = segmentation.applyDiskGlyphs(pointCloudObj.polyData)
obj = vis.showPolyData(result, 'disks', color=[0.8,0.8,0.8])
om.setActiveObject(obj)
pickedObj.setProperty('Visible', False)
def onArrowGlyph():
result = segmentation.applyArrowGlyphs(pointCloudObj.polyData)
        obj = vis.showPolyData(result, 'arrows')
def onSegmentationEditor():
segmentationpanel.activateSegmentationMode(pointCloudObj.polyData)
def addNewFrame():
t = transformUtils.copyFrame(affordanceObj.getChildFrame().transform)
t.PostMultiply()
t.Translate(np.array(pickedPoint) - np.array(t.GetPosition()))
newFrame = vis.showFrame(t, '%s frame %d' % (affordanceObj.getProperty('Name'), len(affordanceObj.children())), scale=0.2, parent=affordanceObj)
affordanceObj.getChildFrame().getFrameSync().addFrame(newFrame, ignoreIncoming=True)
def copyAffordance():
desc = dict(affordanceObj.getDescription())
del desc['uuid']
desc['Name'] = desc['Name'] + ' copy'
aff = robotSystem.affordanceManager.newAffordanceFromDescription(desc)
aff.getChildFrame().setProperty('Edit', True)
def onPromoteToAffordance():
affObj = affordanceitems.MeshAffordanceItem.promotePolyDataItem(pickedObj)
robotSystem.affordanceManager.registerAffordance(affObj)
actions = [
(None, None),
('Hide', onHide),
('Delete', onDelete),
('Select', onSelect)
]
if affordanceObj:
actions.extend([
('Copy affordance', copyAffordance),
('Add new frame', addNewFrame),
])
elif type(pickedObj) == vis.PolyDataItem:
actions.extend([
('Promote to Affordance', onPromoteToAffordance),
])
if isGraspSeed(pickedObj):
actions.extend([
(None, None),
('Flip Side', flipHandSide),
('Flip Thumb', flipHandThumb),
])
if reachFrame is not None:
actions.extend([
(None, None),
('Reach Left', onReachLeft),
('Reach Right', onReachRight),
#('Spline Left', onSplineLeft),
#('Spline Right', onSplineRight),
])
if pointCloudObj:
actions.extend([
(None, None),
('Copy Pointcloud', onCopyPointCloud),
('Merge Pointcloud Into', onMergeIntoPointCloud),
('Segment Ground', onSegmentGround),
('Segment Table', onSegmentTableScene),
('Segment Drill Aligned', onSegmentDrillAlignedWithTable),
('Local Plane Fit', onLocalPlaneFit),
('Orient with Horizontal', onOrientToMajorPlane),
('Arrow Glyph', onArrowGlyph),
('Disk Glyph', onDiskGlyph),
('Cache Pick Point', onCachePickedPoint),
(None, None),
('Open Segmentation Editor', onSegmentationEditor)
])
for actionName, func in actions:
if not actionName:
menu.addSeparator()
else:
action = menu.addAction(actionName)
action.connect('triggered()', func)
selectedAction = menu.popup(globalPos)
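# Qt event filter installed on the VTK render widget.  It routes mouse events
# to the interaction helpers above (walking goals, footstep and frame widgets,
# the right-click menu) and to any enabled segmentation pickers.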
class ViewEventFilter(object):
def __init__(self, view):
self.view = view
self.mouseStart = None
self.initEventFilter()
def filterEvent(self, obj, event):
if event.type() == QtCore.QEvent.MouseButtonDblClick and event.button() == QtCore.Qt.LeftButton:
self.onLeftDoubleClick(event)
elif event.type() == QtCore.QEvent.MouseButtonPress and event.button() == QtCore.Qt.LeftButton:
self.onLeftMousePress(event)
elif event.type() == QtCore.QEvent.MouseButtonPress and event.button() == QtCore.Qt.RightButton:
self.mouseStart = QtCore.QPoint(event.pos())
elif event.type() == QtCore.QEvent.MouseMove:
if self.mouseStart is not None:
delta = QtCore.QPoint(event.pos()) - self.mouseStart
if delta.manhattanLength() > 3:
self.mouseStart = None
else:
self.onMouseMove(event)
elif event.type() == QtCore.QEvent.MouseButtonRelease and event.button() == QtCore.Qt.RightButton and self.mouseStart is not None:
self.mouseStart = None
self.onRightClick(event)
elif event.type() == QtCore.QEvent.Wheel:
self.onWheelEvent(event)
def consumeEvent(self):
self.eventFilter.setEventHandlerResult(True)
def onWheelEvent(self, event):
if neckDriver:
neckDriver.onWheelDelta(event.delta())
def onMouseMove(self, event):
for picker in segmentation.viewPickers:
if not picker.enabled:
continue
picker.onMouseMove(vis.mapMousePosition(self.view, event), event.modifiers())
self.consumeEvent()
def onLeftMousePress(self, event):
if event.modifiers() == QtCore.Qt.ControlModifier:
displayPoint = vis.mapMousePosition(self.view, event)
if footstepsDriver:
newWalkingGoal(displayPoint, self.view)
self.consumeEvent()
for picker in segmentation.viewPickers:
if not picker.enabled:
continue
picker.onMousePress(vis.mapMousePosition(self.view, event), event.modifiers())
self.consumeEvent()
def onLeftDoubleClick(self, event):
displayPoint = vis.mapMousePosition(self.view, event)
useHorizontalWidget = (event.modifiers() == QtCore.Qt.ShiftModifier)
if toggleFootstepWidget(displayPoint, self.view, useHorizontalWidget):
return
if toggleFrameWidget(displayPoint, self.view):
return
if robotLinkSelector and robotLinkSelector.selectLink(displayPoint, self.view):
return
def onRightClick(self, event):
displayPoint = vis.mapMousePosition(self.view, event)
showRightClickMenu(displayPoint, self.view)
def initEventFilter(self):
self.eventFilter = PythonQt.dd.ddPythonEventFilter()
qvtkwidget = self.view.vtkWidget()
qvtkwidget.installEventFilter(self.eventFilter)
self.eventFilter.addFilteredEventType(QtCore.QEvent.MouseButtonDblClick)
self.eventFilter.addFilteredEventType(QtCore.QEvent.MouseButtonPress)
self.eventFilter.addFilteredEventType(QtCore.QEvent.MouseButtonRelease)
self.eventFilter.addFilteredEventType(QtCore.QEvent.MouseMove)
self.eventFilter.addFilteredEventType(QtCore.QEvent.Wheel)
self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.filterEvent)
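# Keyboard shortcuts for the view: 'f' zooms to the picked point, 'r' resets
# the camera, 's' places a hand model, 'n' and '0'-'3' drive the neck.
# Unconsumed key presses are passed on to frameupdater.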
class KeyEventFilter(object):
def __init__(self, view):
self.view = view
self.initEventFilter()
def filterEvent(self, obj, event):
consumed = False
if event.type() == QtCore.QEvent.KeyPress and not event.isAutoRepeat():
key = str(event.text()).lower()
if key == 'f':
zoomToPick(self.getCursorDisplayPosition(), self.view)
consumed = True
elif key == 'r':
consumed = True
if robotModel is not None:
resetCameraToRobot(self.view)
else:
self.view.resetCamera()
self.view.render()
elif key == 's':
consumed = True
if handFactory is not None:
side = 'left' if event.modifiers() != QtCore.Qt.ShiftModifier else 'right'
placeHandModel(self.getCursorDisplayPosition(), self.view, side)
elif key == 'n':
if neckDriver:
neckDriver.activateNeckControl()
elif key in ['0', '1', '2', '3']:
if neckDriver:
consumed = neckDriver.applyNeckPitchPreset(int(key))
if key == '3':
                        # consume '3' so vtk's keypress handler does not toggle stereo/3d mode
consumed = True
elif event.type() == QtCore.QEvent.KeyRelease and not event.isAutoRepeat():
if str(event.text()).lower() == 'n':
if neckDriver:
neckDriver.deactivateNeckControl()
if event.type() == QtCore.QEvent.KeyPress and not consumed:
consumed = frameupdater.handleKey(event)
self.eventFilter.setEventHandlerResult(consumed)
def getCursorDisplayPosition(self):
cursorPos = self.view.mapFromGlobal(QtGui.QCursor.pos())
return cursorPos.x(), self.view.height - cursorPos.y()
def initEventFilter(self):
self.eventFilter = PythonQt.dd.ddPythonEventFilter()
qvtkwidget = self.view.vtkWidget()
qvtkwidget.installEventFilter(self.eventFilter)
self.eventFilter.addFilteredEventType(QtCore.QEvent.KeyPress)
self.eventFilter.addFilteredEventType(QtCore.QEvent.KeyRelease)
self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.filterEvent)
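# Maps key presses to LCM log-player commands (play/pause, step, speed,
# seek) via lcmUtils.LogPlayerCommander.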
class KeyPressLogCommander(object):
def __init__(self, widget):
self.widget = widget
self.initEventFilter()
self.commander = lcmUtils.LogPlayerCommander()
def filterEvent(self, obj, event):
if event.type() == QtCore.QEvent.KeyPress:
key = str(event.text()).lower()
consumed = True
if key == 'p':
self.commander.togglePlay()
elif key == 'n':
self.commander.step()
elif key in ('+', '='):
self.commander.faster()
elif key in ('-', '_'):
self.commander.slower()
elif key == '[':
self.commander.back()
elif key == ']':
self.commander.forward()
else:
consumed = False
self.eventFilter.setEventHandlerResult(consumed)
def initEventFilter(self):
self.eventFilter = PythonQt.dd.ddPythonEventFilter()
self.widget.installEventFilter(self.eventFilter)
self.eventFilter.addFilteredEventType(QtCore.QEvent.KeyPress)
self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.filterEvent)
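# Installs the mouse and keyboard event filters on a view.  Construct one
# ViewBehaviors per view, then call ViewBehaviors.addRobotBehaviors(robotSystem)
# once to wire in the robot model, hand factory, footsteps and neck drivers.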
class ViewBehaviors(object):
def __init__(self, view):
self.view = view
self.mouseEventFilter = ViewEventFilter(view)
#self.logCommander = KeyPressLogCommander(view.vtkWidget())
self.keyEventFilter = KeyEventFilter(view)
@staticmethod
def addRobotBehaviors(_robotSystem):
global robotSystem, robotModel, handFactory, footstepsDriver, neckDriver, robotLinkSelector
robotSystem = _robotSystem
robotModel = robotSystem.robotStateModel
handFactory = robotSystem.handFactory
footstepsDriver = robotSystem.footstepsDriver
neckDriver = robotSystem.neckDriver
if app.getMainWindow() is not None:
robotLinkSelector = RobotLinkSelector()
|
|
from logic.v1.api import BaseAPI, need, hook
from logic.v1.args import KeyArg, Arg
from logic.v1.core.models import User
from .models import Hashtag, Tag
from mongoengine import Q
from . import models
import json
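# API resources for outlines and their hashtags.  Hashtags are stored once in
# the Hashtag collection and linked to objects through Tag documents; the
# helpers below keep the Tag documents in sync with an outline's hashtag list.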
def process_hashtags(hashtags):
"""takes a string of hashtags and returns a list of Hashtag objects
accepts comma-delimited lists with strings that can include or exclude
pound signs"""
return Hashtag.add(hashtags)
def sync_tags(tags, hashtags):
"""Delete extra tags not in the list of hashtags"""
if len(tags) != len(hashtags):
for tag in tags:
if tag.hashtag not in hashtags:
tag.delete()
class OutlineAPI(BaseAPI):
"""API for Outlines"""
model = models.Outline
methods = {
'get': {
'args': model.fields_to_args(override={'required': False},
hashtags=Arg(list, use=process_hashtags))
},
'post': {
'args': model.fields_to_args(
hashtags=Arg(list, use=process_hashtags))
},
'put': {
'args': model.fields_to_args(
hashtags=Arg(list, use=process_hashtags))
},
'delete': {},
}
endpoints = {
'fetch': {
'args': model.fields_to_args(override={'required': False},
hashtags=Arg(list, use=process_hashtags))
},
'search': {
'args': {
'query': Arg(str)
}
}
}
def post_get(self, obj, data, rval):
"""Convert hashtags into string list of hashtags"""
hashtags = [h.name for h in rval.hashtags]
data = json.loads(rval.load(hashtags=None).to_json())
data['hashtags'] = hashtags
return data
def post_put(self, obj, data, rval):
"""Synchronize tags with hashtags"""
tags = Tag(kind='Outline', oid=str(rval.id)).fetch()
sync_tags(tags, rval.hashtags)
return rval
def post_post(self, obj, data, rval):
"""Saves all Hashtags in Tags many-to-many table"""
obj = rval
for hashtag in data['hashtags']:
Tag(
hashtag=hashtag,
oid=str(obj.id),
kind=obj.__class__.__name__
).save()
return rval
def fetch(self, obj, data):
data = self.model(**data).to_dict()
if not data['hashtags']:
data.pop('hashtags')
return self.model.objects(**data).all()
# TODO: cleanup
def search(self, obj, data):
"""performs search functionality"""
queries = data['query'].split(' ')
hashtags, titles = [], []
for query in queries:
if not query:
continue
if query[0] == '#':
hashtags.append(str(Hashtag(name=query[1:]).get_or_create().id))
else:
titles.append(query)
query = Q()
for hashtag in hashtags:
query = query & Q(hashtags=hashtag)
for title in titles:
            query = query & (Q(title__icontains=title) | Q(content__icontains=title))
outlines, nval = self.model.objects(query).all(), []
for outline in outlines:
hashtags = [h.get().name for h in outline.hashtags]
data = json.loads(outline.load(hashtags=None).to_json())
data['hashtags'] = hashtags
nval.append(data)
return nval
def can(self, obj, user, permission):
"""Returns a boolean allowing or denying API access"""
if permission in ['fetch', 'get']:
return True
if user.status != 'active':
return False
if permission in ['post', 'put', 'delete']:
return True
return False
class HashtagAPI(BaseAPI):
"""API for hashtags"""
model = Hashtag
methods = {
'get': {
'args': model.fields_to_args(override={'required': False})
},
'post': {
'args': model.fields_to_args()
},
'put': {
'args': model.fields_to_args()
},
'delete': {}
}
endpoints = {
'fetch': {}
}
def can(self, obj, user, need):
"""Required permissions implementation"""
if need in ['fetch', 'get']:
return True
if user.status != 'active':
return False
if need in ['post']:
return True
if need == 'put':
return user.id == obj.id
return False
def fetch(self, _, data):
return self.model(**data).fetch(order_by='name')
class TagAPI(BaseAPI):
"""API for relationships between hashtags and objects"""
model = Tag
methods = {
'get': {
'args': model.fields_to_args(override={'required': False})
},
'post': {
'args': model.fields_to_args()
},
'put': {
'args': model.fields_to_args()
},
'delete': {}
}
endpoints = {
'fetch': {
'args': model.fields_to_args(
override={'required': False},
exclude=['created_at', 'id', 'updated_at'])
},
}
def can(self, obj, user, need):
"""Required permissions implementation"""
if need in ['fetch', 'get']:
return True
if user.status != 'active':
return False
if need in ['post', 'delete', 'put']:
return True
return False
def post_fetch(self, obj, data, rval):
"""Converts all tag oids into objects"""
tags = []
objects = list(rval.distinct('oid'))
for tag in rval:
if tag.oid in objects:
                obj = DBRef(tag.kind, tag.oid).get()  # dereference the tagged object; DBRef is assumed to be available in this scope (it is not imported above)
data = json.loads(tag.to_json())
data['object'] = obj
tags.append(data)
objects.remove(tag.oid)
return tags
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate melodies from a trained checkpoint of an improv RNN model."""
import ast
import os
import time
# internal imports
import tensorflow as tf
import magenta
from magenta.models.improv_rnn import improv_rnn_config_flags
from magenta.models.improv_rnn import improv_rnn_model
from magenta.models.improv_rnn import improv_rnn_sequence_generator
from magenta.protobuf import generator_pb2
from magenta.protobuf import music_pb2
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
# Velocity at which to play chord notes when rendering chords.
CHORD_VELOCITY = 50
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'run_dir', None,
'Path to the directory where the latest checkpoint will be loaded from.')
tf.app.flags.DEFINE_string(
'bundle_file', None,
'Path to the bundle file. If specified, this will take priority over '
'run_dir, unless save_generator_bundle is True, in which case both this '
'flag and run_dir are required')
tf.app.flags.DEFINE_boolean(
'save_generator_bundle', False,
'If true, instead of generating a sequence, will save this generator as a '
'bundle file in the location specified by the bundle_file flag')
tf.app.flags.DEFINE_string(
'bundle_description', None,
'A short, human-readable text description of the bundle (e.g., training '
'data, hyper parameters, etc.).')
tf.app.flags.DEFINE_string(
'output_dir', '/tmp/improv_rnn/generated',
'The directory where MIDI files will be saved to.')
tf.app.flags.DEFINE_integer(
'num_outputs', 10,
'The number of lead sheets to generate. One MIDI file will be created for '
'each.')
tf.app.flags.DEFINE_integer(
'steps_per_chord', 16,
'The number of melody steps to take per backing chord. Each step is a 16th '
'of a bar, so if backing_chords = "C G Am F" and steps_per_chord = 16, '
'four bars will be generated.')
tf.app.flags.DEFINE_string(
'primer_melody', '',
'A string representation of a Python list of '
'magenta.music.Melody event values. For example: '
'"[60, -2, 60, -2, 67, -2, 67, -2]". If specified, this melody will be '
'used as the priming melody. If a priming melody is not specified, '
'melodies will be generated from scratch.')
tf.app.flags.DEFINE_string(
'backing_chords', 'C G Am F C G F C',
'A string representation of a chord progression, with chord symbols '
'separated by spaces. For example: "C Dm7 G13 Cmaj7". The duration of each '
'chord, in steps, is specified by the steps_per_chord flag.')
tf.app.flags.DEFINE_string(
'primer_midi', '',
'The path to a MIDI file containing a melody that will be used as a '
'priming melody. If a primer melody is not specified, melodies will be '
'generated from scratch.')
tf.app.flags.DEFINE_boolean(
'render_chords', False,
'If true, the backing chords will also be rendered as notes in the output '
'MIDI files.')
tf.app.flags.DEFINE_float(
'qpm', None,
'The quarters per minute to play generated output at. If a primer MIDI is '
'given, the qpm from that will override this flag. If qpm is None, qpm '
'will default to 120.')
tf.app.flags.DEFINE_float(
'temperature', 1.0,
'The randomness of the generated melodies. 1.0 uses the unaltered softmax '
'probabilities, greater than 1.0 makes melodies more random, less than 1.0 '
'makes melodies less random.')
tf.app.flags.DEFINE_integer(
'beam_size', 1,
'The beam size to use for beam search when generating melodies.')
tf.app.flags.DEFINE_integer(
'branch_factor', 1,
'The branch factor to use for beam search when generating melodies.')
tf.app.flags.DEFINE_integer(
'steps_per_iteration', 1,
'The number of melody steps to take per beam search iteration.')
tf.app.flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
'or FATAL.')
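# Example invocation (a sketch; the script name and paths are hypothetical,
# and the RNN config itself is selected via the flags defined in
# improv_rnn_config_flags):
#
#   python improv_rnn_generate.py \
#     --bundle_file=/path/to/improv_rnn.mag \
#     --output_dir=/tmp/improv_rnn/generated \
#     --num_outputs=5 \
#     --backing_chords="C G Am F" \
#     --render_chords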
def get_checkpoint():
"""Get the training dir to be used by the model."""
if FLAGS.run_dir and FLAGS.bundle_file and not FLAGS.save_generator_bundle:
raise magenta.music.SequenceGeneratorException(
'Cannot specify both bundle_file and run_dir')
if FLAGS.run_dir:
train_dir = os.path.join(os.path.expanduser(FLAGS.run_dir), 'train')
return train_dir
else:
return None
def get_bundle():
"""Returns a generator_pb2.GeneratorBundle object based read from bundle_file.
Returns:
Either a generator_pb2.GeneratorBundle or None if the bundle_file flag is
not set or the save_generator_bundle flag is set.
"""
if FLAGS.save_generator_bundle:
return None
if FLAGS.bundle_file is None:
return None
bundle_file = os.path.expanduser(FLAGS.bundle_file)
return magenta.music.read_bundle_file(bundle_file)
def run_with_flags(generator):
"""Generates melodies and saves them as MIDI files.
Uses the options specified by the flags defined in this module.
Args:
generator: The ImprovRnnSequenceGenerator to use for generation.
"""
if not FLAGS.output_dir:
tf.logging.fatal('--output_dir required')
return
FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
primer_midi = None
if FLAGS.primer_midi:
primer_midi = os.path.expanduser(FLAGS.primer_midi)
if not tf.gfile.Exists(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
primer_sequence = None
qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE
if FLAGS.primer_melody:
primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
primer_sequence = primer_melody.to_sequence(qpm=qpm)
elif primer_midi:
primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
qpm = primer_sequence.tempos[0].qpm
else:
tf.logging.warning(
'No priming sequence specified. Defaulting to a single middle C.')
primer_melody = magenta.music.Melody([60])
primer_sequence = primer_melody.to_sequence(qpm=qpm)
# Create backing chord progression from flags.
raw_chords = FLAGS.backing_chords.split()
repeated_chords = [chord for chord in raw_chords
for _ in range(FLAGS.steps_per_chord)]
backing_chords = magenta.music.ChordProgression(repeated_chords)
# Derive the total number of seconds to generate based on the QPM of the
# priming sequence and the length of the backing chord progression.
seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
total_seconds = len(backing_chords) * seconds_per_step
# Specify start/stop time for generation based on starting generation at the
# end of the priming sequence and continuing until the sequence is num_steps
# long.
generator_options = generator_pb2.GeneratorOptions()
if primer_sequence:
input_sequence = primer_sequence
# Set the start time to begin on the next step after the last note ends.
last_end_time = (max(n.end_time for n in primer_sequence.notes)
if primer_sequence.notes else 0)
generate_section = generator_options.generate_sections.add(
start_time=last_end_time + seconds_per_step,
end_time=total_seconds)
if generate_section.start_time >= generate_section.end_time:
tf.logging.fatal(
'Priming sequence is longer than the total number of steps '
'requested: Priming sequence length: %s, Generation length '
'requested: %s',
generate_section.start_time, total_seconds)
return
else:
input_sequence = music_pb2.NoteSequence()
input_sequence.tempos.add().qpm = qpm
generate_section = generator_options.generate_sections.add(
start_time=0,
end_time=total_seconds)
# Add the backing chords to the input sequence.
chord_sequence = backing_chords.to_sequence(sequence_start_time=0.0, qpm=qpm)
for text_annotation in chord_sequence.text_annotations:
if text_annotation.annotation_type == CHORD_SYMBOL:
chord = input_sequence.text_annotations.add()
chord.CopyFrom(text_annotation)
input_sequence.total_time = len(backing_chords) * seconds_per_step
generator_options.args['temperature'].float_value = FLAGS.temperature
generator_options.args['beam_size'].int_value = FLAGS.beam_size
generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
generator_options.args[
'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
tf.logging.debug('input_sequence: %s', input_sequence)
tf.logging.debug('generator_options: %s', generator_options)
# Make the generate request num_outputs times and save the output as midi
# files.
date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
digits = len(str(FLAGS.num_outputs))
for i in range(FLAGS.num_outputs):
generated_sequence = generator.generate(input_sequence, generator_options)
if FLAGS.render_chords:
renderer = magenta.music.BasicChordRenderer(velocity=CHORD_VELOCITY)
renderer.render(generated_sequence)
midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
midi_path = os.path.join(FLAGS.output_dir, midi_filename)
magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)
tf.logging.info('Wrote %d MIDI files to %s',
FLAGS.num_outputs, FLAGS.output_dir)
def main(unused_argv):
"""Saves bundle or runs generator based on flags."""
tf.logging.set_verbosity(FLAGS.log)
config = improv_rnn_config_flags.config_from_flags()
generator = improv_rnn_sequence_generator.ImprovRnnSequenceGenerator(
model=improv_rnn_model.ImprovRnnModel(config),
details=config.details,
steps_per_quarter=config.steps_per_quarter,
checkpoint=get_checkpoint(),
bundle=get_bundle())
if FLAGS.save_generator_bundle:
bundle_filename = os.path.expanduser(FLAGS.bundle_file)
if FLAGS.bundle_description is None:
tf.logging.warning('No bundle description provided.')
tf.logging.info('Saving generator bundle to %s', bundle_filename)
generator.create_bundle_file(bundle_filename, FLAGS.bundle_description)
else:
run_with_flags(generator)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
|
|
import re
import npc
from npc.character.tags import TagContainer
from npc.character import Spirit
from mako.template import Template
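# Renders the spirit Markdown listing template for a character so the tests
# below can assert on the generated text.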
def template_output(character, header_level=3):
template_path = str(npc.settings.InternalSettings().get('listing.templates.markdown.character.spirit'))
character_template = Template(filename=template_path)
return character_template.render(tags=character.tags, header_level=header_level)
def test_inserts_hashes_for_header_level():
char = Spirit()
output = template_output(char, 3)
assert re.match(r'^###', output) is not None
class TestName:
def test_uses_first_name_for_header(self):
char = Spirit()
char.tags('name').append('Joe Smith')
output = template_output(char)
assert '# Joe Smith' in output
def test_adds_aka_for_remaining_names(self):
char = Spirit()
char.tags('name').extend(['Joe Smith', 'Mr. Smith', 'The Man'])
output = template_output(char)
assert '*AKA Mr. Smith, The Man*' in output
class TestDead:
def test_inserts_deceased_note_if_dead(self):
char = Spirit()
char.tags('name').append('Joe Smith')
char.tags('dead').touch()
output = template_output(char)
assert '# Joe Smith (Deceased)' in output
def test_no_dead_section_without_dead_notes(self):
char = Spirit()
char.tags('name').append('Joe Smith')
output = template_output(char)
assert '*Dead:*' not in output
def test_has_dead_section_with_dead_notes(self):
char = Spirit()
char.tags('name').append('Joe Smith')
char.tags('dead').append('fell hard')
output = template_output(char)
assert '*Dead:* fell hard' in output
def test_titles_on_own_line():
char = Spirit()
char.tags('title').extend(['title 1', 'title 2'])
output = template_output(char)
assert re.search(r'^title 1, title 2$', output, re.MULTILINE) is not None
def test_types_separated_with_slash():
char = Spirit()
char.tags('type').extend(['human', 'changeling'])
output = template_output(char)
assert 'human/changeling' in output
def test_locations_appended_to_types():
char = Spirit()
char.tags('type').extend(['human', 'changeling'])
char.tags('foreign').append('florida')
char.tags('location').append('orlando')
output = template_output(char)
assert 'human/changeling in florida and orlando' in output
def test_foreign_note_if_foreign():
char = Spirit()
char.tags('type').extend(['human', 'changeling'])
char.tags('foreign').touch()
output = template_output(char)
assert 'human/changeling (foreign)' in output
def test_wanderer_note_if_wanderer():
char = Spirit()
char.tags('type').extend(['human', 'changeling'])
char.tags('foreign').touch()
char.tags('wanderer').touch()
output = template_output(char)
assert 'human/changeling (foreign), Wanderer' in output
class TestGroups:
def test_first_group_is_inline_with_type(self):
char = Spirit()
char.tags('type').append('human')
char.tags('group').append('student council')
output = template_output(char)
assert re.search(r'human.*, student council', output) is not None
def test_first_group_tags_appended(self):
char = Spirit()
char.tags('type').append('human')
char.tags('group').append('student council')
char.tags('group').subtag('student council').append('president')
char.tags('group').subtag('student council').append('member')
output = template_output(char)
assert re.search(r'human.*, student council \(president, member\)', output) is not None
def test_remaining_groups_in_own_section(self):
char = Spirit()
char.tags('type').append('human')
char.tags('group').append('student council')
char.tags('group').subtag('student council').append('president')
char.tags('group').subtag('student council').append('member')
char.tags('group').append('volleyball')
char.tags('group').subtag('volleyball').append('star')
char.tags('group').append('chess club')
char.tags('group').subtag('chess club').append('newbie')
output = template_output(char)
assert re.search(r'^volleyball \(star\), chess club \(newbie\)$', output, re.MULTILINE) is not None
def test_first_motley_inline_with_type():
char = Spirit()
char.tags('type').append('human')
char.tags.add_group('motley', 'weirdos')
char.tags('motley').subtag('weirdos').append('token bro')
output = template_output(char)
assert re.search(r'human.*, weirdos Motley \(token bro\)$', output, re.MULTILINE) is not None
def test_group_then_motley():
char = Spirit()
char.tags('type').append('human')
char.tags.add_group('motley', 'weirdos')
char.tags('motley').subtag('weirdos').append('token bro')
char.tags('group').append('student council')
char.tags('group').subtag('student council').append('president')
output = template_output(char)
assert re.search(r'human.*student council \(president\), weirdos Motley \(token bro\)$', output, re.MULTILINE) is not None
class TestAppearance:
def test_has_section_if_filled(self):
char = Spirit()
char.tags('appearance').append('grungy')
output = template_output(char)
assert re.search(r'^\*Appearance:\* grungy$', output, re.MULTILINE) is not None
def test_no_section_if_not_filled(self):
char = Spirit()
output = template_output(char)
assert '*Appearance:*' not in output
class TestBan:
def test_has_section_if_filled(self):
char = Spirit()
char.tags('ban').append('things')
output = template_output(char)
assert re.search(r'^\*Ban:\* things$', output, re.MULTILINE) is not None
def test_no_section_if_not_filled(self):
char = Spirit()
output = template_output(char)
assert '*Ban:*' not in output
class TestDescription:
def test_has_section_if_filled(self):
char = Spirit()
char.tags('description').append('some guy')
output = template_output(char)
assert re.search(r'^\*Notes:\* some guy$', output, re.MULTILINE) is not None
def test_no_section_if_not_filled(self):
char = Spirit()
output = template_output(char)
assert '*Notes:*' not in output
def test_full_sheet_formatting():
char = Spirit()
char.tags('name').extend(['Bob Herbson', 'Bobbie'])
char.tags('dead').append('Perished in a teleporter accident.')
char.tags('title').append('The Spirit Guinea Pig')
char.tags('location').append('Moontown')
char.tags('wanderer').touch()
char.tags('group').append('Testers')
char.tags('group').subtag('Testers').append('Chief Marshall')
char.tags('group').append('Croquet Team')
char.tags('group').subtag('Croquet Team').append('Water Boy')
char.tags.add_group('motley', 'Moon Morons')
char.tags('motley').subtag('Moon Morons').append('Fixer')
char.tags('appearance').append('Red shirt and a goofy grin.')
char.tags('ban').append('Cannot leave the base.')
char.tags('description').append('Outgoing fella with a shady hobby and no fear of death.')
output = template_output(char)
print(output) # Always print the real output for when things go wrong
expected = """\
### Bob Herbson (Deceased)
*AKA Bobbie*
The Spirit Guinea Pig
spirit in Moontown, Wanderer, Testers (Chief Marshall), Moon Morons Motley (Fixer)
Croquet Team (Water Boy)
*Appearance:* Red shirt and a goofy grin.
*Ban:* Cannot leave the base.
*Notes:* Outgoing fella with a shady hobby and no fear of death.
*Dead:* Perished in a teleporter accident.
"""
assert output == expected
|
|
"""Unicode Properties (autogen)."""
from __future__ import unicode_literals
unicode_blocks = {
"basiclatin": "\u0000-\u007f",
"^basiclatin": "\u0080-\U0010ffff",
"latin1supplement": "\u0080-\u00ff",
"^latin1supplement": "\u0000-\u007f\u0100-\U0010ffff",
"latinextendeda": "\u0100-\u017f",
"^latinextendeda": "\u0000-\u00ff\u0180-\U0010ffff",
"latinextendedb": "\u0180-\u024f",
"^latinextendedb": "\u0000-\u017f\u0250-\U0010ffff",
"ipaextensions": "\u0250-\u02af",
"^ipaextensions": "\u0000-\u024f\u02b0-\U0010ffff",
"spacingmodifierletters": "\u02b0-\u02ff",
"^spacingmodifierletters": "\u0000-\u02af\u0300-\U0010ffff",
"combiningdiacriticalmarks": "\u0300-\u036f",
"^combiningdiacriticalmarks": "\u0000-\u02ff\u0370-\U0010ffff",
"greekandcoptic": "\u0370-\u03ff",
"^greekandcoptic": "\u0000-\u036f\u0400-\U0010ffff",
"cyrillic": "\u0400-\u04ff",
"^cyrillic": "\u0000-\u03ff\u0500-\U0010ffff",
"cyrillicsupplement": "\u0500-\u052f",
"^cyrillicsupplement": "\u0000-\u04ff\u0530-\U0010ffff",
"armenian": "\u0530-\u058f",
"^armenian": "\u0000-\u052f\u0590-\U0010ffff",
"hebrew": "\u0590-\u05ff",
"^hebrew": "\u0000-\u058f\u0600-\U0010ffff",
"arabic": "\u0600-\u06ff",
"^arabic": "\u0000-\u05ff\u0700-\U0010ffff",
"syriac": "\u0700-\u074f",
"^syriac": "\u0000-\u06ff\u0750-\U0010ffff",
"arabicsupplement": "\u0750-\u077f",
"^arabicsupplement": "\u0000-\u074f\u0780-\U0010ffff",
"thaana": "\u0780-\u07bf",
"^thaana": "\u0000-\u077f\u07c0-\U0010ffff",
"nko": "\u07c0-\u07ff",
"^nko": "\u0000-\u07bf\u0800-\U0010ffff",
"samaritan": "\u0800-\u083f",
"^samaritan": "\u0000-\u07ff\u0840-\U0010ffff",
"mandaic": "\u0840-\u085f",
"^mandaic": "\u0000-\u083f\u0860-\U0010ffff",
"arabicextendeda": "\u08a0-\u08ff",
"^arabicextendeda": "\u0000-\u089f\u0900-\U0010ffff",
"devanagari": "\u0900-\u097f",
"^devanagari": "\u0000-\u08ff\u0980-\U0010ffff",
"bengali": "\u0980-\u09ff",
"^bengali": "\u0000-\u097f\u0a00-\U0010ffff",
"gurmukhi": "\u0a00-\u0a7f",
"^gurmukhi": "\u0000-\u09ff\u0a80-\U0010ffff",
"gujarati": "\u0a80-\u0aff",
"^gujarati": "\u0000-\u0a7f\u0b00-\U0010ffff",
"oriya": "\u0b00-\u0b7f",
"^oriya": "\u0000-\u0aff\u0b80-\U0010ffff",
"tamil": "\u0b80-\u0bff",
"^tamil": "\u0000-\u0b7f\u0c00-\U0010ffff",
"telugu": "\u0c00-\u0c7f",
"^telugu": "\u0000-\u0bff\u0c80-\U0010ffff",
"kannada": "\u0c80-\u0cff",
"^kannada": "\u0000-\u0c7f\u0d00-\U0010ffff",
"malayalam": "\u0d00-\u0d7f",
"^malayalam": "\u0000-\u0cff\u0d80-\U0010ffff",
"sinhala": "\u0d80-\u0dff",
"^sinhala": "\u0000-\u0d7f\u0e00-\U0010ffff",
"thai": "\u0e00-\u0e7f",
"^thai": "\u0000-\u0dff\u0e80-\U0010ffff",
"lao": "\u0e80-\u0eff",
"^lao": "\u0000-\u0e7f\u0f00-\U0010ffff",
"tibetan": "\u0f00-\u0fff",
"^tibetan": "\u0000-\u0eff\u1000-\U0010ffff",
"myanmar": "\u1000-\u109f",
"^myanmar": "\u0000-\u0fff\u10a0-\U0010ffff",
"georgian": "\u10a0-\u10ff",
"^georgian": "\u0000-\u109f\u1100-\U0010ffff",
"hanguljamo": "\u1100-\u11ff",
"^hanguljamo": "\u0000-\u10ff\u1200-\U0010ffff",
"ethiopic": "\u1200-\u137f",
"^ethiopic": "\u0000-\u11ff\u1380-\U0010ffff",
"ethiopicsupplement": "\u1380-\u139f",
"^ethiopicsupplement": "\u0000-\u137f\u13a0-\U0010ffff",
"cherokee": "\u13a0-\u13ff",
"^cherokee": "\u0000-\u139f\u1400-\U0010ffff",
"unifiedcanadianaboriginalsyllabics": "\u1400-\u167f",
"^unifiedcanadianaboriginalsyllabics": "\u0000-\u13ff\u1680-\U0010ffff",
"ogham": "\u1680-\u169f",
"^ogham": "\u0000-\u167f\u16a0-\U0010ffff",
"runic": "\u16a0-\u16ff",
"^runic": "\u0000-\u169f\u1700-\U0010ffff",
"tagalog": "\u1700-\u171f",
"^tagalog": "\u0000-\u16ff\u1720-\U0010ffff",
"hanunoo": "\u1720-\u173f",
"^hanunoo": "\u0000-\u171f\u1740-\U0010ffff",
"buhid": "\u1740-\u175f",
"^buhid": "\u0000-\u173f\u1760-\U0010ffff",
"tagbanwa": "\u1760-\u177f",
"^tagbanwa": "\u0000-\u175f\u1780-\U0010ffff",
"khmer": "\u1780-\u17ff",
"^khmer": "\u0000-\u177f\u1800-\U0010ffff",
"mongolian": "\u1800-\u18af",
"^mongolian": "\u0000-\u17ff\u18b0-\U0010ffff",
"unifiedcanadianaboriginalsyllabicsextended": "\u18b0-\u18ff",
"^unifiedcanadianaboriginalsyllabicsextended": "\u0000-\u18af\u1900-\U0010ffff",
"limbu": "\u1900-\u194f",
"^limbu": "\u0000-\u18ff\u1950-\U0010ffff",
"taile": "\u1950-\u197f",
"^taile": "\u0000-\u194f\u1980-\U0010ffff",
"newtailue": "\u1980-\u19df",
"^newtailue": "\u0000-\u197f\u19e0-\U0010ffff",
"khmersymbols": "\u19e0-\u19ff",
"^khmersymbols": "\u0000-\u19df\u1a00-\U0010ffff",
"buginese": "\u1a00-\u1a1f",
"^buginese": "\u0000-\u19ff\u1a20-\U0010ffff",
"taitham": "\u1a20-\u1aaf",
"^taitham": "\u0000-\u1a1f\u1ab0-\U0010ffff",
"balinese": "\u1b00-\u1b7f",
"^balinese": "\u0000-\u1aff\u1b80-\U0010ffff",
"sundanese": "\u1b80-\u1bbf",
"^sundanese": "\u0000-\u1b7f\u1bc0-\U0010ffff",
"batak": "\u1bc0-\u1bff",
"^batak": "\u0000-\u1bbf\u1c00-\U0010ffff",
"lepcha": "\u1c00-\u1c4f",
"^lepcha": "\u0000-\u1bff\u1c50-\U0010ffff",
"olchiki": "\u1c50-\u1c7f",
"^olchiki": "\u0000-\u1c4f\u1c80-\U0010ffff",
"sundanesesupplement": "\u1cc0-\u1ccf",
"^sundanesesupplement": "\u0000-\u1cbf\u1cd0-\U0010ffff",
"vedicextensions": "\u1cd0-\u1cff",
"^vedicextensions": "\u0000-\u1ccf\u1d00-\U0010ffff",
"phoneticextensions": "\u1d00-\u1d7f",
"^phoneticextensions": "\u0000-\u1cff\u1d80-\U0010ffff",
"phoneticextensionssupplement": "\u1d80-\u1dbf",
"^phoneticextensionssupplement": "\u0000-\u1d7f\u1dc0-\U0010ffff",
"combiningdiacriticalmarkssupplement": "\u1dc0-\u1dff",
"^combiningdiacriticalmarkssupplement": "\u0000-\u1dbf\u1e00-\U0010ffff",
"latinextendedadditional": "\u1e00-\u1eff",
"^latinextendedadditional": "\u0000-\u1dff\u1f00-\U0010ffff",
"greekextended": "\u1f00-\u1fff",
"^greekextended": "\u0000-\u1eff\u2000-\U0010ffff",
"generalpunctuation": "\u2000-\u206f",
"^generalpunctuation": "\u0000-\u1fff\u2070-\U0010ffff",
"superscriptsandsubscripts": "\u2070-\u209f",
"^superscriptsandsubscripts": "\u0000-\u206f\u20a0-\U0010ffff",
"currencysymbols": "\u20a0-\u20cf",
"^currencysymbols": "\u0000-\u209f\u20d0-\U0010ffff",
"combiningdiacriticalmarksforsymbols": "\u20d0-\u20ff",
"^combiningdiacriticalmarksforsymbols": "\u0000-\u20cf\u2100-\U0010ffff",
"letterlikesymbols": "\u2100-\u214f",
"^letterlikesymbols": "\u0000-\u20ff\u2150-\U0010ffff",
"numberforms": "\u2150-\u218f",
"^numberforms": "\u0000-\u214f\u2190-\U0010ffff",
"arrows": "\u2190-\u21ff",
"^arrows": "\u0000-\u218f\u2200-\U0010ffff",
"mathematicaloperators": "\u2200-\u22ff",
"^mathematicaloperators": "\u0000-\u21ff\u2300-\U0010ffff",
"miscellaneoustechnical": "\u2300-\u23ff",
"^miscellaneoustechnical": "\u0000-\u22ff\u2400-\U0010ffff",
"controlpictures": "\u2400-\u243f",
"^controlpictures": "\u0000-\u23ff\u2440-\U0010ffff",
"opticalcharacterrecognition": "\u2440-\u245f",
"^opticalcharacterrecognition": "\u0000-\u243f\u2460-\U0010ffff",
"enclosedalphanumerics": "\u2460-\u24ff",
"^enclosedalphanumerics": "\u0000-\u245f\u2500-\U0010ffff",
"boxdrawing": "\u2500-\u257f",
"^boxdrawing": "\u0000-\u24ff\u2580-\U0010ffff",
"blockelements": "\u2580-\u259f",
"^blockelements": "\u0000-\u257f\u25a0-\U0010ffff",
"geometricshapes": "\u25a0-\u25ff",
"^geometricshapes": "\u0000-\u259f\u2600-\U0010ffff",
"miscellaneoussymbols": "\u2600-\u26ff",
"^miscellaneoussymbols": "\u0000-\u25ff\u2700-\U0010ffff",
"dingbats": "\u2700-\u27bf",
"^dingbats": "\u0000-\u26ff\u27c0-\U0010ffff",
"miscellaneousmathematicalsymbolsa": "\u27c0-\u27ef",
"^miscellaneousmathematicalsymbolsa": "\u0000-\u27bf\u27f0-\U0010ffff",
"supplementalarrowsa": "\u27f0-\u27ff",
"^supplementalarrowsa": "\u0000-\u27ef\u2800-\U0010ffff",
"braillepatterns": "\u2800-\u28ff",
"^braillepatterns": "\u0000-\u27ff\u2900-\U0010ffff",
"supplementalarrowsb": "\u2900-\u297f",
"^supplementalarrowsb": "\u0000-\u28ff\u2980-\U0010ffff",
"miscellaneousmathematicalsymbolsb": "\u2980-\u29ff",
"^miscellaneousmathematicalsymbolsb": "\u0000-\u297f\u2a00-\U0010ffff",
"supplementalmathematicaloperators": "\u2a00-\u2aff",
"^supplementalmathematicaloperators": "\u0000-\u29ff\u2b00-\U0010ffff",
"miscellaneoussymbolsandarrows": "\u2b00-\u2bff",
"^miscellaneoussymbolsandarrows": "\u0000-\u2aff\u2c00-\U0010ffff",
"glagolitic": "\u2c00-\u2c5f",
"^glagolitic": "\u0000-\u2bff\u2c60-\U0010ffff",
"latinextendedc": "\u2c60-\u2c7f",
"^latinextendedc": "\u0000-\u2c5f\u2c80-\U0010ffff",
"coptic": "\u2c80-\u2cff",
"^coptic": "\u0000-\u2c7f\u2d00-\U0010ffff",
"georgiansupplement": "\u2d00-\u2d2f",
"^georgiansupplement": "\u0000-\u2cff\u2d30-\U0010ffff",
"tifinagh": "\u2d30-\u2d7f",
"^tifinagh": "\u0000-\u2d2f\u2d80-\U0010ffff",
"ethiopicextended": "\u2d80-\u2ddf",
"^ethiopicextended": "\u0000-\u2d7f\u2de0-\U0010ffff",
"cyrillicextendeda": "\u2de0-\u2dff",
"^cyrillicextendeda": "\u0000-\u2ddf\u2e00-\U0010ffff",
"supplementalpunctuation": "\u2e00-\u2e7f",
"^supplementalpunctuation": "\u0000-\u2dff\u2e80-\U0010ffff",
"cjkradicalssupplement": "\u2e80-\u2eff",
"^cjkradicalssupplement": "\u0000-\u2e7f\u2f00-\U0010ffff",
"kangxiradicals": "\u2f00-\u2fdf",
"^kangxiradicals": "\u0000-\u2eff\u2fe0-\U0010ffff",
"ideographicdescriptioncharacters": "\u2ff0-\u2fff",
"^ideographicdescriptioncharacters": "\u0000-\u2fef\u3000-\U0010ffff",
"cjksymbolsandpunctuation": "\u3000-\u303f",
"^cjksymbolsandpunctuation": "\u0000-\u2fff\u3040-\U0010ffff",
"hiragana": "\u3040-\u309f",
"^hiragana": "\u0000-\u303f\u30a0-\U0010ffff",
"katakana": "\u30a0-\u30ff",
"^katakana": "\u0000-\u309f\u3100-\U0010ffff",
"bopomofo": "\u3100-\u312f",
"^bopomofo": "\u0000-\u30ff\u3130-\U0010ffff",
"hangulcompatibilityjamo": "\u3130-\u318f",
"^hangulcompatibilityjamo": "\u0000-\u312f\u3190-\U0010ffff",
"kanbun": "\u3190-\u319f",
"^kanbun": "\u0000-\u318f\u31a0-\U0010ffff",
"bopomofoextended": "\u31a0-\u31bf",
"^bopomofoextended": "\u0000-\u319f\u31c0-\U0010ffff",
"cjkstrokes": "\u31c0-\u31ef",
"^cjkstrokes": "\u0000-\u31bf\u31f0-\U0010ffff",
"katakanaphoneticextensions": "\u31f0-\u31ff",
"^katakanaphoneticextensions": "\u0000-\u31ef\u3200-\U0010ffff",
"enclosedcjklettersandmonths": "\u3200-\u32ff",
"^enclosedcjklettersandmonths": "\u0000-\u31ff\u3300-\U0010ffff",
"cjkcompatibility": "\u3300-\u33ff",
"^cjkcompatibility": "\u0000-\u32ff\u3400-\U0010ffff",
"cjkunifiedideographsextensiona": "\u3400-\u4dbf",
"^cjkunifiedideographsextensiona": "\u0000-\u33ff\u4dc0-\U0010ffff",
"yijinghexagramsymbols": "\u4dc0-\u4dff",
"^yijinghexagramsymbols": "\u0000-\u4dbf\u4e00-\U0010ffff",
"cjkunifiedideographs": "\u4e00-\u9fff",
"^cjkunifiedideographs": "\u0000-\u4dff\ua000-\U0010ffff",
"yisyllables": "\ua000-\ua48f",
"^yisyllables": "\u0000-\u9fff\ua490-\U0010ffff",
"yiradicals": "\ua490-\ua4cf",
"^yiradicals": "\u0000-\ua48f\ua4d0-\U0010ffff",
"lisu": "\ua4d0-\ua4ff",
"^lisu": "\u0000-\ua4cf\ua500-\U0010ffff",
"vai": "\ua500-\ua63f",
"^vai": "\u0000-\ua4ff\ua640-\U0010ffff",
"cyrillicextendedb": "\ua640-\ua69f",
"^cyrillicextendedb": "\u0000-\ua63f\ua6a0-\U0010ffff",
"bamum": "\ua6a0-\ua6ff",
"^bamum": "\u0000-\ua69f\ua700-\U0010ffff",
"modifiertoneletters": "\ua700-\ua71f",
"^modifiertoneletters": "\u0000-\ua6ff\ua720-\U0010ffff",
"latinextendedd": "\ua720-\ua7ff",
"^latinextendedd": "\u0000-\ua71f\ua800-\U0010ffff",
"sylotinagri": "\ua800-\ua82f",
"^sylotinagri": "\u0000-\ua7ff\ua830-\U0010ffff",
"commonindicnumberforms": "\ua830-\ua83f",
"^commonindicnumberforms": "\u0000-\ua82f\ua840-\U0010ffff",
"phagspa": "\ua840-\ua87f",
"^phagspa": "\u0000-\ua83f\ua880-\U0010ffff",
"saurashtra": "\ua880-\ua8df",
"^saurashtra": "\u0000-\ua87f\ua8e0-\U0010ffff",
"devanagariextended": "\ua8e0-\ua8ff",
"^devanagariextended": "\u0000-\ua8df\ua900-\U0010ffff",
"kayahli": "\ua900-\ua92f",
"^kayahli": "\u0000-\ua8ff\ua930-\U0010ffff",
"rejang": "\ua930-\ua95f",
"^rejang": "\u0000-\ua92f\ua960-\U0010ffff",
"hanguljamoextendeda": "\ua960-\ua97f",
"^hanguljamoextendeda": "\u0000-\ua95f\ua980-\U0010ffff",
"javanese": "\ua980-\ua9df",
"^javanese": "\u0000-\ua97f\ua9e0-\U0010ffff",
"cham": "\uaa00-\uaa5f",
"^cham": "\u0000-\ua9ff\uaa60-\U0010ffff",
"myanmarextendeda": "\uaa60-\uaa7f",
"^myanmarextendeda": "\u0000-\uaa5f\uaa80-\U0010ffff",
"taiviet": "\uaa80-\uaadf",
"^taiviet": "\u0000-\uaa7f\uaae0-\U0010ffff",
"meeteimayekextensions": "\uaae0-\uaaff",
"^meeteimayekextensions": "\u0000-\uaadf\uab00-\U0010ffff",
"ethiopicextendeda": "\uab00-\uab2f",
"^ethiopicextendeda": "\u0000-\uaaff\uab30-\U0010ffff",
"meeteimayek": "\uabc0-\uabff",
"^meeteimayek": "\u0000-\uabbf\uac00-\U0010ffff",
"hangulsyllables": "\uac00-\ud7af",
"^hangulsyllables": "\u0000-\uabff\ud7b0-\U0010ffff",
"hanguljamoextendedb": "\ud7b0-\ud7ff",
"^hanguljamoextendedb": "\u0000-\ud7af\ud800-\U0010ffff",
"highsurrogates": "\ud800-\udb7f",
"^highsurrogates": "\u0000-\ud7ff\udb80-\U0010ffff",
"highprivateusesurrogates": "\udb80-\udbff",
"^highprivateusesurrogates": "\u0000-\udb7f\udc00-\U0010ffff",
"lowsurrogates": "\udc00-\udfff",
"^lowsurrogates": "\u0000-\udbff\ue000-\U0010ffff",
"privateusearea": "\ue000-\uf8ff",
"^privateusearea": "\u0000-\udfff\uf900-\U0010ffff",
"cjkcompatibilityideographs": "\uf900-\ufaff",
"^cjkcompatibilityideographs": "\u0000-\uf8ff\ufb00-\U0010ffff",
"alphabeticpresentationforms": "\ufb00-\ufb4f",
"^alphabeticpresentationforms": "\u0000-\ufaff\ufb50-\U0010ffff",
"arabicpresentationformsa": "\ufb50-\ufdff",
"^arabicpresentationformsa": "\u0000-\ufb4f\ufe00-\U0010ffff",
"variationselectors": "\ufe00-\ufe0f",
"^variationselectors": "\u0000-\ufdff\ufe10-\U0010ffff",
"verticalforms": "\ufe10-\ufe1f",
"^verticalforms": "\u0000-\ufe0f\ufe20-\U0010ffff",
"combininghalfmarks": "\ufe20-\ufe2f",
"^combininghalfmarks": "\u0000-\ufe1f\ufe30-\U0010ffff",
"cjkcompatibilityforms": "\ufe30-\ufe4f",
"^cjkcompatibilityforms": "\u0000-\ufe2f\ufe50-\U0010ffff",
"smallformvariants": "\ufe50-\ufe6f",
"^smallformvariants": "\u0000-\ufe4f\ufe70-\U0010ffff",
"arabicpresentationformsb": "\ufe70-\ufeff",
"^arabicpresentationformsb": "\u0000-\ufe6f\uff00-\U0010ffff",
"halfwidthandfullwidthforms": "\uff00-\uffef",
"^halfwidthandfullwidthforms": "\u0000-\ufeff\ufff0-\U0010ffff",
"specials": "\ufff0-\uffff",
"^specials": "\u0000-\uffef\U00010000-\U0010ffff",
"linearbsyllabary": "\U00010000-\U0001007f",
"^linearbsyllabary": "\u0000-\uffff\U00010080-\U0010ffff",
"linearbideograms": "\U00010080-\U000100ff",
"^linearbideograms": "\u0000-\U0001007f\U00010100-\U0010ffff",
"aegeannumbers": "\U00010100-\U0001013f",
"^aegeannumbers": "\u0000-\U000100ff\U00010140-\U0010ffff",
"ancientgreeknumbers": "\U00010140-\U0001018f",
"^ancientgreeknumbers": "\u0000-\U0001013f\U00010190-\U0010ffff",
"ancientsymbols": "\U00010190-\U000101cf",
"^ancientsymbols": "\u0000-\U0001018f\U000101d0-\U0010ffff",
"phaistosdisc": "\U000101d0-\U000101ff",
"^phaistosdisc": "\u0000-\U000101cf\U00010200-\U0010ffff",
"lycian": "\U00010280-\U0001029f",
"^lycian": "\u0000-\U0001027f\U000102a0-\U0010ffff",
"carian": "\U000102a0-\U000102df",
"^carian": "\u0000-\U0001029f\U000102e0-\U0010ffff",
"olditalic": "\U00010300-\U0001032f",
"^olditalic": "\u0000-\U000102ff\U00010330-\U0010ffff",
"gothic": "\U00010330-\U0001034f",
"^gothic": "\u0000-\U0001032f\U00010350-\U0010ffff",
"ugaritic": "\U00010380-\U0001039f",
"^ugaritic": "\u0000-\U0001037f\U000103a0-\U0010ffff",
"oldpersian": "\U000103a0-\U000103df",
"^oldpersian": "\u0000-\U0001039f\U000103e0-\U0010ffff",
"deseret": "\U00010400-\U0001044f",
"^deseret": "\u0000-\U000103ff\U00010450-\U0010ffff",
"shavian": "\U00010450-\U0001047f",
"^shavian": "\u0000-\U0001044f\U00010480-\U0010ffff",
"osmanya": "\U00010480-\U000104af",
"^osmanya": "\u0000-\U0001047f\U000104b0-\U0010ffff",
"cypriotsyllabary": "\U00010800-\U0001083f",
"^cypriotsyllabary": "\u0000-\U000107ff\U00010840-\U0010ffff",
"imperialaramaic": "\U00010840-\U0001085f",
"^imperialaramaic": "\u0000-\U0001083f\U00010860-\U0010ffff",
"phoenician": "\U00010900-\U0001091f",
"^phoenician": "\u0000-\U000108ff\U00010920-\U0010ffff",
"lydian": "\U00010920-\U0001093f",
"^lydian": "\u0000-\U0001091f\U00010940-\U0010ffff",
"meroitichieroglyphs": "\U00010980-\U0001099f",
"^meroitichieroglyphs": "\u0000-\U0001097f\U000109a0-\U0010ffff",
"meroiticcursive": "\U000109a0-\U000109ff",
"^meroiticcursive": "\u0000-\U0001099f\U00010a00-\U0010ffff",
"kharoshthi": "\U00010a00-\U00010a5f",
"^kharoshthi": "\u0000-\U000109ff\U00010a60-\U0010ffff",
"oldsoutharabian": "\U00010a60-\U00010a7f",
"^oldsoutharabian": "\u0000-\U00010a5f\U00010a80-\U0010ffff",
"avestan": "\U00010b00-\U00010b3f",
"^avestan": "\u0000-\U00010aff\U00010b40-\U0010ffff",
"inscriptionalparthian": "\U00010b40-\U00010b5f",
"^inscriptionalparthian": "\u0000-\U00010b3f\U00010b60-\U0010ffff",
"inscriptionalpahlavi": "\U00010b60-\U00010b7f",
"^inscriptionalpahlavi": "\u0000-\U00010b5f\U00010b80-\U0010ffff",
"oldturkic": "\U00010c00-\U00010c4f",
"^oldturkic": "\u0000-\U00010bff\U00010c50-\U0010ffff",
"ruminumeralsymbols": "\U00010e60-\U00010e7f",
"^ruminumeralsymbols": "\u0000-\U00010e5f\U00010e80-\U0010ffff",
"brahmi": "\U00011000-\U0001107f",
"^brahmi": "\u0000-\U00010fff\U00011080-\U0010ffff",
"kaithi": "\U00011080-\U000110cf",
"^kaithi": "\u0000-\U0001107f\U000110d0-\U0010ffff",
"sorasompeng": "\U000110d0-\U000110ff",
"^sorasompeng": "\u0000-\U000110cf\U00011100-\U0010ffff",
"chakma": "\U00011100-\U0001114f",
"^chakma": "\u0000-\U000110ff\U00011150-\U0010ffff",
"sharada": "\U00011180-\U000111df",
"^sharada": "\u0000-\U0001117f\U000111e0-\U0010ffff",
"takri": "\U00011680-\U000116cf",
"^takri": "\u0000-\U0001167f\U000116d0-\U0010ffff",
"cuneiform": "\U00012000-\U000123ff",
"^cuneiform": "\u0000-\U00011fff\U00012400-\U0010ffff",
"cuneiformnumbersandpunctuation": "\U00012400-\U0001247f",
"^cuneiformnumbersandpunctuation": "\u0000-\U000123ff\U00012480-\U0010ffff",
"egyptianhieroglyphs": "\U00013000-\U0001342f",
"^egyptianhieroglyphs": "\u0000-\U00012fff\U00013430-\U0010ffff",
"bamumsupplement": "\U00016800-\U00016a3f",
"^bamumsupplement": "\u0000-\U000167ff\U00016a40-\U0010ffff",
"miao": "\U00016f00-\U00016f9f",
"^miao": "\u0000-\U00016eff\U00016fa0-\U0010ffff",
"kanasupplement": "\U0001b000-\U0001b0ff",
"^kanasupplement": "\u0000-\U0001afff\U0001b100-\U0010ffff",
"byzantinemusicalsymbols": "\U0001d000-\U0001d0ff",
"^byzantinemusicalsymbols": "\u0000-\U0001cfff\U0001d100-\U0010ffff",
"musicalsymbols": "\U0001d100-\U0001d1ff",
"^musicalsymbols": "\u0000-\U0001d0ff\U0001d200-\U0010ffff",
"ancientgreekmusicalnotation": "\U0001d200-\U0001d24f",
"^ancientgreekmusicalnotation": "\u0000-\U0001d1ff\U0001d250-\U0010ffff",
"taixuanjingsymbols": "\U0001d300-\U0001d35f",
"^taixuanjingsymbols": "\u0000-\U0001d2ff\U0001d360-\U0010ffff",
"countingrodnumerals": "\U0001d360-\U0001d37f",
"^countingrodnumerals": "\u0000-\U0001d35f\U0001d380-\U0010ffff",
"mathematicalalphanumericsymbols": "\U0001d400-\U0001d7ff",
"^mathematicalalphanumericsymbols": "\u0000-\U0001d3ff\U0001d800-\U0010ffff",
"arabicmathematicalalphabeticsymbols": "\U0001ee00-\U0001eeff",
"^arabicmathematicalalphabeticsymbols": "\u0000-\U0001edff\U0001ef00-\U0010ffff",
"mahjongtiles": "\U0001f000-\U0001f02f",
"^mahjongtiles": "\u0000-\U0001efff\U0001f030-\U0010ffff",
"dominotiles": "\U0001f030-\U0001f09f",
"^dominotiles": "\u0000-\U0001f02f\U0001f0a0-\U0010ffff",
"playingcards": "\U0001f0a0-\U0001f0ff",
"^playingcards": "\u0000-\U0001f09f\U0001f100-\U0010ffff",
"enclosedalphanumericsupplement": "\U0001f100-\U0001f1ff",
"^enclosedalphanumericsupplement": "\u0000-\U0001f0ff\U0001f200-\U0010ffff",
"enclosedideographicsupplement": "\U0001f200-\U0001f2ff",
"^enclosedideographicsupplement": "\u0000-\U0001f1ff\U0001f300-\U0010ffff",
"miscellaneoussymbolsandpictographs": "\U0001f300-\U0001f5ff",
"^miscellaneoussymbolsandpictographs": "\u0000-\U0001f2ff\U0001f600-\U0010ffff",
"emoticons": "\U0001f600-\U0001f64f",
"^emoticons": "\u0000-\U0001f5ff\U0001f650-\U0010ffff",
"transportandmapsymbols": "\U0001f680-\U0001f6ff",
"^transportandmapsymbols": "\u0000-\U0001f67f\U0001f700-\U0010ffff",
"alchemicalsymbols": "\U0001f700-\U0001f77f",
"^alchemicalsymbols": "\u0000-\U0001f6ff\U0001f780-\U0010ffff",
"cjkunifiedideographsextensionb": "\U00020000-\U0002a6df",
"^cjkunifiedideographsextensionb": "\u0000-\U0001ffff\U0002a6e0-\U0010ffff",
"cjkunifiedideographsextensionc": "\U0002a700-\U0002b73f",
"^cjkunifiedideographsextensionc": "\u0000-\U0002a6ff\U0002b740-\U0010ffff",
"cjkunifiedideographsextensiond": "\U0002b740-\U0002b81f",
"^cjkunifiedideographsextensiond": "\u0000-\U0002b73f\U0002b820-\U0010ffff",
"cjkcompatibilityideographssupplement": "\U0002f800-\U0002fa1f",
"^cjkcompatibilityideographssupplement": "\u0000-\U0002f7ff\U0002fa20-\U0010ffff",
"tags": "\U000e0000-\U000e007f",
"^tags": "\u0000-\U000dffff\U000e0080-\U0010ffff",
"variationselectorssupplement": "\U000e0100-\U000e01ef",
"^variationselectorssupplement": "\u0000-\U000e00ff\U000e01f0-\U0010ffff",
"supplementaryprivateuseareaa": "\U000f0000-\U000fffff",
"^supplementaryprivateuseareaa": "\u0000-\U000effff\U00100000-\U0010ffff",
"supplementaryprivateuseareab": "\U00100000-\U0010ffff",
"^supplementaryprivateuseareab": "\u0000-\U000fffff",
"noblock": "\u0860-\u089f\u1ab0-\u1aff\u1c80-\u1cbf\u2fe0-\u2fef\ua9e0-\ua9ff\uab30-\uabbf\U00010200-\U0001027f\U000102e0-\U000102ff\U00010350-\U0001037f\U000103e0-\U000103ff\U000104b0-\U000107ff\U00010860-\U000108ff\U00010940-\U0001097f\U00010a80-\U00010aff\U00010b80-\U00010bff\U00010c50-\U00010e5f\U00010e80-\U00010fff\U00011150-\U0001117f\U000111e0-\U0001167f\U000116d0-\U00011fff\U00012480-\U00012fff\U00013430-\U000167ff\U00016a40-\U00016eff\U00016fa0-\U0001afff\U0001b100-\U0001cfff\U0001d250-\U0001d2ff\U0001d380-\U0001d3ff\U0001d800-\U0001edff\U0001ef00-\U0001efff\U0001f650-\U0001f67f\U0001f780-\U0001ffff\U0002a6e0-\U0002a6ff\U0002b820-\U0002f7ff\U0002fa20-\U000dffff\U000e0080-\U000e00ff\U000e01f0-\U000effff",
"^noblock": "\u0000-\u085f\u08a0-\u1aaf\u1b00-\u1c7f\u1cc0-\u2fdf\u2ff0-\ua9df\uaa00-\uab2f\uabc0-\U000101ff\U00010280-\U000102df\U00010300-\U0001034f\U00010380-\U000103df\U00010400-\U000104af\U00010800-\U0001085f\U00010900-\U0001093f\U00010980-\U00010a7f\U00010b00-\U00010b7f\U00010c00-\U00010c4f\U00010e60-\U00010e7f\U00011000-\U0001114f\U00011180-\U000111df\U00011680-\U000116cf\U00012000-\U0001247f\U00013000-\U0001342f\U00016800-\U00016a3f\U00016f00-\U00016f9f\U0001b000-\U0001b0ff\U0001d000-\U0001d24f\U0001d300-\U0001d37f\U0001d400-\U0001d7ff\U0001ee00-\U0001eeff\U0001f000-\U0001f64f\U0001f680-\U0001f77f\U00020000-\U0002a6df\U0002a700-\U0002b81f\U0002f800-\U0002fa1f\U000e0000-\U000e007f\U000e0100-\U000e01ef",
}
|
|
"""
sentry.models.project
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import warnings
import six
from bitfield import BitField
from django.conf import settings
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from uuid import uuid1
from sentry.app import locks
from sentry.constants import ObjectStatus
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.colors import get_hashed_color
from sentry.utils.http import absolute_uri
from sentry.utils.retries import TimedRetryPolicy
# TODO(dcramer): pull in enum library
ProjectStatus = ObjectStatus
class ProjectTeam(Model):
__core__ = True
project = FlexibleForeignKey('sentry.Project')
team = FlexibleForeignKey('sentry.Team')
class Meta:
app_label = 'sentry'
db_table = 'sentry_projectteam'
unique_together = (('project', 'team'), )
class ProjectManager(BaseManager):
# TODO(dcramer): we might want to cache this per user
def get_for_user(self, team, user, scope=None, _skip_team_check=False):
from sentry.models import Team
if not (user and user.is_authenticated()):
return []
if not _skip_team_check:
team_list = Team.objects.get_for_user(
organization=team.organization,
user=user,
scope=scope,
)
try:
team = team_list[team_list.index(team)]
except ValueError:
logging.info('User does not have access to team: %s', team.id)
return []
base_qs = self.filter(
team=team,
status=ProjectStatus.VISIBLE,
)
project_list = []
for project in base_qs:
project.team = team
project_list.append(project)
return sorted(project_list, key=lambda x: x.name.lower())
class Project(Model):
"""
Projects are permission-based namespaces which generally
are the top-level entry point for all data.
"""
__core__ = True
slug = models.SlugField(null=True)
name = models.CharField(max_length=200)
forced_color = models.CharField(max_length=6, null=True, blank=True)
organization = FlexibleForeignKey('sentry.Organization')
team = FlexibleForeignKey('sentry.Team')
teams = models.ManyToManyField(
'sentry.Team', related_name='teams', through=ProjectTeam
)
public = models.BooleanField(default=False)
date_added = models.DateTimeField(default=timezone.now)
status = BoundedPositiveIntegerField(
default=0,
choices=(
(ObjectStatus.VISIBLE,
_('Active')), (ObjectStatus.PENDING_DELETION, _('Pending Deletion')),
(ObjectStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
),
db_index=True
)
# projects that were created before this field was present
# will have their first_event field set to date_added
first_event = models.DateTimeField(null=True)
flags = BitField(
flags=(('has_releases', 'This Project has sent release data'), ), default=0, null=True
)
objects = ProjectManager(cache_fields=[
'pk',
'slug',
])
platform = models.CharField(max_length=64, null=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_project'
unique_together = (('team', 'slug'), ('organization', 'slug'))
__repr__ = sane_repr('team_id', 'name', 'slug')
def __unicode__(self):
return u'%s (%s)' % (self.name, self.slug)
def next_short_id(self):
from sentry.models import Counter
return Counter.increment(self)
def save(self, *args, **kwargs):
if not self.slug:
lock = locks.get('slug:project', duration=5)
with TimedRetryPolicy(10)(lock.acquire):
slugify_instance(self, self.name, organization=self.organization)
super(Project, self).save(*args, **kwargs)
else:
super(Project, self).save(*args, **kwargs)
def get_absolute_url(self):
return absolute_uri('/{}/{}/'.format(self.organization.slug, self.slug))
def is_internal_project(self):
for value in (settings.SENTRY_FRONTEND_PROJECT, settings.SENTRY_PROJECT):
if six.text_type(self.id) == six.text_type(value) or six.text_type(
self.slug
) == six.text_type(value):
return True
return False
# TODO: Make these a mixin
def update_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.set_value(self, *args, **kwargs)
def get_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.get_value(self, *args, **kwargs)
def delete_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.unset_value(self, *args, **kwargs)
@property
def callsign(self):
return self.slug.upper()
@property
def color(self):
if self.forced_color is not None:
return '#%s' % self.forced_color
return get_hashed_color(self.callsign or self.slug)
@property
def member_set(self):
from sentry.models import OrganizationMember
return self.organization.member_set.filter(
id__in=OrganizationMember.objects.filter(
organizationmemberteam__is_active=True,
organizationmemberteam__team=self.team,
).values('id'),
user__is_active=True,
).distinct()
def has_access(self, user, access=None):
from sentry.models import AuthIdentity, OrganizationMember
warnings.warn('Project.has_access is deprecated.', DeprecationWarning)
queryset = self.member_set.filter(user=user)
if access is not None:
queryset = queryset.filter(type__lte=access)
try:
member = queryset.get()
except OrganizationMember.DoesNotExist:
return False
try:
auth_identity = AuthIdentity.objects.get(
auth_provider__organization=self.organization_id,
user=member.user_id,
)
except AuthIdentity.DoesNotExist:
return True
return auth_identity.is_valid(member)
def get_audit_log_data(self):
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'status': self.status,
'public': self.public,
}
def get_full_name(self):
if self.team.name not in self.name:
return '%s %s' % (self.team.name, self.name)
return self.name
def get_notification_recipients(self, user_option):
from sentry.models import UserOption
alert_settings = dict(
(o.user_id, int(o.value))
for o in UserOption.objects.filter(
project=self,
key=user_option,
)
)
disabled = set(u for u, v in six.iteritems(alert_settings) if v == 0)
member_set = set(
self.member_set.exclude(
user__in=disabled,
).values_list('user', flat=True)
)
# determine members' default settings
members_to_check = set(u for u in member_set if u not in alert_settings)
if members_to_check:
disabled = set(
(
uo.user_id
for uo in UserOption.objects.filter(
key='subscribe_by_default',
user__in=members_to_check,
) if uo.value == '0'
)
)
member_set = [x for x in member_set if x not in disabled]
return member_set
def get_mail_alert_subscribers(self):
user_ids = self.get_notification_recipients('mail:alert')
if not user_ids:
return []
from sentry.models import User
return list(User.objects.filter(id__in=user_ids))
def is_user_subscribed_to_mail_alerts(self, user):
from sentry.models import UserOption
is_enabled = UserOption.objects.get_value(user, 'mail:alert', project=self)
if is_enabled is None:
is_enabled = UserOption.objects.get_value(user, 'subscribe_by_default', '1') == '1'
else:
is_enabled = bool(is_enabled)
return is_enabled
def transfer_to(self, team):
from sentry.models import ReleaseProject
organization = team.organization
# We only need to delete ReleaseProjects when moving to a different
# Organization. Releases are bound to an Organization, so it does not make
# sense to keep this link unless we also copied all of the Releases.
if self.organization_id != organization.id:
ReleaseProject.objects.filter(
project_id=self.id,
).delete()
self.organization = organization
self.team = team
try:
with transaction.atomic():
self.update(
organization=organization,
team=team,
)
except IntegrityError:
slugify_instance(self, self.name, organization=organization)
self.update(
slug=self.slug,
organization=organization,
team=team,
)
def add_team(self, team):
try:
with transaction.atomic():
ProjectTeam.objects.create(project=self, team=team)
except IntegrityError:
return False
else:
return True
def get_security_token(self):
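# Token creation is serialized behind a per-project lock (locks.get plus
# TimedRetryPolicy) so that concurrent requests cannot each generate and
# persist a different 'sentry:token' value.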
lock = locks.get(self.get_lock_key(), duration=5)
with TimedRetryPolicy(10)(lock.acquire):
security_token = self.get_option('sentry:token', None)
if security_token is None:
security_token = uuid1().hex
self.update_option('sentry:token', security_token)
return security_token
def get_lock_key(self):
return 'project_token:%s' % self.id
|
|
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2016, Gluu
#
# Author: Yuriy Movchan
#
import java
import json
from com.toopher import RequestError
from com.toopher import ToopherAPI
from java.util import Arrays
from org.xdi.model.custom.script.type.auth import PersonAuthenticationType
from org.xdi.oxauth.security import Identity
from org.xdi.oxauth.service import EncryptionService
from org.xdi.oxauth.service import UserService, AuthenticationService
from org.xdi.service.cdi.util import CdiUtil
from org.xdi.util import StringHelper, ArrayHelper
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "Toopher. Initialization"
toopher_creds_file = configurationAttributes.get("toopher_creds_file").getValue2()
# Load credentials from file
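# The file is expected to contain JSON with the keys read below; an
# illustrative placeholder example (the real secret may be stored encrypted
# and is decrypted via EncryptionService further down):
#
#   {"CONSUMER_KEY": "example-key", "CONSUMER_SECRET": "example-secret"}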
f = open(toopher_creds_file, 'r')
try:
creds = json.loads(f.read())
except:
return False
finally:
f.close()
consumer_key = creds["CONSUMER_KEY"]
consumer_secret = creds["CONSUMER_SECRET"]
try:
encryptionService = CdiUtil.bean(EncryptionService)
consumer_secret = encryptionService.decrypt(consumer_secret)
except:
return False
self.tapi = ToopherAPI(consumer_key, consumer_secret)
print "Toopher. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "Toopher. Destroy"
print "Toopher. Destroyed successfully"
return True
def getApiVersion(self):
return 1
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
userService = CdiUtil.bean(UserService)
authenticationService = CdiUtil.bean(AuthenticationService)
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
toopher_user_timeout = int(configurationAttributes.get("toopher_user_timeout").getValue2())
user_name = credentials.getUsername()
if (step == 1):
print "Toopher. Authenticate for step 1"
user_password = credentials.getPassword()
logged_in = False
if (StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password)):
userService = CdiUtil.bean(UserService)
logged_in = authenticationService.authenticate(user_name, user_password)
if (not logged_in):
return False
# Find user by uid
userService = CdiUtil.bean(UserService)
find_user_by_uid = userService.getUser(user_name)
if (find_user_by_uid == None):
print "Toopher. Authenticate for step 1. Failed to find user"
return False
# Check if the user paired account to phone
user_external_uid_attr = userService.getCustomAttribute(find_user_by_uid, "oxExternalUid")
if ((user_external_uid_attr == None) or (user_external_uid_attr.getValues() == None)):
print "Toopher. Authenticate for step 1. There is no external UIDs for user: ", user_name
else:
toopher_user_uid = None
for ext_uid in user_external_uid_attr.getValues():
if (ext_uid.startswith('toopher:')):
toopher_user_uid = ext_uid[8:len(ext_uid)]
break
if (toopher_user_uid == None):
print "Toopher. Authenticate for step 1. There is no Toopher UID for user: ", user_name
else:
identity.setWorkingParameter("toopher_user_uid", toopher_user_uid)
return True
elif (step == 2):
print "Toopher. Authenticate for step 2"
passed_step1 = self.isPassedDefaultAuthentication()
if (not passed_step1):
return False
sessionAttributes = identity.getSessionId().getSessionAttributes()
if (sessionAttributes == None) or not sessionAttributes.containsKey("toopher_user_uid"):
print "Toopher. Authenticate for step 2. toopher_user_uid is empty"
# Pair with phone
pairing_phrase_array = requestParameters.get("pairing_phrase")
if ArrayHelper.isEmpty(pairing_phrase_array):
print "Toopher. Authenticate for step 2. pairing_phrase is empty"
return False
pairing_phrase = pairing_phrase_array[0]
try:
pairing_status = self.tapi.pair(pairing_phrase, user_name)
toopher_user_uid = pairing_status.id
except RequestError, err:
print "Toopher. Authenticate for step 2. Failed pair with phone: ", err
return False
pairing_result = self.checkPairingStatus(toopher_user_uid, toopher_user_timeout)
if (not pairing_result):
print "Toopher. Authenticate for step 2. The pairing has not been authorized by the phone yet"
return False
print "Toopher. Authenticate for step 2. Storing toopher_user_uid in user entry", toopher_user_uid
# Store toopher_user_uid in user entry
find_user_by_uid = userService.addUserAttribute(user_name, "oxExternalUid", "toopher:" + toopher_user_uid)
if (find_user_by_uid == None):
print "Toopher. Authenticate for step 2. Failed to update current user"
return False
identity.setWorkingParameter("toopher_user_uid", toopher_user_uid)
else:
toopher_user_uid = sessionAttributes.get("toopher_user_uid")
# Check pairing status
print "Toopher. Authenticate for step 2. toopher_user_uid: ", toopher_user_uid
pairing_result = self.checkPairingStatus(toopher_user_uid, 0)
if (not pairing_result):
print "Toopher. Authenticate for step 2. The pairing has not been authorized by the phone yet"
return False
return True
elif (step == 3):
print "Toopher. Authenticate for step 3"
passed_step1 = self.isPassedDefaultAuthentication()
if (not passed_step1):
return False
sessionAttributes = identity.getSessionId().getSessionAttributes()
if (sessionAttributes == None) or not sessionAttributes.containsKey("toopher_user_uid"):
print "Toopher. Authenticate for step 3. toopher_user_uid is empty"
return False
toopher_user_uid = sessionAttributes.get("toopher_user_uid")
passed_step1 = StringHelper.isNotEmptyString(toopher_user_uid)
if (not passed_step1):
return False
toopher_terminal_name = configurationAttributes.get("toopher_terminal_name").getValue2()
try:
request_status = self.tapi.authenticate(toopher_user_uid, toopher_terminal_name)
request_id = request_status.id
except RequestError, err:
print "Toopher. Authenticate for step 3. Failed to send authentication request to phone: ", err
return False
print "Toopher. Authenticate for step 3. request_id: ", request_id
request_result = self.checkRequestStatus(request_id, toopher_user_timeout)
if (not request_result):
print "Toopher. Authenticate for step 3. The authentication request has not received a response from the phone yet"
return False
print "Toopher. Authenticate for step 3. The request was granted"
return True
else:
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
return True
def getExtraParametersForStep(self, configurationAttributes, step):
if (step in [2, 3]):
return Arrays.asList("toopher_user_uid")
return None
def getCountAuthenticationSteps(self, configurationAttributes):
return 3
def getPageForStep(self, configurationAttributes, step):
if (step == 2):
return "/auth/toopher/tppair.xhtml"
elif (step == 3):
return "/auth/toopher/tpauthenticate.xhtml"
return ""
def isPassedDefaultAuthentication(self):
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
user_name = credentials.getUsername()
passed_step1 = StringHelper.isNotEmptyString(user_name)
return passed_step1
def checkPairingStatus(self, pairing_id, timeout):
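# Polls the Toopher API roughly every two seconds until the pairing is
# enabled or the timeout (given in seconds) expires.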
try:
curTime = java.lang.System.currentTimeMillis()
endTime = curTime + timeout * 1000
while (endTime >= curTime):
pairing_status = self.tapi.getPairingStatus(pairing_id)
if (pairing_status.enabled):
print "Toopher. Pairing complete"
return True
java.lang.Thread.sleep(2000)
curTime = java.lang.System.currentTimeMillis()
except java.lang.Exception, err:
print "Toopher. Could not check pairing status: ", err
return False
print "Toopher. The pairing has not been authorized by the phone yet"
return False
def checkRequestStatus(self, request_id, timeout):
try:
curTime = java.lang.System.currentTimeMillis()
endTime = curTime + timeout * 1000
while (endTime >= curTime):
request_status = self.tapi.getAuthenticationStatus(request_id)
if (request_status.cancelled):
print "Toopher. The authentication request has been cancelled"
return False
if (not request_status.pending):
if (request_status.granted):
print "Toopher. The request was granted"
return True
java.lang.Thread.sleep(2000)
curTime = java.lang.System.currentTimeMillis()
except java.lang.Exception, err:
print "Toopher. Could not check authentication status: ", err
return False
print "Toopher. The authentication request has not received a response from the phone yet"
return False
def logout(self, configurationAttributes, requestParameters):
return True
|
|
# -*- coding: UTF-8 -*-
from __future__ import with_statement
import random
import urllib
from decimal import Decimal
import os.path
from django import forms
from django import http
from django.conf import settings as dsettings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from common.decorators import render_to_json
from common.decorators import render_to_template
from conference import dataaccess
from conference import models
from conference import settings
from conference import utils
from conference.decorators import speaker_access, talk_access, profile_access
from conference.forms import AttendeeLinkDescriptionForm
from conference.forms import OptionForm
from conference.forms import SpeakerForm
from conference.forms import TalkForm
class HttpResponseRedirectSeeOther(http.HttpResponseRedirect):
status_code = 303
@speaker_access
@render_to_template('conference/speaker.html')
def speaker(request, slug, speaker, talks, full_access, speaker_form=SpeakerForm):
if request.method == 'POST':
if not full_access:
return http.HttpResponseBadRequest()
form = speaker_form(data=request.POST)
if form.is_valid():
data = form.cleaned_data
speaker.activity = data['activity']
speaker.activity_homepage = data['activity_homepage']
speaker.industry = data['industry']
speaker.company = data['company']
speaker.company_homepage = data['company_homepage']
speaker.save()
speaker.setBio(data['bio'])
return HttpResponseRedirectSeeOther(reverse('conference-speaker', kwargs={'slug': speaker.slug}))
else:
form = speaker_form(initial={
'activity': speaker.activity,
'activity_homepage': speaker.activity_homepage,
'industry': speaker.industry,
'company': speaker.company,
'company_homepage': speaker.company_homepage,
'bio': getattr(speaker.getBio(), 'body', ''),
})
return {
'form': form,
'full_access': full_access,
'speaker': speaker,
'talks': talks,
'accepted': talks.filter(status='accepted'),
}
@speaker_access
@render_to_template('conference/speaker.xml')
def speaker_xml(request, slug, speaker, full_access, talks):
return {
'speaker': speaker,
'talks': talks,
}
@render_to_template('conference/talk.html')
@talk_access
def talk(request, slug, talk, full_access, talk_form=None):
conf = models.Conference.objects.current()
if talk_form is None:
talk_form = utils.dotted_import(settings.FORMS['AdditionalPaperSubmission'])
if request.method == 'POST':
if not full_access:
return http.HttpResponseBadRequest()
if conf.cfp():
data = request.POST
else:
data = request.POST.copy()
data['level'] = talk.level
data['duration'] = talk.duration
data['language'] = talk.language
data['type'] = talk.type
data['tags'] = ','.join([ x.name for x in talk.tags.all() ])
form = talk_form(data=data, files=request.FILES, instance=talk)
if not conf.cfp() and not data['tags'] and 'tags' in form.fields:
# The CFP is closed and we are editing a talk without tags; this is not
# normally possible since tags are required. We are probably editing a talk
# inserted through the admin, and in that case it makes no sense to let the
# missing tags derail form validation.
form.fields['tags'].required = False
if form.is_valid():
talk = form.save()
messages.info(request, 'Your talk has been modified.')
return HttpResponseRedirectSeeOther(reverse('conference-talk', kwargs={'slug': talk.slug}))
else:
form = talk_form(instance=talk)
return {
'form': form,
'full_access': full_access,
'talk': talk,
'cfp': conf.cfp(),
'voting': conf.voting(),
}
@render_to_template('conference/talk_preview.html')
@talk_access
def talk_preview(request, slug, talk, full_access, talk_form=TalkForm):
conf = models.Conference.objects.current()
return {
'talk': talk,
'voting': conf.voting(),
}
@render_to_template('conference/talk.xml')
@talk_access
def talk_xml(request, slug, talk, full_access):
return {
'talk': talk,
}
def talk_video(request, slug): # pragma: no cover
tlk = get_object_or_404(models.Talk, slug=slug)
if not tlk.video_type or tlk.video_type == 'download':
if tlk.video_file:
vurl = dsettings.MEDIA_URL + tlk.video_file.url
vfile = tlk.video_file.path
elif settings.VIDEO_DOWNLOAD_FALLBACK:
for ext in ('.avi', '.mp4'):
fpath = os.path.join(dsettings.MEDIA_ROOT, 'conference/videos', tlk.slug + ext)
if os.path.exists(fpath):
vurl = dsettings.MEDIA_URL + 'conference/videos/' + tlk.slug + ext
vfile = fpath
break
else:
raise http.Http404()
else:
raise http.Http404()
else:
raise http.Http404()
if settings.TALK_VIDEO_ACCESS:
if not settings.TALK_VIDEO_ACCESS(request, tlk):
return http.HttpResponseForbidden()
vext = os.path.splitext(vfile)[1]
if vext == '.mp4':
mt = 'video/mp4'
elif vext == '.avi':
mt = 'video/x-msvideo'
else:
mt = None
if settings.X_SENDFILE is None:
r = http.HttpResponse(file(vfile), content_type=mt)
elif settings.X_SENDFILE['type'] == 'x-accel':
r = http.HttpResponse('', content_type=mt)
r['X-Accel-Redirect'] = vurl
elif settings.X_SENDFILE['type'] == 'custom':
return settings.X_SENDFILE['f'](tlk, url=vurl, fpath=vfile, content_type=mt)
else:
raise RuntimeError('invalid X_SENDFILE')
fname = '%s%s' % (tlk.title.encode('utf-8'), vext.encode('utf-8'))
r['content-disposition'] = 'attachment; filename="%s"' % fname
return r
@render_to_template('conference/conference.xml')
def conference_xml(request, conference):
conference = get_object_or_404(models.Conference, code=conference)
talks = models.Talk.objects.filter(conference=conference)
schedules = [
(s, utils.TimeTable2.fromSchedule(s.id))
for s in models.Schedule.objects.filter(conference=conference.code)
]
return {
'conference': conference,
'talks': talks,
'schedules': schedules,
}
def talk_report(request): # pragma: no cover
conference = request.GET.getlist('conference')
tags = request.GET.getlist('tag')
return render_to_response(
'conference/talk_report.html', {
'conference': conference,
'tags': tags,
},
context_instance = RequestContext(request))
@render_to_template('conference/schedule.html')
def schedule(request, conference, slug):
sch = get_object_or_404(models.Schedule, conference=conference, slug=slug)
return {
'schedule': sch,
}
@login_required
@render_to_json
def schedule_event_interest(request, conference, slug, eid):
evt = get_object_or_404(models.Event, schedule__conference=conference, schedule__slug=slug, id=eid)
if request.method == 'POST':
val = int(request.POST['interest'])
try:
ei = evt.eventinterest_set.get(user=request.user)
except models.EventInterest.DoesNotExist:
ei = None
if val == 0 and ei:
ei.delete()
elif val != 0:
if not ei:
ei = models.EventInterest(event=evt, user=request.user)
ei.interest = val
ei.save()
else:
try:
val = evt.eventinterest_set.get(user=request.user).interest
except models.EventInterest.DoesNotExist:
val = 0
return { 'interest': val }
@login_required
@render_to_json
def schedule_event_booking(request, conference, slug, eid):
evt = get_object_or_404(models.Event, schedule__conference=conference, schedule__slug=slug, id=eid)
status = models.EventBooking.objects.booking_status(evt.id)
if request.method == 'POST':
fc = utils.dotted_import(settings.FORMS['EventBooking'])
form = fc(event=evt.id, user=request.user.id, data=request.POST)
if form.is_valid():
if form.cleaned_data['value']:
models.EventBooking.objects.book_event(evt.id, request.user.id)
if request.user.id not in status['booked']:
status['booked'].append(request.user.id)
else:
models.EventBooking.objects.cancel_reservation(evt.id, request.user.id)
try:
status['booked'].remove(request.user.id)
except ValueError:
pass
else:
try:
msg = unicode(form.errors['value'][0])
except:
msg = ""
return http.HttpResponseBadRequest(msg)
return {
'booked': len(status['booked']),
'available': max(status['available'], 0),
'seats': status['seats'],
'user': request.user.id in status['booked'],
}
@render_to_json
def schedule_events_booking_status(request, conference):
data = dataaccess.conference_booking_status(conference)
uid = request.user.id if request.user.is_authenticated() else 0
for k, v in data.items():
if uid and uid in v['booked']:
v['user'] = True
else:
v['user'] = False
del v['booked']
return data
@render_to_template('conference/schedule.xml')
def schedule_xml(request, conference, slug):
sch = get_object_or_404(models.Schedule, conference=conference, slug=slug)
return {
'schedule': sch,
'timetable': utils.TimeTable2.fromSchedule(sch.id),
}
@render_to_json
def places(request):
"""
Returns special places and hotels as JSON-serializable data.
"""
places = []
for h in models.SpecialPlace.objects.filter(visible = True):
places.append({
'id': h.id,
'name': h.name,
'address': h.address,
'type': h.type,
'url': h.url,
'email': h.email,
'telephone': h.telephone,
'note': h.note,
'lng': h.lng,
'lat': h.lat,
'html': render_to_string('conference/render_place.html', {'p': h}),
})
for h in models.Hotel.objects.filter(visible = True):
places.append({
'id': h.id,
'name': h.name,
'type': 'hotel',
'telephone': h.telephone,
'url': h.url,
'email': h.email,
'availability': h.availability,
'price': h.price,
'note': h.note,
'affiliated': h.affiliated,
'lng': h.lng,
'lat': h.lat,
'modified': h.modified.isoformat(),
'html': render_to_string('conference/render_place.html', {'p': h}),
})
return places
@render_to_json
def sponsor_json(request, sponsor):
"""
Returns the data of the requested sponsor
"""
sponsor = get_object_or_404(models.Sponsor, slug=sponsor)
return {
'sponsor': sponsor.sponsor,
'slug': sponsor.slug,
'url': sponsor.url
}
@login_required
#@transaction.atomic
def paper_submission(request):
try:
speaker = request.user.speaker
except models.Speaker.DoesNotExist:
speaker = None
conf = models.Conference.objects.current()
# If there is no CFP, we raise a HTTP 404
if not conf.cfp_start or not conf.cfp_end:
raise http.Http404()
# the CfP is closed
if not conf.cfp():
if settings.CFP_CLOSED:
return redirect(settings.CFP_CLOSED)
else:
raise http.Http404()
if speaker:
proposed = list(speaker.talk_set.proposed(conference=settings.CONFERENCE))
else:
proposed = []
if not proposed:
fc = utils.dotted_import(settings.FORMS['PaperSubmission'])
form = fc(user=request.user, data=request.POST, files=request.FILES)
else:
fc = utils.dotted_import(settings.FORMS['AdditionalPaperSubmission'])
form = fc(data=request.POST, files=request.FILES)
if request.method == 'POST':
if not proposed:
form = fc(user=request.user, data=request.POST, files=request.FILES)
else:
form = fc(data=request.POST, files=request.FILES)
if form.is_valid():
if not proposed:
talk = form.save()
speaker = request.user.speaker
else:
talk = form.save(speaker=speaker)
messages.info(request, 'Your talk has been submitted, thank you!')
return HttpResponseRedirectSeeOther(reverse('conference-myself-profile'))
else:
if not proposed:
form = fc(user=request.user)
else:
form = fc()
return render_to_response('conference/paper_submission.html', {
'speaker': speaker,
'form': form,
'proposed_talks': proposed,
}, context_instance=RequestContext(request))
def filter_talks_in_context(request, talks, voting_allowed):
# Associate each talk with a "unique" ordinal number so it can be found easily.
ordinal = dict()
for ix, t in enumerate(talks.order_by('created').values_list('id', flat=True)):
ordinal[t] = ix
user_votes = models.VotoTalk.objects.filter(user=request.user.id)
talks = talks.order_by('speakers__user__first_name', 'speakers__user__last_name')
if request.GET:
form = OptionForm(data=request.GET)
form.is_valid()
options = form.cleaned_data
else:
form = OptionForm()
options = {
'abstracts': 'not-voted',
'talk_type': '',
'language': '',
'tags': '',
'order': 'vote',
}
if options['abstracts'] != 'all':
talks = talks.exclude(id__in=user_votes.values('talk_id'))
if options['talk_type'] in ('s', 't', 'p'):
talks = talks.filter(type=options['talk_type'])
if options['language'] in ('en', 'it'):
talks = talks.filter(language=options['language'])
if options['tags']:
# If options['tags'] contains a tag that is not associated with any talk,
# the query would return zero results; to avoid this, limit the tags usable
# as a filter to those actually associated with a talk.
allowed = set()
ctt = ContentType.objects.get_for_model(models.Talk)
for t, usage in dataaccess.tags().items():
for cid, oid in usage:
if cid == ctt.id:
allowed.add(t.name)
break
tags = set(options['tags']) & allowed
if tags:
talks = talks.filter(id__in=models.ConferenceTaggedItem.objects \
.filter(
content_type__app_label='conference', content_type__model='talk',
tag__name__in=tags) \
.values('object_id')
)
talk_order = options['order']
votes = dict((x.talk_id, x) for x in user_votes)
# Because talks are sorted by a model linked through a m2m relation, the same
# talk can appear more than once and distinct() does not help in this case.
#
# The duplicates can only be filtered out in Python, so take the opportunity
# to attach the user's votes in the same single loop.
dups = set()
def filter_vote(t):
if t['id'] in dups:
return False
dups.add(t['id'])
t['user_vote'] = votes.get(t['id'])
t['ordinal'] = ordinal[t['id']]
return True
talks = filter(filter_vote, talks.values('id'))
if talk_order != 'speaker':
def key(x):
if x['user_vote']:
return x['user_vote'].vote
else:
return Decimal('-99.99')
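# reversed(sorted(reversed(talks), key=key)) yields the talks in descending
# vote order; because sorted() is stable, talks with equal votes keep their
# previous (speaker-name) ordering.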
talks = reversed(sorted(reversed(talks), key=key))
ctx = {
'voting_allowed': voting_allowed,
'talks': list(talks),
'form': form,
}
return ctx
def get_data_for_context(request):
conf = models.Conference.objects.current()
voting_allowed = settings.VOTING_ALLOWED(request.user)
talks = models.Talk.objects.proposed(conference=conf.code)
return conf, talks, voting_allowed
def voting(request):
conf, talks, voting_allowed = get_data_for_context(request)
if not settings.VOTING_OPENED(conf, request.user):
if settings.VOTING_CLOSED:
return redirect(settings.VOTING_CLOSED)
else:
raise http.Http404()
if request.method == 'POST':
if not voting_allowed:
return http.HttpResponseBadRequest('anonymous user not allowed')
data = dict((x.id, x) for x in talks)
for k, v in filter(lambda x: x[0].startswith('vote-'), request.POST.items()):
try:
talk = data[int(k[5:])]
except KeyError:
return http.HttpResponseBadRequest('invalid talk')
except ValueError:
return http.HttpResponseBadRequest('id malformed')
if not v:
models.VotoTalk.objects.filter(user=request.user, talk=talk).delete()
else:
try:
vote = Decimal(v)
except ValueError:
return http.HttpResponseBadRequest('vote malformed')
try:
o = models.VotoTalk.objects.get(user=request.user, talk=talk)
except models.VotoTalk.DoesNotExist:
o = models.VotoTalk(user=request.user, talk=talk)
if not vote:
if o.id:
o.delete()
else:
o.vote = vote
o.save()
if request.is_ajax():
return http.HttpResponse('')
else:
return HttpResponseRedirectSeeOther(reverse('conference-voting') + '?' + request.GET.urlencode())
else:
from conference.forms import TagField, ReadonlyTagWidget, PseudoRadioRenderer
class OptionForm(forms.Form):
abstracts = forms.ChoiceField(
choices=(('not-voted', 'Not yet voted'),
('all', 'All'),
),
required=False,
initial='not-voted',
widget=forms.RadioSelect(renderer=PseudoRadioRenderer),
)
talk_type = forms.ChoiceField(
label=u'Session type',
choices=(('all', 'All'),) + tuple(settings.TALK_TYPES_TO_BE_VOTED),
required=False,
initial='all',
widget=forms.RadioSelect(renderer=PseudoRadioRenderer),
)
language = forms.ChoiceField(
choices=(('all', 'All'),) + tuple(settings.TALK_SUBMISSION_LANGUAGES),
required=False,
initial='all',
widget=forms.RadioSelect(renderer=PseudoRadioRenderer),
)
order = forms.ChoiceField(
choices=(('random', 'Random order'),
('vote', 'Vote'),
('speaker', 'Speaker name'),
),
required=False,
initial='random',
widget=forms.RadioSelect(renderer=PseudoRadioRenderer),
)
tags = TagField(
required=False,
widget=ReadonlyTagWidget(),
)
# Associate each talk with a "unique" ordinal number, displayed next to the title, so it can be found easily.
ordinal = dict()
for ix, t in enumerate(talks.order_by('created').values_list('id', flat=True)):
ordinal[t] = ix
user_votes = models.VotoTalk.objects.filter(user=request.user.id)
# Start by sorting talks by name
talks = talks.order_by('speakers__user__first_name',
'speakers__user__last_name')
if request.GET:
form = OptionForm(data=request.GET)
form.is_valid()
options = form.cleaned_data
else:
form = OptionForm()
options = {
'abstracts': 'not-voted',
'talk_type': 'all',
'language': 'all',
'tags': '',
'order': 'random',
}
# if options['abstracts'] == 'not-voted':
# talks = talks.exclude(id__in=user_votes.values('talk_id'))
if options['talk_type'] in (tchar
for (tchar, tdef) in settings.TALK_TYPES_TO_BE_VOTED):
talks = talks.filter(type__startswith=options['talk_type'])
if options['language'] in (lcode
for (lcode, ldef) in settings.TALK_SUBMISSION_LANGUAGES):
talks = talks.filter(language=options['language'])
if options['tags']:
# If options['tags'] contains a tag that is not associated with any talk,
# the query would return no results; to avoid this, limit the tags usable
# as a filter to those actually associated with a talk.
allowed = set()
ctt = ContentType.objects.get_for_model(models.Talk)
for t, usage in dataaccess.tags().items():
for cid, oid in usage:
if cid == ctt.id:
allowed.add(t.name)
break
tags = set(options['tags']) & allowed
if tags:
talks = talks.filter(id__in=models.ConferenceTaggedItem.objects\
.filter(
content_type__app_label='conference', content_type__model='talk',
tag__name__in=tags)\
.values('object_id')
)
talk_order = options['order']
votes = dict((x.talk_id, x) for x in user_votes)
# Because talks are sorted by a model linked through a m2m relation, the same
# talk can appear more than once and distinct() does not help in this case.
#
# The duplicates can only be filtered out in Python, so take the opportunity
# to attach the user's votes in the same single loop.
dups = set()
def filter_vote(t):
if t['id'] in dups:
return False
dups.add(t['id'])
t['user_vote'] = votes.get(t['id'])
t['ordinal'] = ordinal[t['id']]
return True
talks = filter(filter_vote, talks.values('id'))
# Fix talk order, if necessary
if talk_order == 'vote':
def key(x):
if x['user_vote']:
return x['user_vote'].vote
else:
return Decimal('-99.99')
talks = reversed(sorted(reversed(talks), key=key))
elif talk_order == 'random':
random.shuffle(talks)
elif talk_order == 'speaker':
# Already sorted
pass
ctx = {
'voting_allowed': voting_allowed,
'talks': list(talks),
'form': form,
}
if request.is_ajax():
tpl = 'conference/ajax/voting.html'
else:
tpl = 'conference/voting.html'
return render(request, tpl, ctx)
@render_to_template('conference/profile.html')
@profile_access
def user_profile(request, slug, profile=None, full_access=False):
fc = utils.dotted_import(settings.FORMS['Profile'])
if request.method == 'POST':
if not full_access:
return http.HttpResponseForbidden()
form = fc(instance=profile, data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
return HttpResponseRedirectSeeOther(reverse('conference-profile', kwargs={'slug': profile.slug}))
else:
if full_access:
form = fc(instance=profile)
else:
form = None
return {
'form': form,
'full_access': full_access,
'profile': profile,
}
@login_required
def myself_profile(request):
p = models.AttendeeProfile.objects.getOrCreateForUser(request.user)
return redirect('conference-profile', slug=p.slug)
@render_to_json
def schedule_events_expected_attendance(request, conference):
return dataaccess.expected_attendance(conference)
def covers(request, conference):
events = settings.VIDEO_COVER_EVENTS(conference)
if not events:
raise http.Http404()
schedules = dataaccess.schedules_data(
models.Schedule.objects\
.filter(conference=conference)\
.order_by('date')\
.values_list('id', flat=True)
)
from collections import defaultdict
tracks = defaultdict(dict)
for s in schedules:
for t in s['tracks'].values():
tracks[s['id']][t.track] = t.title
grouped = defaultdict(lambda: defaultdict(list))
for e in dataaccess.events(eids=events):
if not e['tracks']:
continue
sid = e['schedule_id']
t = tracks[sid][e['tracks'][0]]
grouped[sid][t].append(e)
ordered = []
for s in schedules:
data = grouped[s['id']]
if not data:
continue
ordered.append((s, sorted(data.items())))
ctx = {
'conference': conference,
'events': ordered,
}
return render(request, 'conference/covers.html', ctx)
@login_required
def user_profile_link(request, uuid):
"""
"""
profile = get_object_or_404(models.AttendeeProfile, uuid=uuid).user_id
conf = models.Conference.objects.current()
active = conf.conference() or 1
if request.user.id == profile:
if active:
p, _ = models.Presence.objects.get_or_create(profile_id=profile, conference=conf.code)
return redirect('conference-myself-profile')
uid = request.user.id
created = linked = False
try:
link = models.AttendeeLink.objects.getLink(uid, profile)
linked = True
except models.AttendeeLink.DoesNotExist:
if active:
link = models.AttendeeLink(attendee1_id=uid, attendee2_id=profile)
link.save()
from conference.signals import attendees_connected
attendees_connected.send(link, attendee1=uid, attendee2=profile)
created = True
linked = True
form = AttendeeLinkDescriptionForm(initial={
'message': link.message,
})
ctx = {
'profile2': profile,
'created': created,
'linked': linked,
'form': form,
}
return render(request, 'conference/profile_link.html', ctx)
@login_required
@render_to_json
def user_profile_link_message(request, uuid):
profile = get_object_or_404(models.AttendeeProfile, uuid=uuid).user_id
uid = request.user.id
if uid == profile:
return {}
try:
link = models.AttendeeLink.objects.getLink(uid, profile)
except models.AttendeeLink.DoesNotExist:
raise http.Http404()
if request.method == 'POST':
form = AttendeeLinkDescriptionForm(data=request.POST)
if form.is_valid():
link.message = form.cleaned_data['message']
link.save()
return {}
@login_required
def user_conferences(request):
uid = request.user.id
conferences = models.Conference.objects.filter(
code__in=models.Presence.objects.filter(profile=uid).values('conference'))
people = []
for p in models.AttendeeLink.objects.findLinks(uid).order_by('timestamp'):
if p.attendee1_id == uid:
p.other = p.attendee2_id
else:
p.other = p.attendee1_id
people.append(p)
ctx = {
'conferences': conferences,
'people': people,
}
return render(request, 'conference/user_conferences.html', ctx)
|
|
# Copyright (c) 2013,2014 Burkhard Ritter
# This code is distributed under the two-clause BSD License.
import _qca
from collections import OrderedDict
import math
class Layout(object):
def __init__(self):
self._pl = _qca.Layout()
@property
def primitive_layout(self):
return self._pl
def __getstate__(self):
i = OrderedDict()
# This can be very verbose, disabled for now.
# i['r_sites'] = self._pl.r_sites
# i['r_charges'] = self._pl.r_charges
# i['charges'] = self._pl.charges
# i['epc'] = str(self._pl.epc)
return i
def __setstate__(self, i):
# we do not deserialize properly
# TODO: properly reconstruct state
self.__init__()
def coma_getstate(self):
return self.__getstate__()
def coma_setstate(self, i):
self.__setstate__(i)
def __eq__(self, l):
d1 = self.__dict__.copy()
d2 = l.__dict__.copy()
pl1 = d1['_pl']
pl2 = d2['_pl']
del d1['_pl']
del d2['_pl']
return (d1 == d2 and
pl1.r_sites == pl2.r_sites and
pl1.r_charges == pl2.r_charges and
pl1.charges == pl2.charges and
pl1.epc == pl2.epc)
class Wire(Layout):
def __init__(self, N_, V1_, boa_, P_):
Layout.__init__(self)
self.N = N_
self.V1 = V1_
self.boa = boa_
self.P = P_
self._pl.wire(N_, 1.0/V1_, boa_ * 1.0 / V1_, P_)
def __getstate__(self):
i = OrderedDict()
i['type'] = 'wire'
i['N'] = self.N
i['V1'] = self.V1
i['boa'] = self.boa
i['P'] = self.P
i.update(Layout.__getstate__(self))
return i
def __setstate__(self, i):
self.__init__(i['N'], i['V1'], i['boa'], i['P'])
class NonuniformWire(Layout):
def __init__(self, N_, V1_, boas_, P_):
Layout.__init__(self)
self.N = N_
self.V1 = V1_
self.boas = boas_
self.P = P_
a = 1.0 / V1_
bs = [boa * a for boa in boas_]
self._pl.nonuniformWire(N_, a, bs, P_)
def __getstate__(self):
i = OrderedDict()
i['type'] = 'nonuniformwire'
i['N'] = self.N
i['V1'] = self.V1
i['boas'] = self.boas
i['P'] = self.P
i.update(Layout.__getstate__(self))
return i
def __setstate__(self, i):
self.__init__(i['N'], i['V1'], i['boas'], i['P'])
class WireWithTwoDriverCells(Layout):
def __init__(self, N_, V1_, boa_, P1_, P2_):
Layout.__init__(self)
self.N = N_
self.V1 = V1_
self.boa = boa_
self.P1 = P1_
self.P2 = P2_
a = 1.0 / self.V1
b = self.boa * a
for i in range(self.N):
self._pl.addCell((a+b)*i, 0, a)
self._pl.addDriverCell(-b-a, 0, a, self.P1)
self._pl.addDriverCell(self.N*(b+a), 0, a, self.P2)
def __getstate__(self):
i = OrderedDict()
i['type'] = 'wire_with_two_driver_cells'
i['N'] = self.N
i['V1'] = self.V1
i['boa'] = self.boa
i['P1'] = self.P1
i['P2'] = self.P2
i.update(Layout.__getstate__(self))
return i
def __setstate__(self, i):
self.__init__(i['N'], i['V1'], i['boa'], i['P1'], i['P2'])
class NonuniformWireWithTwoDriverCells(Layout):
def __init__(self, N_, V1_, boas_, P1_, P2_):
Layout.__init__(self)
self.N = N_
self.V1 = V1_
self.boas = boas_
self.P1 = P1_
self.P2 = P2_
assert len(self.boas) == self.N+1
a = 1.0 / self.V1
bs = [boa * a for boa in self.boas]
x_off = 0
self._pl.addDriverCell(x_off, 0, a, self.P1)
x_off += bs[0]+a
for b in bs[1:]:
self._pl.addCell(x_off, 0, a)
x_off += b+a
self._pl.addDriverCell(x_off, 0, a, self.P2)
def __getstate__(self):
i = OrderedDict()
i['type'] = 'nonuniformwire_with_two_driver_cells'
i['N'] = self.N
i['V1'] = self.V1
i['boas'] = self.boas
i['P1'] = self.P1
i['P2'] = self.P2
i.update(Layout.__getstate__(self))
return i
def __setstate__(self, i):
self.__init__(i['N'], i['V1'], i['boas'], i['P1'], i['P2'])
class InfiniteWire(Layout):
def __init__(self, N_, N_dead_, V1_, boa_, P_):
Layout.__init__(self)
self.N = N_
self.N_dead = N_dead_
self.V1 = V1_
self.boa = boa_
self.P = P_
self.construct_wire()
def construct_wire(self):
a = 1.0 / self.V1
b = self.boa * a
for i in range(0, self.N_dead):
self._pl.addDriverCell(i*(b+a), 0, a, self.P)
for i in range(self.N_dead, self.N_dead + self.N):
self._pl.addCell(i*(b+a), 0, a)
for i in range(self.N_dead + self.N, 2*self.N_dead + self.N):
self._pl.addDriverCell(i*(b+a), 0, a, self.P)
def __getstate__(self):
i = OrderedDict()
i['type'] = 'infinite_wire'
i['N'] = self.N
i['N_dead'] = self.N_dead
i['V1'] = self.V1
i['boa'] = self.boa
i['P'] = self.P
i.update(Layout.__getstate__(self))
return i
def __setstate__(self, i):
self.__init__(i['N'], i['N_dead'], i['V1'], i['boa'], i['P'])
class AngleWire(Layout):
def __init__(self, N_, V1_, doa_, theta_, P_):
Layout.__init__(self)
self.N = N_
self.V1 = V1_
self.doa = doa_
self.theta = theta_
self.P = P_
a = 1.0 / self.V1
d = self.doa * a
t = self.theta*math.pi/180.0
self._pl.addDriverCell(0, 0, a, self.P)
for i in range(1,self.N+1):
self._pl.addCell(d*math.cos(t)*i, d*math.sin(t)*i, a)
def __getstate__(self):
i = OrderedDict()
i['type'] = 'angle_wire'
i['N'] = self.N
i['V1'] = self.V1
i['doa'] = self.doa
i['theta'] = self.theta
i['P'] = self.P
i.update(Layout.__getstate__(self))
return i
def __setstate__(self, i):
self.__init__(i['N'], i['V1'], i['doa'], i['theta'], i['P'])
class KinkyWire(Layout):
def __init__(self, N_, V1_, doa_, P_, kinks_):
"""Construct a wire with kinks.
N_ is the number of cells in the wire.
V1_ is the Coulombic term, V1 = 1/a.
doa_ is d/a, the cell-cell distance. (d/a = b/a + 1).
P_ is the driver cell polarization.
kinks_ is a tuple of 2-tuples. Each 2-tuple's first entry is the cell
after which the kink is positioned. The second entry is the angle of
the kink in degrees. For example, kinks_=((5,90),) describes a wire
with a single 90 degree kink after the fifth cell. Alternatively,
kinks_ can also be a corresponding dict (but a dict is mutable which is
a disadvantage in certain use cases).
"""
Layout.__init__(self)
self.N = N_
self.V1 = V1_
self.doa = doa_
self.P = P_
self.kinks = kinks_
a = 1.0 / self.V1
d = self.doa * a
ks = dict(self.kinks)
theta,x,y = 0,0,0
thetas,xs,ys = [],[],[]
for i in range(self.N):
if ks.has_key(i):
theta += ks[i] * math.pi / 180.0
x += d * math.cos(theta)
y += d * math.sin(theta)
thetas.append(theta)
xs.append(x)
ys.append(y)
self._pl.addDriverCell(0, 0, a, self.P)
for x,y in zip(xs,ys):
self._pl.addCell(x, y, a)
def __getstate__(self):
i = OrderedDict()
i['type'] = 'kinky_wire'
i['N'] = self.N
i['V1'] = self.V1
i['doa'] = self.doa
i['P'] = self.P
i['kinks'] = self.kinks
i.update(Layout.__getstate__(self))
return i
def __setstate__(self, i):
self.__init__(i['N'], i['V1'], i['doa'], i['P'], i['kinks'])
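# A minimal usage sketch for KinkyWire (parameter values are illustrative
# only and assume the compiled _qca module is importable):
#
#   wire = KinkyWire(N_=10, V1_=10.0, doa_=3.0, P_=1.0, kinks_=((5, 90),))
#   state = wire.coma_getstate()   # OrderedDict describing the layout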
class MajorityGate(Layout):
def __init__(self, N_lead_, V1_, doa_, I1_, I2_, I3_):
Layout.__init__(self)
self.N_lead = N_lead_
self.V1 = V1_
self.doa = doa_
self.I1 = I1_
self.I2 = I2_
self.I3 = I3_
a = 1.0 / self.V1
d = self.doa * a
# active cells
self._pl.addCell(0,0,a)
for i in range(1, self.N_lead+1):
self._pl.addCell(0,i*d,a)
for i in range(1, self.N_lead+1):
self._pl.addCell(-i*d,0,a)
for i in range(1, self.N_lead+1):
self._pl.addCell(0,-i*d,a)
for i in range(1, self.N_lead+1):
self._pl.addCell(i*d,0,a)
# driver cells
z = (self.N_lead+1)*d
self._pl.addDriverCell(0,z,a,self.I1)
self._pl.addDriverCell(-z,0,a,self.I2)
self._pl.addDriverCell(0,-z,a,self.I3)
def __getstate__(self):
i = OrderedDict()
i['type'] = 'majority_gate'
i['N_lead'] = self.N_lead
i['V1'] = self.V1
i['doa'] = self.doa
i['I1'] = self.I1
i['I2'] = self.I2
i['I3'] = self.I3
i.update(Layout.__getstate__(self))
return i
def __setstate__(self, i):
self.__init__(i['N_lead'], i['V1'], i['doa'], i['I1'], i['I2'], i['I3'])
|
|
#!/usr/bin/python3
r"""
A generic bot to do data ingestion (batch uploading) of photos or other files.
In addition it installs related metadata. The uploading is primarily from a url
to a wiki-site.
Required configuration files
============================
- a 'Data ingestion' template on a wiki site that specifies the name of a
csv file, and csv configuration values.
- a csv file that specifies each file to upload, the file's copy-from URL
location, and some metadata.
Required parameters
===================
The following parameters are required. The 'csvdir' and the 'page:csvFile' will
be joined to create a path to a csv file that should contain the specified
information about the files to upload.
-csvdir A directory path to csv files
-page A wiki path to templates. One of the templates at this
location must be a 'Data ingestion' template with the
following parameters.
Required parameters
csvFile
Optional parameters
sourceFormat
options: 'csv'
sourceFileKey
options: 'StockNumber'
csvDialect
options: 'excel', ''
csvDelimiter
options: any delimiter, ',' is most common
csvEncoding
options: 'utf8', 'Windows-1252'
formattingTemplate
titleFormat
Example 'Data ingestion' template
=================================
.. code::
{{Data ingestion
|sourceFormat=csv
|csvFile=csv_ingestion.csv
|sourceFileKey=%(StockNumber)
|csvDialect=
|csvDelimiter=,
|csvEncoding=utf8
|formattingTemplate=Template:Data ingestion test configuration
|titleFormat=%(name)s - %(set)s.%(_ext)s
}}
Csv file
========
A full example can be found at tests/data/csv_ingestion.csv
The 'url' field is the location a file will be copied from.
csv field Headers::
description.en,source,author,license,set,name,url
Usage
=====
.. code::
python pwb.py data_ingestion -csvdir:<local_dir/> -page:<cfg_page_on_wiki>
Example
=======
Warning! Put it in one line, otherwise it won't work correctly.
.. code::
python pwb.py data_ingestion \
-csvdir:"test/data" \
-page:"User:<Your-Username>/data_ingestion_test_template"
"""
#
# (C) Pywikibot team, 2012-2022
#
# Distributed under the terms of the MIT license.
#
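# An illustrative CSV row matching the header listed in the module docstring
# (all values are placeholders, not taken from tests/data/csv_ingestion.csv;
# the 'url' column is where the file is copied from):
#
#   A test image.,Wikipedia,John Doe,cc-by-sa-3.0,Test set,Test name,http://example.com/image.jpg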
import base64
import codecs
import csv
import hashlib
import io
import os
import posixpath
from typing import Any, BinaryIO, Optional
from urllib.parse import urlparse
import pywikibot
from pywikibot import pagegenerators
from pywikibot.backports import Dict, List
from pywikibot.comms.http import fetch
from pywikibot.exceptions import NoPageError
from pywikibot.specialbots import UploadRobot
class Photo(pywikibot.FilePage):
"""Represents a Photo (or other file), with metadata, to be uploaded."""
def __init__(self, url: str, metadata: Dict[str, Any],
site: Optional[pywikibot.site.APISite] = None) -> None:
"""
Initializer.
:param url: URL of photo
:param metadata: metadata about the photo that can be referred to
from the title & template
:param site: target site
"""
self.URL = url
self.metadata = metadata
self.metadata['_url'] = url
self.metadata['_filename'] = filename = posixpath.split(
urlparse(url)[2])[1]
ext = filename.split('.')[-1]
self.metadata['_ext'] = None if ext == filename else ext
self.contents = None
if not site:
site = pywikibot.Site('commons:commons')
# default title
super().__init__(site, self.get_title('%(_filename)s.%(_ext)s'))
def download_photo(self) -> BinaryIO:
"""
Download the photo and store it in an io.BytesIO object.
TODO: Add exception handling
"""
if not self.contents:
image_file = fetch(self.URL).content
self.contents = io.BytesIO(image_file)
return self.contents
def find_duplicate_images(self) -> List[str]:
"""
Find duplicates of the photo.
Calculates the SHA1 hash and asks the MediaWiki API
for a list of duplicates.
TODO: Add exception handling, fix site thing
"""
hash_object = hashlib.sha1()
hash_object.update(self.download_photo().getvalue())
return [page.title(with_ns=False)
for page in self.site.allimages(
sha1=base64.b16encode(hash_object.digest()))]
def get_title(self, fmt: str) -> str:
"""
Populate format string with %(name)s entries using metadata.
Note: this does not clean the title, so it may be unusable as
a MediaWiki page title, and cause an API exception when used.
:param fmt: format string
:return: formatted string
"""
# FIXME: normalise the title so it is usable as a MediaWiki title.
return fmt % self.metadata
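# For example, with metadata {'name': 'Foo', 'set': 'Bar', '_ext': 'jpg'}
# (illustrative values), get_title('%(name)s - %(set)s.%(_ext)s') returns
# 'Foo - Bar.jpg'.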
def get_description(self, template,
extraparams: Optional[Dict[str, str]] = None) -> str:
"""Generate a description for a file."""
params = {}
params.update(self.metadata)
params.update(extraparams or {})
description = '{{%s\n' % template
for key in sorted(params.keys()):
value = params[key]
if not key.startswith('_'):
description += '|{}={}\n'.format(
key, self._safe_template_value(value))
description += '}}'
return description
@staticmethod
def _safe_template_value(value: str) -> str:
"""Replace pipe (|) with {{!}}."""
return value.replace('|', '{{!}}')
def CSVReader(fileobj, urlcolumn, site=None, *args, **kwargs): # noqa: N802
"""Yield Photo objects for each row of a CSV file."""
reader = csv.DictReader(fileobj, *args, **kwargs)
for line in reader:
yield Photo(line[urlcolumn], line, site=site)
class DataIngestionBot(pywikibot.Bot):
"""Data ingestion bot."""
def __init__(self, titlefmt: str, pagefmt: str, **kwargs) -> None:
"""
Initializer.
:param titlefmt: Title format
:param pagefmt: Page format
"""
super().__init__(**kwargs)
self.titlefmt = titlefmt
self.pagefmt = pagefmt
def treat(self, page) -> None:
"""Process each page.
1. Check for existing duplicates on the wiki specified in self.site.
2. If duplicates are found, then skip uploading.
3. Download the file from photo.URL and upload the file to self.site.
"""
duplicates = page.find_duplicate_images()
if duplicates:
pywikibot.output('Skipping duplicate of {!r}'.format(duplicates))
return
title = page.get_title(self.titlefmt)
description = page.get_description(self.pagefmt)
bot = UploadRobot(url=page.URL,
description=description,
use_filename=title,
keep_filename=True,
verify_description=False,
target_site=self.site)
bot._contents = page.download_photo().getvalue()
bot._retrieved = True
bot.run()
@classmethod
def parse_configuration_page(cls, configuration_page) -> Dict[str, str]:
"""
Parse a Page which contains the configuration.
:param configuration_page: page with configuration
:type configuration_page: :py:obj:`pywikibot.Page`
"""
# Set a bunch of defaults
configuration = {
'csvDialect': 'excel',
'csvDelimiter': ';',
'csvEncoding': 'Windows-1252', # FIXME: Encoding hell
}
templates = configuration_page.templatesWithParams()
for (template, params) in templates:
if template.title(with_ns=False) != 'Data ingestion':
continue
for param in params:
field, _, value = param.partition('=')
# Remove leading or trailing spaces
field = field.strip()
value = value.strip() or None
configuration[field] = value
return configuration
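# Illustrative example (page text and resulting values are hypothetical):
# a configuration page containing
#
#     {{Data ingestion
#     |csvFile=batch1.csv
#     |titleFormat=%(name)s - %(set)s.%(_ext)s
#     |formattingTemplate=Information
#     }}
#
# would yield, on top of the defaults above, roughly:
#
#     {'csvDialect': 'excel', 'csvDelimiter': ';', 'csvEncoding': 'Windows-1252',
#      'csvFile': 'batch1.csv', 'titleFormat': '%(name)s - %(set)s.%(_ext)s',
#      'formattingTemplate': 'Information'}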
def main(*args: str) -> None:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
csv_dir = None
unknown = []
# This factory is responsible for processing command line arguments
# that are also used by other scripts and that determine which pages
# to work on.
gen_factory = pagegenerators.GeneratorFactory()
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
local_args = gen_factory.handle_args(local_args)
for arg in local_args:
opt, _, value = arg.partition(':')
if opt == '-csvdir':
csv_dir = value
else:
unknown.append(arg)
config_generator = gen_factory.getCombinedGenerator()
if pywikibot.bot.suggest_help(
missing_parameters=None if csv_dir else ['-csvdir'],
missing_generator=not config_generator,
unknown_parameters=unknown):
return
for config_page in config_generator:
try:
config_page.get()
except NoPageError:
pywikibot.error('{} does not exist'.format(config_page))
continue
configuration = DataIngestionBot.parse_configuration_page(config_page)
filename = os.path.join(csv_dir, configuration['csvFile'])
try:
f = codecs.open(filename, 'r', configuration['csvEncoding'])
except OSError as e:
pywikibot.error('{} could not be opened: {}'.format(filename, e))
else:
with f:
files = CSVReader(f, urlcolumn='url',
site=config_page.site,
dialect=configuration['csvDialect'],
delimiter=str(configuration['csvDelimiter']))
bot = DataIngestionBot(configuration['titleFormat'],
configuration['formattingTemplate'],
generator=files)
bot.run()
if __name__ == '__main__':
main()
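# Illustrative invocation (sketch; assumes the script is run through Pywikibot's
# pwb.py wrapper, and the directory and page name are hypothetical):
#
#     python pwb.py data_ingestion -csvdir:/path/to/csv -page:User:Example/config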
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class interface_ref(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/static-routes/static/next-hops/next-hop/interface-ref. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Reference to an interface or subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "interface-ref"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"static-routes",
"static",
"next-hops",
"next-hop",
"interface-ref",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config (container)
YANG Description: Configured reference to interface / subinterface
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configured reference to interface / subinterface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/state (container)
YANG Description: Operational state for interface-ref
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state for interface-ref
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
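# Illustrative usage sketch for the generated binding (assumes the usual
# OpenConfig interface-ref leaves `interface` and `subinterface` exist on the
# config container; those attribute names are assumptions, not taken from the
# generated code above):
#
#     iref = interface_ref()
#     iref.config.interface = 'eth0'   # set a leaf on the config container
#     print(iref._path())              # ['network-instances', ..., 'interface-ref']
#     print(iref.config._changed())    # pyangbind change tracking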
from . import config
from . import state
class interface_ref(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/static-routes/static/next-hops/next-hop/interface-ref. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Reference to an interface or subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "interface-ref"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"static-routes",
"static",
"next-hops",
"next-hop",
"interface-ref",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config (container)
YANG Description: Configured reference to interface / subinterface
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configured reference to interface / subinterface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/state (container)
YANG Description: Operational state for interface-ref
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state for interface-ref
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])