repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 distinct values) |
---|---|---|---|---|---|
Zaneh-/bearded-tribble-back | taiga/projects/attachments/migrations/0002_add_size_and_name_fields.py | 29 | 1119 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os.path as path
from django.db import models, migrations
def parse_filenames_and_sizes(apps, schema_editor):
Attachment = apps.get_model("attachments", "Attachment")
for item in Attachment.objects.all():
try:
item.size = item.attached_file.size
except Exception as e:
item.size = 0
item.name = path.basename(item.attached_file.name)
item.save()
class Migration(migrations.Migration):
dependencies = [
('attachments', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='attachment',
name='name',
field=models.CharField(default='', blank=True, max_length=500),
preserve_default=True,
),
migrations.AddField(
model_name='attachment',
name='size',
field=models.IntegerField(editable=False, null=True, blank=True, default=None),
preserve_default=True,
),
migrations.RunPython(parse_filenames_and_sizes)
]
| agpl-3.0 |
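The RunPython operation above registers only a forward function, so the migration cannot be reversed. A minimal sketch of a reversible variant, assuming a no-op reverse is acceptable (the lambda below is illustrative and not part of the stored file):

    # Hypothetical reversible form of the operation above
    migrations.RunPython(parse_filenames_and_sizes,
                         reverse_code=lambda apps, schema_editor: None)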
Justin-Yuan/Image2Music-Generator | library/jython2.5.3/Lib/test/testcodec.py | 252 | 1047 | """ Test Codecs (used by test_charmapcodec)
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x78: u"abc", # 1-n decoding mapping
"abc": 0x0078,# 1-n encoding mapping
0x01: None, # decoding mapping to <undefined>
0x79: u"", # decoding mapping to <remove character>
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
| gpl-2.0 |
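The codec above is driven through Python's charmap helpers; a hedged sketch of exercising the decoding map directly (Python 2 syntax to match the file, expected result inferred from the "1-n decoding mapping" comment):

    import codecs
    # 'x' (0x78) is declared as a 1-n mapping, so this is expected to yield (u"abc", 1)
    decoded, consumed = codecs.charmap_decode("x", "strict", decoding_map)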
kfwang/Glance-OVA-OVF | glance/tests/__init__.py | 19 | 1197 | # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance.cmd as glance_cmd
glance_cmd.fix_greendns_ipv6()
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables tests to work with i18n _() blocks
import six.moves.builtins as __builtin__
setattr(__builtin__, '_', lambda x: x)
# Set up logging to output debugging
import logging
logger = logging.getLogger()
hdlr = logging.FileHandler('run_tests.log', 'w')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
| apache-2.0 |
KaelChen/numpy | numpy/ma/mrecords.py | 90 | 27383 | """:mod:`numpy.ma.mrecords`
Defines the equivalent of :class:`numpy.recarrays` for masked arrays,
where fields can be accessed as attributes.
Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
and the masking of individual fields.
.. moduleauthor:: Pierre Gerard-Marchant
"""
from __future__ import division, absolute_import, print_function
# We should make sure that no field is called '_mask','mask','_fieldmask',
# or whatever restricted keywords. An idea would be to not bother in the
# first place, and then rename the invalid fields with a trailing
# underscore. Maybe we could just overload the parser function?
import sys
import warnings
import numpy as np
import numpy.core.numerictypes as ntypes
from numpy.compat import basestring
from numpy import (
bool_, dtype, ndarray, recarray, array as narray
)
from numpy.core.records import (
fromarrays as recfromarrays, fromrecords as recfromrecords
)
_byteorderconv = np.core.records._byteorderconv
_typestr = ntypes._typestr
import numpy.ma as ma
from numpy.ma import (
MAError, MaskedArray, masked, nomask, masked_array, getdata,
getmaskarray, filled
)
_check_fill_value = ma.core._check_fill_value
__all__ = [
'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords',
'fromtextfile', 'addfield',
]
reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
def _getformats(data):
"""
Returns the formats of arrays in arraylist as a comma-separated string.
"""
if hasattr(data, 'dtype'):
return ",".join([desc[1] for desc in data.dtype.descr])
formats = ''
for obj in data:
obj = np.asarray(obj)
formats += _typestr[obj.dtype.type]
if issubclass(obj.dtype.type, ntypes.flexible):
formats += repr(obj.itemsize)
formats += ','
return formats[:-1]
def _checknames(descr, names=None):
"""
Checks that field names ``descr`` are not reserved keywords.
If this is the case, a default 'f%i' is substituted. If the argument
`names` is not None, updates the field names to valid names.
"""
ndescr = len(descr)
default_names = ['f%i' % i for i in range(ndescr)]
if names is None:
new_names = default_names
else:
if isinstance(names, (tuple, list)):
new_names = names
elif isinstance(names, str):
new_names = names.split(',')
else:
raise NameError("illegal input names %s" % repr(names))
nnames = len(new_names)
if nnames < ndescr:
new_names += default_names[nnames:]
ndescr = []
for (n, d, t) in zip(new_names, default_names, descr.descr):
if n in reserved_fields:
if t[0] in reserved_fields:
ndescr.append((d, t[1]))
else:
ndescr.append(t)
else:
ndescr.append((n, t[1]))
return np.dtype(ndescr)
def _get_fieldmask(self):
mdescr = [(n, '|b1') for n in self.dtype.names]
fdmask = np.empty(self.shape, dtype=mdescr)
fdmask.flat = tuple([False] * len(mdescr))
return fdmask
class MaskedRecords(MaskedArray, object):
"""
Attributes
----------
_data : recarray
Underlying data, as a record array.
_mask : boolean array
Mask of the records. A record is masked when all its fields are
masked.
_fieldmask : boolean recarray
Record array of booleans, setting the mask of each individual field
of each record.
_fill_value : record
Filling values for each field.
"""
def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False,
mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
copy=False,
**options):
self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
strides=strides, formats=formats, names=names,
titles=titles, byteorder=byteorder,
aligned=aligned,)
mdtype = ma.make_mask_descr(self.dtype)
if mask is nomask or not np.size(mask):
if not keep_mask:
self._mask = tuple([False] * len(mdtype))
else:
mask = np.array(mask, copy=copy)
if mask.shape != self.shape:
(nd, nm) = (self.size, mask.size)
if nm == 1:
mask = np.resize(mask, self.shape)
elif nm == nd:
mask = np.reshape(mask, self.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MAError(msg % (nd, nm))
copy = True
if not keep_mask:
self.__setmask__(mask)
self._sharedmask = True
else:
if mask.dtype == mdtype:
_mask = mask
else:
_mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
self._mask = _mask
return self
def __array_finalize__(self, obj):
# Make sure we have a _fieldmask by default
_mask = getattr(obj, '_mask', None)
if _mask is None:
objmask = getattr(obj, '_mask', nomask)
_dtype = ndarray.__getattribute__(self, 'dtype')
if objmask is nomask:
_mask = ma.make_mask_none(self.shape, dtype=_dtype)
else:
mdescr = ma.make_mask_descr(_dtype)
_mask = narray([tuple([m] * len(mdescr)) for m in objmask],
dtype=mdescr).view(recarray)
# Update some of the attributes
_dict = self.__dict__
_dict.update(_mask=_mask)
self._update_from(obj)
if _dict['_baseclass'] == ndarray:
_dict['_baseclass'] = recarray
return
def _getdata(self):
"""
Returns the data as a recarray.
"""
return ndarray.view(self, recarray)
_data = property(fget=_getdata)
def _getfieldmask(self):
"""
Alias to mask.
"""
return self._mask
_fieldmask = property(fget=_getfieldmask)
def __len__(self):
"""
Returns the length
"""
# We have more than one record
if self.ndim:
return len(self._data)
# We have only one record: return the nb of fields
return len(self.dtype)
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
# attr must be a fieldname
pass
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
# So far, so good
_localdict = ndarray.__getattribute__(self, '__dict__')
_data = ndarray.view(self, _localdict['_baseclass'])
obj = _data.getfield(*res)
if obj.dtype.fields:
raise NotImplementedError("MaskedRecords is currently limited to "
"simple records.")
# Get some special attributes
# Reset the object's mask
hasmasked = False
_mask = _localdict.get('_mask', None)
if _mask is not None:
try:
_mask = _mask[attr]
except IndexError:
# Couldn't find a mask: use the default (nomask)
pass
hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any()
if (obj.shape or hasmasked):
obj = obj.view(MaskedArray)
obj._baseclass = ndarray
obj._isfield = True
obj._mask = _mask
# Reset the field values
_fill_value = _localdict.get('_fill_value', None)
if _fill_value is not None:
try:
obj._fill_value = _fill_value[attr]
except ValueError:
obj._fill_value = None
else:
obj = obj.item()
return obj
def __setattr__(self, attr, val):
"""
Sets the attribute attr to the value val.
"""
# Should we call __setmask__ first ?
if attr in ['mask', 'fieldmask']:
self.__setmask__(val)
return
# Create a shortcut (so that we don't have to call getattr all the time)
_localdict = object.__getattribute__(self, '__dict__')
# Check whether we're creating a new field
newattr = attr not in _localdict
try:
# Is attr a generic attribute ?
ret = object.__setattr__(self, attr, val)
except:
# Not a generic attribute: exit if it's not a valid field
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
if not (attr in fielddict or attr in optinfo):
exctype, value = sys.exc_info()[:2]
raise exctype(value)
else:
# Get the list of names
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
# Check the attribute
if attr not in fielddict:
return ret
if newattr:
# We just added this one or this setattr worked on an
# internal attribute.
try:
object.__delattr__(self, attr)
except:
return ret
# Let's try to set the field
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
if val is masked:
_fill_value = _localdict['_fill_value']
if _fill_value is not None:
dval = _localdict['_fill_value'][attr]
else:
dval = val
mval = True
else:
dval = filled(val)
mval = getmaskarray(val)
obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
_localdict['_mask'].__setitem__(attr, mval)
return obj
def __getitem__(self, indx):
"""
Returns all the fields sharing the same fieldname base.
The fieldname base is either `_data` or `_mask`.
"""
_localdict = self.__dict__
_mask = ndarray.__getattribute__(self, '_mask')
_data = ndarray.view(self, _localdict['_baseclass'])
# We want a field
if isinstance(indx, basestring):
# Make sure _sharedmask is True to propagate back to _fieldmask
# Don't use _set_mask, there are some copies being made that
# break propagation. Don't force the mask to nomask, that wrecks
# easy masking
obj = _data[indx].view(MaskedArray)
obj._mask = _mask[indx]
obj._sharedmask = True
fval = _localdict['_fill_value']
if fval is not None:
obj._fill_value = fval[indx]
# Force to masked if the mask is True
if not obj.ndim and obj._mask:
return masked
return obj
# We want some elements.
# First, the data.
obj = np.array(_data[indx], copy=False).view(mrecarray)
obj._mask = np.array(_mask[indx], copy=False).view(recarray)
return obj
def __setitem__(self, indx, value):
"""
Sets the given record to value.
"""
MaskedArray.__setitem__(self, indx, value)
if isinstance(indx, basestring):
self._mask[indx] = ma.getmaskarray(value)
def __str__(self):
"""
Calculates the string representation.
"""
if self.size > 1:
mstr = ["(%s)" % ",".join([str(i) for i in s])
for s in zip(*[getattr(self, f) for f in self.dtype.names])]
return "[%s]" % ", ".join(mstr)
else:
mstr = ["%s" % ",".join([str(i) for i in s])
for s in zip([getattr(self, f) for f in self.dtype.names])]
return "(%s)" % ", ".join(mstr)
def __repr__(self):
"""
Calculates the repr representation.
"""
_names = self.dtype.names
fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
reprstr.insert(0, 'masked_records(')
reprstr.extend([fmt % (' fill_value', self.fill_value),
' )'])
return str("\n".join(reprstr))
def view(self, dtype=None, type=None):
"""
Returns a view of the mrecarray.
"""
# OK, basic copy-paste from MaskedArray.view.
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
# Here again.
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
# OK, there's the change
except TypeError:
dtype = np.dtype(dtype)
# we need to revert to MaskedArray, but keeping the possibility
# of subclasses (eg, TimeSeriesRecords), so we'll force a type
# set to the first parent
if dtype.fields is None:
basetype = self.__class__.__bases__[0]
output = self.__array__().view(dtype, basetype)
output._update_from(self)
else:
output = ndarray.view(self, dtype)
output._fill_value = None
else:
output = ndarray.view(self, dtype, type)
# Update the mask, just like in MaskedArray.view
if (getattr(output, '_mask', nomask) is not nomask):
mdtype = ma.make_mask_descr(output.dtype)
output._mask = self._mask.view(mdtype, ndarray)
output._mask.shape = output.shape
return output
def harden_mask(self):
"""
Forces the mask to hard.
"""
self._hardmask = True
def soften_mask(self):
"""
Forces the mask to soft
"""
self._hardmask = False
def copy(self):
"""
Returns a copy of the masked record.
"""
copied = self._data.copy().view(type(self))
copied._mask = self._mask.copy()
return copied
def tolist(self, fill_value=None):
"""
Return the data portion of the array as a list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to fill_value. If fill_value is None,
the corresponding entries in the output list will be ``None``.
"""
if fill_value is not None:
return self.filled(fill_value).tolist()
result = narray(self.filled().tolist(), dtype=object)
mask = narray(self._mask.tolist())
result[mask] = None
return result.tolist()
def __getstate__(self):
"""Return the internal state of the masked array.
This is for pickling.
"""
state = (1,
self.shape,
self.dtype,
self.flags.fnc,
self._data.tobytes(),
self._mask.tobytes(),
self._fill_value,
)
return state
def __setstate__(self, state):
"""
Restore the internal state of the masked array.
This is for pickling. ``state`` is typically the output of the
``__getstate__`` output, and is a 5-tuple:
- class name
- a tuple giving the shape of the data
- a typecode for the data
- a binary string for the data
- a binary string for the mask.
"""
(ver, shp, typ, isf, raw, msk, flv) = state
ndarray.__setstate__(self, (shp, typ, isf, raw))
mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
self.fill_value = flv
def __reduce__(self):
"""
Return a 3-tuple for pickling a MaskedArray.
"""
return (_mrreconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
"""
Build a new MaskedArray from the information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
_mask = ndarray.__new__(ndarray, baseshape, 'b1')
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
mrecarray = MaskedRecords
###############################################################################
# Constructors #
###############################################################################
def fromarrays(arraylist, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None,
fill_value=None):
"""
Creates a mrecarray from a (flat) list of masked arrays.
Parameters
----------
arraylist : sequence
A list of (masked) arrays. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None, integer}, optional
Number of records. If None, shape is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
datalist = [getdata(x) for x in arraylist]
masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
_array = recfromarrays(datalist,
dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles, aligned=aligned,
byteorder=byteorder).view(mrecarray)
_array._mask.flat = list(zip(*masklist))
if fill_value is not None:
_array.fill_value = fill_value
return _array
def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None,
fill_value=None, mask=nomask):
"""
Creates a MaskedRecords from a list of records.
Parameters
----------
reclist : sequence
A list of records. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None,int}, optional
Number of records. If None, ``shape`` is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
mask : {nomask, sequence}, optional.
External mask to apply on the data.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
# Grab the initial _fieldmask, if needed:
_mask = getattr(reclist, '_mask', None)
# Get the list of records.
if isinstance(reclist, ndarray):
# Make sure we don't have some hidden mask
if isinstance(reclist, MaskedArray):
reclist = reclist.filled().view(ndarray)
# Grab the initial dtype, just in case
if dtype is None:
dtype = reclist.dtype
reclist = reclist.tolist()
mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles,
aligned=aligned, byteorder=byteorder).view(mrecarray)
# Set the fill_value if needed
if fill_value is not None:
mrec.fill_value = fill_value
# Now, let's deal w/ the mask
if mask is not nomask:
mask = np.array(mask, copy=False)
maskrecordlength = len(mask.dtype)
if maskrecordlength:
mrec._mask.flat = mask
elif len(mask.shape) == 2:
mrec._mask.flat = [tuple(m) for m in mask]
else:
mrec.__setmask__(mask)
if _mask is not None:
mrec._mask[:] = _mask
return mrec
def _guessvartypes(arr):
"""
Tries to guess the dtypes of the str_ ndarray `arr`.
Guesses by testing element-wise conversion. Returns a list of dtypes.
The array is first converted to ndarray. If the array is 2D, the test
is performed on the first line. An exception is raised if the file is
3D or more.
"""
vartypes = []
arr = np.asarray(arr)
if len(arr.shape) == 2:
arr = arr[0]
elif len(arr.shape) > 2:
raise ValueError("The array should be 2D at most!")
# Start the conversion loop.
for f in arr:
try:
int(f)
except ValueError:
try:
float(f)
except ValueError:
try:
complex(f)
except ValueError:
vartypes.append(arr.dtype)
else:
vartypes.append(np.dtype(complex))
else:
vartypes.append(np.dtype(float))
else:
vartypes.append(np.dtype(int))
return vartypes
def openfile(fname):
"""
Opens the file handle of file `fname`.
"""
# A file handle
if hasattr(fname, 'readline'):
return fname
# Try to open the file and guess its type
try:
f = open(fname)
except IOError:
raise IOError("No such file: '%s'" % fname)
if f.readline()[:2] != "\\x":
f.seek(0, 0)
return f
f.close()
raise NotImplementedError("Wow, binary file")
def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
varnames=None, vartypes=None):
"""
Creates a mrecarray from data stored in the file `filename`.
Parameters
----------
fname : {file name/handle}
Handle of an opened file.
delimitor : {None, string}, optional
Alphanumeric character used to separate columns in the file.
If None, any (group of) white spacestring(s) will be used.
commentchar : {'#', string}, optional
Alphanumeric character used to mark the start of a comment.
missingchar : {'', string}, optional
String indicating missing data, and used to create the masks.
varnames : {None, sequence}, optional
Sequence of the variable names. If None, a list will be created from
the first non empty line of the file.
vartypes : {None, sequence}, optional
Sequence of the variables dtypes. If None, it will be estimated from
the first non-commented line.
Ultra simple: the varnames are in the header, one line"""
# Try to open the file.
ftext = openfile(fname)
# Get the first non-empty line as the varnames
while True:
line = ftext.readline()
firstline = line[:line.find(commentchar)].strip()
_varnames = firstline.split(delimitor)
if len(_varnames) > 1:
break
if varnames is None:
varnames = _varnames
# Get the data.
_variables = masked_array([line.strip().split(delimitor) for line in ftext
if line[0] != commentchar and len(line) > 1])
(_, nfields) = _variables.shape
ftext.close()
# Try to guess the dtype.
if vartypes is None:
vartypes = _guessvartypes(_variables[0])
else:
vartypes = [np.dtype(v) for v in vartypes]
if len(vartypes) != nfields:
msg = "Attempting to %i dtypes for %i fields!"
msg += " Reverting to default."
warnings.warn(msg % (len(vartypes), nfields))
vartypes = _guessvartypes(_variables[0])
# Construct the descriptor.
mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
mfillv = [ma.default_fill_value(f) for f in vartypes]
# Get the data and the mask.
# We just need a list of masked_arrays. It's easier to create it like that:
_mask = (_variables.T == missingchar)
_datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
return fromarrays(_datalist, dtype=mdescr)
def addfield(mrecord, newfield, newfieldname=None):
"""Adds a new field to the masked record array
Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
is None, the new field name is set to 'fi', where `i` is the number of
existing fields.
"""
_data = mrecord._data
_mask = mrecord._mask
if newfieldname is None or newfieldname in reserved_fields:
newfieldname = 'f%i' % len(_data.dtype)
newfield = ma.array(newfield)
# Get the new data.
# Create a new empty recarray
newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
newdata = recarray(_data.shape, newdtype)
# Add the existing fields
[newdata.setfield(_data.getfield(*f), *f)
for f in _data.dtype.fields.values()]
# Add the new field
newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
newdata = newdata.view(MaskedRecords)
# Get the new mask
# Create a new empty recarray
newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
newmask = recarray(_data.shape, newmdtype)
# Add the old masks
[newmask.setfield(_mask.getfield(*f), *f)
for f in _mask.dtype.fields.values()]
# Add the mask of the new field
newmask.setfield(getmaskarray(newfield),
*newmask.dtype.fields[newfieldname])
newdata._mask = newmask
return newdata
| bsd-3-clause |
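A short usage sketch for the constructors defined above; the field names and values are illustrative assumptions, not taken from the module:

    import numpy.ma as ma
    from numpy.ma.mrecords import fromarrays

    age = ma.array([25, 45, 63], mask=[0, 1, 0])       # second entry masked
    weight = ma.array([72.5, 81.0, 64.3])
    people = fromarrays([age, weight], names='age,weight')
    # people is a mrecarray: people.age[1] is masked, people[0] is a regular record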
szezso/android_kernel_motorola_msm8916 | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
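The parser above expects colon-separated lines of the form command:opcode:threadid:data, where the command is "C" (issue a command), "W" (wait for a status) or "T" (test a status once). An illustrative stanza in that format (not a verbatim kernel test file):

    # schedule thread 0 as FIFO prio 80, take lock 0, then check it is held
    C: schedfifo:  0: 80
    C: locknowait: 0: 0
    W: locked:     0: 0
    C: unlock:     0: 0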
h2non/riprova | riprova/async_retrier.py | 1 | 11086 | # -*- coding: utf-8 -*-
from six import raise_from
from .backoff import Backoff
from .retrier import Retrier
from .constants import PY_34
from .errors import ErrorWhitelist
from .strategies import ConstantBackoff
from .exceptions import MaxRetriesExceeded, RetryError
# Ensure user do not import this module in unsupported Python runtimes
if not PY_34: # pragma: no cover
raise RuntimeError('cannot import async_retrier module in Python <= 3.4')
import asyncio # noqa
class AsyncRetrier(Retrier):
"""
AsyncRetrier implements an asynchronous coroutine-based operation retrier.
Only compatible with `asyncio` in Python 3.4+.
AsyncRetrier implements a synchronous and asynchronous context manager.
Arguments:
timeout (int|float): maximum optional timeout in milliseconds.
Use `None` for no limit. Defaults to `None`.
backoff (riprova.Backoff): optional backoff strategy to use.
Defaults to `riprova.ConstantBackoff`.
evaluator (function): optional evaluator function used to determine
when an operation should be retried or not.
This allow the developer to retry operations that do not raised
any exception, for instance. Evaluator function accepts 1
argument: the returned task result.
Evaluator function can raise an exception, return an error or
simply return `True` in order to retry the operation.
Otherwise the operation will be considered as valid and the
retry loop will end.
error_evaluator (function|coroutinefunction): optional evaluator
function used to determine when a task raised exception should
be processed as a legit error and therefore retried or, otherwise,
treated as whitelist error, stopping the retry loop and re-raising
the exception to the task consumer.
This gives developers fine-grained control over how raised exceptions
are classified. The evaluator is a unary
function that accepts 1 argument: the raised exception object.
Evaluator function can raise an exception, return an error or
simply return `True` in order to retry the operation.
Otherwise the operation will be considered as valid and the
retry loop will end.
on_retry (function): optional function to call on before very retry
operation. `on_retry` function accepts 2 arguments: `err, next_try`
and should return nothing.
sleep_coro (coroutinefunction): optional coroutine function used to
sleep. Defaults to `asyncio.sleep`.
loop (asyncio.AbstractEventLoop): event loop to use.
Defaults to `asyncio.get_event_loop()`.
Attributes:
whitelist (riprova.ErrorWhitelist): default error whitelist instance
used to evaluate whether a raised error should be retried or re-raised.
blacklist (riprova.ErrorBlacklist): default error blacklist instance
used to evaluate whether a raised error should be retried or re-raised.
Blacklist and Whitelist are mutually exclusive.
timeout (int): stores the maximum retries attempts timeout in
seconds. Use `None` for no timeout. Defaults to `None`.
attempts (int): number of retry attempts being executed from last
`run()` method call.
error (Exception): stores the latest generated error.
`None` if not error yet from last `run()` execution.
sleep (coroutinefunction): stores the coroutine function used to sleep.
Defaults to `asyncio.sleep`.
backoff (Backoff): stores current used backoff.
Defaults to `riprova.ConstantBackoff`.
evaluator (coroutinefunction): stores the used evaluator function.
Defaults to `None`.
error_evaluator (function|coroutinefunction): stores the used error
evaluator function. Defaults to `self.is_whitelisted_error()`.
on_retry (coroutinefunction): stores the retry notifier function.
Defaults to `None`.
Raises:
AssertionError: in case of invalid input params.
Usage::
retrier = riprova.AsyncRetrier(
timeout=10,
backoff=riprova.FibonacciBackoff(retries=5))
async def task(x):
return x * x
loop = asyncio.get_event_loop()
result = loop.run_until_complete(retrier.run(task, 4))
assert result == 16
assert retrier.attempts == 0
assert retrier.error == None
# Using the async context manager
async with riprova.AsyncRetrier() as retry:
await retry.run(task, 'foo', bar=1)
"""
# Stores the default global error whitelist used for error retry evaluation
whitelist = None
# Blacklist is just a semantic alias to whitelist
blacklist = None
def __init__(self,
timeout=None,
backoff=None,
evaluator=None,
error_evaluator=None,
on_retry=None,
sleep_coro=None,
loop=None):
# Assert input params
if timeout is not None:
assert isinstance(timeout, (int, float)), 'timeout must be number'
assert timeout >= 0, 'timeout cannot be a negative number'
# Event loop to use
self.loop = loop or asyncio.get_event_loop()
# Stores number of retry attempts
self.attempts = 0
# Stores latest error
self.error = None
# Maximum optional timeout in milliseconds. Use 0 for no limit
self.timeout = timeout or None
# Stores optional evaluator function
self.evaluator = asyncio.coroutine(evaluator) if evaluator else None
# Stores the error evaluator function.
self.error_evaluator = error_evaluator or self.is_whitelisted_error
# Stores optional coroutine function to call on before very
# retry operation. `on_retry` function accepts 2 arguments:
# `err, next_try` and should return nothing.
self.on_retry = asyncio.coroutine(on_retry) if on_retry else None
# Backoff strategy to use. Defaults to `riprova.ConstantBackoff`.
self.backoff = backoff or ConstantBackoff()
# Function used to sleep. Defaults `asyncio.sleep()`.
self.sleep = sleep_coro or asyncio.sleep
# Stores the default error whitelist used for error retry evaluation
self.whitelist = (AsyncRetrier.blacklist or
AsyncRetrier.whitelist or
ErrorWhitelist())
@asyncio.coroutine
def _call(self, coro, *args, **kw):
"""
Calls the given coroutine function with the given variadic arguments.
"""
res = yield from coro(*args, **kw) # noqa (required for Python 2.x)
# If there is no evaluator function (or no result), treat the call as successful
if not self.evaluator or res is None:
# Clean error on success
self.error = None
# Return response object
return res
# Use custom result evaluator in order to determine if the
# operation failed or not
err = yield from self.evaluator(res)
if not err:
self.error = None
return res
# Raise custom error exception
if isinstance(err, Exception):
self.error = err
raise err from RetryError
# If True, raise a custom exception
if err is True:
raise RuntimeError('evaluator assertion returned True')
# Otherwise simply return the error object
return err
@asyncio.coroutine
def _handle_error(self, err):
"""
Handle execution error state and sleep the required amount of time.
"""
# Update latest cached error
self.error = err
# Retry by default unless the evaluator decides otherwise
retry = True
# Evaluate if error is legit or should be retried
if self.error_evaluator:
retry = yield from (asyncio.coroutine(self.error_evaluator)(err))
# If the evaluator returns an error exception, just raise it
if retry and isinstance(retry, Exception):
raise_from(retry, self.error)
# If retry evaluator returns False, raise original error and
# stop the retry cycle
if retry is False:
raise err
# Get delay before next retry
delay = self.backoff.next()
# If the backoff strategy is exhausted, give up
if delay == Backoff.STOP:
raise MaxRetriesExceeded('max retries exceeded') from err
# Notify retry subscriber, if needed
if self.on_retry:
yield from self.on_retry(err, delay)
# Sleep before the next try attempt
yield from self.sleep(delay)
@asyncio.coroutine
def _run(self, coro, *args, **kw):
"""
Runs coroutine in a error-safe infinitive loop until the
operation succeed or the max retry attempts is reached.
"""
err = None
while True:
try:
return (yield from self._call(coro, *args, **kw))
# Collect errors raised by cancelled futures
except asyncio.CancelledError as _err:
err = _err
# Handle any other exception error
except Exception as _err:
yield from self._handle_error(_err)
# Increment number of retry attempts
self.attempts += 1
# Forward raised exception, if needed
if err is not None:
raise err
@asyncio.coroutine
def run(self, coro, *args, **kw):
"""
Runs the given coroutine function in a retry loop until the operation
is completed successfully or maximum retries attempts are reached.
Arguments:
coro (coroutinefunction): coroutine function to retry.
*args (args): partial arguments to pass to the function.
*kw (kwargs): partial keyword arguments to pass to the function.
Raises:
Exception: any potential exception raised by the function.
RuntimeError: if evaluator function returns `True`.
asyncio.TimeoutError: in case of a timeout exceed error.
Returns:
mixed: value returned by the original function.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('coro param must be a coroutine function')
# Reset backoff on every new execution
self.backoff.reset()
# Reset state
self.error = None
self.attempts = 0
# Run the coroutine, enforcing the optional timeout (None means no limit)
return (yield from asyncio.wait_for(
self._run(coro, *args, **kw),
self.timeout,
loop=self.loop
))
@asyncio.coroutine
def __aenter__(self):
return self
@asyncio.coroutine
def __aexit__(self, exc_type, exc_value, traceback):
return self.__exit__(exc_type, exc_value, traceback)
| mit |
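A hedged sketch of wiring the on_retry hook documented above into a task; fetch_data, the printed message and the backoff choice are illustrative assumptions:

    import asyncio
    import riprova

    async def fetch_data():
        ...  # coroutine that may fail transiently

    def notify(err, next_try):
        print('retrying in %.2fs after: %r' % (next_try, err))

    retrier = riprova.AsyncRetrier(
        backoff=riprova.FibonacciBackoff(retries=5),
        on_retry=notify)

    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(retrier.run(fetch_data))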
spiffytech/npcworld | lib/test_market.py | 1 | 10747 | import market
def test_default_inventory():
default_amount = 10
inventory = market.Inventory()
assert inventory.wood == default_amount
assert inventory.food == default_amount
assert inventory.ore == default_amount
assert inventory.metal == default_amount
assert inventory.tools == default_amount
def test_has_wood():
inventory = market.Inventory(wood=1)
assert market.has_wood(inventory)
inventory = market.Inventory(wood=0)
assert not market.has_wood(inventory)
def test_has_ore():
inventory = market.Inventory(ore=1)
assert market.has_ore(inventory)
inventory = market.Inventory(ore=0)
assert not market.has_ore(inventory)
def test_has_food():
inventory = market.Inventory(food=1)
assert market.has_food(inventory)
inventory = market.Inventory(food=0)
assert not market.has_food(inventory)
def test_has_metal():
inventory = market.Inventory(metal=1)
assert market.has_metal(inventory)
inventory = market.Inventory(metal=0)
assert not market.has_metal(inventory)
def test_has_tools():
inventory = market.Inventory(tools=1)
assert market.has_tools(inventory)
inventory = market.Inventory(tools=0)
assert not market.has_tools(inventory)
def test_default_npc():
npc = market.NPC()
assert npc.inventory == market.Inventory()
assert npc.occupation is None
def test_lumberjack_produce():
lumberjack = market.NPC(occupation="lumberjack")
npc = lumberjack._replace(inventory=market.Inventory(tools=0, food=0))
updated_npc = market.lumberjack_produce(npc)
assert updated_npc.inventory == npc.inventory
npc = lumberjack
updated_npc = market.lumberjack_produce(npc)
assert updated_npc.inventory.wood == npc.inventory.wood+2
assert updated_npc.inventory.food == npc.inventory.food-1
npc = lumberjack._replace(inventory=market.Inventory(tools=0))
updated_npc = market.lumberjack_produce(npc)
assert updated_npc.inventory.wood == npc.inventory.wood+1
assert updated_npc.inventory.food == npc.inventory.food-1
def test_farmer_produce():
farmer = market.NPC(occupation="farmer")
npc = farmer._replace(inventory=market.Inventory(tools=0, wood=0))
updated_npc = market.do_work(npc)
assert updated_npc.inventory == npc.inventory
npc = farmer
updated_npc = market.do_work(npc)
assert updated_npc.inventory.food == npc.inventory.food+4
assert updated_npc.inventory.wood == npc.inventory.wood-1
npc = farmer._replace(inventory=market.Inventory(tools=0))
updated_npc = market.do_work(npc)
assert updated_npc.inventory.food == npc.inventory.food+2
assert updated_npc.inventory.wood == npc.inventory.wood-1
def test_miner_produce():
miner = market.NPC(occupation="miner")
npc = miner._replace(inventory=market.Inventory(tools=0, food=0))
updated_npc = market.do_work(npc)
assert updated_npc.inventory == npc.inventory
npc = miner
updated_npc = market.do_work(npc)
assert updated_npc.inventory.ore == npc.inventory.ore+4
assert updated_npc.inventory.food == npc.inventory.food-1
npc = miner._replace(inventory=market.Inventory(tools=0))
updated_npc = market.do_work(npc)
assert updated_npc.inventory.ore == npc.inventory.ore+2
assert updated_npc.inventory.food == npc.inventory.food-1
def test_refiner_produce():
refiner = market.NPC(occupation="refiner")
npc = refiner._replace(inventory=market.Inventory(tools=0, food=0))
updated_npc = market.do_work(npc)
assert updated_npc.inventory == npc.inventory
npc = refiner
updated_npc = market.do_work(npc)
assert updated_npc.inventory.metal == npc.inventory.metal+1
assert updated_npc.inventory.ore == 0
assert updated_npc.inventory.food == npc.inventory.food-1
npc = refiner._replace(inventory=market.Inventory(tools=0))
updated_npc = market.do_work(npc)
assert updated_npc.inventory.metal == npc.inventory.metal+1
assert updated_npc.inventory.ore == npc.inventory.ore-2
assert updated_npc.inventory.food == npc.inventory.food-1
class TestTrades(object):
def test_avg_price(self):
market.trade_history = (
tuple(
market.Trade(resource=t[0], price=t[1], type="buy", requested=3, status="accepted") for t in (
("wood", 20),
("wood", 30),
("wood", 35),
("wood", 33),
("wood", 29),
("wood", 20),
# Things that aren't wood, to ensure filtering for the resource in question works right
("food", 90),
("food", 70),
)
),
)
assert round(market.avg_price("wood"), 2) == 27.83
def estimate_npc_price(self):
lower = 2
upper = 20
intervals = market.Intervals(wood=(lower, upper))
assert lower <= market.estimate_npc_price("wood", intervals) <= upper
def test_translate_interval(self):
mean = 50
interval = (25, 75)
new_interval = market.translate_interval(interval, mean)
assert new_interval == interval
mean = 150
interval = (40, 60)
new_interval = market.translate_interval(interval, mean)
assert new_interval == (45, 65)
def test_shrink_interval(self):
assert market.shrink_interval((100, 1000)) == (105, 950)
def test_expand_interval(self):
assert market.expand_interval((100, 1000)) == (95, 1050)
def test_interval_is_divergent(self):
interval = (130, 150)
price = 100
mean = 150
assert market.interval_is_divergent(interval, price, mean) == interval
price = 99
assert market.interval_is_divergent(interval, price, mean) == market.translate_interval(interval, mean)
price = 199
assert market.interval_is_divergent(interval, price, mean) == interval
price = 200
assert market.interval_is_divergent(interval, price, mean) == market.translate_interval(interval, mean)
def test_update_beliefs_accepted(self):
interval=(40, 60)
mean = 150
translated_shrunken = market.shrink_interval(market.translate_interval(interval, mean))
shrunken = market.shrink_interval(interval)
trade = market.Trade(resource="wood", price=150, requested=3, type="buy")
market.trade_history = ((
market.Trade(resource="wood", price=140, type="buy", requested=3, status="accepted"),
market.Trade(resource="wood", price=160, type="buy", requested=3, status="accepted")
),)
npc = market.NPC(belief_intervals=market.BeliefIntervals(wood=interval))
new_npc = market.update_beliefs_accepted(npc, trade)
assert new_npc.belief_intervals.wood == shrunken # No interval translation
# Test lower bounds
trade = market.Trade(resource="wood", price=99, requested=3, type="buy")
new_npc = market.update_beliefs_accepted(npc, trade)
assert new_npc.belief_intervals.wood == translated_shrunken # Interval translation
trade = market.Trade(resource="wood", price=100, requested=3, type="buy")
new_npc = market.update_beliefs_accepted(npc, trade)
assert new_npc.belief_intervals.wood == shrunken
# Test upper bounds
trade = market.Trade(resource="wood", price=199, requested=3, type="buy")
new_npc = market.update_beliefs_accepted(npc, trade)
assert new_npc.belief_intervals.wood == shrunken # Interval translation
trade = market.Trade(resource="wood", price=200, requested=3, type="buy")
new_npc = market.update_beliefs_accepted(npc, trade)
assert new_npc.belief_intervals.wood == translated_shrunken
def test_update_beliefs_rejected(self):
interval=(40, 60)
trade = market.Trade(resource="wood", price=50, requested=3, type="buy")
market.trade_history = ((
market.Trade(resource="wood", price=140, type="buy", requested=3, status="accepted"),
market.Trade(resource="wood", price=160, type="buy", requested=3, status="accepted")
),)
npc = market.NPC(belief_intervals=market.BeliefIntervals(wood=interval))
new_npc = market.update_beliefs_rejected(npc, trade)
assert new_npc.belief_intervals.wood == market.expand_interval((45, 65))
def test_get_buy_resources(self):
assert market.get_buy_resources("farmer") == ["wood", "tools"]
assert market.get_buy_resources("lumberjack") == ["food", "tools"]
assert market.get_buy_resources("refiner") == ["food", "ore", "tools"]
assert market.get_buy_resources("miner") == ["food", "tools"]
assert market.get_buy_resources("blacksmith") == ["metal"]
def test_favorability(self):
assert market.calc_favorability((120, 160), 150) == .75
def test_amt_to_trade(self):
assert market.calc_amt_to_trade(.75, 4) == 3
assert market.calc_amt_to_trade(.75, 3) == 2
def test_determine_trade_quantity(self):
market.trade_history = ((
market.Trade(resource="wood", price=140, type="buy", requested=3, status="accepted"),
market.Trade(resource="wood", price=160, type="buy", requested=3, status="accepted")
),)
npc = market.NPC(
occupation="lumberjack",
belief_intervals=market.BeliefIntervals(wood=(120, 160)),
inventory=market.Inventory(wood=4)
)
assert market.determine_trade_quantity(npc, market.get_sell_resource, market.calc_favorability) == 3
def test_determine_sale_quantity(self):
market.trade_history = ((
market.Trade(resource="wood", price=140, type="buy", requested=3, status="accepted"),
market.Trade(resource="wood", price=160, type="buy", requested=3, status="accepted")
),)
npc = market.NPC(
occupation="lumberjack",
belief_intervals=market.BeliefIntervals(wood=(120, 160)),
inventory=market.Inventory(wood=4)
)
assert market.determine_sale_quantity(npc) == 3
def test_determine_purchase_quantity(self):
market.trade_history = ((
market.Trade(resource="food", price=140, type="buy", requested=3, status="accepted"),
market.Trade(resource="food", price=160, type="buy", requested=3, status="accepted")
),)
npc = market.NPC(
occupation="lumberjack",
belief_intervals=market.BeliefIntervals(food=(120, 160)),
inventory=market.Inventory(food=4)
)
assert market.determine_purchase_quantity(npc) == 1
| gpl-3.0 |
mezz64/home-assistant | tests/components/demo/test_climate.py | 9 | 13154 | """The tests for the demo climate component."""
import pytest
import voluptuous as vol
from homeassistant.components.climate.const import (
ATTR_AUX_HEAT,
ATTR_CURRENT_HUMIDITY,
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_HUMIDITY,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_MAX_HUMIDITY,
ATTR_MAX_TEMP,
ATTR_MIN_HUMIDITY,
ATTR_MIN_TEMP,
ATTR_PRESET_MODE,
ATTR_SWING_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
DOMAIN,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_ECO,
SERVICE_SET_AUX_HEAT,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HUMIDITY,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
ENTITY_CLIMATE = "climate.hvac"
ENTITY_ECOBEE = "climate.ecobee"
ENTITY_HEATPUMP = "climate.heatpump"
@pytest.fixture(autouse=True)
async def setup_demo_climate(hass):
"""Initialize setup demo climate."""
hass.config.units = METRIC_SYSTEM
assert await async_setup_component(hass, DOMAIN, {"climate": {"platform": "demo"}})
await hass.async_block_till_done()
def test_setup_params(hass):
"""Test the initial parameters."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_COOL
assert state.attributes.get(ATTR_TEMPERATURE) == 21
assert state.attributes.get(ATTR_CURRENT_TEMPERATURE) == 22
assert state.attributes.get(ATTR_FAN_MODE) == "On High"
assert state.attributes.get(ATTR_HUMIDITY) == 67
assert state.attributes.get(ATTR_CURRENT_HUMIDITY) == 54
assert state.attributes.get(ATTR_SWING_MODE) == "Off"
assert STATE_OFF == state.attributes.get(ATTR_AUX_HEAT)
assert state.attributes.get(ATTR_HVAC_MODES) == [
"off",
"heat",
"cool",
"auto",
"dry",
"fan_only",
]
def test_default_setup_params(hass):
"""Test the setup with default parameters."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_MIN_TEMP) == 7
assert state.attributes.get(ATTR_MAX_TEMP) == 35
assert state.attributes.get(ATTR_MIN_HUMIDITY) == 30
assert state.attributes.get(ATTR_MAX_HUMIDITY) == 99
async def test_set_only_target_temp_bad_attr(hass):
"""Test setting the target temperature without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_TEMPERATURE) == 21
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_TEMPERATURE: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_TEMPERATURE) == 21
async def test_set_only_target_temp(hass):
"""Test the setting of the target temperature."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_TEMPERATURE) == 21
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_TEMPERATURE: 30},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_TEMPERATURE) == 30.0
async def test_set_only_target_temp_with_convert(hass):
"""Test the setting of the target temperature."""
state = hass.states.get(ENTITY_HEATPUMP)
assert state.attributes.get(ATTR_TEMPERATURE) == 20
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_HEATPUMP, ATTR_TEMPERATURE: 21},
blocking=True,
)
state = hass.states.get(ENTITY_HEATPUMP)
assert state.attributes.get(ATTR_TEMPERATURE) == 21.0
async def test_set_target_temp_range(hass):
"""Test the setting of the target temperature with range."""
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) == 21.0
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) == 24.0
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: ENTITY_ECOBEE,
ATTR_TARGET_TEMP_LOW: 20,
ATTR_TARGET_TEMP_HIGH: 25,
},
blocking=True,
)
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) == 20.0
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) == 25.0
async def test_set_target_temp_range_bad_attr(hass):
"""Test setting the target temperature range without attribute."""
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) == 21.0
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) == 24.0
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: ENTITY_ECOBEE,
ATTR_TARGET_TEMP_LOW: None,
ATTR_TARGET_TEMP_HIGH: None,
},
blocking=True,
)
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) == 21.0
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) == 24.0
async def test_set_target_humidity_bad_attr(hass):
"""Test setting the target humidity without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HUMIDITY) == 67
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HUMIDITY,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HUMIDITY: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HUMIDITY) == 67
async def test_set_target_humidity(hass):
"""Test the setting of the target humidity."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HUMIDITY) == 67
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HUMIDITY,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HUMIDITY: 64},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HUMIDITY) == 64.0
async def test_set_fan_mode_bad_attr(hass):
"""Test setting fan mode without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_FAN_MODE) == "On High"
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_FAN_MODE: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_FAN_MODE) == "On High"
async def test_set_fan_mode(hass):
"""Test setting of new fan mode."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_FAN_MODE) == "On High"
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_FAN_MODE: "On Low"},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_FAN_MODE) == "On Low"
async def test_set_swing_mode_bad_attr(hass):
"""Test setting swing mode without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_SWING_MODE) == "Off"
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_SWING_MODE: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_SWING_MODE) == "Off"
async def test_set_swing(hass):
"""Test setting of new swing mode."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_SWING_MODE) == "Off"
await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_SWING_MODE: "Auto"},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_SWING_MODE) == "Auto"
async def test_set_hvac_bad_attr_and_state(hass):
"""Test setting hvac mode without required attribute.
Also check the state.
"""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_COOL
assert state.state == HVAC_MODE_COOL
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HVAC_MODE: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_COOL
assert state.state == HVAC_MODE_COOL
async def test_set_hvac(hass):
"""Test setting of new hvac mode."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_COOL
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_HEAT
async def test_set_hold_mode_away(hass):
"""Test setting the hold mode away."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ECOBEE, ATTR_PRESET_MODE: PRESET_AWAY},
blocking=True,
)
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_AWAY
async def test_set_hold_mode_eco(hass):
"""Test setting the hold mode eco."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ECOBEE, ATTR_PRESET_MODE: PRESET_ECO},
blocking=True,
)
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_ECO
async def test_set_aux_heat_bad_attr(hass):
"""Test setting the auxiliary heater without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUX_HEAT,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_AUX_HEAT: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF
async def test_set_aux_heat_on(hass):
"""Test setting the axillary heater on/true."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUX_HEAT,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_AUX_HEAT: True},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_ON
async def test_set_aux_heat_off(hass):
"""Test setting the auxiliary heater off/false."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUX_HEAT,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_AUX_HEAT: False},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF
async def test_turn_on(hass):
"""Test turn on device."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_OFF
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_CLIMATE}, blocking=True
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_HEAT
async def test_turn_off(hass):
"""Test turn on device."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_HEAT
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_CLIMATE}, blocking=True
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_OFF
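# Illustrative sketch (not part of the original test module): the tests above
# assume the demo climate platform was already set up by a fixture defined
# earlier in the file.  Under that assumption, a minimal setup helper would
# look roughly like this:
async def _example_setup_demo_climate(hass):
    """Set up the demo climate platform (illustrative only)."""
    from homeassistant.setup import async_setup_component

    assert await async_setup_component(
        hass, DOMAIN, {DOMAIN: {"platform": "demo"}}
    )
    await hass.async_block_till_done()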
| apache-2.0 |
xianjunzhengbackup/Cloud-Native-Python | env/lib/python3.5/site-packages/pip/req/req_uninstall.py | 510 | 6897 | from __future__ import absolute_import
import logging
import os
import tempfile
from pip.compat import uses_pycache, WINDOWS, cache_from_source
from pip.exceptions import UninstallationError
from pip.utils import rmtree, ask, is_local, renames, normalize_path
from pip.utils.logging import indent_log
logger = logging.getLogger(__name__)
class UninstallPathSet(object):
"""A set of file paths to be removed in the uninstallation of a
requirement."""
def __init__(self, dist):
self.paths = set()
self._refuse = set()
self.pth = {}
self.dist = dist
self.save_dir = None
self._moved_paths = []
def _permitted(self, path):
"""
Return True if the given path is one we are permitted to
remove/modify, False otherwise.
"""
return is_local(path)
def add(self, path):
head, tail = os.path.split(path)
# we normalize the head to resolve parent directory symlinks, but not
# the tail, since we only want to uninstall symlinks, not their targets
path = os.path.join(normalize_path(head), os.path.normcase(tail))
if not os.path.exists(path):
return
if self._permitted(path):
self.paths.add(path)
else:
self._refuse.add(path)
# __pycache__ files can show up after 'installed-files.txt' is created,
# due to imports
if os.path.splitext(path)[1] == '.py' and uses_pycache:
self.add(cache_from_source(path))
def add_pth(self, pth_file, entry):
pth_file = normalize_path(pth_file)
if self._permitted(pth_file):
if pth_file not in self.pth:
self.pth[pth_file] = UninstallPthEntries(pth_file)
self.pth[pth_file].add(entry)
else:
self._refuse.add(pth_file)
def compact(self, paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
short_paths = set()
for path in sorted(paths, key=len):
if not any([
(path.startswith(shortpath) and
path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
for shortpath in short_paths]):
short_paths.add(path)
return short_paths
def _stash(self, path):
return os.path.join(
self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
logger.info(
'Uninstalling %s-%s:',
self.dist.project_name, self.dist.version
)
with indent_log():
paths = sorted(self.compact(self.paths))
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.info(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.info('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.info(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.debug('Removing file or directory %s', path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.info(
'Successfully uninstalled %s-%s',
self.dist.project_name, self.dist.version
)
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir is None:
logger.error(
"Can't roll back %s; was not uninstalled",
self.dist.project_name,
)
return False
logger.info('Rolling back uninstall of %s', self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.debug('Replacing %s', path)
renames(tmp_path, path)
for pth in self.pth.values():
pth.rollback()
def commit(self):
"""Remove temporary save dir: rollback will no longer be possible."""
if self.save_dir is not None:
rmtree(self.save_dir)
self.save_dir = None
self._moved_paths = []
class UninstallPthEntries(object):
def __init__(self, pth_file):
if not os.path.isfile(pth_file):
raise UninstallationError(
"Cannot remove entries from nonexistent file %s" % pth_file
)
self.file = pth_file
self.entries = set()
self._saved_lines = None
def add(self, entry):
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
if WINDOWS and not os.path.splitdrive(entry)[0]:
entry = entry.replace('\\', '/')
self.entries.add(entry)
def remove(self):
logger.debug('Removing pth entries from %s:', self.file)
with open(self.file, 'rb') as fh:
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
if any(b'\r\n' in line for line in lines):
endline = '\r\n'
else:
endline = '\n'
for entry in self.entries:
try:
logger.debug('Removing entry: %s', entry)
lines.remove((entry + endline).encode("utf-8"))
except ValueError:
pass
with open(self.file, 'wb') as fh:
fh.writelines(lines)
def rollback(self):
if self._saved_lines is None:
logger.error(
'Cannot roll back changes to %s, none were made', self.file
)
return False
logger.debug('Rolling %s back to previous state', self.file)
with open(self.file, 'wb') as fh:
fh.writelines(self._saved_lines)
return True
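# Illustrative sketch (not part of pip itself): a caller typically fills an
# UninstallPathSet from an installed distribution's file list and then removes,
# commits, or rolls back as one unit.  `dist` and `installed_files` below are
# hypothetical stand-ins for what pip's install machinery provides.
def _example_uninstall(dist, installed_files):
    paths = UninstallPathSet(dist)
    for filename in installed_files:
        paths.add(filename)
    try:
        paths.remove(auto_confirm=True)   # move the files into a temporary stash
        paths.commit()                    # drop the stash; removal is final
    except Exception:
        paths.rollback()                  # restore everything from the stash
        raise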
| mit |
rvalyi/openerp-pt_br | tests_use_case/test_create_sale_order.py | 2 | 1035 |
def test_create_sale_order(oerp):
sale_order_obj = oerp.pool.get('sale.order')
product_obj = oerp.pool.get('product.product')
sale_order_lines = []
prod1 = product_obj.browse(oerp.cr, 1, [5])[0]
sol = {'name': prod1.name,
'product_uom_qty': 1,
'product_id': prod1.id,
'product_uom': 1,
'price_unit': prod1.price_get('list_price')[prod1.id]
}
#sol_new = sale_order_line_obj.product_id_change(oerp.cr, 1, None, 1, 0, 1, 1,name=prod1.name, partner_id=1,fiscal_position=fp1)['value']
sale_order_lines.append((0, 0, sol))
order_id = sale_order_obj.create(oerp.cr, 1, {
'user_id': 1,
'partner_id': 1,
'partner_order_id': 2,
'partner_invoice_id': 2,
'partner_shipping_id': 2,
'pricelist_id': 1,
'order_line': sale_order_lines,
'fiscal_operation_category_id': 1,
'fiscal_position': 1
})
assert sale_order_obj.browse(oerp.cr, 1, [order_id])[0].id == order_id
| agpl-3.0 |
airspeedswift/swift | utils/swift_build_support/swift_build_support/host_specific_configuration.py | 7 | 14126 | # swift_build_support/host_configuration_support.py -------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
import re
import sys
from argparse import ArgumentError
from .targets import StdlibDeploymentTarget
class HostSpecificConfiguration(object):
"""Configuration information for an individual host."""
def __init__(self, host_target, args):
"""Initialize for the given `host_target`."""
# Compute the set of deployment targets to configure/build.
if host_target == args.host_target:
# This host is the user's desired product, so honor the requested
# set of targets to configure/build.
stdlib_targets_to_configure = args.stdlib_deployment_targets
if "all" in args.build_stdlib_deployment_targets:
stdlib_targets_to_build = set(stdlib_targets_to_configure)
else:
stdlib_targets_to_build = set(
args.build_stdlib_deployment_targets).intersection(
set(args.stdlib_deployment_targets))
else:
# Otherwise, this is a host we are building as part of
# cross-compiling, so we only need the target itself.
stdlib_targets_to_configure = [host_target]
stdlib_targets_to_build = set(stdlib_targets_to_configure)
if (hasattr(args, 'stdlib_deployment_targets') and
args.stdlib_deployment_targets == []):
stdlib_targets_to_configure = []
stdlib_targets_to_build = []
# Compute derived information from the arguments.
#
# FIXME: We should move the platform-derived arguments to be entirely
# data driven, so that we can eliminate this code duplication and just
# iterate over all supported platforms.
platforms_to_skip_build = self.__platforms_to_skip_build(args)
platforms_to_skip_test = self.__platforms_to_skip_test(args)
platforms_archs_to_skip_test = \
self.__platforms_archs_to_skip_test(args, host_target)
platforms_to_skip_test_host = self.__platforms_to_skip_test_host(args)
# Compute the lists of **CMake** targets for each use case (configure
# vs. build vs. run) and the SDKs to configure with.
self.sdks_to_configure = set()
self.swift_stdlib_build_targets = []
self.swift_test_run_targets = []
self.swift_benchmark_build_targets = []
self.swift_benchmark_run_targets = []
for deployment_target_name in stdlib_targets_to_configure:
# Get the target object.
deployment_target = StdlibDeploymentTarget.get_target_for_name(
deployment_target_name)
if deployment_target is None:
sys.stderr.write('ERROR: unknown target: {}\n'.format(
deployment_target_name))
sys.stderr.flush()
sys.exit(1)
# Add the SDK to use.
deployment_platform = deployment_target.platform
self.sdks_to_configure.add(deployment_platform.sdk_name)
# If we aren't actually building this target (only configuring
# it), do nothing else.
if deployment_target_name not in stdlib_targets_to_build:
continue
# Compute which actions are desired.
build = (
deployment_platform not in platforms_to_skip_build)
test = (
deployment_platform not in platforms_to_skip_test)
test_host_only = None
dt_supports_benchmark = deployment_target.supports_benchmark
build_benchmarks = build and dt_supports_benchmark
build_external_benchmarks = all([build, dt_supports_benchmark,
args.build_external_benchmarks])
# FIXME: Note, `build-script-impl` computed a property here
# w.r.t. testing, but it was actually unused.
# For platforms which normally require a connected device to
# test, the default behavior is to run tests that only require
# the host (i.e., they do not attempt to execute).
if deployment_platform.uses_host_tests and \
deployment_platform not in \
platforms_to_skip_test_host:
test_host_only = True
name = deployment_target.name
for skip_test_arch in platforms_archs_to_skip_test:
if deployment_target.name == skip_test_arch.name:
test = False
if build:
# Validation, long, and stress tests require building the full
# standard library, whereas the other targets can build a
# slightly smaller subset which is faster to build.
if args.build_swift_stdlib_unittest_extra or \
args.validation_test or args.long_test or \
args.stress_test:
self.swift_stdlib_build_targets.append(
"swift-stdlib-" + name)
else:
self.swift_stdlib_build_targets.append(
"swift-test-stdlib-" + name)
if build_benchmarks:
self.swift_benchmark_build_targets.append(
"swift-benchmark-" + name)
if args.benchmark:
self.swift_benchmark_run_targets.append(
"check-swift-benchmark-" + name)
if build_external_benchmarks:
# Add support for the external benchmarks.
self.swift_benchmark_build_targets.append(
"swift-benchmark-{}-external".format(name))
if args.benchmark:
self.swift_benchmark_run_targets.append(
"check-swift-benchmark-{}-external".format(name))
if test:
if test_host_only:
suffix = "-only_non_executable"
elif args.only_executable_test:
suffix = "-only_executable"
elif args.only_non_executable_test:
suffix = "-only_non_executable"
else:
suffix = ""
subset_suffix = ""
if args.validation_test and args.long_test and \
args.stress_test:
subset_suffix = "-all"
elif args.validation_test:
subset_suffix = "-validation"
elif args.long_test:
subset_suffix = "-only_long"
elif args.stress_test:
subset_suffix = "-only_stress"
else:
subset_suffix = ""
# Support for running the macCatalyst tests with
# the iOS-like target triple.
macosx_platform_match = re.search("macosx-(.*)", name)
if macosx_platform_match and args.maccatalyst \
and args.maccatalyst_ios_tests:
(self.swift_test_run_targets
.append("check-swift{}{}-{}-{}".format(
subset_suffix, suffix, "macosx-maccatalyst",
macosx_platform_match.group(1))))
else:
(self.swift_test_run_targets
.append("check-swift{}{}-{}".format(
subset_suffix, suffix, name)))
if args.test_optimized and not test_host_only:
self.swift_test_run_targets.append(
"check-swift{}-optimize-{}".format(
subset_suffix, name))
if args.test_optimize_for_size and not test_host_only:
self.swift_test_run_targets.append(
"check-swift{}-optimize_size-{}".format(
subset_suffix, name))
if args.test_optimize_none_with_implicit_dynamic and \
not test_host_only:
self.swift_test_run_targets.append(
"check-swift{}-optimize_none_with_implicit_dynamic-{}"
.format(subset_suffix, name))
def __platforms_to_skip_build(self, args):
platforms_to_skip_build = set()
if not args.build_linux:
platforms_to_skip_build.add(StdlibDeploymentTarget.Linux)
if not args.build_freebsd:
platforms_to_skip_build.add(StdlibDeploymentTarget.FreeBSD)
if not args.build_cygwin:
platforms_to_skip_build.add(StdlibDeploymentTarget.Cygwin)
if not args.build_osx:
platforms_to_skip_build.add(StdlibDeploymentTarget.OSX)
if not args.build_ios_device:
platforms_to_skip_build.add(StdlibDeploymentTarget.iOS)
if not args.build_ios_simulator:
platforms_to_skip_build.add(StdlibDeploymentTarget.iOSSimulator)
if not args.build_tvos_device:
platforms_to_skip_build.add(StdlibDeploymentTarget.AppleTV)
if not args.build_tvos_simulator:
platforms_to_skip_build.add(
StdlibDeploymentTarget.AppleTVSimulator)
if not args.build_watchos_device:
platforms_to_skip_build.add(StdlibDeploymentTarget.AppleWatch)
if not args.build_watchos_simulator:
platforms_to_skip_build.add(
StdlibDeploymentTarget.AppleWatchSimulator)
if not args.build_android:
platforms_to_skip_build.add(StdlibDeploymentTarget.Android)
return platforms_to_skip_build
def __platforms_to_skip_test(self, args):
platforms_to_skip_test = set()
if not args.test_linux:
platforms_to_skip_test.add(StdlibDeploymentTarget.Linux)
if not args.test_freebsd:
platforms_to_skip_test.add(StdlibDeploymentTarget.FreeBSD)
if not args.test_cygwin:
platforms_to_skip_test.add(StdlibDeploymentTarget.Cygwin)
if not args.test_osx:
platforms_to_skip_test.add(StdlibDeploymentTarget.OSX)
if not args.test_ios_host and not args.only_non_executable_test:
platforms_to_skip_test.add(StdlibDeploymentTarget.iOS)
elif not args.only_non_executable_test:
raise ArgumentError(None,
"error: iOS device tests are not " +
"supported in open-source Swift.")
if not args.test_ios_simulator:
platforms_to_skip_test.add(StdlibDeploymentTarget.iOSSimulator)
if not args.test_tvos_host and not args.only_non_executable_test:
platforms_to_skip_test.add(StdlibDeploymentTarget.AppleTV)
elif not args.only_non_executable_test:
raise ArgumentError(None,
"error: tvOS device tests are not " +
"supported in open-source Swift.")
if not args.test_tvos_simulator:
platforms_to_skip_test.add(StdlibDeploymentTarget.AppleTVSimulator)
if not args.test_watchos_host and not args.only_non_executable_test:
platforms_to_skip_test.add(StdlibDeploymentTarget.AppleWatch)
elif not args.only_non_executable_test:
raise ArgumentError(None,
"error: watchOS device tests are not " +
"supported in open-source Swift.")
if not args.test_watchos_simulator:
platforms_to_skip_test.add(
StdlibDeploymentTarget.AppleWatchSimulator)
if not args.test_android:
platforms_to_skip_test.add(StdlibDeploymentTarget.Android)
return platforms_to_skip_test
def __platforms_archs_to_skip_test(self, args, host_target):
platforms_archs_to_skip_test = set()
if not args.test_ios_32bit_simulator:
platforms_archs_to_skip_test.add(
StdlibDeploymentTarget.iOSSimulator.i386)
if host_target == StdlibDeploymentTarget.OSX.x86_64.name:
platforms_archs_to_skip_test.add(
StdlibDeploymentTarget.iOSSimulator.arm64)
platforms_archs_to_skip_test.add(
StdlibDeploymentTarget.AppleTVSimulator.arm64)
platforms_archs_to_skip_test.add(
StdlibDeploymentTarget.AppleWatchSimulator.arm64)
if host_target == StdlibDeploymentTarget.OSX.arm64.name:
platforms_archs_to_skip_test.add(
StdlibDeploymentTarget.iOSSimulator.i386)
platforms_archs_to_skip_test.add(
StdlibDeploymentTarget.iOSSimulator.x86_64)
platforms_archs_to_skip_test.add(
StdlibDeploymentTarget.AppleTVSimulator.x86_64)
platforms_archs_to_skip_test.add(
StdlibDeploymentTarget.AppleWatchSimulator.i386)
return platforms_archs_to_skip_test
def __platforms_to_skip_test_host(self, args):
platforms_to_skip_test_host = set()
if not args.test_android_host:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.Android)
if not args.test_ios_host and not args.only_non_executable_test:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.iOS)
if not args.test_tvos_host and not args.only_non_executable_test:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.AppleTV)
if not args.test_watchos_host and not args.only_non_executable_test:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.AppleWatch)
return platforms_to_skip_test_host
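# Illustrative sketch (not part of the original module): build-script code
# constructs one HostSpecificConfiguration per host and feeds the computed
# CMake target lists into the build.  `args` stands for the parsed
# build-script argument namespace, which is an assumption of this example.
def _example_dump_host_config(host_target, args):
    config = HostSpecificConfiguration(host_target, args)
    print("SDKs to configure: {}".format(sorted(config.sdks_to_configure)))
    print("stdlib build targets: {}".format(config.swift_stdlib_build_targets))
    print("test run targets: {}".format(config.swift_test_run_targets))
    print("benchmark run targets: {}".format(config.swift_benchmark_run_targets))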
| apache-2.0 |
isaacfeng/Traffic-sign-recognition | new_try.py | 1 | 4365 | import os
import random
import skimage.data
import skimage.transform
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import my_lib as my
sess = tf.InteractiveSession()
ROOT_PATH = "GTSRB"
train_data_dir = os.path.join(ROOT_PATH, "Final_Training/Images/")
test_data_dir = os.path.join(ROOT_PATH, "Final_Test/")
images, labels = my.load_train_data(train_data_dir)
images_test = my.load_test_data(test_data_dir)
print("Unique Labels: {0}\nTotal Images:{1}".format(len(set(labels)), len(images)))
print("Total Test Images:{0}".format(len(images_test)))
my.display_images_and_labels(images, labels)
my.display_label_images(images, labels, 32)
for image in images[:5]:
print("shape: {0}, min: {1}, max: {2}".format(image.shape, image.min(), image.max()))
images28 = [skimage.transform.resize(image, (28, 28))
for image in images]
my.display_images_and_labels(images28, labels)
images28_test = [skimage.transform.resize(image_test, (28, 28))
for image_test in images_test]
for image in images28[:5]:
print("shape: {0}, min: {1}, max: {2}".format(image.shape, image.min(), image.max()))
labels_a = np.array(labels)
images_a = np.array(images28)
images_test_a = np.array(images28_test)
print("labels: ", labels_a.shape)
print('images: ', images_a.shape)
print('test images: ', images_test_a.shape)
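# Shuffle images and labels with the same random permutation so that the
# train/validation split further down is not ordered by class.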
buff = np.arange(0, 39209)
np.random.shuffle(buff)
labels_shuffled = labels_a.copy()
images_shuffled = images_a.copy()
for i in range(39209):
j = buff[i]
labels_shuffled[i] = labels_a[j]
images_shuffled[i] = images_a[j]
labels_onehot = np.zeros((39209, 43))
labels_onehot[np.arange(39209), labels_a] = 1
print("labels one hot: ", labels_onehot[10000:10005])
labels_shuffled_onehot = np.zeros((39209, 43))
labels_shuffled_onehot[np.arange(39209), labels_shuffled] = 1
batch_images = np.zeros((35, 800, 28, 28, 3))
batch_labels = np.zeros((35, 800, 43))
for i in range(35):
batch_images[i] = images_shuffled[800*i:800*i+800]
batch_labels[i] = labels_shuffled_onehot[800*i:800*i+800]
print("batch_images: ", batch_images[8].shape)
print("label_images: ", batch_labels[8].shape)
loss_buffer = np.zeros(1000)
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
x = tf.placeholder(tf.float32, [None, 28, 28, 3])
y_ = tf.placeholder(tf.int32, [None, 43])
W_conv1 = weight_variable([5, 5, 3, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 43])
b_fc2 = bias_variable([43])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
predicted_labels = tf.argmax(y_conv, 1)
sess.run(tf.global_variables_initializer())
for i in range(10000):
_, loss_value = sess.run([train_step, cross_entropy],feed_dict={x: batch_images[i % 35], y_: batch_labels[i % 35], keep_prob: 0.5})
if i % 10 == 0:
        print(i)
print("Loss: ", loss_value)
loss_buffer[i/10] = loss_value
print("cross validation accuracy %g"%accuracy.eval(feed_dict={
x: images_shuffled[28000:39209], y_: labels_shuffled_onehot[28000:39209], keep_prob: 1.0}))
result = predicted_labels.eval(feed_dict={x: images_test_a[0:12630], keep_prob: 1.0})
np.savetxt('result.csv', result, delimiter=',')
np.savetxt('loss.csv', loss_buffer, delimiter=',')
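# Illustrative sketch (not part of the original script): the loss curve saved
# to loss.csv above can be inspected later with the numpy and matplotlib
# imports already present at the top of this file.
def plot_loss(path='loss.csv'):
    losses = np.loadtxt(path, delimiter=',')
    plt.plot(losses)
    plt.xlabel('logging step (every 10 training iterations)')
    plt.ylabel('cross-entropy loss')
    plt.savefig('loss_curve.png')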
| apache-2.0 |
JianyuWang/nova | nova/tests/functional/api_sample_tests/test_hosts.py | 17 | 2565 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class HostsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
extension_name = "os-hosts"
def _get_flags(self):
f = super(HostsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.hosts.Hosts')
return f
def test_host_startup(self):
response = self._do_get('os-hosts/%s/startup' % self.compute.host)
subs = self._get_regexes()
self._verify_response('host-get-startup', subs, response, 200)
def test_host_reboot(self):
response = self._do_get('os-hosts/%s/reboot' % self.compute.host)
subs = self._get_regexes()
self._verify_response('host-get-reboot', subs, response, 200)
def test_host_shutdown(self):
response = self._do_get('os-hosts/%s/shutdown' % self.compute.host)
subs = self._get_regexes()
self._verify_response('host-get-shutdown', subs, response, 200)
def test_host_maintenance(self):
response = self._do_put('os-hosts/%s' % self.compute.host,
'host-put-maintenance-req', {})
subs = self._get_regexes()
self._verify_response('host-put-maintenance-resp', subs, response, 200)
def test_host_get(self):
response = self._do_get('os-hosts/%s' % self.compute.host)
subs = self._get_regexes()
self._verify_response('host-get-resp', subs, response, 200)
def test_hosts_list(self):
response = self._do_get('os-hosts')
subs = self._get_regexes()
self._verify_response('hosts-list-resp', subs, response, 200)
| apache-2.0 |
timesking/MITMf | core/servers/FTP.py | 24 | 2544 | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import threading
from core.responder.utils import *
from SocketServer import BaseRequestHandler, ThreadingMixIn, TCPServer
from core.responder.packets import FTPPacket
class FTP:
def start(self):
try:
if OsInterfaceIsSupported():
server = ThreadingTCPServer((settings.Config.Bind_To, 21), FTP1)
else:
server = ThreadingTCPServer(('', 21), FTP1)
			t = threading.Thread(name='FTP', target=server.serve_forever)
t.setDaemon(True)
t.start()
except Exception as e:
print "Error starting SMB server: {}".format(e)
print_exc()
class ThreadingTCPServer(ThreadingMixIn, TCPServer):
allow_reuse_address = 1
def server_bind(self):
if OsInterfaceIsSupported():
try:
self.socket.setsockopt(socket.SOL_SOCKET, 25, settings.Config.Bind_To+'\0')
except:
pass
TCPServer.server_bind(self)
class FTP1(BaseRequestHandler):
def handle(self):
try:
self.request.send(str(FTPPacket()))
data = self.request.recv(1024)
if data[0:4] == "USER":
User = data[5:].strip()
Packet = FTPPacket(Code="331",Message="User name okay, need password.")
self.request.send(str(Packet))
data = self.request.recv(1024)
if data[0:4] == "PASS":
Pass = data[5:].strip()
Packet = FTPPacket(Code="530",Message="User not logged in.")
self.request.send(str(Packet))
data = self.request.recv(1024)
SaveToDb({
'module': 'FTP',
'type': 'Cleartext',
'client': self.client_address[0],
'user': User,
'cleartext': Pass,
'fullhash': User+':'+Pass
})
else:
Packet = FTPPacket(Code="502",Message="Command not implemented.")
self.request.send(str(Packet))
data = self.request.recv(1024)
except Exception:
			pass
 | gpl-3.0 |
ejona86/grpc | src/python/grpcio_status/grpc_status/_async.py | 10 | 1950 | # Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reference implementation for status mapping in gRPC Python."""
from grpc.experimental import aio
from google.rpc import status_pb2
from ._common import code_to_grpc_status_code, GRPC_DETAILS_METADATA_KEY
async def from_call(call: aio.Call):
"""Returns a google.rpc.status.Status message from a given grpc.aio.Call.
This is an EXPERIMENTAL API.
Args:
      call: A grpc.aio.Call instance.
Returns:
A google.rpc.status.Status message representing the status of the RPC.
"""
code = await call.code()
details = await call.details()
trailing_metadata = await call.trailing_metadata()
if trailing_metadata is None:
return None
for key, value in trailing_metadata:
if key == GRPC_DETAILS_METADATA_KEY:
rich_status = status_pb2.Status.FromString(value)
if code.value[0] != rich_status.code:
raise ValueError(
'Code in Status proto (%s) doesn\'t match status code (%s)'
% (code_to_grpc_status_code(rich_status.code), code))
if details != rich_status.message:
raise ValueError(
'Message in Status proto (%s) doesn\'t match status details (%s)'
% (rich_status.message, details))
return rich_status
return None
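# Illustrative sketch (not part of this module): typical client-side usage with
# grpc.aio.  `stub.GetFeature` is a hypothetical unary-unary RPC whose server
# attaches a google.rpc.Status to its trailing metadata.
async def _example_rich_error_details(stub, request):
    import grpc
    call = stub.GetFeature(request)
    try:
        await call
    except grpc.RpcError:
        rich_status = await from_call(call)
        if rich_status is not None:
            return rich_status.details  # repeated google.protobuf.Any entries
        raise
    return None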
__all__ = [
'from_call',
]
| apache-2.0 |
fdvarela/odoo8 | addons/hr_timesheet_invoice/wizard/__init__.py | 433 | 1159 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_invoice_create
import hr_timesheet_analytic_profit
import hr_timesheet_final_invoice_create
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cristian99garcia/pilas-activity | pilas/actores/entradadetexto.py | 1 | 1732 | # -*- encoding: utf-8 -*-
# Pilas engine - A video game framework.
#
# Copyright 2010 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilas.actores import Actor
import pilas
class EntradaDeTexto(Actor):
"""Representa una caja de texto que puede servir para ingresar texto.
Este actor, en la mayoria de los casos, se utiliza para solicitarle
el nombre a un usuario. Por ejemplo, cuando completa un record
de puntaje."""
def __init__(self, x=0, y=0, color=pilas.colores.negro, limite=10, tamano=32, fuente='Arial', cursor_intermitente=True):
self.cursor = "|"
self.texto = ""
self.limite = limite
imagen = pilas.imagenes.cargar_superficie(640, 480)
Actor.__init__(self, imagen)
pilas.eventos.pulsa_tecla.conectar(self.cuando_pulsa_una_tecla)
self._actualizar_imagen()
if cursor_intermitente:
pilas.mundo.agregar_tarea_siempre(0.25, self._actualizar_cursor)
def _actualizar_cursor(self):
if self.cursor == "":
self.cursor = "|"
else:
self.cursor = ""
self._actualizar_imagen()
return True
def cuando_pulsa_una_tecla(self, evento):
if evento.codigo == '\x08':
            # Backspace: the user wants to delete a character
self.texto = self.texto[:-1]
else:
if len(self.texto) < self.limite:
self.texto = self.texto + evento.texto
self._actualizar_imagen()
def _actualizar_imagen(self):
self.imagen.pintar(pilas.colores.blanco)
self.imagen.texto(self.texto + self.cursor, 100, 100)
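# Illustrative sketch (not part of the original module), assuming the usual
# pilas bootstrap functions: start the engine, create the text-entry actor
# and run the main loop.
if __name__ == '__main__':
    pilas.iniciar()
    entrada = EntradaDeTexto(limite=15)
    pilas.ejecutar()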
| gpl-3.0 |
jiemakel/omorfi | src/python/generate-yaml.py | 1 | 4497 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
"""
This script converts TSV formatted tests to yaml formatted tests
"""
# Author: Tommi A Pirinen <[email protected]> 2014
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import argparse
import csv
from sys import argv, exit, stderr
from time import strftime
from omorfi.omor_formatter import OmorFormatter
from omorfi.apertium_formatter import ApertiumFormatter
# standard UI stuff
def main():
# initialise argument parser
ap = argparse.ArgumentParser(
description="Convert Finnish dictionary TSV data into xerox/HFST lexc format")
ap.add_argument("--quiet", "-q", action="store_false", dest="verbose",
default=False,
help="do not print output to stdout while processing")
ap.add_argument("--verbose", "-v", action="store_true", default=False,
help="print each step to stdout while processing")
ap.add_argument("--input", "-i", action="append", required=True,
metavar="INFILE", help="read tests from INFILEs")
ap.add_argument("--version", "-V", action="version")
ap.add_argument("--output", "-o", "--one-file", "-1",
type=argparse.FileType("w"), required=True,
metavar="OFILE", help="write output to OFILE")
ap.add_argument("--fields", "-F", action="store", default=2,
metavar="N", help="read N fields from master")
ap.add_argument("--separator", action="store", default="\t",
metavar="SEP", help="use SEP as separator")
ap.add_argument("--comment", "-C", action="append", default=["#"],
metavar="COMMENT", help="skip lines starting with COMMENT that"
"do not have SEPs")
ap.add_argument("--strip", action="store",
metavar="STRIP", help="strip STRIP from fields before using")
ap.add_argument("--format", "-f", action="store", default="omor",
help="use specific output format for lexc data",
choices=['omor', 'apertium'])
args = ap.parse_args()
quoting = csv.QUOTE_NONE
quotechar = None
# setup files
formatter = None
if args.format == 'omor':
formatter = OmorFormatter()
elif args.format == 'apertium':
formatter = ApertiumFormatter()
if args.verbose:
print("Writing yaml to", args.output.name)
# print test cases
for tsv_filename in args.input:
if args.verbose:
print("Reading from", tsv_filename)
linecount = 0
print("# Omorfi tests generated from", tsv_filename,
"date:", strftime("%Y-%m-%d %H:%M:%S+%Z"),
"params: ", ' '.join(argv), file=args.output,
sep='\n# ')
print("Tests:\n All tests:", file=args.output)
# for each line
with open(tsv_filename, 'r', newline='') as tsv_file:
tsv_reader = csv.reader(tsv_file, delimiter=args.separator,
quoting=quoting, quotechar=quotechar, escapechar='%', strict=True)
for tsv_parts in tsv_reader:
linecount += 1
if len(tsv_parts) < 3:
print(tsv_filename, linecount,
"Too few tabs on line",
"skipping following fields:", tsv_parts,
file=stderr)
continue
# format output
print(' "', tsv_parts[1], sep='', file=args.output,
end='')
print(formatter.analyses2lexc(tsv_parts[2],
args.format).replace('% ', ' '),
file=args.output, end='')
print('": "', tsv_parts[0], '"', sep='', file=args.output)
exit(0)
if __name__ == "__main__":
main()
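# Illustrative usage (not part of the script), based on the argparse options
# defined above; the file names are hypothetical:
#
#     ./generate-yaml.py -i tests.tsv -o tests.yaml -f omor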
| gpl-3.0 |
r0e/servo | tests/wpt/harness/wptrunner/executors/executorselenium.py | 72 | 9431 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import socket
import sys
import threading
import time
import traceback
import urlparse
import uuid
from .base import (ExecutorException,
Protocol,
RefTestExecutor,
RefTestImplementation,
TestExecutor,
TestharnessExecutor,
testharness_result_converter,
reftest_result_converter,
strip_server)
from ..testrunner import Stop
here = os.path.join(os.path.split(__file__)[0])
webdriver = None
exceptions = None
extra_timeout = 5
def do_delayed_imports():
global webdriver
global exceptions
from selenium import webdriver
from selenium.common import exceptions
class SeleniumProtocol(Protocol):
def __init__(self, executor, browser, capabilities, **kwargs):
do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def setup(self, runner):
"""Connect to browser via Selenium's WebDriver implementation."""
self.runner = runner
self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
session_started = False
try:
self.webdriver = webdriver.Remote(
self.url, desired_capabilities=self.capabilities)
except:
self.logger.warning(
"Connecting to Selenium failed:\n%s" % traceback.format_exc())
else:
self.logger.debug("Selenium session started")
session_started = True
if not session_started:
self.logger.warning("Failed to connect to Selenium")
self.executor.runner.send_message("init_failed")
else:
try:
self.after_connect()
except:
print >> sys.stderr, traceback.format_exc()
self.logger.warning(
"Failed to connect to navigate initial page")
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
self.logger.debug("Hanging up on Selenium session")
try:
self.webdriver.quit()
except:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.current_window_handle
# TODO what exception?
except (socket.timeout, exceptions.ErrorInResponseException):
return False
return True
def after_connect(self):
self.load_runner("http")
def load_runner(self, protocol):
url = urlparse.urljoin(self.executor.server_url(protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.get(url)
self.webdriver.execute_script("document.title = '%s'" %
threading.current_thread().name.replace("'", '"'))
def wait(self):
while True:
try:
self.webdriver.execute_async_script("");
except exceptions.TimeoutException:
pass
except (socket.timeout, exceptions.NoSuchWindowException,
exceptions.ErrorInResponseException, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
class SeleniumRun(object):
def __init__(self, func, webdriver, url, timeout):
self.func = func
self.result = None
self.webdriver = webdriver
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
self.webdriver.set_script_timeout((timeout + extra_timeout) * 1000)
except exceptions.ErrorInResponseException:
self.logger.error("Lost webdriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
assert not flag
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.webdriver, self.url, self.timeout)
except exceptions.TimeoutException:
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, exceptions.ErrorInResponseException):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", e)
finally:
self.result_flag.set()
class SeleniumTestharnessExecutor(TestharnessExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None):
"""Selenium-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver.js")) as f:
self.script = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
def is_alive(self):
return self.protocol.is_alive()
def on_protocol_change(self, new_protocol):
self.protocol.load_runner(new_protocol)
def do_test(self, test):
url = self.test_url(test)
success, data = SeleniumRun(self.do_testharness,
self.protocol.webdriver,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, webdriver, url, timeout):
return webdriver.execute_async_script(
self.script % {"abs_url": url,
"url": strip_server(url),
"window_id": self.window_id,
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout * 1000})
class SeleniumRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None):
"""Selenium WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
self.logger.info("Test requires OS-level window focus")
if self.close_after_done and self.has_window:
self.protocol.webdriver.close()
self.protocol.webdriver.switch_to_window(
self.protocol.webdriver.window_handles[-1])
self.has_window = False
if not self.has_window:
self.protocol.webdriver.execute_script(self.script)
self.protocol.webdriver.switch_to_window(
self.protocol.webdriver.window_handles[-1])
self.has_window = True
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test):
return SeleniumRun(self._screenshot,
self.protocol.webdriver,
self.test_url(test),
test.timeout).run()
def _screenshot(self, webdriver, url, timeout):
webdriver.get(url)
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.get_screenshot_as_base64()
# strip off the data:img/png, part of the url
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
| mpl-2.0 |
hsaputra/tensorflow | tensorflow/contrib/session_bundle/session_bundle.py | 89 | 6887 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Importer for an exported TensorFlow model.
This module provides a function to create a SessionBundle containing both the
Session and MetaGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util.deprecation import deprecated
@deprecated("2017-06-30",
"No longer supported. Switch to SavedModel immediately.")
def maybe_session_bundle_dir(export_dir):
"""Checks if the model path contains session bundle model.
Args:
export_dir: string path to model checkpoint, for example 'model/00000123'
Returns:
true if path contains session bundle model files, ie META_GRAPH_DEF_FILENAME
"""
meta_graph_filename = os.path.join(export_dir,
constants.META_GRAPH_DEF_FILENAME)
return file_io.file_exists(meta_graph_filename)
@deprecated("2017-06-30",
"No longer supported. Switch to SavedModel immediately.")
def load_session_bundle_from_path(export_dir,
target="",
config=None,
meta_graph_def=None):
"""Load session bundle from the given path.
The function reads input from the export_dir, constructs the graph data to the
default graph and restores the parameters for the session created.
Args:
export_dir: the directory that contains files exported by exporter.
target: The execution engine to connect to. See target in tf.Session()
config: A ConfigProto proto with configuration options. See config in
tf.Session()
meta_graph_def: optional object of type MetaGraphDef. If this object is
present, then it is used instead of parsing MetaGraphDef from export_dir.
Returns:
session: a tensorflow session created from the variable files.
meta_graph: a meta graph proto saved in the exporter directory.
Raises:
RuntimeError: if the required files are missing or contain unrecognizable
fields, i.e. the exported model is invalid.
"""
if not meta_graph_def:
meta_graph_filename = os.path.join(export_dir,
constants.META_GRAPH_DEF_FILENAME)
if not file_io.file_exists(meta_graph_filename):
raise RuntimeError("Expected meta graph file missing %s" %
meta_graph_filename)
# Reads meta graph file.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
meta_graph_def.ParseFromString(
file_io.read_file_to_string(meta_graph_filename, binary_mode=True))
variables_filename = ""
variables_filename_list = []
checkpoint_sharded = False
variables_index_filename = os.path.join(export_dir,
constants.VARIABLES_INDEX_FILENAME_V2)
checkpoint_v2 = file_io.file_exists(variables_index_filename)
# Find matching checkpoint files.
if checkpoint_v2:
# The checkpoint is in v2 format.
variables_filename_pattern = os.path.join(
export_dir, constants.VARIABLES_FILENAME_PATTERN_V2)
variables_filename_list = file_io.get_matching_files(
variables_filename_pattern)
checkpoint_sharded = True
else:
variables_filename = os.path.join(export_dir, constants.VARIABLES_FILENAME)
if file_io.file_exists(variables_filename):
variables_filename_list = [variables_filename]
else:
variables_filename = os.path.join(export_dir,
constants.VARIABLES_FILENAME_PATTERN)
variables_filename_list = file_io.get_matching_files(variables_filename)
checkpoint_sharded = True
# Prepare the files to restore a session.
if not variables_filename_list:
restore_files = ""
elif checkpoint_v2 or not checkpoint_sharded:
# For checkpoint v2 or v1 with non-sharded files, use "export" to restore
# the session.
restore_files = constants.VARIABLES_FILENAME
else:
restore_files = constants.VARIABLES_FILENAME_PATTERN
assets_dir = os.path.join(export_dir, constants.ASSETS_DIRECTORY)
collection_def = meta_graph_def.collection_def
graph_def = graph_pb2.GraphDef()
if constants.GRAPH_KEY in collection_def:
# Use serving graph_def in MetaGraphDef collection_def if exists
graph_def_any = collection_def[constants.GRAPH_KEY].any_list.value
if len(graph_def_any) != 1:
raise RuntimeError("Expected exactly one serving GraphDef in : %s" %
meta_graph_def)
else:
graph_def_any[0].Unpack(graph_def)
# Replace the graph def in meta graph proto.
meta_graph_def.graph_def.CopyFrom(graph_def)
ops.reset_default_graph()
sess = session.Session(target, graph=None, config=config)
# Import the graph.
saver = saver_lib.import_meta_graph(meta_graph_def)
# Restore the session.
if restore_files:
saver.restore(sess, os.path.join(export_dir, restore_files))
init_op_tensor = None
if constants.INIT_OP_KEY in collection_def:
init_ops = collection_def[constants.INIT_OP_KEY].node_list.value
if len(init_ops) != 1:
raise RuntimeError("Expected exactly one serving init op in : %s" %
meta_graph_def)
init_op_tensor = ops.get_collection(constants.INIT_OP_KEY)[0]
# Create asset input tensor list.
asset_tensor_dict = {}
if constants.ASSETS_KEY in collection_def:
assets_any = collection_def[constants.ASSETS_KEY].any_list.value
for asset in assets_any:
asset_pb = manifest_pb2.AssetFile()
asset.Unpack(asset_pb)
asset_tensor_dict[asset_pb.tensor_binding.tensor_name] = os.path.join(
assets_dir, asset_pb.filename)
if init_op_tensor:
# Run the init op.
sess.run(fetches=[init_op_tensor], feed_dict=asset_tensor_dict)
return sess, meta_graph_def
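# Illustrative sketch (not part of this module): load an exported bundle and
# run the restored graph.  The export path and tensor names below are
# assumptions of this example, not values defined by the session_bundle API.
def _example_run_bundle(export_dir="model/00000123"):
  sess, meta_graph = load_session_bundle_from_path(export_dir)
  with sess:
    # Tensor names depend entirely on how the model was exported.
    return sess.run("output:0", feed_dict={"input:0": [[1.0, 2.0]]})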
| apache-2.0 |
kaner/gettor | lib/gettor/i18n.py | 1 | 20837 | # Copyright (c) 2008 - 2011, Jacob Appelbaum <[email protected]>,
# Christian Fromme <[email protected]>
# This is Free Software. See LICENSE for license information.
# -*- coding: utf-8 -*-
import os
import gettext
def getLang(lang, config):
"""Return the Translation instance for a given language. If no Translation
instance is found, return the one for 'en'
"""
localeDir = os.path.join(config.BASEDIR, "i18n")
fallback = config.DEFAULT_LOCALE
return gettext.translation("gettor", localedir=localeDir,
languages=[lang], fallback=fallback)
def _(text):
"""This is necessary because strings are translated when they're imported.
Otherwise this would make it impossible to switch languages more than
once
"""
return text
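# Illustrative sketch (not part of this module): strings are wrapped with _()
# at definition time and only rendered through a per-language catalogue when a
# reply is built.  `config` stands for a GetTor config object providing BASEDIR
# and DEFAULT_LOCALE, as assumed by getLang() above, and GETTOR_TEXT is the
# list of translatable strings defined below.
#
#     t = getLang("de", config)
#     reply = t.gettext(GETTOR_TEXT[0])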
# Giant multi language help message. Add more translations as they become ready
MULTILANGHELP = """
Hello, This is the "GetTor" robot.
I will mail you a Tor package, if you tell me which one you want.
Please select one of the following package names:
tor-browser-bundle
macosx-i386-bundle
macosx-ppc-bundle
linux-browser-bundle-i386
linux-browser-bundle-x86_64
source-bundle
Please reply to this mail (to gettor), and tell me
a single package name anywhere in the body of your email.
OBTAINING LOCALIZED VERSIONS OF TOR
===================================
To get a version of Tor translated into your language, specify the
language you want in the address you send the mail to:
gettor+zh
This example will give you the requested package in a localized
version for Chinese. Check below for a list of supported language
codes.
List of supported locales:
-------------------------
Here is a list of all available languages:
gettor+ar: Arabic
gettor+de: German
gettor+en: English
gettor+es: Spanish
gettor+fa: Farsi (Iran)
gettor+fr: French
gettor+it: Italian
gettor+nl: Dutch
gettor+pl: Polish
gettor+ru: Russian
gettor+zh: Chinese
If you select no language, you will receive the English version.
SUPPORT
=======
If you have any questions or it doesn't work, you can contact a
human at this support email address: tor-assistants
--
مرحبا، أنا روبوت \"احصل على تور\".
سأرسل لك حزمة برامج تور، إذا أخبرتني أيها تريد.
رجاء اختر إحدى أسماء الحزم التالية:
tor-browser-bundle
macosx-i386-bundle
macosx-ppc-bundle
linux-browser-bundle-i386
linux-browser-bundle-x86_64
source-bundle
يرجى أن ترد على هذه الرسالة (إلى [email protected])، وتخبرني
باسم حزمة واحدة فقط في أي مكان ضمن رسالة الرد.
الحصول على إصدارات مترجمة من تور
========================
لتحصل على إصدار تور مترجم إلى لغتك، يرجى أن تحدد
اللغة التي تريد ضمن العنوان الذي سترسل الرسالة الإلكترونية إليه:
[email protected]
هذا المثال يعطيك الحزمة المطلوبة مترجمة
للغة الصينية. تحقق من القائمة أدناه لتجد رموز اللغات
المدعومة.
قائمة اللغات المدعومة:
-------------------
ها هي قائمة اللغات المتوفرة:
[email protected]: العربية
[email protected]: الألمانية
[email protected]: الإنكليزية
[email protected]: الإسبانية
[email protected]: الفارسية
[email protected]: الفرنسية
[email protected]: الإيطالية
[email protected]: الهولندية
[email protected]: البولندية
[email protected]: الروسية
[email protected]: الصينية
إن لم تقم باختيار لغة فستحصل على الإصدارة الإنكليزية.
الدعم الفني
=======
إن كانت لديك أية أسئلة أو إذا لم يعمل هذا الحل يمكنك الاتصال بكائن
بشري على عنوان الدعم الفني التالي: [email protected]
--
سلام! روبات "GetTor" در خدمت شماست.
چنانچه به من بگویید که به کدامیک از بسته های Tor نیاز دارید، آن را برای شما
ارسال خواهم کرد.
لطفا یکی از بسته های را زیر با ذکر نام انتخاب کنید:
tor-browser-bundle
macosx-i386-bundle
macosx-ppc-bundle
linux-browser-bundle-i386
linux-browser-bundle-x86_64
source-bundle
لطفا به این نامه پاسخ داده ( به آدرس [email protected] ) و در قسمتی از
متن ایمیل خود نام یکی از بسته های فوق را ذکر کنید.
تهیه نسخه ترجمه شده TOR
===================================
برای دریافت نسخه ای از TOR ترجمه شده به زبان محلی شما، می بایستی زبان مورد
نظر خود را در آدرس گیرنده ایمیل ذکر کنید. بعنوان مثال:
[email protected]
در این مثال، فرستنده خواهان نسخه ترجمه شده به زبان چینی می باشد. برای آگاهی
از کدهای مربوط به زبانهای قابل پشتیبانی توسط Tor ، فهرست زیر را مطالعه کنید:
فهرست زبانهای پشتیانی شده
-------------------------
[email protected]: Arabic
[email protected]: German
[email protected]: English
[email protected]: Spanish
[email protected]: Farsi (Iran)
[email protected]: French
[email protected]: Italian
[email protected]: Dutch
[email protected]: Polish
[email protected]: Russian
[email protected]: Chinese
چنانچه هیچیک از زبانهای فوق را انتخاب نکنید، نسخه انگلیسی برای شما ارسال
خواهد شد.
پشتیبانی
=======
چنانچه سوالی دارید یا برنامه دچار اشکال بوده و کار نمی کند ، با قسمت
پشتیبانی با آدرس زیر تماس بگیرید تا یک انسان به سوال شما پاسخ دهد: [email protected]
--
Hei, dette er "GetTor"-roboten
Jeg kommer til å sende deg en Tor-pakke, hvis du forteller meg hvilken du
vil ha.
Vennligst velg en av følgende pakkenavn:
tor-browser-bundle
macosx-i386-bundle
macosx-ppc-bundle
linux-browser-bundle-i386
linux-browser-bundle-x86_64
source-bundle
Vennligst svar til denne eposten (til [email protected]), og nevn
kun et enkelt pakkenavn i tekstområdet til eposten din.
SKAFFE LOKALISERTE VERSJONER AV TOR
===================================
For å skaffe en versjon av Tor som har blitt oversatt til ditt språk,
spesifiser språket du vil i epostadressen du sender eposten til:
[email protected]
Dette eksempelet vil gi deg en forespurt pakke som er en oversatt
versjon for kinesisk. Se listen nedenfor for hvilke språk det er støtte for.
Liste av støttede språk:
-------------------------
Her er en liste av språkene som er tilgjengelig:
[email protected]: Arabisk
[email protected]: Tysk
[email protected]: Engelsk
[email protected]: Spansk
[email protected]: Farsi (Iran)
[email protected]: Fransk
[email protected]: Italiensk
[email protected]: Nederlandsk
[email protected]: Polsk
[email protected]: Russisk
[email protected]: Kinesisk
Hvis du ikke spesifiserer noen språk vil du motta standard Engelsk
versjon
STØTTE
=======
Hvis du har noen spørsmål eller det ikke virker, kan du kontakte et
menneske på denne support-eposten: [email protected]
--
Olá! Este é o robot "GetTor".
Eu envio-lhe um pacote Tor, bastando para isso dizer qual o que quer.
Escolha um dos seguintes pacotes:
tor-browser-bundle
macosx-i386-bundle
macosx-ppc-bundle
linux-browser-bundle-i386
linux-browser-bundle-x86_64
source-bundle
Por favor responda a esta email (para [email protected]), e diga qual o
pacote que deseja, colocando o seu nome no corpo do seu email.
OBTER VERSÕES TRADUZIDAS DO TOR
===================================
Para lhe ser enviado uma versão traduzida do Tor, especifique a língua no
destinatário do seu email:
[email protected]
Este exemplo vai enviar o pacote traduzido para Chinês Simplificado. Veja a
lista de endereços de email existentes que pode utilizar:
Lista de endereços de email suportados:
-------------------------
[email protected]: Português
[email protected]: Arábico
[email protected]: Alemão
[email protected]: Inglês
[email protected]: Espanhol
[email protected]: Farsi (Irão)
[email protected]: Francês
[email protected]: Italiano
[email protected]: Holandês
[email protected]: Polaco
[email protected]: Russo
[email protected]: Chinês
Se não escolher nenhuma língua, receberá o Tor em Inglês.
SUPORTE
=======
Se tiver alguma dúvida, pode contactar um humano através do seguinte
endereço: [email protected]
--
Здравствуйте! Это "робот GetTor".
Я отошлю вам пакет Tor если вы укажете который вы хотите.
Пожалуйста выберите один из пакетов:
tor-browser-bundle
macosx-i386-bundle
macosx-ppc-bundle
linux-browser-bundle-i386
linux-browser-bundle-x86_64
source-bundle
Пожалуйста свяжитесь с нами по этой элктронной почте
([email protected]), и укажите
название одного из пакетов в любом месте в "теле" вашего письма.
ПОЛУЧЕНИЕ ЛОКАЛИЗИРОВАННЫХ ВЕРСИЙ TOR
===================================
Чтобы получить версию Tor переведенную на ваш язык,укажите
предпочитаемый язык в адресной строке куда вы отослали электронную почту:
[email protected]
Вышеуказанный пример даст вам запрошенный пакет в локализированной
версии китайского языка. Проверьте ниже список кодов поддерживаемых
языков.
Список поддерживаемых регионов
-------------------------
Ниже указан список всех доступных языков:
[email protected]: арабский
[email protected]: немецкий
[email protected]: английский
[email protected]: испанский
[email protected]: фарси (Иран)
[email protected]: французский
[email protected]: итальянский
[email protected]: голландский
[email protected]: польский
[email protected]: русский
[email protected]: китайский
Если вы не выберите язык, вы получите версию на английском языке.
ПОДДЕРЖКА
=======
Если у вас вопросы или что то не сработало, вы можете связаться
с живым представителем по этому электронному адресу:[email protected]
--
你好, 这里是"GetTor"自动回复。
您从这里可以得到Tor套件, 请告诉我您需要的套件种类.
请选择套件名称:
tor-browser-bundle
(Tor+Firefox浏览器)
macosx-i386-bundle
(Tor for MacOS)
macosx-ppc-bundle
(Tor for MacOS on PowerPC )
linux-browser-bundle-i386
linux-browser-bundle-x86_64
(Tor for Linux)
source-bundle
(源码包)
请直接回复本邮件([email protected]),
并在信的正文中写好您所需要的套件名称(不包括括号内的中文)。
获取其他语言的Tor套件
===================================
在收件人地址中指定语言代码可以获得本对应语言的版本,例如:
[email protected]
本例中,您将得到中文版的Tor套件,下面是目前支持的语种代码:
支持语言列表:
-------------------------
全部可用语言列表:
[email protected]: Arabic
[email protected]: German
[email protected]: English
[email protected]: Spanish
[email protected]: Farsi (Iran)
[email protected]: French
[email protected]: Italian
[email protected]: Dutch
[email protected]: Polish
[email protected]: Russian
[email protected]: 中文
如果您未指定语言代码,您将收到英文版。
支持
=======
如果您遇到困难或服务出现问题,请联系我们的
技术支持邮箱: [email protected]
--
"""
GETTOR_TEXT = [
# GETTOR_TEXT[0]
_("""Hello, This is the "GetTor" robot.
Thank you for your request."""),
# GETTOR_TEXT[1]
_(""" Unfortunately, we won't answer you at this address. You should make
an account with GMAIL.COM or YAHOO.CN and send the mail from
one of those."""),
# GETTOR_TEXT[2]
_("""We only process requests from email services that support "DKIM",
which is an email feature that lets us verify that the address in the
"From" line is actually the one who sent the mail."""),
# GETTOR_TEXT[3]
_("""(We apologize if you didn't ask for this mail. Since your email is from
a service that doesn't use DKIM, we're sending a short explanation,
and then we'll ignore this email address for the next day or so.)"""),
# GETTOR_TEXT[4]
_("""Please note that currently, we can't process HTML emails or base 64
mails. You will need to send plain text."""),
# GETTOR_TEXT[5]
_("""If you have any questions or it doesn't work, you can contact a
human at this support email address: [email protected]"""),
# GETTOR_TEXT[6]
_("""I will mail you a Tor package, if you tell me which one you want.
Please select one of the following package names:"""),
# GETTOR_TEXT[7]
_("""Please reply to this mail (to [email protected]), and tell me
a single package name anywhere in the body of your email."""),
# GETTOR_TEXT[8]
_(""" OBTAINING LOCALIZED VERSIONS OF TOR"""),
# GETTOR_TEXT[9]
_("""To get a version of Tor translated into your language, specify the
language you want in the address you send the mail to:"""),
# GETTOR_TEXT[10]
_("""This example will give you the requested package in a localized
version for Chinese. Check below for a list of supported language
codes. """),
# GETTOR_TEXT[11]
_(""" List of supported locales:"""),
# GETTOR_TEXT[12]
_("""Here is a list of all available languages:"""),
# GETTOR_TEXT[13]
_(""" [email protected]: Arabic
[email protected]: German
[email protected]: English
[email protected]: Spanish
[email protected]: Farsi (Iran)
[email protected]: French
[email protected]: Italian
[email protected]: Dutch
[email protected]: Polish
[email protected]: Russian
[email protected]: Chinese"""),
# GETTOR_TEXT[14]
_("""If you select no language, you will receive the English version."""),
# GETTOR_TEXT[15]
_("""SMALLER SIZED PACKAGES"""),
# GETTOR_TEXT[16]
_("""If your bandwith is low or your provider doesn't allow you to
receive large attachments in your email, there is a feature of
GetTor you can use to make it send you a number of small packages
instead of one big one."""),
# GETTOR_TEXT[17]
_("""Simply include the keyword 'split' somewhere in your email like so:"""),
# GETTOR_TEXT[18]
_("""Sending this text in an email to GetTor will cause it to send you
the Tor Browser Bundle in a number of 1,4MB attachments."""),
# GETTOR_TEXT[19]
_("""After having received all parts, you need to re-assemble them to
one package again. This is done as follows:"""),
# GETTOR_TEXT[20]
_("""1.) Save all received attachments into one folder on your disk."""),
# GETTOR_TEXT[21]
_("""2.) Unzip all files ending in ".z". If you saved all attachments to
a fresh folder before, simply unzip all files in that folder."""),
# GETTOR_TEXT[22]
_("""3.) Verify all files as described in the mail you received with
each package. (gpg --verify)"""),
# GETTOR_TEXT[23]
_("""4.) Now use a program that can unrar multivolume RAR archives. On
Windows, this usually is WinRAR. If you don't have that
        installed on your computer yet, get it here:"""),
# GETTOR_TEXT[24]
_("""To unpack your Tor package, simply doubleclick the ".exe" file."""),
# GETTOR_TEXT[25]
_("""5.) After unpacking is finished, you should find a newly created
".exe" file in your destination folder. Simply doubleclick
that and Tor Browser Bundle should start within a few seconds."""),
# GETTOR_TEXT[26]
_("""6.) That's it. You're done. Thanks for using Tor and have fun!"""),
# GETTOR_TEXT[27]
_("""SUPPORT"""),
# GETTOR_TEXT[28]
_("""If you have any questions or it doesn't work, you can contact a
human at this support email address: [email protected]"""),
# GETTOR_TEXT[29]
_(""" Here's your requested software as a zip file. Please unzip the
package and verify the signature."""),
# GETTOR_TEXT[30]
_("""Hint: If your computer has GnuPG installed, use the gpg
commandline tool as follows after unpacking the zip file:"""),
# GETTOR_TEXT[31]
_("""The output should look somewhat like this:"""),
# GETTOR_TEXT[32]
_("""If you're not familiar with commandline tools, try looking for
a graphical user interface for GnuPG on this website:"""),
# GETTOR_TEXT[33]
_("""If your Internet connection blocks access to the Tor network, you
may need a bridge relay. Bridge relays (or "bridges" for short)
are Tor relays that aren't listed in the main directory. Since there
is no complete public list of them, even if your ISP is filtering
connections to all the known Tor relays, they probably won't be able
to block all the bridges."""),
# GETTOR_TEXT[34]
_("""You can acquire a bridge by sending an email that contains "get bridges"
in the body of the email to the following email address:"""),
# GETTOR_TEXT[35]
_("""It is also possible to fetch bridges with a web browser at the following
url: https://bridges.torproject.org/"""),
# GETTOR_TEXT[36]
_("""IMPORTANT NOTE:
Since this is part of a split-file request, you need to wait for
all split files to be received by you before you can save them all
into the same directory and unpack them by double-clicking the
first file."""),
# GETTOR_TEXT[37]
_("""Packages might arrive out of order! Please make sure you received
all packages before you attempt to unpack them!"""),
# GETTOR_TEXT[38]
_("""Thank you for your request.
It was successfully understood. Your request is currently being processed.
Your package should arrive within the next ten minutes."""),
# GETTOR_TEXT[39]
_("""If it doesn't arrive, the package might be too big for your mail provider.
Try resending the mail from a GMAIL.COM or YAHOO.COM account."""),
# GETTOR_TEXT[40]
_("""Unfortunately we are currently experiencing problems and we can't fulfill
your request right now. Please be patient as we try to resolve this issue.""")
]
| bsd-3-clause |
Dev4X/oppia | extensions/rules/normalized_string_test.py | 20 | 2859 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classification of NormalizedStrings."""
__author__ = 'Sean Lip'
from extensions.rules import normalized_string
import test_utils
class NormalizedStringRuleUnitTests(test_utils.GenericTestBase):
"""Tests for rules operating on NormalizedString objects."""
def test_equals_rule(self):
rule = normalized_string.Equals('hello')
self.assertTrue(rule.eval('hello'))
self.assertTrue(rule.eval('Hello'))
self.assertFalse(rule.eval('goodbye'))
def test_case_sensitive_equals_rule(self):
rule = normalized_string.CaseSensitiveEquals('hello')
self.assertTrue(rule.eval('hello'))
self.assertFalse(rule.eval('Hello'))
self.assertFalse(rule.eval('goodbye'))
def test_starts_with_rule(self):
self.assertTrue(normalized_string.StartsWith('he').eval('hello'))
self.assertTrue(normalized_string.StartsWith('HE').eval('hello'))
self.assertFalse(normalized_string.StartsWith('hello').eval('he'))
def test_contains_rule(self):
self.assertTrue(normalized_string.Contains('he').eval('hello'))
self.assertTrue(normalized_string.Contains('HE').eval('hello'))
self.assertTrue(normalized_string.Contains('ll').eval('hello'))
self.assertFalse(normalized_string.Contains('ol').eval('hello'))
def test_fuzzy_equals_rule(self):
self.assertTrue(normalized_string.FuzzyEquals('hello').eval('hello'))
self.assertTrue(normalized_string.FuzzyEquals('HEllp').eval('hellp'))
self.assertTrue(normalized_string.FuzzyEquals('hello').eval('hell'))
self.assertTrue(normalized_string.FuzzyEquals('hell').eval('hello'))
self.assertTrue(normalized_string.FuzzyEquals('hellp').eval('hello'))
self.assertTrue(normalized_string.FuzzyEquals('hello').eval('hellp'))
self.assertTrue(normalized_string.FuzzyEquals('hello').eval('helo'))
self.assertTrue(normalized_string.FuzzyEquals('hello').eval('helllo'))
self.assertFalse(normalized_string.FuzzyEquals('pleh').eval('help'))
self.assertFalse(normalized_string.FuzzyEquals('hello').eval(
'hellllo'))
self.assertFalse(normalized_string.FuzzyEquals('hello').eval('help'))
| apache-2.0 |
googleapis/googleapis-gen | google/cloud/automl/v1/automl-v1-py/google/cloud/automl_v1/types/data_items.py | 1 | 8129 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.automl_v1.types import geometry
from google.cloud.automl_v1.types import io
from google.cloud.automl_v1.types import text_segment as gca_text_segment
__protobuf__ = proto.module(
package='google.cloud.automl.v1',
manifest={
'Image',
'TextSnippet',
'DocumentDimensions',
'Document',
'ExamplePayload',
},
)
class Image(proto.Message):
r"""A representation of an image.
Only images up to 30MB in size are supported.
Attributes:
image_bytes (bytes):
Image content represented as a stream of bytes. Note: As
with all ``bytes`` fields, protobuffers use a pure binary
representation, whereas JSON representations use base64.
thumbnail_uri (str):
Output only. HTTP URI to the thumbnail image.
"""
image_bytes = proto.Field(
proto.BYTES,
number=1,
oneof='data',
)
thumbnail_uri = proto.Field(
proto.STRING,
number=4,
)
class TextSnippet(proto.Message):
r"""A representation of a text snippet.
Attributes:
content (str):
Required. The content of the text snippet as
a string. Up to 250000 characters long.
mime_type (str):
Optional. The format of
[content][google.cloud.automl.v1.TextSnippet.content].
Currently the only two allowed values are "text/html" and
"text/plain". If left blank, the format is automatically
determined from the type of the uploaded
[content][google.cloud.automl.v1.TextSnippet.content].
content_uri (str):
Output only. HTTP URI where you can download
the content.
"""
content = proto.Field(
proto.STRING,
number=1,
)
mime_type = proto.Field(
proto.STRING,
number=2,
)
content_uri = proto.Field(
proto.STRING,
number=4,
)
class DocumentDimensions(proto.Message):
r"""Message that describes dimension of a document.
Attributes:
unit (google.cloud.automl_v1.types.DocumentDimensions.DocumentDimensionUnit):
Unit of the dimension.
width (float):
Width value of the document, works together
with the unit.
height (float):
Height value of the document, works together
with the unit.
"""
class DocumentDimensionUnit(proto.Enum):
r"""Unit of the document dimension."""
DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0
INCH = 1
CENTIMETER = 2
POINT = 3
unit = proto.Field(
proto.ENUM,
number=1,
enum=DocumentDimensionUnit,
)
width = proto.Field(
proto.FLOAT,
number=2,
)
height = proto.Field(
proto.FLOAT,
number=3,
)
class Document(proto.Message):
r"""A structured text document e.g. a PDF.
Attributes:
input_config (google.cloud.automl_v1.types.DocumentInputConfig):
An input config specifying the content of the
document.
document_text (google.cloud.automl_v1.types.TextSnippet):
The plain text version of this document.
layout (Sequence[google.cloud.automl_v1.types.Document.Layout]):
Describes the layout of the document. Sorted by
[page_number][].
document_dimensions (google.cloud.automl_v1.types.DocumentDimensions):
The dimensions of the page in the document.
page_count (int):
Number of pages in the document.
"""
class Layout(proto.Message):
r"""Describes the layout information of a
[text_segment][google.cloud.automl.v1.Document.Layout.text_segment]
in the document.
Attributes:
text_segment (google.cloud.automl_v1.types.TextSegment):
Text Segment that represents a segment in
[document_text][google.cloud.automl.v1p1beta.Document.document_text].
page_number (int):
Page number of the
[text_segment][google.cloud.automl.v1.Document.Layout.text_segment]
in the original document, starts from 1.
bounding_poly (google.cloud.automl_v1.types.BoundingPoly):
The position of the
[text_segment][google.cloud.automl.v1.Document.Layout.text_segment]
in the page. Contains exactly 4
[normalized_vertices][google.cloud.automl.v1p1beta.BoundingPoly.normalized_vertices]
and they are connected by edges in the order provided, which
will represent a rectangle parallel to the frame. The
[NormalizedVertex-s][google.cloud.automl.v1p1beta.NormalizedVertex]
are relative to the page. Coordinates are based on top-left
as point (0,0).
text_segment_type (google.cloud.automl_v1.types.Document.Layout.TextSegmentType):
The type of the
[text_segment][google.cloud.automl.v1.Document.Layout.text_segment]
in document.
"""
class TextSegmentType(proto.Enum):
r"""The type of TextSegment in the context of the original
document.
"""
TEXT_SEGMENT_TYPE_UNSPECIFIED = 0
TOKEN = 1
PARAGRAPH = 2
FORM_FIELD = 3
FORM_FIELD_NAME = 4
FORM_FIELD_CONTENTS = 5
TABLE = 6
TABLE_HEADER = 7
TABLE_ROW = 8
TABLE_CELL = 9
text_segment = proto.Field(
proto.MESSAGE,
number=1,
message=gca_text_segment.TextSegment,
)
page_number = proto.Field(
proto.INT32,
number=2,
)
bounding_poly = proto.Field(
proto.MESSAGE,
number=3,
message=geometry.BoundingPoly,
)
text_segment_type = proto.Field(
proto.ENUM,
number=4,
enum='Document.Layout.TextSegmentType',
)
input_config = proto.Field(
proto.MESSAGE,
number=1,
message=io.DocumentInputConfig,
)
document_text = proto.Field(
proto.MESSAGE,
number=2,
message='TextSnippet',
)
layout = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=Layout,
)
document_dimensions = proto.Field(
proto.MESSAGE,
number=4,
message='DocumentDimensions',
)
page_count = proto.Field(
proto.INT32,
number=5,
)
class ExamplePayload(proto.Message):
r"""Example data used for training or prediction.
Attributes:
image (google.cloud.automl_v1.types.Image):
Example image.
text_snippet (google.cloud.automl_v1.types.TextSnippet):
Example text.
document (google.cloud.automl_v1.types.Document):
Example document.
"""
image = proto.Field(
proto.MESSAGE,
number=1,
oneof='payload',
message='Image',
)
text_snippet = proto.Field(
proto.MESSAGE,
number=2,
oneof='payload',
message='TextSnippet',
)
document = proto.Field(
proto.MESSAGE,
number=4,
oneof='payload',
message='Document',
)
__all__ = tuple(sorted(__protobuf__.manifest))
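# Editor's note: an illustrative sketch (not part of the generated module) of
# how these proto-plus messages accept keyword-argument construction:
#
#     payload = ExamplePayload(
#         text_snippet=TextSnippet(content='A sample sentence.',
#                                  mime_type='text/plain'))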
| apache-2.0 |
piyushroshan/xen-4.3.2 | tools/xm-test/lib/XmTestLib/NetConfig.py | 31 | 8702 | #!/usr/bin/python
"""
Copyright (C) International Business Machines Corp., 2005, 2006
Authors: Dan Smith <[email protected]>
Daniel Stekloff <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; under version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys
import commands
import os
import re
import time
import random
from xen.xend.sxp import Parser
from Xm import *
from Test import *
from config import *
class NetworkError(Exception):
def __init__(self, msg):
self.errMsg = msg
def __str__(self):
return str(self.errMsg)
def getXendNetConfig():
# Find out what environment we're in: bridge, nat, or route
xconfig = os.getenv("XEND_CONFIG")
if not xconfig:
xconfig = "/etc/xen/xend-config.sxp"
try:
configfile = open(xconfig, 'r')
except:
return "bridge"
S = configfile.read()
pin = Parser()
pin.input(S)
pin.input_eof()
val = pin.get_val()
while val[0] != 'network-script':
val = pin.get_val()
if val[0] != 'network-script' or len(val) < 2:
# entry network-script not found or no type specified
netenv = "bridge"
else:
# split network command into script name and its parameters
sub_val = val[1].split()
if sub_val[0] == "network-bridge":
netenv = "bridge"
elif sub_val[0] == "network-route":
netenv = "route"
elif sub_val[0] == "network-nat":
netenv = "nat"
else:
raise NetworkError("Failed to get network env from xend config")
configfile.close()
return netenv
class NetConfig:
def __init__(self):
self.netenv = getXendNetConfig()
self.used_ips = {}
self.free_oct_ips = [ 0, 0, 0, 0 ]
self.total_ips = 0
if NETWORK_IP_RANGE == 'dhcp':
self.netmask = NETWORK_IP_RANGE
self.network = NETWORK_IP_RANGE
self.max_ip = NETWORK_IP_RANGE
self.min_ip = NETWORK_IP_RANGE
else:
self.netmask = NETMASK
self.network = NETWORK
s_ip = ''
# Get starting ip and max ip from configured ip range
s_ip = NETWORK_IP_RANGE
ips = s_ip.split("-")
self.max_ip = ips[1]
self.min_ip = ips[0]
self.__setMaxNumberIPs()
# Clean out any aliases in the network range for dom0's interface.
# If an alias exists, a test xendevice add command could fail.
if NETWORK_IP_RANGE != "dhcp":
self.__cleanDom0Aliases()
def __setMaxNumberIPs(self):
# Count the number of IPs available, to help tests know whether they
# have enough to run or not
masko = self.netmask.split('.')
maxo = self.max_ip.split('.')
mino = self.min_ip.split('.')
ips = 0
# Last octet
self.free_oct_ips[3] = (int(maxo[3]) - int(mino[3])) + 1
# 3rd octet
self.free_oct_ips[2] = (int(maxo[2]) - int(mino[2])) + 1
# 2nd octet
self.free_oct_ips[1] = (int(maxo[1]) - int(mino[1])) + 1
# 1st octet
self.free_oct_ips[0] = (int(maxo[0]) - int(mino[0])) + 1
self.total_ips = self.free_oct_ips[3]
if self.free_oct_ips[2] > 1:
self.total_ips = (self.total_ips * self.free_oct_ips[2])
if self.free_oct_ips[1] > 1:
self.total_ips = (self.total_ips * self.free_oct_ips[1])
if self.free_oct_ips[0] > 1:
self.total_ips = (self.total_ips * self.free_oct_ips[0])
def __cleanDom0Aliases(self):
# Remove any aliases within the supplied network IP range on dom0
scmd = 'ip addr show dev %s' % (DOM0_INTF)
status, output = traceCommand(scmd)
if status:
raise NetworkError("Failed to show %s aliases: %d" %
(DOM0_INTF, status))
lines = output.split("\n")
for line in lines:
ip = re.search('(\d+\.\d+\.\d+\.\d+)', line)
if ip and self.isIPInRange(ip.group(1)) == True:
dcmd = 'ip addr del %s/32 dev %s' % (ip.group(1), DOM0_INTF)
dstatus, doutput = traceCommand(dcmd)
if dstatus:
raise NetworkError("Failed to remove %s aliases: %d" %
(DOM0_INTF, status))
def getNetEnv(self):
return self.netenv
def setUsedIP(self, domname, interface, ip):
self.used_ips['%s:%s' % (domname, interface)] = ip
    def __findFirstOctetIP(self, prefix, minip, maxip):
        for i in range(minip, maxip):
            ip = '%s%s' % (prefix, str(i))
            if ip not in self.used_ips.values():
                return ip
        # Every address in this range is already assigned.
        return None
def getFreeIP(self, domname, interface):
# Get a free IP. It uses the starting ip octets and then the
# total number of allowed numbers for that octet. It only
        # calculates ips for the last two octets; we shouldn't need more
start_octets = self.min_ip.split(".")
ip = None
# Only working with ips from last two octets, shouldn't need more
max = int(start_octets[2]) + self.free_oct_ips[2]
for i in range(int(start_octets[2]), max):
prefix = '%s.%s.%s.' % (start_octets[0], start_octets[1], str(i))
ip = self.__findFirstOctetIP(prefix, int(start_octets[3]), self.free_oct_ips[3])
if ip:
break
if not ip:
raise NetworkError("Ran out of configured addresses.")
self.setUsedIP(domname, interface, ip)
return ip
def getNetMask(self):
return self.netmask
def getNetwork(self):
return self.network
def getIP(self, domname, interface):
# Depending on environment, set an IP. Uses the configured range
# of IPs, network address, and netmask
if NETWORK_IP_RANGE == "dhcp":
return None
# Make sure domain and interface aren't already assigned an IP
if self.used_ips.has_key('%s:%s' % (domname, interface)):
raise NetworkError("Domain %s interface %s is already has IP"
% (domname, interface))
return self.getFreeIP(domname, interface)
def setIP(self, domname, interface, ip):
# Make sure domain and interface aren't already assigned an IP
if self.used_ips.has_key('%s:%s' % (domname, interface)):
raise NetworkError("Domain %s interface %s is already has IP"
% (domname, interface))
self.setUsedIP(domname, interface, ip)
def releaseIP(self, domname, interface, ip):
if self.used_ips.has_key('%s:%s' % (domname, interface)):
del self.used_ips['%s:%s' % (domname, interface)]
def getNumberAllowedIPs(self):
return self.total_ips
def canRunNetTest(self, ips):
# Check to see if a test can run, returns true or false. Input is
# number of ips needed.
if NETWORK_IP_RANGE == "dhcp":
return True
if self.total_ips >= ips:
return True
return False
def isIPInRange(self, ip):
# Checks to see if supplied ip is in the range of allowed ips
maxo = self.max_ip.split('.')
mino = self.min_ip.split('.')
ipo = ip.split('.')
if int(ipo[0]) < int(mino[0]):
return False
elif int(ipo[0]) > int(maxo[0]):
return False
if int(ipo[1]) < int(mino[1]):
return False
elif int(ipo[1]) > int(maxo[1]):
return False
if int(ipo[2]) < int(mino[2]):
return False
elif int(ipo[2]) > int(maxo[2]):
return False
if int(ipo[3]) < int(mino[3]):
return False
elif int(ipo[3]) > int(maxo[3]):
return False
return True
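# Editor's note: an illustrative sketch of how a test might consume this
# class; kept as a comment because instantiation reads the xend config, and
# the 'dom1'/'eth0' names below are hypothetical.
#
#     netconf = NetConfig()
#     if netconf.canRunNetTest(2):
#         ip = netconf.getIP('dom1', 'eth0')
#         ...  # configure the interface using ip and netconf.getNetMask()
#         netconf.releaseIP('dom1', 'eth0', ip)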
| gpl-2.0 |
MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Mac/IDLE/idlemain.py | 71 | 2786 | """
Bootstrap script for IDLE as an application bundle.
"""
import sys, os
# Change the current directory the user's home directory, that way we'll get
# a more useful default location in the open/save dialogs.
os.chdir(os.path.expanduser('~/Documents'))
# Make sure sys.executable points to the python interpreter inside the
# framework, instead of at the helper executable inside the application
# bundle (the latter works, but doesn't allow access to the window server)
#
# .../IDLE.app/
# Contents/
# MacOS/
# IDLE (a python script)
# Python{-32} (symlink)
# Resources/
# idlemain.py (this module)
# ...
#
# ../IDLE.app/Contents/MacOS/Python{-32} is symlinked to
# ..Library/Frameworks/Python.framework/Versions/m.n
# /Resources/Python.app/Contents/MacOS/Python{-32}
# which is the Python interpreter executable
#
# The flow of control is as follows:
# 1. IDLE.app is launched which starts python running the IDLE script
# 2. IDLE script exports
# PYTHONEXECUTABLE = .../IDLE.app/Contents/MacOS/Python{-32}
# (the symlink to the framework python)
# 3. IDLE script alters sys.argv and uses os.execve to replace itself with
# idlemain.py running under the symlinked python.
# This is the magic step.
# 4. During interpreter initialization, because PYTHONEXECUTABLE is defined,
# sys.executable may get set to an unuseful value.
#
# (Note that the IDLE script and the setting of PYTHONEXECUTABLE is
# generated automatically by bundlebuilder in the Python 2.x build.
# Also, IDLE invoked via command line, i.e. bin/idle, bypasses all of
# this.)
#
# Now fix up the execution environment before importing idlelib.
# Reset sys.executable to its normal value, the actual path of
# the interpreter in the framework, by following the symlink
# exported in PYTHONEXECUTABLE.
pyex = os.environ['PYTHONEXECUTABLE']
sys.executable = os.path.join(os.path.dirname(pyex), os.readlink(pyex))
# Remove any sys.path entries for the Resources dir in the IDLE.app bundle.
p = pyex.partition('.app')
if p[2].startswith('/Contents/MacOS/Python'):
sys.path = [value for value in sys.path if
value.partition('.app') != (p[0], p[1], '/Contents/Resources')]
# Unexport PYTHONEXECUTABLE so that the other Python processes started
# by IDLE have a normal sys.executable.
del os.environ['PYTHONEXECUTABLE']
# Look for the -psn argument that the launcher adds and remove it, it will
# only confuse the IDLE startup code.
for idx, value in enumerate(sys.argv):
if value.startswith('-psn_'):
del sys.argv[idx]
break
# Now it is safe to import idlelib.
from idlelib.PyShell import main
if __name__ == '__main__':
main()
| gpl-2.0 |
losnikitos/googleads-python-lib | examples/dfp/v201411/product_template_service/get_sponsorship_product_templates.py | 4 | 1924 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all sponsorship product templates.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
product_template_service = client.GetService(
'ProductTemplateService', version='v201411')
# Create a statement to select all sponsorship product templates.
values = [{
'key': 'lineItemType',
'value': {
'xsi_type': 'TextValue',
'value': 'SPONSORSHIP'
}
}]
query = 'WHERE lineItemType = :lineItemType ORDER BY id ASC'
statement = dfp.FilterStatement(query, values)
# Get product templates by statement.
while True:
response = product_template_service.getProductTemplatesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for product_template in response['results']:
print ('Product template with id \'%s\' and name \'%s\' was found.' % (
product_template['id'], product_template['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 |
Djlavoy/scrapy | scrapy/loader/__init__.py | 78 | 6221 | """Item Loader
See documentation in docs/topics/loaders.rst
"""
from collections import defaultdict
import six
from scrapy.item import Item
from scrapy.selector import Selector
from scrapy.utils.decorators import deprecated
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.utils.misc import arg_to_iter, extract_regex
from scrapy.utils.python import flatten
from .common import wrap_loader_context
from .processors import Identity
class ItemLoader(object):
default_item_class = Item
default_input_processor = Identity()
default_output_processor = Identity()
default_selector_class = Selector
def __init__(self, item=None, selector=None, response=None, **context):
if selector is None and response is not None:
selector = self.default_selector_class(response)
self.selector = selector
context.update(selector=selector, response=response)
if item is None:
item = self.default_item_class()
self.item = context['item'] = item
self.context = context
self._values = defaultdict(list)
def add_value(self, field_name, value, *processors, **kw):
value = self.get_value(value, *processors, **kw)
if value is None:
return
if not field_name:
for k, v in six.iteritems(value):
self._add_value(k, v)
else:
self._add_value(field_name, value)
def replace_value(self, field_name, value, *processors, **kw):
value = self.get_value(value, *processors, **kw)
if value is None:
return
if not field_name:
for k, v in six.iteritems(value):
self._replace_value(k, v)
else:
self._replace_value(field_name, value)
def _add_value(self, field_name, value):
value = arg_to_iter(value)
processed_value = self._process_input_value(field_name, value)
if processed_value:
self._values[field_name] += arg_to_iter(processed_value)
def _replace_value(self, field_name, value):
self._values.pop(field_name, None)
self._add_value(field_name, value)
def get_value(self, value, *processors, **kw):
regex = kw.get('re', None)
if regex:
value = arg_to_iter(value)
value = flatten([extract_regex(regex, x) for x in value])
for proc in processors:
if value is None:
break
proc = wrap_loader_context(proc, self.context)
value = proc(value)
return value
def load_item(self):
item = self.item
for field_name in tuple(self._values):
value = self.get_output_value(field_name)
if value is not None:
item[field_name] = value
return item
def get_output_value(self, field_name):
proc = self.get_output_processor(field_name)
proc = wrap_loader_context(proc, self.context)
try:
return proc(self._values[field_name])
except Exception as e:
raise ValueError("Error with output processor: field=%r value=%r error='%s: %s'" % \
(field_name, self._values[field_name], type(e).__name__, str(e)))
def get_collected_values(self, field_name):
return self._values[field_name]
def get_input_processor(self, field_name):
proc = getattr(self, '%s_in' % field_name, None)
if not proc:
proc = self._get_item_field_attr(field_name, 'input_processor', \
self.default_input_processor)
return proc
def get_output_processor(self, field_name):
proc = getattr(self, '%s_out' % field_name, None)
if not proc:
proc = self._get_item_field_attr(field_name, 'output_processor', \
self.default_output_processor)
return proc
def _process_input_value(self, field_name, value):
proc = self.get_input_processor(field_name)
proc = wrap_loader_context(proc, self.context)
return proc(value)
def _get_item_field_attr(self, field_name, key, default=None):
if isinstance(self.item, Item):
value = self.item.fields[field_name].get(key, default)
else:
value = default
return value
def _check_selector_method(self):
if self.selector is None:
raise RuntimeError("To use XPath or CSS selectors, "
"%s must be instantiated with a selector "
"or a response" % self.__class__.__name__)
def add_xpath(self, field_name, xpath, *processors, **kw):
values = self._get_xpathvalues(xpath, **kw)
self.add_value(field_name, values, *processors, **kw)
def replace_xpath(self, field_name, xpath, *processors, **kw):
values = self._get_xpathvalues(xpath, **kw)
self.replace_value(field_name, values, *processors, **kw)
def get_xpath(self, xpath, *processors, **kw):
values = self._get_xpathvalues(xpath, **kw)
return self.get_value(values, *processors, **kw)
@deprecated(use_instead='._get_xpathvalues()')
def _get_values(self, xpaths, **kw):
return self._get_xpathvalues(xpaths, **kw)
def _get_xpathvalues(self, xpaths, **kw):
self._check_selector_method()
xpaths = arg_to_iter(xpaths)
return flatten([self.selector.xpath(xpath).extract() for xpath in xpaths])
def add_css(self, field_name, css, *processors, **kw):
values = self._get_cssvalues(css, **kw)
self.add_value(field_name, values, *processors, **kw)
def replace_css(self, field_name, css, *processors, **kw):
values = self._get_cssvalues(css, **kw)
self.replace_value(field_name, values, *processors, **kw)
def get_css(self, css, *processors, **kw):
values = self._get_cssvalues(css, **kw)
return self.get_value(values, *processors, **kw)
def _get_cssvalues(self, csss, **kw):
self._check_selector_method()
csss = arg_to_iter(csss)
return flatten([self.selector.css(css).extract() for css in csss])
XPathItemLoader = create_deprecated_class('XPathItemLoader', ItemLoader)
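# Editor's note: a minimal usage sketch, assuming a hypothetical Product item
# with 'name' and 'price' fields and a parse() callback that receives a
# response; it is kept as a comment and is not part of this module.
#
#     from scrapy.item import Item, Field
#     from scrapy.loader import ItemLoader
#     from scrapy.loader.processors import MapCompose, TakeFirst
#
#     class Product(Item):
#         name = Field()
#         price = Field()
#
#     class ProductLoader(ItemLoader):
#         default_item_class = Product
#         default_output_processor = TakeFirst()
#         price_in = MapCompose(lambda s: s.strip())
#
#     def parse(response):
#         loader = ProductLoader(response=response)
#         loader.add_css('name', 'h1::text')
#         loader.add_xpath('price', '//span[@class="price"]/text()')
#         return loader.load_item()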
| bsd-3-clause |
Edraak/edraak-platform | lms/djangoapps/ccx/tests/test_models.py | 9 | 7556 | """
tests for the models
"""
import json
from datetime import datetime, timedelta
import ddt
from nose.plugins.attrib import attr
from pytz import utc
from student.roles import CourseCcxCoachRole
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, check_mongo_calls
from ..overrides import override_field_for_ccx
from .factories import CcxFactory
@ddt.ddt
@attr(shard=7)
class TestCCX(ModuleStoreTestCase):
"""Unit tests for the CustomCourseForEdX model
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""common setup for all tests"""
super(TestCCX, self).setUp()
self.course = CourseFactory.create()
self.coach = AdminFactory.create()
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
self.ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
def set_ccx_override(self, field, value):
"""Create a field override for the test CCX on <field> with <value>"""
override_field_for_ccx(self.ccx, self.course, field, value)
def test_ccx_course_is_correct_course(self):
"""verify that the course property of a ccx returns the right course"""
expected = self.course
actual = self.ccx.course
self.assertEqual(expected, actual)
def test_ccx_course_caching(self):
"""verify that caching the propery works to limit queries"""
with check_mongo_calls(3):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.course # pylint: disable=pointless-statement
with check_mongo_calls(0):
self.ccx.course # pylint: disable=pointless-statement
def test_ccx_start_is_correct(self):
"""verify that the start datetime for a ccx is correctly retrieved
Note that after setting the start field override microseconds are
truncated, so we can't do a direct comparison between before and after.
For this reason we test the difference between and make sure it is less
than one second.
"""
expected = datetime.now(utc)
self.set_ccx_override('start', expected)
actual = self.ccx.start # pylint: disable=no-member
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_start_caching(self):
"""verify that caching the start property works to limit queries"""
now = datetime.now(utc)
self.set_ccx_override('start', now)
with check_mongo_calls(3):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.start # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.start # pylint: disable=pointless-statement, no-member
def test_ccx_due_without_override(self):
"""verify that due returns None when the field has not been set"""
actual = self.ccx.due # pylint: disable=no-member
self.assertIsNone(actual)
def test_ccx_due_is_correct(self):
"""verify that the due datetime for a ccx is correctly retrieved"""
expected = datetime.now(utc)
self.set_ccx_override('due', expected)
actual = self.ccx.due # pylint: disable=no-member
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_due_caching(self):
"""verify that caching the due property works to limit queries"""
expected = datetime.now(utc)
self.set_ccx_override('due', expected)
with check_mongo_calls(3):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.due # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.due # pylint: disable=pointless-statement, no-member
def test_ccx_has_started(self):
"""verify that a ccx marked as starting yesterday has started"""
now = datetime.now(utc)
delta = timedelta(1)
then = now - delta
self.set_ccx_override('start', then)
self.assertTrue(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_not_started(self):
"""verify that a ccx marked as starting tomorrow has not started"""
now = datetime.now(utc)
delta = timedelta(1)
then = now + delta
self.set_ccx_override('start', then)
self.assertFalse(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_ended(self):
"""verify that a ccx that has a due date in the past has ended"""
now = datetime.now(utc)
delta = timedelta(1)
then = now - delta
self.set_ccx_override('due', then)
self.assertTrue(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_has_not_ended(self):
"""verify that a ccx that has a due date in the future has not eneded
"""
now = datetime.now(utc)
delta = timedelta(1)
then = now + delta
self.set_ccx_override('due', then)
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_without_due_date_has_not_ended(self):
"""verify that a ccx without a due date has not ended"""
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_max_student_enrollment_correct(self):
"""
Verify the override value for max_student_enrollments_allowed
"""
expected = 200
self.set_ccx_override('max_student_enrollments_allowed', expected)
actual = self.ccx.max_student_enrollments_allowed # pylint: disable=no-member
self.assertEqual(expected, actual)
def test_structure_json_default_empty(self):
"""
By default structure_json does not contain anything
"""
self.assertEqual(self.ccx.structure_json, None) # pylint: disable=no-member
self.assertEqual(self.ccx.structure, None) # pylint: disable=no-member
def test_structure_json(self):
"""
Test a json stored in the structure_json
"""
dummy_struct = [
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_4",
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_5",
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_11"
]
json_struct = json.dumps(dummy_struct)
ccx = CcxFactory(
course_id=self.course.id,
coach=self.coach,
structure_json=json_struct
)
self.assertEqual(ccx.structure_json, json_struct) # pylint: disable=no-member
self.assertEqual(ccx.structure, dummy_struct) # pylint: disable=no-member
def test_locator_property(self):
"""
Verify that the locator helper property returns a correct CCXLocator
"""
locator = self.ccx.locator # pylint: disable=no-member
self.assertEqual(self.ccx.id, long(locator.ccx))
| agpl-3.0 |
nkrinner/nova | nova/openstack/common/middleware/request_id.py | 19 | 1423 | # Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that ensures request ID.
It ensures to assign request ID for each API request and set it to
request environment. The request ID is also added to API response.
"""
import webob.dec
from nova.openstack.common import context
from nova.openstack.common.middleware import base
ENV_REQUEST_ID = 'openstack.request_id'
HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id'
class RequestIdMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
req_id = context.generate_request_id()
req.environ[ENV_REQUEST_ID] = req_id
response = req.get_response(self.application)
if HTTP_RESP_HEADER_REQUEST_ID not in response.headers:
response.headers.add(HTTP_RESP_HEADER_REQUEST_ID, req_id)
return response
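# Editor's note: a minimal usage sketch, kept as a comment; it assumes the
# base.Middleware constructor simply wraps a plain WSGI callable (the
# 'hello' app below is hypothetical).
#
#     @webob.dec.wsgify
#     def hello(req):
#         return 'request id: %s' % req.environ[ENV_REQUEST_ID]
#
#     app = RequestIdMiddleware(hello)
#     resp = webob.Request.blank('/').get_response(app)
#     assert HTTP_RESP_HEADER_REQUEST_ID in resp.headers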
| apache-2.0 |
stuarteberg/ray | ray/classify.py | 1 | 39656 | #!/usr/bin/env python
# system modules
import sys, os, argparse
import cPickle
import logging
from math import sqrt
from abc import ABCMeta, abstractmethod
from random import shuffle
# libraries
import h5py
import time
import itertools
from numpy import bool, array, double, zeros, mean, random, concatenate, where,\
uint8, ones, float32, uint32, unique, newaxis, zeros_like, arange, floor, \
histogram, seterr, __version__ as numpy_version, unravel_index, diff, \
nonzero, sort, log, inf, argsort, repeat, ones_like, cov, arccos, dot, \
pi, bincount, isfinite, mean, median, sign, intersect1d
seterr(divide='ignore')
from numpy.linalg import det, eig, norm
from scipy import arange
from scipy.misc.common import factorial
from scipy.ndimage import binary_erosion
try:
from scipy.spatial import Delaunay
except ImportError:
logging.warning('Unable to load scipy.spatial.Delaunay. '+
'Convex hull features not available.')
from scipy.misc import comb as nchoosek
from scipy.stats import sem
import networkx as nx
try:
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression, LinearRegression
except ImportError:
logging.warning('scikits.learn not found. SVC, Regression not available.')
from evaluate import xlogx
try:
from vigra.learning import RandomForest as VigraRandomForest
from vigra.__version__ import version as vigra_version
vigra_version = tuple(map(int, vigra_version.split('.')))
except ImportError:
logging.warning(' vigra library is not available. '+
'Cannot use random forest classifier.')
pass
# local imports
import morpho
import iterprogress as ip
from imio import read_h5_stack, write_h5_stack, write_image_stack
from adaboost import AdaBoost
class NullFeatureManager(object):
def __init__(self, *args, **kwargs):
self.default_cache = 'feature-cache'
def __call__(self, g, n1, n2=None):
return self.compute_features(g, n1, n2)
def compute_features(self, g, n1, n2=None):
if n2 is None:
c1 = g.node[n1][self.default_cache]
return self.compute_node_features(g, n1, c1)
if len(g.node[n1]['extent']) > len(g.node[n2]['extent']):
n1, n2 = n2, n1 # smaller node first
c1, c2, ce = [d[self.default_cache] for d in
[g.node[n1], g.node[n2], g[n1][n2]]]
return concatenate((
self.compute_node_features(g, n1, c1),
self.compute_node_features(g, n2, c2),
self.compute_edge_features(g, n1, n2, ce),
self.compute_difference_features(g, n1, n2, c1, c2)
))
def create_node_cache(self, *args, **kwargs):
return array([])
def create_edge_cache(self, *args, **kwargs):
return array([])
def update_node_cache(self, *args, **kwargs):
pass
def update_edge_cache(self, *args, **kwargs):
pass
def pixelwise_update_node_cache(self, *args, **kwargs):
pass
def pixelwise_update_edge_cache(self, *args, **kwargs):
pass
def compute_node_features(self, *args, **kwargs):
return array([])
def compute_edge_features(self, *args, **kwargs):
return array([])
def compute_difference_features(self, *args, **kwargs):
return array([])
class GraphTopologyFeatureManager(NullFeatureManager):
def __init__(self, *args, **kwargs):
super(GraphTopologyFeatureManager, self).__init__()
def compute_node_features(self, g, n, cache=None):
deg = g.degree(n)
ndeg = nx.algorithms.average_neighbor_degree(g, nodes=[n])[n]
return array([deg, ndeg])
def compute_edge_features(self, g, n1, n2, cache=None):
nn1, nn2 = g.neighbors(n1), g.neighbors(n2)
common_neighbors = float(len(intersect1d(nn1, nn2)))
return array([common_neighbors])
def compute_difference_features(self, g, n1, n2, cache1=None, cache2=None):
return self.compute_node_features(g, n1, cache1) - \
self.compute_node_features(g, n2, cache2)
class MomentsFeatureManager(NullFeatureManager):
def __init__(self, nmoments=4, use_diff_features=True, oriented=False,
normalize=False, *args, **kwargs):
super(MomentsFeatureManager, self).__init__()
self.nmoments = nmoments
self.use_diff_features = use_diff_features
self.oriented = oriented
self.normalize = normalize
def compute_moment_sums(self, ar, idxs):
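        # Raise each selected value to the powers 0..nmoments and sum over
        # the selected pixels, giving an (nmoments + 1, nchannels) array of
        # noncentral moment sums; row 0 is the pixel count per channel.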
values = ar[idxs][...,newaxis]
return (values ** arange(self.nmoments+1)).sum(axis=0).T
def create_node_cache(self, g, n):
node_idxs = list(g.node[n]['extent'])
if self.oriented:
ar = g.max_probabilities_r
else:
ar = g.non_oriented_probabilities_r
return self.compute_moment_sums(ar, node_idxs)
def create_edge_cache(self, g, n1, n2):
edge_idxs = list(g[n1][n2]['boundary'])
if self.oriented:
ar = g.oriented_probabilities_r
else:
ar = g.non_oriented_probabilities_r
return self.compute_moment_sums(ar, edge_idxs)
def update_node_cache(self, g, n1, n2, dst, src):
dst += src
def update_edge_cache(self, g, e1, e2, dst, src):
dst += src
def pixelwise_update_node_cache(self, g, n, dst, idxs, remove=False):
if len(idxs) == 0: return
a = -1.0 if remove else 1.0
if self.oriented:
ar = g.max_probabilities_r
else:
ar = g.non_oriented_probabilities_r
dst += a * self.compute_moment_sums(ar, idxs)
def pixelwise_update_edge_cache(self, g, n1, n2, dst, idxs, remove=False):
if len(idxs) == 0: return
a = -1.0 if remove else 1.0
if self.oriented:
ar = g.max_probabilities_r
else:
ar = g.non_oriented_probabilities_r
dst += a * self.compute_moment_sums(ar, idxs)
def compute_node_features(self, g, n, cache=None):
if cache is None:
cache = g.node[n][self.default_cache]
feat = central_moments_from_noncentral_sums(cache)
if self.normalize:
feat = ith_root(feat)
n = feat.ravel()[0]
return concatenate(([n], feat[1:].T.ravel()))
def compute_edge_features(self, g, n1, n2, cache=None):
if cache is None:
cache = g[n1][n2][self.default_cache]
feat = central_moments_from_noncentral_sums(cache)
if self.normalize:
feat = ith_root(feat)
n = feat.ravel()[0]
return concatenate(([n], feat[1:].T.ravel()))
def compute_difference_features(self,g, n1, n2, cache1=None, cache2=None,
nthroot=False):
if not self.use_diff_features:
return array([])
if cache1 is None:
cache1 = g.node[n1][self.default_cache]
m1 = central_moments_from_noncentral_sums(cache1)
if cache2 is None:
cache2 = g.node[n2][self.default_cache]
m2 = central_moments_from_noncentral_sums(cache2)
if nthroot or self.normalize:
m1, m2 = map(ith_root, [m1, m2])
feat = abs(m1-m2)
n = feat.ravel()[0]
return concatenate(([n], feat[1:].T.ravel()))
def central_moments_from_noncentral_sums(a):
"""Compute moments about the mean from sums of x**i, for i=0, ..., len(a).
The first two moments about the mean (1 and 0) would always be
uninteresting so the function returns n (the sample size) and mu (the
sample mean) in their place.
"""
a = a.astype(double)
if len(a) == 1:
return a
N = a.copy()[0]
a /= N
mu = a.copy()[1]
ac = zeros_like(a)
for n in range(2,len(a)):
js = arange(n+1)
if a.ndim > 1: js = js[:,newaxis]
# Formula found in Wikipedia page for "Central moment", 2011-07-31
ac[n] = (nchoosek(n,js) *
(-1)**(n-js) * a[js.ravel()] * mu**(n-js)).sum(axis=0)
ac[0] = N
ac[1] = mu
return ac
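# Editor's note: the helper below is an illustrative sketch, not part of the
# original API. It shows how sums of x**0..x**nmoments map onto (sample
# size, mean, central moments): for the sample [1, 2, 3] the noncentral sums
# are [3, 6, 14] and the result is approximately [3., 2., 0.667], i.e. the
# count, the mean, and the population variance.
def _example_central_moments_from_noncentral_sums():
    vals = array([1.0, 2.0, 3.0])
    sums = (vals[:, newaxis] ** arange(3)).sum(axis=0)  # [n, sum x, sum x**2]
    return central_moments_from_noncentral_sums(sums)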
def ith_root(ar):
"""Get the ith root of the array values at ar[i] for i > 1."""
if len(ar) < 2:
return ar
ar = ar.copy()
ar[2:] = sign(ar[2:]) * \
(abs(ar[2:]) ** (1.0/arange(2, len(ar)))[:, newaxis])
return ar
class OrientationFeatureManager(NullFeatureManager):
def __init__(self, *args, **kwargs):
super(OrientationFeatureManager, self).__init__()
def create_node_cache(self, g, n):
# Get subscripts of extent (morpho.unravel_index was slow)
M = zeros_like(g.watershed);
M.ravel()[list(g.node[n]['extent'])] = 1
ind = array(nonzero(M)).T
# Get second moment matrix
smm = cov(ind.T)/float(len(ind))
try:
# Get eigenvectors
val,vec = eig(smm)
idx = argsort(val)[::-1]
val = val[idx]
vec = vec[idx,:]
return [val,vec,ind]
except:
n = g.watershed.ndim
return [array([0]*n), zeros((n,n)), ind]
def create_edge_cache(self, g, n1, n2):
# Get subscripts of extent (morpho.unravel_index was slow)
M = zeros_like(g.watershed);
M.ravel()[list(g[n1][n2]['boundary'])] = 1
ind = array(nonzero(M)).T
# Get second moment matrix
smm = cov(ind.T)/float(len(ind))
try:
# Get eigenvectors
val,vec = eig(smm)
idx = argsort(val)[::-1]
val = val[idx]
vec = vec[idx,:]
return [val, vec, ind]
except:
n = g.watershed.ndim
return [array([0]*n), zeros((n,n)), ind]
    def update_node_cache(self, g, n1, n2, dst, src):
        # Copy the recomputed cache into dst in place; simply rebinding the
        # local name would leave the cache stored on the graph unchanged.
        dst[:] = self.create_node_cache(g, n1)
    def update_edge_cache(self, g, e1, e2, dst, src):
        dst[:] = self.create_edge_cache(g, e1[0], e1[1])
def pixelwise_update_node_cache(self, g, n, dst, idxs, remove=False):
pass
def pixelwise_update_edge_cache(self, g, n1, n2, dst, idxs, remove=False):
pass
def compute_node_features(self, g, n, cache=None):
if cache is None:
cache = g.node[n][self.default_cache]
val = cache[0]
vec = cache[1]
ind = cache[2]
features = []
features.extend(val)
# coherence measure
if val[0]==0 and val[1]==0:
features.append(0)
else:
features.append( ((val[0]-val[1])/(val[0]+val[1]))**2)
return array(features)
def compute_edge_features(self, g, n1, n2, cache=None):
if cache is None:
cache = g[n1][n2][self.default_cache]
val = cache[0]
vec = cache[1]
ind = cache[2]
features = []
features.extend(val)
# coherence measure
if val[0]==0 and val[1]==0:
features.append(0)
else:
features.append( ((val[0]-val[1])/(val[0]+val[1]))**2)
return array(features)
def compute_difference_features(self,g, n1, n2, cache1=None, cache2=None):
if cache1 is None:
cache1 = g.node[n1][self.default_cache]
val1 = cache1[0]
vec1 = cache1[1]
ind1 = cache1[2]
if cache2 is None:
cache2 = g.node[n2][self.default_cache]
val2 = cache2[0]
vec2 = cache2[1]
ind2 = cache2[2]
v1 = vec1[:,0]
v2 = vec2[:,0]
# Line connecting centroids of regions
m1 = ind1.mean(axis=0)
m2 = ind2.mean(axis=0)
v3 = m1 - m2 # move to origin
        # Features are angle differences
if norm(v1) != 0: v1 /= norm(v1)
if norm(v2) != 0: v2 /= norm(v2)
if norm(v3) != 0: v3 /= norm(v3)
features = []
ang1 = arccos(min(max(dot(v1,v2),-1),1))
if ang1>pi/2.0: ang1 = pi - ang1
features.append(ang1)
ang2 = arccos(min(max(dot(v1,v3),-1),1))
if ang2>pi/2.0: ang2 = pi - ang2
ang3 = arccos(min(max(dot(v2,v3),-1),1))
if ang3>pi/2.0: ang3 = pi - ang3
features.append(min([ang2,ang3]))
features.append(max([ang2,ang3]))
features.append(mean([ang2,ang3]))
return array(features)
class ConvexHullFeatureManager(NullFeatureManager):
def __init__(self, *args, **kwargs):
super(ConvexHullFeatureManager, self).__init__()
def convex_hull_ind(self,g,n1,n2=None):
M = zeros_like(g.watershed);
if n2 is not None:
M.ravel()[list(g[n1][n2]['boundary'])]=1
else:
M.ravel()[list(g.node[n1]['extent'])] = 1
M = M - binary_erosion(M) #Only need border
ind = array(nonzero(M)).T
return ind
def convex_hull_vol(self, ind, g):
# Compute the convex hull of the region
try:
tri = Delaunay(ind)
except:
# Just triangulate bounding box
mins = ind.min(axis=0)
maxes = ind.max(axis=0)
maxes[maxes==mins] += 1
ind = array(list(itertools.product(*tuple(array([mins,maxes]).T))))
tri = Delaunay(ind)
vol = 0
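        # The volume of a d-simplex with vertices p0..pd is
        # |det([p1 - p0, ..., pd - p0])| / d!, so summing that quantity over
        # the Delaunay simplices gives the volume of the hull.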
for simplex in tri.vertices:
pts = tri.points[simplex].T
pts = pts - repeat(pts[:,0][:,newaxis], pts.shape[1],axis=1)
pts = pts[:,1:]
vol += abs(1/float(factorial(pts.shape[0])) * det(pts))
return vol,tri
def create_node_cache(self, g, n):
vol, tri = self.convex_hull_vol(self.convex_hull_ind(g,n), g)
return array([tri,vol])
def create_edge_cache(self, g, n1, n2):
vol, tri = self.convex_hull_vol(self.convex_hull_ind(g,n1,n2), g)
return array([tri,vol])
def update_node_cache(self, g, n1, n2, dst, src):
tri1 = src[0]
tri2 = dst[0]
ind1 = tri1.points[unique(tri1.convex_hull.ravel())]
ind2 = tri2.points[unique(tri2.convex_hull.ravel())]
allind = concatenate((ind1,ind2))
vol, tri = self.convex_hull_vol(allind, g)
        dst[:] = [tri, vol]  # update in place so the stored cache changes
def update_edge_cache(self, g, e1, e2, dst, src):
tri1 = src[0]
tri2 = dst[0]
ind1 = tri1.points[unique(tri1.convex_hull.ravel())]
ind2 = tri2.points[unique(tri2.convex_hull.ravel())]
allind = concatenate((ind1,ind2))
vol, tri = self.convex_hull_vol(allind, g)
        dst[:] = [tri, vol]  # update in place so the stored cache changes
def pixelwise_update_node_cache(self, g, n, dst, idxs, remove=False):
pass
def pixelwise_update_edge_cache(self, g, n1, n2, dst, idxs, remove=False):
pass
def compute_node_features(self, g, n, cache=None):
if cache is None:
cache = g.node[n][self.default_cache]
convex_vol = cache[1]
features = []
features.append(convex_vol)
features.append(convex_vol/float(len(g.node[n]['extent'])))
return array(features)
def compute_edge_features(self, g, n1, n2, cache=None):
if cache is None:
cache = g[n1][n2][self.default_cache]
convex_vol = cache[1]
features = []
features.append(convex_vol)
features.append(convex_vol/float(len(g[n1][n2]['boundary'])))
return array(features)
def compute_difference_features(self,g, n1, n2, cache1=None, cache2=None):
if cache1 is None:
cache1 = g.node[n1][self.default_cache]
tri1 = cache1[0]
convex_vol1 = cache1[1]
if cache2 is None:
cache2 = g.node[n2][self.default_cache]
tri2 = cache2[0]
convex_vol2 = cache2[1]
ind1 = tri1.points[unique(tri1.convex_hull.ravel())]
ind2 = tri2.points[unique(tri2.convex_hull.ravel())]
allind = concatenate((ind1,ind2))
convex_vol_both, tri_both = self.convex_hull_vol(allind, g)
vol1 = float(len(g.node[n1]['extent']))
vol2 = float(len(g.node[n2]['extent']))
volborder = float(len(g[n1][n2]['boundary']))
volboth = vol1+vol2
features = []
features.append(abs(convex_vol1/vol1 - convex_vol2/vol2))
features.append(abs(convex_vol1/vol1 - convex_vol_both/volboth))
features.append(abs(convex_vol2/vol2 - convex_vol_both/volboth))
features.append(abs(convex_vol_both/volboth))
features.append((convex_vol1*vol2)/(convex_vol2*vol1))
features.append(volborder/vol1)
features.append(volborder/vol2)
features.append(volborder/volboth)
return array(features)
class HistogramFeatureManager(NullFeatureManager):
def __init__(self, nbins=4, minval=0.0, maxval=1.0,
compute_percentiles=[], oriented=False,
compute_histogram=True, *args, **kwargs):
super(HistogramFeatureManager, self).__init__()
self.minval = minval
self.maxval = maxval
self.nbins = nbins
self.oriented = oriented
self.compute_histogram = compute_histogram
try:
_ = len(compute_percentiles)
except TypeError: # single percentile value given
compute_percentiles = [compute_percentiles]
self.compute_percentiles = compute_percentiles
def histogram(self, vals):
if vals.ndim == 1:
return histogram(vals, bins=self.nbins,
range=(self.minval,self.maxval))[0].astype(double)[newaxis,:]
elif vals.ndim == 2:
return concatenate([self.histogram(vals_i) for vals_i in vals.T], 0)
else:
raise ValueError('HistogramFeatureManager.histogram expects '+
'either a 1-d or 2-d array of probabilities. Got %i-d array.'%
vals.ndim)
def percentiles(self, h, desired_percentiles):
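        # Estimate the requested percentiles per channel by linear
        # interpolation within the cumulative histogram; the result has one
        # row per channel and one column per requested percentile.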
if h.ndim == 1 or any([i==1 for i in h.shape]): h = h.reshape((1,-1))
h = h.T
nchannels = h.shape[1]
hcum = concatenate((zeros((1,nchannels)), h.cumsum(axis=0)), axis=0)
bin_edges = zeros((self.nbins+1, nchannels))
for i in range(nchannels):
bin_edges[:,i] = arange(self.minval,self.maxval+1e-10,
(self.maxval-self.minval)/float(self.nbins))
ps = zeros([len(desired_percentiles), h.shape[1]], dtype=double)
for i, p in enumerate(desired_percentiles):
b2 = (hcum>=p).argmax(axis=0)
b1 = (b2-1, arange(nchannels,dtype=int))
b2 = (b2, arange(nchannels,dtype=int))
slope = (bin_edges[b2]-bin_edges[b1]) / (hcum[b2]-hcum[b1])
delta = p - hcum[b1]
estim = bin_edges[b1] + delta*slope
error = slope==inf
estim[error] = (bin_edges[b2]+bin_edges[b1])[error]/2
ps[i] = estim
return ps.T
def normalized_histogram_from_cache(self, cache, desired_percentiles):
s = cache.sum(axis=1)[:,newaxis]
s[s==0] = 1
h = cache/s
ps = self.percentiles(h, desired_percentiles)
return h, ps
def create_node_cache(self, g, n):
node_idxs = list(g.node[n]['extent'])
if self.oriented:
ar = g.max_probabilities_r
else:
ar = g.non_oriented_probabilities_r
return self.histogram(ar[node_idxs,:])
def create_edge_cache(self, g, n1, n2):
edge_idxs = list(g[n1][n2]['boundary'])
if self.oriented:
ar = g.oriented_probabilities_r
else:
ar = g.non_oriented_probabilities_r
return self.histogram(ar[edge_idxs,:])
def update_node_cache(self, g, n1, n2, dst, src):
dst += src
def update_edge_cache(self, g, e1, e2, dst, src):
dst += src
def pixelwise_update_node_cache(self, g, n, dst, idxs, remove=False):
if len(idxs) == 0: return
a = -1.0 if remove else 1.0
if self.oriented:
ar = g.max_probabilities_r
else:
ar = g.non_oriented_probabilities_r
dst += a * self.histogram(ar[idxs,:])
def pixelwise_update_edge_cache(self, g, n1, n2, dst, idxs, remove=False):
if len(idxs) == 0: return
a = -1.0 if remove else 1.0
if self.oriented:
ar = g.oriented_probabilities_r
else:
ar = g.non_oriented_probabilities_r
dst += a * self.histogram(ar[idxs,:])
def JS_divergence(self, p, q):
m = (p+q)/2
return (self.KL_divergence(p, m) + self.KL_divergence(q, m))/2
def KL_divergence(self, p, q):
"""Return the Kullback-Leibler Divergence between two histograms."""
kl = []
if p.ndim == 1:
p = p[newaxis,:]
q = q[newaxis,:]
for i in range(len(p)):
ind = nonzero(p[i]*q[i])
if len(ind[0]) == 0:
k = 1.0
else:
k = (p[i][ind] * log( p[i][ind]/q[i][ind])).sum()
kl.append(k)
return array(kl)
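    # For reference (matches the loop above): D_KL(p || q) = sum_i p_i * log(p_i / q_i),
    # evaluated per histogram row and only over bins where both p_i and q_i are
    # nonzero; rows with no common support fall back to a value of 1.0.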
def compute_node_features(self, g, n, cache=None):
if not self.compute_histogram:
return array([])
if cache is None:
            cache = g.node[n][self.default_cache]
h, ps = self.normalized_histogram_from_cache(cache,
self.compute_percentiles)
return concatenate((h,ps), axis=1).ravel()
def compute_edge_features(self, g, n1, n2, cache=None):
if not self.compute_histogram:
return array([])
if cache is None:
cache = g[n1][n2][self.default_cache]
h, ps = self.normalized_histogram_from_cache(cache,
self.compute_percentiles)
return concatenate((h,ps), axis=1).ravel()
def compute_difference_features(self,g, n1, n2, cache1=None, cache2=None):
if cache1 is None:
cache1 = g.node[n1][self.default_cache]
h1, _ = self.normalized_histogram_from_cache(cache1,
self.compute_percentiles)
if cache2 is None:
cache2 = g.node[n2][self.default_cache]
h2, _ = self.normalized_histogram_from_cache(cache2,
self.compute_percentiles)
return self.JS_divergence(h1, h2)
class SquigglinessFeatureManager(NullFeatureManager):
def __init__(self, ndim=3, *args, **kwargs):
super(SquigglinessFeatureManager, self).__init__()
self.ndim = ndim
# cache is min and max coordinates of bounding box
if numpy_version < '1.6.0':
self.compute_bounding_box = self.compute_bounding_box_old
# uses older, slower version of numpy.unravel_index
def compute_bounding_box(self, indices, shape):
d = self.ndim
unraveled_indices = concatenate(
unravel_index(list(indices), shape)).reshape((-1,d), order='F')
m = unraveled_indices.min(axis=0)
M = unraveled_indices.max(axis=0)+ones(d)
return m, M
def compute_bounding_box_old(self, indices, shape):
d = self.ndim
unraveled_indices = concatenate(
[unravel_index(idx, shape) for idx in indices]).reshape((-1,d))
m = unraveled_indices.min(axis=0)
M = unraveled_indices.max(axis=0)+ones(d)
return m, M
def create_edge_cache(self, g, n1, n2):
edge_idxs = g[n1][n2]['boundary']
return concatenate(
self.compute_bounding_box(edge_idxs, g.segmentation.shape)
)
def update_edge_cache(self, g, e1, e2, dst, src):
dst[:self.ndim] = \
concatenate((dst[newaxis,:self.ndim], src[newaxis,:self.ndim]),
axis=0).min(axis=0)
dst[self.ndim:] = \
concatenate((dst[newaxis,self.ndim:], src[newaxis,self.ndim:]),
axis=0).max(axis=0)
def pixelwise_update_edge_cache(self, g, n1, n2, dst, idxs, remove=False):
if remove:
pass
# dst = self.create_edge_cache(g, n1, n2)
if len(idxs) == 0: return
b = concatenate(self.compute_bounding_box(idxs, g.segmentation.shape))
self.update_edge_cache(g, (n1,n2), None, dst, b)
def compute_edge_features(self, g, n1, n2, cache=None):
if cache is None:
cache = g[n1][n2][self.default_cache]
m, M = cache[:self.ndim], cache[self.ndim:]
plane_surface = sort(M-m)[1:].prod() * (3.0-g.pad_thickness)
return array([len(g[n1][n2]['boundary']) / plane_surface])
class CompositeFeatureManager(NullFeatureManager):
def __init__(self, children=[], *args, **kwargs):
super(CompositeFeatureManager, self).__init__()
self.children = children
def create_node_cache(self, *args, **kwargs):
return [c.create_node_cache(*args, **kwargs) for c in self.children]
def create_edge_cache(self, *args, **kwargs):
return [c.create_edge_cache(*args, **kwargs) for c in self.children]
def update_node_cache(self, g, n1, n2, dst, src):
for i, child in enumerate(self.children):
child.update_node_cache(g, n1, n2, dst[i], src[i])
def update_edge_cache(self, g, e1, e2, dst, src):
for i, child in enumerate(self.children):
child.update_edge_cache(g, e1, e2, dst[i], src[i])
def pixelwise_update_node_cache(self, g, n, dst, idxs, remove=False):
for i, child in enumerate(self.children):
child.pixelwise_update_node_cache(g, n, dst[i], idxs, remove)
def pixelwise_update_edge_cache(self, g, n1, n2, dst, idxs, remove=False):
for i, child in enumerate(self.children):
child.pixelwise_update_edge_cache(g, n1, n2, dst[i], idxs, remove)
def compute_node_features(self, g, n, cache=None):
if cache is None: cache = g.node[n][self.default_cache]
features = []
for i, child in enumerate(self.children):
features.append(child.compute_node_features(g, n, cache[i]))
return concatenate(features)
def compute_edge_features(self, g, n1, n2, cache=None):
if cache is None: cache = g[n1][n2][self.default_cache]
features = []
for i, child in enumerate(self.children):
features.append(child.compute_edge_features(g, n1, n2, cache[i]))
return concatenate(features)
def compute_difference_features(self, g, n1, n2, cache1=None, cache2=None):
if cache1 is None: cache1 = g.node[n1][self.default_cache]
if cache2 is None: cache2 = g.node[n2][self.default_cache]
features = []
for i, child in enumerate(self.children):
features.append(
child.compute_difference_features(g, n1, n2, cache1[i], cache2[i])
)
return concatenate(features)
def mean_and_sem(g, n1, n2):
bvals = g.probabilities_r[list(g[n1][n2]['boundary'])]
return array([mean(bvals), sem(bvals)]).reshape(1,2)
def mean_sem_and_n_from_cache_dict(d):
n, s1, s2 = d['feature-cache'][:3]
m = s1/n
v = 0 if n==1 else max(0, s2/(n-1) - n/(n-1)*m*m)
s = sqrt(v/n)
return m, s, n
def skew_from_cache_dict(d):
n, s1, s2, s3 = d['feature-cache'][:4]
m1 = s1/n
k1 = m1
m2 = s2/n
k2 = m2 - m1*m1
m3 = s3/n
k3 = m3 - 3*m2*m1 + 2*m1*m1*m1
return k3 * k2**(-1.5)
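# Illustrative note (not in the original code): the 'feature-cache' layout
# assumed by the two helpers above is [n, sum(x), sum(x**2), sum(x**3), ...],
# so the mean, SEM and skew can be recovered from running sums alone, e.g.
#
#   vals = array([0.2, 0.4, 0.9])
#   cache = {'feature-cache': array([len(vals), vals.sum(),
#                                    (vals**2).sum(), (vals**3).sum()])}
#   m, s, n = mean_sem_and_n_from_cache_dict(cache)   # m == vals.mean()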
def feature_set_a(g, n1, n2):
"""Return the mean, SEM, and size of n1, n2, and the n1-n2 boundary in g.
n1 is defined as the smaller of the two nodes, so the labels are swapped
accordingly if necessary before computing the statistics.
SEM: standard error of the mean, equal to sqrt(var/n)
"""
if len(g.node[n1]['extent']) > len(g.node[n2]['extent']):
n1, n2 = n2, n1
mb, sb, lb = mean_sem_and_n_from_cache_dict(g[n1][n2])
m1, s1, l1 = mean_sem_and_n_from_cache_dict(g.node[n1])
m2, s2, l2 = mean_sem_and_n_from_cache_dict(g.node[n2])
return array([mb, sb, lb, m1, s1, l1, m2, s2, l2]).reshape(1,9)
def node_feature_set_a(g, n):
"""Return the mean, standard deviation, SEM, size, and skewness of n.
Uses the probability of boundary within n.
"""
d = g.node[n]
m, s, l = mean_sem_and_n_from_cache_dict(d)
stdev = s*sqrt(l)
skew = skew_from_cache_dict(d)
return array([m, stdev, s, l, skew])
def h5py_stack(fn):
try:
a = array(h5py.File(fn, 'r')['stack'])
except Exception as except_inst:
print except_inst
raise
return a
class RandomForest(object):
def __init__(self, ntrees=255, use_feature_importance=False,
sample_classes_individually=False):
self.rf = VigraRandomForest(treeCount=ntrees,
sample_classes_individually=sample_classes_individually)
self.use_feature_importance = use_feature_importance
self.sample_classes_individually = sample_classes_individually
def fit(self, features, labels, num_train_examples=None, **kwargs):
idxs = range(len(features))
shuffle(idxs)
idxs = idxs[:num_train_examples]
features = self.check_features_vector(features[idxs])
labels = self.check_labels_vector(labels[idxs])
if self.use_feature_importance:
self.oob, self.feature_importance = \
self.rf.learnRFWithFeatureSelection(features, labels)
else:
self.oob = self.rf.learnRF(features, labels)
return self
def predict_proba(self, features):
features = self.check_features_vector(features)
return self.rf.predictProbabilities(features)
def predict(self, features):
features = self.check_features_vector(features)
return self.rf.predictLabels(features)
def check_features_vector(self, features):
if features.dtype != float32:
features = features.astype(float32)
if features.ndim == 1:
features = features[newaxis,:]
return features
def check_labels_vector(self, labels):
if labels.dtype != uint32:
if len(unique(labels[labels < 0])) == 1 and not (labels==0).any():
labels[labels < 0] = 0
else:
labels = labels + labels.min()
labels = labels.astype(uint32)
labels = labels.reshape((labels.size, 1))
return labels
def save_to_disk(self, fn, rfgroupname='rf', overwrite=True):
self.rf.writeHDF5(fn, rfgroupname, overwrite)
attr_list = ['oob', 'feature_importance', 'use_feature_importance']
f = h5py.File(fn)
for attr in attr_list:
if hasattr(self, attr):
f[attr] = getattr(self, attr)
def load_from_disk(self, fn, rfgroupname='rf'):
self.rf = VigraRandomForest(fn, rfgroupname)
f = h5py.File(fn, 'r')
groups = []
f.visit(groups.append)
attrs = [g for g in groups if not g.startswith(rfgroupname)]
for attr in attrs:
setattr(self, attr, array(f[attr]))
def read_rf_info(fn):
f = h5py.File(fn)
return map(array, [f['oob'], f['feature_importance']])
def concatenate_data_elements(alldata):
"""Return one big learning set from a list of learning sets.
A learning set is a list/tuple of length 4 containing features, labels,
weights, and node merge history.
"""
return map(concatenate, zip(*alldata))
def unique_learning_data_elements(alldata):
if type(alldata[0]) not in (list, tuple): alldata = [alldata]
f, l, w, h = concatenate_data_elements(alldata)
af = f.view('|S%d'%(f.itemsize*(len(f[0]))))
_, uids, iids = unique(af, return_index=True, return_inverse=True)
bcs = bincount(iids) #DBG
logging.debug( #DBG
'repeat feature vec min %d, mean %.2f, median %.2f, max %d.' %
(bcs.min(), mean(bcs), median(bcs), bcs.max())
)
def get_uniques(ar): return ar[uids]
return map(get_uniques, [f, l, w, h])
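# The view trick above, spelled out (illustrative): reinterpreting each row of
# the float feature matrix as a single fixed-width byte string lets np.unique
# deduplicate whole rows in one call, e.g. for f with shape (n, d):
#
#   af = f.view('|S%d' % (f.itemsize * d))   # one |S(itemsize*d) item per row
#   _, uids, iids = unique(af, return_index=True, return_inverse=True)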
def save_training_data_to_disk(data, fn, names=None, info='N/A'):
if names is None:
names = ['features', 'labels', 'weights', 'history']
fout = h5py.File(fn, 'w')
for data_elem, name in zip(data, names):
fout[name] = data_elem
fout.attrs['info'] = info
fout.close()
def load_training_data_from_disk(fn, names=None, info='N/A'):
if names is None:
names = ['features', 'labels', 'weights', 'history']
fin = h5py.File(fn, 'r')
data = []
for name in names:
data.append(array(fin[name]))
return data
def boundary_overlap_threshold(boundary_idxs, gt, tol_false, tol_true):
"""Return -1, 0 or 1 by thresholding overlaps between boundaries."""
n = len(boundary_idxs)
gt_boundary = 1-gt.ravel()[boundary_idxs].astype(bool)
fraction_true = gt_boundary.astype(double).sum() / n
if fraction_true > tol_true:
return 1
elif fraction_true > tol_false:
return 0
else:
return -1
def make_thresholded_boundary_overlap_loss(tol_false, tol_true):
"""Return a merge loss function based on boundary overlaps."""
def loss(g, n1, n2, gt):
boundary_idxs = list(g[n1][n2]['boundary'])
return \
boundary_overlap_threshold(boundary_idxs, gt, tol_false, tol_true)
return loss
def label_merges(g, merge_history, feature_map_function, gt, loss_function):
"""Replay an agglomeration history and label the loss of each merge."""
labels = zeros(len(merge_history))
number_of_features = feature_map_function(g, *g.edges_iter().next()).size
features = zeros((len(merge_history), number_of_features))
labeled_image = zeros(gt.shape, double)
for i, nodes in enumerate(ip.with_progress(
merge_history, title='Replaying merge history...',
pbar=ip.StandardProgressBar())):
n1, n2 = nodes
features[i,:] = feature_map_function(g, n1, n2)
labels[i] = loss_function(g, n1, n2, gt)
labeled_image.ravel()[list(g[n1][n2]['boundary'])] = 2+labels[i]
g.merge_nodes(n1,n2)
return features, labels, labeled_image
def select_classifier(cname, features=None, labels=None, **kwargs):
if 'svm'.startswith(cname):
del kwargs['class_weight']
c = SVC(probability=True, **kwargs)
elif 'logistic-regression'.startswith(cname):
c = LogisticRegression()
elif 'linear-regression'.startswith(cname):
c = LinearRegression()
elif 'random-forest'.startswith(cname):
try:
c = RandomForest()
except NameError:
logging.warning(' Tried to use random forest, but not available.'+
' Falling back on adaboost.')
cname = 'ada'
if 'adaboost'.startswith(cname):
c = AdaBoost(**kwargs)
if features is not None and labels is not None:
c = c.fit(features, labels, **kwargs)
return c
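# Note on the dispatch above (illustrative): each branch tests
# 'full-name'.startswith(cname), so any prefix of the classifier name is
# accepted and the first matching branch wins, e.g.
#
#   select_classifier('ada')    # AdaBoost
#   select_classifier('rand')   # random forest, falling back to AdaBoost
#                               # when the vigra-based RandomForest is missing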
def pickled(fn):
try:
obj = cPickle.load(open(fn, 'r'))
except cPickle.UnpicklingError:
obj = RandomForest()
obj.load_from_disk(fn)
return obj
arguments = argparse.ArgumentParser(add_help=False)
arggroup = arguments.add_argument_group('Classification options')
arggroup.add_argument('-c', '--classifier', default='ada',
help='''Choose the classifier to use. Default: adaboost.
Options: svm, logistic-regression, linear-regression,
random-forest, adaboost'''
)
arggroup.add_argument('-k', '--load-classifier',
type=pickled, metavar='PCK_FILE',
help='Load and use a pickled classifier as a merge priority function.'
)
arggroup.add_argument('-f', '--feature-map-function', metavar='FCT_NAME',
default='feature_set_a',
help='Use named function as feature map (ignored when -c is not used).'
)
arggroup.add_argument('-T', '--training-data', metavar='HDF5_FN', type=str,
help='Load training data from file.'
)
arggroup.add_argument('-N', '--node-split-classifier', metavar='HDF5_FN',
type=str,
help='Load a node split classifier and split nodes when required.'
)
if __name__ == '__main__':
from agglo import best_possible_segmentation, Rag, boundary_mean, \
classifier_probability, random_priority
parser = argparse.ArgumentParser(
parents=[arguments],
description='Create an agglomeration classifier.'
)
parser.add_argument('ws', type=h5py_stack,
help='Watershed volume, in HDF5 format.'
)
parser.add_argument('gt', type=h5py_stack,
help='Ground truth volume, in HDF5 format also.'
)
parser.add_argument('probs', type=h5py_stack,
help='''Probabilities volume, in HDF ... you get the idea.'''
)
parser.add_argument('fout', help='.pck filename to save the classifier.')
parser.add_argument('-t', '--max-threshold', type=float, default=255,
help='Agglomerate until this threshold'
)
parser.add_argument('-s', '--save-training-data', metavar='FILE',
help='Save the generated training data to FILE (HDF5 format).'
)
parser.add_argument('-b', '--balance-classes', action='store_true',
default=False,
help='Ensure both true edges and false edges are equally represented.'
)
parser.add_argument('-K', '--kernel', default='rbf',
help='The kernel for an SVM classifier.'
)
parser.add_argument('-o', '--objective-function', metavar='FCT_NAME',
default='random_priority', help='The merge priority function name.'
)
parser.add_argument('--save-node-training-data', metavar='FILE',
help='Save node features and labels to FILE.'
)
parser.add_argument('--node-classifier', metavar='FILE',
help='Train and output a node split classifier.'
)
args = parser.parse_args()
feature_map_function = eval(args.feature_map_function)
if args.load_classifier is not None:
mpf = classifier_probability(eval(args.feature_map_function),
args.load_classifier)
else:
mpf = eval(args.objective_function)
wsg = Rag(args.ws, args.probs, mpf)
features, labels, weights, history, ave_sizes = \
wsg.learn_agglomerate(args.gt, feature_map_function)
print 'shapes: ', features.shape, labels.shape
if args.load_classifier is not None:
try:
f = h5py.File(args.save_training_data)
old_features = array(f['samples'])
old_labels = array(f['labels'])
features = concatenate((features, old_features), 0)
labels = concatenate((labels, old_labels), 0)
except:
pass
print "fitting classifier of size, pos: ", labels.size, (labels==1).sum()
if args.balance_classes:
cw = 'auto'
else:
cw = {-1:1, 1:1}
if args.save_training_data is not None:
try:
os.remove(args.save_training_data)
except OSError:
pass
f = h5py.File(args.save_training_data)
f['samples'] = features
f['labels'] = labels
f['history'] = history
f['size'] = ave_sizes
c = select_classifier(args.classifier, features=features, labels=labels,
class_weight=cw, kernel=args.kernel)
print "saving classifier..."
try:
cPickle.dump(c, open(os.path.expanduser(args.fout), 'w'), -1)
except RuntimeError:
os.remove(os.path.expanduser(args.fout))
c.save_to_disk(os.path.expanduser(args.fout))
print 'Warning: unable to pickle classifier to :', args.fout
| mit |
pilou-/ansible | lib/ansible/plugins/doc_fragments/oneview.py | 43 | 2374 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# OneView doc fragment
DOCUMENTATION = r'''
options:
config:
description:
- Path to a .json configuration file containing the OneView client configuration.
The configuration file is optional and when used should be present in the host running the ansible commands.
If the file path is not provided, the configuration will be loaded from environment variables.
          For links to example configuration files and details on how to use the environment variables, see the notes section.
type: path
requirements:
- python >= 2.7.9
notes:
- "A sample configuration file for the config parameter can be found at:
U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)"
- "Check how to use environment variables for configuration at:
U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)"
- "Additional Playbooks for the HPE OneView Ansible modules can be found at:
U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)"
- "The OneView API version used will directly affect returned and expected fields in resources.
       Information on setting the desired API version can be found at:
U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)"
'''
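    # Illustrative usage (not part of this file): modules opt in to these
    # fragments from their own DOCUMENTATION via the standard mechanism, e.g.
    #
    #   extends_documentation_fragment:
    #       - oneview
    #       - oneview.validateetag
    #       - oneview.factsparams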
VALIDATEETAG = r'''
options:
validate_etag:
description:
- When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag
for the resource matches the ETag provided in the data.
type: bool
default: yes
'''
FACTSPARAMS = r'''
options:
params:
description:
- List of params to delimit, filter and sort the list of resources.
- "params allowed:
- C(start): The first item to return, using 0-based indexing.
- C(count): The number of resources to return.
- C(filter): A general filter/query string to narrow the list of items returned.
- C(sort): The sort order of the returned data set."
type: dict
'''
| gpl-3.0 |
rajanshah/dx | dx/dx_valuation.py | 5 | 49297 | #
# DX Analytics
# Derivatives Instruments and Portfolio Valuation Classes
# dx_valuation.py
#
# DX Analytics is a financial analytics library, mainly for
# derviatives modeling and pricing by Monte Carlo simulation
#
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
from dx_models import *
import statsmodels.api as sm
# Classes for single risk factor instrument valuation
class valuation_class_single(object):
''' Basic class for single-risk factor instrument valuation.
Attributes
==========
name : string
name of the object
underlying :
instance of simulation class
mar_env : instance of market_environment
market environment data for valuation
payoff_func : string
derivatives payoff in Python syntax
Example: 'np.maximum(maturity_value - 100, 0)'
where maturity_value is the NumPy vector with
respective values of the underlying
Example: 'np.maximum(instrument_values - 100, 0)'
where instrument_values is the NumPy matrix with
values of the underlying over the whole time/path grid
Methods
=======
update:
updates selected valuation parameters
delta :
returns the delta of the derivative
vega :
returns the vega of the derivative
'''
def __init__(self, name, underlying, mar_env, payoff_func=''):
try:
self.name = name
self.pricing_date = mar_env.pricing_date
try:
self.strike = mar_env.get_constant('strike')
# strike is optional
except:
pass
self.maturity = mar_env.get_constant('maturity')
self.currency = mar_env.get_constant('currency')
# simulation parameters and discount curve from simulation object
self.frequency = underlying.frequency
self.paths = underlying.paths
self.discount_curve = underlying.discount_curve
self.payoff_func = payoff_func
self.underlying = underlying
# provide pricing_date and maturity to underlying
self.underlying.special_dates.extend([self.pricing_date,
self.maturity])
except:
print "Error parsing market environment."
def update(self, initial_value=None, volatility=None,
strike=None, maturity=None):
if initial_value is not None:
self.underlying.update(initial_value=initial_value)
if volatility is not None:
self.underlying.update(volatility=volatility)
if strike is not None:
self.strike = strike
if maturity is not None:
self.maturity = maturity
# add new maturity date if not in time_grid
if not maturity in self.underlying.time_grid:
self.underlying.special_dates.append(maturity)
self.underlying.instrument_values = None
def delta(self, interval=None, accuracy=4):
if interval is None:
interval = self.underlying.initial_value / 50.
# forward-difference approximation
# calculate left value for numerical delta
value_left = self.present_value(fixed_seed=True)
# numerical underlying value for right value
initial_del = self.underlying.initial_value + interval
self.underlying.update(initial_value=initial_del)
# calculate right value for numerical delta
value_right = self.present_value(fixed_seed=True)
# reset the initial_value of the simulation object
self.underlying.update(initial_value=initial_del - interval)
delta = (value_right - value_left) / interval
# correct for potential numerical errors
if delta < -1.0:
return -1.0
elif delta > 1.0:
return 1.0
else:
return round(delta, accuracy)
def vega(self, interval=0.01, accuracy=4):
if interval < self.underlying.volatility / 50.:
interval = self.underlying.volatility / 50.
# forward-difference approximation
# calculate the left value for numerical vega
value_left = self.present_value(fixed_seed=True)
# numerical volatility value for right value
vola_del = self.underlying.volatility + interval
# update the simulation object
self.underlying.update(volatility=vola_del)
# calculate the right value of numerical vega
value_right = self.present_value(fixed_seed=True)
# reset volatility value of simulation object
self.underlying.update(volatility=vola_del - interval)
vega = (value_right - value_left) / interval
return round(vega, accuracy)
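# Both Greeks above are plain forward-difference estimates (for reference):
#
#   delta ~ (V(S0 + h) - V(S0)) / h         with h = initial_value / 50 by default
#   vega  ~ (V(sigma + h) - V(sigma)) / h   with h >= volatility / 50
#
# so the results are Monte Carlo estimates and benefit from fixed_seed=True.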
class valuation_mcs_european_single(valuation_class_single):
''' Class to value European options with arbitrary payoff
by single-factor Monte Carlo simulation.
Methods
=======
generate_payoff :
returns payoffs given the paths and the payoff function
present_value :
returns present value (Monte Carlo estimator)
'''
def generate_payoff(self, fixed_seed=False):
'''
Attributes
==========
fixed_seed : boolean
            use same/fixed seed for the valuation
'''
try:
# strike defined?
strike = self.strike
except:
pass
paths = self.underlying.get_instrument_values(fixed_seed=fixed_seed)
time_grid = self.underlying.time_grid
try:
time_index = np.where(time_grid == self.maturity)[0]
time_index = int(time_index)
except:
print "Maturity date not in time grid of underlying."
maturity_value = paths[time_index]
# average value over whole path
mean_value = np.mean(paths[:time_index], axis=1)
# maximum value over whole path
max_value = np.amax(paths[:time_index], axis=1)[-1]
# minimum value over whole path
min_value = np.amin(paths[:time_index], axis=1)[-1]
try:
payoff = eval(self.payoff_func)
return payoff
except:
print "Error evaluating payoff function."
def present_value(self, accuracy=6, fixed_seed=False, full=False):
'''
Attributes
==========
accuracy : int
number of decimals in returned result
        fixed_seed : boolean
            use same/fixed seed for the valuation
'''
cash_flow = self.generate_payoff(fixed_seed=fixed_seed)
discount_factor = self.discount_curve.get_discount_factors(
self.underlying.time_grid, self.paths)[1][0]
result = np.sum(discount_factor * cash_flow) / len(cash_flow)
if full:
return round(result, accuracy), discount_factor * cash_flow
else:
return round(result, accuracy)
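# Minimal usage sketch (illustrative; assumes a simulation object 'gbm' and a
# market_environment 'me_call' with strike/maturity/currency set, both built
# with the helpers imported from dx_models):
#
#   eur_call = valuation_mcs_european_single(
#       'eur_call', underlying=gbm, mar_env=me_call,
#       payoff_func='np.maximum(maturity_value - strike, 0)')
#   eur_call.present_value()
#   eur_call.delta()
#   eur_call.vega()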
class valuation_mcs_american_single(valuation_class_single):
''' Class to value American options with arbitrary payoff
by single-factor Monte Carlo simulation.
Methods
=======
generate_payoff :
returns payoffs given the paths and the payoff function
present_value :
returns present value (LSM Monte Carlo estimator)
according to Longstaff-Schwartz (2001)
'''
def generate_payoff(self, fixed_seed=False):
'''
Attributes
==========
        fixed_seed : boolean
            use same/fixed seed for the valuation
'''
try:
strike = self.strike
except:
pass
paths = self.underlying.get_instrument_values(fixed_seed=fixed_seed)
time_grid = self.underlying.time_grid
try:
time_index_start = int(np.where(time_grid == self.pricing_date)[0])
time_index_end = int(np.where(time_grid == self.maturity)[0])
except:
print "Maturity date not in time grid of underlying."
instrument_values = paths[time_index_start:time_index_end + 1]
try:
payoff = eval(self.payoff_func)
return instrument_values, payoff, time_index_start, time_index_end
except:
print "Error evaluating payoff function."
def present_value(self, accuracy=3, fixed_seed=False, bf=5, full=False):
'''
Attributes
==========
accuracy : int
number of decimals in returned result
        fixed_seed : boolean
            use same/fixed seed for the valuation
bf : int
number of basis functions for regression
'''
instrument_values, inner_values, time_index_start, time_index_end = \
self.generate_payoff(fixed_seed=fixed_seed)
time_list = \
self.underlying.time_grid[time_index_start:time_index_end + 1]
discount_factors = self.discount_curve.get_discount_factors(
time_list, self.paths, dtobjects=True)[1]
V = inner_values[-1]
for t in range(len(time_list) - 2, 0, -1):
# derive relevant discount factor for given time interval
df = discount_factors[t] / discount_factors[t + 1]
# regression step
rg = np.polyfit(instrument_values[t], V * df, bf)
# calculation of continuation values per path
C = np.polyval(rg, instrument_values[t])
# optimal decision step:
# if condition is satisfied (inner value > regressed cont. value)
# then take inner value; take actual cont. value otherwise
V = np.where(inner_values[t] > C, inner_values[t], V * df)
df = discount_factors[0] / discount_factors[1]
result = np.sum(df * V) / len(V)
if full:
return round(result, accuracy), df * V
else:
return round(result, accuracy)
# Classes for multi risk factor instrument valuation
class valuation_class_multi(object):
''' Basic class for multi-risk factor instrument valuation.
Attributes
==========
name : string
name of the object
mar_env : instance of market_environment
market environment data for valuation
underlyings : dictionary
instances of model classes
correlations : list
correlations between underlyings
payoff_func : string
derivatives payoff in Python syntax
Example: 'np.maximum(maturity_value[key] - 100, 0)'
where maturity_value[key] is the NumPy vector with
respective values of the underlying 'key' from the
risk_factors dictionary
Methods
=======
update:
updates selected valuation parameters
delta :
returns the delta of the derivative
vega :
returns the vega of the derivative
'''
def __init__(self, name, val_env, risk_factors=None, correlations=None,
payoff_func='', fixed_seed=False, portfolio=False):
try:
self.name = name
self.val_env = val_env
self.currency = self.val_env.get_constant('currency')
self.pricing_date = val_env.pricing_date
try:
self.strike = self.val_env.get_constant('strike')
# strike optional
except:
pass
self.maturity = self.val_env.get_constant('maturity')
self.frequency = self.val_env.get_constant('frequency')
self.paths = self.val_env.get_constant('paths')
self.discount_curve = self.val_env.get_curve('discount_curve')
self.risk_factors = risk_factors
self.underlyings = set()
if portfolio is False:
self.underlying_objects = {}
else:
self.underlying_objects = risk_factors
self.correlations = correlations
self.payoff_func = payoff_func
self.fixed_seed = fixed_seed
self.instrument_values = {}
try:
self.time_grid = self.val_env.get_curve('time_grid')
except:
self.time_grid = None
self.correlation_matrix = None
except:
print "Error parsing market environment."
# Generating general time grid
if self.time_grid is None:
start = self.val_env.get_constant('starting_date')
end = self.val_env.get_constant('final_date')
maturity = self.maturity
time_grid = pd.date_range(start=start, end=end,
freq=self.val_env.get_constant('frequency')
).to_pydatetime()
if start in time_grid and end in time_grid and \
maturity in time_grid:
self.time_grid = time_grid
else:
time_grid = list(time_grid)
if maturity not in time_grid:
time_grid.insert(0, maturity)
if start not in time_grid:
time_grid.insert(0, start)
if end not in time_grid:
time_grid.append(end)
time_grid.sort()
self.time_grid = np.array(time_grid)
self.val_env.add_curve('time_grid', self.time_grid)
if portfolio is False:
if correlations is not None:
ul_list = sorted(self.risk_factors)
correlation_matrix = np.zeros((len(ul_list), len(ul_list)))
np.fill_diagonal(correlation_matrix, 1.0)
correlation_matrix = pd.DataFrame(correlation_matrix,
index=ul_list, columns=ul_list)
for corr in correlations:
if corr[2] >= 1.0:
corr[2] = 0.999999999999
correlation_matrix[corr[0]].ix[corr[1]] = corr[2]
correlation_matrix[corr[1]].ix[corr[0]] = corr[2]
self.correlation_matrix = correlation_matrix
cholesky_matrix = np.linalg.cholesky(
np.array(correlation_matrix))
# dictionary with index positions
rn_set = {}
for asset in self.risk_factors:
rn_set[asset] = ul_list.index(asset)
# random numbers array
random_numbers = sn_random_numbers((len(rn_set),
len(self.time_grid),
self.val_env.constants['paths']),
fixed_seed=self.fixed_seed)
# adding all to valuation environment
self.val_env.add_list('cholesky_matrix', cholesky_matrix)
self.val_env.add_list('rn_set', rn_set)
self.val_env.add_list('random_numbers', random_numbers)
for asset in self.risk_factors:
mar_env = self.risk_factors[asset]
mar_env.add_environment(val_env)
model = models[mar_env.constants['model']]
if correlations is not None:
self.underlying_objects[asset] = model(asset,
mar_env, True)
else:
self.underlying_objects[asset] = model(asset,
mar_env, False)
def get_instrument_values(self, fixed_seed=True):
for obj in self.underlying_objects.values():
if obj.instrument_values is None:
obj.generate_paths(fixed_seed=fixed_seed)
def update(self, key=None, initial_value=None, volatility=None,
strike=None, maturity=None):
if key is not None:
underlying = self.underlying_objects[key]
if initial_value is not None:
underlying.update(initial_value=initial_value)
if volatility is not None:
underlying.update(volatility=volatility)
if strike is not None:
self.strike = strike
if maturity is not None:
self.maturity = maturity
            for underlying in self.underlying_objects.values():
underlying.update(final_date=self.maturity)
self.get_instrument_values()
def delta(self, key, interval=None):
if len(self.instrument_values) == 0:
self.get_instrument_values()
asset = self.underlying_objects[key]
if interval is None:
interval = asset.initial_value / 50.
value_left = self.present_value()
start_value = asset.initial_value
initial_del = start_value + interval
asset.update(initial_value=initial_del)
self.get_instrument_values()
value_right = self.present_value()
asset.update(start_value)
self.instrument_values = {}
delta = (value_right - value_left) / interval
if delta < -1.0:
return -1.0
elif delta > 1.0:
return 1.0
else:
return delta
def vega(self, key, interval=0.01):
if len(self.instrument_values) == 0:
self.get_instrument_values()
asset = self.underlying_objects[key]
if interval < asset.volatility / 50.:
interval = asset.volatility / 50.
value_left = self.present_value()
start_vola = asset.volatility
vola_del = start_vola + interval
asset.update(volatility=vola_del)
self.get_instrument_values()
value_right = self.present_value()
asset.update(volatility=start_vola)
self.instrument_values = {}
return (value_right - value_left) / interval
class valuation_mcs_european_multi(valuation_class_multi):
''' Class to value European options with arbitrary payoff
by multi-risk factor Monte Carlo simulation.
Methods
=======
generate_payoff :
returns payoffs given the paths and the payoff function
present_value :
returns present value (Monte Carlo estimator)
'''
def generate_payoff(self, fixed_seed=True):
self.get_instrument_values(fixed_seed=True)
paths = {key: name.instrument_values for key, name
in self.underlying_objects.items()}
time_grid = self.time_grid
try:
time_index = np.where(time_grid == self.maturity)[0]
time_index = int(time_index)
except:
print "Maturity date not in time grid of underlying."
maturity_value = {}
mean_value = {}
max_value = {}
min_value = {}
for key in paths:
maturity_value[key] = paths[key][time_index]
mean_value[key] = np.mean(paths[key][:time_index], axis=1)
max_value[key] = np.amax(paths[key][:time_index], axis=1)
min_value[key] = np.amin(paths[key][:time_index], axis=1)
try:
payoff = eval(self.payoff_func)
return payoff
except:
print "Error evaluating payoff function."
def present_value(self, accuracy=3, fixed_seed=True, full=False):
cash_flow = self.generate_payoff(fixed_seed)
discount_factor = self.discount_curve.get_discount_factors(
self.time_grid, self.paths)[1][0]
result = np.sum(discount_factor * cash_flow) / len(cash_flow)
if full:
            return round(result, accuracy), discount_factor * cash_flow
else:
return round(result, accuracy)
class valuation_mcs_american_multi(valuation_class_multi):
''' Class to value American options with arbitrary payoff
by multi-risk factor Monte Carlo simulation.
Methods
=======
generate_payoff :
returns payoffs given the paths and the payoff function
present_value :
returns present value (Monte Carlo estimator)
'''
def generate_payoff(self, fixed_seed=True):
self.get_instrument_values(fixed_seed=True)
self.instrument_values = {key: name.instrument_values for key, name
in self.underlying_objects.items()}
try:
time_index_start = int(np.where(self.time_grid == self.pricing_date)[0])
time_index_end = int(np.where(self.time_grid == self.maturity)[0])
except:
print "Maturity date not in time grid of underlying."
instrument_values = {}
for key, obj in self.instrument_values.items():
instrument_values[key] = \
self.instrument_values[key][time_index_start:time_index_end
+ 1]
try:
payoff = eval(self.payoff_func)
return instrument_values, payoff, time_index_start, time_index_end
except:
print "Error evaluating payoff function."
def present_value(self, accuracy=3, fixed_seed=True, full=False):
instrument_values, inner_values, time_index_start, time_index_end = \
self.generate_payoff(fixed_seed=fixed_seed)
time_list = self.time_grid[time_index_start:time_index_end + 1]
discount_factors = self.discount_curve.get_discount_factors(
time_list, self.paths, dtobjects=True)[1]
V = inner_values[-1]
for t in range(len(time_list) - 2, 0, -1):
df = discount_factors[t] / discount_factors[t + 1]
matrix = {}
for asset_1 in instrument_values.keys():
matrix[asset_1] = instrument_values[asset_1][t]
for asset_2 in instrument_values.keys():
matrix[asset_1 + asset_2] = instrument_values[asset_1][t] \
* instrument_values[asset_2][t]
rg = sm.OLS(V * df, np.array(matrix.values()).T).fit()
C = np.sum(rg.params * np.array(matrix.values()).T, axis=1)
V = np.where(inner_values[t] > C, inner_values[t], V * df)
df = discount_factors[0] / discount_factors[1]
result = np.sum(df * V) / len(V)
if full:
return round(result, accuracy), df * V
else:
return round(result, accuracy)
# Classes for derivatives portfolio valuation
class derivatives_position(object):
''' Class to model a derivatives position.
Attributes
==========
name : string
name of the object
quantity : float
number of derivatives instruments making up the position
underlyings : list of strings
        names of the risk factors (underlyings) the derivative depends on
mar_env : instance of market_environment
constants, lists and curves relevant for valuation_class
otype : string
valuation class to use
payoff_func : string
payoff string for the derivative
Methods
=======
get_info :
prints information about the derivative position
'''
def __init__(self, name, quantity, underlyings, mar_env, otype, payoff_func):
self.name = name
self.quantity = quantity
self.underlyings = underlyings
self.mar_env = mar_env
self.otype = otype
self.payoff_func = payoff_func
def get_info(self):
print "NAME"
print self.name, '\n'
print "QUANTITY"
print self.quantity, '\n'
print "UNDERLYINGS"
print self.underlyings, '\n'
print "MARKET ENVIRONMENT"
print "\n**Constants**"
for key in self.mar_env.constants:
print key, self.mar_env.constants[key]
print "\n**Lists**"
for key in self.mar_env.lists:
print key, self.mar_env.lists[key]
print "\n**Curves**"
for key in self.mar_env.curves:
print key, self.mar_env.curves[key]
print "\nOPTION TYPE"
print self.otype, '\n'
print "PAYOFF FUNCTION"
print self.payoff_func
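# Illustrative sketch (names are made up, not from the library): a position
# only bundles a quantity, a market environment and a payoff string, e.g.
#
#   me_put = market_environment('me_put', dt.datetime(2015, 1, 1))
#   me_put.add_constant('maturity', dt.datetime(2015, 12, 31))
#   me_put.add_constant('strike', 40.)
#   me_put.add_constant('currency', 'EUR')
#   am_put_pos = derivatives_position(
#       'am_put_pos', quantity=3, underlyings=['gbm'],
#       mar_env=me_put, otype='American single',
#       payoff_func='np.maximum(strike - instrument_values, 0)')
#   am_put_pos.get_info()
#
# where market_environment comes from dx_models (dt standing for the datetime
# module) and 'gbm' must match a key of the risk_factors dict later handed to
# derivatives_portfolio.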
models = {'gbm' : geometric_brownian_motion,
'jd' : jump_diffusion,
'sv' : stochastic_volatility,
'svjd' : stoch_vol_jump_diffusion,
'srd' : square_root_diffusion,
'srjd' : square_root_jump_diffusion,
'srjd+' : square_root_jump_diffusion_plus}
otypes = {'European single' : valuation_mcs_european_single,
'American single' : valuation_mcs_american_single,
'European multi' : valuation_mcs_european_multi,
'American multi' : valuation_mcs_american_multi}
class derivatives_portfolio(object):
''' Class for building and valuing portfolios of derivatives positions.
Attributes
==========
name : str
name of the object
positions : dict
dictionary of positions (instances of derivatives_position class)
val_env : market_environment
market environment for the valuation
risk_factors : dict
dictionary of market environments for the risk_factors
correlations : list or pd.DataFrame
correlations between risk_factors
fixed_seed : boolean
flag for fixed rng seed
Methods
=======
get_positions :
prints information about the single portfolio positions
get_values :
estimates and returns positions values
get_present_values :
returns the full distribution of the simulated portfolio values
get_statistics :
returns a pandas DataFrame object with portfolio statistics
get_port_risk :
estimates sensitivities for point-wise parameter shocks
'''
def __init__(self, name, positions, val_env, risk_factors,
correlations=None, fixed_seed=False, parallel=False):
self.name = name
self.positions = positions
self.val_env = val_env
self.risk_factors = risk_factors
self.underlyings = set()
if correlations is None or correlations is False:
self.correlations = None
else:
self.correlations = correlations
self.time_grid = None
self.underlying_objects = {}
self.valuation_objects = {}
self.fixed_seed = fixed_seed
self.parallel = parallel
self.special_dates = []
for pos in self.positions:
# determine earliest starting_date
self.val_env.constants['starting_date'] = \
min(self.val_env.constants['starting_date'],
positions[pos].mar_env.pricing_date)
# determine latest date of relevance
self.val_env.constants['final_date'] = \
max(self.val_env.constants['final_date'],
positions[pos].mar_env.constants['maturity'])
# collect all underlyings
# add to set; avoids redundancy
for ul in positions[pos].underlyings:
self.underlyings.add(ul)
# generating general time grid
start = self.val_env.constants['starting_date']
end = self.val_env.constants['final_date']
time_grid = pd.date_range(start=start, end=end,
freq=self.val_env.constants['frequency']
).to_pydatetime()
time_grid = list(time_grid)
for pos in self.positions:
maturity_date = positions[pos].mar_env.constants['maturity']
if maturity_date not in time_grid:
time_grid.insert(0, maturity_date)
self.special_dates.append(maturity_date)
if start not in time_grid:
time_grid.insert(0, start)
if end not in time_grid:
time_grid.append(end)
# delete duplicate entries
# & sort dates in time_grid
time_grid = sorted(set(time_grid))
self.time_grid = np.array(time_grid)
self.val_env.add_list('time_grid', self.time_grid)
# taking care of correlations
ul_list = sorted(self.underlyings)
correlation_matrix = np.zeros((len(ul_list), len(ul_list)))
np.fill_diagonal(correlation_matrix, 1.0)
correlation_matrix = pd.DataFrame(correlation_matrix,
index=ul_list, columns=ul_list)
if self.correlations is not None:
if isinstance(self.correlations, list):
# if correlations are given as list of list/tuple objects
for corr in self.correlations:
if corr[2] >= 1.0:
corr[2] = 0.999999999999
if corr[2] <= -1.0:
corr[2] = -0.999999999999
# fill correlation matrix
correlation_matrix[corr[0]].ix[corr[1]] = corr[2]
correlation_matrix[corr[1]].ix[corr[0]] = corr[2]
# determine Cholesky matrix
cholesky_matrix = np.linalg.cholesky(np.array(
correlation_matrix))
else:
# if correlation matrix was already given as pd.DataFrame
cholesky_matrix = np.linalg.cholesky(np.array(
self.correlations))
else:
cholesky_matrix = np.linalg.cholesky(np.array(
correlation_matrix))
# dictionary with index positions for the
# slice of the random number array to be used by
# respective underlying
rn_set = {}
for asset in self.underlyings:
rn_set[asset] = ul_list.index(asset)
# random numbers array, to be used by
# all underlyings (if correlations exist)
random_numbers = sn_random_numbers(
(len(rn_set),
len(self.time_grid),
self.val_env.constants['paths']),
fixed_seed=self.fixed_seed)
# adding all to valuation environment which is
# to be shared with every underlying
self.val_env.add_list('correlation_matrix', correlation_matrix)
self.val_env.add_list('cholesky_matrix', cholesky_matrix)
self.val_env.add_list('random_numbers', random_numbers)
self.val_env.add_list('rn_set', rn_set)
for asset in self.underlyings:
# select market environment of asset
mar_env = self.risk_factors[asset]
# add valuation environment to market environment
mar_env.add_environment(val_env)
# select the right simulation class
model = models[mar_env.constants['model']]
# instantiate simulation object
if self.correlations is not None:
corr = True
else:
corr = False
self.underlying_objects[asset] = model(asset, mar_env,
corr=corr)
for pos in positions:
# select right valuation class (European, American)
val_class = otypes[positions[pos].otype]
# pick the market environment and add the valuation environment
mar_env = positions[pos].mar_env
mar_env.add_environment(self.val_env)
# instantiate valuation class single risk vs. multi risk
if self.positions[pos].otype[-5:] == 'multi':
underlying_objects = {}
for obj in positions[pos].underlyings:
underlying_objects[obj] = self.underlying_objects[obj]
self.valuation_objects[pos] = \
val_class(name=positions[pos].name,
val_env=mar_env,
risk_factors=underlying_objects,
payoff_func=positions[pos].payoff_func,
portfolio=True)
else:
self.valuation_objects[pos] = \
val_class(name=positions[pos].name,
mar_env=mar_env,
underlying=self.underlying_objects[
positions[pos].underlyings[0]],
payoff_func=positions[pos].payoff_func)
def get_positions(self):
''' Convenience method to get information about
all derivatives positions in a portfolio. '''
for pos in self.positions:
bar = '\n' + 50 * '-'
print bar
self.positions[pos].get_info()
print bar
def get_values(self, fixed_seed=False):
''' Providing portfolio position values. '''
res_list = []
if self.parallel is True:
self.underlying_objects = \
simulate_parallel(self.underlying_objects.values())
results = value_parallel(self.valuation_objects.values())
# iterate over all positions in portfolio
for pos in self.valuation_objects:
pos_list = []
if self.parallel is True:
present_value = results[self.valuation_objects[pos].name]
else:
present_value = self.valuation_objects[pos].present_value()
pos_list.append(pos)
pos_list.append(self.positions[pos].name)
pos_list.append(self.positions[pos].quantity)
pos_list.append(self.positions[pos].otype)
pos_list.append(self.positions[pos].underlyings)
# calculate all present values for the single instruments
pos_list.append(present_value)
pos_list.append(self.valuation_objects[pos].currency)
# single instrument value times quantity
pos_list.append(present_value * self.positions[pos].quantity)
res_list.append(pos_list)
res_df = pd.DataFrame(res_list, columns=['position', 'name', 'quantity',
'otype', 'risk_facts', 'value',
'currency', 'pos_value'])
print 'Total\n', res_df[['pos_value']].sum()
return res_df
def get_present_values(self, fixed_seed=False):
''' Get full distribution of present values. '''
present_values = np.zeros(self.val_env.get_constant('paths'))
if self.parallel is True:
self.underlying_objects = \
simulate_parallel(self.underlying_objects.values())
results = value_parallel(self.valuation_objects.values(),
full=True)
for pos in self.valuation_objects:
present_values += results[self.valuation_objects[pos].name] \
* self.positions[pos].quantity
else:
for pos in self.valuation_objects:
present_values += self.valuation_objects[pos].present_value(
fixed_seed = fixed_seed, full=True)[1] \
* self.positions[pos].quantity
return present_values
def get_statistics(self, fixed_seed=None):
''' Providing position statistics. '''
res_list = []
if fixed_seed is None:
fixed_seed = self.fixed_seed
if self.parallel is True:
self.underlying_objects = \
simulate_parallel(self.underlying_objects.values())
results = value_parallel(self.valuation_objects.values(),
fixed_seed=fixed_seed)
delta_list = greeks_parallel(self.valuation_objects.values(),
Greek='Delta')
vega_list = greeks_parallel(self.valuation_objects.values(),
Greek='Vega')
# iterate over all positions in portfolio
for pos in self.valuation_objects:
pos_list = []
if self.parallel is True:
present_value = results[self.valuation_objects[pos].name]
else:
present_value = self.valuation_objects[pos].present_value(
fixed_seed=fixed_seed, accuracy=3)
pos_list.append(pos)
pos_list.append(self.positions[pos].name)
pos_list.append(self.positions[pos].quantity)
pos_list.append(self.positions[pos].otype)
pos_list.append(self.positions[pos].underlyings)
# calculate all present values for the single instruments
pos_list.append(present_value)
pos_list.append(self.valuation_objects[pos].currency)
# single instrument value times quantity
pos_list.append(present_value * self.positions[pos].quantity)
if self.positions[pos].otype[-5:] == 'multi':
# multiple delta and vega values for multi-risk derivatives
delta_dict = {}
vega_dict = {}
for key in self.valuation_objects[pos].underlying_objects.keys():
# delta and vega per position and underlying
delta_dict[key] = round(self.valuation_objects[pos].delta(key)
* self.positions[pos].quantity, 6)
vega_dict[key] = round(self.valuation_objects[pos].vega(key)
* self.positions[pos].quantity, 6)
pos_list.append(str(delta_dict))
pos_list.append(str(vega_dict))
else:
if self.parallel is True:
# delta from parallel calculation
pos_list.append(delta_list[pos]
* self.positions[pos].quantity)
# vega from parallel calculation
pos_list.append(vega_list[pos]
* self.positions[pos].quantity)
else:
# delta per position
pos_list.append(self.valuation_objects[pos].delta()
* self.positions[pos].quantity)
# vega per position
pos_list.append(self.valuation_objects[pos].vega()
* self.positions[pos].quantity)
res_list.append(pos_list)
res_df = pd.DataFrame(res_list, columns=['position', 'name',
'quantity', 'otype',
'risk_facts', 'value',
'currency', 'pos_value',
'pos_delta', 'pos_vega'])
print 'Totals\n', res_df[['pos_value', 'pos_delta', 'pos_vega']].sum()
return res_df
def get_port_risk(self, Greek='Delta', low=0.8, high=1.2, step=0.1,
fixed_seed=None, risk_factors=None):
''' Calculating portfolio risk statistics. '''
if risk_factors is None:
risk_factors = self.underlying_objects.keys()
if fixed_seed is None:
fixed_seed = self.fixed_seed
sensitivities = {}
levels = np.arange(low, high + 0.01, step)
if self.parallel is True:
values = value_parallel(self.valuation_objects.values(),
fixed_seed=fixed_seed)
for key in self.valuation_objects:
values[key] *= self.positions[key].quantity
else:
values = {}
for key, obj in self.valuation_objects.items():
values[key] = obj.present_value() \
* self.positions[key].quantity
import copy
for rf in risk_factors:
print '\n' + rf
in_val = self.underlying_objects[rf].initial_value
in_vol = self.underlying_objects[rf].volatility
results = []
for level in levels:
values_sens = copy.deepcopy(values)
print level,
if level == 1.0:
pass
else:
for key, obj in self.valuation_objects.items():
if rf in self.positions[key].underlyings:
if self.positions[key].otype[-5:] == 'multi':
if Greek == 'Delta':
obj.underlying_objects[rf].update(
initial_value=level * in_val)
if Greek == 'Vega':
obj.underlying_objects[rf].update(
volatility=level * in_vol)
else:
if Greek == 'Delta':
obj.underlying.update(
initial_value=level * in_val)
elif Greek == 'Vega':
obj.underlying.update(
volatility=level * in_vol)
values_sens[key] = obj.present_value(
fixed_seed=fixed_seed) \
* self.positions[key].quantity
if self.positions[key].otype[-5:] == 'multi':
obj.underlying_objects[rf].update(
initial_value=in_val)
obj.underlying_objects[rf].update(
volatility=in_vol)
else:
obj.underlying.update(initial_value=in_val)
obj.underlying.update(volatility=in_vol)
if Greek == 'Delta':
results.append((round(level * in_val, 2),
sum(values_sens.values())))
if Greek == 'Vega':
results.append((round(level * in_vol, 2),
sum(values_sens.values())))
sensitivities[rf + '_' + Greek] = pd.DataFrame(np.array(results),
index=levels,
columns=['factor', 'value'])
print 2 * '\n'
return pd.Panel(sensitivities), sum(values.values())
def risk_report(sensitivities, digits=2):
for key in sensitivities:
print '\n' + key
print np.round(sensitivities[key].transpose(), digits)
import multiprocessing as mp
def simulate_parallel(objs, fixed_seed=True):
procs = []
man = mp.Manager()
output = man.Queue()
def worker(o, output):
o.generate_paths(fixed_seed=fixed_seed)
output.put((o.name, o))
for o in objs:
procs.append(mp.Process(target=worker, args=(o, output)))
[pr.start() for pr in procs]
[pr.join() for pr in procs]
results = [output.get() for o in objs]
underlying_objects = {}
for o in results:
underlying_objects[o[0]] = o[1]
return underlying_objects
def value_parallel(objs, fixed_seed=True, full=False):
procs = []
man = mp.Manager()
output = man.Queue()
def worker(o, output):
if full is True:
pvs = o.present_value(fixed_seed=fixed_seed, full=True)[1]
output.put((o.name, pvs))
else:
pv = o.present_value(fixed_seed=fixed_seed)
output.put((o.name, pv))
for o in objs:
procs.append(mp.Process(target=worker, args=(o, output)))
[pr.start() for pr in procs]
[pr.join() for pr in procs]
res_list = [output.get() for o in objs]
results = {}
for o in res_list:
results[o[0]] = o[1]
return results
def greeks_parallel(objs, Greek='Delta'):
procs = []
man = mp.Manager()
output = man.Queue()
def worker(o, output):
if Greek == 'Delta':
output.put((o.name, o.delta()))
elif Greek == 'Vega':
output.put((o.name, o.vega()))
for o in objs:
procs.append(mp.Process(target=worker, args=(o, output)))
[pr.start() for pr in procs]
[pr.join() for pr in procs]
res_list = [output.get() for o in objs]
results = {}
for o in res_list:
results[o[0]] = o[1]
return results
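# The three helpers above share one pattern (illustrative): start one process
# per object, have each worker put an (obj.name, result) tuple on a managed
# queue, join all processes, and rebuild a dict keyed by object name, e.g.
#
#   pvs = value_parallel(port.valuation_objects.values())   # {name: value, ...}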
class var_derivatives_portfolio(derivatives_portfolio):
''' Class for building and valuing portfolios of derivatives positions
with risk factors given from fitted VAR model.
Attributes
==========
name : str
name of the object
positions : dict
dictionary of positions (instances of derivatives_position class)
val_env : market_environment
market environment for the valuation
var_risk_factors : VAR model
vector autoregressive model for risk factors
fixed_seed : boolean
flag for fixed rng seed
Methods
=======
get_positions :
prints information about the single portfolio positions
get_values :
estimates and returns positions values
get_present_values :
returns the full distribution of the simulated portfolio values
'''
def __init__(self, name, positions, val_env, var_risk_factors,
fixed_seed=False, parallel=False):
self.name = name
self.positions = positions
self.val_env = val_env
self.var_risk_factors = var_risk_factors
self.underlyings = set()
self.time_grid = None
self.underlying_objects = {}
self.valuation_objects = {}
self.fixed_seed = fixed_seed
self.special_dates = []
for pos in self.positions:
# determine earliest starting_date
self.val_env.constants['starting_date'] = \
min(self.val_env.constants['starting_date'],
positions[pos].mar_env.pricing_date)
# determine latest date of relevance
self.val_env.constants['final_date'] = \
max(self.val_env.constants['final_date'],
positions[pos].mar_env.constants['maturity'])
# collect all underlyings
# add to set; avoids redundancy
for ul in positions[pos].underlyings:
self.underlyings.add(ul)
# generating general time grid
start = self.val_env.constants['starting_date']
end = self.val_env.constants['final_date']
time_grid = pd.date_range(start=start, end=end,
freq='B' # allow business day only
).to_pydatetime()
time_grid = list(time_grid)
if start not in time_grid:
time_grid.insert(0, start)
if end not in time_grid:
time_grid.append(end)
# delete duplicate entries & sort dates in time_grid
time_grid = sorted(set(time_grid))
self.time_grid = np.array(time_grid)
self.val_env.add_list('time_grid', self.time_grid)
#
# generate simulated paths
#
self.fit_model = var_risk_factors.fit(maxlags=5, ic='bic')
sim_paths = self.fit_model.simulate(
paths=self.val_env.get_constant('paths'),
steps=len(self.time_grid),
initial_values=var_risk_factors.y[-1])
symbols = sim_paths[0].columns.values
for sym in symbols:
df = pd.DataFrame()
for i, path in enumerate(sim_paths):
df[i] = path[sym]
self.underlying_objects[sym] = general_underlying(
sym, df, self.val_env)
for pos in positions:
# select right valuation class (European, American)
val_class = otypes[positions[pos].otype]
# pick the market environment and add the valuation environment
mar_env = positions[pos].mar_env
mar_env.add_environment(self.val_env)
# instantiate valuation classes
self.valuation_objects[pos] = \
val_class(name=positions[pos].name,
mar_env=mar_env,
underlying=self.underlying_objects[
positions[pos].underlyings[0]],
payoff_func=positions[pos].payoff_func)
def get_statistics(self):
raise NotImplementedError
def get_port_risk(self):
raise NotImplementedError
| agpl-3.0 |
talishte/ctigre | env/lib/python2.7/site-packages/django/utils/encoding.py | 92 | 9512 | from __future__ import unicode_literals
import codecs
import datetime
from decimal import Decimal
import locale
import warnings
from django.utils.functional import Promise
from django.utils import six
from django.utils.six.moves.urllib.parse import quote
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class that derives __str__ from __unicode__.
On Python 2, __str__ returns the output of __unicode__ encoded as a UTF-8
bytestring. On Python 3, __str__ returns the output of __unicode__.
Useful as a mix-in. If you support Python 2 and 3 with a single code base,
you can inherit this mix-in and just define __unicode__.
"""
def __init__(self, *args, **kwargs):
warnings.warn("StrAndUnicode is deprecated. Define a __str__ method "
"and apply the @python_2_unicode_compatible decorator "
"instead.", DeprecationWarning, stacklevel=2)
super(StrAndUnicode, self).__init__(*args, **kwargs)
if six.PY3:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode('utf-8')
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if six.PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, six.integer_types + (type(None), float, Decimal,
datetime.datetime, datetime.date, datetime.time))
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% when s is an instance of
# six.text_type. This function gets called often in that setting.
if isinstance(s, six.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, six.string_types):
if hasattr(s, '__unicode__'):
s = s.__unicode__()
else:
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_text(arg, encoding, strings_only,
errors) for arg in s])
return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, six.memoryview):
s = bytes(s)
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if strings_only and (s is None or isinstance(s, int)):
return s
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return b' '.join([force_bytes(arg, encoding, strings_only,
errors) for arg in s])
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
if six.PY3:
smart_str = smart_text
force_str = force_text
else:
smart_str = smart_bytes
force_str = force_bytes
# backwards compatibility for Python 2
smart_unicode = smart_text
force_unicode = force_text
smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
def filepath_to_uri(path):
"""Convert a file system path to a URI portion that is suitable for
inclusion in a URL.
We are assuming input is either UTF-8 or unicode already.
This method will encode certain chars that would normally be recognized as
special chars for URIs. Note that this method does not encode the '
character, as it is a valid character within URIs. See
encodeURIComponent() JavaScript function for more details.
Returns an ASCII string containing the encoded result.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(force_bytes(path).replace(b"\\", b"/"), safe=b"/~!*()'")
def get_system_encoding():
"""
The encoding of the default system locale but falls back to the given
fallback encoding if the encoding is unsupported by python or could
not be determined. See tickets #10335 and #5846
"""
try:
encoding = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(encoding)
except Exception:
encoding = 'ascii'
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
| bsd-2-clause |
alienlike/courier | courier/fsio.py | 1 | 1420 | import os
import string
from pkg_resources import resource_filename
datadir = resource_filename(__name__, '.data')
class FileSystemIO:
# generate the absolute and relative paths for a message element
def get_path(self, msg_elem_id):
# convert id to a 20-character string padded with zeros
msg_elem_id = '%020d' % msg_elem_id
# split the string into 4-char chunks
split_len = lambda str, length: [str[i:i+length] for i in range(0, len(str), length)]
path_elems = split_len(msg_elem_id, 4)
# convert the chunks into a relative path
relpath = os.path.join(*path_elems)
# convert the relative path to an absolute path
abspath = os.path.join(datadir, relpath)
# blaze a trail to the relative path
reldir = relpath[:-1]
self._blaze_trail(reldir)
return abspath, relpath
def get_abspath(self, relpath):
abspath = os.path.join(datadir, relpath)
return abspath
def _blaze_trail(self, relpath):
# make sure that the datadir exists
if not os.path.exists(datadir):
os.mkdir(datadir)
# make sure that each subfolder exists within datadir
currbase = datadir
for dir in string.split(relpath, os.path.sep):
currbase = os.path.join(currbase, dir)
if not os.path.exists(currbase):
os.mkdir(currbase) | gpl-3.0 |
openstack/python-openstacksdk | openstack/tests/unit/block_storage/v2/test_stats.py | 2 | 1622 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from openstack.block_storage.v2 import stats
POOLS = {"name": "pool1",
"capabilities": {
"updated": "2014-10-28T00=00=00-00=00",
"total_capacity": 1024,
"free_capacity": 100,
"volume_backend_name": "pool1",
"reserved_percentage": "0",
"driver_version": "1.0.0",
"storage_protocol": "iSCSI",
"QoS_support": "false"
}
}
class TestBackendPools(base.TestCase):
def setUp(self):
super(TestBackendPools, self).setUp()
def test_basic(self):
sot = stats.Pools(POOLS)
self.assertEqual("", sot.resource_key)
self.assertEqual("pools", sot.resources_key)
self.assertEqual("/scheduler-stats/get_pools?detail=True",
sot.base_path)
self.assertFalse(sot.allow_create)
self.assertFalse(sot.allow_fetch)
self.assertFalse(sot.allow_delete)
self.assertTrue(sot.allow_list)
self.assertFalse(sot.allow_commit)
| apache-2.0 |
rihtak/foursquared.eclair | util/gen_parser.py | 262 | 4392 | #!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna ([email protected])
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
type_name, top_node_name, attributes = common.WalkNodesForAttributes(
sys.argv[1])
GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
    per common.WalkNodesForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
    # pop off the extraneous } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
  # CamelCaseClassName
type_name = ''.join([word.capitalize() for word in top_node_name.split('_')])
# CamelCaseClassName
camel_name = ''.join([word.capitalize() for word in name.split('_')])
# camelCaseLocalName
attribute_name = camel_name.lower().capitalize()
# mFieldName
field_name = 'm' + camel_name
if children[0]:
sub_parser_camel_case = children[0] + 'Parser'
else:
sub_parser_camel_case = (camel_name[:-1] + 'Parser')
return {
'type_name': type_name,
'name': name,
'top_node_name': top_node_name,
'camel_name': camel_name,
'parser_name': typ + 'Parser',
'attribute_name': attribute_name,
'field_name': field_name,
'typ': typ,
'timestamp': datetime.datetime.now(),
'sub_parser_camel_case': sub_parser_camel_case,
'sub_type': children[0]
}
if __name__ == '__main__':
main()
| apache-2.0 |
adlnet-archive/edx-platform | lms/djangoapps/foldit/views.py | 191 | 6365 | import hashlib
import json
import logging
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from foldit.models import Score, PuzzleComplete
from student.models import unique_id_for_user
import re
log = logging.getLogger(__name__)
@login_required
@csrf_exempt
@require_POST
def foldit_ops(request):
"""
Endpoint view for foldit operations.
"""
responses = []
if "SetPlayerPuzzleScores" in request.POST:
puzzle_scores_json = request.POST.get("SetPlayerPuzzleScores")
pz_verify_json = request.POST.get("SetPlayerPuzzleScoresVerify")
log.debug("SetPlayerPuzzleScores message: puzzle scores: %r",
puzzle_scores_json)
puzzle_score_verify = json.loads(pz_verify_json)
if not verifies_ok(request.user.email,
puzzle_scores_json, puzzle_score_verify):
responses.append({"OperationID": "SetPlayerPuzzleScores",
"Success": "false",
"ErrorString": "Verification failed",
"ErrorCode": "VerifyFailed"})
log.warning(
"Verification of SetPlayerPuzzleScores failed:"
"user %s, scores json %r, verify %r",
request.user,
puzzle_scores_json,
pz_verify_json
)
else:
# This is needed because we are not getting valid json - the
# value of ScoreType is an unquoted string. Right now regexes are
# quoting the string, but ideally the json itself would be fixed.
# To allow for fixes without breaking this, the regex should only
# match unquoted strings,
a = re.compile(r':([a-zA-Z]*),')
puzzle_scores_json = re.sub(a, r':"\g<1>",', puzzle_scores_json)
puzzle_scores = json.loads(puzzle_scores_json)
responses.append(save_scores(request.user, puzzle_scores))
if "SetPuzzlesComplete" in request.POST:
puzzles_complete_json = request.POST.get("SetPuzzlesComplete")
pc_verify_json = request.POST.get("SetPuzzlesCompleteVerify")
log.debug("SetPuzzlesComplete message: %r",
puzzles_complete_json)
puzzles_complete_verify = json.loads(pc_verify_json)
if not verifies_ok(request.user.email,
puzzles_complete_json, puzzles_complete_verify):
responses.append({"OperationID": "SetPuzzlesComplete",
"Success": "false",
"ErrorString": "Verification failed",
"ErrorCode": "VerifyFailed"})
log.warning(
"Verification of SetPuzzlesComplete failed:"
" user %s, puzzles json %r, verify %r",
request.user,
puzzles_complete_json,
pc_verify_json
)
else:
puzzles_complete = json.loads(puzzles_complete_json)
responses.append(save_complete(request.user, puzzles_complete))
return HttpResponse(json.dumps(responses))
def verify_code(email, val):
"""
Given the email and passed in value (str), return the expected
verification code.
"""
# TODO: is this the right string?
verification_string = email.lower() + '|' + val
return hashlib.md5(verification_string).hexdigest()
def verifies_ok(email, val, verification):
"""
Check that the hash_str matches the expected hash of val.
Returns True if verification ok, False otherwise
"""
if verification.get("VerifyMethod") != "FoldItVerify":
log.debug("VerificationMethod in %r isn't FoldItVerify", verification)
return False
hash_str = verification.get("Verify")
return verify_code(email, val) == hash_str
def save_scores(user, puzzle_scores):
score_responses = []
for score in puzzle_scores:
log.debug("score: %s", score)
# expected keys ScoreType, PuzzleID (int),
# BestScore (energy), CurrentScore (Energy), ScoreVersion (int)
puzzle_id = score['PuzzleID']
best_score = score['BestScore']
current_score = score['CurrentScore']
score_version = score['ScoreVersion']
# SetPlayerPuzzleScoreResponse object
# Score entries are unique on user/unique_user_id/puzzle_id/score_version
try:
obj = Score.objects.get(
user=user,
unique_user_id=unique_id_for_user(user),
puzzle_id=puzzle_id,
score_version=score_version)
obj.current_score = current_score
obj.best_score = best_score
except Score.DoesNotExist:
obj = Score(
user=user,
unique_user_id=unique_id_for_user(user),
puzzle_id=puzzle_id,
current_score=current_score,
best_score=best_score,
score_version=score_version)
obj.save()
score_responses.append({'PuzzleID': puzzle_id,
'Status': 'Success'})
return {"OperationID": "SetPlayerPuzzleScores", "Value": score_responses}
def save_complete(user, puzzles_complete):
"""
Returned list of PuzzleIDs should be in sorted order (I don't think client
cares, but tests do)
"""
for complete in puzzles_complete:
log.debug("Puzzle complete: %s", complete)
puzzle_id = complete['PuzzleID']
puzzle_set = complete['Set']
puzzle_subset = complete['SubSet']
# create if not there
PuzzleComplete.objects.get_or_create(
user=user,
unique_user_id=unique_id_for_user(user),
puzzle_id=puzzle_id,
puzzle_set=puzzle_set,
puzzle_subset=puzzle_subset)
# List of all puzzle ids of intro-level puzzles completed ever, including on this
# request
# TODO: this is just in this request...
complete_responses = list(pc.puzzle_id
for pc in PuzzleComplete.objects.filter(user=user))
return {"OperationID": "SetPuzzlesComplete", "Value": complete_responses}
| agpl-3.0 |
gooli/termenu | termenu/ansi.py | 1 | 2889 | from __future__ import print_function
import errno
import sys
import re
import os
COLORS = dict(black=0, red=1, green=2, yellow=3, blue=4, magenta=5, cyan=6, white=7, default=9)
def write(text):
written = 0
fd = sys.stdout.fileno()
while written < len(text):
remains = text[written:].encode("utf8")
try:
written += os.write(fd, remains)
except OSError as e:
if e.errno != errno.EAGAIN:
raise
def up(n=1):
write("\x1b[%dA" % n)
def down(n=1):
write("\x1b[%dB" % n)
def forward(n=1):
write("\x1b[%dC" % n)
def back(n=1):
write("\x1b[%dD" % n)
def move_horizontal(column=1):
write("\x1b[%dG" % column)
def move(row, column):
write("\x1b[%d;%dH" % (row, column))
def clear_screen():
write("\x1b[2J")
def clear_eol():
write("\x1b[0K")
def clear_line():
write("\x1b[2K")
def save_position():
write("\x1b[s")
def restore_position():
write("\x1b[u")
def hide_cursor():
write("\x1b[?25l")
def show_cursor():
write("\x1b[?25h")
def colorize(string, color, background=None, bright=False):
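    # builds an ANSI SGR sequence of the form ESC[<bright>;<fg>;<bg>m...ESC[0;m,
    # e.g. colorize("hi", "red", bright=True) -> "\x1b[0;1;31;49mhi\x1b[0;m"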
color = 30 + COLORS.get(color, COLORS["default"])
background = 40 + COLORS.get(background, COLORS["default"])
return "\x1b[0;%d;%d;%dm%s\x1b[0;m" % (int(bright), color, background, string)
def highlight(string, background):
# adds background to a string, even if it's already colorized
background = 40 + COLORS.get(background, COLORS["default"])
bkcmd = "\x1b[%dm" % background
stopcmd = "\x1b[m"
return bkcmd + string.replace(stopcmd, stopcmd + bkcmd) + stopcmd
ANSI_COLOR_REGEX = "\x1b\[(\d+)?(;\d+)*;?m"
def decolorize(string):
return re.sub(ANSI_COLOR_REGEX, "", string)
class ansistr(str):
def __init__(self, s):
if not isinstance(s, str):
s = str(s)
self.__str = s
self.__parts = [m.span() for m in re.finditer("(%s)|(.)" % ANSI_COLOR_REGEX, s)]
self.__len = sum(1 if p[1]-p[0]==1 else 0 for p in self.__parts)
def __len__(self):
return self.__len
def __getslice__(self, i, j):
parts = []
count = 0
for start, end in self.__parts:
if end - start == 1:
count += 1
if i <= count < j:
parts.append(self.__str[start:end])
else:
parts.append(self.__str[start:end])
return ansistr("".join(parts))
def __add__(self, s):
return ansistr(self.__str + s)
def decolorize(self):
return decolorize(self.__str)
if __name__ == "__main__":
# Print all colors
colors = [name for name, color in sorted(COLORS.items(), key=lambda v: v[1])]
for bright in [False, True]:
for background in colors:
for color in colors:
print(colorize("Hello World!", color, background, bright))
| mit |
bhargav2408/kivy | examples/widgets/effectwidget2.py | 43 | 1264 | '''
This is an example of creating your own effect by writing a glsl string.
'''
from kivy.base import runTouchApp
from kivy.lang import Builder
from kivy.uix.effectwidget import EffectWidget, EffectBase
# The effect string is glsl code defining an effect function.
effect_string = '''
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
// Note that time is a uniform variable that is automatically
// provided to all effects.
float red = color.x * abs(sin(time*2.0));
float green = color.y; // No change
float blue = color.z * (1.0 - abs(sin(time*2.0)));
return vec4(red, green, blue, color.w);
}
'''
class DemoEffect(EffectWidget):
def __init__(self, *args, **kwargs):
self.effect_reference = EffectBase(glsl=effect_string)
super(DemoEffect, self).__init__(*args, **kwargs)
widget = Builder.load_string('''
DemoEffect:
effects: [self.effect_reference] if checkbox.active else []
orientation: 'vertical'
Button:
text: 'Some text so you can see what happens.'
BoxLayout:
size_hint_y: None
height: dp(50)
Label:
text: 'Enable effect?'
CheckBox:
id: checkbox
active: True
''')
runTouchApp(widget)
| mit |
puzan/ansible | lib/ansible/plugins/action/ios_config.py | 126 | 4162 | #
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ios import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in result.keys():
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit('src').scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
aponxi/libmysqlpp | bakefile-0.2.9/src/portautils.py | 1 | 3095 | #
# This file is part of Bakefile (http://www.bakefile.org)
#
# Copyright (C) 2003,2004,2008 Vaclav Slavik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# $Id: portautils.py 1181 2008-01-20 18:23:05Z vaclavslavik $
#
# Portable utilities for misc tasks
#
import os, tempfile
#
# Secure temporary file creation:
#
def mktemp(prefix):
"""Uses tempfile.mkstemp() to atomically create named file, but works
only with Python 2.3+."""
handle, filename = tempfile.mkstemp(prefix=prefix)
os.close(handle)
return filename
def mktempdir(prefix):
"""Uses tempfile.mkdtemp() to atomically create named directory, but works
only with Python 2.3+."""
return tempfile.mkdtemp(prefix=prefix)
#
# Cross-platform file locking:
# (based on portalocker Python Cookbook recipe
# by John Nielsen <[email protected]>:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
#
if os.name == 'nt':
import win32con
import win32file
import pywintypes
# is there any reason not to reuse the following structure?
__overlapped = pywintypes.OVERLAPPED()
def lock(file):
try:
hfile = win32file._get_osfhandle(file.fileno())
win32file.LockFileEx(hfile, win32con.LOCKFILE_EXCLUSIVE_LOCK,
0, 0x7fffffff, __overlapped)
except pywintypes.error, e:
# err 120 is unimplemented call, happens on win9x:
if e.args[0] != 120:
raise e
def unlock(file):
try:
hfile = win32file._get_osfhandle(file.fileno())
win32file.UnlockFileEx(hfile, 0, 0x7fffffff, __overlapped)
except pywintypes.error, e:
# err 120 is unimplemented call, happens on win9x:
if e.args[0] != 120:
raise e
elif os.name == 'posix':
import fcntl
def lock(file):
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
def unlock(file):
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
else:
def lock(file): pass
def unlock(file): pass
| lgpl-2.1 |
rrauenza/factory_boy | factory/builder.py | 1 | 12577 | """Build factory instances."""
import collections
from . import declarations
from . import enums
from . import errors
from . import utils
DeclarationWithContext = collections.namedtuple(
'DeclarationWithContext',
['name', 'declaration', 'context'],
)
PostGenerationContext = collections.namedtuple(
'PostGenerationContext',
['value_provided', 'value', 'extra'],
)
class DeclarationSet(object):
"""A set of declarations, including the recursive parameters.
Attributes:
declarations (dict(name => declaration)): the top-level declarations
contexts (dict(name => dict(subfield => value))): the nested parameters related
to a given top-level declaration
This object behaves similarly to a dict mapping a top-level declaration name to a
DeclarationWithContext, containing field name, declaration object and extra context.
"""
def __init__(self, initial=None):
self.declarations = {}
self.contexts = collections.defaultdict(dict)
self.update(initial or {})
@classmethod
def split(cls, entry):
"""Split a declaration name into a (declaration, subpath) tuple.
Examples:
>>> DeclarationSet.split('foo__bar')
('foo', 'bar')
>>> DeclarationSet.split('foo')
('foo', None)
>>> DeclarationSet.split('foo__bar__baz')
('foo', 'bar__baz')
"""
if enums.SPLITTER in entry:
return entry.split(enums.SPLITTER, 1)
else:
return (entry, None)
@classmethod
def join(cls, root, subkey):
"""Rebuild a full declaration name from its components.
for every string x, we have `join(split(x)) == x`.
"""
if subkey is None:
return root
return enums.SPLITTER.join((root, subkey))
def copy(self):
return self.__class__(self.as_dict())
def update(self, values):
"""Add new declarations to this set/
Args:
values (dict(name, declaration)): the declarations to ingest.
"""
for k, v in values.items():
root, sub = self.split(k)
if sub is None:
self.declarations[root] = v
else:
self.contexts[root][sub] = v
extra_context_keys = set(self.contexts) - set(self.declarations)
if extra_context_keys:
raise errors.InvalidDeclarationError(
"Received deep context for unknown fields: %r (known=%r)" % (
{
self.join(root, sub): v
for root in extra_context_keys
for sub, v in self.contexts[root].items()
},
sorted(self.declarations),
)
)
def filter(self, entries):
"""Filter a set of declarations: keep only those related to this object.
This will keep:
- Declarations that 'override' the current ones
- Declarations that are parameters to current ones
"""
return [
entry for entry in entries
if self.split(entry)[0] in self.declarations
]
def sorted(self):
return utils.sort_ordered_objects(
self.declarations,
getter=lambda entry: self.declarations[entry],
)
def __contains__(self, key):
return key in self.declarations
def __getitem__(self, key):
return DeclarationWithContext(
name=key,
declaration=self.declarations[key],
context=self.contexts[key],
)
def __iter__(self):
return iter(self.declarations)
def values(self):
"""Retrieve the list of declarations, with their context."""
for name in self:
yield self[name]
def _items(self):
"""Extract a list of (key, value) pairs, suitable for our __init__."""
for name in self.declarations:
yield name, self.declarations[name]
for subkey, value in self.contexts[name].items():
yield self.join(name, subkey), value
def as_dict(self):
"""Return a dict() suitable for our __init__."""
return dict(self._items())
def __repr__(self):
return '<DeclarationSet: %r>' % self.as_dict()
class FakePostGenerationDeclaration(declarations.PostGenerationDeclaration):
"""A fake post-generation declaration, providing simply a hardcoded value.
Used to disable post-generation when the user has overridden a method.
"""
def __init__(self, value):
self.value = value
def call(self, instance, step, context):
return self.value
def parse_declarations(decls, base_pre=None, base_post=None):
pre_declarations = base_pre.copy() if base_pre else DeclarationSet()
post_declarations = base_post.copy() if base_post else DeclarationSet()
# Inject extra declarations, splitting between known-to-be-post and undetermined
extra_post = {}
extra_maybenonpost = {}
for k, v in decls.items():
if enums.get_builder_phase(v) == enums.BuilderPhase.POST_INSTANTIATION:
if k in pre_declarations:
# Conflict: PostGenerationDeclaration with the same
# name as a BaseDeclaration
raise errors.InvalidDeclarationError(
"PostGenerationDeclaration %s=%r shadows declaration %r"
% (k, v, pre_declarations[k])
)
extra_post[k] = v
elif k in post_declarations:
# Passing in a scalar value to a PostGenerationDeclaration
# Set it as `key__`
magic_key = post_declarations.join(k, '')
extra_post[magic_key] = v
else:
extra_maybenonpost[k] = v
# Start with adding new post-declarations
post_declarations.update(extra_post)
# Fill in extra post-declaration context
post_overrides = post_declarations.filter(extra_maybenonpost)
post_declarations.update({
k: v
for k, v in extra_maybenonpost.items()
if k in post_overrides
})
# Anything else is pre_declarations
pre_declarations.update({
k: v
for k, v in extra_maybenonpost.items()
if k not in post_overrides
})
return pre_declarations, post_declarations
class BuildStep(object):
def __init__(self, builder, sequence, parent_step=None):
self.builder = builder
self.sequence = sequence
self.attributes = {}
self.parent_step = parent_step
self.stub = None
def resolve(self, declarations):
self.stub = Resolver(
declarations=declarations,
step=self,
sequence=self.sequence,
)
for field_name in declarations:
self.attributes[field_name] = getattr(self.stub, field_name)
@property
def chain(self):
if self.parent_step:
parent_chain = self.parent_step.chain
else:
parent_chain = ()
return (self.stub,) + parent_chain
def recurse(self, factory, declarations, force_sequence=None):
builder = self.builder.recurse(factory._meta, declarations)
return builder.build(parent_step=self, force_sequence=force_sequence)
class StepBuilder(object):
"""A factory instantiation step.
Attributes:
- parent: the parent StepBuilder, or None for the root step
- extras: the passed-in kwargs for this branch
- factory: the factory class being built
- strategy: the strategy to use
"""
def __init__(self, factory_meta, extras, strategy):
self.factory_meta = factory_meta
self.strategy = strategy
self.extras = extras
self.force_init_sequence = extras.pop('__sequence', None)
def build(self, parent_step=None, force_sequence=None):
"""Build a factory instance."""
# TODO: Handle "batch build" natively
pre, post = parse_declarations(
self.extras,
base_pre=self.factory_meta.pre_declarations,
base_post=self.factory_meta.post_declarations,
)
if force_sequence is not None:
sequence = force_sequence
elif self.force_init_sequence is not None:
sequence = self.force_init_sequence
else:
sequence = self.factory_meta.next_sequence()
step = BuildStep(
builder=self,
sequence=sequence,
parent_step=parent_step,
)
step.resolve(pre)
args, kwargs = self.factory_meta.prepare_arguments(step.attributes)
instance = self.factory_meta.instantiate(
step=step,
args=args,
kwargs=kwargs,
)
postgen_results = {}
for declaration_name in post.sorted():
declaration = post[declaration_name]
postgen_context = PostGenerationContext(
value_provided='' in declaration.context,
value=declaration.context.get(''),
extra={k: v for k, v in declaration.context.items() if k != ''},
)
postgen_results[declaration_name] = declaration.declaration.call(
instance=instance,
step=step,
context=postgen_context,
)
self.factory_meta.use_postgeneration_results(
instance=instance,
step=step,
results=postgen_results,
)
return instance
def recurse(self, factory_meta, extras):
"""Recurse into a sub-factory call."""
return self.__class__(factory_meta, extras, strategy=self.strategy)
class Resolver(object):
"""Resolve a set of declarations.
Attributes are set at instantiation time, values are computed lazily.
Attributes:
__initialized (bool): whether this object's __init__ as run. If set,
setting any attribute will be prevented.
__declarations (dict): maps attribute name to their declaration
__values (dict): maps attribute name to computed value
__pending (str list): names of the attributes whose value is being
computed. This allows to detect cyclic lazy attribute definition.
__step (BuildStep): the BuildStep related to this resolver.
This allows to have the value of a field depend on the value of
another field
"""
__initialized = False
def __init__(self, declarations, step, sequence):
self.__declarations = declarations
self.__step = step
self.__values = {}
self.__pending = []
self.__initialized = True
@property
def factory_parent(self):
return self.__step.parent_step.stub if self.__step.parent_step else None
def __repr__(self):
return '<Resolver for %r>' % self.__step
def __getattr__(self, name):
"""Retrieve an attribute's value.
This will compute it if needed, unless it is already on the list of
attributes being computed.
"""
if name in self.__pending:
raise errors.CyclicDefinitionError(
"Cyclic lazy attribute definition for %r; cycle found in %r." %
(name, self.__pending))
elif name in self.__values:
return self.__values[name]
elif name in self.__declarations:
declaration = self.__declarations[name]
value = declaration.declaration
if enums.get_builder_phase(value) == enums.BuilderPhase.ATTRIBUTE_RESOLUTION:
self.__pending.append(name)
try:
value = value.evaluate(
instance=self,
step=self.__step,
extra=declaration.context,
)
finally:
last = self.__pending.pop()
assert name == last
self.__values[name] = value
return value
else:
raise AttributeError(
"The parameter %r is unknown. Evaluated attributes are %r, "
"definitions are %r." % (name, self.__values, self.__declarations))
def __setattr__(self, name, value):
"""Prevent setting attributes once __init__ is done."""
if not self.__initialized:
return super(Resolver, self).__setattr__(name, value)
else:
raise AttributeError('Setting of object attributes is not allowed')
| mit |
grandtiger/profitpy | profit/series/advanced.py | 18 | 10262 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase
# Distributed under the terms of the GNU General Public License v2
# Author: Troy Melhase <[email protected]>
from numpy import arctan, array, log, mean, std, median
from scipy.stats import linregress, mode
from profit.series.basic import SeriesIndex, MovingAverageIndex
class FisherTransform(MovingAverageIndex):
""" FisherTransform
"""
params = [
('series', dict(type='line')),
('periods', dict(type='int', min=1))
]
def __init__(self, series, periods):
MovingAverageIndex.__init__(self, series, periods)
self.inter = []
def reindex(self):
periods = self.periods
period = self.series[-periods:]
current = period[-1]
mx = max(period)
mn = min(period)
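        # normalize the latest value to roughly [-1, 1] over the lookback
        # window, smooth it, clamp to +/-0.99 so the log stays finite, then
        # apply the Fisher transform 0.5*ln((1+x)/(1-x)) with extra smoothing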
try:
inter = 0.33 * 2 * ((current - mn) / (mx - mn) - 0.5) + (0.67 * self.inter[-1])
if inter > 0.99:
inter = 0.99
elif inter < -0.99:
inter = -0.99
fish = 0.5 * log((1 + inter) / (1 - inter)) + (0.5 * self[-1])
except (TypeError, IndexError, ZeroDivisionError, ):
inter = 0
fish = 0
self.inter.append(inter)
self.append(fish)
class MAMA(MovingAverageIndex):
""" Mother of Adaptave Moving Averages.
"""
fast_limit = 0.5
slow_limit = 0.05
params = [
('series', dict(type='line')),
('periods', dict(type='int', min=1))
]
def __init__(self, series, periods):
MovingAverageIndex.__init__(self, series, periods)
self.hist = {'q1':[], 'i1':[], 'q2':[], 'i2':[], 're':[], 'im':[],
'sms':[], 'dts':[], 'prs':[], 'sps':[], 'phs':[], }
def reindex(self):
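        # roughly follows Ehlers' MESA Adaptive Moving Average: a Hilbert
        # transform style filter tracks the dominant cycle phase and the
        # EMA alpha adapts between fast_limit and slow_limit accordingly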
hist = self.hist
sms, dts, prs, sps, phs = \
hist['sms'], hist['dts'], hist['prs'], hist['sps'], hist['phs']
q1, i1, q2, i2, re, im = \
hist['q1'], hist['i1'], hist['q2'], hist['i2'], hist['re'], hist['im']
series = self.series
periods = self.periods
if len(series) > periods:
sm = sum((4*series[-1], 3*series[-2], 2*series[-3], series[-4])) / 10
sms.append(sm)
dt = (0.0962*sms[-1] + 0.5769*sms[-3] - 0.5769*sms[-5] - 0.0962*sms[-7]) * (0.075*prs[-2] + 0.54)
dts.append(dt)
qa = (.0962*dts[-1] + 0.5769*dts[-3] - 0.5769*dts[-5] - 0.0962*dts[-7]) * (0.075*prs[-2] + 0.54)
q1.append(qa)
ia = dts[-4]
i1.append(ia)
jI = (0.0962*i1[-1] + 0.5769*i1[-3] - 0.5769*i1[-5] - 0.0962*i1[-7]) * (0.075*prs[-2] + 0.54)
jQ = (0.0962*q1[-1] + 0.5769*q1[-3] - 0.5769*q1[-5] - 0.0962*q1[-7]) * (0.075*prs[-2] + 0.54)
ib = i1[-1] - jQ
qb = q1[-1] - jI
ib = 0.2*ib + 0.8*i2[-1]
qb = 0.2*qb + 0.8*q2[-1]
i2.append(ib)
q2.append(qb)
ra = i2[-1]*i2[-2] + q2[-1]*q2[-2]
ima = i2[-1]*q2[-2] - q2[-1]*i2[-2]
ra = 0.2*ra + 0.8*re[-1]
            ima = 0.2*ima + 0.8*im[-1]
re.append(ra)
im.append(ima)
if im[-1] != 0 and re[-1] != 0:
pra = 360 / arctan(im[-1]/re[-1])
else:
pra = 0
if pra > 1.5*prs[-1]: pra = 1.5*prs[-1]
            if pra < 0.67*prs[-1]: pra = 0.67*prs[-1]
if pra < 6: pra = 6
if pra > 50: pra = 50
pra = 0.2*pra + 0.8*prs[-1]
prs.append(pra)
spa = 0.33*prs[-1] + 0.67*sps[-1]
sps.append(spa)
if i1[-1] != 0:
ph = arctan(q1[-1] / i1[-1])
else:
ph = 0
phs.append(ph)
dp = phs[-2] - phs[-1]
if dp < 1: dp = 1
alpha = self.fast_limit / dp
if alpha < self.slow_limit: alpha = self.slow_limit
mama = alpha*series[-1] + (1 - alpha)*self[-1]
#FAMA = .5*alpha*MAMA + (1 - .5*alpha)*FAMA[1];
self.append(mama)
else:
last = series[-1]
for vlst in hist.values():
vlst.append(last)
self.append(last)
class SMA(MovingAverageIndex):
""" Simple Moving Average index.
"""
params = [
('series', dict(type='line')),
('periods', dict(type='int', min=1))
]
def reindex(self):
periods = self.periods
period = self.series[-periods:]
sma = None
if len(period) == periods:
try:
sma = mean(period)
except (TypeError, IndexError):
pass
self.append(sma)
class EMA(MovingAverageIndex):
""" Exponential Moving Average index.
"""
params = [
('series', dict(type='line')),
('periods', dict(type='int', min=1)),
('k', dict(type='float', min=0.001, default=2.0))
]
def __init__(self, series, periods, k=2.0):
MovingAverageIndex.__init__(self, series, periods)
self.k = k
def reindex(self):
try:
last = self[-1]
except (IndexError, ):
self.append(None)
return
periods = self.periods
ema = None
if last is None:
try:
period = self.series[-periods:]
if len(period) == periods:
ema = mean(period)
except (TypeError, ):
pass
else:
pt = self.series[-1]
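            # standard EMA recursion: with the default k of 2.0 the smoothing
            # factor is 2/(N+1), i.e. ema = prev + 2/(N+1) * (price - prev)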
k = self.k / (periods + 1)
ema = last + (k * (pt - last))
self.append(ema)
class WMA(MovingAverageIndex):
""" Weighted Moving Average index.
"""
params = [
('series', dict(type='line')),
('periods', dict(type='int', min=1)),
]
def __init__(self, series, periods):
MovingAverageIndex.__init__(self, series, periods)
offsets = range(1, periods+1)
periods_sum = float(sum(offsets))
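        # linear weights i / (1 + 2 + ... + N): they sum to 1 and the most
        # recent sample receives the largest weight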
self.weights = array([x/periods_sum for x in offsets])
def reindex(self):
periods = self.periods
period = self.series[-periods:]
wma = None
if len(period) == periods:
try:
wma = sum(period * self.weights)
except (TypeError, ):
pass
self.append(wma)
class Volatility(MovingAverageIndex):
""" Volatility index.
Volatility = standard deviation of closing price [for n periods] /
average closing price [for n periods]
"""
params = [
('series', dict(type='line')),
('periods', dict(type='int', min=1)),
]
def reindex(self):
periods = self.periods
period = self.series[-periods:]
vol = None
if len(period) == periods:
try:
vol = std(period) / mean(period)
vol *= 100
except TypeError:
pass
self.append(vol)
class VerticalHorizontalFilter(MovingAverageIndex):
""" VerticalHorizontalFilter
"""
params = [
('series', dict(type='line')),
('periods', dict(type='int', min=1))
]
def reindex(self):
periods = self.periods
period = self.series[-periods:]
vhf = None
if len(period) == periods:
try:
diffs = array(period[1:]) - period[0:-1]
vhf = (max(period) - min(period)) / sum(abs(diffs))
except (IndexError, TypeError, ZeroDivisionError):
pass
self.append(vhf)
class BollingerBand(SeriesIndex):
""" BollingerBand
"""
params = [
('series', dict(type='line')),
('period', dict(type='int', min=1)),
('dev_factor', dict(type='float')),
]
def __init__(self, series, period, dev_factor):
SeriesIndex.__init__(self, series)
self.period = period # allows for periods != periods of series
self.dev_factor = dev_factor
def reindex(self):
period = self.series[-self.period:]
last = self.series[-1]
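        # a single band: the latest price offset by dev_factor standard
        # deviations of the lookback window (the sign of dev_factor picks
        # the upper or lower band)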
try:
dev = std(period)
dev *= self.dev_factor
dev += last
except (TypeError, ZeroDivisionError, ):
dev = None
self.append(dev)
class LinearRegressionSlope(SeriesIndex):
""" LinearRegressionSlope
LinearRegressionSlope(series, periods) -> slope of the linear
regression
"""
params = [
('series', dict(type='line')),
('period', dict(type='int', min=1)),
('scale', dict(type='float', default=1.0)),
]
def __init__(self, series, periods, scale=1):
SeriesIndex.__init__(self, series)
self.periods = periods
self.scale = scale
self.xarray = array(range(0, periods))
def reindex(self):
xa = self.xarray
ya = array(self.series[-self.periods:])
try:
slope, intercept, r, two_tail_prob, est_stderr = linregress(xa, ya)
except (TypeError, ValueError, ZeroDivisionError):
slope = 0.0
self.append(slope * self.scale)
class OrderStatisticFilter(MovingAverageIndex):
""" Ordered Statistic Filter base class.
OS filters base their operation on the ranking of the samples
within the filter window. The data are ranked by their summary
statistics, such as their mean or variance, rather than by their
temporal position.
"""
not__params = [
('series', dict(type='line')),
('periods', dict(type='int', min=1))
]
class MedianValue(OrderStatisticFilter):
""" Indexes a series by the median.
"""
params = [
('series', dict(type='line')),
('periods', dict(type='int', min=1))
]
def reindex(self):
values = self.series[-self.periods:]
        m = median(values).item()
self.append(m)
class ModeValue(OrderStatisticFilter):
""" Indexes a series by the mode.
"""
params = [
('series', dict(type='line')),
('periods', dict(type='int', min=1))
]
def reindex(self):
values = self.series[-self.periods:]
        m = mode(values)[0].item()
self.append(m)
| gpl-2.0 |
bhtucker/perceptron_viz | voweler/perceptron.py | 1 | 1376 | # -*- coding: utf-8 -*-
"""
perceptron
~~~~~~~~~~
Module for our learning node logic
"""
import numpy as np
import math
from string import ascii_lowercase
from letters import is_vowel
class Perceptron(object):
"""Basic Perceptron functionality:
store weights, apply them to points, update weights based on error
"""
def __init__(self, learning_rate=.1, input_width=11):
self.learning_rate = learning_rate
self.input_width = input_width
self.w = np.random.random(input_width)
def predict(self, point):
return 1. if np.dot(self.w, point) > .5 else 0.
def update(self, point, error):
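        # online perceptron rule: w <- w + eta * (target - prediction) * x,
        # where `error` is already (target - prediction)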
self.w += point * error * self.learning_rate
class VowelPerceptron(Perceptron):
"""Vowel-detection specific methods for perceptron"""
def __init__(self, salt='', *args, **kwargs):
super(VowelPerceptron, self).__init__(**kwargs)
self.letter_map = self.get_embedding()
def handle_letter(self, letter, update=True):
point = self.to_vec(letter)
pred = self.predict(point)
if update:
error = is_vowel(letter) - pred
self.update(point, error)
return pred
def to_vec(self, char):
return self.letter_map[char]
def get_embedding(self):
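        # each letter maps to a random dense vector; the embedding has no
        # phonetic structure, so the perceptron must memorize which of the
        # 26 random vectors correspond to vowels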
return {l: np.random.random(self.input_width) for l in ascii_lowercase}
| mit |
stevenewey/django | setup.py | 123 | 3257 | import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
lib_paths = [get_python_lib()]
if lib_paths[0].startswith("/usr/lib/"):
# We have to try also with an explicit prefix of /usr/local in order to
# catch Debian's custom user site-packages directory.
lib_paths.append(get_python_lib(prefix="/usr/local"))
for lib_path in lib_paths:
existing_path = os.path.abspath(os.path.join(lib_path, "django"))
if os.path.exists(existing_path):
# We note the need for the warning here, but present it after the
# command is run, so it's more likely to be seen.
overlay_warning = True
break
EXCLUDE_FROM_PACKAGES = ['django.conf.project_template',
'django.conf.app_template',
'django.bin']
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
setup(
name='Django',
version=version,
url='http://www.djangoproject.com/',
author='Django Software Foundation',
author_email='[email protected]',
description=('A high-level Python Web framework that encourages '
'rapid development and clean, pragmatic design.'),
license='BSD',
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
scripts=['django/bin/django-admin.py'],
entry_points={'console_scripts': [
'django-admin = django.core.management:execute_from_command_line',
]},
extras_require={
"bcrypt": ["bcrypt"],
},
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
if overlay_warning:
sys.stderr.write("""
========
WARNING!
========
You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the
%(existing_path)s
directory and re-install Django.
""" % {"existing_path": existing_path})
| bsd-3-clause |
learking/aaCodonProbPred | predAAprob/aa_studyHydro.py | 1 | 1256 | import sys
from aaProbSolver import *
import re
import glob
import math
#'L' is the reference codon
aaList = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']
#coef file and folders
#on desktop
#firstOrderCoefFile = '/home/kuangyu/workspace/aaCodonProbPred/coefs/aa_sa_coef.txt'
#on mac
firstOrderCoefFile = '/Users/kwang2/Documents/workspace/aaCodonProbPred/coefs/aa_sa_coef.txt'
fFirstOrder = open(firstOrderCoefFile, "r")
firstOrderCoefs = fFirstOrder.readlines()
fFirstOrder.close()
firstOrderSolver = aaProbSolver(firstOrderCoefs)
#sanac range:
#min: 0
#max: 172.4
#generate test data points between min and max
allPoints = range(1,172,2)
fOutput=open("/Users/kwang2/Documents/workspace/aaCodonProbPred/studyHydroResult/aaProb_sa_0_172.csv","w")
for x in allPoints:
aaProbs = []
for aa in aaList:
tmpX = [1, x]
tmpProb = firstOrderSolver.getCatProb(aa, tmpX)
aaProbs.append(tmpProb)
resultLine = ",".join(map(str,aaProbs)) + "\n"
#print resultLine
#print sum(aaProbs)
fOutput.write(resultLine)
fOutput.close()
| gpl-2.0 |
zaclimon/Quanta-Flo | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
mzizzi/ansible | lib/ansible/modules/files/blockinfile.py | 24 | 10652 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, 2015 YAEGASHI Takeshi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: blockinfile
author:
- 'YAEGASHI Takeshi (@yaegashi)'
extends_documentation_fragment:
- files
- validate
short_description: Insert/update/remove a text block
surrounded by marker lines.
version_added: '2.0'
description:
- This module will insert/update/remove a block of multi-line text
surrounded by customizable marker lines.
options:
path:
aliases: [ dest, destfile, name ]
required: true
description:
- The file to modify.
- Before 2.3 this option was only usable as I(dest), I(destfile) and I(name).
state:
required: false
choices: [ present, absent ]
default: present
description:
- Whether the block should be there or not.
marker:
required: false
default: '# {mark} ANSIBLE MANAGED BLOCK'
description:
- The marker line template.
"{mark}" will be replaced with "BEGIN" or "END".
block:
aliases: [ content ]
required: false
default: ''
description:
- The text to insert inside the marker lines.
If it's missing or an empty string,
the block will be removed as if C(state) were specified to C(absent).
insertafter:
required: false
default: EOF
description:
- If specified, the block will be inserted after the last match of
specified regular expression. A special value is available; C(EOF) for
inserting the block at the end of the file. If specified regular
expression has no matches, C(EOF) will be used instead.
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
default: None
description:
- If specified, the block will be inserted before the last match of
specified regular expression. A special value is available; C(BOF) for
inserting the block at the beginning of the file. If specified regular
expression has no matches, the block will be inserted at the end of the
file.
choices: [ 'BOF', '*regex*' ]
create:
required: false
default: 'no'
choices: [ 'yes', 'no' ]
description:
- Create a new file if it doesn't exist.
backup:
required: false
default: 'no'
choices: [ 'yes', 'no' ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
follow:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- 'This flag indicates that filesystem links, if they exist, should be followed.'
version_added: "2.1"
notes:
- This module supports check mode.
- When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
"""
EXAMPLES = r"""
# Before 2.3, option 'dest' or 'name' was used instead of 'path'
- name: insert/update "Match User" configuration block in /etc/ssh/sshd_config
blockinfile:
path: /etc/ssh/sshd_config
block: |
Match User ansible-agent
PasswordAuthentication no
- name: insert/update eth0 configuration stanza in /etc/network/interfaces
(it might be better to copy files into /etc/network/interfaces.d/)
blockinfile:
path: /etc/network/interfaces
block: |
iface eth0 inet static
address 192.0.2.23
netmask 255.255.255.0
- name: insert/update configuration using a local file
blockinfile:
block: "{{ lookup('file', './local/ssh_config') }}"
dest: "/etc/ssh/ssh_config"
backup: yes
- name: insert/update HTML surrounded by custom markers after <body> line
blockinfile:
path: /var/www/html/index.html
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
insertafter: "<body>"
content: |
<h1>Welcome to {{ ansible_hostname }}</h1>
<p>Last updated on {{ ansible_date_time.iso8601 }}</p>
- name: remove HTML as well as surrounding markers
blockinfile:
path: /var/www/html/index.html
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
content: ""
- name: Add mappings to /etc/hosts
blockinfile:
path: /etc/hosts
block: |
{{ item.ip }} {{ item.name }}
marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
with_items:
- { name: host1, ip: 10.10.1.10 }
- { name: host2, ip: 10.10.1.11 }
- { name: host3, ip: 10.10.1.12 }
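# A hypothetical additional example (the path and regex below are placeholders)
# showing insertbefore, which places the block before the last match of the
# given regular expression:
- name: insert/update a block before the final "exit 0" line
  blockinfile:
    path: /etc/rc.local
    insertbefore: '^exit 0'
    block: |
      logger "rc.local reached"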
"""
import re
import os
import tempfile
from ansible.module_utils.six import b
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
def write_changes(module, contents, path):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd, 'wb')
f.write(contents)
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % tmpfile)
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_file_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, aliases=['dest', 'destfile', 'name'], type='path'),
state=dict(default='present', choices=['absent', 'present']),
marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),
block=dict(default='', type='str', aliases=['content']),
insertafter=dict(default=None),
insertbefore=dict(default=None),
create=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
validate=dict(default=None, type='str'),
),
mutually_exclusive=[['insertbefore', 'insertafter']],
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
path = params['path']
if module.boolean(params.get('follow', None)):
path = os.path.realpath(path)
if os.path.isdir(path):
module.fail_json(rc=256,
msg='Path %s is a directory !' % path)
path_exists = os.path.exists(path)
if not path_exists:
if not module.boolean(params['create']):
module.fail_json(rc=257,
msg='Path %s does not exist !' % path)
original = None
lines = []
else:
f = open(path, 'rb')
original = f.read()
f.close()
lines = original.splitlines()
insertbefore = params['insertbefore']
insertafter = params['insertafter']
block = to_bytes(params['block'])
marker = to_bytes(params['marker'])
present = params['state'] == 'present'
if not present and not path_exists:
module.exit_json(changed=False, msg="File %s not present" % path)
if insertbefore is None and insertafter is None:
insertafter = 'EOF'
if insertafter not in (None, 'EOF'):
insertre = re.compile(insertafter)
elif insertbefore not in (None, 'BOF'):
insertre = re.compile(insertbefore)
else:
insertre = None
marker0 = re.sub(b(r'{mark}'), b('BEGIN'), marker)
marker1 = re.sub(b(r'{mark}'), b('END'), marker)
if present and block:
        # Escape sequences like '\n' need to be handled in Ansible 1.x
if module.ansible_version.startswith('1.'):
block = re.sub('', block, '')
blocklines = [marker0] + block.splitlines() + [marker1]
else:
blocklines = []
n0 = n1 = None
for i, line in enumerate(lines):
if line == marker0:
n0 = i
if line == marker1:
n1 = i
if None in (n0, n1):
n0 = None
if insertre is not None:
for i, line in enumerate(lines):
if insertre.search(line):
n0 = i
if n0 is None:
n0 = len(lines)
elif insertafter is not None:
n0 += 1
elif insertbefore is not None:
n0 = 0 # insertbefore=BOF
else:
n0 = len(lines) # insertafter=EOF
elif n0 < n1:
lines[n0:n1+1] = []
else:
lines[n1:n0+1] = []
n0 = n1
lines[n0:n0] = blocklines
if lines:
result = b('\n').join(lines)
if original is None or original.endswith(b('\n')):
result += b('\n')
else:
result = ''
if original == result:
msg = ''
changed = False
elif original is None:
msg = 'File created'
changed = True
elif not blocklines:
msg = 'Block removed'
changed = True
else:
msg = 'Block inserted'
changed = True
if changed and not module.check_mode:
if module.boolean(params['backup']) and path_exists:
module.backup_local(path)
write_changes(module, result, path)
if module.check_mode and not path_exists:
module.exit_json(changed=changed, msg=msg)
msg, changed = check_file_attrs(module, changed, msg)
module.exit_json(changed=changed, msg=msg)
if __name__ == '__main__':
main()
| gpl-3.0 |
AnishShah/tensorflow | tensorflow/contrib/data/python/kernel_tests/serialization/dataset_constructor_serialization_test.py | 14 | 3762 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the dataset constructors serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.platform import test
class FromTensorsSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_tensor_dataset(self, variable_array):
components = (variable_array, np.array([1, 2, 3]), np.array(37.0))
return dataset_ops.Dataset.from_tensors(components)
def testFromTensorsCore(self):
# Equal length components
arr = np.array(1)
num_outputs = 1
diff_arr = np.array(2)
self.run_core_tests(lambda: self._build_tensor_dataset(arr),
lambda: self._build_tensor_dataset(diff_arr),
num_outputs)
class FromTensorSlicesSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_tensor_slices_dataset(self, components):
return dataset_ops.Dataset.from_tensor_slices(components)
def testFromTensorSlicesCore(self):
# Equal length components
components = (np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0]))
diff_comp = (np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[5], [6], [7], [8]]), 22),
np.array([1.0, 2.0, 3.0, 4.0]))
dict_components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
self.run_core_tests(lambda: self._build_tensor_slices_dataset(components),
lambda: self._build_tensor_slices_dataset(diff_comp), 4)
self.run_core_tests(
lambda: self._build_tensor_slices_dataset(dict_components), None, 3)
class FromSparseTensorSlicesSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_sparse_tensor_slice_dataset(self, slices):
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))],
dtype=np.int64)
values = np.array([val for s in slices for val in s], dtype=np.float64)
dense_shape = np.array(
[len(slices), max(len(s) for s in slices) + 1], dtype=np.int64)
sparse_components = sparse_tensor.SparseTensor(indices, values, dense_shape)
return dataset_ops.Dataset.from_sparse_tensor_slices(sparse_components)
def testFromSparseTensorSlicesCore(self):
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
diff_slices = [[1., 2.], [2.], [2., 3., 4.], [], [], []]
self.run_core_tests(
lambda: self._build_sparse_tensor_slice_dataset(slices),
lambda: self._build_sparse_tensor_slice_dataset(diff_slices),
9,
sparse_tensors=True)
if __name__ == "__main__":
test.main()
| apache-2.0 |
javiergarridomellado/Empresa_django | devcodela/lib/python2.7/site-packages/django/contrib/contenttypes/management.py | 96 | 2903 | from django.contrib.contenttypes.models import ContentType
from django.db import DEFAULT_DB_ALIAS, router
from django.db.models import get_apps, get_models, signals
from django.utils.encoding import smart_text
from django.utils import six
from django.utils.six.moves import input
def update_contenttypes(app, created_models, verbosity=2, db=DEFAULT_DB_ALIAS, **kwargs):
"""
Creates content types for models in the given app, removing any model
entries that no longer have a matching model class.
"""
if not router.allow_syncdb(db, ContentType):
return
ContentType.objects.clear_cache()
app_models = get_models(app)
if not app_models:
return
# They all have the same app_label, get the first one.
app_label = app_models[0]._meta.app_label
app_models = dict(
(model._meta.object_name.lower(), model)
for model in app_models
)
# Get all the content types
content_types = dict(
(ct.model, ct)
for ct in ContentType.objects.using(db).filter(app_label=app_label)
)
to_remove = [
ct
for (model_name, ct) in six.iteritems(content_types)
if model_name not in app_models
]
cts = [
ContentType(
name=smart_text(model._meta.verbose_name_raw),
app_label=app_label,
model=model_name,
)
for (model_name, model) in six.iteritems(app_models)
if model_name not in content_types
]
ContentType.objects.using(db).bulk_create(cts)
if verbosity >= 2:
for ct in cts:
print("Adding content type '%s | %s'" % (ct.app_label, ct.model))
# Confirm that the content type is stale before deletion.
if to_remove:
if kwargs.get('interactive', False):
content_type_display = '\n'.join([
' %s | %s' % (ct.app_label, ct.model)
for ct in to_remove
])
ok_to_delete = input("""The following content types are stale and need to be deleted:
%s
Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.
Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
else:
ok_to_delete = False
if ok_to_delete == 'yes':
for ct in to_remove:
if verbosity >= 2:
print("Deleting stale content type '%s | %s'" % (ct.app_label, ct.model))
ct.delete()
else:
if verbosity >= 2:
print("Stale content types remain.")
def update_all_contenttypes(verbosity=2, **kwargs):
for app in get_apps():
update_contenttypes(app, None, verbosity, **kwargs)
signals.post_syncdb.connect(update_contenttypes)
if __name__ == "__main__":
update_all_contenttypes()
| gpl-2.0 |
bigdatauniversity/edx-platform | common/djangoapps/util/request.py | 163 | 1285 | """ Utility functions related to HTTP requests """
import re
from django.conf import settings
from microsite_configuration import microsite
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
COURSE_REGEX = re.compile(r'^.*?/courses/{}'.format(settings.COURSE_ID_PATTERN))
def safe_get_host(request):
"""
Get the host name for this request, as safely as possible.
If ALLOWED_HOSTS is properly set, this calls request.get_host;
otherwise, this returns whatever settings.SITE_NAME is set to.
This ensures we will never accept an untrusted value of get_host()
"""
if isinstance(settings.ALLOWED_HOSTS, (list, tuple)) and '*' not in settings.ALLOWED_HOSTS:
return request.get_host()
else:
return microsite.get_value('site_domain', settings.SITE_NAME)
def course_id_from_url(url):
"""
Extracts the course_id from the given `url`.
"""
if not url:
return None
match = COURSE_REGEX.match(url)
if match is None:
return None
course_id = match.group('course_id')
if course_id is None:
return None
try:
return SlashSeparatedCourseKey.from_deprecated_string(course_id)
except InvalidKeyError:
return None
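# Illustrative sketch (hypothetical URL; the exact shape accepted depends on
# settings.COURSE_ID_PATTERN, from which COURSE_REGEX is built):
#
#     course_id_from_url('/courses/org/number/run/courseware')
#     # -> a SlashSeparatedCourseKey for org/number/run, or None when the URL
#     #    does not match COURSE_REGEX or the course id is invalid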
| agpl-3.0 |
ltilve/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/httplib2/python2/httplib2/socks.py | 811 | 18459 | """SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import base64
import socket
import struct
import sys
if getattr(socket, 'socket', None) is None:
raise ImportError('socket.socket missing, proxy support unusable')
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
self.__httptunnel = True
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count-len(data))
if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def sendall(self, content, *args):
""" override socket.socket.sendall method to rewrite the header
for non-tunneling proxies if needed
"""
if not self.__httptunnel:
content = self.__rewriteproxy(content)
return super(socksocket, self).sendall(content, *args)
def __rewriteproxy(self, header):
""" rewrite HTTP request headers to support non-tunneling proxies
(i.e. those which do not support the CONNECT method).
This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
"""
host, endpt = None, None
hdrs = header.split("\r\n")
for hdr in hdrs:
if hdr.lower().startswith("host:"):
host = hdr
elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
endpt = hdr
if host and endpt:
hdrs.remove(host)
hdrs.remove(endpt)
host = host.split(" ")[1]
endpt = endpt.split(" ")
if (self.__proxy[4] != None and self.__proxy[5] != None):
hdrs.insert(0, self.__getauthheader())
hdrs.insert(0, "Host: %s" % host)
hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
return "\r\n".join(hdrs)
def __getauthheader(self):
auth = self.__proxy[4] + ":" + self.__proxy[5]
return "Proxy-Authorization: Basic " + base64.b64encode(auth)
def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.__proxy = (proxytype, addr, port, rdns, username, password)
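    # Per-socket usage sketch (hypothetical addresses, shown only to illustrate
    # the setproxy()/connect() call order; when no port is given, connect()
    # falls back to 1080 for SOCKS and 8080 for HTTP proxies):
    #
    #     s = socksocket()
    #     s.setproxy(PROXY_TYPE_SOCKS5, "proxy.example.com")
    #     s.connect(("www.example.com", 80))
    #     s.sendall("GET / HTTP/1.0\r\n\r\n")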
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
if chosenauth[1] == chr(0xFF).encode():
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack('BBB', 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2])<=8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self,destaddr,destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
self.close()
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if rmtrslv != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport):
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
headers += ["Host: ", destaddr, "\r\n"]
if (self.__proxy[4] != None and self.__proxy[5] != None):
headers += [self.__getauthheader(), "\r\n"]
headers.append("\r\n")
self.sendall("".join(headers).encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
resp = resp + self.recv(1)
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (not isinstance(destpair[0], basestring)) or (type(destpair[1]) != int):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1],portnum))
if destpair[1] == 443:
self.__negotiatehttp(destpair[0],destpair[1])
else:
self.__httptunnel = False
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
| bsd-3-clause |
qnu/paratrac | fs/data.py | 1 | 16469 | #############################################################################
# ParaTrac: Scalable Tracking Tools for Parallel Applications
# Copyright (C) 2009,2010 Nan Dun <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
#
# fs/data.py
# Filesystem Trace Database
#
import os
import sys
import numpy  # used by the proc_stat/proc_cdf/sysc_stat helpers below
from modules.utils import SYSCALL, list_intersect
from modules import utils
from modules import num
from modules.data import Database as CommonDatabase
class Database(CommonDatabase):
def __init__(self, path):
CommonDatabase.__init__(self, path)
# Only attributes can be accurately queried
self.SYSC_ATTR = ["iid", "stamp", "pid", "sysc", "fid", "res",
"elapsed", "aux1", "aux2"]
self.FILE_ATTR = ["iid", "fid", "path"]
self.PROC_ATTR = ["iid", "pid", "ppid", "live",
"res", "cmdline", "environ"]
def _set_tabs(self):
self.tab["runtime"] = "item TEXT, value TEXT"
self.tab["file"] = "iid INTEGER, fid INTEGER, path TEXT"
self.tab["sysc"] = "iid INTEGER, stamp DOUBLE, pid INTEGER, " \
"sysc INTEGER, fid INTEGER, res INTEGER, elapsed DOUBLE, " \
"aux1 INTEGER, aux2 INTEGER"
self.tab["proc"] = "iid INTGER, pid INTEGER, ppid INTEGER, " \
"live INTEGER, res INTEGER, btime FLOAT, elapsed FLOAT, " \
"utime FLOAT, stime FLOAT, cmdline TEXT, environ TEXT"
def import_logs(self, logdir=None):
if logdir is None:
logdir = os.path.dirname(self.db)
self._create_tabs(True)
iid = 0
runtime = {}
f = open("%s/runtime.log" % logdir)
for l in f.readlines():
item, val = l.strip().split(":", 1)
self.cur.execute("INSERT INTO runtime VALUES (?,?)", (item, val))
if val.isdigit():
runtime[item] = eval(val)
else:
runtime[item] = "%s" % val
f.close()
f = open("%s/file.log" % logdir)
for l in f.readlines():
fid, path = l.strip().split(":", 1)
self.cur.execute("INSERT INTO file VALUES (?,?,?)",
(iid, fid, path))
f.close()
f = open("%s/sysc.log" % logdir)
btime = None
for l in f.readlines():
stamp,pid,sysc,fid,res,elapsed,aux1,aux2 = l.strip().split(",")
if not btime: btime = float(stamp)
stamp = "%f" % (float(stamp) - btime)
self.cur.execute("INSERT INTO sysc VALUES (?,?,?,?,?,?,?,?,?)",
(iid,stamp,pid,sysc,fid,res,elapsed,aux1,aux2))
f.close()
# import process logs according to the accuracy of information
procs = set()
CLK_TCK = runtime['clktck']
SYS_BTIME = runtime['sysbtime']
have_taskstat_log = False
if os.path.exists("%s/taskstat.log" % logdir):
f = open("%s/taskstat.log" % logdir)
for l in f.readlines():
pid,ppid,live,res,btime,elapsed,utime,stime,cmd \
= l.strip().split(",")
# btime (sec), elapsed (usec), utime (usec), stime (usec)
elapsed = float(elapsed) / 1000000.0
utime = float(utime) / 1000000.0
stime = float(stime) / 1000000.0
self.cur.execute("INSERT INTO proc (iid,pid,ppid,live,res,"
"btime,elapsed,utime,stime) VALUES (?,?,?,?,?,?,?,?,?)",
(iid,pid,ppid,live,res,btime,elapsed,utime,stime))
f.close()
have_taskstat_log = True
have_ptrace_log = False
if os.path.exists("%s/ptrace.log" % logdir):
f = open("%s/ptrace.log" % logdir)
for l in f.readlines():
pid,ppid,start,stamp,utime,stime,cmd,env \
= l.strip().split(",")
if not have_taskstat_log:
# calculate real btime and elapsed
btime = SYS_BTIME + float(start) / CLK_TCK
elapsed = float(stamp) - btime
utime = float(utime) / CLK_TCK
stime = float(stime) / CLK_TCK
self.cur.execute("INSERT INTO proc (iid,pid,ppid,live,res,"
"btime,elapsed,utime,stime,cmdline,environ) "
"VALUES (?,?,?,?,?,?,?,?,?)",
(iid,pid,ppid,0,0,btime,elapsed,utime,stime,cmd,env))
else:
self.cur.execute("UPDATE proc SET cmdline=?,environ=? WHERE "
"pid=%s and ppid=%s" % (pid, ppid), (cmd, env))
procs.add(eval(pid))
f.close()
have_ptrace_log = True
if os.path.exists("%s/proc.log" % logdir):
f = open("%s/proc.log" % logdir)
for l in f.readlines():
flag,pid,ppid,start,stamp,utime,stime,cmd,env \
= l.strip().split("|#|")
if not flag or eval(pid) in procs: # just ignore start status right now
continue
if not have_taskstat_log:
btime = SYS_BTIME + float(start) / CLK_TCK
elapsed = float(stamp) - btime
utime = float(utime) / CLK_TCK
stime = float(stime) / CLK_TCK
self.cur.execute("INSERT INTO proc (iid,pid,ppid,live,res,"
"btime,elapsed,utime,stime,cmdline,environ) "
"VALUES (?,?,?,?,?,?,?,?,?,?,?)",
(iid,pid,ppid,0,0,btime,elapsed,utime,stime,cmd,env))
# TODO: integrating ptrace log
else:
self.cur.execute("UPDATE proc SET cmdline=?,environ=? WHERE "
"pid=%s and ppid=%s" % (pid, ppid), (cmd, env))
f.close()
self.con.commit()
# runtime table routines
def runtime_sel(self, fields="*"):
self.cur.execute("SELECT %s FROM runtime" % fields)
return self.cur.fetchall()
def runtime_get_value(self, item):
self.cur.execute("SELECT value FROM runtime WHERE item=?", (item,))
res = self.cur.fetchone()
if res is None: return None
else: return res[0]
def runtime_values(self):
self.cur.execute('SELECT item,value FROM runtime')
return self.cur.fetchall()
# syscall table routines
def sysc_sel(self, sysc, fields="*"):
self.cur.execute("SELECT %s FROM sysc WHERE sysc=?"
% fields, (sysc,))
return self.cur.fetchall()
def sysc_count(self, sysc):
self.cur.execute("SELECT COUNT(*) FROM sysc WHERE sysc=?", (sysc,))
return self.cur.fetchone()[0]
def sysc_sum(self, sysc, field):
cur = self.con.cursor()
cur.execute("SELECT SUM(%s) FROM sysc WHERE sysc=?"
"GROUP BY sysc" % field, (sysc,))
res = cur.fetchone()
if res is None: # No such system call
return 0
else:
return res[0]
def sysc_sum2(self, columns, **where):
columns = columns.split(',')
columns = ','.join(map(lambda s:"SUM(%s)"%s, columns))
qstr = "SELECT %s FROM sysc" % columns
wstr = " and ".join(map(lambda k:"%s=%s" % (k, where[k]),
utils.list_intersect([self.SYSC_ATTR, where.keys()])))
if wstr != "": qstr = "%s WHERE %s" % (qstr, wstr)
self.cur.execute(qstr)
return self.cur.fetchall()
def sysc_avg(self, sysc, field):
cur = self.con.cursor()
cur.execute("SELECT AVG(%s) FROM sysc WHERE sysc=?"
"GROUP BY sysc" % field, (sysc,))
res = cur.fetchone()
if res is None: # No such system call
return 0
else:
return res[0]
def sysc_std(self, sysc, field):
cur = self.con.cursor()
cur.execute("SELECT %s FROM sysc WHERE sysc=?" % field, (sysc,))
vlist = map(lambda x:x[0], cur.fetchall())
return num.num_std(vlist)
def sysc_cdf(self, sysc, field, numbins=None):
"""if numbins is None, use all data"""
self.cur.execute("SELECT %s FROM sysc WHERE sysc=?"
% field, (sysc,))
vlist = map(lambda x:x[0], self.cur.fetchall())
vlist.sort()
total = sum(vlist)
data = []
curr_sum = 0.0
for v in vlist:
curr_sum += v
if total == 0: ratio = 0
else: ratio = curr_sum/total
data.append((v, ratio))
return data
def sysc_sel_procs_by_file(self, iid, sysc, fid, fields="*"):
self.cur.execute("SELECT %s FROM sysc WHERE "
"iid=? AND sysc=? AND fid=? GROUP BY pid" % fields,
(iid, sysc, fid))
return self.cur.fetchall()
# file table routines
def file_sel(self, columns, **where):
qstr = "SELECT %s FROM file" % columns
wstr = " and ".join(map(lambda k:"%s=%s" % (k, where[k]),
utils.list_intersect([self.FILE_ATTR, where.keys()])))
if wstr != "": qstr = "%s WHERE %s" % (qstr, wstr)
self.cur.execute(qstr)
return self.cur.fetchall()
def files(self, **attr):
"""Return a list of files IDs that satisfy specified attributes"""
qstr = "SELECT fid FROM file" # SQL query string
if attr == {}:
self.cur.execute(qstr)
return map(lambda x:x[0], self.cur.fetchall())
# Select from procs table
qstr = "SELECT fid FROM file"
wstr = " and ".join(map(lambda k:"%s=%s" % (k, attr[k]),
list_intersect([self.FILE_ATTR, attr.keys()])))
if wstr != "": qstr = "%s WHERE %s GROUP BY file" % (qstr, wstr)
self.cur.execute(qstr)
return map(lambda x:x[0], self.cur.fetchall())
# TODO:ASAP
# Select from sysc table
def procs(self, **attr):
"""Return a list of processes IDs that satisfy specified attributes"""
qstr = "SELECT pid FROM proc" # SQL query string
if attr == {}:
self.cur.execute(qstr)
return map(lambda x:x[0], self.cur.fetchall())
procs = []
if "sysc" in attr.keys(): attr["sysc"] = SYSCALL[attr["sysc"]]
# Select from syscall table
qstr = "SELECT pid FROM syscall"
wstr = " and ".join(map(lambda k:"%s=%s" % (k, attr[k]),
list_intersect([self.SYSC_ATTR, attr.keys()])))
if wstr != "":
qstr = "%s WHERE %s GROUP BY pid" % (qstr, wstr)
self.cur.execute(qstr)
procs_sc = map(lambda x:x[0], self.cur.fetchall())
procs.extend(procs_sc)
# Select from procs table
qstr = "SELECT pid FROM proc"
wstr = " and ".join(map(lambda k:"%s=%s" % (k, attr[k]),
list_intersect([self.PROC_ATTR, attr.keys()])))
if wstr != "":
qstr = "%s WHERE %s GROUP BY pid" % (qstr, wstr)
self.cur.execute(qstr)
procs_pc = map(lambda x:x[0], self.cur.fetchall())
if len(procs) > 0: # procs added from syscall
procs = list_intersect([procs, procs_pc])
else:
procs.extend(procs_pc)
return procs
def proc_sel(self, columns, **where):
qstr = "SELECT %s FROM proc" % columns
wstr = " and ".join(map(lambda k:"%s=%s" % (k, where[k]),
utils.list_intersect([self.PROC_ATTR, where.keys()])))
if wstr != "": qstr = "%s WHERE %s" % (qstr, wstr)
self.cur.execute(qstr)
return self.cur.fetchall()
def proc_sum(self, field):
self.cur.execute("SELECT SUM(%s) FROM proc" % field)
res = self.cur.fetchone()
if res is None: # No such system call
return 0
else:
return res[0]
def proc_avg(self, field):
self.cur.execute("SELECT AVG(%s) FROM proc" % field)
res = self.cur.fetchone()
if res is None: # No such system call
return 0
else:
return res[0]
def proc_std(self, field):
self.cur.execute("SELECT %s FROM proc" % field)
vlist = map(lambda x:x[0], self.cur.fetchall())
if len(vlist) == 0: return 0
return num.num_std(vlist)
def proc_sum2(self, columns, **where):
columns = columns.split(',')
columns = ','.join(map(lambda s:"SUM(%s)"%s, columns))
qstr = "SELECT %s FROM proc" % columns
wstr = " and ".join(map(lambda k:"%s=%s" % (k, where[k]),
utils.list_intersect([self.PROC_ATTR, where.keys()])))
if wstr != "": qstr = "%s WHERE %s" % (qstr, wstr)
        self.cur.execute(qstr)
        return self.cur.fetchall()
def proc_cmdline(self, iid, pid, fullcmd=True):
self.cur.execute("SELECT cmdline FROM proc "
"WHERE iid=? and pid=?", (iid, pid))
res = self.cur.fetchone()[0]
if fullcmd: return res
else: return res.split(" ", 1)[0]
def proc_io_sum_elapsed_and_bytes(self, sysc, iid, pid, fid):
assert sysc == SYSCALL['read'] or sysc == SYSCALL['write']
self.cur.execute("SELECT SUM(elapsed),SUM(aux1) FROM syscall "
"WHERE sysc=? and iid=? and pid=? and fid=?",
(sysc, iid, pid, fid))
return self.cur.fetchone()
def proc_stat(self, column, **attr):
"""Return (sum, avg, stddev) of column of selected processes"""
qstr = "SELECT %s FROM proc" % column
wstr = " and ".join(map(lambda k:"%s=%s" % (k, attr[k]),
list_intersect([self.PROC_ATTR, attr.keys()])))
if wstr != "": qstr = "%s WHERE %s" % (qstr, wstr)
self.cur.execute(qstr)
values = map(lambda x:x[0], self.cur.fetchall())
return numpy.sum(values), numpy.mean(values), numpy.std(values)
def proc_cdf(self, column, numbins=None, **attr):
"""Return (sum, avg, stddev) of column of selected processes"""
qstr = "SELECT %s FROM proc" % column
wstr = " and ".join(map(lambda k:"%s=%s" % (k, attr[k]),
list_intersect([self.PROC_ATTR, attr.keys()])))
if wstr != "": qstr = "%s WHERE %s" % (qstr, wstr)
self.cur.execute(qstr)
values = map(lambda x:x[0], self.cur.fetchall())
values.sort()
total = numpy.sum(values)
cdf_data = []
curr_sum = 0.0
for v in values:
curr_sum += v
cdf_data.append((v, curr_sum/total))
return cdf_data
def sysc_stat(self, column, **attr):
"""Return (sum, avg, stddev) of column of selected processes"""
if "sysc" in attr.keys(): attr["sysc"] = SYSCALL[attr["sysc"]]
qstr = "SELECT %s FROM syscall" % column
wstr = " and ".join(map(lambda k:"%s=%s" % (k, attr[k]),
list_intersect([self.SYSC_ATTR, attr.keys()])))
if wstr != "": qstr = "%s WHERE %s" % (qstr, wstr)
self.cur.execute(qstr)
values = map(lambda x:x[0], self.cur.fetchall())
return numpy.sum(values), numpy.mean(values), numpy.std(values)
def proc_throughput(self, iid, pid, fid, sysc):
if sysc == "read" or sysc == "write":
self.cur.execute("SELECT SUM(elapsed),SUM(aux1) FROM sysc"
" WHERE iid=? and pid=? and fid=? and sysc=? GROUP BY pid",
(iid, pid, fid, SYSCALL[sysc]))
else:
self.cur.execute("SELECT SUM(elapsed),COUNT(sysc) FROM syscall"
" WHERE iid=? and pid=? and fid=? and sysc=? GROUP BY pid",
(iid, pid, fid, SYSCALL[sysc]))
return self.cur.fetchone()
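# Usage sketch (hypothetical paths; shown only to illustrate the intended call
# order -- import the trace logs once, then query the tables):
#
#     db = Database("/tmp/paratrac/trace.db")
#     db.import_logs("/tmp/paratrac")            # loads runtime/file/sysc/proc logs
#     n_reads = db.sysc_count(SYSCALL["read"])   # e.g. count read() system calls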
| gpl-3.0 |
DrXyzzy/smc | src/smc_pyutil/smc_pyutil/status.py | 1 | 1405 | #!/usr/bin/python
import json, os
SMC = os.environ['SMC']
os.chdir(SMC)
status = {}
def set(prop, val):
status[prop] = val
def read(prop, filename, strip=False, int_value=False, to_int=False):
try:
s = open(filename).read()
if strip:
s = s.strip()
if '.port' in prop:
try:
s = int(s)
            except (TypeError, ValueError):
pass
if int_value:
s = int(s.split('=')[1])
if to_int:
s = int(s)
status[prop] = s
except:
status[prop] = False
def main():
for daemon in ['local_hub', 'sage_server', 'console_server']:
pidfile = os.path.join(os.path.join(SMC, daemon), '%s.pid' % daemon)
if os.path.exists(pidfile):
try:
pid = int(open(pidfile).read())
os.kill(pid, 0)
set(daemon + '.pid', pid)
except:
set(daemon + '.pid', False)
else:
set(daemon + '.pid', False)
for name in [
'secret_token', 'local_hub/local_hub.port', 'local_hub/raw.port',
'console_server/console_server.port',
'sage_server/sage_server.port'
]:
to_int = 'port' in name
read(name.split('/')[-1], os.path.join(SMC, name), to_int=to_int)
print(json.dumps(status))
if __name__ == "__main__":
main()
| agpl-3.0 |
freakboy3742/django | tests/asgi/tests.py | 13 | 11078 | import asyncio
import sys
import threading
from pathlib import Path
from unittest import skipIf
from asgiref.sync import SyncToAsync
from asgiref.testing import ApplicationCommunicator
from django.contrib.staticfiles.handlers import ASGIStaticFilesHandler
from django.core.asgi import get_asgi_application
from django.core.signals import request_finished, request_started
from django.db import close_old_connections
from django.test import (
AsyncRequestFactory, SimpleTestCase, modify_settings, override_settings,
)
from django.utils.http import http_date
from .urls import test_filename
TEST_STATIC_ROOT = Path(__file__).parent / 'project' / 'static'
@skipIf(sys.platform == 'win32' and (3, 8, 0) < sys.version_info < (3, 8, 1), 'https://bugs.python.org/issue38563')
@override_settings(ROOT_URLCONF='asgi.urls')
class ASGITest(SimpleTestCase):
async_request_factory = AsyncRequestFactory()
def setUp(self):
request_started.disconnect(close_old_connections)
def tearDown(self):
request_started.connect(close_old_connections)
async def test_get_asgi_application(self):
"""
get_asgi_application() returns a functioning ASGI callable.
"""
application = get_asgi_application()
# Construct HTTP request.
scope = self.async_request_factory._base_scope(path='/')
communicator = ApplicationCommunicator(application, scope)
await communicator.send_input({'type': 'http.request'})
# Read the response.
response_start = await communicator.receive_output()
self.assertEqual(response_start['type'], 'http.response.start')
self.assertEqual(response_start['status'], 200)
self.assertEqual(
set(response_start['headers']),
{
(b'Content-Length', b'12'),
(b'Content-Type', b'text/html; charset=utf-8'),
},
)
response_body = await communicator.receive_output()
self.assertEqual(response_body['type'], 'http.response.body')
self.assertEqual(response_body['body'], b'Hello World!')
async def test_file_response(self):
"""
Makes sure that FileResponse works over ASGI.
"""
application = get_asgi_application()
# Construct HTTP request.
scope = self.async_request_factory._base_scope(path='/file/')
communicator = ApplicationCommunicator(application, scope)
await communicator.send_input({'type': 'http.request'})
# Get the file content.
with open(test_filename, 'rb') as test_file:
test_file_contents = test_file.read()
# Read the response.
response_start = await communicator.receive_output()
self.assertEqual(response_start['type'], 'http.response.start')
self.assertEqual(response_start['status'], 200)
headers = response_start['headers']
self.assertEqual(len(headers), 3)
expected_headers = {
b'Content-Length': str(len(test_file_contents)).encode('ascii'),
b'Content-Type': b'text/x-python',
b'Content-Disposition': b'inline; filename="urls.py"',
}
for key, value in headers:
try:
self.assertEqual(value, expected_headers[key])
except AssertionError:
# Windows registry may not be configured with correct
# mimetypes.
if sys.platform == 'win32' and key == b'Content-Type':
self.assertEqual(value, b'text/plain')
else:
raise
response_body = await communicator.receive_output()
self.assertEqual(response_body['type'], 'http.response.body')
self.assertEqual(response_body['body'], test_file_contents)
# Allow response.close() to finish.
await communicator.wait()
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.staticfiles'})
@override_settings(
STATIC_URL='static/',
STATIC_ROOT=TEST_STATIC_ROOT,
STATICFILES_DIRS=[TEST_STATIC_ROOT],
STATICFILES_FINDERS=[
'django.contrib.staticfiles.finders.FileSystemFinder',
],
)
async def test_static_file_response(self):
application = ASGIStaticFilesHandler(get_asgi_application())
# Construct HTTP request.
scope = self.async_request_factory._base_scope(path='/static/file.txt')
communicator = ApplicationCommunicator(application, scope)
await communicator.send_input({'type': 'http.request'})
# Get the file content.
file_path = TEST_STATIC_ROOT / 'file.txt'
with open(file_path, 'rb') as test_file:
test_file_contents = test_file.read()
# Read the response.
stat = file_path.stat()
response_start = await communicator.receive_output()
self.assertEqual(response_start['type'], 'http.response.start')
self.assertEqual(response_start['status'], 200)
self.assertEqual(
set(response_start['headers']),
{
(b'Content-Length', str(len(test_file_contents)).encode('ascii')),
(b'Content-Type', b'text/plain'),
(b'Content-Disposition', b'inline; filename="file.txt"'),
(b'Last-Modified', http_date(stat.st_mtime).encode('ascii')),
},
)
response_body = await communicator.receive_output()
self.assertEqual(response_body['type'], 'http.response.body')
self.assertEqual(response_body['body'], test_file_contents)
# Allow response.close() to finish.
await communicator.wait()
async def test_headers(self):
application = get_asgi_application()
communicator = ApplicationCommunicator(
application,
self.async_request_factory._base_scope(
path='/meta/',
headers=[
[b'content-type', b'text/plain; charset=utf-8'],
[b'content-length', b'77'],
[b'referer', b'Scotland'],
[b'referer', b'Wales'],
],
),
)
await communicator.send_input({'type': 'http.request'})
response_start = await communicator.receive_output()
self.assertEqual(response_start['type'], 'http.response.start')
self.assertEqual(response_start['status'], 200)
self.assertEqual(
set(response_start['headers']),
{
(b'Content-Length', b'19'),
(b'Content-Type', b'text/plain; charset=utf-8'),
},
)
response_body = await communicator.receive_output()
self.assertEqual(response_body['type'], 'http.response.body')
self.assertEqual(response_body['body'], b'From Scotland,Wales')
async def test_get_query_string(self):
application = get_asgi_application()
for query_string in (b'name=Andrew', 'name=Andrew'):
with self.subTest(query_string=query_string):
scope = self.async_request_factory._base_scope(
path='/',
query_string=query_string,
)
communicator = ApplicationCommunicator(application, scope)
await communicator.send_input({'type': 'http.request'})
response_start = await communicator.receive_output()
self.assertEqual(response_start['type'], 'http.response.start')
self.assertEqual(response_start['status'], 200)
response_body = await communicator.receive_output()
self.assertEqual(response_body['type'], 'http.response.body')
self.assertEqual(response_body['body'], b'Hello Andrew!')
async def test_disconnect(self):
application = get_asgi_application()
scope = self.async_request_factory._base_scope(path='/')
communicator = ApplicationCommunicator(application, scope)
await communicator.send_input({'type': 'http.disconnect'})
with self.assertRaises(asyncio.TimeoutError):
await communicator.receive_output()
async def test_wrong_connection_type(self):
application = get_asgi_application()
scope = self.async_request_factory._base_scope(path='/', type='other')
communicator = ApplicationCommunicator(application, scope)
await communicator.send_input({'type': 'http.request'})
msg = 'Django can only handle ASGI/HTTP connections, not other.'
with self.assertRaisesMessage(ValueError, msg):
await communicator.receive_output()
async def test_non_unicode_query_string(self):
application = get_asgi_application()
scope = self.async_request_factory._base_scope(path='/', query_string=b'\xff')
communicator = ApplicationCommunicator(application, scope)
await communicator.send_input({'type': 'http.request'})
response_start = await communicator.receive_output()
self.assertEqual(response_start['type'], 'http.response.start')
self.assertEqual(response_start['status'], 400)
response_body = await communicator.receive_output()
self.assertEqual(response_body['type'], 'http.response.body')
self.assertEqual(response_body['body'], b'')
async def test_request_lifecycle_signals_dispatched_with_thread_sensitive(self):
class SignalHandler:
"""Track threads handler is dispatched on."""
threads = []
def __call__(self, **kwargs):
self.threads.append(threading.current_thread())
signal_handler = SignalHandler()
request_started.connect(signal_handler)
request_finished.connect(signal_handler)
# Perform a basic request.
application = get_asgi_application()
scope = self.async_request_factory._base_scope(path='/')
communicator = ApplicationCommunicator(application, scope)
await communicator.send_input({'type': 'http.request'})
response_start = await communicator.receive_output()
self.assertEqual(response_start['type'], 'http.response.start')
self.assertEqual(response_start['status'], 200)
response_body = await communicator.receive_output()
self.assertEqual(response_body['type'], 'http.response.body')
self.assertEqual(response_body['body'], b'Hello World!')
# Give response.close() time to finish.
await communicator.wait()
# At this point, AsyncToSync does not have a current executor. Thus
# SyncToAsync falls-back to .single_thread_executor.
target_thread = next(iter(SyncToAsync.single_thread_executor._threads))
request_started_thread, request_finished_thread = signal_handler.threads
self.assertEqual(request_started_thread, target_thread)
self.assertEqual(request_finished_thread, target_thread)
request_started.disconnect(signal_handler)
request_finished.disconnect(signal_handler)
| bsd-3-clause |
360youlun/cmsplugin-bootstrap-carousel | cmsplugin_bootstrap_carousel/models_default.py | 1 | 3456 | # coding: utf-8
import os
from django.db import models
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils.translation import ugettext_lazy as _
from cms.models.pluginmodel import CMSPlugin
from PIL import Image
from cStringIO import StringIO
from . import config
class Carousel(CMSPlugin):
domid = models.CharField(max_length=50, verbose_name=_('Name'))
interval = models.IntegerField(default=5000)
def copy_relations(self, oldinstance):
for item in oldinstance.carouselitem_set.all():
item.pk = None
item.carousel = self
item.save()
def __unicode__(self):
return self.domid
class CarouselItem(models.Model):
carousel = models.ForeignKey(Carousel, verbose_name=_("Carousel"))
caption_title = models.CharField(max_length=100, blank=True, null=True, verbose_name=_("Caption Title"))
button_title = models.CharField(max_length=255, blank=True, verbose_name=_("Button Title"))
button_url = models.URLField(blank=True, verbose_name=_("Button URL"))
caption_content = models.TextField(blank=True, null=True, verbose_name=_("Caption Content"))
image = models.ImageField(upload_to=config.CAROUSEL_UPLOADS_FOLDER, null=True, verbose_name=_("Image"))
text_position = models.CharField(max_length=10, choices=config.CAROUSEL_TEXT_POSITIONS,
default=config.CAROUSEL_TEXT_POSITION_LEFT, verbose_name=_("Text Position"))
transition = models.CharField(max_length=30, choices=config.CAROUSEL_TRANSITION_CHOICES,
default=config.CAROUSEL_TRANS_NO_TRANSITION, verbose_name=_("Transition"))
start_position = models.CharField(max_length=20, choices=config.CAROUSEL_MOVEMENT_POSITION_CHOICES,
default=config.CAROUSEL_MOVEMENT_POSITION_LEFT_TOP_LABEL,
verbose_name=_("Start Position"))
end_position = models.CharField(max_length=20, choices=config.CAROUSEL_MOVEMENT_POSITION_CHOICES,
default=config.CAROUSEL_MOVEMENT_POSITION_LEFT_TOP_LABEL,
verbose_name=_("End Position"))
zoom = models.CharField(max_length=3, choices=config.CAROUSEL_ZOOM_CHOICES,
default=config.CAROUSEL_ZOOM_NO, verbose_name=_('Zoom'))
target = models.CharField(max_length=10, choices=config.CAROUSEL_OPEN_TAB_CHOICES,
default=config.CAROUSEL_OPEN_IN_CURRENT_TAB, verbose_name=_('Target'))
def save(self, *args, **kwargs):
if self.image:
img = Image.open(self.image.file)
if img.mode not in ('L', 'RGB'):
img = img.convert('RGB')
size = config.BOOTSTRAP_CAROUSEL_IMGSIZE
extension = config.BOOTSTRAP_CAROUSEL_FILE_EXTENSION
if size != img.size:
img.thumbnail(size, Image.ANTIALIAS)
temp_handle = StringIO()
img.save(temp_handle, extension)
temp_handle.seek(0)
suf = SimpleUploadedFile(os.path.split(self.image.name)[-1],
temp_handle.read(), content_type='image/%s' % extension)
fname = "%s.%s" % (os.path.splitext(self.image.name)[0], extension)
self.image.save(fname, suf, save=False)
        super(CarouselItem, self).save(*args, **kwargs)
| bsd-3-clause |
virtualopensystems/nova | nova/tests/network/test_manager.py | 1 | 147793 | # Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import fixtures
import mock
import mox
import netaddr
from oslo.config import cfg
from oslo import messaging
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova import objects
from nova.objects import quotas as quotas_obj
from nova.objects import virtual_interface as vif_obj
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import quota
from nova import test
from nova.tests import fake_instance
from nova.tests import fake_ldap
from nova.tests import fake_network
from nova.tests import matchers
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_floating_ip
from nova.tests.objects import test_network
from nova.tests.objects import test_service
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
fake_inst = fake_instance.fake_db_instance
networks = [{'id': 0,
'uuid': FAKEUUID,
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'dhcp_server': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'},
{'id': 1,
'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'dhcp_server': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '192.168.1.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '2001:db9:0:1::10',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': 0},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': 0},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_uuid': 0}]
class FlatNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = network_manager.FlatManager(host=HOST)
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def test_get_instance_nw_info(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
for i, vif in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None,
'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': '2001:db8:0:1::1',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'ovs_interfaceid': None,
'qbh_params': None,
'qbg_params': None,
'should_create_vlan': False,
'should_create_bridge': False,
'ip': '192.168.%d.%03d' % (nid, nid + 99),
'ip_v6': '2001:db8:0:1::%x' % nid,
'netmask': '255.255.255.0',
'netmask_v6': 64,
'physical_network': None,
}
network = vif['network']
net_v4 = vif['network']['subnets'][0]
net_v6 = vif['network']['subnets'][1]
vif_dict = dict(bridge=network['bridge'],
cidr=net_v4['cidr'],
cidr_v6=net_v6['cidr'],
id=vif['id'],
multi_host=network.get_meta('multi_host', False),
injected=network.get_meta('injected', False),
bridge_interface=
network.get_meta('bridge_interface'),
vlan=network.get_meta('vlan'),
broadcast=str(net_v4.as_netaddr().broadcast),
dhcp_server=network.get_meta('dhcp_server',
net_v4['gateway']['address']),
dns=[ip['address'] for ip in net_v4['dns']],
gateway=net_v4['gateway']['address'],
gateway_v6=net_v6['gateway']['address'],
label=network['label'],
mac=vif['address'],
rxtx_cap=vif.get_meta('rxtx_cap'),
vif_type=vif['type'],
vif_devname=vif.get('devname'),
vif_uuid=vif['id'],
ovs_interfaceid=vif.get('ovs_interfaceid'),
qbh_params=vif.get('qbh_params'),
qbg_params=vif.get('qbg_params'),
should_create_vlan=
network.get_meta('should_create_vlan', False),
should_create_bridge=
network.get_meta('should_create_bridge',
False),
ip=net_v4['ips'][i]['address'],
ip_v6=net_v6['ips'][i]['address'],
netmask=str(net_v4.as_netaddr().netmask),
netmask_v6=net_v6.as_netaddr()._prefixlen,
physical_network=
network.get_meta('physical_network', None))
self.assertThat(vif_dict, matchers.DictMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
ip['network'] = dict(test_network.fake_network,
**networks[0])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_valid_fixed_ipv6(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'2001:db9:0:1::10')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **networks[1])])
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
self.assertEqual(4, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_reserved_start_end(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, dhcp_server='192.168.0.11',
allowed_start='192.168.0.10',
allowed_end='192.168.0.245')
self.assertEqual(1, len(nets))
network = nets[0]
# gateway defaults to beginning of allowed_start
self.assertEqual('192.168.0.10', network['gateway'])
# vpn_server doesn't conflict with dhcp_start
self.assertEqual('192.168.0.12', network['vpn_private_address'])
# dhcp_start doesn't conflict with dhcp_server
self.assertEqual('192.168.0.13', network['dhcp_start'])
# NOTE(vish): 10 from the beginning, 10 from the end, and
# 1 for the gateway, 1 for the dhcp server,
# 1 for the vpn server
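        # In total that is 10 + 10 + 1 + 1 + 1 = 23 reserved addresses,
        # which is what the assertion below expects.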
self.assertEqual(23, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_reserved_start_out_of_range(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AddressOutOfRange,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 256, allowed_start='192.168.1.10')
def test_validate_reserved_end_invalid(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidAddress,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 256, allowed_end='invalid')
def test_validate_cidr_invalid(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidCidr,
self.network.create_networks,
context_admin, 'fake', 'invalid', False,
1, 256)
def test_validate_non_int_size(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidIntValue,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 'invalid')
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
@mock.patch('nova.objects.quotas.Quotas.reserve')
def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['id'])
exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
inst)
reserve.assert_called_once_with(self.context, fixed_ips=1,
project_id=exp_project,
user_id=exp_user)
@mock.patch('nova.objects.quotas.Quotas.reserve')
def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['uuid'])
exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
inst)
reserve.assert_called_once_with(self.context, fixed_ips=1,
project_id=exp_project,
user_id=exp_user)
def test_mini_dns_driver(self):
zone1 = "example.org"
zone2 = "example.com"
driver = self.network.instance_dns_manager
driver.create_entry("hostone", "10.0.0.1", "A", zone1)
driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
driver.delete_entry("hostone", zone1)
driver.modify_address("hostfour", "10.0.0.1", zone1)
driver.modify_address("hostthree", "10.0.0.1", zone1)
names = driver.get_entries_by_address("10.0.0.1", zone1)
self.assertEqual(len(names), 2)
self.assertIn('hostthree', names)
self.assertIn('hostfour', names)
names = driver.get_entries_by_address("10.0.0.5", zone2)
self.assertEqual(len(names), 1)
self.assertIn('hostfive', names)
addresses = driver.get_entries_by_name("hosttwo", zone1)
self.assertEqual(len(addresses), 1)
self.assertIn('10.0.0.2', addresses)
self.assertRaises(exception.InvalidInput,
driver.create_entry,
"hostname",
"10.10.10.10",
"invalidtype",
zone1)
def test_mini_dns_driver_with_mixed_case(self):
zone1 = "example.org"
driver = self.network.instance_dns_manager
driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 1)
for n in addresses:
driver.delete_entry(n, zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 0)
@mock.patch('nova.objects.quotas.Quotas.reserve')
def test_instance_dns(self, reserve):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
fixedip = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None
).AndReturn(fixedip)
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['uuid'])
instance_manager = self.network.instance_dns_manager
addresses = instance_manager.get_entries_by_name(HOST,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip['address'])
addresses = instance_manager.get_entries_by_name(FAKEUUID,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip['address'])
exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
inst)
reserve.assert_called_once_with(self.context, fixed_ips=1,
project_id=exp_project,
user_id=exp_user)
def test_allocate_floating_ip(self):
self.assertIsNone(self.network.allocate_floating_ip(self.context,
1, None))
def test_deallocate_floating_ip(self):
self.assertIsNone(self.network.deallocate_floating_ip(self.context,
1, None))
def test_associate_floating_ip(self):
self.assertIsNone(self.network.associate_floating_ip(self.context,
None, None))
def test_disassociate_floating_ip(self):
self.assertIsNone(self.network.disassociate_floating_ip(self.context,
None, None))
def test_get_networks_by_uuids_ordering(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
res = self.network._get_networks_by_uuids(self.context,
requested_networks)
self.assertEqual(res[0]['id'], 1)
self.assertEqual(res[1]['id'], 0)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.quotas.Quotas.reserve')
@mock.patch('nova.objects.quotas.ids_from_instance')
def test_allocate_calculates_quota_auth(self, util_method, reserve,
get_by_uuid):
inst = objects.Instance()
inst['uuid'] = 'nosuch'
get_by_uuid.return_value = inst
reserve.side_effect = exception.OverQuota(overs='testing')
util_method.return_value = ('foo', 'bar')
self.assertRaises(exception.FixedIpLimitExceeded,
self.network.allocate_fixed_ip,
self.context, 123, {'uuid': 'nosuch'})
util_method.assert_called_once_with(self.context, inst)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
@mock.patch('nova.objects.quotas.Quotas.reserve')
@mock.patch('nova.objects.quotas.ids_from_instance')
def test_deallocate_calculates_quota_auth(self, util_method, reserve,
get_by_address):
inst = objects.Instance(uuid='fake-uuid')
fip = objects.FixedIP(instance_uuid='fake-uuid',
virtual_interface_id=1)
get_by_address.return_value = fip
util_method.return_value = ('foo', 'bar')
# This will fail right after the reserve call when it tries
# to look up the fake instance we created above
self.assertRaises(exception.InstanceNotFound,
self.network.deallocate_fixed_ip,
self.context, '1.2.3.4', instance=inst)
util_method.assert_called_once_with(self.context, inst)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
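        # The assertion below verifies that the netaddr.IPAddress passed in
        # is converted to a plain string before FixedIP.associate is called.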
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.virtual_interface.VirtualInterface'
'.get_by_instance_and_network')
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
@mock.patch('nova.objects.fixed_ip.FixedIP.save')
def test_allocate_fixed_ip_cleanup(self,
mock_fixedip_save,
mock_fixedip_associate,
mock_fixedip_disassociate,
mock_vif_get,
mock_instance_get):
address = netaddr.IPAddress('1.2.3.4')
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=address,
virtual_interface_id=1)
mock_fixedip_associate.return_value = fip
instance = objects.Instance(context=self.context)
instance.create()
mock_instance_get.return_value = instance
mock_vif_get.return_value = vif_obj.VirtualInterface(
instance_uuid='fake-uuid', id=1)
with contextlib.nested(
mock.patch.object(self.network, '_setup_network_on_host'),
mock.patch.object(self.network, 'instance_dns_manager'),
mock.patch.object(self.network,
'_do_trigger_security_group_members_refresh_for_instance')
) as (mock_setup_network, mock_dns_manager, mock_ignored):
mock_setup_network.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=address)
mock_dns_manager.delete_entry.assert_has_calls([
mock.call(instance.display_name, ''),
mock.call(instance.uuid, '')
])
mock_fixedip_disassociate.assert_called_once_with(self.context)
class FlatDHCPNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatDHCPNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(use_local=True, group='conductor')
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
floating_get_by_host,
fixed_get_by_id):
def get_by_id(context, fixed_ip_id, **kwargs):
net = objects.Network(bridge='testbridge',
cidr='192.168.1.0/24')
if fixed_ip_id == 1:
return objects.FixedIP(address='192.168.1.4',
network=net)
elif fixed_ip_id == 2:
return objects.FixedIP(address='192.168.1.5',
network=net)
def fake_apply():
fake_apply.count += 1
fake_apply.count = 0
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
float1._context = ctxt
float2._context = ctxt
iptable_apply.side_effect = fake_apply
floating_get_by_host.return_value = [float1, float2]
fixed_get_by_id.side_effect = get_by_id
self.network.init_host()
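        # init_host is expected to defer iptables application while setting
        # up both floating IPs, so all changes should collapse into a single
        # _apply call; that is what the count below checks.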
self.assertEqual(1, fake_apply.count)
class VlanNetworkTestCase(test.TestCase):
def setUp(self):
super(VlanNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(use_local=True, group='conductor')
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
def test_quota_driver_type(self):
self.assertEqual(objects.QuotasNoOp,
self.network.quotas_cls)
def test_vpn_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
network_id=mox.IgnoreArg(),
reserved=True).AndReturn(fixed)
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
network['id'] = None
instance = db.instance_create(self.context, {})
self.assertRaises(exception.FixedIpNotFoundForNetwork,
self.network.allocate_fixed_ip,
self.context_admin,
instance['uuid'],
network,
vpn=True)
def test_allocate_fixed_ip(self):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch',
'vpn_private_address': netaddr.IPAddress('1.2.3.4')
}, vpn=1)
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1, reserved=True)
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_duplicate_vlan_raises(self):
# VLAN 100 is already used and we force the network to be created
# in that vlan (vlan=100).
self.assertRaises(exception.DuplicateVlan,
self.network.create_networks,
self.context_admin, label="fake", num_networks=1,
vlan=100, cidr='192.168.0.1/24', network_size=100)
def test_vlan_start(self):
        # VLAN 100 and 101 are used, so this network should be created in 102
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
def test_vlan_start_multiple(self):
        # VLAN 100 and 101 are used, so these networks should be created in
        # 102 and 103
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
self.assertEqual(networks[1]["vlan"], 103)
def test_vlan_start_used(self):
# VLAN 100 and 101 are used, but vlan_start=99.
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=99, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
@mock.patch('nova.db.network_get')
def test_validate_networks(self, net_get):
def network_get(_context, network_id, project_only='allow_none'):
return dict(test_network.fake_network, **networks[network_id])
net_get.side_effect = network_get
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[1]['id'],
network=dict(test_network.fake_network,
**networks[1]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed1)
db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[0]['id'],
network=dict(test_network.fake_network,
**networks[0]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed2)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id + '1')
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
ctxt = context.RequestContext(None, None,
is_admin=True)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id='testproject')
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
fake_allocate_address)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
@mock.patch('nova.quota.QUOTAS.reserve')
@mock.patch('nova.quota.QUOTAS.commit')
def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip)
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=1)
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# this time should raise because floating ip is associated to fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.deallocate_floating_ip,
ctxt,
mox.IgnoreArg())
mock_reserve.return_value = 'reserve'
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
mock_commit.assert_called_once_with(ctxt, 'reserve',
project_id='testproject')
@mock.patch('nova.db.fixed_ip_get')
def test_associate_floating_ip(self, fixed_get):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
network=test_network.fake_network)
# floating ip that's already associated
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1)
# floating ip that isn't associated
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=None)
# fixed ip with remote host
def fake4(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=123)
def fake4_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='jibberjabber')
# fixed ip with local host
def fake5(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=1234)
def fake5_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='testhost')
def fake6(ctxt, method, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
raise processutils.ProcessExecutionError('',
'Cannot find device "em0"\n')
def fake9(*args, **kwargs):
raise test.TestingException()
# raises because interface doesn't exist
self.stubs.Set(self.network.db,
'floating_ip_fixed_ip_associate',
fake1)
self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
self.assertRaises(exception.NoFloatingIpInterface,
self.network._associate_floating_ip,
ctxt,
'1.2.3.4',
'1.2.3.5',
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is already associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
instance_uuid='fake_uuid',
network=test_network.fake_network)
# doesn't raise because we exit early if the address is the same
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')
# raises because we call disassociate which is mocked
self.assertRaises(test.TestingException,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
'new')
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
lambda **kw: self.network.network_rpcapi.client)
self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_associate_floating_ip', fake7)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertTrue(self.local)
def test_add_floating_ip_nat_before_bind(self):
# Tried to verify order with documented mox record/verify
# functionality, but it doesn't seem to work since I can't make it
# fail. I'm using stubs and a flag for now, but if this mox feature
# can be made to work, it would be a better way to test this.
#
# self.mox.StubOutWithMock(self.network.driver,
# 'ensure_floating_forward')
# self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
#
# self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg())
# self.network.driver.bind_floating_ip(mox.IgnoreArg(),
# mox.IgnoreArg())
# self.mox.ReplayAll()
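        # Flag-based ordering check: fake_nat records that the NAT rule was
        # installed, and fake_bind asserts that flag, proving that
        # ensure_floating_forward runs before bind_floating_ip.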
nat_called = [False]
def fake_nat(*args, **kwargs):
nat_called[0] = True
def fake_bind(*args, **kwargs):
self.assertTrue(nat_called[0])
self.stubs.Set(self.network.driver,
'ensure_floating_forward',
fake_nat)
self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fakeiface',
'fakenet')
@mock.patch('nova.db.floating_ip_get_all_by_host')
@mock.patch('nova.db.fixed_ip_get')
def _test_floating_ip_init_host(self, fixed_get, floating_get,
public_interface, expected_arg):
floating_get.return_value = [
dict(test_floating_ip.fake_floating_ip,
interface='foo',
address='1.2.3.4'),
dict(test_floating_ip.fake_floating_ip,
interface='fakeiface',
address='1.2.3.5',
fixed_ip_id=1),
dict(test_floating_ip.fake_floating_ip,
interface='bar',
address='1.2.3.6',
fixed_ip_id=2),
]
def fixed_ip_get(_context, fixed_ip_id, get_network):
if fixed_ip_id == 1:
return dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network=test_network.fake_network)
raise exception.FixedIpNotFound(id=fixed_ip_id)
fixed_get.side_effect = fixed_ip_get
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
self.flags(public_interface=public_interface)
self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
netaddr.IPAddress('1.2.3.4'),
expected_arg,
mox.IsA(objects.Network))
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
self.mox.VerifyAll()
def test_floating_ip_init_host_without_public_interface(self):
self._test_floating_ip_init_host(public_interface=False,
expected_arg='fakeiface')
def test_floating_ip_init_host_with_public_interface(self):
self._test_floating_ip_init_host(public_interface='fooiface',
expected_arg='fooiface')
def test_disassociate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that isn't associated
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=None)
# floating ip that is associated
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1,
project_id=ctxt.project_id)
# fixed ip with remote host
def fake4(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=123)
def fake4_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False,
host='jibberjabber')
# fixed ip with local host
def fake5(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=1234)
def fake5_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='testhost')
def fake6(ctxt, method, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1,
auto_assigned=True,
project_id=ctxt.project_id)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is not associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpNotAssociated,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
lambda **kw: self.network.network_rpcapi.client)
self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertTrue(self.local)
# raises because auto_assigned floating IP cannot be disassociated
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['id'])
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
def test_ip_association_and_allocation_of_other_project(self, net_get,
fixed_get):
"""Makes sure that we cannot deallocaate or disassociate
a public ip of other project.
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
context1 = context.RequestContext('user', 'project1')
context2 = context.RequestContext('user', 'project2')
float_ip = db.floating_ip_create(context1.elevated(),
{'address': '1.2.3.4',
'project_id': context1.project_id})
float_addr = float_ip['address']
instance = db.instance_create(context1,
{'project_id': 'project1'})
fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
1, instance['uuid']).address
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
# Associate the IP with non-admin user context
self.assertRaises(exception.Forbidden,
self.network.associate_floating_ip,
context2,
float_addr,
fix_addr)
# Deallocate address from other project
self.assertRaises(exception.Forbidden,
self.network.deallocate_floating_ip,
context2,
float_addr)
# Now Associates the address to the actual project
self.network.associate_floating_ip(context1, float_addr, fix_addr)
# Now try dis-associating from other project
self.assertRaises(exception.Forbidden,
self.network.disassociate_floating_ip,
context2,
float_addr)
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
"""Verify that release is called properly.
Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return vifs[0]
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
instance_uuid=instance.uuid,
allocated=True,
virtual_interface_id=3,
network=dict(test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
self.mox.StubOutWithMock(linux_net, 'release_dhcp')
linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
'DE:AD:BE:EF:00:00')
self.mox.ReplayAll()
self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
def test_deallocate_fixed_deleted(self):
# Verify doesn't deallocate deleted fixed_ip from deleted network.
def teardown_network_on_host(_context, network):
if network['id'] == 0:
raise test.TestingException()
self.stubs.Set(self.network, '_teardown_network_on_host',
teardown_network_on_host)
context1 = context.RequestContext('user', 'project1')
elevated = context1.elevated()
instance = db.instance_create(context1,
{'project_id': 'project1'})
network = db.network_create_safe(elevated, networks[0])
_fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fix_addr = _fix_addr.address
db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
elevated.read_deleted = 'yes'
delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
values = {'address': fix_addr,
'network_id': network.id,
'instance_uuid': delfixed['instance_uuid']}
db.fixed_ip_create(elevated, values)
elevated.read_deleted = 'no'
elevated.read_deleted = 'yes'
deallocate = self.network.deallocate_fixed_ip
self.assertRaises(test.TestingException, deallocate, context1,
fix_addr, 'fake')
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
"""Verify that deallocate doesn't raise when no vif is returned.
Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return None
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
allocated=True,
virtual_interface_id=3,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
fixed_update.return_value = fixed_get.return_value
self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
# Verify IP is not deallocated if the security group refresh fails.
net_get.return_value = dict(test_network.fake_network,
**networks[1])
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = objects.FixedIP.associate_pool(elevated, 1,
instance['uuid'])
def fake_refresh(instance_uuid):
raise test.TestingException()
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
fake_refresh)
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
allocated=True,
virtual_interface_id=3,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
self.assertRaises(test.TestingException,
self.network.deallocate_fixed_ip,
context1, str(fix_addr.address), 'fake')
self.assertFalse(fixed_update.called)
def test_get_networks_by_uuids_ordering(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
res = self.network._get_networks_by_uuids(self.context,
requested_networks)
self.assertEqual(res[0]['id'], 1)
self.assertEqual(res[1]['id'], 0)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
floating_get_by_host,
fixed_get_by_id):
def get_by_id(context, fixed_ip_id, **kwargs):
net = objects.Network(bridge='testbridge',
cidr='192.168.1.0/24')
if fixed_ip_id == 1:
return objects.FixedIP(address='192.168.1.4',
network=net)
elif fixed_ip_id == 2:
return objects.FixedIP(address='192.168.1.5',
network=net)
def fake_apply():
fake_apply.count += 1
fake_apply.count = 0
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
float1._context = ctxt
float2._context = ctxt
iptable_apply.side_effect = fake_apply
floating_get_by_host.return_value = [float1, float2]
fixed_get_by_id.side_effect = get_by_id
self.network.init_host()
self.assertEqual(1, fake_apply.count)
class _TestDomainObject(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
self.__setattr__(k, v)
class FakeNetwork(object):
def __init__(self, **kwargs):
self.vlan = None
for k, v in kwargs.iteritems():
self.__setattr__(k, v)
def __getitem__(self, item):
return getattr(self, item)
class CommonNetworkTestCase(test.TestCase):
def setUp(self):
super(CommonNetworkTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.flags(ipv6_backend='rfc2462')
self.flags(use_local=True, group='conductor')
ipv6.reset_backend()
def test_validate_instance_zone_for_dns_domain(self):
domain = 'example.com'
az = 'test_az'
domains = {
domain: _TestDomainObject(
domain=domain,
availability_zone=az)}
def dnsdomain_get(context, instance_domain):
return domains.get(instance_domain)
self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
fake_instance = {'uuid': FAKEUUID,
'availability_zone': az}
manager = network_manager.NetworkManager()
res = manager._validate_instance_zone_for_dns_domain(self.context,
fake_instance)
self.assertTrue(res)
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
extra_reserved=None, bottom_reserved=0,
top_reserved=0):
return None
def test_get_instance_nw_info_client_exceptions(self):
manager = network_manager.NetworkManager()
self.mox.StubOutWithMock(manager.db,
'virtual_interface_get_by_instance')
manager.db.virtual_interface_get_by_instance(
self.context, FAKEUUID,
use_slave=False).AndRaise(exception.InstanceNotFound(
instance_id=FAKEUUID))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
manager.get_instance_nw_info,
self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
@mock.patch('nova.db.instance_get')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_deallocate_for_instance_passes_host_info(self, fixed_get,
instance_get):
manager = fake_network.FakeNetworkManager()
db = manager.db
instance_get.return_value = fake_inst(uuid='ignoreduuid')
db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('ignore', 'ignore')
fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network_id=123)]
manager.deallocate_for_instance(
ctx, instance=objects.Instance._from_db_object(self.context,
objects.Instance(), instance_get.return_value))
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host')
], manager.deallocate_fixed_ip_calls)
@mock.patch('nova.db.fixed_ip_get_by_instance')
@mock.patch('nova.db.fixed_ip_disassociate')
def test_remove_fixed_ip_from_instance(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
get.return_value = [
dict(test_fixed_ip.fake_fixed_ip, **x)
for x in manager.db.fixed_ip_get_by_instance(None,
FAKEUUID)]
manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
HOST,
'10.0.0.1')
self.assertEqual(manager.deallocate_called, '10.0.0.1')
disassociate.assert_called_once_with(self.context, '10.0.0.1')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_remove_fixed_ip_from_instance_bad_input(self, get):
manager = fake_network.FakeNetworkManager()
get.return_value = []
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/25', cidrs)
self.assertIn('192.168.0.128/25', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.0/24')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 4, 256, None, None, None,
None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.9/25')]
# CidrConflict: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
1, 256, None, None, None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.0/25')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 4, 256, None, None, None, None,
None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
get_all.return_value = [dict(test_network.fake_network, id=1,
cidr='192.168.2.9/29')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/27', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_all_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
in_use = [dict(test_network.fake_network, **values) for values in
[{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]]
get_all.return_value = in_use
args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
3, 64, None, None, None, None, None)
# CidrConflict: Not enough subnets avail to satisfy requested num_
# networks - some subnets in requested range already
# in use
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_already_used(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
cidr='192.168.0.0/24')]
# CidrConflict: cidr already in use
args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
1, 256, None, None, None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', returned_cidrs)
self.assertIn('192.168.1.0/24', returned_cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_conflict_existing_supernet(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.0.0/8')]
args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
1, 256, None, None, None, None, None)
# CidrConflict: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
@mock.patch('nova.db.network_get_all')
def test_create_networks_cidr_already_used(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.0.0/24')]
args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
# Greedy: get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '10.0.0.1'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '173.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.*'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
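# ('.' in the filter is a regex wildcard, so '17..16.0.2' matches both
# 172.16.0.2 and 173.16.0.2)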
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '17..16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ipv6_regex(self, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
def _network_get(context, network_id, **args):
return dict(test_network.fake_network,
**manager.db.network_get(context, network_id))
network_get.side_effect = _network_get
# Greedy: get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*ef0[1,2]'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network, **networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
@mock.patch('nova.db.network_get_all')
def test_get_all_networks(self, get_all):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get_all.return_value = [dict(test_network.fake_network, **net)
for net in networks]
output = manager.get_all_networks(fake_context)
self.assertEqual(len(networks), 2)
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
@mock.patch('nova.db.network_get_by_uuid')
@mock.patch('nova.db.network_disassociate')
def test_disassociate_network(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
disassociate.return_value = True
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network,
**networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_disassociate_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
def _test_init_host_dynamic_fixed_range(self, net_manager):
self.flags(fake_network=True,
routing_source_ip='172.16.0.1',
metadata_host='172.16.0.1',
public_interface='eth1',
dmz_cidr=['10.0.3.0/24'])
binary_name = linux_net.get_binary_name()
# Stub out calls we don't want to really run, mock the db
self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
lambda *args: None)
self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
lambda *args: None)
self.mox.StubOutWithMock(db, 'network_get_all_by_host')
fake_networks = [dict(test_network.fake_network, **n)
for n in networks]
db.network_get_all_by_host(mox.IgnoreArg(),
mox.IgnoreArg()
).MultipleTimes().AndReturn(fake_networks)
self.mox.ReplayAll()
net_manager.init_host()
# Get the iptables rules that got created
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[0]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[0]['cidr'],
networks[0]['cidr']),
'[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[1]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[1]['cidr'],
networks[1]['cidr'])]
# Compare the expected rules against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
# Add an additional network and ensure the rules get configured
new_network = {'id': 2,
'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
'label': 'test2',
'injected': False,
'multi_host': False,
'cidr': '192.168.2.0/24',
'cidr_v6': '2001:dba::/64',
'gateway_v6': '2001:dba::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.2.1',
'dhcp_server': '192.168.2.1',
'broadcast': '192.168.2.255',
'dns1': '192.168.2.1',
'dns2': '192.168.2.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.2.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}
new_network_obj = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **new_network))
ctxt = context.get_admin_context()
net_manager._setup_network_on_host(ctxt, new_network_obj)
# Get the new iptables rules that got created from adding a new network
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
# Add the new expected rules to the old ones
expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, new_network['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
'! --ctstate DNAT -j ACCEPT' % (binary_name,
new_network['cidr'],
new_network['cidr'])]
# Compare the expected rules (with new network) against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
def test_flatdhcpmanager_dynamic_fixed_range(self):
"""Test FlatDHCPManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
"""Test VlanManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
"""Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.TestCase):
"""Tests nova.network.manager.RPCAllocateFixedIP."""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.flags(use_local=True, group='conductor')
self.rpc_fixed = TestRPCFixedManager()
self.context = context.RequestContext('fake', 'fake')
def test_rpc_allocate(self):
"""Test to verify bug 855030 doesn't resurface.
Makes sure _rpc_allocate_fixed_ip returns a value so the call
returns properly and the greenpool completes.
"""
address = '10.10.10.10'
def fake_allocate(*args, **kwargs):
return address
def fake_network_get(*args, **kwargs):
return test_network.fake_network
self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
'fake_instance',
'fake_network')
self.assertEqual(rval, address)
class TestFloatingIPManager(floating_ips.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
def setUp(self):
super(AllocateTestCase, self).setUp()
dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
self.flags(instance_dns_manager=dns)
self.useFixture(test.SampleNetworks())
self.conductor = self.start_service(
'conductor', manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.network = self.start_service('network')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.user_context = context.RequestContext('testuser',
'testproject')
def test_allocate_for_instance(self):
address = "10.10.10.10"
self.flags(auto_assign_floating_ip=True)
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
inst = objects.Instance()
inst.host = self.compute.host
inst.display_name = HOST
inst.instance_type_id = 1
inst.uuid = FAKEUUID
inst.create(self.context)
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.user_context.project_id
nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=None)
self.assertEqual(1, len(nw_info))
fixed_ip = nw_info.fixed_ips()[0]['address']
self.assertTrue(utils.is_valid_ipv4(fixed_ip))
self.network.deallocate_for_instance(self.context,
instance=inst)
def test_allocate_for_instance_illegal_network(self):
networks = db.network_get_all(self.context)
requested_networks = []
for network in networks:
# set all networks to other projects
db.network_update(self.context, network['id'],
{'host': self.network.host,
'project_id': 'otherid'})
requested_networks.append((network['uuid'], None))
# set the first network to our project
db.network_update(self.context, networks[0]['id'],
{'project_id': self.user_context.project_id})
inst = objects.Instance()
inst.host = self.compute.host
inst.display_name = HOST
inst.instance_type_id = 1
inst.uuid = FAKEUUID
inst.create(self.context)
self.assertRaises(exception.NetworkNotFoundForProject,
self.network.allocate_for_instance, self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=self.context.project_id, macs=None,
requested_networks=requested_networks)
def test_allocate_for_instance_with_mac(self):
available_macs = set(['ca:fe:de:ad:be:ef'])
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
assigned_macs = [vif['address'] for vif in nw_info]
self.assertEqual(1, len(assigned_macs))
self.assertEqual(available_macs.pop(), assigned_macs[0])
self.network.deallocate_for_instance(self.context,
instance_id=inst['id'],
host=self.network.host,
project_id=project_id)
def test_allocate_for_instance_not_enough_macs(self):
available_macs = set()
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
self.assertRaises(exception.VirtualInterfaceCreateException,
self.network.allocate_for_instance,
self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.service_get_by_host_and_topic')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
service_get,
inst_get, net_get,
fixed_get):
floating_ip = dict(test_floating_ip.fake_floating_ip,
fixed_ip_id=12)
fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
network_id=None,
instance_uuid='instance-uuid')
network = dict(test_network.fake_network,
multi_host=True)
instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
floating_get.return_value = floating_ip
fixed_get.return_value = fixed_ip
net_get.return_value = network
inst_get.return_value = instance
service_get.return_value = test_service.fake_service
self.stubs.Set(self.network.servicegroup_api,
'service_is_up',
lambda _x: True)
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_disassociate_floating_ip')
self.network.network_rpcapi._disassociate_floating_ip(
ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
self.mox.ReplayAll()
self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_associate_floating_ip_multi_host_calls(self, floating_get,
inst_get, net_get,
fixed_get):
floating_ip = dict(test_floating_ip.fake_floating_ip,
fixed_ip_id=None)
fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
network_id=None,
instance_uuid='instance-uuid')
network = dict(test_network.fake_network,
multi_host=True)
instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
floating_get.return_value = floating_ip
fixed_get.return_value = fixed_ip
net_get.return_value = network
inst_get.return_value = instance
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_associate_floating_ip')
self.network.network_rpcapi._associate_floating_ip(
ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
'instance-uuid')
self.mox.ReplayAll()
self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
def test_double_deallocation(self):
instance_ref = db.instance_create(self.context,
{"project_id": self.project_id})
# Run it twice to make it fault if it does not handle
# instances without fixed networks
# If this fails in either, it does not handle having no addresses
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
def test_deallocate_floating_ip_quota_rollback(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
self.mox.StubOutWithMock(self.network,
'_floating_ip_owned_by_project')
self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
quota.QUOTAS.reserve(self.context,
floating_ips=-1,
project_id='testproject').AndReturn('fake-rsv')
self.network._floating_ip_owned_by_project(self.context,
mox.IgnoreArg())
db.floating_ip_deallocate(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
quota.QUOTAS.rollback(self.context, 'fake-rsv',
project_id='testproject')
self.mox.ReplayAll()
self.network.deallocate_floating_ip(self.context, '10.0.0.1')
def test_deallocation_deleted_instance(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance()
instance.project_id = self.project_id
instance.deleted = True
instance.create(self.context)
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
def test_deallocation_duplicate_floating_ip(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance()
instance.project_id = self.project_id
instance.create(self.context)
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10',
'deleted': True})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_get_by_address')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_start(self, floating_update, floating_get,
fixed_get):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return dict(test_floating_ip.fake_floating_ip,
address=address,
fixed_ip_id=0)
def fake_is_stale_floating_ip_address(context, floating_ip):
return str(floating_ip.address) == '172.24.4.23'
floating_get.side_effect = fake_floating_ip_get_by_address
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
instance_uuid='fake_uuid',
address='10.0.0.2',
network=test_network.fake_network)
floating_update.return_value = fake_floating_ip_get_by_address(
None, '1.2.3.4')
def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
def fake_clean_conntrack(fixed_ip):
if not str(fixed_ip) == "10.0.0.2":
raise exception.FixedIpInvalid(address=fixed_ip)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
fake_remove_floating_ip)
self.stubs.Set(self.network.driver, 'clean_conntrack',
fake_clean_conntrack)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_start(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
rxtx_factor=3,
project_id=self.project_id,
source='fake_source',
dest='fake_dest')
self.assertEqual(called['count'], 2)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_finish(self, floating_update, fixed_get):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return dict(test_floating_ip.fake_floating_ip,
address=address,
fixed_ip_id=0)
def fake_is_stale_floating_ip_address(context, floating_ip):
return str(floating_ip.address) == '172.24.4.23'
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
instance_uuid='fake_uuid',
address='10.0.0.2',
network=test_network.fake_network)
floating_update.return_value = fake_floating_ip_get_by_address(
None, '1.2.3.4')
def fake_add_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
fake_floating_ip_get_by_address)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
fake_add_floating_ip)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_finish(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
host='fake_dest',
rxtx_factor=3,
project_id=self.project_id,
source='fake_source')
self.assertEqual(called['count'], 2)
def test_floating_dns_create_conflict(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.assertRaises(exception.FloatingIpDNSExists,
self.network.add_dns_entry, self.context,
address1, name1, "A", zone)
def test_floating_create_and_get(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertFalse(entries)
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(len(entries), 2)
self.assertEqual(entries[0], name1)
self.assertEqual(entries[1], name2)
entries = self.network.get_dns_entries_by_name(self.context,
name1, zone)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
def test_floating_dns_delete(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
self.network.delete_dns_entry(self.context, name1, zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], name2)
self.assertRaises(exception.NotFound,
self.network.delete_dns_entry, self.context,
name1, zone)
def test_floating_dns_domains_public(self):
zone1 = "testzone"
domain1 = "example.org"
domain2 = "example.com"
address1 = '10.10.10.10'
entryname = 'testentry'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_public_dns_domain, self.context,
domain1, zone1)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
self.assertEqual(len(domains), 2)
self.assertEqual(domains[0]['domain'], domain1)
self.assertEqual(domains[1]['domain'], domain2)
self.assertEqual(domains[0]['project'], 'testproject')
self.assertEqual(domains[1]['project'], 'fakeproject')
self.network.add_dns_entry(self.context, address1, entryname,
'A', domain1)
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
# Verify that deleting the domain deleted the associated entry
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEqual(len(entries), 2)
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
def test_mac_conflicts(self):
# Make sure MAC collisions are retried.
self.flags(create_unique_mac_address_attempts=3)
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
# Create a VIF with aa:aa:aa:aa:aa:aa
crash_test_dummy_vif = {
'address': macs[1],
'instance_uuid': 'fake_uuid',
'network_id': 123,
'uuid': 'fake_uuid',
}
self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
# Hand out a collision first, then a legit MAC
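# (macs.pop() takes from the end of the list, so the colliding
# 'aa:aa:aa:aa:aa:aa' address is handed out first)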
def fake_gen_mac():
return macs.pop()
self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
# SQLite doesn't seem to honor the uniqueness constraint on the
# address column, so fake the collision-avoidance here
def fake_vif_save(vif):
if vif.address == crash_test_dummy_vif['address']:
raise db_exc.DBError("If you're smart, you'll retry!")
# NOTE(russellb) The VirtualInterface object requires an ID to be
# set, and we expect it to get set automatically when we do the
# save.
vif.id = 1
self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
# Attempt to add another and make sure that both MACs are consumed
# by the retry loop
self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
self.assertEqual(macs, [])
def test_deallocate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.deallocate_floating_ip,
self.context, '1.2.3.4')
def test_associate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.associate_floating_ip,
self.context, '1.2.3.4', '10.0.0.1')
def test_disassociate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.disassociate_floating_ip,
self.context, '1.2.3.4')
def test_get_floating_ip_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
exception.FloatingIpNotFound(id='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.get_floating_ip,
self.context, 'fake-id')
def _test_associate_floating_ip_failure(self, stdout, expected_exception):
def _fake_catchall(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
network=test_network.fake_network)
def _fake_add_floating_ip(*args, **kwargs):
raise processutils.ProcessExecutionError(stdout)
self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
_fake_catchall)
self.stubs.Set(self.network.db, 'floating_ip_disassociate',
_fake_catchall)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
_fake_add_floating_ip)
self.assertRaises(expected_exception,
self.network._associate_floating_ip, self.context,
'1.2.3.4', '1.2.3.5', '', '')
def test_associate_floating_ip_failure(self):
self._test_associate_floating_ip_failure(None,
processutils.ProcessExecutionError)
def test_associate_floating_ip_failure_interface_not_found(self):
self._test_associate_floating_ip_failure('Cannot find device',
exception.NoFloatingIpInterface)
@mock.patch('nova.objects.FloatingIP.get_by_address')
def test_get_floating_ip_by_address(self, mock_get):
mock_get.return_value = mock.sentinel.floating
self.assertEqual(mock.sentinel.floating,
self.network.get_floating_ip_by_address(
self.context,
mock.sentinel.address))
mock_get.assert_called_once_with(self.context, mock.sentinel.address)
@mock.patch('nova.objects.FloatingIPList.get_by_project')
def test_get_floating_ips_by_project(self, mock_get):
mock_get.return_value = mock.sentinel.floatings
self.assertEqual(mock.sentinel.floatings,
self.network.get_floating_ips_by_project(
self.context))
mock_get.assert_called_once_with(self.context, self.context.project_id)
@mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
def test_get_floating_ips_by_fixed_address(self, mock_get):
mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
objects.FloatingIP(address='5.6.7.8')]
self.assertEqual(['1.2.3.4', '5.6.7.8'],
self.network.get_floating_ips_by_fixed_address(
self.context, mock.sentinel.address))
mock_get.assert_called_once_with(self.context, mock.sentinel.address)
class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def test_dns_domains_private(self):
zone1 = 'testzone'
domain1 = 'example.org'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_private_dns_domain, self.context,
domain1, zone1)
self.network.create_private_dns_domain(context_admin, domain1, zone1)
domains = self.network.get_dns_domains(self.context)
self.assertEqual(len(domains), 1)
self.assertEqual(domains[0]['domain'], domain1)
self.assertEqual(domains[0]['availability_zone'], zone1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
"""Tests nova.network.ldapdns.LdapDNS."""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
self.useFixture(test.ReplaceModule('ldap', fake_ldap))
dns_class = 'nova.network.ldapdns.LdapDNS'
self.driver = importutils.import_object(dns_class)
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'associateddomain': ['root'],
'dc': ['root']}
self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
self.driver.create_domain(domain1)
self.driver.create_domain(domain2)
def tearDown(self):
self.driver.delete_domain(domain1)
self.driver.delete_domain(domain2)
super(LdapDNSTestCase, self).tearDown()
def test_ldap_dns_domains(self):
domains = self.driver.get_domains()
self.assertEqual(len(domains), 2)
self.assertIn(domain1, domains)
self.assertIn(domain2, domains)
def test_ldap_dns_create_conflict(self):
address1 = "10.10.10.11"
name1 = "foo"
self.driver.create_entry(name1, address1, "A", domain1)
self.assertRaises(exception.FloatingIpDNSExists,
self.driver.create_entry,
name1, address1, "A", domain1)
def test_ldap_dns_create_and_get(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertFalse(entries)
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEqual(len(entries), 2)
self.assertEqual(entries[0], name1)
self.assertEqual(entries[1], name2)
entries = self.driver.get_entries_by_name(name1, domain1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
def test_ldap_dns_delete(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEqual(len(entries), 2)
self.driver.delete_entry(name1, domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
LOG.debug("entries: %s" % entries)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], name2)
self.assertRaises(exception.NotFound,
self.driver.delete_entry,
name1, domain1)
| apache-2.0 |
DirectDev/wds-V2 | www/admin/plugins/ionicons/builder/scripts/generate_font.py | 348 | 5381 | # Font generation script from FontCustom
# https://github.com/FontCustom/fontcustom/
# http://fontcustom.com/
import fontforge
import os
import md5
import subprocess
import tempfile
import json
import copy
SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
INPUT_SVG_DIR = os.path.join(SCRIPT_PATH, '..', '..', 'src')
OUTPUT_FONT_DIR = os.path.join(SCRIPT_PATH, '..', '..', 'fonts')
MANIFEST_PATH = os.path.join(SCRIPT_PATH, '..', 'manifest.json')
BUILD_DATA_PATH = os.path.join(SCRIPT_PATH, '..', 'build_data.json')
AUTO_WIDTH = True
KERNING = 15
cp = 0xf100
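# starting code point for newly added icons; 0xf100 lies in the Unicode
# private-use area, and cp is advanced for every icon not yet in the manifest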
m = md5.new()
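# md5 accumulator: the font name, prefix and each icon's name/size are hashed
# into a build hash so unchanged sources do not trigger a font rebuild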
f = fontforge.font()
f.encoding = 'UnicodeFull'
f.design_size = 16
f.em = 512
f.ascent = 448
f.descent = 64
manifest_file = open(MANIFEST_PATH, 'r')
manifest_data = json.loads(manifest_file.read())
manifest_file.close()
print "Load Manifest, Icons: %s" % ( len(manifest_data['icons']) )
build_data = copy.deepcopy(manifest_data)
build_data['icons'] = []
font_name = manifest_data['name']
m.update(font_name + ';')
m.update(manifest_data['prefix'] + ';')
for dirname, dirnames, filenames in os.walk(INPUT_SVG_DIR):
for filename in filenames:
name, ext = os.path.splitext(filename)
filePath = os.path.join(dirname, filename)
size = os.path.getsize(filePath)
if ext in ['.svg', '.eps']:
# see if this file is already in the manifest
chr_code = None
for ionicon in manifest_data['icons']:
if ionicon['name'] == name:
chr_code = ionicon['code']
break
if chr_code is None:
# this is a new src icon
print 'New Icon: \n - %s' % (name)
while True:
chr_code = '0x%x' % (cp)
already_exists = False
for ionicon in manifest_data['icons']:
if ionicon.get('code') == chr_code:
already_exists = True
cp += 1
chr_code = '0x%x' % (cp)
continue
if not already_exists:
break
print ' - %s' % chr_code
manifest_data['icons'].append({
'name': name,
'code': chr_code
})
build_data['icons'].append({
'name': name,
'code': chr_code
})
if ext in ['.svg']:
# hack removal of <switch> </switch> tags
svgfile = open(filePath, 'r+')
tmpsvgfile = tempfile.NamedTemporaryFile(suffix=ext, delete=False)
svgtext = svgfile.read()
svgfile.seek(0)
# replace the <switch> </switch> tags with 'nothing'
svgtext = svgtext.replace('<switch>', '')
svgtext = svgtext.replace('</switch>', '')
tmpsvgfile.file.write(svgtext)
svgfile.close()
tmpsvgfile.file.close()
filePath = tmpsvgfile.name
# end hack
m.update(name + str(size) + ';')
glyph = f.createChar( int(chr_code, 16) )
glyph.importOutlines(filePath)
# if we created a temporary file, let's clean it up
if tmpsvgfile:
os.unlink(tmpsvgfile.name)
# set glyph size explicitly or automatically depending on autowidth
if AUTO_WIDTH:
glyph.left_side_bearing = glyph.right_side_bearing = 0
glyph.round()
# resize glyphs if autowidth is enabled
if AUTO_WIDTH:
f.autoWidth(0, 0, 512)
fontfile = '%s/ionicons' % (OUTPUT_FONT_DIR)
build_hash = m.hexdigest()
if build_hash == manifest_data.get('build_hash'):
print "Source files unchanged, did not rebuild fonts"
else:
manifest_data['build_hash'] = build_hash
f.fontname = font_name
f.familyname = font_name
f.fullname = font_name
f.generate(fontfile + '.ttf')
f.generate(fontfile + '.svg')
# Fix SVG header for webkit
# from: https://github.com/fontello/font-builder/blob/master/bin/fontconvert.py
svgfile = open(fontfile + '.svg', 'r+')
svgtext = svgfile.read()
svgfile.seek(0)
svgfile.write(svgtext.replace('''<svg>''', '''<svg xmlns="http://www.w3.org/2000/svg">'''))
svgfile.close()
scriptPath = os.path.dirname(os.path.realpath(__file__))
try:
subprocess.Popen([scriptPath + '/sfnt2woff', fontfile + '.ttf'], stdout=subprocess.PIPE)
except OSError:
# If the local version of sfnt2woff fails (i.e., on Linux), try to use the
# global version. This allows us to avoid forcing OS X users to compile
# sfnt2woff from source, simplifying install.
subprocess.call(['sfnt2woff', fontfile + '.ttf'])
# eotlitetool.py script to generate IE7-compatible .eot fonts
subprocess.call('python ' + scriptPath + '/eotlitetool.py ' + fontfile + '.ttf -o ' + fontfile + '.eot', shell=True)
subprocess.call('mv ' + fontfile + '.eotlite ' + fontfile + '.eot', shell=True)
# Hint the TTF file
subprocess.call('ttfautohint -s -f -n ' + fontfile + '.ttf ' + fontfile + '-hinted.ttf > /dev/null 2>&1 && mv ' + fontfile + '-hinted.ttf ' + fontfile + '.ttf', shell=True)
manifest_data['icons'] = sorted(manifest_data['icons'], key=lambda k: k['name'])
build_data['icons'] = sorted(build_data['icons'], key=lambda k: k['name'])
print "Save Manifest, Icons: %s" % ( len(manifest_data['icons']) )
f = open(MANIFEST_PATH, 'w')
f.write( json.dumps(manifest_data, indent=2, separators=(',', ': ')) )
f.close()
print "Save Build, Icons: %s" % ( len(build_data['icons']) )
f = open(BUILD_DATA_PATH, 'w')
f.write( json.dumps(build_data, indent=2, separators=(',', ': ')) )
f.close()
| mit |
CompMusic/essentia | src/examples/tutorial/extractor_predominantmelody.py | 10 | 2747 | # Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import sys, csv
from essentia import *
from essentia.standard import *
from pylab import *
from numpy import *
# In this script we will extract predominant melody given a music file
try:
filename = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input-audiofile>"
sys.exit()
# We will use a composite algorithm PredominantMelody, which combines a number of
# required steps for us. Let's declare and configure it first:
hopSize = 128
frameSize = 2048
sampleRate = 44100
guessUnvoiced = True # read the algorithm's reference for more details
run_predominant_melody = PredominantMelody(guessUnvoiced=guessUnvoiced,
frameSize=frameSize,
hopSize=hopSize);
# Load audio file, apply equal loudness filter, and compute predominant melody
audio = MonoLoader(filename = filename, sampleRate=sampleRate)()
audio = EqualLoudness()(audio)
pitch, confidence = run_predominant_melody(audio)
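# pitch holds one f0 estimate in Hz per frame (frames with no detected melody
# are typically reported as 0 Hz); confidence is the per-frame pitch confidence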
n_frames = len(pitch)
print "number of frames:", n_frames
# Visualize output pitch values
fig = plt.figure()
plot(range(n_frames), pitch, 'b')
n_ticks = 10
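# place 10 ticks on the x axis and label them in seconds:
# time = frame_index * hopSize / sampleRate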
xtick_locs = [i * (n_frames / 10.0) for i in range(n_ticks)]
xtick_lbls = [i * (n_frames / 10.0) * hopSize / sampleRate for i in range(n_ticks)]
xtick_lbls = ["%.2f" % round(x,2) for x in xtick_lbls]
plt.xticks(xtick_locs, xtick_lbls)
ax = fig.add_subplot(111)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Pitch (Hz)')
suptitle("Predominant melody pitch")
# Visualize output pitch confidence
fig = plt.figure()
plot(range(n_frames), confidence, 'b')
n_ticks = 10
xtick_locs = [i * (n_frames / 10.0) for i in range(n_ticks)]
xtick_lbls = [i * (n_frames / 10.0) * hopSize / sampleRate for i in range(n_ticks)]
xtick_lbls = ["%.2f" % round(x,2) for x in xtick_lbls]
plt.xticks(xtick_locs, xtick_lbls)
ax = fig.add_subplot(111)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Confidence')
suptitle("Predominant melody pitch confidence")
show()
| agpl-3.0 |
YingYang/STFT_R_git_repo | MNE_stft/mne_regression.py | 1 | 7284 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 1 12:55:06 2014
@author: ying
"""
import mne
import numpy as np
import numpy.linalg as la
from mne_stft_regression import _apply_inverse_evoked_list
# ===========================================================================
def mne_regression(evoked_list, inverse_operator, X,
labels = None, pick_ori=None, pick_normal=None,
snr=1, Flag_reg_stats = False,
method = "MNE"):
''' Get the MNE solution for a given snr(lambda value)
regress the time points instead of STFT coefficients
Input:
evoked_list, a list of evoked instances
inverse_operator, the inverse operator for MNE
X, [n_trials, p] array
labels, ROI labels list, if None, use the whole brain
snr, controls lambda
method, "MNE", "dSPM", "sLORETA",
Note that dSPM and sLORETA can not be used for prediction,
and the coefficients are normalized too.
Output:
result_dict = dict(coef = coef, F = F, sel = sel, roi_data_3D = roi_data)
['coef']: regression coefficients, real array [n_dipoles, n_times, p]
['F']: F-statistics, real array [n_dipoles, n_times], None unless Flag_reg_stats
['sel']: selection of the source points (columns of G)
['roi_data_3D']: the source data in the ROI
'''
n_trials = len(evoked_list)
sel = []
# The following line is wrong
n_dipoles = inverse_operator['nsource']
# if label is specified, only do the regression on the labels
# otherwise, select the data for the whole brain.
if labels is not None:
for i in range(len(labels)):
_, sel_tmp = mne.source_space.label_src_vertno_sel(labels[i],inverse_operator['src'])
sel = np.hstack([sel, sel_tmp])
sel = sel.astype(np.int)
else:
sel = np.arange(0,n_dipoles,1)
sel.astype(np.int)
# tested, the result is the same as running apply_inverse()
roi_data = _apply_inverse_evoked_list(evoked_list, inverse_operator,
lambda2= 1.0/snr**2, method=method,
labels=labels, nave=1, pick_ori=pick_ori,
verbose=None, pick_normal=None)
n_dipoles, n_times = roi_data.shape[0], roi_data.shape[1]
n_trials = len(evoked_list)
# regression, return coefficients and F-values
p = X.shape[1]
[dim0,dim1,dim2] = roi_data.shape
coef = np.zeros([dim0,dim1,p])
F = np.zeros([dim0,dim1]) if Flag_reg_stats else None
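# ordinary least-squares operator (X^T X)^-1 X^T; applying it to a response
# vector yields the regression coefficients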
linreg_op = np.dot(la.inv(X.T.dot(X)),X.T)
for i in range(dim0):
for j in range(dim1):
tmpY = roi_data[i,j,:]
tmp_coef = linreg_op.dot(tmpY)
coef[i,j,:] = tmp_coef
if Flag_reg_stats:
tmpY_hat = np.dot(X,tmp_coef)
tmp_res = tmpY_hat-tmpY
SSE = np.dot(tmp_res,tmp_res)
SST = np.sum((tmpY-np.mean(tmpY))**2)
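# F statistic of the regression: ((SST - SSE) / (p - 1)) / (SSE / (n_trials - p))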
if SSE== 0:
F[i,j] += 0
else:
F[i,j] += (SST-SSE)/(p-1)/(SSE/(n_trials-p))
result_dict = dict(coef = coef, F = F, sel = sel,roi_data_3D = roi_data)
return result_dict
#===============================================================
def get_MSE_mne_regression(evoked_list, fwd, X, coef, labels):
'''
Use the mne regression coefficients to get predicted sensor data,
then abtain the mean of squared error
Input:
evoked_list, a list of evoked objects
fwd, the forward solution
X, the design matrix,
coef, the regression coefficients, [n_sources, ntimes, p] real
Output:
MSE, the squared prediction error averaged over trials
'''
sel = []
n_dipoles = fwd['nsource']
if labels is not None:
for i in range(len(labels)):
_, sel_tmp = mne.source_space.label_src_vertno_sel(labels[i],fwd['src'])
sel = np.hstack([sel, sel_tmp])
sel = sel.astype(np.int)
else:
sel = np.arange(0,n_dipoles,1)
sel.astype(np.int)
# prepare the forward solution
evoked_ch_names = evoked_list[0].info['ch_names']
fwd_ch_names = fwd['info']['ch_names']
channel_sel = [i for i in range(len(fwd_ch_names)) \
if fwd_ch_names[i] in evoked_ch_names]
G = fwd['sol']['data'][channel_sel,:]
G = G[:,sel]
n_trials,p = X.shape
if n_trials != len(evoked_list):
raise ValueError("the numbers of trials do not match")
SSE = 0.0
for r in range(n_trials):
# predicted source time courses for the current trial (coefficients times regressors)
predicted_ts = np.sum(coef*X[r,:],axis = 2)
predicted_sensor = G.dot(predicted_ts)
SSE += np.sum((evoked_list[r].data - predicted_sensor)**2)
MSE = SSE/(n_trials)
return MSE
# ==============================================================
def select_lambda_tuning_mne_regression_cv(evoked_list, inverse_operator,
fwd, X, cv_partition_ind,
snr_tuning_seq,
labels = None):
'''
Use cross-validation to select the best lambda (tuning snr values)
All source points across the whole brain must be used,
This may require a large membory
Input:
evoked_list, n_trials of evoked objects
inverse_operator, the inverse_operator,
fwd, the forward solution
X, [n_trials,p] the design matrix
cv_partition_ind, [n_trials,] partition index for cross-validation
snr_tuning_seq, a sequence of "snr" parameters to evaluate
Output:
snr_tuning_star, the best snr parameter
cv_MSE, the cross-validated MSE for each snr parameter
'''
n_fold = len(np.unique(cv_partition_ind))
# number of tuning paramters
n_par_tuning = len(snr_tuning_seq)
cv_MSE = np.ones([len(snr_tuning_seq),n_fold], dtype = np.float)*np.Inf
for j in range(n_fold):
# partition
test_trials = np.nonzero(cv_partition_ind == j)[0]
train_trials = np.nonzero(cv_partition_ind != j)[0]
evoked_list_train = [evoked_list[r] for r in range(len(evoked_list)) \
if r in train_trials]
Xtrain = X[train_trials,:]
evoked_list_test = [evoked_list[r] for r in range(len(evoked_list)) \
if r in test_trials]
Xtest = X[test_trials,:]
for i in range(n_par_tuning):
tmp_snr = snr_tuning_seq[i]
tmp_result = mne_regression(evoked_list_train, inverse_operator,
Xtrain, labels = labels,
snr=tmp_snr)
coef = tmp_result['coef']
# Now do the prediction
tmp_MSE = get_MSE_mne_regression(evoked_list_test, fwd, Xtest,
coef, labels = labels)
cv_MSE[i,j] = tmp_MSE
cv_MSE = cv_MSE.mean(axis = 1)
best_ind = np.argmin(cv_MSE)
snr_tuning_star = snr_tuning_seq[best_ind]
return snr_tuning_star, cv_MSE
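# ---------------------------------------------------------------------
# Minimal illustrative sketch of how the fold index and the "snr" grid
# passed to select_lambda_tuning_mne_regression_cv might be prepared.
# Only these two inputs are built here because the evoked / inverse /
# forward objects require real MEG data; every *_demo name is a
# placeholder.
if __name__ == "__main__":
    import numpy as np
    n_trials_demo, n_fold_demo = 60, 5
    rng = np.random.RandomState(1)
    cv_partition_ind_demo = np.arange(n_trials_demo) % n_fold_demo  # balanced folds 0..4
    rng.shuffle(cv_partition_ind_demo)
    snr_tuning_seq_demo = np.array([0.5, 1.0, 2.0, 3.0])  # candidate "snr" values
    print(cv_partition_ind_demo[:10], snr_tuning_seq_demo)
    # snr_tuning_star, cv_MSE = select_lambda_tuning_mne_regression_cv(
    #     evoked_list, inverse_operator, fwd, X, cv_partition_ind_demo,
    #     snr_tuning_seq_demo, labels=labels)   # needs real MEG inputs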
| gpl-3.0 |
OpenFacetracker/facetracker-core | lib/youtube-dl/youtube_dl/downloader/__init__.py | 8 | 1105 | from __future__ import unicode_literals
from .common import FileDownloader
from .external import get_external_downloader
from .f4m import F4mFD
from .hls import HlsFD
from .hls import NativeHlsFD
from .http import HttpFD
from .mplayer import MplayerFD
from .rtmp import RtmpFD
from ..utils import (
determine_protocol,
)
PROTOCOL_MAP = {
'rtmp': RtmpFD,
'm3u8_native': NativeHlsFD,
'm3u8': HlsFD,
'mms': MplayerFD,
'rtsp': MplayerFD,
'f4m': F4mFD,
}
def get_suitable_downloader(info_dict, params={}):
"""Get the downloader class that can handle the info dict."""
protocol = determine_protocol(info_dict)
info_dict['protocol'] = protocol
external_downloader = params.get('external_downloader')
if external_downloader is not None:
ed = get_external_downloader(external_downloader)
if ed.supports(info_dict):
return ed
if protocol == 'm3u8' and params.get('hls_prefer_native'):
return NativeHlsFD
return PROTOCOL_MAP.get(protocol, HttpFD)
__all__ = [
'get_suitable_downloader',
'FileDownloader',
]
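# Illustrative check only: PROTOCOL_MAP resolves a protocol string to a
# downloader class, and get_suitable_downloader falls back to HttpFD for
# anything not listed.  The protocol strings below are looked up directly,
# so no URL guessing is involved.
if __name__ == '__main__':
    assert PROTOCOL_MAP.get('rtmp', HttpFD) is RtmpFD
    assert PROTOCOL_MAP.get('https', HttpFD) is HttpFD  # unknown protocols fall back to plain HTTP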
| gpl-2.0 |
swapnakrishnan2k/tp-qemu | qemu/tests/virtio_port_login.py | 9 | 4239 | """
Collection of virtio_console and virtio_serialport tests.
:copyright: 2010-2012 Red Hat Inc.
"""
import logging
import aexpect
from autotest.client.shared import error
from virttest import utils_misc
from virttest import remote
from virttest import utils_virtio_port
class ConsoleLoginTest(utils_virtio_port.VirtioPortTest):
__sessions__ = []
def __init__(self, test, env, params):
super(ConsoleLoginTest, self).__init__(test, env, params)
self.vm = self.get_vm_with_ports(no_consoles=1, no_serialports=1)
@error.context_aware
def pre_step(self):
error.context("Config guest and reboot it", logging.info)
pre_cmd = self.params.get("pre_cmd")
session = self.vm.wait_for_login(timeout=360)
session.cmd(pre_cmd, timeout=240)
session = self.vm.reboot(session=session, timeout=900, serial=False)
self.__sessions__.append(session)
@error.context_aware
def virtio_console_login(self, port='vc1'):
error.context("Login guest via '%s'" % port, logging.info)
session = self.vm.wait_for_serial_login(timeout=180, virtio=port)
self.__sessions__.append(session)
return session
def console_login(self, port='vc1'):
return self.virtio_console_login(port=port)
@error.context_aware
def virtio_serial_login(self, port='vs1'):
error.context("Try to login guest via '%s'" % port, logging.info)
username = self.params.get("username")
password = self.params.get("password")
prompt = self.params.get("shell_prompt", "[\#\$]")
linesep = eval("'%s'" % self.params.get("shell_linesep", r"\n"))
for vport in self.get_virtio_ports(self.vm)[1]:
if vport.name == port:
break
            vport = None
if not vport:
            raise error.TestError("No virtio serial port '%s' found" % port)
logfile = "serial-%s-%s.log" % (vport.name, self.vm.name)
socat_cmd = "nc -U %s" % vport.hostfile
session = aexpect.ShellSession(socat_cmd, auto_close=False,
output_func=utils_misc.log_line,
output_params=(logfile,),
prompt=prompt)
session.set_linesep(linesep)
session.sendline()
self.__sessions__.append(session)
try:
remote.handle_prompts(session, username, password, prompt, 180)
            raise error.TestFail("virtio serial '%s' should not provide a " % port +
                                 "login channel")
except remote.LoginTimeoutError:
self.__sessions__.append(session)
logging.info("Can't login via %s" % port)
return session
def serial_login(self, port="vc1"):
return self.virtio_serial_login(port=port)
@error.context_aware
def cleanup(self):
error.context("Close open connection and destroy vm", logging.info)
for session in self.__sessions__:
if session:
session.close()
self.__sessions__.remove(session)
super(ConsoleLoginTest, self).cleanup(vm=self.vm)
def run(test, params, env):
"""
KVM virtio_console test
Basic function test to check virtio console login function.
:param test: kvm test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment
"""
login_console = params.get("login_console", "vc1")
console_params = params.object_params(login_console)
console_test = ConsoleLoginTest(test, env, params)
try:
console_test.pre_step()
port_type = console_params.get("virtio_port_type")
login_func = "%s_login" % port_type
logging.info("Login function: %s" % login_func)
session = getattr(console_test, login_func)(login_console)
if "serial" not in port_type:
for cmd in params.get("shell_cmd_list", "dir").split(","):
logging.info("sending command: %s" % cmd)
output = session.cmd_output(cmd, timeout=240)
logging.info("output:%s" % output)
except Exception:
console_test.cleanup()
raise
| gpl-2.0 |
Johnetordoff/osf.io | admin/registration_providers/urls.py | 5 | 1610 | from django.conf.urls import url
from . import views
app_name = 'admin'
urlpatterns = [
url(r'^create/$', views.CreateRegistrationProvider.as_view(), name='create'),
url(r'^$', views.RegistrationProviderList.as_view(), name='list'),
url(r'^import/$', views.ImportRegistrationProvider.as_view(), name='import'),
url(r'^process_custom_taxonomy/$', views.ProcessCustomTaxonomy.as_view(), name='process_custom_taxonomy'),
url(r'^(?P<registration_provider_id>[a-z0-9]+)/$', views.RegistrationProviderDetail.as_view(), name='detail'),
url(r'^(?P<registration_provider_id>[a-z0-9]+)/delete/$', views.DeleteRegistrationProvider.as_view(), name='delete'),
url(r'^(?P<registration_provider_id>[a-z0-9]+)/export/$', views.ExportRegistrationProvider.as_view(), name='export'),
url(r'^(?P<registration_provider_id>[a-z0-9]+)/import/$', views.ImportRegistrationProvider.as_view(), name='import'),
url(r'^(?P<registration_provider_id>[a-z0-9]+)/schemas/$', views.ChangeSchema.as_view(), name='schemas'),
url(r'^(?P<registration_provider_id>[a-z0-9]+)/cannot_delete/$', views.CannotDeleteProvider.as_view(), name='cannot_delete'),
url(r'^(?P<registration_provider_id>[a-z0-9]+)/share_source/$', views.ShareSourceRegistrationProvider.as_view(), name='share_source'),
url(r'^(?P<registration_provider_id>[a-z0-9]+)/remove_admins_and_moderators/$', views.RemoveAdminsAndModerators.as_view(), name='remove_admins_and_moderators'),
url(r'^(?P<registration_provider_id>[a-z0-9]+)/add_admin_or_moderator/$', views.AddAdminOrModerator.as_view(), name='add_admin_or_moderator'),
]
| apache-2.0 |
dbcls/dbcls-galaxy | lib/galaxy/util/heartbeat.py | 3 | 3730 |
# Attempt to load threadframe module, and only define Heartbeat class
# if available
try:
import pkg_resources
pkg_resources.require( "threadframe" )
except:
import sys
print >> sys.stderr, "No threadframe module, Heartbeat not available"
Heartbeat = None
else:
import threading
import threadframe
import time
import traceback
import os
import sys
def get_current_thread_object_dict():
"""
Get a dictionary of all 'Thread' objects created via the threading
module keyed by thread_id. Note that not all interpreter threads
        have a thread object, only the main thread and any created via the
'threading' module. Threads created via the low level 'thread' module
will not be in the returned dictionary.
HACK: This mucks with the internals of the threading module since that
module does not expose any way to match 'Thread' objects with
        interpreter thread identifiers (though it should).
"""
rval = dict()
# Acquire the lock and then union the contents of 'active' and 'limbo'
# threads into the return value.
threading._active_limbo_lock.acquire()
rval.update( threading._active )
rval.update( threading._limbo )
threading._active_limbo_lock.release()
return rval
class Heartbeat( threading.Thread ):
"""
Thread that periodically dumps the state of all threads to a file using
the `threadframe` extension
"""
def __init__( self, name="Heartbeat Thread", period=20, fname="heartbeat.log" ):
threading.Thread.__init__( self, name=name )
self.should_stop = False
self.period = period
self.fname = fname
self.file = None
# Save process id
self.pid = os.getpid()
# Event to wait on when sleeping, allows us to interrupt for shutdown
self.wait_event = threading.Event()
def run( self ):
self.file = open( self.fname, "a" )
print >> self.file, "Heartbeat for pid %d thread started at %s" % ( self.pid, time.asctime() )
print >> self.file
try:
while not self.should_stop:
# Print separator with timestamp
print >> self.file, "Traceback dump for all threads at %s:" % time.asctime()
print >> self.file
# Print the thread states
threads = get_current_thread_object_dict()
for thread_id, frame in threadframe.dict().iteritems():
if thread_id in threads:
object = repr( threads[thread_id] )
else:
object = "<No Thread object>"
print >> self.file, "Thread %s, %s:" % ( thread_id, object )
print >> self.file
traceback.print_stack( frame, file=self.file )
print >> self.file
print >> self.file, "End dump"
print >> self.file
self.file.flush()
# Sleep for a bit
self.wait_event.wait( self.period )
finally:
print >> self.file, "Heartbeat for pid %d thread stopped at %s" % ( self.pid, time.asctime() )
print >> self.file
# Cleanup
self.file.close()
def shutdown( self ):
self.should_stop = True
self.wait_event.set()
self.join()
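# Minimal usage sketch (illustrative values only): one Heartbeat thread is
# started per process and periodically appends a stack dump of every
# interpreter thread to the given file.  The guard also covers the case
# where the threadframe module is missing and Heartbeat is None.
if __name__ == "__main__" and Heartbeat is not None:
    import time
    hb = Heartbeat( period=5, fname="heartbeat_demo.log" )
    hb.start()
    time.sleep( 12 )   # long enough for a couple of dumps
    hb.shutdown()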
| mit |
QijunPan/ansible | lib/ansible/modules/cloud/ovirt/ovirt_tags_facts.py | 13 | 4781 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_tags_facts
short_description: Retrieve facts about one or more oVirt tags
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt tags."
notes:
- "This module creates a new top-level C(ovirt_tags) fact, which
contains a list of tags"
options:
name:
description:
- "Name of the tag which should be listed."
vm:
description:
- "Name of the VM, which tags should be listed."
host:
description:
- "Name of the host, which tags should be listed."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all tags, which names start with C(tag):
- ovirt_tags_facts:
name: tag*
- debug:
var: tags
# Gather facts about all tags, which are assigned to VM C(postgres):
- ovirt_tags_facts:
vm: postgres
- debug:
var: tags
# Gather facts about all tags, which are assigned to host C(west):
- ovirt_tags_facts:
host: west
- debug:
var: tags
'''
RETURN = '''
ovirt_tags:
    description: "List of dictionaries describing the tags. Tag attributes are mapped to dictionary keys,
                  all tag attributes can be found at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/tag."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
search_by_name,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
name=dict(default=None),
host=dict(default=None),
vm=dict(default=None),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
connection = create_connection(module.params.pop('auth'))
tags_service = connection.system_service().tags_service()
tags = []
all_tags = tags_service.list()
if module.params['name']:
tags.extend([
t for t in all_tags
if fnmatch.fnmatch(t.name, module.params['name'])
])
if module.params['host']:
hosts_service = connection.system_service().hosts_service()
host = search_by_name(hosts_service, module.params['host'])
if host is None:
raise Exception("Host '%s' was not found." % module.params['host'])
tags.extend([
tag for tag in hosts_service.host_service(host.id).tags_service().list()
])
if module.params['vm']:
vms_service = connection.system_service().vms_service()
vm = search_by_name(vms_service, module.params['vm'])
if vm is None:
raise Exception("Vm '%s' was not found." % module.params['vm'])
tags.extend([
tag for tag in vms_service.vm_service(vm.id).tags_service().list()
])
if not (module.params['vm'] or module.params['host'] or module.params['name']):
tags = all_tags
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_tags=[
get_dict_of_struct(
struct=t,
connection=connection,
fetch_nested=module.params['fetch_nested'],
attributes=module.params['nested_attributes'],
) for t in tags
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
eugena/django | tests/select_for_update/tests.py | 203 | 9626 | from __future__ import unicode_literals
import threading
import time
from multiple_database.routers import TestRouter
from django.conf import settings
from django.db import connection, router, transaction
from django.db.utils import DEFAULT_DB_ALIAS, ConnectionHandler, DatabaseError
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import Person
# We need to set settings.DEBUG to True so we can capture the output SQL
# to examine.
@override_settings(DEBUG=True)
class SelectForUpdateTests(TransactionTestCase):
available_apps = ['select_for_update']
def setUp(self):
# This is executed in autocommit mode so that code in
# run_select_for_update can see this data.
self.person = Person.objects.create(name='Reinhardt')
# We need another database connection in transaction to test that one
# connection issuing a SELECT ... FOR UPDATE will block.
new_connections = ConnectionHandler(settings.DATABASES)
self.new_connection = new_connections[DEFAULT_DB_ALIAS]
def tearDown(self):
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
self.new_connection.close()
def start_blocking_transaction(self):
self.new_connection.set_autocommit(False)
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.new_connection.rollback()
self.new_connection.set_autocommit(True)
def has_for_update_sql(self, tested_connection, nowait=False):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = tested_connection.ops.for_update_sql(nowait)
sql = tested_connection.queries[-1]['sql']
return bool(sql.find(for_update_sql) > -1)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
Test that the backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic():
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(connection))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
Test that the backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic():
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(connection, nowait=True))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
If a SELECT...FOR UPDATE NOWAIT is run on a database backend
that supports FOR UPDATE but not NOWAIT, then we should find
that a DatabaseError is raised.
"""
self.assertRaises(
DatabaseError,
list,
Person.objects.all().select_for_update(nowait=True)
)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction(self):
"""
Test that a TransactionManagementError is raised
when a select_for_update query is executed outside of a transaction.
"""
with self.assertRaises(transaction.TransactionManagementError):
list(Person.objects.all().select_for_update())
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction_only_in_execution(self):
"""
Test that no TransactionManagementError is raised
when select_for_update is invoked outside of a transaction -
only when the query is executed.
"""
people = Person.objects.all().select_for_update()
with self.assertRaises(transaction.TransactionManagementError):
list(people)
def run_select_for_update(self, status, nowait=False):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on
# per-thread basis
with transaction.atomic():
people = list(
Person.objects.all().select_for_update(nowait=nowait)
)
people[0].name = 'Fred'
people[0].save()
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
Check that a thread running a select_for_update that
accesses rows being touched by a similar operation
on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError('Thread did not run and block')
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.assertFalse(thread.isAlive())
# We must commit the transaction to ensure that MySQL gets a fresh read,
# since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Check that running a raw query which can't obtain a FOR UPDATE lock
raises the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update')
@override_settings(DATABASE_ROUTERS=[TestRouter()])
def test_select_for_update_on_multidb(self):
query = Person.objects.select_for_update()
self.assertEqual(router.db_for_write(Person), query.db)
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_with_get(self):
with transaction.atomic():
person = Person.objects.select_for_update().get(name='Reinhardt')
self.assertEqual(person.name, 'Reinhardt')
| bsd-3-clause |
vlinhd11/vlinhd11-android-scripting | python/gdata/src/gdata/experimental_oauth.py | 133 | 4540 | #!/usr/bin/env python
import base64
import binascii
import urllib
import time
import random
import hmac
from gdata.tlslite.utils import keyfactory
from gdata.tlslite.utils import cryptomath
OAUTH_VERSION = '1.0'
def get_normalized_http_url(http_request):
full_url = http_request.uri.to_string()
return full_url[:full_url.find('?')]
def escape(s):
if isinstance(s, unicode):
s = s.encode('utf-8')
return urllib.quote(s, safe='~')
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
return ''.join([str(random.randint(0, 9)) for i in xrange(length)])
def timestamp():
return int(time.time())
def get_normalized_parameters(http_request, oauth_params):
params = oauth_params.copy()
params.update(http_request.uri.query)
if 'oauth_signature' in params:
del params['oauth_signature']
pairs = params.items()
# sort lexicographically, first after key, then after value
pairs.sort()
# combine key value pairs in string and escape
x = '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in pairs])
return x
def build_signature_base_string(http_request, oauth_params):
    return '&'.join((
        escape(http_request.method.upper()),
        escape(get_normalized_http_url(http_request)),
        escape(get_normalized_parameters(http_request, oauth_params))))
def build_hmac_signature(self, http_request, oauth_params, consumer_secret,
token_secret):
raw = build_signature_base_string(http_request, oauth_params)
key = None
hashed = None
if token_secret:
key = '%s&%s' % (escape(consumer_secret), escape(token_secret))
else:
key = '%s&' % escape(consumer_secret)
try:
import hashlib
hashed = hmac.new(key, raw, hashlib.sha1)
except ImportError:
import sha
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
#?
def build_rsa_signature(self, http_request, oauth_params, cert):
base_string = build_signature_base_string(http_request, oauth_params)
# Pull the private key from the certificate
privatekey = keyfactory.parsePrivateKey(cert)
# Sign using the key
signed = privatekey.hashAndSign(base_string)
return binascii.b2a_base64(signed)[:-1]
#?
def check_signature(self, http_request, oauth_params, cert, signature):
decoded_sig = base64.b64decode(signature);
base_string = build_signature_base_string(http_request, oauth_params)
# Pull the public key from the certificate
publickey = keyfactory.parsePEMKey(cert, public=True)
# Check the signature
return publickey.hashAndVerify(decoded_sig, base_string)
def to_auth_header(oauth_params):
# Create a tuple containing key value pairs with an = between.
# Example: oauth_token="ad180jjd733klru7"
pairs = ('%s="%s"' % (escape(k), escape(v)) for k, v in oauth_params.iteritems())
# Place a , between each pair and return as an OAuth auth header value.
return 'OAuth %s' % (','.join(pairs))
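# Minimal usage sketch with made-up values: assemble the oauth_* parameter
# dict and render it as an Authorization header.  A real request would set
# oauth_signature from build_hmac_signature or build_rsa_signature.
if __name__ == '__main__':
    demo_params = {
        'oauth_consumer_key': 'example.com',
        'oauth_token': 'ad180jjd733klru7',
        'oauth_nonce': generate_nonce(),
        'oauth_timestamp': str(timestamp()),
        'oauth_signature_method': 'HMAC-SHA1',
        'oauth_version': OAUTH_VERSION,
    }
    print to_auth_header(demo_params)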
TEST_PUBLIC_CERT = """
-----BEGIN CERTIFICATE-----
MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0
IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV
BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY
zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb
mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3
DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d
4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb
WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J
-----END CERTIFICATE-----
"""
TEST_PRIVATE_CERT = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----
"""
| apache-2.0 |
algorhythms/LintCode | Subarray Sum II.py | 4 | 1766 | """
Given an integer array, find the subarrays whose sums lie within a given interval. Your code should return
the number of possible answers.
Example
Given [1,2,3,4] and interval = [1,3], return 4. The possible answers are:
[0, 0]
[0, 1]
[1, 1]
[3, 3]
"""
__author__ = 'Daniel'
from bisect import bisect_left, bisect_right
class Solution:
def subarraySumII(self, A, start, end):
"""
O(n lg n) Binary Search
Bound:
f[i] - f[j] = start
f[i] - f[j'] = end
start < end
f[j] > f[j']
:param A: an integer array
:param start: start an integer
:param end: end an integer
:return:
"""
n = len(A)
cnt = 0
f = [0 for _ in xrange(n+1)]
for i in xrange(1, n+1):
f[i] = f[i-1]+A[i-1] # from left
f.sort()
for i in xrange(n+1):
lo = bisect_left(f, f[i]-end, 0, i)
hi = bisect_right(f, f[i]-start, 0, i)
cnt += hi-lo # 0----lo----hi-----END
return cnt
def subarraySumII_TLE(self, A, start, end):
"""
O(n^2)
:param A: an integer array
:param start: start an integer
:param end: end an integer
:return:
"""
n = len(A)
cnt = 0
f = [0 for _ in xrange(n+1)]
for i in xrange(1, n+1):
f[i] = f[i-1]+A[i-1] # from left
for i in xrange(0, n+1):
for j in xrange(i+1, n+1):
s = f[j]-f[i]
if start <= s <= end:
cnt += 1
return cnt
if __name__ == "__main__":
assert Solution().subarraySumII([1, 2, 3, 4], 1, 3) == 4
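    # Illustrative trace of the O(n lg n) method on the same example:
    # prefix sums of [1, 2, 3, 4] are f = [0, 1, 3, 6, 10]; the pairs
    # (f[j], f[i]) with j < i whose difference lies in [1, 3] are
    # (0,1), (0,3), (1,3), (3,6) -- four pairs, matching the four
    # subarrays [0,0], [0,1], [1,1], [3,3].
    f = [0, 1, 3, 6, 10]
    pairs = [(a, b) for i, b in enumerate(f) for a in f[:i] if 1 <= b - a <= 3]
    assert len(pairs) == 4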
| apache-2.0 |
Gendreau/SnapchatBot | snapchat_bots/bot.py | 4 | 6980 | import logging, time, uuid, requests, base64
from pysnap import Snapchat
from pysnap.utils import make_request_token, timestamp
from snap import Snap
from constants import DEFAULT_TIMEOUT, STATIC_TOKEN, BASE_URL
FORMAT = '[%(asctime)-15s] %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger()
logger.level = logging.DEBUG
class SnapchatBot(object):
def __init__(self, username, password, **kwargs):
self.bot_id = uuid.uuid4().hex[0:4]
self.auth_token = STATIC_TOKEN
self.username = username
self.password = password
r = self._make_request("/loq/login", {
'username': self.username,
'password': self.password
})
result = r.json()
self.auth_token = result['updates_response']['auth_token']
self.client = Snapchat()
self.client.username = username
self.client.auth_token = self.auth_token
self.current_friends = self.get_friends()
self.added_me = self.get_added_me()
if hasattr(self, "initialize"):
self.initialize(**kwargs)
def log(self, message, level=logging.DEBUG):
logger.log(level, "[%s-%s] %s" % (self.__class__.__name__, self.bot_id, message))
@staticmethod
def process_snap(snap_obj, data, is_story = False):
media_type = snap_obj["media_type"]
sender = snap_obj["sender"]
snap_id = snap_obj['id']
duration = snap_obj['time']
snap = Snap(data=data,
snap_id=snap_id,
media_type=media_type,
duration=duration,
sender=sender,
is_story=is_story)
return snap
def mark_viewed(self, snap):
self.client.mark_viewed(snap.snap_id)
def listen(self, timeout=DEFAULT_TIMEOUT):
while True:
self.log("Querying for new snaps...")
snaps = self.get_snaps()
if hasattr(self, "on_snap"):
for snap in snaps:
self.on_snap(snap.sender, snap)
added_me = self.get_added_me()
newly_added = set(added_me).difference(self.added_me)
newly_deleted = set(self.added_me).difference(added_me)
self.added_me = added_me
if hasattr(self, "on_friend_add"):
for friend in newly_added:
self.log("User %s added me" % friend)
self.on_friend_add(friend)
if hasattr(self, "on_friend_delete"):
for friend in newly_deleted:
self.log("User %s deleted me" % friend)
self.on_friend_delete(friend)
time.sleep(timeout)
def get_friends(self):
return map(lambda fr: fr['name'], self.client.get_friends())
def get_added_me(self):
updates = self.client.get_updates()
return map(lambda fr: fr['name'], updates["added_friends"])
def send_snap(self, recipients, snap):
media_id = self._upload_snap(snap)
if type(recipients) is not list:
recipients = [recipients]
recipients_str = ','.join(recipients)
self.log("Sending snap %s to %s" % (snap.snap_id, recipients_str))
self.client.send(media_id, recipients_str, snap.duration)
def post_story(self, snap):
media_id = self._upload_snap(snap)
response = self.client.send_to_story(media_id, snap.duration, snap.media_type)
try:
snap.story_id = response['json']['story']['id']
except:
pass
def delete_story(self, snap):
print snap.story_id
if snap.story_id is None:
return
self.client._request('delete_story', {
'username': self.username,
'story_id': snap.story_id
})
def add_friend(self, username):
self.client.add_friend(username)
def delete_friend(self, username):
self.client.delete_friend(username)
def block(self, username):
self.client.block(username)
def process_snaps(self, snaps, mark_viewed = True):
ret = []
for snap_obj in snaps:
if snap_obj['status'] == 2:
continue
data = self.client.get_blob(snap_obj["id"])
if data is None:
continue
snap = self.process_snap(snap_obj, data)
if mark_viewed:
self.mark_viewed(snap)
ret.append(snap)
return ret
def process_stories(self, stories):
ret = []
for snap_obj in stories:
media_key = base64.b64decode(snap_obj['media_key'])
media_iv = base64.b64decode(snap_obj['media_iv'])
data = self.client.get_story_blob(snap_obj['media_id'],
media_key,
media_iv)
if data is None:
continue
snap_obj['sender'] = self.username
snap = self.process_snap(snap_obj, data, is_story = True)
ret.append(snap)
return ret
def get_snaps(self, mark_viewed=True):
snaps = self.client.get_snaps()
return self.process_snaps(snaps)
def get_my_stories(self):
response = self.client._request('stories', {
'username': self.username
})
stories = map(lambda s: s['story'], response.json()['my_stories'])
return self.process_stories(stories)
def get_friend_stories(self):
response = self.client._request('stories', {
'username': self.username
})
ret = []
stories_per_friend = map(lambda s: s['stories'], response.json()['friend_stories'])
for stories_obj in stories_per_friend:
stories = map(lambda so: so['story'], stories_obj)
ret.extend(self.process_stories(stories))
return ret
def clear_stories(self):
for story in self.get_my_stories():
self.delete_story(story)
def _upload_snap(self, snap):
if not snap.uploaded:
snap.media_id = self.client.upload(snap.file.name)
snap.uploaded = True
return snap.media_id
def _make_request(self, path, data = None, method = 'POST', files = None):
if data is None:
data = {}
headers = {
'User-Agent': 'Snapchat/8.1.1 (iPhone5,1; iOS 8.1.3; gzip)',
'Accept-Language': 'en-US;q=1, en;q=0.9',
'Accept-Locale': 'en'
}
now = timestamp()
if method == 'POST':
data['timestamp'] = now
data['req_token'] = make_request_token(self.auth_token, str(now))
resp = requests.post(BASE_URL + path, data = data, files = files, headers = headers)
else:
resp = requests.get(BASE_URL + path, params = data, headers = headers)
return resp
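# Minimal usage sketch with placeholder credentials: SnapchatBot is meant to
# be subclassed, and listen() dispatches to on_snap / on_friend_add /
# on_friend_delete whenever those hooks are defined.
if __name__ == "__main__":
    class EchoBot(SnapchatBot):
        def on_snap(self, sender, snap):
            # send every received snap straight back to its sender
            self.send_snap([sender], snap)
        def on_friend_add(self, friend):
            self.add_friend(friend)
        def on_friend_delete(self, friend):
            self.delete_friend(friend)
    bot = EchoBot("username-goes-here", "password-goes-here")
    bot.listen(timeout=10)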
| mit |
dmoliveira/vim-bootstrap | lib/jinja2/runtime.py | 606 | 19558 | # -*- coding: utf-8 -*-
"""
jinja2.runtime
~~~~~~~~~~~~~~
Runtime helpers.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from itertools import chain
from jinja2.nodes import EvalContext, _context_function_types
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
internalcode, object_type_repr
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
TemplateNotFound
from jinja2._compat import next, imap, text_type, iteritems, \
implements_iterator, implements_to_string, string_types, PY2
# these variables are exported to the template runtime
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
'TemplateRuntimeError', 'missing', 'concat', 'escape',
'markup_join', 'unicode_join', 'to_string', 'identity',
'TemplateNotFound']
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
#: the identity function. Useful for certain things in the environment
identity = lambda x: x
_last_iteration = object()
def markup_join(seq):
"""Concatenation that escapes if necessary and converts to unicode."""
buf = []
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
if hasattr(arg, '__html__'):
return Markup(u'').join(chain(buf, iterator))
return concat(buf)
def unicode_join(seq):
"""Simple args to unicode conversion and concatenation."""
return concat(imap(text_type, seq))
def new_context(environment, template_name, blocks, vars=None,
shared=None, globals=None, locals=None):
"""Internal helper to for context creation."""
if vars is None:
vars = {}
if shared:
parent = vars
else:
parent = dict(globals or (), **vars)
if locals:
# if the parent is shared a copy should be created because
# we don't want to modify the dict passed
if shared:
parent = dict(parent)
for key, value in iteritems(locals):
if key[:2] == 'l_' and value is not missing:
parent[key[2:]] = value
return Context(environment, parent, template_name, blocks)
class TemplateReference(object):
"""The `self` in templates."""
def __init__(self, context):
self.__context = context
def __getitem__(self, name):
blocks = self.__context.blocks[name]
return BlockReference(name, self.__context, blocks, 0)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.__context.name
)
class Context(object):
"""The template context holds the variables of a template. It stores the
values passed to the template and also the names the template exports.
Creating instances is neither supported nor useful as it's created
automatically at various stages of the template evaluation and should not
be created by hand.
The context is immutable. Modifications on :attr:`parent` **must not**
happen and modifications on :attr:`vars` are allowed from generated
template code only. Template filters and global functions marked as
:func:`contextfunction`\s get the active context passed as first argument
and are allowed to access the context read-only.
The template context supports read only dict operations (`get`,
`keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
`__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
method that doesn't fail with a `KeyError` but returns an
:class:`Undefined` object for missing variables.
"""
__slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars',
'name', 'blocks', '__weakref__')
def __init__(self, environment, parent, name, blocks):
self.parent = parent
self.vars = {}
self.environment = environment
self.eval_ctx = EvalContext(self.environment, name)
self.exported_vars = set()
self.name = name
# create the initial mapping of blocks. Whenever template inheritance
# takes place the runtime will update this mapping with the new blocks
# from the template.
self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
def super(self, name, current):
"""Render a parent block."""
try:
blocks = self.blocks[name]
index = blocks.index(current) + 1
blocks[index]
except LookupError:
return self.environment.undefined('there is no parent block '
'called %r.' % name,
name='super')
return BlockReference(name, self, blocks, index)
def get(self, key, default=None):
"""Returns an item from the template context, if it doesn't exist
`default` is returned.
"""
try:
return self[key]
except KeyError:
return default
def resolve(self, key):
"""Looks up a variable like `__getitem__` or `get` but returns an
        :class:`Undefined` object with the name that was looked up.
"""
if key in self.vars:
return self.vars[key]
if key in self.parent:
return self.parent[key]
return self.environment.undefined(name=key)
def get_exported(self):
"""Get a new dict with the exported variables."""
return dict((k, self.vars[k]) for k in self.exported_vars)
def get_all(self):
"""Return a copy of the complete context as dict including the
exported variables.
"""
return dict(self.parent, **self.vars)
@internalcode
def call(__self, __obj, *args, **kwargs):
"""Call the callable with the arguments and keyword arguments
provided but inject the active context or environment as first
argument if the callable is a :func:`contextfunction` or
:func:`environmentfunction`.
"""
if __debug__:
__traceback_hide__ = True
# Allow callable classes to take a context
fn = __obj.__call__
for fn_type in ('contextfunction',
'evalcontextfunction',
'environmentfunction'):
if hasattr(fn, fn_type):
__obj = fn
break
if isinstance(__obj, _context_function_types):
if getattr(__obj, 'contextfunction', 0):
args = (__self,) + args
elif getattr(__obj, 'evalcontextfunction', 0):
args = (__self.eval_ctx,) + args
elif getattr(__obj, 'environmentfunction', 0):
args = (__self.environment,) + args
try:
return __obj(*args, **kwargs)
except StopIteration:
return __self.environment.undefined('value was undefined because '
'a callable raised a '
'StopIteration exception')
def derived(self, locals=None):
"""Internal helper function to create a derived context."""
context = new_context(self.environment, self.name, {},
self.parent, True, None, locals)
context.vars.update(self.vars)
context.eval_ctx = self.eval_ctx
context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
return context
def _all(meth):
proxy = lambda self: getattr(self.get_all(), meth)()
proxy.__doc__ = getattr(dict, meth).__doc__
proxy.__name__ = meth
return proxy
keys = _all('keys')
values = _all('values')
items = _all('items')
# not available on python 3
if PY2:
iterkeys = _all('iterkeys')
itervalues = _all('itervalues')
iteritems = _all('iteritems')
del _all
def __contains__(self, name):
return name in self.vars or name in self.parent
def __getitem__(self, key):
"""Lookup a variable or raise `KeyError` if the variable is
undefined.
"""
item = self.resolve(key)
if isinstance(item, Undefined):
raise KeyError(key)
return item
def __repr__(self):
return '<%s %s of %r>' % (
self.__class__.__name__,
repr(self.get_all()),
self.name
)
# register the context as mapping if possible
try:
from collections import Mapping
Mapping.register(Context)
except ImportError:
pass
class BlockReference(object):
"""One block on a template reference."""
def __init__(self, name, context, stack, depth):
self.name = name
self._context = context
self._stack = stack
self._depth = depth
@property
def super(self):
"""Super the block."""
if self._depth + 1 >= len(self._stack):
return self._context.environment. \
undefined('there is no parent block called %r.' %
self.name, name='super')
return BlockReference(self.name, self._context, self._stack,
self._depth + 1)
@internalcode
def __call__(self):
rv = concat(self._stack[self._depth](self._context))
if self._context.eval_ctx.autoescape:
rv = Markup(rv)
return rv
class LoopContext(object):
"""A loop context for dynamic iteration."""
def __init__(self, iterable, recurse=None, depth0=0):
self._iterator = iter(iterable)
self._recurse = recurse
self._after = self._safe_next()
self.index0 = -1
self.depth0 = depth0
# try to get the length of the iterable early. This must be done
# here because there are some broken iterators around where there
# __len__ is the number of iterations left (i'm looking at your
# listreverseiterator!).
try:
self._length = len(iterable)
except (TypeError, AttributeError):
self._length = None
def cycle(self, *args):
"""Cycles among the arguments with the current loop index."""
if not args:
raise TypeError('no items for cycling given')
return args[self.index0 % len(args)]
first = property(lambda x: x.index0 == 0)
last = property(lambda x: x._after is _last_iteration)
index = property(lambda x: x.index0 + 1)
revindex = property(lambda x: x.length - x.index0)
revindex0 = property(lambda x: x.length - x.index)
depth = property(lambda x: x.depth0 + 1)
def __len__(self):
return self.length
def __iter__(self):
return LoopContextIterator(self)
def _safe_next(self):
try:
return next(self._iterator)
except StopIteration:
return _last_iteration
@internalcode
def loop(self, iterable):
if self._recurse is None:
raise TypeError('Tried to call non recursive loop. Maybe you '
"forgot the 'recursive' modifier.")
return self._recurse(iterable, self._recurse, self.depth0 + 1)
# a nifty trick to enhance the error message if someone tried to call
    # the loop without or with too many arguments.
__call__ = loop
del loop
@property
def length(self):
if self._length is None:
# if was not possible to get the length of the iterator when
# the loop context was created (ie: iterating over a generator)
# we have to convert the iterable into a sequence and use the
# length of that.
iterable = tuple(self._iterator)
self._iterator = iter(iterable)
self._length = len(iterable) + self.index0 + 1
return self._length
def __repr__(self):
return '<%s %r/%r>' % (
self.__class__.__name__,
self.index,
self.length
)
@implements_iterator
class LoopContextIterator(object):
"""The iterator for a loop context."""
__slots__ = ('context',)
def __init__(self, context):
self.context = context
def __iter__(self):
return self
def __next__(self):
ctx = self.context
ctx.index0 += 1
if ctx._after is _last_iteration:
raise StopIteration()
next_elem = ctx._after
ctx._after = ctx._safe_next()
return next_elem, ctx
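# Illustrative only: LoopContext can be driven directly to see the helpers
# that the template-level `loop` variable exposes; the values iterated over
# here are arbitrary.
if __name__ == '__main__':
    for value, loop in LoopContext(['a', 'b', 'c']):
        print('%d/%d %s %s' % (loop.index, loop.length, value,
                               loop.cycle('odd', 'even')))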
class Macro(object):
"""Wraps a macro function."""
def __init__(self, environment, func, name, arguments, defaults,
catch_kwargs, catch_varargs, caller):
self._environment = environment
self._func = func
self._argument_count = len(arguments)
self.name = name
self.arguments = arguments
self.defaults = defaults
self.catch_kwargs = catch_kwargs
self.catch_varargs = catch_varargs
self.caller = caller
@internalcode
def __call__(self, *args, **kwargs):
# try to consume the positional arguments
arguments = list(args[:self._argument_count])
off = len(arguments)
# if the number of arguments consumed is not the number of
# arguments expected we start filling in keyword arguments
# and defaults.
if off != self._argument_count:
for idx, name in enumerate(self.arguments[len(arguments):]):
try:
value = kwargs.pop(name)
except KeyError:
try:
value = self.defaults[idx - self._argument_count + off]
except IndexError:
value = self._environment.undefined(
'parameter %r was not provided' % name, name=name)
arguments.append(value)
# it's important that the order of these arguments does not change
# if not also changed in the compiler's `function_scoping` method.
# the order is caller, keyword arguments, positional arguments!
if self.caller:
caller = kwargs.pop('caller', None)
if caller is None:
caller = self._environment.undefined('No caller defined',
name='caller')
arguments.append(caller)
if self.catch_kwargs:
arguments.append(kwargs)
elif kwargs:
raise TypeError('macro %r takes no keyword argument %r' %
(self.name, next(iter(kwargs))))
if self.catch_varargs:
arguments.append(args[self._argument_count:])
elif len(args) > self._argument_count:
raise TypeError('macro %r takes not more than %d argument(s)' %
(self.name, len(self.arguments)))
return self._func(*arguments)
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
self.name is None and 'anonymous' or repr(self.name)
)
@implements_to_string
class Undefined(object):
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
"""
__slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
'_undefined_exception')
def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
@internalcode
def _fail_with_undefined_error(self, *args, **kwargs):
"""Regular callback function for undefined objects that raises an
`UndefinedError` on call.
"""
if self._undefined_hint is None:
if self._undefined_obj is missing:
hint = '%r is undefined' % self._undefined_name
elif not isinstance(self._undefined_name, string_types):
hint = '%s has no element %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = '%r has no attribute %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = self._undefined_hint
raise self._undefined_exception(hint)
@internalcode
def __getattr__(self, name):
if name[:2] == '__':
raise AttributeError(name)
return self._fail_with_undefined_error()
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
__float__ = __complex__ = __pow__ = __rpow__ = \
_fail_with_undefined_error
def __eq__(self, other):
return type(self) is type(other)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return id(type(self))
def __str__(self):
return u''
def __len__(self):
return 0
def __iter__(self):
if 0:
yield None
def __nonzero__(self):
return False
def __repr__(self):
return 'Undefined'
@implements_to_string
class DebugUndefined(Undefined):
"""An undefined that returns the debug info when printed.
>>> foo = DebugUndefined(name='foo')
>>> str(foo)
'{{ foo }}'
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
"""
__slots__ = ()
def __str__(self):
if self._undefined_hint is None:
if self._undefined_obj is missing:
return u'{{ %s }}' % self._undefined_name
return '{{ no such element: %s[%r] }}' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
return u'{{ undefined value printed: %s }}' % self._undefined_hint
@implements_to_string
class StrictUndefined(Undefined):
"""An undefined that barks on print and iteration as well as boolean
tests and all kinds of comparisons. In other words: you can do nothing
with it except checking if it's defined using the `defined` test.
>>> foo = StrictUndefined(name='foo')
>>> str(foo)
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
>>> not foo
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
>>> foo + 42
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
"""
__slots__ = ()
__iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
__ne__ = __bool__ = __hash__ = \
Undefined._fail_with_undefined_error
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
| mit |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_multiprocessing.py | 48 | 55201 | #!/usr/bin/env python
#
# Unit tests for the multiprocessing package
#
import unittest
import threading
import Queue
import time
import sys
import os
import gc
import signal
import array
import copy
import socket
import random
import logging
# Work around broken sem_open implementations
try:
import multiprocessing.synchronize
except ImportError, e:
from test.test_support import TestSkipped
raise TestSkipped(e)
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
import _multiprocessing
from multiprocessing import util
#
#
#
latin = str
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.WARNING
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
return
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertTrue(isinstance(authkey, bytes))
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def _test(self, q, *args, **kwds):
current = self.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if self.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEquals(p.authkey, current.authkey)
self.assertEquals(p.is_alive(), False)
self.assertEquals(p.daemon, True)
self.assertTrue(p not in self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEquals(p.exitcode, None)
self.assertEquals(p.is_alive(), True)
self.assertTrue(p in self.active_children())
self.assertEquals(q.get(), args[1:])
self.assertEquals(q.get(), kwargs)
self.assertEquals(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEquals(q.get(), current.authkey)
self.assertEquals(q.get(), p.pid)
p.join()
self.assertEquals(p.exitcode, 0)
self.assertEquals(p.is_alive(), False)
self.assertTrue(p not in self.active_children())
def _test_terminate(self):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertTrue(p in self.active_children())
self.assertEqual(p.exitcode, None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertTrue(p not in self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertTrue(p not in self.active_children())
p.start()
self.assertTrue(p in self.active_children())
p.join()
self.assertTrue(p not in self.active_children())
def _test_recursion(self, wconn, id):
from multiprocessing import forking
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = self.Process(
target=self._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
def _test_put(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
def _test_get(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
def _test_fork(self, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
def _test_task_done(self, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
return
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in xrange(4)]
for p in workers:
p.start()
for i in xrange(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
def f(self, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
def _test_event(self, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
#self.assertEqual(event.is_set(), False)
self.assertEqual(wait(0.0), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
# self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
self.Process(target=self._test_event, args=(event,)).start()
self.assertEqual(wait(), None)
#
#
#
class _TestValue(BaseTestCase):
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def _test(self, values):
for sv, cv in zip(values, self.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if self.TYPE != 'processes':
return
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
if self.TYPE != 'processes':
return
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
def f(self, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
def test_array(self, raw=False):
if self.TYPE != 'processes':
return
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
def test_rawarray(self):
self.test_array(raw=True)
def test_getobj_getlock_obj(self):
if self.TYPE != 'processes':
return
arr1 = self.Array('i', range(10))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', range(10), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', range(10), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], range(10))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = range(65, 70)
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_make_pool(self):
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
if self.TYPE == 'manager':
# On Unix a forked process increfs each shared object to
# which its parent process held a reference. If the
# forked process gets terminated then there is likely to
# be a reference leak. So to prevent
# _TestZZZNumberOfObjects from failing we skip this test
# when using a manager.
return
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertTrue(join.elapsed < 0.2)
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
if refs != EXPECTED_NUMBER:
print self.manager._debug_info()
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = Queue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def _putter(self, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
def _putter(self, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 9999), authkey=authkey, serializer=SERIALIZER)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=('localhost', 9999), authkey=authkey, serializer=SERIALIZER)
manager.start()
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _echo(self, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort, e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
return
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _test(self, address):
conn = self.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
#
# Test of sending connection and socket objects between processes
#
"""
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
"""
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
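        # sanity check on the sorted block list: consecutive entries must either
        # be contiguous within the same arena or start a new arena at offset 0,
        # i.e. the heap bookkeeping leaves no gaps and no overlaps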
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
#
#
#
try:
from ctypes import Structure, Value, copy, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _double(self, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
if c_int is None:
return
x = Value('i', 7, lock=lock)
y = Value(ctypes.c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = Array('d', range(10), lock=lock)
string = Array('c', 20, lock=lock)
string.value = 'hello'
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
if c_int is None:
return
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _test_finalize(self, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
        # call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = (
'multiprocessing', 'multiprocessing.connection',
'multiprocessing.heap', 'multiprocessing.managers',
'multiprocessing.pool', 'multiprocessing.process',
'multiprocessing.reduction', 'multiprocessing.sharedctypes',
'multiprocessing.synchronize', 'multiprocessing.util'
)
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
def _test_level(self, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
def test_invalid_handles(self):
if WIN32:
return
conn = _multiprocessing.Connection(44977608)
self.assertRaises(IOError, conn.poll)
self.assertRaises(IOError, _multiprocessing.Connection, -1)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
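        # plain functions (e.g. active_children) are wrapped as staticmethods so
        # that they do not turn into unbound methods when copied onto the mixin
        # classes below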
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type[0].upper() + type[1:]
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
#
# Create test cases
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
locals().update(get_attributes(multiprocessing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
manager = object.__new__(multiprocessing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
locals().update(get_attributes(multiprocessing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
testcases_other = [OtherTest, TestInvalidHandle]
#
#
#
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
from test.test_support import TestSkipped
raise TestSkipped("OSError raises on RLock creation, see issue 3111!")
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
| apache-2.0 |
Glutanimate/image-occlusion-enhanced | src/image_occlusion_enhanced/editor.py | 1 | 16200 | # -*- coding: utf-8 -*-
####################################################
## ##
## Image Occlusion Enhanced ##
## ##
## Copyright (c) Glutanimate 2016-2017 ##
## (https://github.com/Glutanimate) ##
## ##
## Based on Image Occlusion 2.0 ##
## Copyright (c) 2012-2015 tmbb ##
## (https://github.com/tmbb) ##
## ##
####################################################
"""
Image Occlusion editor dialog
"""
import os
from aqt.qt import *
from aqt import mw, webview, deckchooser, tagedit, sip
from aqt.utils import saveGeom, restoreGeom
from anki.hooks import addHook, remHook
from .dialogs import ioHelp
from .consts import *
from .config import *
class ImgOccWebPage(webview.AnkiWebPage):
def acceptNavigationRequest(self, url, navType, isMainFrame):
return True
class ImgOccWebView(webview.AnkiWebView):
def __init__(self, parent=None):
super().__init__(parent=parent)
self._domDone = False
def _onBridgeCmd(self, cmd):
# ignore webchannel messages that arrive after underlying webview
# deleted
if sip.isdeleted(self):
return
if cmd == "domDone":
return
if cmd == "svgEditDone":
self._domDone = True
self._maybeRunActions()
else:
return self.onBridgeCmd(cmd)
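    # note: callbacks queued via runOnLoaded below are held back until svg-edit
    # reports "svgEditDone" over the bridge, at which point _maybeRunActions
    # flushes the pending queue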
def runOnLoaded(self, callback):
self._domDone = False
self._queueAction("callback", callback)
def _maybeRunActions(self):
while self._pendingActions and self._domDone:
name, args = self._pendingActions.pop(0)
if name == "eval":
self._evalWithCallback(*args)
elif name == "setHtml":
self._setHtml(*args)
elif name == "callback":
callback = args[0]
callback()
else:
raise Exception("unknown action: {}".format(name))
class ImgOccEdit(QDialog):
"""Main Image Occlusion Editor dialog"""
def __init__(self, imgoccadd, parent):
QDialog.__init__(self)
mw.setupDialogGC(self)
self.setWindowFlags(Qt.Window)
self.visible = False
self.imgoccadd = imgoccadd
self.parent = parent
self.mode = "add"
loadConfig(self)
self.setupUi()
restoreGeom(self, "imgoccedit")
addHook("unloadProfile", self.onProfileUnload)
def closeEvent(self, event):
if mw.pm.profile is not None:
self.deckChooser.cleanup()
saveGeom(self, "imgoccedit")
self.visible = False
self.svg_edit = None
del(self.svg_edit_anim) # might not be gc'd
remHook("unloadProfile", self.onProfileUnload)
QDialog.reject(self)
def onProfileUnload(self):
if not sip.isdeleted(self):
self.close()
def reject(self):
# Override QDialog Esc key reject
pass
def setupUi(self):
"""Set up ImgOccEdit UI"""
# Main widgets aside from fields
self.svg_edit = ImgOccWebView(parent=self)
self.svg_edit._page = ImgOccWebPage(self.svg_edit._onBridgeCmd)
self.svg_edit.setPage(self.svg_edit._page)
self.tags_hbox = QHBoxLayout()
self.tags_edit = tagedit.TagEdit(self)
self.tags_label = QLabel("Tags")
self.tags_label.setFixedWidth(70)
self.deck_container = QWidget()
self.deckChooser = deckchooser.DeckChooser(mw,
self.deck_container, label=True)
self.deckChooser.deck.setAutoDefault(False)
# workaround for tab focus order issue of the tags entry
# (this particular section is only needed when the quick deck
# buttons add-on is installed)
if self.deck_container.layout().children(): # multiple deck buttons
for i in range(self.deck_container.layout().children()[0].count()):
try:
item = self.deck_container.layout().children()[0].itemAt(i)
# remove Tab focus manually:
item.widget().setFocusPolicy(Qt.ClickFocus)
item.widget().setAutoDefault(False)
except AttributeError:
pass
# Button row widgets
self.bottom_label = QLabel()
button_box = QDialogButtonBox(Qt.Horizontal, self)
button_box.setCenterButtons(False)
image_btn = QPushButton("Change &Image", clicked=self.changeImage)
image_btn.setIcon(QIcon(os.path.join(ICONS_PATH, "add.png")))
image_btn.setIconSize(QSize(16, 16))
image_btn.setAutoDefault(False)
help_btn = QPushButton("&Help", clicked=self.onHelp)
help_btn.setAutoDefault(False)
self.occl_tp_select = QComboBox()
self.occl_tp_select.addItems(["Don't Change", "Hide All, Guess One",
"Hide One, Guess One"])
self.edit_btn = button_box.addButton("&Edit Cards",
QDialogButtonBox.ActionRole)
self.new_btn = button_box.addButton("&Add New Cards",
QDialogButtonBox.ActionRole)
self.ao_btn = button_box.addButton("Hide &All, Guess One",
QDialogButtonBox.ActionRole)
self.oa_btn = button_box.addButton("Hide &One, Guess One",
QDialogButtonBox.ActionRole)
close_button = button_box.addButton("&Close",
QDialogButtonBox.RejectRole)
image_tt = ("Switch to a different image while preserving all of "
"the shapes and fields")
dc_tt = "Preserve existing occlusion type"
edit_tt = "Edit all cards using current mask shapes and field entries"
new_tt = "Create new batch of cards without editing existing ones"
ao_tt = ("Generate cards with nonoverlapping information, where all<br>"
"labels are hidden on the front and one revealed on the back")
oa_tt = ("Generate cards with overlapping information, where one<br>"
"label is hidden on the front and revealed on the back")
close_tt = "Close Image Occlusion Editor without generating cards"
image_btn.setToolTip(image_tt)
self.edit_btn.setToolTip(edit_tt)
self.new_btn.setToolTip(new_tt)
self.ao_btn.setToolTip(ao_tt)
self.oa_btn.setToolTip(oa_tt)
close_button.setToolTip(close_tt)
self.occl_tp_select.setItemData(0, dc_tt, Qt.ToolTipRole)
self.occl_tp_select.setItemData(1, ao_tt, Qt.ToolTipRole)
self.occl_tp_select.setItemData(2, oa_tt, Qt.ToolTipRole)
for btn in [image_btn, self.edit_btn, self.new_btn, self.ao_btn,
self.oa_btn, close_button]:
btn.setFocusPolicy(Qt.ClickFocus)
self.edit_btn.clicked.connect(self.editNote)
self.new_btn.clicked.connect(self.new)
self.ao_btn.clicked.connect(self.addAO)
self.oa_btn.clicked.connect(self.addOA)
close_button.clicked.connect(self.close)
# Set basic layout up
# Button row
bottom_hbox = QHBoxLayout()
bottom_hbox.addWidget(image_btn)
bottom_hbox.addWidget(help_btn)
bottom_hbox.insertStretch(2, stretch=1)
bottom_hbox.addWidget(self.bottom_label)
bottom_hbox.addWidget(self.occl_tp_select)
bottom_hbox.addWidget(button_box)
# Tab 1
vbox1 = QVBoxLayout()
svg_edit_loader = QLabel("Loading...")
svg_edit_loader.setAlignment(Qt.AlignCenter)
loader_icon = os.path.join(ICONS_PATH, "loader.gif")
anim = QMovie(loader_icon)
svg_edit_loader.setMovie(anim)
anim.start()
self.svg_edit_loader = svg_edit_loader
self.svg_edit_anim = anim
vbox1.addWidget(self.svg_edit, stretch=1)
vbox1.addWidget(self.svg_edit_loader, stretch=1)
# Tab 2
# vbox2 fields are variable and added by setupFields() at a later point
self.vbox2 = QVBoxLayout()
# Main Tab Widget
tab1 = QWidget()
self.tab2 = QWidget()
tab1.setLayout(vbox1)
self.tab2.setLayout(self.vbox2)
self.tab_widget = QTabWidget()
self.tab_widget.setFocusPolicy(Qt.ClickFocus)
self.tab_widget.addTab(tab1, "&Masks Editor")
self.tab_widget.addTab(self.tab2, "&Fields")
self.tab_widget.setTabToolTip(
1, "Include additional information (optional)")
self.tab_widget.setTabToolTip(
0, "Create image occlusion masks (required)")
# Main Window
vbox_main = QVBoxLayout()
vbox_main.addWidget(self.tab_widget)
vbox_main.addLayout(bottom_hbox)
self.setLayout(vbox_main)
self.setMinimumWidth(640)
self.tab_widget.setCurrentIndex(0)
self.svg_edit.setFocus()
self.showSvgEdit(False)
# Define and connect key bindings
# Field focus hotkeys
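        # (f=i-1 freezes the field index per shortcut at definition time; a
        # plain closure over i would make every shortcut focus the last field)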
for i in range(1, 10):
QShortcut(QKeySequence("Ctrl+%i" % i),
self).activated.connect(lambda f=i-1: self.focusField(f))
# Other hotkeys
QShortcut(QKeySequence("Ctrl+Return"),
self).activated.connect(lambda: self.defaultAction(True))
QShortcut(QKeySequence("Ctrl+Shift+Return"),
self).activated.connect(lambda: self.addOA(True))
QShortcut(QKeySequence("Ctrl+Tab"),
self).activated.connect(self.switchTabs)
QShortcut(QKeySequence("Ctrl+r"),
self).activated.connect(self.resetMainFields)
QShortcut(QKeySequence("Ctrl+Shift+r"),
self).activated.connect(self.resetAllFields)
QShortcut(QKeySequence("Ctrl+Shift+t"),
self).activated.connect(self.focusTags)
QShortcut(QKeySequence("Ctrl+f"),
self).activated.connect(self.fitImageCanvas)
# Various actions that act on / interact with the ImgOccEdit UI:
# Note actions
def changeImage(self):
self.imgoccadd.onChangeImage()
self.fitImageCanvas()
def defaultAction(self, close):
if self.mode == "add":
self.addAO(close)
else:
self.editNote()
def addAO(self, close=False):
self.imgoccadd.onAddNotesButton("ao", close)
def addOA(self, close=False):
self.imgoccadd.onAddNotesButton("oa", close)
def new(self, close=False):
choice = self.occl_tp_select.currentText()
self.imgoccadd.onAddNotesButton(choice, close)
def editNote(self):
choice = self.occl_tp_select.currentText()
self.imgoccadd.onEditNotesButton(choice)
def onHelp(self):
if self.mode == "add":
ioHelp("add", parent=self)
else:
ioHelp("edit", parent=self)
# Window state
def resetFields(self):
"""Reset all widgets. Needed for changes to the note type"""
layout = self.vbox2
for i in reversed(list(range(layout.count()))):
item = layout.takeAt(i)
layout.removeItem(item)
if item.widget():
item.widget().setParent(None)
elif item.layout():
sublayout = item.layout()
sublayout.setParent(None)
for i in reversed(list(range(sublayout.count()))):
subitem = sublayout.takeAt(i)
sublayout.removeItem(subitem)
subitem.widget().setParent(None)
self.tags_hbox.setParent(None)
def setupFields(self, flds):
"""Setup dialog text edits based on note type fields"""
self.tedit = {}
self.tlabel = {}
self.flds = flds
for i in flds:
if i['name'] in self.ioflds_priv:
continue
hbox = QHBoxLayout()
tedit = QPlainTextEdit()
label = QLabel(i["name"])
hbox.addWidget(label)
hbox.addWidget(tedit)
tedit.setTabChangesFocus(True)
tedit.setMinimumHeight(40)
label.setFixedWidth(70)
self.tedit[i["name"]] = tedit
self.tlabel[i["name"]] = label
self.vbox2.addLayout(hbox)
self.tags_hbox.addWidget(self.tags_label)
self.tags_hbox.addWidget(self.tags_edit)
self.vbox2.addLayout(self.tags_hbox)
self.vbox2.addWidget(self.deck_container)
# switch Tab focus order of deckchooser and tags_edit (
# for some reason it's the wrong way around by default):
self.tab2.setTabOrder(self.tags_edit, self.deckChooser.deck)
def switchToMode(self, mode):
"""Toggle between add and edit layouts"""
hide_on_add = [self.occl_tp_select, self.edit_btn, self.new_btn]
hide_on_edit = [self.ao_btn, self.oa_btn]
self.mode = mode
for i in list(self.tedit.values()):
i.show()
for i in list(self.tlabel.values()):
i.show()
if mode == "add":
for i in hide_on_add:
i.hide()
for i in hide_on_edit:
i.show()
dl_txt = "Deck"
ttl = "Image Occlusion Enhanced - Add Mode"
bl_txt = "Add Cards:"
else:
for i in hide_on_add:
i.show()
for i in hide_on_edit:
i.hide()
for i in self.sconf['skip']:
if i in list(self.tedit.keys()):
self.tedit[i].hide()
self.tlabel[i].hide()
dl_txt = "Deck for <i>Add new cards</i>"
ttl = "Image Occlusion Enhanced - Editing Mode"
bl_txt = "Type:"
self.deckChooser.deckLabel.setText(dl_txt)
self.setWindowTitle(ttl)
self.bottom_label.setText(bl_txt)
def showSvgEdit(self, state):
if not state:
self.svg_edit.hide()
self.svg_edit_anim.start()
self.svg_edit_loader.show()
else:
self.svg_edit_anim.stop()
self.svg_edit_loader.hide()
self.svg_edit.show()
# Other actions
def switchTabs(self):
currentTab = self.tab_widget.currentIndex()
if currentTab == 0:
self.tab_widget.setCurrentIndex(1)
if isinstance(QApplication.focusWidget(), QPushButton):
self.tedit[self.ioflds["hd"]].setFocus()
else:
self.tab_widget.setCurrentIndex(0)
def focusField(self, idx):
"""Focus field in vbox2 layout by index number"""
self.tab_widget.setCurrentIndex(1)
target_item = self.vbox2.itemAt(idx)
if not target_item:
return
target_layout = target_item.layout()
target_widget = target_item.widget()
if target_layout:
target = target_layout.itemAt(1).widget()
elif target_widget:
target = target_widget
target.setFocus()
def focusTags(self):
self.tab_widget.setCurrentIndex(1)
self.tags_edit.setFocus()
def resetMainFields(self):
"""Reset all fields aside from sticky ones"""
for i in self.flds:
fn = i['name']
if fn in self.ioflds_priv or fn in self.ioflds_prsv:
continue
self.tedit[fn].setPlainText("")
def resetAllFields(self):
"""Reset all fields"""
self.resetMainFields()
for i in self.ioflds_prsv:
self.tedit[i].setPlainText("")
def fitImageCanvas(self):
self.svg_edit.eval("""
setTimeout(function(){
svgCanvas.zoomChanged('', 'canvas');
}, 5)
""")
| bsd-2-clause |
chillinc/angel | lib/devops/settings_helpers.py | 2 | 1374 |
import sys
def key_value_string_to_dict(data_string, key_value_separator='='):
''' Given a string like:
key=value\nkey2=value2
Return a dict with data[key] = values.
- Ignores all lines that start with # or are empty.
- Returns None on any parse errors.
- Strips leading and trailing white-space on a line
'''
if not isinstance(data_string, str):
print >>sys.stderr, "Warning: key_value_string_to_dict called with a non-string value of data_string: %s" % data_string
return None
try:
data_lines = map(lambda x: x.lstrip(), data_string.strip().split('\n'))
data_lines_wo_comments = filter(lambda x: not (len(x) == 0 or x[0] == '#'), data_lines)
invalid_lines = filter(lambda x: x.find(key_value_separator) < 0 and len(x.strip()), data_lines_wo_comments)
if len(invalid_lines):
print >>sys.stderr, "Invalid lines found while parsing string:\n%s" % '\n'.join(invalid_lines)
return None
return dict([ [y.strip() for y in x.split(key_value_separator,1)] for x in filter(lambda x: x.find(key_value_separator) > 0, data_lines_wo_comments)])
except Exception as e:
print >>sys.stderr, "Parse error in key_value_string_to_dict: %s" % e
import traceback
        print >>sys.stderr, traceback.format_exc()
return None
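# Illustrative usage sketch (added for clarity; not part of the original
# helper, and the sample strings below are hypothetical):
if __name__ == '__main__':
    sample = "# settings\nhost=localhost\nport = 8080"
    print key_value_string_to_dict(sample)           # -> {'host': 'localhost', 'port': '8080'}
    print key_value_string_to_dict("no separator")   # -> None (parse error reported on stderr)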
| apache-2.0 |
topxiaoke/myedx | cms/djangoapps/contentstore/features/video_editor.py | 33 | 11227 | # -*- coding: utf-8 -*-
# disable missing docstring
# pylint: disable=C0111
import requests
from lettuce import world, step
from nose.tools import assert_true, assert_equal, assert_in, assert_not_equal # pylint: disable=E0611
from terrain.steps import reload_the_page
from django.conf import settings
from common import upload_file, attach_file
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
DISPLAY_NAME = "Component Display Name"
NATIVE_LANGUAGES = {lang: label for lang, label in settings.LANGUAGES if len(lang) == 2}
LANGUAGES = {
lang: NATIVE_LANGUAGES.get(lang, display)
for lang, display in settings.ALL_LANGUAGES
}
LANGUAGES.update({
'table': 'Table of Contents'
})
TRANSLATION_BUTTONS = {
'add': '.metadata-video-translations .create-action',
'upload': '.metadata-video-translations .upload-action',
'download': '.metadata-video-translations .download-action',
'remove': '.metadata-video-translations .remove-action',
'clear': '.metadata-video-translations .setting-clear',
}
VIDEO_MENUS = {
'language': '.lang .menu',
}
class RequestHandlerWithSessionId(object):
def get(self, url):
"""
Sends a request.
"""
kwargs = dict()
session_id = [{i['name']:i['value']} for i in world.browser.cookies.all() if i['name'] == u'sessionid']
if session_id:
kwargs.update({
'cookies': session_id[0]
})
response = requests.get(url, **kwargs)
self.response = response
self.status_code = response.status_code
self.headers = response.headers
self.content = response.content
return self
def is_success(self):
"""
        Returns `True` if the request succeeded, otherwise returns `False`.
"""
if self.status_code < 400:
return True
return False
def check_header(self, name, value):
"""
        Returns `True` if the response header exists and has the appropriate value,
        otherwise returns `False`.
"""
if value in self.headers.get(name, ''):
return True
return False
def success_upload_file(filename):
upload_file(filename, sub_path="uploads/")
world.css_has_text('#upload_confirm', 'Success!')
world.is_css_not_present('.wrapper-modal-window-assetupload', wait_time=30)
def get_translations_container():
return world.browser.find_by_xpath('//label[text()="Transcript Languages"]/following-sibling::div')
def get_setting_container(lang_code):
try:
get_xpath = lambda value: './/descendant::a[@data-lang="{}" and contains(@class,"remove-setting")]/parent::*'.format(value)
return get_translations_container().find_by_xpath(get_xpath(lang_code)).first
except Exception:
return None
def get_last_dropdown():
return get_translations_container().find_by_xpath('.//descendant::select[last()]').last
def choose_option(dropdown, value):
dropdown.find_by_value(value)[0].click()
def choose_new_lang(lang_code):
world.css_click(TRANSLATION_BUTTONS['add'])
choose_option(get_last_dropdown(), lang_code)
assert_equal(get_last_dropdown().value, lang_code, "Option with provided value is not available or was not selected")
def open_menu(menu):
world.browser.execute_script("$('{selector}').parent().addClass('is-opened')".format(
selector=VIDEO_MENUS[menu]
))
@step('I have set "transcript display" to (.*)$')
def set_show_captions(step, setting):
# Prevent cookies from overriding course settings
world.browser.cookies.delete('hide_captions')
world.edit_component()
world.select_editor_tab('Advanced')
world.browser.select('Show Transcript', setting)
world.save_component()
@step('when I view the video it (.*) show the captions$')
def shows_captions(_step, show_captions):
world.wait_for_js_variable_truthy("Video")
world.wait(0.5)
if show_captions == 'does not':
assert_true(world.is_css_present('div.video.closed'))
else:
assert_true(world.is_css_not_present('div.video.closed'))
# Prevent cookies from overriding course settings
world.browser.cookies.delete('hide_captions')
world.browser.cookies.delete('current_player_mode')
@step('I see the correct video settings and default values$')
def correct_video_settings(_step):
expected_entries = [
# basic
[DISPLAY_NAME, 'Video', False],
['Default Video URL', 'http://youtu.be/OEoXaMPEzfM, , ', False],
# advanced
[DISPLAY_NAME, 'Video', False],
['Default Timed Transcript', '', False],
['Download Transcript Allowed', 'False', False],
['Downloadable Transcript URL', '', False],
['Show Transcript', 'True', False],
['Transcript Languages', '', False],
['Upload Handout', '', False],
['Video Download Allowed', 'False', False],
['Video File URLs', '', False],
['Video Start Time', '00:00:00', False],
['Video Stop Time', '00:00:00', False],
['YouTube ID', 'OEoXaMPEzfM', False],
['YouTube ID for .75x speed', '', False],
['YouTube ID for 1.25x speed', '', False],
['YouTube ID for 1.5x speed', '', False]
]
world.verify_all_setting_entries(expected_entries)
@step('my video display name change is persisted on save$')
def video_name_persisted(step):
world.save_component()
reload_the_page(step)
world.wait_for_xmodule()
world.edit_component()
world.verify_setting_entry(
world.get_setting_entry(DISPLAY_NAME),
DISPLAY_NAME, '3.4', True
)
@step('I can modify video display name')
def i_can_modify_video_display_name(_step):
index = world.get_setting_entry_index(DISPLAY_NAME)
world.set_field_value(index, '3.4')
world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, '3.4', True)
@step('I upload transcript file(?:s)?:$')
def upload_transcript(step):
input_hidden = '.metadata-video-translations .input'
# Number of previously added translations
initial_index = len(world.css_find(TRANSLATION_BUTTONS['download']))
if step.hashes:
for i, item in enumerate(step.hashes):
lang_code = item['lang_code']
filename = item['filename']
index = initial_index + i
choose_new_lang(lang_code)
expected_text = world.css_text(TRANSLATION_BUTTONS['upload'], index=index)
assert_equal(expected_text, "Upload")
assert_equal(world.css_find(input_hidden).last.value, "")
world.css_click(TRANSLATION_BUTTONS['upload'], index=index)
success_upload_file(filename)
world.wait_for_visible(TRANSLATION_BUTTONS['download'], index=index)
assert_equal(world.css_find(TRANSLATION_BUTTONS['upload']).last.text, "Replace")
assert_equal(world.css_find(input_hidden).last.value, filename)
@step('I try to upload transcript file "([^"]*)"$')
def try_to_upload_transcript(step, filename):
world.css_click(TRANSLATION_BUTTONS['upload'])
attach_file(filename, 'uploads/')
@step('I upload transcript file "([^"]*)" for "([^"]*)" language code$')
def upload_transcript_for_lang(step, filename, lang_code):
get_xpath = lambda value: './/div/a[contains(@class, "upload-action")]'.format(value)
container = get_setting_container(lang_code)
    # If the translation isn't uploaded yet, prepare the drop-down and try to
    # find the container again
    if not container:
        choose_new_lang(lang_code)
        container = get_setting_container(lang_code)
button = container.find_by_xpath(get_xpath(lang_code)).first
button.click()
success_upload_file(filename)
@step('I replace transcript file for "([^"]*)" language code by "([^"]*)"$')
def replace_transcript_for_lang(step, lang_code, filename):
get_xpath = lambda value: './/div/a[contains(@class, "upload-action")]'.format(value)
container = get_setting_container(lang_code)
button = container.find_by_xpath(get_xpath(lang_code)).first
button.click()
success_upload_file(filename)
@step('I see validation error "([^"]*)"$')
def verify_validation_error_message(step, error_message):
assert_equal(world.css_text('#upload_error'), error_message)
@step('I can download transcript for "([^"]*)" language code, that contains text "([^"]*)"$')
def i_can_download_transcript(_step, lang_code, text):
MIME_TYPE = 'application/x-subrip'
get_xpath = lambda value: './/div/a[contains(text(), "Download")]'.format(value)
container = get_setting_container(lang_code)
assert container
button = container.find_by_xpath(get_xpath(lang_code)).first
url = button['href']
request = RequestHandlerWithSessionId()
assert_true(request.get(url).is_success())
assert_true(request.check_header('content-type', MIME_TYPE))
assert_in(text.encode('utf-8'), request.content)
@step('I remove translation for "([^"]*)" language code$')
def i_can_remove_transcript(_step, lang_code):
get_xpath = lambda value: './/descendant::a[@data-lang="{}" and contains(@class,"remove-setting")]'.format(value)
container = get_setting_container(lang_code)
assert container
button = container.find_by_xpath(get_xpath(lang_code)).first
button.click()
@step('I see translations for "([^"]*)"$')
def verify_translations(_step, lang_codes_string):
expected = [l.strip() for l in lang_codes_string.split(',')]
actual = [l['data-lang'] for l in world.css_find('.metadata-video-translations .remove-setting')]
assert_equal(set(expected), set(actual))
@step('I do not see translations$')
def no_translations(_step):
assert_true(world.is_css_not_present('.metadata-video-translations .remove-setting'))
@step('I confirm prompt$')
def confirm_prompt(_step):
world.confirm_studio_prompt()
@step('I (cannot )?choose "([^"]*)" language code$')
def i_choose_lang_code(_step, cannot, lang_code):
choose_option(get_last_dropdown(), lang_code)
if cannot:
        assert_not_equal(get_last_dropdown().value, lang_code, "Option with provided value was selected, but should not have been")
else:
assert_equal(get_last_dropdown().value, lang_code, "Option with provided value is not available or was not selected")
@step('I click button "([^"]*)"$')
def click_button(_step, button):
world.css_click(TRANSLATION_BUTTONS[button.lower()])
@step('video language menu has "([^"]*)" translations$')
def i_see_correct_langs(_step, langs):
menu_name = 'language'
open_menu(menu_name)
items = world.css_find(VIDEO_MENUS[menu_name] + ' li')
translations = {t.strip(): LANGUAGES[t.strip()] for t in langs.split(',')}
assert_equal(len(translations), len(items))
for lang_code, label in translations.items():
assert_true(any([i.text == label for i in items]))
assert_true(any([i['data-lang-code'] == lang_code for i in items]))
@step('video language with code "([^"]*)" at position "(\d+)"$')
def i_see_lang_at_position(_step, code, position):
menu_name = 'language'
open_menu(menu_name)
item = world.css_find(VIDEO_MENUS[menu_name] + ' li')[int(position)]
assert_equal(item['data-lang-code'], code)
| agpl-3.0 |
lgarren/spack | var/spack/repos/builtin/packages/kahip/package.py | 3 | 3520 | ##############################################################################
# Copyright (c) 2017 Christian Schulz
# Karlsruhe Institute of Technology (KIT), Karlsruhe, Germany
#
# This file is released as part of Spack under the LGPL license
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE file for the LLNL notice and LGPL.
#
# License
# -------
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
import re
class Kahip(SConsPackage):
"""KaHIP - Karlsruhe High Quality Partitioning - is a family of graph
partitioning programs. It includes KaFFPa (Karlsruhe Fast Flow
Partitioner), which is a multilevel graph partitioning algorithm,
in its variants Strong, Eco and Fast, KaFFPaE (KaFFPaEvolutionary)
which is a parallel evolutionary algorithm that uses KaFFPa to
provide combine and mutation operations, as well as KaBaPE which
extends the evolutionary algorithm. Moreover, specialized
techniques are included to partition road networks (Buffoon), to
output a vertex separator from a given partition or techniques
geared towards efficient partitioning of social networks.
"""
homepage = 'http://algo2.iti.kit.edu/documents/kahip/index.html'
url = 'http://algo2.iti.kit.edu/schulz/software_releases/KaHIP_2.00.tar.gz'
version('develop', git='https://github.com/schulzchristian/KaHIP.git')
version('2.00', '0a66b0a604ad72cfb7e3dce00e2c9fdfac82b855')
depends_on('argtable')
depends_on('mpi') # Note: upstream package only tested on openmpi
conflicts('%clang')
def patch(self):
"""Internal compile.sh scripts hardcode number of cores to build with.
Filter these out so Spack can control it."""
files = [
'compile.sh',
'parallel/modified_kahip/compile.sh',
'parallel/parallel_src/compile.sh',
]
for f in files:
filter_file('NCORES=.*', 'NCORES={0}'.format(make_jobs), f)
def build(self, spec, prefix):
"""Build using the KaHIP compile.sh script. Uses scons internally."""
builder = Executable('./compile.sh')
builder()
def install(self, spec, prefix):
"""Install under the prefix"""
# Ugly: all files land under 'deploy' and we need to disentangle them
mkdirp(prefix.bin)
mkdirp(prefix.include)
mkdirp(prefix.lib)
with working_dir('deploy'):
for f in os.listdir('.'):
if re.match(r'.*\.(a|so|dylib)$', f):
install(f, prefix.lib)
elif re.match(r'.*\.h$', f):
install(f, prefix.include)
else:
install(f, prefix.bin)
| lgpl-2.1 |
chemelnucfin/tensorflow | tensorflow/contrib/distributions/python/ops/kumaraswamy.py | 1 | 9274 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Kumaraswamy distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import uniform
from tensorflow.python.util import deprecation
__all__ = [
"Kumaraswamy",
]
_kumaraswamy_sample_note = """Note: `x` must have dtype `self.dtype` and be in
`[0, 1].` It must have a shape compatible with `self.batch_shape()`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _harmonic_number(x):
"""Compute the harmonic number from its analytic continuation.
Derivation from [here](
https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)
and [Euler's constant](
https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).
Args:
x: input float.
Returns:
z: The analytic continuation of the harmonic number for the input.
"""
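  # Illustrative check (added comment, not in the original source): with the
  # identity digamma(x + 1) = digamma(x) + 1/x, this continuation reproduces
  # the ordinary harmonic numbers, e.g. H(1) = digamma(2) - digamma(1) = 1.0
  # and H(2) = digamma(3) - digamma(1) = 1.0 + 0.5 = 1.5.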
one = array_ops.ones([], dtype=x.dtype)
return math_ops.digamma(x + one) - math_ops.digamma(one)
class Kumaraswamy(transformed_distribution.TransformedDistribution):
"""Kumaraswamy distribution.
The Kumaraswamy distribution is defined over the `(0, 1)` interval using
parameters
`concentration1` (aka "alpha") and `concentration0` (aka "beta"). It has a
  shape similar to the Beta distribution, but is reparameterizable.
#### Mathematical Details
The probability density function (pdf) is,
```none
  pdf(x; alpha, beta) = alpha * beta * x**(alpha - 1) * (1 - x**alpha)**(beta - 1)
```
where:
* `concentration1 = alpha`,
* `concentration0 = beta`,
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Examples
```python
# Create a batch of three Kumaraswamy distributions.
alpha = [1, 2, 3]
beta = [1, 2, 3]
dist = Kumaraswamy(alpha, beta)
dist.sample([4, 5]) # Shape [4, 5, 3]
# `x` has three batch entries, each with two samples.
x = [[.1, .4, .5],
[.2, .3, .5]]
# Calculate the probability of each pair of samples under the corresponding
# distribution in `dist`.
dist.prob(x) # Shape [2, 3]
```
```python
# Create batch_shape=[2, 3] via parameter broadcast:
alpha = [[1.], [2]] # Shape [2, 1]
beta = [3., 4, 5] # Shape [3]
dist = Kumaraswamy(alpha, beta)
# alpha broadcast as: [[1., 1, 1,],
# [2, 2, 2]]
# beta broadcast as: [[3., 4, 5],
# [3, 4, 5]]
  # batch_shape [2, 3]
dist.sample([4, 5]) # Shape [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # Shape [2, 3]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
concentration1=None,
concentration0=None,
validate_args=False,
allow_nan_stats=True,
name="Kumaraswamy"):
"""Initialize a batch of Kumaraswamy distributions.
Args:
concentration1: Positive floating-point `Tensor` indicating mean
number of successes; aka "alpha". Implies `self.dtype` and
`self.batch_shape`, i.e.,
`concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
concentration0: Positive floating-point `Tensor` indicating mean
number of failures; aka "beta". Otherwise has same semantics as
`concentration1`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
with ops.name_scope(name, values=[concentration1, concentration0]) as name:
concentration1 = ops.convert_to_tensor(
concentration1, name="concentration1")
concentration0 = ops.convert_to_tensor(
concentration0, name="concentration0")
super(Kumaraswamy, self).__init__(
distribution=uniform.Uniform(
low=array_ops.zeros([], dtype=concentration1.dtype),
high=array_ops.ones([], dtype=concentration1.dtype),
allow_nan_stats=allow_nan_stats),
bijector=bijectors.Kumaraswamy(
concentration1=concentration1, concentration0=concentration0,
validate_args=validate_args),
batch_shape=distribution_util.get_broadcast_shape(
concentration1, concentration0),
name=name)
self._reparameterization_type = distribution.FULLY_REPARAMETERIZED
@property
def concentration1(self):
"""Concentration parameter associated with a `1` outcome."""
return self.bijector.concentration1
@property
def concentration0(self):
"""Concentration parameter associated with a `0` outcome."""
return self.bijector.concentration0
def _entropy(self):
a = self.concentration1
b = self.concentration0
return (1 - 1. / a) + (
1 - 1. / b) * _harmonic_number(b) + math_ops.log(a) + math_ops.log(b)
def _moment(self, n):
"""Compute the n'th (uncentered) moment."""
total_concentration = self.concentration1 + self.concentration0
expanded_concentration1 = array_ops.ones_like(
total_concentration, dtype=self.dtype) * self.concentration1
expanded_concentration0 = array_ops.ones_like(
total_concentration, dtype=self.dtype) * self.concentration0
beta_arg0 = 1 + n / expanded_concentration1
beta_arg = array_ops.stack([beta_arg0, expanded_concentration0], -1)
log_moment = math_ops.log(expanded_concentration0) + special_math_ops.lbeta(
beta_arg)
return math_ops.exp(log_moment)
def _mean(self):
return self._moment(1)
def _variance(self):
# TODO(b/72696533): Investigate a more numerically stable version.
return self._moment(2) - math_ops.square(self._moment(1))
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when `concentration1 <= 1` or
`concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
is used for undefined modes. If `self.allow_nan_stats` is `False` an
exception is raised when one or more modes are undefined.""")
def _mode(self):
a = self.concentration1
b = self.concentration0
mode = ((a - 1) / (a * b - 1))**(1. / a)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype),
name="nan")
is_defined = (self.concentration1 > 1.) & (self.concentration0 > 1.)
return array_ops.where_v2(is_defined, mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], dtype=self.concentration1.dtype),
self.concentration1,
message="Mode undefined for concentration1 <= 1."),
check_ops.assert_less(
array_ops.ones([], dtype=self.concentration0.dtype),
self.concentration0,
message="Mode undefined for concentration0 <= 1.")
], mode)
| apache-2.0 |
workflo/dxf2gcode | python_examples/gtk/pygtkcanvas-1.0/canvas.py | 1 | 11443 | __all__ = ['Canvas']
import pygtk
pygtk.require("2.0")
import gtk
import gobject
import cairo
from math import pi
from canvasmath import *
class Canvas(gtk.DrawingArea):
def __init__(self):
gtk.DrawingArea.__init__(self)
self.set_double_buffered(False)
self.add_events(
gtk.gdk.BUTTON_PRESS_MASK|
gtk.gdk.BUTTON_RELEASE_MASK|
gtk.gdk.BUTTON1_MOTION_MASK|
gtk.gdk.BUTTON2_MOTION_MASK|
gtk.gdk.BUTTON3_MOTION_MASK|
gtk.gdk.SCROLL_MASK|
gtk.gdk.KEY_PRESS_MASK|
gtk.gdk.KEY_RELEASE_MASK)
        # foreground, background
self.fg = (0, 0, 0)
self.bg = (0, 0, 0)
# list of layers
self.layers = []
        # visible items are cached in a list as a speedup
self.visible = None
# canvas bindings
self.bindings = {}
# last item which had some event
self.current_item = None
# cairo context
self.cr = None
# cached cairo image surfaces using filenames/images as keys
self.cache_filenames = {}
self.cache_images = {}
# connect event callbacks
gtk.DrawingArea.connect(self, 'configure-event', self.drawingarea_configure_event_cb)
gtk.DrawingArea.connect(self, 'expose-event', self.drawingarea_expose_event_cb)
gtk.DrawingArea.connect(self, 'button-press-event', self.drawingarea_button_press_event_cb)
gtk.DrawingArea.connect(self, 'button-release-event', self.drawingarea_button_release_event_cb)
gtk.DrawingArea.connect(self, 'motion-notify-event', self.drawingarea_motion_notify_event_cb)
gtk.DrawingArea.connect(self, 'scroll-event', self.drawingarea_scroll_event_cb)
def connect(self, detailed_signal, handler, *args, **kw):
# makes sure that all event names have '-' but nor '_'
detailed_signal = detailed_signal.replace('_', '-')
if detailed_signal not in self.bindings:
self.bindings[detailed_signal] = [(handler, args, kw)]
else:
self.bindings[detailed_signal].append((handler, args, kw))
def drawingarea_configure_event_cb(self, widget, event):
x, y, width, height = self.get_allocation()
self.pixmap = gtk.gdk.Pixmap(self.window, width, height)
self.gc = self.window.new_gc()
self.visible = self.find_visible(*self.get_allocation())
return True
def drawingarea_expose_event_cb(self, widget, event):
self.redraw_visible()
return False
def drawingarea_button_press_event_cb(self, widget, event):
self.event_cb(widget, event, 'button-press-event')
return False
def drawingarea_motion_notify_event_cb(self, widget, event):
self.event_cb(widget, event, 'motion-notify-event')
return False
def drawingarea_button_release_event_cb(self, widget, signal_id):
self.event_cb(widget, signal_id, 'button-release-event')
return False
def drawingarea_scroll_event_cb(self, widget, event):
self.event_cb(widget, event, 'scroll-event')
return False
def event_cb(self, widget, event, name):
        # check whether a canvas item is already 'pressed';
        # this reduces the time required to find
        # which item the mouse is above
if self.current_item:
try:
func_, args_, kw_ = self.current_item.bindings[name]
func_(self, self.current_item, event, *args_, **kw_)
except KeyError:
pass
if name == 'button-press-event':
self.visible = self.find_visible(*self.get_allocation())
elif name == 'motion-notify-event':
self.redraw_visible()
elif name == 'button-release-event':
self.current_item = None
        # classical way of finding which item the mouse is above
else:
x = event.x
y = event.y
for n in reversed(self.visible):
bindings = n.bindings
if bindings and name in bindings:
if n.is_coord_above(x, y):
func_, args_, kw_ = bindings[name]
func_(self, n, event, *args_, **kw_)
self.current_item = n
break
if not self.current_item:
try:
for handler_, args_, kw_ in self.bindings[name]:
handler_(widget, event, *args_, **kw_)
except KeyError:
pass
self.visible = self.find_visible(*self.get_allocation())
self.redraw_visible()
def redraw_visible(self):
# cairo context
self.cr = cr = self.pixmap.cairo_create()
# clip
xmin, ymin = 0, 0
xmax, ymax = tuple(self.get_allocation())[2:]
cr.rectangle(xmin, ymin, xmax, ymax)
cr.clip()
# background
cr.set_source_rgb(self.bg[0],self.bg[1], self.bg[2])
cr.rectangle(xmin, ymin, xmax, ymax)
cr.fill()
# draw items
for item in self.visible:
item.draw(cr)
# draw on canvas
self.window.draw_drawable(self.gc, self.pixmap, xmin, ymin, xmin, ymin, xmax-xmin, ymax-ymin)
def set_foreground(self, color):
self.fg = color
def set_background(self, color):
self.bg = color
def draw_line(self, x0, y0, x1, y1, fg=None, line_width=1.0):
cr = self.cr
cr.move_to(x0, y0)
cr.line_to(x1, y1)
cr.set_line_width(line_width)
cr.set_source_rgb(*(fg if fg else self.fg))
cr.stroke()
def draw_rect(self, x0, y0, x1, y1, fg=None, bg=None, outline=False, line_width=1.0, filled=False):
cr = self.cr
x = x0
y = y0
w = x1 - x0
h = y1 - y0
if filled:
cr.rectangle(x, y, w, h)
cr.set_source_rgb(*(bg if bg else self.bg))
cr.fill()
if not outline:
return
cr.rectangle(x, y, w, h)
cr.set_line_width(line_width)
cr.set_source_rgb(*(fg if fg else self.fg))
cr.stroke()
def draw_oval(self, x0, y0, x1, y1, fg=None, bg=None, outline=False, line_width=1.0, filled=False):
cr = self.cr
x2 = (x0 + x1) / 2.0
y2 = (y0 + y1) / 2.0
w2 = (x1 - x0) / 2.0
h2 = (y1 - y0) / 2.0
pi2 = 2.0 * pi
if filled:
cr.save()
cr.translate(x2, y2)
cr.scale(w2, h2)
cr.arc(0.0, 0.0, 1.0, 0.0, 2 * pi)
cr.restore()
cr.set_source_rgb(*(bg if bg else self.bg))
cr.fill()
if not outline:
return
cr.save()
cr.translate(x2, y2)
cr.scale(w2, h2)
cr.arc(0.0, 0.0, 1.0, 0.0, 2 * pi)
cr.restore()
cr.set_line_width(line_width)
cr.set_source_rgb(*(fg if fg else self.fg))
cr.stroke()
def draw_arc(self, x0, y0, x1, y1, fg=None, bg=None, outline=False, line_width=1.0, filled=False, start=0.0, extent=1.5 * pi):
cr = self.cr
x2 = (x0 + x1) / 2.0
y2 = (y0 + y1) / 2.0
w2 = (x1 - x0) / 2.0
h2 = -(y1 - y0) / 2.0
if filled:
cr.save()
cr.translate(x2, y2)
cr.scale(w2, h2)
cr.arc(0.0, 0.0, 1.0, start, extent)
cr.restore()
cr.set_source_rgb(*(bg if bg else self.bg))
cr.fill()
if not outline:
return
cr.save()
cr.translate(x2, y2)
cr.scale(w2, h2)
cr.arc(0.0, 0.0, 1.0, start, extent)
cr.restore()
cr.set_line_width(line_width)
cr.set_source_rgb(*(fg if fg else self.fg))
cr.stroke()
def draw_text(self, x0, y0, text, fg=None, size=10):
cr = self.cr
cr.set_font_size(size)
cr.move_to(x0, y0 + size)
cr.set_source_rgb(*(fg if fg else self.fg))
cr.show_text(text)
def draw_image(self, x0, y0, xs=1.0, ys=1.0, filename=None, image=None):
cr = self.cr
cr.save()
cr.translate(x0, y0)
cr.scale(xs, ys)
if filename:
if filename not in self.cache_filenames:
cairo_image = cairo_image_surface_from_filename(filename)
self.cache_filenames[filename] = cairo_image
else:
cairo_image = self.cache_filenames[filename]
elif image:
if image not in self.cache_images:
cairo_image = cairo_image_surface_from_image(image)
self.cache_images[image] = cairo_image
else:
cairo_image = self.cache_images[image]
cr.set_source_surface(cairo_image)
cr.paint()
cr.restore()
def append(self, layer):
self.layers.append(layer)
def insert(self, index, layer):
self.layers.insert(index, layer)
def remove(self, layer):
self.layers.remove(layer)
def pop(self, index):
return self.layers.pop(index)
def move_all(self, dx, dy):
for layer in self.layers:
layer.move_all(dx, dy)
def scale_all(self, xc, yc, xs, ys):
for layer in self.layers:
layer.scale_all(xc, yc, xs, ys)
def find_above(self, item):
l = []
for layer in self.layers:
if layer.get_visible():
l.extend(layer.find_above(item))
return l
def find_all_above(self, item):
l = []
for layer in self.layers:
if layer.get_visible():
l.extend(layer.find_all_above(item))
return l
def find_below(self, item):
l = []
for layer in self.layers:
if layer.get_visible():
l.extend(layer.find_below(item))
return l
def find_all_below(self, item):
l = []
for layer in self.layers:
if layer.get_visible():
l.extend(layer.find_all_below(item))
return l
def find_visible(self, x0, y0, x1, y1):
l = []
for layer in self.layers:
if layer.get_visible():
l.extend(layer.find_visible(x0, y0, x1, y1))
return l
def find_closest(self, x, y, halo=0, start=None, end=None):
l = []
for layer in self.layers:
if layer.get_visible():
                l.extend(layer.find_closest(x, y, halo, start, end))
return l
def find_enclosed(self, x0, y0, x1, y1):
l = []
for layer in self.layers:
if layer.get_visible():
l.extend(layer.find_enclosed(x0, y0, x1, y1))
return l
def find_overlapping(self, x0, y0, x1, y1):
l = []
for layer in self.layers:
if layer.get_visible():
l.extend(layer.find_overlapping(x0, y0, x1, y1))
return l
| gpl-3.0 |
jamesbeebop/mkdocs | mkdocs/commands/new.py | 30 | 1433 | # coding: utf-8
from __future__ import unicode_literals
import io
import logging
import os
config_text = 'site_name: My Docs\n'
index_text = """# Welcome to MkDocs
For full documentation visit [mkdocs.org](http://mkdocs.org).
## Commands
* `mkdocs new [dir-name]` - Create a new project.
* `mkdocs serve` - Start the live-reloading docs server.
* `mkdocs build` - Build the documentation site.
* `mkdocs help` - Print this help message.
## Project layout
mkdocs.yml # The configuration file.
docs/
index.md # The documentation homepage.
... # Other markdown pages, images and other files.
"""
log = logging.getLogger(__name__)
def new(output_dir):
docs_dir = os.path.join(output_dir, 'docs')
config_path = os.path.join(output_dir, 'mkdocs.yml')
index_path = os.path.join(docs_dir, 'index.md')
if os.path.exists(config_path):
log.info('Project already exists.')
return
if not os.path.exists(output_dir):
log.info('Creating project directory: %s', output_dir)
os.mkdir(output_dir)
log.info('Writing config file: %s', config_path)
io.open(config_path, 'w', encoding='utf-8').write(config_text)
if os.path.exists(index_path):
return
log.info('Writing initial docs: %s', index_path)
if not os.path.exists(docs_dir):
os.mkdir(docs_dir)
io.open(index_path, 'w', encoding='utf-8').write(index_text)
| bsd-2-clause |
krikru/tensorflow-opencl | tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py | 107 | 21031 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fractional average pool operation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class FractionalAvgTest(test.TestCase):
# Random number generate with seed.
_PRNG = np.random.RandomState(341261000)
_SEED = 341261001
_SEED2 = 341261002
def _AvgPoolAlongRows(self, input_matrix, row_seq, overlapping):
"""Perform average pool along row of a 2-D matrix based on row_seq.
Args:
input_matrix: A 2-D matrix.
row_seq: Cumulative pooling sequence along row.
overlapping: Whether or not use overlapping when pooling.
Returns:
A 2-D matrix, with
* num_rows = len(row_seq)-1
* num_cols = input_matrix.num_cols.
"""
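    # Worked example (hypothetical values, not from the original source): with
    # row_seq = [0, 2, 5] and overlapping=False, rows 0-1 are averaged into
    # output row 0 and rows 2-4 into output row 1; with overlapping=True the
    # regions become rows 0-2 and rows 2-4, sharing the boundary row.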
output_image = np.zeros(input_matrix.shape[1])
row_max = row_seq[-1]
for i in range(row_seq.shape[0] - 1):
row_start = row_seq[i]
row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
row_end = min(row_end, row_max)
output_image = np.vstack((output_image, np.mean(
input_matrix[row_start:row_end, :], axis=0))) # axis 0 is along row
# remove the sentinel row
return output_image[1:, :]
def _AvgPoolAlongCols(self, input_matrix, col_seq, overlapping):
"""Perform average pool along column of a 2-D matrix based on col_seq.
Args:
input_matrix: A 2-D matrix.
col_seq: Cumulative pooling sequence along column.
overlapping: Whether or not use overlapping when pooling.
Returns:
A 2-D matrix, with
* num_rows = input_matrix.num_rows
* num_cols = len(col_seq)-1.
"""
input_matrix = input_matrix.transpose()
output_matrix = self._AvgPoolAlongRows(input_matrix, col_seq, overlapping)
return output_matrix.transpose()
def _GetExpectedFractionalAvgPoolResult(self, input_tensor, row_seq, col_seq,
overlapping):
"""Get expected fractional average pooling result.
row_seq and col_seq together defines the fractional pooling region.
Args:
input_tensor: Original input tensor, assuming it is a 4-D tensor, with
dimension as [batch, height/row, width/column, channels/depth].
row_seq: Cumulative pooling sequence along row.
col_seq: Cumulative pooling sequence along column.
overlapping: Use overlapping when doing pooling.
Returns:
A 4-D tensor that is the result of average pooling on input_tensor based
on pooling region defined by row_seq and col_seq, conditioned on whether
or not overlapping is used.
"""
input_shape = input_tensor.shape
output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1,
input_shape[3])
output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
for batch in range(input_shape[0]):
for channel in range(input_shape[3]):
two_dim_slice = input_tensor[batch, :, :, channel]
tmp = self._AvgPoolAlongRows(two_dim_slice, row_seq, overlapping)
output_tensor[batch, :, :, channel] = self._AvgPoolAlongCols(
tmp, col_seq, overlapping)
return output_tensor
def _ValidateFractionalAvgPoolResult(self, input_tensor, pooling_ratio,
pseudo_random, overlapping):
"""Validate FractionalAvgPool's result against expected.
Expected result is computed given input_tensor, and pooling region defined
by row_seq and col_seq.
Args:
input_tensor: A tensor or numpy ndarray.
pooling_ratio: A list or tuple of length 4, first and last element be 1.
pseudo_random: Use pseudo random method to generate pooling sequence.
overlapping: Use overlapping when pooling.
Returns:
None
"""
with self.test_session() as sess:
p, r, c = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random,
overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
actual, row_seq, col_seq = sess.run([p, r, c])
expected = self._GetExpectedFractionalAvgPoolResult(input_tensor, row_seq,
col_seq, overlapping)
self.assertShapeEqual(expected, p)
self.assertAllClose(expected, actual)
def _testVisually(self):
"""Manual test by printing out intermediate result of a small random tensor.
    Since _GetExpectedFractionalAvgPoolResult is 'automated', it feels safer to
    have a test case where you can see what's happening.
This test will generate a small, random, int 2D matrix, and feed it to
FractionalAvgPool and _GetExpectedFractionalAvgPoolResult.
"""
num_rows = 6
num_cols = 6
tensor_shape = (1, num_rows, num_cols, 1)
pseudo_random = False
for overlapping in True, False:
print("-" * 70)
print("Testing FractionalAvgPool with overlapping = {}".format(
overlapping))
rand_mat = self._PRNG.randint(10, size=tensor_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(2), 1]
with self.test_session() as sess:
p, r, c = nn_ops.fractional_avg_pool(
rand_mat.astype(np.float32),
pooling_ratio,
pseudo_random,
overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
tensor_output, row_seq, col_seq = sess.run([p, r, c])
expected_result = self._GetExpectedFractionalAvgPoolResult(
rand_mat.astype(np.float32), row_seq, col_seq, overlapping)
print("row sequence:")
print(row_seq)
print("column sequence:")
print(col_seq)
print("Input:")
# Print input with pooling region marked.
for i in range(num_rows):
row_to_print = []
for j in range(num_cols):
if j in col_seq:
row_to_print.append("|")
row_to_print.append(str(rand_mat[0, i, j, 0]))
row_to_print.append("|")
if i in row_seq:
print("-" * 2 * len(row_to_print))
print(" ".join(row_to_print))
print("-" * 2 * len(row_to_print))
print("Output from FractionalAvgPool:")
print(tensor_output[0, :, :, 0])
print("Expected result:")
print(expected_result[0, :, :, 0])
def testAllInputOptions(self):
"""Try all possible input options for fractional_avg_pool.
"""
num_batches = 5
num_channels = 3
num_rows = 20
num_cols = 30
for pseudo_random in True, False:
for overlapping in True, False:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testIntegerTensorInput(self):
"""Test FractionalAvgPool works fine when input tensor is integer type.
    I would have used the _ValidateFractionalAvgPoolResult function to automate
    this process, but there is a rounding issue: numpy.mean casts integer input
    to numpy.float64 for intermediate use, while fractional_avg_pool's mean
    operation uses truncated integer division. So, for this test case, I will
    hard code a simple matrix.
"""
pseudo_random = True
overlapping = True
tensor_shape = (1, 6, 6, 1)
# pyformat: disable
mat = np.array([
[2, 6, 4, 1, 3, 6],
[8, 9, 1, 6, 6, 8],
[3, 9, 8, 2, 5, 6],
[2, 7, 9, 5, 4, 5],
[8, 5, 0, 5, 7, 4],
[4, 4, 5, 9, 7, 2]
])
# pyformat: enable
with self.test_session() as sess:
      # Since deterministic = True, seed and seed2 are fixed. Therefore r and c
# are the same each time. We can have an expected result precomputed.
# r = [0, 2, 4, 6]
# c = [0, 1, 3, 4, 6]
# pyformat: disable
expected = np.array([
[6, 5, 3, 5],
[5, 5, 4, 5],
[5, 4, 7, 5]
]).reshape((1, 3, 4, 1))
# pyformat: enable
p, unused_r, unused_c = nn_ops.fractional_avg_pool(
mat.reshape(tensor_shape), [1, math.sqrt(3), math.sqrt(2), 1],
pseudo_random,
overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
actual = sess.run(p)
self.assertShapeEqual(expected, p)
self.assertAllClose(expected, actual)
def testDifferentTensorShapes(self):
"""Test different shapes of input tensor.
Mainly test different combinations of num_rows and num_cols.
"""
pseudo_random = True
overlapping = True
for num_batches in [1, 3]:
for num_channels in [1, 3]:
for num_rows in [10, 20, 50]:
for num_cols in [10, 20, 50]:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testLargePoolingRatio(self):
"""Test when pooling ratio is not within [1, 2).
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
for row_ratio in [math.sqrt(11), math.sqrt(37)]:
for col_ratio in [math.sqrt(11), math.sqrt(27)]:
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(rand_mat,
[1, row_ratio, col_ratio, 1],
pseudo_random, overlapping)
def testDivisiblePoolingRatio(self):
"""Test when num of rows/cols can evenly divide pooling ratio.
This is a case regular average pooling can handle. Should be handled by
fractional pooling as well.
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(rand_mat, [1, 2, 2, 1], pseudo_random,
overlapping)
class FractionalAvgPoolGradTest(test.TestCase):
"""Tests for FractionalAvgPoolGrad.
Two types of tests for FractionalAvgPoolGrad.
1) Test fractional_avg_pool_grad() directly.
  This type of test relies on gen_nn_ops._avg_pool_grad() returning the
  correct result. For example:
* input_tensor_shape = (1, 10, 10, 1)
* window_size = (1, 2, 2, 1)
* stride_size = (1, 2, 2, 1)
* padding: not really important, since 10/2 is divisible
avg pooling should generate the same result as fractional avg pooling with:
* row_sequence = [0, 2, 4, 6, 8, 10]
* col_sequence = [0, 2, 4, 6, 8, 10]
* overlapping = False
This also means their gradients in such case will be the same.
Similarly, when
* input_tensor_shape = (1, 7, 7, 1)
* window_size = (1, 3, 3, 1)
* stride_size = (1, 2, 2, 1)
* padding: not important
avg pooling should generate the same result as fractional avg pooling with:
* row_sequence = [0, 2, 4, 7]
* col_sequence = [0, 2, 4, 7]
* overlapping = True
2) Test through compute_gradient_error()
"""
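  # A concrete (hypothetical) instance of the equivalence described above: for
  # a single 2x2 input [[1., 3.], [5., 7.]] with row_seq = col_seq = [0, 2] and
  # overlapping=False, both regular 2x2 avg pooling and fractional avg pooling
  # produce the single output 4.0, so a backpropagated gradient g is spread as
  # g / 4 over each of the four input cells by both gradient ops.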
_PRNG = np.random.RandomState(341261004)
_SEED = 341261005
_SEED2 = 341261006
def _GenerateRandomInputTensor(self, shape):
num_elements = 1
for dim_size in shape:
num_elements *= dim_size
x = self._PRNG.rand(num_elements) * 1000
return x.reshape(shape)
def testDirectNotUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = row_window_size * 5
num_cols = col_window_size * 7
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.test_session() as _:
input_tensor = constant_op.constant(
self._GenerateRandomInputTensor(input_shape).astype(
np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size, col_window_size, 1]
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = output_tensor.eval()
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
input_backprop_tensor = gen_nn_ops._avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
row_seq = list(range(0, num_rows + 1, row_window_size))
col_seq = list(range(0, num_cols + 1, col_window_size))
fap_input_backprop_tensor = gen_nn_ops._fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
col_seq,
overlapping=False)
fap_input_backprop = fap_input_backprop_tensor.eval()
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
def testDirectUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = (row_window_size - 1) * 5 + 1
num_cols = (col_window_size - 1) * 7 + 1
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.test_session() as _:
input_tensor = constant_op.constant(
self._GenerateRandomInputTensor(input_shape).astype(
np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = output_tensor.eval()
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
input_backprop_tensor = gen_nn_ops._avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
row_seq = list(range(0, num_rows, row_window_size - 1))
col_seq = list(range(0, num_cols, col_window_size - 1))
row_seq[-1] += 1
col_seq[-1] += 1
fap_input_backprop_tensor = gen_nn_ops._fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
col_seq,
overlapping=True)
fap_input_backprop = fap_input_backprop_tensor.eval()
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
def testAllInputOptionsThroughGradientError(self):
input_shape = (1, 7, 13, 1)
input_data = self._GenerateRandomInputTensor(input_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(3), 1]
for pseudo_random in True, False:
for overlapping in True, False:
with self.test_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
output_data = output_tensor.eval()
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
def testDifferentTensorShapesThroughGradientError(self):
pseudo_random = True
overlapping = True
pooling_ratio = [1, math.sqrt(3), math.sqrt(2), 1]
for num_batches in [1, 2]:
for num_rows in [5, 13]:
for num_cols in [5, 11]:
for num_channels in [1, 3]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
input_data = self._GenerateRandomInputTensor(input_shape)
with self.test_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
output_data = output_tensor.eval()
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
def testLargePoolingRatioThroughGradientError(self):
input_shape = (1, 17, 23, 1)
input_data = self._GenerateRandomInputTensor(input_shape)
pooling_ratio = (1, math.sqrt(13), math.sqrt(7), 1)
output_shape = [int(a / b) for a, b in zip(input_shape, pooling_ratio)]
overlapping = True
pseudo_random = False
with self.test_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
if __name__ == "__main__":
test.main()
| apache-2.0 |
roambotics/swift | utils/create-filecheck-test.py | 33 | 1349 | #!/usr/bin/env python
# The following script takes as input a SIL fragment and changes all
# SSA variables into FileCheck variables. This significantly reduces
# the amount of time required for creating complicated FileCheck
# tests.
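# For example, an input fragment like (illustrative SIL only, not from this
# repository):
#   %0 = integer_literal $Builtin.Int64, 1
#   %1 = struct $Int64 (%0 : $Builtin.Int64)
# would roughly become:
#   // CHECK: [[VAR_0:%[0-9]+]] = integer_literal $Builtin.Int64, 1
#   // CHECK: [[VAR_1:%[0-9]+]] = struct $Int64 ([[VAR_0]] : $Builtin.Int64)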
import argparse
import re
import sys
import textwrap
parser = argparse.ArgumentParser(description=textwrap.dedent("""
Takes an input SIL fragment and changes all SSA variables into FileCheck
variables.
"""))
parser.add_argument('input', type=argparse.FileType('r'),
help='Input file. \'-\' for stdin.')
parser.add_argument('-o', type=argparse.FileType('w'),
metavar='output',
help='Output file. Defaults to stdout.',
default=sys.stdout)
args = parser.parse_args()
seen_variables = set([])
ssa_re = re.compile(r'[%](\d+)')
for line in args.input.readlines():
line = line[:line.find('//')].rstrip() + "\n"
have_match = False
for match in ssa_re.finditer(line):
have_match = True
var = match.groups()[0]
if var not in seen_variables:
line = line.replace('%' + var, '[[VAR_%s:%%[0-9]+]]' % var)
seen_variables.add(var)
else:
line = line.replace('%' + var, '[[VAR_%s]]' % var)
if have_match:
line = '// CHECK: ' + line
args.o.write(line)
| apache-2.0 |
chuan9/chromium-crosswalk | tools/multi_process_rss.py | 128 | 3646 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Counts the resident set size (RSS) of multiple processes without double-counting.
# If they share the same page frame, the page frame is counted only once.
#
# Usage:
# ./multi-process-rss.py <pid>|<pid>r [...]
#
# If <pid> has 'r' at the end, all descendants of the process are accounted.
#
# Example:
# ./multi-process-rss.py 12345 23456r
#
# The command line above counts the RSS of 1) process 12345, 2) process 23456
# and 3) all descendant processes of process 23456.
import collections
import logging
import os
import psutil
import sys
if sys.platform.startswith('linux'):
_TOOLS_PATH = os.path.dirname(os.path.abspath(__file__))
_TOOLS_LINUX_PATH = os.path.join(_TOOLS_PATH, 'linux')
sys.path.append(_TOOLS_LINUX_PATH)
import procfs # pylint: disable=F0401
class _NullHandler(logging.Handler):
def emit(self, record):
pass
_LOGGER = logging.getLogger('multi-process-rss')
_LOGGER.addHandler(_NullHandler())
def _recursive_get_children(pid):
try:
children = psutil.Process(pid).get_children()
except psutil.error.NoSuchProcess:
return []
descendant = []
for child in children:
descendant.append(child.pid)
descendant.extend(_recursive_get_children(child.pid))
return descendant
def list_pids(argv):
pids = []
for arg in argv[1:]:
try:
if arg.endswith('r'):
recursive = True
pid = int(arg[:-1])
else:
recursive = False
pid = int(arg)
except ValueError:
raise SyntaxError("%s is not an integer." % arg)
else:
pids.append(pid)
if recursive:
children = _recursive_get_children(pid)
pids.extend(children)
pids = sorted(set(pids), key=pids.index) # uniq: maybe slow, but simple.
return pids
def count_pageframes(pids):
pageframes = collections.defaultdict(int)
pagemap_dct = {}
for pid in pids:
maps = procfs.ProcMaps.load(pid)
if not maps:
_LOGGER.warning('/proc/%d/maps not found.' % pid)
continue
pagemap = procfs.ProcPagemap.load(pid, maps)
if not pagemap:
_LOGGER.warning('/proc/%d/pagemap not found.' % pid)
continue
pagemap_dct[pid] = pagemap
for pid, pagemap in pagemap_dct.iteritems():
for vma in pagemap.vma_internals.itervalues():
for pageframe, number in vma.pageframes.iteritems():
pageframes[pageframe] += number
return pageframes
def count_statm(pids):
resident = 0
shared = 0
private = 0
for pid in pids:
statm = procfs.ProcStatm.load(pid)
if not statm:
_LOGGER.warning('/proc/%d/statm not found.' % pid)
continue
resident += statm.resident
shared += statm.share
private += (statm.resident - statm.share)
return (resident, shared, private)
def main(argv):
logging_handler = logging.StreamHandler()
logging_handler.setLevel(logging.WARNING)
logging_handler.setFormatter(logging.Formatter(
'%(asctime)s:%(name)s:%(levelname)s:%(message)s'))
_LOGGER.setLevel(logging.WARNING)
_LOGGER.addHandler(logging_handler)
if sys.platform.startswith('linux'):
logging.getLogger('procfs').setLevel(logging.WARNING)
logging.getLogger('procfs').addHandler(logging_handler)
pids = list_pids(argv)
pageframes = count_pageframes(pids)
else:
_LOGGER.error('%s is not supported.' % sys.platform)
return 1
# TODO(dmikurube): Classify this total RSS.
print len(pageframes) * 4096
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
kctan0805/vdpm | share/gdal/gdal-2.0.0/swig/python/samples/ogr_layer_algebra.py | 1 | 16666 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#******************************************************************************
# $Id: ogr_layer_algebra.py 29254 2015-05-27 12:45:56Z rouault $
#
# Project: GDAL Python Interface
# Purpose: Application for executing OGR layer algebra operations
# Author: Even Rouault, even dot rouault at mines-paris dot org
#
#******************************************************************************
# Copyright (c) 2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
from osgeo import gdal, ogr, osr
import os
import sys
###############################################################################
def Usage():
print("""
Usage: ogr_layer_algebra.py Union|Intersection|SymDifference|Identity|Update|Clip|Erase
-input_ds name [-input_lyr name]
                            -method_ds name [-method_lyr name]
-output_ds name [-output_lyr name] [-overwrite]
[-opt NAME=VALUE]*
[-f format_name] [-dsco NAME=VALUE]* [-lco NAME=VALUE]*
[-input_fields NONE|ALL|fld1,fl2,...fldN] [-method_fields NONE|ALL|fld1,fl2,...fldN]
[-nlt geom_type] [-a_srs srs_def]""")
return 1
###############################################################################
def EQUAL(a, b):
return a.lower() == b.lower()
###############################################################################
def CreateLayer(output_ds, output_lyr_name, srs, geom_type, lco, \
input_lyr, input_fields, \
method_lyr, method_fields, opt):
output_lyr = output_ds.CreateLayer(output_lyr_name, srs, geom_type, lco)
if output_lyr is None:
print('Cannot create layer "%s"' % output_lyr_name)
return None
input_prefix = ''
method_prefix = ''
for val in opt:
if val.lower().find('input_prefix=') == 0:
input_prefix = val[len('input_prefix='):]
elif val.lower().find('method_prefix=') == 0:
method_prefix = val[len('method_prefix='):]
if input_fields == 'ALL':
layer_defn = input_lyr.GetLayerDefn()
for idx in range(layer_defn.GetFieldCount()):
fld_defn = layer_defn.GetFieldDefn(idx)
fld_defn = ogr.FieldDefn(input_prefix + fld_defn.GetName(), fld_defn.GetType())
if output_lyr.CreateField(fld_defn) != 0:
print('Cannot create field "%s" in layer "%s"' % (fld_defn.GetName(), output_lyr.GetName()))
elif input_fields != 'NONE':
layer_defn = input_lyr.GetLayerDefn()
for fld in input_fields:
idx = layer_defn.GetFieldIndex(fld)
if idx < 0:
print('Cannot find field "%s" in layer "%s"' % (fld, layer_defn.GetName()))
continue
fld_defn = layer_defn.GetFieldDefn(idx)
fld_defn = ogr.FieldDefn(input_prefix + fld_defn.GetName(), fld_defn.GetType())
if output_lyr.CreateField(fld_defn) != 0:
print('Cannot create field "%s" in layer "%s"' % (fld, output_lyr.GetName()))
if method_fields == 'ALL':
layer_defn = method_lyr.GetLayerDefn()
for idx in range(layer_defn.GetFieldCount()):
fld_defn = layer_defn.GetFieldDefn(idx)
fld_defn = ogr.FieldDefn(method_prefix + fld_defn.GetName(), fld_defn.GetType())
if output_lyr.CreateField(fld_defn) != 0:
print('Cannot create field "%s" in layer "%s"' % (fld_defn.GetName(), output_lyr.GetName()))
elif method_fields != 'NONE':
layer_defn = method_lyr.GetLayerDefn()
for fld in method_fields:
idx = layer_defn.GetFieldIndex(fld)
if idx < 0:
print('Cannot find field "%s" in layer "%s"' % (fld, layer_defn.GetName()))
continue
fld_defn = layer_defn.GetFieldDefn(idx)
fld_defn = ogr.FieldDefn(method_prefix + fld_defn.GetName(), fld_defn.GetType())
if output_lyr.CreateField(fld_defn) != 0:
print('Cannot create field "%s" in layer "%s"' % (fld, output_lyr.GetName()))
return output_lyr
###############################################################################
def main(argv = None):
format = 'ESRI Shapefile'
quiet_flag = 0
input_ds_name = None
input_lyr_name = None
method_ds_name = None
method_lyr_name = None
output_ds_name = None
output_lyr_name = None
op_str = None
dsco = []
lco = []
opt = []
overwrite = False
input_fields = 'ALL'
method_fields = None
geom_type = ogr.wkbUnknown
srs_name = None
srs = None
argv = ogr.GeneralCmdLineProcessor( sys.argv )
if argv is None:
return 1
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-f' and i + 1 < len(argv):
i = i + 1
format = argv[i]
elif arg == '-input_ds' and i + 1 < len(argv):
i = i + 1
input_ds_name = argv[i]
elif arg == '-input_lyr' and i + 1 < len(argv):
i = i + 1
input_lyr_name = argv[i]
elif arg == '-method_ds' and i + 1 < len(argv):
i = i + 1
method_ds_name = argv[i]
elif arg == '-method_lyr' and i + 1 < len(argv):
i = i + 1
method_lyr_name = argv[i]
elif arg == '-output_ds' and i + 1 < len(argv):
i = i + 1
output_ds_name = argv[i]
elif arg == '-output_lyr' and i + 1 < len(argv):
i = i + 1
output_lyr_name = argv[i]
elif arg == '-input_fields' and i + 1 < len(argv):
i = i + 1
if EQUAL(argv[i], "NONE"):
input_fields = "NONE"
elif EQUAL(argv[i], "ALL"):
input_fields = "ALL"
else:
input_fields = argv[i].split(',')
elif arg == '-method_fields' and i + 1 < len(argv):
i = i + 1
if EQUAL(argv[i], "NONE"):
method_fields = "NONE"
elif EQUAL(argv[i], "ALL"):
method_fields = "ALL"
else:
method_fields = argv[i].split(',')
elif arg == '-dsco' and i + 1 < len(argv):
i = i + 1
dsco.append(argv[i])
elif arg == '-lco' and i + 1 < len(argv):
i = i + 1
lco.append(argv[i])
elif arg == '-opt' and i + 1 < len(argv):
i = i + 1
opt.append(argv[i])
elif arg == "-nlt" and i + 1 < len(argv):
i = i + 1
val = argv[i]
if EQUAL(val,"NONE"):
geom_type = ogr.wkbNone
elif EQUAL(val,"GEOMETRY"):
geom_type = ogr.wkbUnknown
elif EQUAL(val,"POINT"):
geom_type = ogr.wkbPoint
elif EQUAL(val,"LINESTRING"):
geom_type = ogr.wkbLineString
elif EQUAL(val,"POLYGON"):
geom_type = ogr.wkbPolygon
elif EQUAL(val,"GEOMETRYCOLLECTION"):
geom_type = ogr.wkbGeometryCollection
elif EQUAL(val,"MULTIPOINT"):
geom_type = ogr.wkbMultiPoint
elif EQUAL(val,"MULTILINESTRING"):
geom_type = ogr.wkbMultiLineString
elif EQUAL(val,"MULTIPOLYGON"):
geom_type = ogr.wkbMultiPolygon
elif EQUAL(val,"GEOMETRY25D"):
geom_type = ogr.wkbUnknown | ogr.wkb25DBit
elif EQUAL(val,"POINT25D"):
geom_type = ogr.wkbPoint25D
elif EQUAL(val,"LINESTRING25D"):
geom_type = ogr.wkbLineString25D
elif EQUAL(val,"POLYGON25D"):
geom_type = ogr.wkbPolygon25D
elif EQUAL(val,"GEOMETRYCOLLECTION25D"):
geom_type = ogr.wkbGeometryCollection25D
elif EQUAL(val,"MULTIPOINT25D"):
geom_type = ogr.wkbMultiPoint25D
elif EQUAL(val,"MULTILINESTRING25D"):
geom_type = ogr.wkbMultiLineString25D
elif EQUAL(val,"MULTIPOLYGON25D"):
geom_type = ogr.wkbMultiPolygon25D
else:
print("-nlt %s: type not recognised." % val)
return 1
elif arg == "-a_srs" and i + 1 < len(argv):
i = i + 1
srs_name = argv[i]
elif EQUAL(arg, "Union"):
op_str = "Union"
elif EQUAL(arg, "Intersection"):
op_str = "Intersection"
elif EQUAL(arg, "SymDifference"):
op_str = "SymDifference"
elif EQUAL(arg, "Identity"):
op_str = "Identity"
elif EQUAL(arg, "Update"):
op_str = "Update"
elif EQUAL(arg, "Clip"):
op_str = "Clip"
elif EQUAL(arg, "Erase"):
op_str = "Erase"
elif arg == "-overwrite":
overwrite = True
elif arg == '-q' or arg == '-quiet':
quiet_flag = 1
else:
return Usage()
i = i + 1
if input_ds_name is None or \
method_ds_name is None or \
output_ds_name is None or \
op_str is None:
return Usage()
if method_fields is None:
if op_str in ( 'Update', 'Clip', 'Erase' ):
method_fields = 'NONE'
else:
method_fields = 'ALL'
if input_fields == 'NONE' and method_fields == 'NONE':
print('Warning: -input_fields NONE and -method_fields NONE results in all fields being added')
# Input layer
input_ds = ogr.Open(input_ds_name)
if input_ds is None:
print('Cannot open input dataset : %s' % input_ds_name)
return 1
if input_lyr_name is None:
cnt = input_ds.GetLayerCount()
if cnt != 1:
            print('Input datasource does not have a single layer, so you should specify its name with -input_lyr')
return 1
input_lyr = input_ds.GetLayer(0)
else:
input_lyr = input_ds.GetLayerByName(input_lyr_name)
if input_lyr is None:
print('Cannot find input layer "%s"' % input_lyr_name)
return 1
# Method layer
method_ds = ogr.Open(method_ds_name)
if method_ds is None:
print('Cannot open method dataset : %s' % method_ds_name)
return 1
if method_lyr_name is None:
cnt = method_ds.GetLayerCount()
if cnt != 1:
            print('Method datasource does not have a single layer, so you should specify its name with -method_lyr')
return 1
method_lyr = method_ds.GetLayer(0)
else:
method_lyr = method_ds.GetLayerByName(method_lyr_name)
if method_lyr is None:
print('Cannot find method layer "%s"' % method_lyr_name)
return 1
# SRS
if srs_name is not None:
if not EQUAL(srs_name, "NULL") and not EQUAL(srs_name, "NONE"):
srs = osr.SpatialReference()
if srs.SetFromUserInput( srs_name ) != 0:
print( "Failed to process SRS definition: %s" % srs_name )
return 1
else:
srs = input_lyr.GetSpatialRef()
srs2 = method_lyr.GetSpatialRef()
if srs is None and srs2 is not None:
print('Warning: input layer has no SRS defined, but method layer has one.')
elif srs is not None and srs2 is None:
            print('Warning: input layer has an SRS defined, but method layer does not have one.')
elif srs is not None and srs2 is not None and srs.IsSame(srs2) != 1:
print('Warning: input and method layers have SRS defined, but they are not identical. No on-the-fly reprojection will be done.')
# Result layer
output_ds = ogr.Open(output_ds_name, update = 1)
if output_ds is None:
output_ds = ogr.Open(output_ds_name)
if output_ds is not None:
print('Output datasource "%s" exists, but cannot be opened in update mode' % output_ds_name)
return 1
drv = ogr.GetDriverByName(format)
if drv is None:
print('Cannot find driver %s' % format)
return 1
output_ds = drv.CreateDataSource(output_ds_name, options = dsco)
if output_ds is None:
print('Cannot create datasource "%s"' % output_ds_name)
return 1
# Special case
if EQUAL(drv.GetName(), "ESRI Shapefile") and output_lyr_name is None \
and EQUAL(os.path.splitext(output_ds_name)[1], ".SHP"):
output_lyr_name = os.path.splitext(os.path.basename(output_ds_name))[0]
if output_lyr_name is None:
print('-output_lyr should be specified')
return 1
output_lyr = CreateLayer(output_ds, output_lyr_name, srs, geom_type, lco, input_lyr, input_fields, method_lyr, method_fields, opt)
if output_lyr is None:
return 1
else:
drv = output_ds.GetDriver()
if output_lyr_name is None:
cnt = output_ds.GetLayerCount()
if cnt != 1:
                print('Result datasource does not have a single layer, so you should specify its name with -output_lyr')
return 1
output_lyr = output_ds.GetLayer(0)
output_lyr_name = output_lyr.GetName()
else:
output_lyr = output_ds.GetLayerByName(output_lyr_name)
if output_lyr is None:
if EQUAL(drv.GetName(), "ESRI Shapefile") and \
EQUAL(os.path.splitext(output_ds_name)[1], ".SHP") and \
not EQUAL(output_lyr_name, os.path.splitext(os.path.basename(output_ds_name))[0]):
print('Cannot create layer "%s" in a shapefile called "%s"' % (output_lyr_name, output_ds_name))
return 1
output_lyr = CreateLayer(output_ds, output_lyr_name, srs, geom_type, lco, input_lyr, input_fields, method_lyr, method_fields, opt)
if output_lyr is None:
return 1
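# With -overwrite, look for an existing layer of the requested name, delete it,
# and recreate it empty before running the operation.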
if overwrite:
cnt = output_ds.GetLayerCount()
for iLayer in range(cnt):
poLayer = output_ds.GetLayer(iLayer)
if poLayer is not None \
and poLayer.GetName() == output_lyr_name:
break
if iLayer != cnt:
if output_ds.DeleteLayer(iLayer) != 0:
print("DeleteLayer() failed when overwrite requested." )
return 1
output_lyr = CreateLayer(output_ds, output_lyr_name, srs, geom_type, lco, input_lyr, input_fields, method_lyr, method_fields, opt)
if output_lyr is None:
return 1
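# Resolve the requested operation name (Update, Clip, Erase, ...) to the
# corresponding OGR layer-algebra method on the input layer and run it,
# showing a terminal progress bar unless -q/-quiet was given.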
op = getattr(input_lyr, op_str)
if quiet_flag == 0:
ret = op(method_lyr, output_lyr, options = opt, callback = gdal.TermProgress_nocb)
else:
ret = op(method_lyr, output_lyr, options = opt)
input_ds = None
method_ds = None
output_ds = None
if ret != 0:
print('An error occurred during %s operation' % op_str)
return 1
return 0
###############################################################################
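# Illustrative invocation (paths, layer names and the chosen operation are
# examples only and depend on your data):
#   python ogr_layer_algebra.py Clip -input_ds parcels.shp -method_ds boundary.shp \
#       -output_ds clipped.shp -output_lyr clipped -overwrite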
if __name__ == '__main__':
version_num = int(gdal.VersionInfo('VERSION_NUM'))
if version_num < 1100000:
print('ERROR: Python bindings of GDAL 1.10 or later required')
sys.exit(1)
sys.exit(main( sys.argv ))
| lgpl-2.1 |
kosz85/django | tests/basic/tests.py | 8 | 29560 | import threading
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.utils.translation import gettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
with self.assertRaisesMessage(TypeError, "'foo' is an invalid keyword argument for this function"):
Article(
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertIsNotNone(a.id)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date, datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date, datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Parrot programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
with self.assertRaisesMessage(AttributeError, "Manager isn't accessible via Article instances"):
getattr(Article(), "objects",)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(
Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"]
)
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'), ["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date, datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(
Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45),
)
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported_edge_case(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a = Article.objects.create(
headline='Article',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertEqual(
Article.objects.get(pk=a.pk).pub_date,
datetime(2008, 12, 31, 23, 59, 59),
)
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"]
)
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline, '\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]]
)
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_gettext_lazy(self):
"""
gettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = gettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
msg = "EmptyQuerySet can't be instantiated"
with self.assertRaisesMessage(TypeError, msg):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
self.assertNotIsInstance('', EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
msg = 'Model instances without primary key value are unhashable'
with self.assertRaisesMessage(TypeError, msg):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
def test_delete_and_access_field(self):
# Accessing a field after it's deleted from a model reloads its value.
pub_date = datetime.now()
article = Article.objects.create(headline='foo', pub_date=pub_date)
new_pub_date = article.pub_date + timedelta(days=10)
article.headline = 'bar'
article.pub_date = new_pub_date
del article.headline
with self.assertNumQueries(1):
self.assertEqual(article.headline, 'foo')
# Fields that weren't deleted aren't reloaded.
self.assertEqual(article.pub_date, new_pub_date)
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Swallow programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Parrot programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(), ['<Article: Parrot programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(id__exact=2000,)
# To avoid dict-ordering related errors, check only one lookup
# in a single assert.
with self.assertRaises(ObjectDoesNotExist):
Article.objects.get(pub_date__year=2005, pub_date__month=8)
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(pub_date__week_day=6,)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]), ["<Article: Swallow programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Swallow bites Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(headline__startswith='Swallow',)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005,)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005, pub_date__month=7)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
'union',
'intersection',
'difference',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet)),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
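# A plain Article saves with a single UPDATE; ArticleSelectOnSave enables
# select_on_save, so its save() first SELECTs to check whether the row exists
# and therefore costs two queries.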
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaisesMessage(DatabaseError, 'Forced update did not affect any rows.'):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
select_on_save works correctly if the database doesn't return correct
information about matched rows from UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
# test this properly otherwise. We patch Article's manager rather than
# ArticleSelectOnSave's, because proxy models use their parent model's _base_manager.
orig_class = Article._base_manager._queryset_class
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super()._update(*args, **kwargs)
return 0
try:
Article._base_manager._queryset_class = FakeQuerySet
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaisesMessage(DatabaseError, 'Forced update did not affect any rows.'):
asos.save(force_update=True)
msg = (
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
)
with self.assertRaisesMessage(DatabaseError, msg):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager._queryset_class = orig_class
class ModelRefreshTests(TestCase):
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from the datetimes which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_unknown_kwarg(self):
s = SelfRef.objects.create()
msg = "refresh_from_db() got an unexpected keyword argument 'unknown_kwarg'"
with self.assertRaisesMessage(TypeError, msg):
s.refresh_from_db(unknown_kwarg=10)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query is
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_fk_on_delete_set_null(self):
a = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
s1 = SelfRef.objects.create(article=a)
a.delete()
s1.refresh_from_db()
self.assertIsNone(s1.article_id)
self.assertIsNone(s1.article)
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
| bsd-3-clause |
AdrianGaudebert/socorro | socorro/unittest/external/rabbitmq/test_connection_context.py | 3 | 8203 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from nose.tools import eq_, ok_, assert_raises
from mock import (
Mock,
call,
patch
)
from threading import currentThread
from socorro.external.rabbitmq.connection_context import (
Connection,
ConnectionContext,
ConnectionContextPooled
)
from socorro.lib.util import DotDict
from socorro.unittest.testbase import TestCase
class TestConnection(TestCase):
"""Test PostgreSQLBase class. """
def test_constructor(self):
faked_connection_object = Mock()
config = DotDict()
conn = Connection(
config,
faked_connection_object
)
ok_(conn.config is config)
ok_(conn.connection is faked_connection_object)
faked_connection_object.channel.assert_called_once_with()
eq_(
faked_connection_object.channel.return_value
.queue_declare.call_count,
3
)
expected_queue_declare_call_args = [
call(queue='socorro.normal', durable=True),
call(queue='socorro.priority', durable=True),
call(queue='socorro.reprocessing', durable=True),
]
eq_(
faked_connection_object.channel.return_value.queue_declare \
.call_args_list,
expected_queue_declare_call_args
)
def test_close(self):
faked_connection_object = Mock()
config = DotDict()
conn = Connection(
config,
faked_connection_object
)
conn.close()
faked_connection_object.close.assert_called_once_with()
class TestConnectionContext(TestCase):
def _setup_config(self):
config = DotDict()
config.host = 'localhost'
config.virtual_host = '/'
config.port = '5672'
config.rabbitmq_user = 'guest'
config.rabbitmq_password = 'guest'
config.standard_queue_name = 'dwight'
config.priority_queue_name = 'wilma'
config.reprocessing_queue_name = 'betty'
config.rabbitmq_connection_wrapper_class = Connection
config.executor_identity = lambda: 'MainThread'
return config
def test_constructor(self):
conn_context_functor = ConnectionContext(self._setup_config)
ok_(
conn_context_functor.config is conn_context_functor.local_config
)
def test_connection(self):
config = self._setup_config()
pika_string = 'socorro.external.rabbitmq.connection_context.pika'
with patch(pika_string) as mocked_pika_module:
conn_context_functor = ConnectionContext(config)
conn = conn_context_functor.connection()
mocked_pika_module.credentials.PlainCredentials \
.assert_called_once_with('guest', 'guest')
mocked_pika_module.ConnectionParameters.assert_called_once_with(
host=conn_context_functor.config.host,
port=conn_context_functor.config.port,
virtual_host=conn_context_functor.config.virtual_host,
credentials=mocked_pika_module.credentials. \
PlainCredentials.return_value
)
mocked_pika_module.BlockingConnection.assert_called_once_with(
mocked_pika_module.ConnectionParameters.return_value
)
ok_(isinstance(conn, Connection))
ok_(conn.config is config)
ok_(
conn.connection is
mocked_pika_module.BlockingConnection.return_value
)
ok_(
conn.channel is conn.connection.channel.return_value
)
expected_queue_declare_call_args = [
call(queue='dwight', durable=True),
call(queue='wilma', durable=True),
call(queue='betty', durable=True),
]
eq_(
conn.channel.queue_declare.call_args_list,
expected_queue_declare_call_args
)
def test_call_and_close_connection(self):
config = self._setup_config()
pika_string = 'socorro.external.rabbitmq.connection_context.pika'
with patch(pika_string):
conn_context_functor = ConnectionContext(config)
with conn_context_functor() as conn_context:
ok_(isinstance(conn_context, Connection))
conn_context.connection.close.assert_called_once_with()
class TestConnectionContextPooled(TestCase):
def _setup_config(self):
config = DotDict()
config.host = 'localhost'
config.virtual_host = '/'
config.port = '5672'
config.rabbitmq_user = 'guest'
config.rabbitmq_password = 'guest'
config.standard_queue_name = 'dwight'
config.priority_queue_name = 'wilma'
config.reprocessing_queue_name = 'betty'
config.rabbitmq_connection_wrapper_class = Connection
config.logger = Mock()
config.executor_identity = lambda: 'MainThread'
return config
def test_constructor(self):
conn_context_functor = ConnectionContextPooled(self._setup_config)
ok_(
conn_context_functor.config is conn_context_functor.local_config
)
eq_(len(conn_context_functor.pool), 0)
def test_connection(self):
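# The pooled context caches one connection per executor identity (the current
# thread name by default) and can also hold extra connections stored under an
# explicit name.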
config = self._setup_config()
pika_string = 'socorro.external.rabbitmq.connection_context.pika'
with patch(pika_string):
conn_context_functor = ConnectionContextPooled(config)
conn = conn_context_functor.connection()
ok_(
conn is conn_context_functor.pool[currentThread().getName()]
)
conn = conn_context_functor.connection('dwight')
ok_(
conn is conn_context_functor.pool['dwight']
)
# get the same connection again to make sure it really is the same
conn = conn_context_functor.connection()
ok_(
conn is conn_context_functor.pool[currentThread().getName()]
)
def test_close_connection(self):
config = self._setup_config()
pika_string = 'socorro.external.rabbitmq.connection_context.pika'
with patch(pika_string):
conn_context_functor = ConnectionContextPooled(config)
conn = conn_context_functor.connection('dwight')
ok_(
conn is conn_context_functor.pool['dwight']
)
conn_context_functor.close_connection(conn)
# should be no change
ok_(
conn is conn_context_functor.pool['dwight']
)
eq_(len(conn_context_functor.pool), 1)
conn_context_functor.close_connection(conn, True)
assert_raises(
KeyError,
lambda : conn_context_functor.pool['dwight']
)
eq_(len(conn_context_functor.pool), 0)
def test_close(self):
config = self._setup_config()
pika_string = 'socorro.external.rabbitmq.connection_context.pika'
with patch(pika_string):
conn_context_functor = ConnectionContextPooled(config)
conn_context_functor.connection()
conn_context_functor.connection('dwight')
conn_context_functor.connection('wilma')
conn_context_functor.close()
eq_(len(conn_context_functor.pool), 0)
def test_force_reconnect(self):
config = self._setup_config()
pika_string = 'socorro.external.rabbitmq.connection_context.pika'
with patch(pika_string):
conn_context_functor = ConnectionContextPooled(config)
conn = conn_context_functor.connection()
ok_(
conn is conn_context_functor.pool[currentThread().getName()]
)
conn_context_functor.force_reconnect()
eq_(len(conn_context_functor.pool), 0)
conn2 = conn_context_functor.connection()
ok_(not conn == conn2)
| mpl-2.0 |
BNUCNL/FreeROI | froi/algorithm/unused/spectralmapper.py | 6 | 3516 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Mapper for spectral clustering.
Date: 2012.05.29
"""
__docformat__ = 'restructuredtext'
import numpy as np
import scipy.sparse as sp
from mvpa2.base import warning
from mvpa2.base.dochelpers import _str, borrowkwargs, _repr_attrs
from mvpa2.mappers.base import accepts_dataset_as_samples, Mapper
from mvpa2.datasets.base import Dataset
from mvpa2.datasets.miscfx import get_nsamples_per_attr, get_samples_by_attr
from mvpa2.support import copy
from sklearn.cluster import SpectralClustering
class SpectralMapper(Mapper):
"""Mapper to do spectral clustering
"""
def __init__(self, chunks_attr=None, k=8, mode='arpack', random_state=None, n_init=10, **kwargs):
"""
parameters
__________
chunks_attr : str or None
If provided, it specifies the name of a samples attribute in the
training data, unique values of which will be used to identify chunks of
samples, and to perform individual clustering within them.
k : int or ndarray
The number of clusters to form as well as the number of centroids to
generate. If an ndarray is given instead of an int, it is interpreted as
the initial clusters to use.
mode : {None, 'arpack' or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when mode == 'amg'
and by the K-Means initialization.
n_init : int
Number of times the k-means algorithm will be run. Note that this
differs in meaning from the iters parameter to the kmeans function.
"""
Mapper.__init__(self, **kwargs)
self.__chunks_attr = chunks_attr
self.__k = k
self.__mode = mode
self.__random_state = random_state
self.__n_init = n_init
def __repr__(self, prefixes=[]):
return super(SpectralMapper, self).__repr__(
prefixes=prefixes
+ _repr_attrs(self, ['chunks_attr', 'k', 'mode', 'random_state', 'n_init']))
def __str__(self):
return _str(self)
def _forward_dataset(self, ds):
chunks_attr = self.__chunks_attr
mds = Dataset([])
mds.a = ds.a
# mds.sa =ds.sa
# mds.fa =ds.fa
if chunks_attr is None:
# global spectral clustering
mds.samples = self._spectralcluster(ds.samples).labels_
print max(mds.samples)
else:
# per-chunk spectral clustering
for c in ds.sa[chunks_attr].unique:
slicer = np.where(ds.sa[chunks_attr].value == c)[0]
mds.samples = ds.samples[0,:]
mds.samples[slicer] = self._spectralcluster(ds.samples[slicer]).labels_
return mds
def _spectralcluster(self, samples):
if sp.issparse(samples):
samples = samples.todense()
print np.shape(samples)
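# Turn the (dis)similarity matrix into an affinity matrix with a Gaussian kernel,
# using the overall standard deviation as the scale (a fixed choice in this code).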
samples = np.exp(-samples/samples.std())
return SpectralClustering(k=self.__k, n_init=self.__n_init, mode=self.__mode).fit(samples)
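# Illustrative usage (assumes a PyMVPA Dataset `ds` whose samples form a pairwise
# distance matrix; method names follow the PyMVPA Mapper interface):
#   mapper = SpectralMapper(k=10, mode='arpack')
#   clustered = mapper.forward(ds)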
| bsd-3-clause |
kou/zulip | zerver/lib/markdown/fenced_code.py | 2 | 16289 | """
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Include tildes in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
... ~~~~~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code>
~~~~
</code></pre>
Removes trailing whitespace from code blocks that cause horizontal scrolling
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block \t\t\t\t\t\t\t
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Language tags:
>>> text = '''
... ~~~~{.python}
... # Some python code
... ~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code class="python"># Some python code
</code></pre>
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>
Contact: [email protected]
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments (optional)](http://pygments.org)
"""
import re
from typing import Any, Iterable, List, Mapping, MutableSequence, Optional, Sequence
import lxml.html
from django.utils.html import escape
from markdown import Markdown
from markdown.extensions import Extension
from markdown.extensions.codehilite import CodeHilite, CodeHiliteExtension
from markdown.preprocessors import Preprocessor
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
from zerver.lib.exceptions import MarkdownRenderingException
from zerver.lib.tex import render_tex
# Global vars
FENCE_RE = re.compile("""
# ~~~ or ```
(?P<fence>
^(?:~{3,}|`{3,})
)
[ ]* # spaces
(
\\{?\\.?
(?P<lang>
[a-zA-Z0-9_+-./#]*
) # "py" or "javascript"
\\}?
) # language, like ".py" or "{javascript}"
[ ]* # spaces
(
\\{?\\.?
(?P<header>
[^~`]*
)
\\}?
) # header for features that use fenced block header syntax (like spoilers)
$
""", re.VERBOSE)
CODE_WRAP = '<pre><code{}>{}\n</code></pre>'
LANG_TAG = ' class="{}"'
def validate_curl_content(lines: List[str]) -> None:
error_msg = """
Missing required -X argument in curl command:
{command}
""".strip()
for line in lines:
regex = r'curl [-](sS)?X "?(GET|DELETE|PATCH|POST)"?'
if line.startswith('curl'):
if re.search(regex, line) is None:
raise MarkdownRenderingException(error_msg.format(command=line.strip()))
CODE_VALIDATORS = {
'curl': validate_curl_content,
}
class FencedCodeExtension(Extension):
def __init__(self, config: Mapping[str, Any] = {}) -> None:
self.config = {
'run_content_validators': [
config.get('run_content_validators', False),
'Boolean specifying whether to run content validation code in CodeHandler',
],
}
for key, value in config.items():
self.setConfig(key, value)
def extendMarkdown(self, md: Markdown) -> None:
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
processor = FencedBlockPreprocessor(
md, run_content_validators=self.config['run_content_validators'][0])
md.preprocessors.register(processor, 'fenced_code_block', 25)
class BaseHandler:
def handle_line(self, line: str) -> None:
raise NotImplementedError()
def done(self) -> None:
raise NotImplementedError()
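# The preprocessor keeps a stack of handlers: every input line is fed to the handler
# on top of the stack, an opening fence pushes a nested handler (code, quote, spoiler
# or tex), and the matching closing fence pops it again via done().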
def generic_handler(processor: Any, output: MutableSequence[str],
fence: str, lang: str, header: str,
run_content_validators: bool=False,
default_language: Optional[str]=None) -> BaseHandler:
lang = lang.lower()
if lang in ('quote', 'quoted'):
return QuoteHandler(processor, output, fence, default_language)
elif lang == 'math':
return TexHandler(processor, output, fence)
elif lang == 'spoiler':
return SpoilerHandler(processor, output, fence, header)
else:
return CodeHandler(processor, output, fence, lang, run_content_validators)
def check_for_new_fence(processor: Any, output: MutableSequence[str], line: str,
run_content_validators: bool=False,
default_language: Optional[str]=None) -> None:
m = FENCE_RE.match(line)
if m:
fence = m.group('fence')
lang = m.group('lang')
header = m.group('header')
if not lang and default_language:
lang = default_language
handler = generic_handler(processor, output, fence, lang, header,
run_content_validators, default_language)
processor.push(handler)
else:
output.append(line)
class OuterHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str],
run_content_validators: bool=False,
default_language: Optional[str]=None) -> None:
self.output = output
self.processor = processor
self.run_content_validators = run_content_validators
self.default_language = default_language
def handle_line(self, line: str) -> None:
check_for_new_fence(self.processor, self.output, line,
self.run_content_validators, self.default_language)
def done(self) -> None:
self.processor.pop()
class CodeHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str],
fence: str, lang: str, run_content_validators: bool=False) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.lang = lang
self.lines: List[str] = []
self.run_content_validators = run_content_validators
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
self.lines.append(line.rstrip())
def done(self) -> None:
text = '\n'.join(self.lines)
# run content validators (if any)
if self.run_content_validators:
validator = CODE_VALIDATORS.get(self.lang, lambda text: None)
validator(self.lines)
text = self.processor.format_code(self.lang, text)
text = self.processor.placeholder(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class QuoteHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str],
fence: str, default_language: Optional[str]=None) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.lines: List[str] = []
self.default_language = default_language
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(self.processor, self.lines, line, default_language=self.default_language)
def done(self) -> None:
text = '\n'.join(self.lines)
text = self.processor.format_quote(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class SpoilerHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str],
fence: str, spoiler_header: str) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.spoiler_header = spoiler_header
self.lines: List[str] = []
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(self.processor, self.lines, line)
def done(self) -> None:
if len(self.lines) == 0:
# No content, do nothing
return
else:
header = self.spoiler_header
text = '\n'.join(self.lines)
text = self.processor.format_spoiler(header, text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class TexHandler(BaseHandler):
def __init__(self, processor: Any, output: MutableSequence[str], fence: str) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.lines: List[str] = []
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
self.lines.append(line)
def done(self) -> None:
text = '\n'.join(self.lines)
text = self.processor.format_tex(text)
text = self.processor.placeholder(text)
processed_lines = text.split('\n')
self.output.append('')
self.output.extend(processed_lines)
self.output.append('')
self.processor.pop()
class FencedBlockPreprocessor(Preprocessor):
def __init__(self, md: Markdown, run_content_validators: bool=False) -> None:
super().__init__(md)
self.checked_for_codehilite = False
self.run_content_validators = run_content_validators
self.codehilite_conf: Mapping[str, Sequence[Any]] = {}
def push(self, handler: BaseHandler) -> None:
self.handlers.append(handler)
def pop(self) -> None:
self.handlers.pop()
def run(self, lines: Iterable[str]) -> List[str]:
""" Match and store Fenced Code Blocks in the HtmlStash. """
output: List[str] = []
processor = self
self.handlers: List[BaseHandler] = []
default_language = None
try:
default_language = self.md.zulip_realm.default_code_block_language
except AttributeError:
pass
handler = OuterHandler(processor, output, self.run_content_validators, default_language)
self.push(handler)
for line in lines:
self.handlers[-1].handle_line(line)
while self.handlers:
self.handlers[-1].done()
# This fiddly handling of new lines at the end of our output was done to make
# existing tests pass. Markdown is just kind of funny when it comes to new lines,
# but we could probably remove this hack.
if len(output) > 2 and output[-2] != '':
output.append('')
return output
def format_code(self, lang: str, text: str) -> str:
if lang:
langclass = LANG_TAG.format(lang)
else:
langclass = ''
# Check for code hilite extension
if not self.checked_for_codehilite:
for ext in self.md.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.config
break
self.checked_for_codehilite = True
# If config is not empty, then the codehilite extension
# is enabled, so we call it to highlight the code
if self.codehilite_conf:
highliter = CodeHilite(text,
linenums=self.codehilite_conf['linenums'][0],
guess_lang=self.codehilite_conf['guess_lang'][0],
css_class=self.codehilite_conf['css_class'][0],
style=self.codehilite_conf['pygments_style'][0],
use_pygments=self.codehilite_conf['use_pygments'][0],
lang=(lang or None),
noclasses=self.codehilite_conf['noclasses'][0])
code = highliter.hilite().rstrip('\n')
else:
code = CODE_WRAP.format(langclass, self._escape(text))
# To support our "view in playground" feature, the frontend
# needs to know what Pygments language was used for
# highlighting this code block. We record this in a data
# attribute attached to the outer `pre` element.
# Unfortunately, the pygments API doesn't offer a way to add
# this, so we need to do it in a post-processing step.
if lang:
div_tag = lxml.html.fromstring(code)
# For the value of our data element, we get the lexer
# subclass name instead of directly using the language,
# since that canonicalizes aliases (Eg: `js` and
# `javascript` will be mapped to `JavaScript`).
try:
code_language = get_lexer_by_name(lang).name
except ClassNotFound:
# If there isn't a Pygments lexer by this name, we
# still tag it with the user's data-code-language
# value, since this allows hooking up a "playground"
# for custom "languages" that aren't known to Pygments.
code_language = lang
div_tag.attrib['data-code-language'] = code_language
code = lxml.html.tostring(div_tag, encoding="unicode")
return code
def format_quote(self, text: str) -> str:
paragraphs = text.split("\n")
quoted_paragraphs = []
for paragraph in paragraphs:
lines = paragraph.split("\n")
quoted_paragraphs.append("\n".join("> " + line for line in lines))
return "\n".join(quoted_paragraphs)
def format_spoiler(self, header: str, text: str) -> str:
output = []
header_div_open_html = '<div class="spoiler-block"><div class="spoiler-header">'
end_header_start_content_html = '</div><div class="spoiler-content" aria-hidden="true">'
footer_html = '</div></div>'
output.append(self.placeholder(header_div_open_html))
output.append(header)
output.append(self.placeholder(end_header_start_content_html))
output.append(text)
output.append(self.placeholder(footer_html))
return "\n\n".join(output)
def format_tex(self, text: str) -> str:
paragraphs = text.split("\n\n")
tex_paragraphs = []
for paragraph in paragraphs:
html = render_tex(paragraph, is_inline=False)
if html is not None:
tex_paragraphs.append(html)
else:
tex_paragraphs.append('<span class="tex-error">' +
escape(paragraph) + '</span>')
return "\n\n".join(tex_paragraphs)
def placeholder(self, code: str) -> str:
return self.md.htmlStash.store(code)
def _escape(self, txt: str) -> str:
""" basic html escaping """
txt = txt.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
return txt
def makeExtension(*args: Any, **kwargs: None) -> FencedCodeExtension:
return FencedCodeExtension(kwargs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| apache-2.0 |
lachlandeer/bom-scraper | src/lib/processBoxOfficeReturns.py | 1 | 10372 | """
This collection of functions scrapes Box Office Returns at the
weekly, weekend, and daily levels from a film's page on Box Office Mojo.
Last Edit: March, 2017
"""
import requests
from bs4 import BeautifulSoup
import re
import dateutil.parser
from string import ascii_uppercase
import pandas as pd
# import pickle
import time
import urllib.request
import csv
sess = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=10)
sess.mount('http://', adapter)
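# All scraping helpers below share this one HTTP session, whose adapter retries
# failed requests up to 10 times.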
# First Information about dates
def extract_year(anchorString):
'''
Find the year the current data belongs to
'''
try:
year=re.findall(r'20[0-9][0-9]', anchorString)[0]
return year
except:
return None
def extract_calendarWeek(anchorString):
'''
Find the calendar week the current data belongs to
'''
try:
calendarWeek=re.findall(r'wk\=(.[0-9]{1})', anchorString)[0]
return calendarWeek
except:
pass
try:
calendarWeek=re.findall(r'wknd\=(.[0-9]{1})', anchorString)[0]
return calendarWeek
except:
return None
def extract_date(anchorString):
'''
Find the start and end date of the Calendar Week
'''
try:
date = re.findall(r'<b>(.+?)<', anchorString)[0]
# clean out any badly parsed symbols
date = re.sub('\x96', '-', date)
return date
except:
return None
def find_dateInfo(anchorString):
'''
Returns all relevant date information contained in the Box Office Mojo href string
'''
#obj = str(anchor)
year=extract_year(anchorString)
calendarWeek=extract_calendarWeek(anchorString)
date=extract_date(anchorString)
return year, calendarWeek, date
# Now Box Office Relevant information
def money_to_int(moneystring):
'''
A helper function to strip out dollar signs ($) and commas leaving any
dollar value as an integer
'''
try:
moneystring = moneystring.replace('$', '').replace(',', '')
return int(moneystring)
except:
return moneystring
def get_weekly_movieRank(anchor):
'''
Return the Rank of the movie over a given time period.
Rank compares a movie's Box Office takings to other movies currently in cinemas
'''
try:
rank_tag = anchor.find_next("td")
rank = rank_tag.get_text()
return rank
except:
return None
def get_boxOffice(anchor):
'''
Return the Rank of the movie over a given week or weekend.
'''
try:
boxOffice_tag = anchor.find_next("td").find_next("td")
boxOffice = boxOffice_tag.get_text()
boxOffice = money_to_int(boxOffice)
return boxOffice
except:
return None
def get_theatres(anchor):
'''
Return the number of theatres the movie was showing in over a given
week/weekend
The data are always reported as constant over a week, using the
weekend number as the number of theatres.
'''
try:
theatres_tag = anchor.find_next("td").find_next("td").find_next("td").find_next("td")
theatres = theatres_tag.get_text()
theatres = int(theatres.replace(',' , ''))
return theatres
except:
return None
def get_totalBoxOfficeToDate(anchor):
'''
Return the the total box office returns of a film upto (and including)
that week/weekend
'''
try:
totalBoxOffice_tag = anchor.find_next("td").find_next("td").find_next("td").find_next("td").find_next("td").find_next("td").find_next("td")
totalBoxOffice = totalBoxOffice_tag.get_text()
totalBoxOffice = money_to_int(totalBoxOffice)
return totalBoxOffice
except:
return None
def identify_longWeekend(df):
'''
Identifies long weekends by a leading <i> tag in the date column.
Creates a dummy variable for long weekends, then cleans up the date column
and passes the data frame back to the user.
'''
df['longWeekend'] = df.date.str.contains('<i>')
df['date'] = df.date.str.replace('<i>', '')
return df
def scrape_BoxOfficeInfo(href_pattern, soup, movie_id):
'''
Scrape the necessary Box Office information from the webpage
'''
df_movie = pd.DataFrame()
for iAnchor in soup.findAll('a', href=href_pattern):
## convert to string for regular expression parsing
anchorString = str(iAnchor)
## Get date information from stripping info from inside the href link
year, calendarWeek, date = find_dateInfo(anchorString)
## Get Box Office Information etc
rank = get_weekly_movieRank(iAnchor)
boxOffice = get_boxOffice(iAnchor)
theatres = get_theatres(iAnchor)
grossBoxOffice = get_totalBoxOfficeToDate(iAnchor)
## Put data into a weekly data-frame
df_week = pd.DataFrame([[movie_id, year, calendarWeek, date,
rank, boxOffice, theatres, grossBoxOffice
]]
)
## append that week to existing data
df_movie = df_movie.append(df_week, ignore_index=True)
## end for loop
# label the columns
if not df_movie.empty:
df_movie.columns = ["movie_id", "year", "calendarWeek", "date", "rank",
"boxOffice", "theatres", "grossBoxOffice"]
return df_movie
else:
pass
def scrape_dailyBoxOfficeInfo(href_pattern, soup, movie_id):
'''
Scrape the necessary daily Box Office information from the webpage.
Daily Box Office returns are stored in a different pattern than weekly and
weekend returns, so need a separate scraper
'''
df_movie = pd.DataFrame()
for iAnchor in soup.findAll('a', href=href_pattern):
## convert to string for regular expression parsing
anchorString = str(iAnchor)
# date Information
try:
year=re.findall(r'20[0-9][0-9]', anchorString)[0]
except:
year = None
try:
date = re.findall(r'<b>(.+?)<', anchorString)[0]
date = re.sub('\x96', '-', date)
except:
date = None
# Get Box Office Information etc
try:
rank_tag = iAnchor.find_next("td")
rank = rank_tag.get_text()
except:
rank = None
# here is box office
try:
boxOffice_tag = rank_tag.find_next("td")
boxOffice = boxOffice_tag.get_text()
boxOffice = money_to_int(boxOffice)
except:
boxOffice = None
# find theatres
try:
theatres_tag = boxOffice_tag.find_next("td").find_next("td").find_next("td").contents[0]
theatres = theatres_tag.get_text()
theatres = int(theatres.replace(',' , ''))
except:
theatres = None
# find gross to date
try:
grossBO_tag = theatres_tag.find_next("td").find_next("td").contents[0]
grossBoxOffice = grossBO_tag.get_text()
grossBoxOffice = money_to_int(grossBoxOffice)
except:
grossBoxOffice = None
# get day of release
try:
dayOfRelease_tag = grossBO_tag.find_next("td").contents[0]
dayOfRelease = dayOfRelease_tag.get_text()
except:
dayOfRelease = None
# package it up
df_week = pd.DataFrame([[movie_id, year, date,
rank, boxOffice, theatres, grossBoxOffice, dayOfRelease
]]
)
df_movie = df_movie.append(df_week, ignore_index=True)
## label the columns
if not df_movie.empty:
df_movie.columns = ["movie_id", "year", "date", "rank", "boxOffice",
"theatres", "grossBoxOffice", "dayOfRelease"]
return df_movie
else:
pass
def process_weekendBoxOffice(currentURL):
'''
Takes a URL to a movie website on Box Office Mojo and collects weekend
Box Office information.
'''
href_pattern = re.compile('^/weekend/chart/\?yr')
# Get the movie ID and direct to the page storing weekend Box Office takings
movie_id = currentURL.rsplit('=', 1)[-1].rsplit('.', 1)[0]
print('Getting Weekend Box Office for', movie_id)
boxOffice_url = 'http://www.boxofficemojo.com/movies/?page=weekend&id=' + movie_id + '.htm'
response = sess.get(boxOffice_url)
if response.status_code != 200:
return None
page = response.text
soup = BeautifulSoup(page,"lxml")
df_movie = scrape_BoxOfficeInfo(href_pattern, soup, movie_id)
# clean up long weekend information
if df_movie is not None:
df_movie = identify_longWeekend(df_movie)
else:
pass
return movie_id, df_movie
def process_weeklyBoxOffice(currentURL):
'''
Takes a URL to a movie website on Box Office Mojo and collects weekly
Box Office information.
'''
href_pattern = re.compile('^/weekly/chart/')
# Get the movie ID and direct to the page storing weekend Box Office takings
movie_id = currentURL.rsplit('=', 1)[-1].rsplit('.', 1)[0]
print('Getting Weekly Box Office for', movie_id)
boxOffice_url = 'http://www.boxofficemojo.com/movies/?page=weekly&id=' + movie_id + '.htm'
response = sess.get(boxOffice_url)
if response.status_code != 200:
return None
page = response.text
soup = BeautifulSoup(page,"lxml")
df_movie = scrape_BoxOfficeInfo(href_pattern, soup, movie_id)
return movie_id, df_movie
def process_dailyBoxOffice(currentURL):
'''
Takes a URL to a movie website on Box Office Mojo and collects daily
Box Office information.
'''
href_pattern = re.compile('^/daily/chart/\?sortdate=')
# Get the movie ID and direct to the page storing weekend Box Office takings
movie_id = currentURL.rsplit('=', 1)[-1].rsplit('.', 1)[0]
print('Getting Daily Box Office for', movie_id)
boxOffice_url = 'http://www.boxofficemojo.com/movies/?page=daily&view=chart&id=' + movie_id + '.htm'
response = sess.get(boxOffice_url)
if response.status_code != 200:
return None
page = response.text
soup = BeautifulSoup(page,"lxml")
df_movie = scrape_dailyBoxOfficeInfo(href_pattern, soup, movie_id)
return movie_id, df_movie
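# Illustrative usage (the URL only needs to end in '=<movie_id>.htm' to match the
# parsing above; the example id is hypothetical):
#   movie_id, weekend_df = process_weekendBoxOffice(
#       'http://www.boxofficemojo.com/movies/?id=avatar.htm')
#   movie_id, daily_df = process_dailyBoxOffice(
#       'http://www.boxofficemojo.com/movies/?id=avatar.htm')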
| mit |
qlands/onadata | onadata/apps/logger/migrations/0029_auto__chg_field_attachment_mimetype__add_field_xform_encrypted__add_fi.py | 13 | 10462 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Attachment.mimetype'
db.alter_column(u'odk_logger_attachment', 'mimetype', self.gf('django.db.models.fields.CharField')(max_length=50))
# Adding field 'XForm.surveys_with_geopoints'
db.add_column(u'odk_logger_xform', 'surveys_with_geopoints',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Changing field 'Attachment.mimetype'
db.alter_column(u'odk_logger_attachment', 'mimetype', self.gf('django.db.models.fields.CharField')(max_length=20))
# Deleting field 'XForm.surveys_with_geopoints'
db.delete_column(u'odk_logger_xform', 'surveys_with_geopoints')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.attachment': {
'Meta': {'object_name': 'Attachment'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'mimetype': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'})
},
'odk_logger.instance': {
'Meta': {'object_name': 'Instance'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.instancehistory': {
'Meta': {'object_name': 'InstanceHistory'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
'xform_instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submission_history'", 'to': "orm['odk_logger.Instance']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'), ('user', 'sms_id_string'))", 'object_name': 'XForm'},
'allows_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'encrypted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'is_crowd_form': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sms_id_string': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '50'}),
'surveys_with_geopoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['logger']
| bsd-2-clause |
collex100/odoo | addons/mrp/wizard/stock_move.py | 110 | 3398 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_move_consume(osv.osv_memory):
_name = "stock.move.consume"
_description = "Consume Products"
_columns = {
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'location_id': fields.many2one('stock.location', 'Location', required=True),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot'),
}
    #TOFIX: product_uom should not have a different category than the product's default UOM. Qty should be converted into the UOM of the original move line before going into consume and scrap
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(stock_move_consume, self).default_get(cr, uid, fields, context=context)
move = self.pool.get('stock.move').browse(cr, uid, context['active_id'], context=context)
if 'product_id' in fields:
res.update({'product_id': move.product_id.id})
if 'product_uom' in fields:
res.update({'product_uom': move.product_uom.id})
if 'product_qty' in fields:
res.update({'product_qty': move.product_uom_qty})
if 'location_id' in fields:
res.update({'location_id': move.location_id.id})
return res
def do_move_consume(self, cr, uid, ids, context=None):
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
uom_obj = self.pool.get('product.uom')
move_ids = context['active_ids']
for data in self.browse(cr, uid, ids, context=context):
if move_ids and move_ids[0]:
move = move_obj.browse(cr, uid, move_ids[0], context=context)
qty = uom_obj._compute_qty(cr, uid, data['product_uom'].id, data.product_qty, data.product_id.uom_id.id)
move_obj.action_consume(cr, uid, move_ids,
qty, data.location_id.id, restrict_lot_id=data.restrict_lot_id.id,
context=context)
return {'type': 'ir.actions.act_window_close'}
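# Editor's note (illustrative, not part of the original module): this wizard is
# normally opened from a stock.move action, so ``context['active_id']`` /
# ``context['active_ids']`` carry the selected move(s). ``default_get()``
# pre-fills the fields from the active move, and ``do_move_consume()`` converts
# the entered quantity into the product's default unit of measure before
# delegating to ``stock.move.action_consume()``.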
| agpl-3.0 |
rynomster/django | django/contrib/gis/geos/prototypes/io.py | 25 | 9981 | import threading
from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils import six
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
pass
class WKTWriter_st(Structure):
pass
class WKBReader_st(Structure):
pass
class WKBWriter_st(Structure):
pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBWriter_st)
# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
wkt_reader_read = GEOSFuncFactory(
'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)
# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
wkt_writer_write = GEOSFuncFactory(
'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
wkt_writer_get_outdim = GEOSFuncFactory(
'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = GEOSFuncFactory(
'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
wkt_writer_set_trim = GEOSFuncFactory('GEOSWKTWriter_setTrim', argtypes=[WKT_WRITE_PTR, c_char])
wkt_writer_set_precision = GEOSFuncFactory('GEOSWKTWriter_setRoundingPrecision', argtypes=[WKT_WRITE_PTR, c_int])
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])
class WKBReadFunc(GEOSFuncFactory):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
# take Python strings directly as parameters. Inside Python there
# is not a difference between signed and unsigned characters, so
# it is not a problem.
argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')
# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR]
restype = c_int
class WKBWriterSet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, c_int]
wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_char)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_char])
# ### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
# Loading the real destructor function at this point as doing it in
# __del__ is too late (import error).
self._destructor.func = self._destructor.get_func(
*self._destructor.args, **self._destructor.kwargs
)
def __del__(self):
# Cleaning up with the appropriate destructor.
try:
self._destructor(self._ptr)
except (AttributeError, TypeError):
pass # Some part might already have been garbage collected
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
_destructor = wkt_reader_destroy
ptr_type = WKT_READ_PTR
def read(self, wkt):
if not isinstance(wkt, (bytes, six.string_types)):
raise TypeError
return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
_constructor = wkb_reader_create
_destructor = wkb_reader_destroy
ptr_type = WKB_READ_PTR
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, six.memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, six.string_types)):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
_destructor = wkt_writer_destroy
ptr_type = WKT_WRITE_PTR
_trim = False
_precision = None
def write(self, geom):
"Returns the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
@property
def outdim(self):
return wkt_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKT output dimension must be 2 or 3')
wkt_writer_set_outdim(self.ptr, new_dim)
@property
def trim(self):
return self._trim
@trim.setter
def trim(self, flag):
self._trim = bool(flag)
wkt_writer_set_trim(self.ptr, b'\x01' if flag else b'\x00')
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, precision):
if isinstance(precision, int) and precision >= 0 or precision is None:
self._precision = precision
wkt_writer_set_precision(self.ptr, -1 if precision is None else precision)
else:
raise AttributeError('WKT output rounding precision must be non-negative integer or None.')
class WKBWriter(IOBase):
_constructor = wkb_writer_create
_destructor = wkb_writer_destroy
ptr_type = WKB_WRITE_PTR
def write(self, geom):
"Returns the WKB representation of the given geometry."
return six.memoryview(wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t())))
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
return wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
# ### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
def _get_outdim(self):
return wkb_writer_get_outdim(self.ptr)
def _set_outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
outdim = property(_get_outdim, _set_outdim)
# Property for getting/setting the include srid flag.
def _get_include_srid(self):
return bool(ord(wkb_writer_get_include_srid(self.ptr)))
def _set_include_srid(self, include):
if include:
flag = b'\x01'
else:
flag = b'\x00'
wkb_writer_set_include_srid(self.ptr, flag)
srid = property(_get_include_srid, _set_include_srid)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
if not thread_context.wkt_r:
thread_context.wkt_r = _WKTReader()
return thread_context.wkt_r
def wkt_w(dim=2):
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter()
thread_context.wkt_w.outdim = dim
return thread_context.wkt_w
def wkb_r():
if not thread_context.wkb_r:
thread_context.wkb_r = _WKBReader()
return thread_context.wkb_r
def wkb_w(dim=2):
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter()
thread_context.wkb_w.outdim = dim
return thread_context.wkb_w
def ewkb_w(dim=2):
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter()
thread_context.ewkb_w.srid = True
thread_context.ewkb_w.outdim = dim
return thread_context.ewkb_w
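# Illustrative sketch (editor's addition, not part of Django): the thread-local
# accessors above are what GEOSGeometry uses internally for (E)WKT/WKB output.
# The demo below assumes Django and the GEOS C library are available and only
# runs when this module is executed directly.
if __name__ == '__main__':
    from django.contrib.gis.geos import GEOSGeometry
    demo_geom = GEOSGeometry('SRID=4326;POINT (5 23)')
    print(wkt_w(dim=2).write(demo_geom))             # WKT as bytes
    print(ewkb_w(dim=2).write_hex(demo_geom)[:18])   # start of the hex EWKB (SRID included)
    print(wkb_w(dim=2).write(demo_geom).tobytes()[:9])  # start of the raw WKB payload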
| bsd-3-clause |
kenden/lollypop | src/toolbar_end.py | 1 | 7416 | # Copyright (c) 2014-2015 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gio
from lollypop.pop_next import NextPopover
from lollypop.pop_queue import QueueWidget
from lollypop.pop_search import SearchPopover
from lollypop.define import Lp, Shuffle
class ToolbarEnd(Gtk.Bin):
"""
Toolbar end
"""
def __init__(self, app):
"""
Init toolbar
@param app as Gtk.Application
"""
Gtk.Bin.__init__(self)
self._pop_next = NextPopover()
builder = Gtk.Builder()
builder.add_from_resource('/org/gnome/Lollypop/ToolbarEnd.ui')
builder.connect_signals(self)
self.add(builder.get_object('end'))
self._shuffle_btn = builder.get_object('shuffle-button')
self._shuffle_btn_image = builder.get_object('shuffle-button-image')
Lp.settings.connect('changed::shuffle', self._shuffle_btn_aspect)
self._party_btn = builder.get_object('party-button')
party_action = Gio.SimpleAction.new('party', None)
party_action.connect('activate', self._activate_party_button)
app.add_action(party_action)
app.set_accels_for_action("app.party", ["<Control>p"])
search_button = builder.get_object('search-button')
self._search = SearchPopover(self)
self._search.set_relative_to(search_button)
searchAction = Gio.SimpleAction.new('search', None)
searchAction.connect('activate', self._on_search_btn_clicked)
app.add_action(searchAction)
app.set_accels_for_action("app.search", ["<Control>f"])
self._queue_button = builder.get_object('queue-button')
self._settings_button = builder.get_object('settings-button')
Lp.player.connect('party-changed', self._on_party_changed)
def setup_menu_btn(self, menu):
"""
Add an application menu to menu button
            @param menu as Gio.Menu
"""
self._settings_button.show()
self._settings_button.set_menu_model(menu)
def on_status_changed(self, player):
"""
Update buttons on status changed
@param player as Player
"""
if player.is_playing():
# Party mode can be activated
# via Fullscreen class, so check button state
self._party_btn.set_active(player.is_party())
def do_realize(self):
"""
Show popover if needed
"""
Gtk.Bin.do_realize(self)
self._set_shuffle_icon()
def on_next_changed(self, player):
"""
Update buttons on current changed
@param player as Player
"""
        # Do not show the next popover for non-internal tracks, as
        # tags will be read on the fly
if player.next_track.id is not None and\
player.next_track.id >= 0 and\
player.is_playing() and\
(player.is_party() or
Lp.settings.get_enum('shuffle') == Shuffle.TRACKS):
self._pop_next.update()
self._pop_next.set_relative_to(self)
self._pop_next.show()
else:
self._pop_next.hide()
#######################
# PRIVATE #
#######################
def _set_shuffle_icon(self):
"""
Set shuffle icon
"""
shuffle = Lp.settings.get_enum('shuffle')
if shuffle == Shuffle.NONE:
self._shuffle_btn_image.get_style_context().remove_class(
'selected')
self._shuffle_btn_image.set_from_icon_name(
"media-playlist-consecutive-symbolic",
Gtk.IconSize.SMALL_TOOLBAR)
else:
self._shuffle_btn_image.set_from_icon_name(
"media-playlist-shuffle-symbolic",
Gtk.IconSize.SMALL_TOOLBAR)
if shuffle in [Shuffle.TRACKS, Shuffle.TRACKS_ARTIST]:
self._shuffle_btn_image.get_style_context().add_class(
'selected')
else:
self._shuffle_btn_image.get_style_context().remove_class(
'selected')
if shuffle == Shuffle.TRACKS:
if Lp.player.next_track.id is not None and\
not self._pop_next.is_visible():
self._pop_next.set_relative_to(self)
self._pop_next.update()
self._pop_next.show()
elif Lp.player.is_playing():
self._pop_next.set_relative_to(None)
self._pop_next.hide()
def _shuffle_btn_aspect(self, settings, value):
"""
Mark shuffle button as active when shuffle active
@param settings as Gio.Settings, value as str
"""
self._set_shuffle_icon()
def _activate_party_button(self, action=None, param=None):
"""
Activate party button
@param action as Gio.SimpleAction
@param param as GLib.Variant
"""
self._party_btn.set_active(not self._party_btn.get_active())
def _on_search_btn_clicked(self, obj, param=None):
"""
Show search widget on search button clicked
@param obj as Gtk.Button or Gtk.Action
"""
self._search.show()
def _on_queue_btn_clicked(self, button):
"""
Show queue widget on queue button clicked
@param button as Gtk.Button
"""
queue = QueueWidget()
queue.set_relative_to(self._queue_button)
queue.populate()
queue.show()
def _on_party_btn_toggled(self, button):
"""
Set party mode on if party button active
            @param button as Gtk.Button
"""
active = self._party_btn.get_active()
self._shuffle_btn.set_sensitive(not active)
if not Lp.settings.get_value('dark-ui'):
settings = Gtk.Settings.get_default()
settings.set_property("gtk-application-prefer-dark-theme", active)
is_playing = Lp.player.is_playing()
Lp.player.set_party(active)
if not active:
self._pop_next.set_relative_to(None)
self._pop_next.hide()
elif is_playing and not self._pop_next.is_visible():
self._pop_next.set_relative_to(self)
self._pop_next.update()
self._pop_next.show()
def _on_party_changed(self, player, is_party):
"""
On party change, sync toolbar
@param player as Player
            @param is_party as bool
"""
if self._party_btn.get_active() != is_party:
self._activate_party_button()
| gpl-3.0 |
ar45/django | tests/template_tests/filter_tests/test_length.py | 521 | 1900 | from django.template.defaultfilters import length
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LengthTests(SimpleTestCase):
@setup({'length01': '{{ list|length }}'})
def test_length01(self):
output = self.engine.render_to_string('length01', {'list': ['4', None, True, {}]})
self.assertEqual(output, '4')
@setup({'length02': '{{ list|length }}'})
def test_length02(self):
output = self.engine.render_to_string('length02', {'list': []})
self.assertEqual(output, '0')
@setup({'length03': '{{ string|length }}'})
def test_length03(self):
output = self.engine.render_to_string('length03', {'string': ''})
self.assertEqual(output, '0')
@setup({'length04': '{{ string|length }}'})
def test_length04(self):
output = self.engine.render_to_string('length04', {'string': 'django'})
self.assertEqual(output, '6')
@setup({'length05': '{% if string|length == 6 %}Pass{% endif %}'})
def test_length05(self):
output = self.engine.render_to_string('length05', {'string': mark_safe('django')})
self.assertEqual(output, 'Pass')
# Invalid uses that should fail silently.
@setup({'length06': '{{ int|length }}'})
def test_length06(self):
output = self.engine.render_to_string('length06', {'int': 7})
self.assertEqual(output, '0')
@setup({'length07': '{{ None|length }}'})
def test_length07(self):
output = self.engine.render_to_string('length07', {'None': None})
self.assertEqual(output, '0')
class FunctionTests(SimpleTestCase):
def test_string(self):
self.assertEqual(length('1234'), 4)
def test_safestring(self):
self.assertEqual(length(mark_safe('1234')), 4)
def test_list(self):
self.assertEqual(length([1, 2, 3, 4]), 4)
| bsd-3-clause |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/test/test_operator.py | 67 | 16993 | import operator
import unittest
from test import support
class Seq1:
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, i):
return self.lst[i]
def __add__(self, other):
return self.lst + other.lst
def __mul__(self, other):
return self.lst * other
def __rmul__(self, other):
return other * self.lst
class Seq2(object):
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, i):
return self.lst[i]
def __add__(self, other):
return self.lst + other.lst
def __mul__(self, other):
return self.lst * other
def __rmul__(self, other):
return other * self.lst
class OperatorTestCase(unittest.TestCase):
def test_lt(self):
self.assertRaises(TypeError, operator.lt)
self.assertRaises(TypeError, operator.lt, 1j, 2j)
self.assertFalse(operator.lt(1, 0))
self.assertFalse(operator.lt(1, 0.0))
self.assertFalse(operator.lt(1, 1))
self.assertFalse(operator.lt(1, 1.0))
self.assertTrue(operator.lt(1, 2))
self.assertTrue(operator.lt(1, 2.0))
def test_le(self):
self.assertRaises(TypeError, operator.le)
self.assertRaises(TypeError, operator.le, 1j, 2j)
self.assertFalse(operator.le(1, 0))
self.assertFalse(operator.le(1, 0.0))
self.assertTrue(operator.le(1, 1))
self.assertTrue(operator.le(1, 1.0))
self.assertTrue(operator.le(1, 2))
self.assertTrue(operator.le(1, 2.0))
def test_eq(self):
class C(object):
def __eq__(self, other):
raise SyntaxError
self.assertRaises(TypeError, operator.eq)
self.assertRaises(SyntaxError, operator.eq, C(), C())
self.assertFalse(operator.eq(1, 0))
self.assertFalse(operator.eq(1, 0.0))
self.assertTrue(operator.eq(1, 1))
self.assertTrue(operator.eq(1, 1.0))
self.assertFalse(operator.eq(1, 2))
self.assertFalse(operator.eq(1, 2.0))
def test_ne(self):
class C(object):
def __ne__(self, other):
raise SyntaxError
self.assertRaises(TypeError, operator.ne)
self.assertRaises(SyntaxError, operator.ne, C(), C())
self.assertTrue(operator.ne(1, 0))
self.assertTrue(operator.ne(1, 0.0))
self.assertFalse(operator.ne(1, 1))
self.assertFalse(operator.ne(1, 1.0))
self.assertTrue(operator.ne(1, 2))
self.assertTrue(operator.ne(1, 2.0))
def test_ge(self):
self.assertRaises(TypeError, operator.ge)
self.assertRaises(TypeError, operator.ge, 1j, 2j)
self.assertTrue(operator.ge(1, 0))
self.assertTrue(operator.ge(1, 0.0))
self.assertTrue(operator.ge(1, 1))
self.assertTrue(operator.ge(1, 1.0))
self.assertFalse(operator.ge(1, 2))
self.assertFalse(operator.ge(1, 2.0))
def test_gt(self):
self.assertRaises(TypeError, operator.gt)
self.assertRaises(TypeError, operator.gt, 1j, 2j)
self.assertTrue(operator.gt(1, 0))
self.assertTrue(operator.gt(1, 0.0))
self.assertFalse(operator.gt(1, 1))
self.assertFalse(operator.gt(1, 1.0))
self.assertFalse(operator.gt(1, 2))
self.assertFalse(operator.gt(1, 2.0))
def test_abs(self):
self.assertRaises(TypeError, operator.abs)
self.assertRaises(TypeError, operator.abs, None)
self.assertEqual(operator.abs(-1), 1)
self.assertEqual(operator.abs(1), 1)
def test_add(self):
self.assertRaises(TypeError, operator.add)
self.assertRaises(TypeError, operator.add, None, None)
self.assertTrue(operator.add(3, 4) == 7)
def test_bitwise_and(self):
self.assertRaises(TypeError, operator.and_)
self.assertRaises(TypeError, operator.and_, None, None)
self.assertTrue(operator.and_(0xf, 0xa) == 0xa)
def test_concat(self):
self.assertRaises(TypeError, operator.concat)
self.assertRaises(TypeError, operator.concat, None, None)
self.assertTrue(operator.concat('py', 'thon') == 'python')
self.assertTrue(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])
self.assertTrue(operator.concat(Seq1([5, 6]), Seq1([7])) == [5, 6, 7])
self.assertTrue(operator.concat(Seq2([5, 6]), Seq2([7])) == [5, 6, 7])
self.assertRaises(TypeError, operator.concat, 13, 29)
def test_countOf(self):
self.assertRaises(TypeError, operator.countOf)
self.assertRaises(TypeError, operator.countOf, None, None)
self.assertTrue(operator.countOf([1, 2, 1, 3, 1, 4], 3) == 1)
self.assertTrue(operator.countOf([1, 2, 1, 3, 1, 4], 5) == 0)
def test_delitem(self):
a = [4, 3, 2, 1]
self.assertRaises(TypeError, operator.delitem, a)
self.assertRaises(TypeError, operator.delitem, a, None)
self.assertTrue(operator.delitem(a, 1) is None)
self.assertTrue(a == [4, 2, 1])
def test_floordiv(self):
self.assertRaises(TypeError, operator.floordiv, 5)
self.assertRaises(TypeError, operator.floordiv, None, None)
self.assertTrue(operator.floordiv(5, 2) == 2)
def test_truediv(self):
self.assertRaises(TypeError, operator.truediv, 5)
self.assertRaises(TypeError, operator.truediv, None, None)
self.assertTrue(operator.truediv(5, 2) == 2.5)
def test_getitem(self):
a = range(10)
self.assertRaises(TypeError, operator.getitem)
self.assertRaises(TypeError, operator.getitem, a, None)
self.assertTrue(operator.getitem(a, 2) == 2)
def test_indexOf(self):
self.assertRaises(TypeError, operator.indexOf)
self.assertRaises(TypeError, operator.indexOf, None, None)
self.assertTrue(operator.indexOf([4, 3, 2, 1], 3) == 1)
self.assertRaises(ValueError, operator.indexOf, [4, 3, 2, 1], 0)
def test_invert(self):
self.assertRaises(TypeError, operator.invert)
self.assertRaises(TypeError, operator.invert, None)
self.assertEqual(operator.inv(4), -5)
def test_lshift(self):
self.assertRaises(TypeError, operator.lshift)
self.assertRaises(TypeError, operator.lshift, None, 42)
self.assertTrue(operator.lshift(5, 1) == 10)
self.assertTrue(operator.lshift(5, 0) == 5)
self.assertRaises(ValueError, operator.lshift, 2, -1)
def test_mod(self):
self.assertRaises(TypeError, operator.mod)
self.assertRaises(TypeError, operator.mod, None, 42)
self.assertTrue(operator.mod(5, 2) == 1)
def test_mul(self):
self.assertRaises(TypeError, operator.mul)
self.assertRaises(TypeError, operator.mul, None, None)
self.assertTrue(operator.mul(5, 2) == 10)
def test_neg(self):
self.assertRaises(TypeError, operator.neg)
self.assertRaises(TypeError, operator.neg, None)
self.assertEqual(operator.neg(5), -5)
self.assertEqual(operator.neg(-5), 5)
self.assertEqual(operator.neg(0), 0)
self.assertEqual(operator.neg(-0), 0)
def test_bitwise_or(self):
self.assertRaises(TypeError, operator.or_)
self.assertRaises(TypeError, operator.or_, None, None)
self.assertTrue(operator.or_(0xa, 0x5) == 0xf)
def test_pos(self):
self.assertRaises(TypeError, operator.pos)
self.assertRaises(TypeError, operator.pos, None)
self.assertEqual(operator.pos(5), 5)
self.assertEqual(operator.pos(-5), -5)
self.assertEqual(operator.pos(0), 0)
self.assertEqual(operator.pos(-0), 0)
def test_pow(self):
self.assertRaises(TypeError, operator.pow)
self.assertRaises(TypeError, operator.pow, None, None)
self.assertEqual(operator.pow(3,5), 3**5)
self.assertEqual(operator.__pow__(3,5), 3**5)
self.assertRaises(TypeError, operator.pow, 1)
self.assertRaises(TypeError, operator.pow, 1, 2, 3)
def test_rshift(self):
self.assertRaises(TypeError, operator.rshift)
self.assertRaises(TypeError, operator.rshift, None, 42)
self.assertTrue(operator.rshift(5, 1) == 2)
self.assertTrue(operator.rshift(5, 0) == 5)
self.assertRaises(ValueError, operator.rshift, 2, -1)
def test_contains(self):
self.assertRaises(TypeError, operator.contains)
self.assertRaises(TypeError, operator.contains, None, None)
self.assertTrue(operator.contains(range(4), 2))
self.assertFalse(operator.contains(range(4), 5))
def test_setitem(self):
a = list(range(3))
self.assertRaises(TypeError, operator.setitem, a)
self.assertRaises(TypeError, operator.setitem, a, None, None)
self.assertTrue(operator.setitem(a, 0, 2) is None)
self.assertTrue(a == [2, 1, 2])
self.assertRaises(IndexError, operator.setitem, a, 4, 2)
def test_sub(self):
self.assertRaises(TypeError, operator.sub)
self.assertRaises(TypeError, operator.sub, None, None)
self.assertTrue(operator.sub(5, 2) == 3)
def test_truth(self):
class C(object):
def __bool__(self):
raise SyntaxError
self.assertRaises(TypeError, operator.truth)
self.assertRaises(SyntaxError, operator.truth, C())
self.assertTrue(operator.truth(5))
self.assertTrue(operator.truth([0]))
self.assertFalse(operator.truth(0))
self.assertFalse(operator.truth([]))
def test_bitwise_xor(self):
self.assertRaises(TypeError, operator.xor)
self.assertRaises(TypeError, operator.xor, None, None)
self.assertTrue(operator.xor(0xb, 0xc) == 0x7)
def test_is(self):
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.assertRaises(TypeError, operator.is_)
self.assertTrue(operator.is_(a, b))
self.assertFalse(operator.is_(a,c))
def test_is_not(self):
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.assertRaises(TypeError, operator.is_not)
self.assertFalse(operator.is_not(a, b))
self.assertTrue(operator.is_not(a,c))
def test_attrgetter(self):
class A:
pass
a = A()
a.name = 'arthur'
f = operator.attrgetter('name')
self.assertEqual(f(a), 'arthur')
f = operator.attrgetter('rank')
self.assertRaises(AttributeError, f, a)
self.assertRaises(TypeError, operator.attrgetter, 2)
self.assertRaises(TypeError, operator.attrgetter)
# multiple gets
record = A()
record.x = 'X'
record.y = 'Y'
record.z = 'Z'
self.assertEqual(operator.attrgetter('x','z','y')(record), ('X', 'Z', 'Y'))
self.assertRaises(TypeError, operator.attrgetter, ('x', (), 'y'))
class C(object):
def __getattr__(self, name):
raise SyntaxError
self.assertRaises(SyntaxError, operator.attrgetter('foo'), C())
# recursive gets
a = A()
a.name = 'arthur'
a.child = A()
a.child.name = 'thomas'
f = operator.attrgetter('child.name')
self.assertEqual(f(a), 'thomas')
self.assertRaises(AttributeError, f, a.child)
f = operator.attrgetter('name', 'child.name')
self.assertEqual(f(a), ('arthur', 'thomas'))
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertRaises(AttributeError, f, a)
f = operator.attrgetter('child.')
self.assertRaises(AttributeError, f, a)
f = operator.attrgetter('.child')
self.assertRaises(AttributeError, f, a)
a.child.child = A()
a.child.child.name = 'johnson'
f = operator.attrgetter('child.child.name')
self.assertEqual(f(a), 'johnson')
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertEqual(f(a), ('arthur', 'thomas', 'johnson'))
def test_itemgetter(self):
a = 'ABCDE'
f = operator.itemgetter(2)
self.assertEqual(f(a), 'C')
f = operator.itemgetter(10)
self.assertRaises(IndexError, f, a)
class C(object):
def __getitem__(self, name):
raise SyntaxError
self.assertRaises(SyntaxError, operator.itemgetter(42), C())
f = operator.itemgetter('name')
self.assertRaises(TypeError, f, a)
self.assertRaises(TypeError, operator.itemgetter)
d = dict(key='val')
f = operator.itemgetter('key')
self.assertEqual(f(d), 'val')
f = operator.itemgetter('nonkey')
self.assertRaises(KeyError, f, d)
# example used in the docs
inventory = [('apple', 3), ('banana', 2), ('pear', 5), ('orange', 1)]
getcount = operator.itemgetter(1)
self.assertEqual(list(map(getcount, inventory)), [3, 2, 5, 1])
self.assertEqual(sorted(inventory, key=getcount),
[('orange', 1), ('banana', 2), ('apple', 3), ('pear', 5)])
# multiple gets
data = list(map(str, range(20)))
self.assertEqual(operator.itemgetter(2,10,5)(data), ('2', '10', '5'))
self.assertRaises(TypeError, operator.itemgetter(2, 'x', 5), data)
def test_methodcaller(self):
self.assertRaises(TypeError, operator.methodcaller)
class A:
def foo(self, *args, **kwds):
return args[0] + args[1]
def bar(self, f=42):
return f
a = A()
f = operator.methodcaller('foo')
self.assertRaises(IndexError, f, a)
f = operator.methodcaller('foo', 1, 2)
self.assertEqual(f(a), 3)
f = operator.methodcaller('bar')
self.assertEqual(f(a), 42)
self.assertRaises(TypeError, f, a, a)
f = operator.methodcaller('bar', f=5)
self.assertEqual(f(a), 5)
def test_inplace(self):
class C(object):
def __iadd__ (self, other): return "iadd"
def __iand__ (self, other): return "iand"
def __ifloordiv__(self, other): return "ifloordiv"
def __ilshift__ (self, other): return "ilshift"
def __imod__ (self, other): return "imod"
def __imul__ (self, other): return "imul"
def __ior__ (self, other): return "ior"
def __ipow__ (self, other): return "ipow"
def __irshift__ (self, other): return "irshift"
def __isub__ (self, other): return "isub"
def __itruediv__ (self, other): return "itruediv"
def __ixor__ (self, other): return "ixor"
def __getitem__(self, other): return 5 # so that C is a sequence
c = C()
self.assertEqual(operator.iadd (c, 5), "iadd")
self.assertEqual(operator.iand (c, 5), "iand")
self.assertEqual(operator.ifloordiv(c, 5), "ifloordiv")
self.assertEqual(operator.ilshift (c, 5), "ilshift")
self.assertEqual(operator.imod (c, 5), "imod")
self.assertEqual(operator.imul (c, 5), "imul")
self.assertEqual(operator.ior (c, 5), "ior")
self.assertEqual(operator.ipow (c, 5), "ipow")
self.assertEqual(operator.irshift (c, 5), "irshift")
self.assertEqual(operator.isub (c, 5), "isub")
self.assertEqual(operator.itruediv (c, 5), "itruediv")
self.assertEqual(operator.ixor (c, 5), "ixor")
self.assertEqual(operator.iconcat (c, c), "iadd")
self.assertEqual(operator.__iadd__ (c, 5), "iadd")
self.assertEqual(operator.__iand__ (c, 5), "iand")
self.assertEqual(operator.__ifloordiv__(c, 5), "ifloordiv")
self.assertEqual(operator.__ilshift__ (c, 5), "ilshift")
self.assertEqual(operator.__imod__ (c, 5), "imod")
self.assertEqual(operator.__imul__ (c, 5), "imul")
self.assertEqual(operator.__ior__ (c, 5), "ior")
self.assertEqual(operator.__ipow__ (c, 5), "ipow")
self.assertEqual(operator.__irshift__ (c, 5), "irshift")
self.assertEqual(operator.__isub__ (c, 5), "isub")
self.assertEqual(operator.__itruediv__ (c, 5), "itruediv")
self.assertEqual(operator.__ixor__ (c, 5), "ixor")
self.assertEqual(operator.__iconcat__ (c, c), "iadd")
def test_main(verbose=None):
import sys
test_classes = (
OperatorTestCase,
)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
| apache-2.0 |
schlueter/ansible | test/runner/lib/sanity/pylint.py | 9 | 9399 | """Sanity test using pylint."""
from __future__ import absolute_import, print_function
import collections
import json
import os
import datetime
try:
import ConfigParser as configparser
except ImportError:
import configparser
from lib.sanity import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
)
from lib.util import (
SubprocessError,
run_command,
display,
find_executable,
)
from lib.executor import (
SUPPORTED_PYTHON_VERSIONS,
)
from lib.ansible_util import (
ansible_environment,
)
from lib.config import (
SanityConfig,
)
from lib.test import (
calculate_confidence,
calculate_best_confidence,
)
PYLINT_SKIP_PATH = 'test/sanity/pylint/skip.txt'
PYLINT_IGNORE_PATH = 'test/sanity/pylint/ignore.txt'
UNSUPPORTED_PYTHON_VERSIONS = (
'2.6',
)
class PylintTest(SanitySingleVersion):
"""Sanity test using pylint."""
def __init__(self):
super(PylintTest, self).__init__()
self.plugin_dir = 'test/sanity/pylint/plugins'
self.plugin_names = sorted(p[0] for p in [os.path.splitext(p) for p in os.listdir(self.plugin_dir)] if p[1] == '.py' and p[0] != '__init__')
def test(self, args, targets):
"""
:type args: SanityConfig
:type targets: SanityTargets
:rtype: SanityResult
"""
if args.python_version in UNSUPPORTED_PYTHON_VERSIONS:
display.warning('Skipping pylint on unsupported Python version %s.' % args.python_version)
return SanitySkipped(self.name)
with open(PYLINT_SKIP_PATH, 'r') as skip_fd:
skip_paths = skip_fd.read().splitlines()
invalid_ignores = []
supported_versions = set(SUPPORTED_PYTHON_VERSIONS) - set(UNSUPPORTED_PYTHON_VERSIONS)
supported_versions = set([v.split('.')[0] for v in supported_versions]) | supported_versions
with open(PYLINT_IGNORE_PATH, 'r') as ignore_fd:
ignore_entries = ignore_fd.read().splitlines()
ignore = collections.defaultdict(dict)
line = 0
for ignore_entry in ignore_entries:
line += 1
if ' ' not in ignore_entry:
invalid_ignores.append((line, 'Invalid syntax'))
continue
path, code = ignore_entry.split(' ', 1)
if not os.path.exists(path):
invalid_ignores.append((line, 'Remove "%s" since it does not exist' % path))
continue
if ' ' in code:
code, version = code.split(' ', 1)
if version not in supported_versions:
invalid_ignores.append((line, 'Invalid version: %s' % version))
continue
if version != args.python_version and version != args.python_version.split('.')[0]:
continue # ignore version specific entries for other versions
ignore[path][code] = line
skip_paths_set = set(skip_paths)
paths = sorted(i.path for i in targets.include if (os.path.splitext(i.path)[1] == '.py' or i.path.startswith('bin/')) and i.path not in skip_paths_set)
contexts = {}
remaining_paths = set(paths)
def add_context(available_paths, context_name, context_filter):
"""
:type available_paths: set[str]
:type context_name: str
:type context_filter: (str) -> bool
"""
filtered_paths = set(p for p in available_paths if context_filter(p))
contexts[context_name] = sorted(filtered_paths)
available_paths -= filtered_paths
add_context(remaining_paths, 'ansible-test', lambda p: p.startswith('test/runner/'))
add_context(remaining_paths, 'units', lambda p: p.startswith('test/units/'))
add_context(remaining_paths, 'test', lambda p: p.startswith('test/'))
add_context(remaining_paths, 'hacking', lambda p: p.startswith('hacking/'))
add_context(remaining_paths, 'modules', lambda p: p.startswith('lib/ansible/modules/'))
add_context(remaining_paths, 'module_utils', lambda p: p.startswith('lib/ansible/module_utils/'))
add_context(remaining_paths, 'ansible', lambda p: True)
messages = []
context_times = []
test_start = datetime.datetime.utcnow()
for context in sorted(contexts):
context_paths = contexts[context]
if not context_paths:
continue
context_start = datetime.datetime.utcnow()
messages += self.pylint(args, context, context_paths)
context_end = datetime.datetime.utcnow()
context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))
test_end = datetime.datetime.utcnow()
for context_time in context_times:
display.info(context_time, verbosity=4)
display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)
errors = [SanityMessage(
message=m['message'].replace('\n', ' '),
path=m['path'],
line=int(m['line']),
column=int(m['column']),
level=m['type'],
code=m['symbol'],
) for m in messages]
if args.explain:
return SanitySuccess(self.name)
line = 0
filtered = []
for error in errors:
if error.code in ignore[error.path]:
ignore[error.path][error.code] = None # error ignored, clear line number of ignore entry to track usage
else:
filtered.append(error) # error not ignored
errors = filtered
for invalid_ignore in invalid_ignores:
errors.append(SanityMessage(
code='A201',
message=invalid_ignore[1],
path=PYLINT_IGNORE_PATH,
line=invalid_ignore[0],
column=1,
confidence=calculate_confidence(PYLINT_IGNORE_PATH, line, args.metadata) if args.metadata.changes else None,
))
for path in skip_paths:
line += 1
if not os.path.exists(path):
# Keep files out of the list which no longer exist in the repo.
errors.append(SanityMessage(
code='A101',
message='Remove "%s" since it does not exist' % path,
path=PYLINT_SKIP_PATH,
line=line,
column=1,
confidence=calculate_best_confidence(((PYLINT_SKIP_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
))
for path in paths:
if path not in ignore:
continue
for code in ignore[path]:
line = ignore[path][code]
if not line:
continue
errors.append(SanityMessage(
code='A102',
message='Remove since "%s" passes "%s" pylint test' % (path, code),
path=PYLINT_IGNORE_PATH,
line=line,
column=1,
confidence=calculate_best_confidence(((PYLINT_IGNORE_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
))
if errors:
return SanityFailure(self.name, messages=errors)
return SanitySuccess(self.name)
def pylint(self, args, context, paths):
"""
:type args: SanityConfig
:param context: str
:param paths: list[str]
:return: list[dict[str, str]]
"""
rcfile = 'test/sanity/pylint/config/%s' % context
if not os.path.exists(rcfile):
rcfile = 'test/sanity/pylint/config/default'
parser = configparser.SafeConfigParser()
parser.read(rcfile)
if parser.has_section('ansible-test'):
config = dict(parser.items('ansible-test'))
else:
config = dict()
disable_plugins = set(i.strip() for i in config.get('disable-plugins', '').split(',') if i)
load_plugins = set(self.plugin_names) - disable_plugins
cmd = [
'python%s' % args.python_version,
find_executable('pylint'),
'--jobs', '0',
'--reports', 'n',
'--max-line-length', '160',
'--rcfile', rcfile,
'--output-format', 'json',
'--load-plugins', ','.join(load_plugins),
] + paths
env = ansible_environment(args)
env['PYTHONPATH'] += '%s%s' % (os.pathsep, self.plugin_dir)
if paths:
try:
stdout, stderr = run_command(args, cmd, env=env, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr or status >= 32:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
else:
stdout = None
if not args.explain and stdout:
messages = json.loads(stdout)
else:
messages = []
return messages
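# Editor's note (illustrative, not part of the original test): entries in
# test/sanity/pylint/ignore.txt are parsed by test() above one per line as
# "<path> <code>[ <python-version>]", for example:
#
#   lib/ansible/modules/example_module.py missing-docstring
#   lib/ansible/modules/example_module.py undefined-variable 2.7
#
# The optional trailing version limits an ignore entry to a single Python major
# or major.minor version; the paths and codes shown here are hypothetical.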
| gpl-3.0 |
Refefer/pylearn2 | pylearn2/devtools/list_files.py | 45 | 1772 | """Code for listing files that belong to the library."""
import logging
import pylearn2
import os
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
logger = logging.getLogger(__name__)
def list_files(suffix=""):
"""
Returns a list of all files in pylearn2 with the given suffix.
Parameters
----------
suffix : str
Returns
-------
file_list : list
A list of all files in pylearn2 whose filepath ends with `suffix`
"""
pl2_path, = pylearn2.__path__
file_list = _list_files(pl2_path, suffix)
return file_list
def _list_files(path, suffix=""):
"""
.. todo::
WRITEME
Parameters
----------
path : str
a filepath
suffix : str
Returns
-------
l : list
A list of all files ending in `suffix` contained within `path`.
(If `path` is a file rather than a directory, it is considered
to "contain" itself)
"""
if os.path.isdir(path):
incomplete = os.listdir(path)
complete = [os.path.join(path, entry) for entry in incomplete]
lists = [_list_files(subpath, suffix) for subpath in complete]
flattened = []
for l in lists:
for elem in l:
flattened.append(elem)
return flattened
else:
assert os.path.exists(path), "couldn't find file '%s'" % path
if path.endswith(suffix):
return [path]
return []
if __name__ == '__main__':
# Print all .py files in the library
result = list_files('.py')
for path in result:
logger.info(path)
| bsd-3-clause |
arnoldthebat/linux-stable | scripts/gdb/linux/lists.py | 630 | 2897 | #
# gdb helper commands and functions for Linux kernel debugging
#
# list tools
#
# Copyright (c) Thiebaud Weksteen, 2015
#
# Authors:
# Thiebaud Weksteen <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
list_head = utils.CachedType("struct list_head")
def list_check(head):
nb = 0
if (head.type == list_head.get_type().pointer()):
head = head.dereference()
elif (head.type != list_head.get_type()):
raise gdb.GdbError('argument must be of type (struct list_head [*])')
c = head
try:
gdb.write("Starting with: {}\n".format(c))
except gdb.MemoryError:
gdb.write('head is not accessible\n')
return
while True:
p = c['prev'].dereference()
n = c['next'].dereference()
try:
if p['next'] != c.address:
gdb.write('prev.next != current: '
'current@{current_addr}={current} '
'prev@{p_addr}={p}\n'.format(
current_addr=c.address,
current=c,
p_addr=p.address,
p=p,
))
return
except gdb.MemoryError:
gdb.write('prev is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
try:
if n['prev'] != c.address:
gdb.write('next.prev != current: '
'current@{current_addr}={current} '
'next@{n_addr}={n}\n'.format(
current_addr=c.address,
current=c,
n_addr=n.address,
n=n,
))
return
except gdb.MemoryError:
gdb.write('next is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
c = n
nb += 1
if c == head:
gdb.write("list is consistent: {} node(s)\n".format(nb))
return
class LxListChk(gdb.Command):
"""Verify a list consistency"""
def __init__(self):
super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
raise gdb.GdbError("lx-list-check takes one argument")
list_check(gdb.parse_and_eval(argv[0]))
LxListChk()
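# Illustrative usage (editor's addition, not part of the kernel sources): after
# the kernel's gdb helper scripts are loaded (e.g. via vmlinux-gdb.py), the
# command registered above can be run from the gdb prompt, for instance:
#
#   (gdb) lx-list-check &modules
#
# It walks the given struct list_head and prints either the total node count or
# the first prev/next inconsistency it finds.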
| gpl-2.0 |
AIML/scikit-learn | sklearn/linear_model/randomized_l1.py | 33 | 23358 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
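# Editor's note (illustrative): ``_resample_model`` accumulates, over
# ``n_resampling`` randomized fits, the indicator of each feature being selected
# and divides by ``n_resampling``, so the returned scores lie in [0, 1] and can
# be read as empirical selection frequencies, which the stability-selection
# estimators defined below threshold via ``selection_threshold``.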
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
    # XXX: the two functions below are copy/pasted from feature_selection;
    # should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
    Randomized Lasso works by resampling the training data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article, which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
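    >>> # A minimal fitting sketch; ``X`` and ``y`` are assumed to be a small
    >>> # regression dataset and are not defined in this docstring:
    >>> randomized_lasso.fit(X, y)                       # doctest: +SKIP
    >>> selected_mask = randomized_lasso.get_support()   # doctest: +SKIP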
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the training data and
    computing a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
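    >>> # Hypothetical continuation; ``X`` and ``y`` are assumed to be a
    >>> # labelled classification dataset, not defined in this docstring:
    >>> randomized_logistic.fit(X, y)                    # doctest: +SKIP
    >>> X_reduced = randomized_logistic.transform(X)     # doctest: +SKIP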
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
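    A rough usage sketch, assuming ``X`` and ``y`` are regression data that
    are not defined here:
        alphas_grid, scores_path = lasso_stability_path(X, y, n_resampling=50)
        # scores_path has shape (n_features, len(alphas_grid)); plotting its
        # rows against alphas_grid gives one stability curve per feature.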
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
dulems/hue | desktop/core/ext-py/MySQL-python-1.2.5/setup_common.py | 47 | 1088 | try:
# Python 2.x
from ConfigParser import SafeConfigParser
except ImportError:
# Python 3.x
from configparser import ConfigParser as SafeConfigParser
def get_metadata_and_options():
config = SafeConfigParser()
config.read(['metadata.cfg', 'site.cfg'])
metadata = dict(config.items('metadata'))
options = dict(config.items('options'))
metadata['py_modules'] = list(filter(None, metadata['py_modules'].split('\n')))
metadata['classifiers'] = list(filter(None, metadata['classifiers'].split('\n')))
return metadata, options
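# Illustrative sketch of the layout get_metadata_and_options() expects from
# metadata.cfg / site.cfg; the concrete option names below are assumptions,
# not taken from this file:
#   [metadata]
#   py_modules = _mysql
#   [options]
#   embedded = False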
def enabled(options, option):
value = options[option]
s = value.lower()
if s in ('yes','true','1','y'):
return True
elif s in ('no', 'false', '0', 'n'):
return False
else:
raise ValueError("Unknown value %s for option %s" % (value, option))
def create_release_file(metadata):
rel = open("MySQLdb/release.py",'w')
rel.write("""
__author__ = "%(author)s <%(author_email)s>"
version_info = %(version_info)s
__version__ = "%(version)s"
""" % metadata)
rel.close()
| apache-2.0 |
chouseknecht/ansible | lib/ansible/modules/cloud/google/gcp_compute_ssl_certificate.py | 5 | 11789 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_ssl_certificate
description:
- An SslCertificate resource, used for HTTPS load balancing. This resource provides
a mechanism to upload an SSL key and certificate to the load balancer to serve secure
connections from the user.
short_description: Creates a GCP SslCertificate
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
certificate:
description:
- The certificate in PEM format.
- The certificate chain must be no greater than 5 certs long.
- The chain must include at least one intermediate cert.
required: true
type: str
description:
description:
- An optional description of this resource.
required: false
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: false
type: str
private_key:
description:
- The write-only private key in PEM format.
required: true
type: str
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates)'
- 'Official Documentation: U(https://cloud.google.com/load-balancing/docs/ssl-certificates)'
'''
EXAMPLES = '''
- name: create a SSL certificate
gcp_compute_ssl_certificate:
name: test_object
description: A certificate for testing. Do not use this certificate in production
certificate: |-
-----BEGIN CERTIFICATE-----
MIICqjCCAk+gAwIBAgIJAIuJ+0352Kq4MAoGCCqGSM49BAMCMIGwMQswCQYDVQQG
EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjERMA8GA1UEBwwIS2lya2xhbmQxFTAT
BgNVBAoMDEdvb2dsZSwgSW5jLjEeMBwGA1UECwwVR29vZ2xlIENsb3VkIFBsYXRm
b3JtMR8wHQYDVQQDDBZ3d3cubXktc2VjdXJlLXNpdGUuY29tMSEwHwYJKoZIhvcN
AQkBFhJuZWxzb25hQGdvb2dsZS5jb20wHhcNMTcwNjI4MDQ1NjI2WhcNMjcwNjI2
MDQ1NjI2WjCBsDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xETAP
BgNVBAcMCEtpcmtsYW5kMRUwEwYDVQQKDAxHb29nbGUsIEluYy4xHjAcBgNVBAsM
FUdvb2dsZSBDbG91ZCBQbGF0Zm9ybTEfMB0GA1UEAwwWd3d3Lm15LXNlY3VyZS1z
aXRlLmNvbTEhMB8GCSqGSIb3DQEJARYSbmVsc29uYUBnb29nbGUuY29tMFkwEwYH
KoZIzj0CAQYIKoZIzj0DAQcDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ
4mzkzTv0dXyB750fOGN02HtkpBOZzzvUARTR10JQoSe2/5PIwaNQME4wHQYDVR0O
BBYEFKIQC3A2SDpxcdfn0YLKineDNq/BMB8GA1UdIwQYMBaAFKIQC3A2SDpxcdfn
0YLKineDNq/BMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSQAwRgIhALs4vy+O
M3jcqgA4fSW/oKw6UJxp+M6a+nGMX+UJR3YgAiEAvvl39QRVAiv84hdoCuyON0lJ
zqGNhIPGq2ULqXKK8BY=
-----END CERTIFICATE-----
private_key: |-
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIObtRo8tkUqoMjeHhsOh2ouPpXCgBcP+EDxZCB/tws15oAoGCCqGSM49
AwEHoUQDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ4mzkzTv0dXyB750f
OGN02HtkpBOZzzvUARTR10JQoSe2/5PIwQ==
-----END EC PRIVATE KEY-----
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
certificate:
description:
- The certificate in PEM format.
- The certificate chain must be no greater than 5 certs long.
- The chain must include at least one intermediate cert.
returned: success
type: str
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
privateKey:
description:
- The write-only private key in PEM format.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
certificate=dict(required=True, type='str'),
description=dict(type='str'),
name=dict(type='str'),
private_key=dict(required=True, type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#sslCertificate'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
delete(module, self_link(module), kind)
create(module, collection(module), kind)
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#sslCertificate',
u'certificate': module.params.get('certificate'),
u'description': module.params.get('description'),
u'name': module.params.get('name'),
u'privateKey': module.params.get('private_key'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/sslCertificates/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/sslCertificates".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'certificate': response.get(u'certificate'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': response.get(u'description'),
u'id': response.get(u'id'),
u'name': response.get(u'name'),
u'privateKey': module.params.get('private_key'),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#sslCertificate')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
if __name__ == '__main__':
main()
| gpl-3.0 |
Caoimhinmg/PmagPy | dialogs/pmag_basic_dialogs_native3.py | 1 | 142122 | #!/usr/bin/env pythonw
#--------------------------------------------------------------
# converting magnetometer files to MagIC format
#--------------------------------------------------------------
import wx
import wx.grid
import os
import subprocess
import sys
from pmagpy import pmag
from pmagpy import ipmag
from dialogs import pmag_widgets as pw
from dialogs import drop_down_menus2 as drop_down_menus
from dialogs import drop_down_menus3
from dialogs import magic_grid2 as magic_grid
sys.path.append("../programs") #later fix imports further down in code to "from programs import ...." also imports should be moved to top of file unless import is so large it slows down the program
from programs.conversion_scripts import tdt_magic
from programs.conversion_scripts import generic_magic
from programs.conversion_scripts import sio_magic
from programs.conversion_scripts import cit_magic
from programs.conversion_scripts import huji_magic
from programs.conversion_scripts import _2g_bin_magic
from programs.conversion_scripts import ldeo_magic
from programs.conversion_scripts import iodp_srm_magic
from programs.conversion_scripts import iodp_dscr_magic
from programs.conversion_scripts import pmd_magic
from programs.conversion_scripts import jr6_txt_magic
from programs.conversion_scripts import jr6_jr6_magic
from programs.conversion_scripts import iodp_jr6_magic
from programs.conversion_scripts import utrecht_magic
from programs.conversion_scripts import bgc_magic
from pmagpy.mapping import map_magic
class import_magnetometer_data(wx.Dialog):
def __init__(self, parent, id, title, WD):
wx.Dialog.__init__(self, parent, id, title, name='import_magnetometer_data')
self.WD = WD
self.InitUI()
self.SetTitle(title)
self.parent = parent
def InitUI(self):
self.panel = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
formats = ['generic format','SIO format','CIT format','2g-binary format',
'HUJI format','LDEO format','IODP SRM (csv) format','PMD (ascii) format',
'TDT format', 'JR6 format', 'Utrecht format', 'BGC format']
sbs = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, 'step 1: choose file format'), wx.VERTICAL)
sbs.AddSpacer(5)
radio_buttons = []
for fmt in formats:
radio_button = wx.RadioButton(self.panel, -1, label=fmt, name=fmt)
radio_buttons.append(radio_button)
sbs.Add(radio_button, flag=wx.BOTTOM, border=5)
if len(radio_buttons) == 1:
sbs.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)
#sbs.AddSpacer(5)
self.Bind(wx.EVT_RADIOBUTTON, self.OnRadioButtonSelect, radio_button)
radio_buttons[0].SetValue(True)
self.checked_rb = radio_buttons[0]
#---------------------
# OK/Cancel buttons
#---------------------
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(self.panel, id=-1, label='Import file')
self.okButton.SetDefault()
self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.nextButton = wx.Button(self.panel, id=-1, label='Go to next step')
self.Bind(wx.EVT_BUTTON, self.on_nextButton, self.nextButton)
hboxok.Add(self.okButton)
hboxok.AddSpacer(20)
hboxok.Add(self.cancelButton )
hboxok.AddSpacer(20)
hboxok.Add(self.nextButton )
#-----------------------
# design the frame
#-----------------------
vbox.AddSpacer(10)
vbox.Add(sbs)
vbox.AddSpacer(10)
vbox.Add(hboxok)
vbox.AddSpacer(10)
hbox1=wx.BoxSizer(wx.HORIZONTAL)
hbox1.AddSpacer(10)
hbox1.Add(vbox)
hbox1.AddSpacer(10)
self.panel.SetSizer(hbox1)
hbox1.Fit(self)
#-----------------------
# button methods
#-----------------------
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Show()
self.Parent.Raise()
def on_okButton(self,event):
os.chdir(self.WD)
file_type = self.checked_rb.Label.split()[0] # extracts name of the checked radio button
if file_type == 'generic':
dia = convert_generic_files_to_MagIC(self, self.WD, "PmagPy generic file conversion")
elif file_type == 'SIO':
dia = convert_SIO_files_to_MagIC(self, self.WD, "PmagPy SIO file conversion")
elif file_type == 'CIT':
dia = convert_CIT_files_to_MagIC(self, self.WD, "PmagPy CIT file conversion")
elif file_type == '2g-binary':
dia = convert_2g_binary_files_to_MagIC(self, self.WD, "PmagPy 2g-binary file conversion")
elif file_type == 'HUJI':
dia = convert_HUJI_files_to_MagIC(self, self.WD, "PmagPy HUJI file conversion")
elif file_type == 'LDEO':
dia = convert_LDEO_files_to_MagIC(self, self.WD, "PmagPy LDEO file conversion")
elif file_type == 'IODP':
dia = convert_IODP_files_to_MagIC(self, self.WD, "PmagPy IODP csv conversion")
elif file_type == 'PMD':
dia = convert_PMD_files_to_MagIC(self, self.WD, "PmagPy PMD conversion")
elif file_type == 'BGC':
dia = convert_BGC_files_to_magic(self, self.WD, "PmagPy BGC conversion")
elif file_type == 'TDT':
tdt_magic.convert(False, self.WD)
return True
elif file_type == 'JR6':
dia = convert_JR6_files_to_MagIC(self, self.WD)
elif file_type == 'Utrecht':
dia = convert_Utrecht_files_to_MagIC(self, self.WD, "PmagPy Utrecht conversion")
dia.Center()
dia.Show()
def OnRadioButtonSelect(self, event):
self.checked_rb = event.GetEventObject()
def on_nextButton(self,event):
self.Destroy()
combine_dia = combine_magic_dialog(self.WD, self.parent)
combine_dia.Show()
combine_dia.Center()
#--------------------------------------------------------------
# dialog for combine_magic.py
#--------------------------------------------------------------
class combine_magic_dialog(wx.Frame):
""""""
title = "Combine magic files"
def __init__(self, WD, parent):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self) #wx.Panel(self)
self.panel.SetScrollbars(20, 20, 50, 50)
self.WD=WD
self.InitUI()
def InitUI(self):
pnl = self.panel
#---sizer information ----
TEXT="Step 2: \nCombine different MagIC formatted files to one file named 'measurements.txt'"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.combine_files(self, ".magic")
#------------------
self.okButton = wx.Button(self.panel, wx.ID_OK, "&OK")
self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.nextButton = wx.Button(self.panel, id=-1, label='Go to last step')
self.Bind(wx.EVT_BUTTON, self.on_nextButton, self.nextButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
hboxok.Add(self.okButton)
hboxok.Add(self.cancelButton, flag=wx.LEFT, border=5)
hboxok.Add(self.nextButton, flag=wx.LEFT, border=5)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT)
vbox.AddSpacer(10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT)
vbox.AddSpacer(10)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(5)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_cancelButton(self,event):
self.Parent.Show()
self.Parent.Raise()
self.Destroy()
# make sure contribution is created
self.Parent.get_wd_data()
def on_nextButton(self, event):
combine_dia = combine_everything_dialog(self.WD, self.Parent)
combine_dia.Show()
combine_dia.Center()
self.Destroy()
def on_okButton(self,event):
os.chdir(self.WD) # make sure OS is working in self.WD (Windows issue)
files_text=self.bSizer0.file_paths.GetValue()
files=files_text.strip('\n').replace(" ","")
if files:
files = files.split('\n')
files = [os.path.join(self.WD, f) for f in files]
COMMAND="combine_magic.py -F measurements.txt -f %s"%(" ".join(files) )
if ipmag.combine_magic(files, 'measurements.txt', data_model=3.0):
MSG="%i file are merged to one MagIC format file:\n measurements.txt.\n\nSee Terminal/message window for errors"%(len(files))
dlg1 = wx.MessageDialog(None,caption="Message:", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
else:
pw.simple_warning()
return
self.on_nextButton(event)
self.Destroy()
class combine_everything_dialog(wx.Frame):
""""""
title = "Combine MagIC files"
def __init__(self, WD, parent):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self) #wx.Panel(self)
self.panel.SetScrollbars(20, 20, 50, 50)
self.WD=WD
self.InitUI()
def InitUI(self):
pnl = self.panel
#---sizer information ----
TEXT="Step 3: \nCombine different MagIC formatted files to one file name (if necessary). All files should be from the working directory."
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_LEFT)
possible_file_dias = ['specimens.txt', 'samples.txt', 'sites.txt', 'locations.txt']
self.file_dias = []
all_files = os.listdir(self.WD)
for dia in possible_file_dias:
for f in all_files:
if dia in f:
bSizer = pw.combine_files(self, dia)
self.file_dias.append(bSizer)
break
if not self.file_dias:
file_string = ', '.join(possible_file_dias)
MSG = "You have no more files that can be combined.\nFile types that can be combined are:\n{}\nNote that your file name must end with the file type, i.e.:\nsomething_something_specimens.txt".format(file_string)
dlg = wx.MessageDialog(None,caption="Message:", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
# deleted by rshaar 10.11.2015
#self.Destroy()
#------------------
self.okButton = wx.Button(self.panel, wx.ID_OK, "&OK")
self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
hboxok.Add(self.okButton)
hboxok.Add(self.cancelButton, flag=wx.LEFT, border=5 )
#file_dias = [self.bSizer0, self.bSizer1, self.bSizer2]
if len(self.file_dias) == 4:
num_cols, num_rows = 2, 2
else:
num_cols = min(len(self.file_dias), 3)
num_rows = 2 if len(self.file_dias) > 3 else 1
hboxfiles = wx.GridSizer(num_rows, num_cols, 1, 1)
hboxfiles.AddMany(self.file_dias)
#hboxfiles = wx.BoxSizer(wx.HORIZONTAL)
#hboxfiles.AddMany([self.bSizer0, self.bSizer1, self.bSizer2])
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.BOTTOM, border=5)
vbox.AddSpacer(10)
vbox.Add(hboxfiles, flag=wx.ALIGN_LEFT)
vbox.AddSpacer(10)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(5)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_cancelButton(self,event):
self.Parent.Show()
self.Parent.Raise()
self.Destroy()
# make sure contribution is created
self.Parent.get_wd_data()
def on_okButton(self,event):
os.chdir(self.WD)
success = True
new_files = []
# go through each pw.combine_files sizer, extract the files, try to combine them into one:
for bSizer in self.file_dias:
full_list = bSizer.file_paths.GetValue()
file_name = bSizer.text
files = full_list.strip('\n').replace(" ", "")
if files:
files = files.split('\n')
if ipmag.combine_magic(files, file_name, data_model=3.0):
new_files.append(file_name) # add to the list of successfully combined files
else:
success = False
if success:
new = '\n' + '\n'.join(new_files)
MSG = "Created new file(s): {} \nSee Terminal/message window for details and errors".format(new)
dlg1 = wx.MessageDialog(None,caption="Message:", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
self.Parent.Show()
self.Parent.Raise()
self.Destroy()
# make sure contribution is created
self.Parent.get_wd_data()
else:
pw.simple_warning()
# make sure contribution is created
self.Parent.get_wd_data()
#--------------------------------------------------------------
# MagIC generic files conversion
#--------------------------------------------------------------
class convert_files_to_MagIC(wx.Frame):
"""
Abstract class for file conversion frames
"""
def __init__(self, parent, WD, title):
self.parent = parent
self.WD = WD
self.title = title
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self)
self.panel.SetScrollbars(20, 20, 50, 50)
self.InitUI()
def InitUI(self):
pass
def on_cancelButton(self, event):
self.Destroy()
self.parent.Show()
self.parent.Raise()
def on_add_file_button(self, event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_add_dir_button(self, event):
text = "choose directory of files to convert to MagIC"
pw.on_add_dir_button(self.bSizer0, text)
class convert_generic_files_to_MagIC(convert_files_to_MagIC):
""""""
title = "PmagPy generic file conversion"
def InitUI(self):
pnl = self.panel
#---sizer infor ----
TEXT = "convert generic file to MagIC format"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
self.bSizer1 = pw.labeled_text_field(pnl)
#---sizer 2 ----
# unique because only accepts 1 experiment type
TEXT="Experiment:"
self.bSizer2 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.HORIZONTAL)
self.gridBSizer = wx.GridBagSizer(5, 10)
self.label1 = wx.StaticText(pnl, label=TEXT)
self.experiments_names=['Demag (AF and/or Thermal)','Paleointensity-IZZI/ZI/ZI','ATRM 6 positions','AARM 6 positions','cooling rate','TRM']
self.protocol_info = wx.ComboBox(self.panel, -1, self.experiments_names[0], size=(300,25),choices=self.experiments_names, style=wx.CB_READONLY)
self.gridBSizer.Add(self.label1, (0, 0))
self.gridBSizer.Add(self.protocol_info, (1, 0))
self.bSizer2.Add(self.gridBSizer, wx.ALIGN_LEFT)
#
self.Bind(wx.EVT_COMBOBOX, self.on_select_protocol, self.protocol_info)
self.bSizer2a = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.HORIZONTAL )
text = 'Cooling Rate, format is xxx,yyy,zzz with no spaces '
self.cooling_rate = wx.TextCtrl(pnl)
self.bSizer2a.AddMany([wx.StaticText(pnl, label=text), self.cooling_rate])
#---sizer 3 ----
self.bSizer3 = pw.lab_field(pnl)
#---sizer 4 ----
# unique because only allows 4 choices (most others have ncn choices)
self.bSizer4 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
self.sample_naming_conventions=['sample=specimen','no. of initial characters','no. of terminal characters','character delimited']
self.sample_naming_convention = wx.ComboBox(self.panel, -1, self.sample_naming_conventions[0], size=(250,25), choices=self.sample_naming_conventions, style=wx.CB_READONLY)
self.sample_naming_convention_char = wx.TextCtrl(self.panel, id=-1, size=(40,25))
gridbSizer4 = wx.GridSizer(2, 2, 0, 10)
gridbSizer4.AddMany( [(wx.StaticText(self.panel,label="specimen-sample naming convention",style=wx.TE_CENTER),wx.ALIGN_LEFT),
(wx.StaticText(self.panel,label="delimiter/number (if necessary)",style=wx.TE_CENTER),wx.ALIGN_LEFT),
(self.sample_naming_convention,wx.ALIGN_LEFT),
(self.sample_naming_convention_char,wx.ALIGN_LEFT)])
#bSizer4.Add(self.sample_specimen_text,wx.ALIGN_LEFT)
self.bSizer4.AddSpacer(10)
self.bSizer4.Add(gridbSizer4,wx.ALIGN_LEFT)
#---sizer 5 ----
self.bSizer5 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
self.site_naming_conventions=['site=sample','no. of initial characters','no. of terminal characters','character delimited']
self.site_naming_convention_char = wx.TextCtrl(self.panel, id=-1, size=(40,25))
self.site_naming_convention = wx.ComboBox(self.panel, -1, self.site_naming_conventions[0], size=(250,25), choices=self.site_naming_conventions, style=wx.CB_READONLY)
gridbSizer5 = wx.GridSizer(2, 2, 0, 10)
gridbSizer5.AddMany( [(wx.StaticText(self.panel,label="sample-site naming convention",style=wx.TE_CENTER),wx.ALIGN_LEFT),
(wx.StaticText(self.panel,label="delimiter/number (if necessary)",style=wx.TE_CENTER),wx.ALIGN_LEFT),
(self.site_naming_convention,wx.ALIGN_LEFT),
(self.site_naming_convention_char,wx.ALIGN_LEFT)])
self.bSizer5.AddSpacer(10)
self.bSizer5.Add(gridbSizer5,wx.ALIGN_LEFT)
#---sizer 6 ----
TEXT="Location name:"
self.bSizer6 = pw.labeled_text_field(pnl, TEXT)
#---sizer 7 ----
self.bSizer7 = pw.site_lat_lon(pnl)
#---sizer 8 ----
self.bSizer8 = pw.replicate_measurements(pnl)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=5)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=5)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=5)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=5)
vbox.Add(self.bSizer2a, flag=wx.ALIGN_LEFT|wx.TOP, border=5)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=5)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=5)
vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=5)
vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=5)
vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=5)
vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border=5)
vbox.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(5)
self.hbox_all= wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
self.bSizer2a.ShowItems(False)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
def on_select_protocol(self, event):
if self.protocol_info.GetValue() == "cooling rate":
self.bSizer2a.ShowItems(True)
else:
self.bSizer2a.ShowItems(False)
self.hbox_all.Fit(self)
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self,event):
os.chdir(self.WD)
# generic_magic.py -WD WD - f FILE -fsa er_samples.txt -F OUTFILE.magic -exp [Demag/PI/ATRM 6/AARM 6/CR -samp X Y -site X Y -loc LOCNAME -dc B PHI THETA [-A] -WD path
options = {}
ErrorMessage = ""
#-----------
if not self.bSizer0.file_path.GetValue():
pw.simple_warning('You must provide a generic format file')
return False
FILE = str(self.bSizer0.file_path.GetValue())
options['magfile'] = FILE
#-----------
# WD="/".join(FILE.split("/")[:-1])
WD=self.WD
options['WD'] = WD
input_dir = os.path.split(FILE)[0]
magicoutfile=os.path.split(FILE)[1]+".magic"
options['meas_file'] = magicoutfile
print("magicoutfile", magicoutfile)
OUTFILE=os.path.join(self.WD,magicoutfile)
#-----------
#OUTFILE=self.WD+"/"+FILE.split('/')[-1]+".magic"
#-----------
EXP = ""
exp = str(self.protocol_info.GetValue())
if exp == 'Demag (AF and/or Thermal)':
EXP = 'Demag'
elif exp == 'Paleointensity-IZZI/ZI/ZI':
EXP = 'PI'
elif exp == 'ATRM 6 positions':
EXP ='ATRM 6'
elif exp == 'AARM 6 positions':
EXP = 'AARM 6'
elif exp == 'cooling rate':
cooling = self.cooling_rate.GetValue()
if not cooling:
text = "You must provide cooling rate for this experiment type!\nThe format is: xxx, yyy,zzz...\nThis should be cooling rates in [K/minutes], seperated by comma, ordered at the same order as XXX.10,XXX.20 ...XX.70"
pw.simple_warning(text)
return False
EXP = 'CR {}'.format(cooling)
if 'CR' in EXP:
options['experiment'], options['cooling_times_list'] = EXP.split()
elif 'AARM' in EXP:
options['experiment'], options['aarm_n_pos'] = EXP.split()
elif 'ATRM' in EXP:
options['experiment'], options['atrm_n_pos'] = EXP.split()
else:
options['experiment'] = EXP
#-----------
SAMP="1 0" #default
samp_naming_convention = str(self.sample_naming_convention.GetValue())
try:
samp_naming_convention_char=int(self.sample_naming_convention_char.GetValue())
except:
samp_naming_convention_char = "0"
if samp_naming_convention == 'sample=specimen':
SAMP = "1 0"
elif samp_naming_convention == 'no. of initial characters':
SAMP = "0 %i" % int(samp_naming_convention_char)
elif samp_naming_convention == 'no. of terminal characters':
SAMP = "1 %s" % samp_naming_convention_char
elif samp_naming_convention == 'character delimited':
SAMP = "2 %s" % samp_naming_convention_char
options['sample_nc'] = SAMP.split()
#-----------
SITE = "1 0" #default
site_naming_convention = str(self.site_naming_convention.GetValue())
try:
site_naming_convention_char = int(self.site_naming_convention_char.GetValue())
except:
site_naming_convention_char = "0"
if site_naming_convention == 'sample=specimen':
SITE = "1 0"
elif site_naming_convention == 'no. of initial characters':
SITE = "0 %i" % int(site_naming_convention_char)
elif site_naming_convention == 'no. of terminal characters':
SITE = "1 %s" % site_naming_convention_char
elif site_naming_convention == 'character delimited':
SITE = "2 %s" % site_naming_convention_char
options['site_nc'] = SITE.split()
#-----------
LOC = str(self.bSizer6.return_value())
if LOC!="": options['location'] = LOC
if str(self.bSizer6.return_value()) != "":
LOC="-loc \"%s\""%LOC
else:
LOC=""
#-----------
LABFIELD=" "
try:
B_uT, DEC, INC = self.bSizer3.return_value().split()
except ValueError:
B_uT, DEC, INC = '0', '0', '0'
#print "B_uT, DEC, INC", B_uT, DEC, INC
options['labfield'], options['labfield_phi'], options['labfield_theta'] = B_uT, DEC, INC
if EXP != "Demag":
LABFIELD="-dc " +B_uT+ " " + DEC + " " + INC
#-----------
try: lat,lon = self.bSizer7.return_value().split()
except ValueError: lat,lon = '',''
options['lat'] = lat
options['lon'] = lon
lat = '-lat ' + lat
        lon = '-lon ' + lon
#-----------
DONT_AVERAGE = " "
if not self.bSizer8.return_value():
DONT_AVERAGE = "-A"
options['noave'] = 1
else:
options['noave'] = 0
#-----------
# some special
SPEC_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_specimens.txt"
SAMP_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_samples.txt"
SITE_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_sites.txt"
LOC_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_locations.txt"
options['spec_file'] = SPEC_OUTFILE
options['samp_file'] = SAMP_OUTFILE
options['site_file'] = SITE_OUTFILE
options['loc_file'] = LOC_OUTFILE
COMMAND="generic_magic.py -WD %s -f %s -fsa er_samples.txt -F %s -exp %s -samp %s -site %s %s %s %s -Fsp %s -Fsa %s -Fsi %s -Flo %s %s %s"\
%(WD,FILE,OUTFILE,EXP,SAMP,SITE,LOC,LABFIELD,DONT_AVERAGE, SPEC_OUTFILE, SAMP_OUTFILE, SITE_OUTFILE, LOC_OUTFILE, lat, lon)
print("-I- Running Python command:\n %s"%COMMAND)
program_run, error_message = generic_magic.convert(**options)
if program_run:
pw.close_window(self, COMMAND, OUTFILE)
else:
pw.simple_warning(error_message)
return False
self.Destroy()
self.parent.Raise()
#def on_cancelButton(self,event):
# self.Destroy()
# self.parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=generic_magic.do_help())
def get_sample_name(self, specimen, sample_naming_convenstion):
if sample_naming_convenstion[0] == "sample=specimen":
sample = specimen
elif sample_naming_convenstion[0] == "no. of terminal characters":
n = int(sample_naming_convenstion[1]) * -1
sample = specimen[:n]
elif sample_naming_convenstion[0] == "character delimited":
d = sample_naming_convenstion[1]
sample_splitted = specimen.split(d)
if len(sample_splitted) == 1:
sample = sample_splitted[0]
else:
sample = d.join(sample_splitted[:-1])
return sample
def get_site_name(self, sample, site_naming_convention):
if site_naming_convention[0] == "site=sample":
site = sample
elif site_naming_convention[0] == "no. of terminal characters":
n = int(site_naming_convention[1])*-1
site = sample[:n]
elif site_naming_convention[0] == "character delimited":
d = site_naming_convention[1]
site_splitted = sample.split(d)
if len(site_splitted) == 1:
site = site_splitted[0]
else:
site = d.join(site_splitted[:-1])
return site
class convert_SIO_files_to_MagIC(convert_files_to_MagIC):
"""
convert SIO formatted measurement file to MagIC formated files
"""
def InitUI(self):
pnl = self.panel
TEXT = "SIO Format file"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
# bSizer_info.Add(wx.StaticText(self), wx.ALIGN_LEFT)
self.bSizer0 = pw.choose_file(pnl, method = self.on_add_file_button)
#---sizer 1 ----
self.bSizer1 = pw.labeled_text_field(pnl)
#---sizer 2 ----
self.bSizer2 = pw.experiment_type(pnl)
#---sizer 3 ----
self.bSizer3 = pw.lab_field(pnl)
#---sizer 4 ----
self.bSizer4 = pw.specimen_n(pnl)
#---sizer 4a ----
self.bSizer4a = pw.select_ncn(pnl)
#---sizer 5 ----
TEXT="Location name:"
self.bSizer5 = pw.labeled_text_field(pnl, TEXT)
#---sizer 11 ----
self.bSizer11 = pw.site_lat_lon(pnl)
#---sizer 6 ---
TEXT="Instrument name (optional):"
self.bSizer6 = pw.labeled_text_field(pnl, TEXT)
#---sizer 7 ----
self.bSizer7 = pw.replicate_measurements(pnl)
#---sizer 8 ----
TEXT = "peak AF field (mT) if ARM: "
self.bSizer8 = pw.labeled_text_field(pnl, TEXT)
#---sizer 9 ----
TEXT = "Coil number for ASC impulse coil (if treatment units in Volts): "
self.bSizer9 = pw.labeled_text_field(pnl, TEXT)
#---sizer 10 ---
#self.bSizer10 = pw.synthetic(pnl)
#---sizer 10 ---
TEXT = "cooling rates [K/minutes] (seperated by comma) for cooling rate experiment:"
self.bSizer10 = pw.labeled_text_field(pnl, TEXT)
#---buttons ----
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
hbox0 = wx.BoxSizer(wx.HORIZONTAL)
hbox0.Add(self.bSizer5, flag=wx.ALIGN_LEFT)
hbox0.Add(self.bSizer11, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
hbox0.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
hbox1 =wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(self.bSizer8, flag=wx.ALIGN_LEFT)
hbox1.Add(self.bSizer9, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
hbox2 =wx.BoxSizer(wx.HORIZONTAL)
hbox2.Add(self.bSizer10, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(self.bSizer4a, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(hbox0, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(hbox1, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hbox2, flag=wx.ALIGN_LEFT|wx.TOP, border=8)
vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(20)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_okButton(self, event):
os.chdir(self.WD)
options_dict = {}
SIO_file = self.bSizer0.return_value()
if not SIO_file:
pw.simple_warning('You must provide a SIO format file')
return False
options_dict['mag_file'] = str(SIO_file)
magicoutfile=os.path.split(SIO_file)[1]+".magic"
outfile =os.path.join(self.WD, magicoutfile)
options_dict['meas_file'] = str(outfile)
user = self.bSizer1.return_value()
options_dict['user'] = str(user)
if user:
user = "-usr " + user
experiment_type = self.bSizer2.return_value()
options_dict['codelist'] = str(experiment_type)
if experiment_type:
experiment_type = "-LP " + experiment_type
lab_field = self.bSizer3.return_value()
if not lab_field:
lab_field = ""
options_dict['labfield'] = 0
options_dict['phi'] = 0
options_dict['theta'] = 0
else:
lab_field_list = str(lab_field).split()
options_dict['labfield'] = lab_field_list[0]
options_dict['phi'] = lab_field_list[1]
options_dict['theta'] = lab_field_list[2]
lab_field = "-dc " + lab_field
spc = self.bSizer4.return_value()
options_dict['specnum'] = spc
ncn = self.bSizer4a.return_value()
options_dict['samp_con'] = ncn
loc_name = self.bSizer5.return_value()
options_dict['location'] = str(loc_name)
if loc_name:
loc_name = "-loc " + loc_name
instrument = self.bSizer6.return_value()
options_dict['inst'] = str(instrument)
if instrument:
instrument = "-ins " + instrument
replicate = self.bSizer7.return_value()
if replicate:
options_dict['noave'] = 0
else:
options_dict['noave'] = 1
if replicate:
replicate = ''
else:
replicate = '-A'
peak_AF = self.bSizer8.return_value()
options_dict['peakfield'] = peak_AF
if peak_AF:
peak_AF = "-ac " + peak_AF
coil_number = self.bSizer9.return_value()
options_dict['coil'] = coil_number
if coil_number:
coil_number = "-V " + coil_number
cooling_rates=""
cooling_rates = self.bSizer10.return_value()
options_dict['cooling_rates'] = cooling_rates
try: lat,lon = self.bSizer11.return_value().split()
except ValueError: lat,lon = '',''
options_dict['lat'] = lat
options_dict['lon'] = lon
lat = '-lat ' + lat
        lon = '-lon ' + lon
# Force -A option on cooling rate correction experiment
if cooling_rates !="" and experiment_type =="-LP CR":
replicate = '-A';options_dict['noave'] = 1
SPEC_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_specimens.txt"
SAMP_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_samples.txt"
SITE_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_sites.txt"
LOC_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_locations.txt"
options_dict['spec_file'] = SPEC_OUTFILE
options_dict['samp_file'] = SAMP_OUTFILE
options_dict['site_file'] = SITE_OUTFILE
options_dict['loc_file'] = LOC_OUTFILE
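# command-line equivalent is printed for reference; the conversion itself is run through the module call below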
COMMAND = "sio_magic.py -F {0} -Fsp {1} -Fsa {2} -Fsi {3} -Flo {4} -f {5} -spc {6} -ncn {7} {8} {9} {10} {11} {12} {13} {14} {15} {16} {17} {18}".format(outfile, SPEC_OUTFILE, SAMP_OUTFILE, SITE_OUTFILE, LOC_OUTFILE, SIO_file, spc, ncn, user, experiment_type, cooling_rates, loc_name, lab_field, peak_AF, coil_number, instrument, replicate, lat, lon)
print("COMMAND", COMMAND)
# to run as module:
if sio_magic.convert(**options_dict):
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning()
def on_helpButton(self, event):
pw.on_helpButton(text=sio_magic.do_help())
class convert_CIT_files_to_MagIC(convert_files_to_MagIC):
"""Class that converts CIT files magnetometer files into MagIC format for analysis and archiving"""
def InitUI(self):
pnl = self.panel
TEXT = "CIT Format file"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
TEXT="Measurer (optional):"
self.bSizer1 = pw.labeled_text_field(pnl, TEXT)
#---sizer 2 ----
self.bSizer2 = pw.sampling_particulars(pnl)
#---sizer 3 ----
self.bSizer3 = pw.lab_field(pnl)
#---sizer 4 ----
self.bSizer4 = pw.select_ncn(pnl)
#---sizer 5 ---
TEXT = "specify number of characters to designate a specimen, default = 0"
self.bSizer5 = pw.specimen_n(pnl)
#---sizer 6 ----
TEXT="Location name:"
self.bSizer6 = pw.labeled_text_field(pnl, TEXT)
#---sizer 7 ----
self.bSizer7 = pw.replicate_measurements(pnl)
self.bSizer7.replicate_rb2.SetValue(True)
#---sizer 8 ---
TEXT = "peak AF field (mT) if ARM: "
self.bSizer8 = pw.labeled_text_field(pnl, TEXT)
#---sizer 9 ----
TEXT="Number of measurement orientations (default=8)"
self.bSizer9 = pw.labeled_text_field(pnl, TEXT)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer9, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_okButton(self, event):
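# collect the CIT converter arguments (input file, output file names, naming convention, optional lab field) and run cit_magic.convert()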
os.chdir(self.WD)
options_dict = {}
wd = self.WD
options_dict['dir_path'] = wd
full_file = self.bSizer0.return_value()
if not full_file:
pw.simple_warning('You must provide a CIT format file')
return False
input_directory, CIT_file = os.path.split(full_file)
options_dict['magfile'] = CIT_file
options_dict['input_dir_path'] = input_directory
if input_directory:
ID = "-ID " + input_directory
else:
ID = ''
outfile = CIT_file + ".magic"
options_dict['meas_file'] = outfile
samp_outfile = CIT_file[:CIT_file.find('.')] + "_samples.txt"
options_dict['samp_file'] = samp_outfile
spec_outfile = CIT_file[:CIT_file.find('.')] + "_specimens.txt"
options_dict['spec_file'] = spec_outfile
site_outfile = CIT_file[:CIT_file.find('.')] + "_sites.txt"
options_dict['site_file'] = site_outfile
loc_outfile = CIT_file[:CIT_file.find('.')] + "_locations.txt"
options_dict['loc_file'] = loc_outfile
user = self.bSizer1.return_value()
options_dict['user'] = user
dc_flag,dc_params = '',''
if self.bSizer3.return_value() != '':
dc_params = self.bSizer3.return_value().split()
options_dict['labfield'] = dc_params[0]
options_dict['phi'] = dc_params[1]
options_dict['theta'] = dc_params[2]
dc_flag = '-dc'
if user:
user = "-usr " + user
spec_num = self.bSizer5.return_value()
options_dict['specnum'] = spec_num
if spec_num:
spec_num = "-spc " + str(spec_num)
else:
spec_num = "-spc 0" # defaults to 0 if user doesn't choose number
loc_name = self.bSizer6.return_value()
options_dict['locname'] = loc_name
if loc_name:
loc_name = "-loc " + loc_name
ncn = self.bSizer4.return_value()
if "-" in ncn:
ncn, Z = ncn.split("-")
else:
Z = ''
options_dict['samp_con'] = ncn
particulars = self.bSizer2.return_value()
options_dict['methods'] = particulars
if particulars:
particulars = "-mcd " + particulars
peak_AF = self.bSizer8.return_value()
options_dict['peak_AF'] = peak_AF
if peak_AF:
peak_AF = "-ac " + peak_AF
replicate = self.bSizer7.return_value()
if replicate:
options_dict['avg'] = 0
replicate = ''
else:
options_dict['avg'] = 1
replicate = '-A'
meas_n_orient = self.bSizer9.return_value()
if meas_n_orient!='':
try:
int(meas_n_orient)
options_dict['meas_n_orient'] = meas_n_orient
except ValueError:
pw.simple_warning("value for number of measured orienations must be a positive integer")
COMMAND = "cit_magic.py -WD {} -f {} -F {} {} {} {} {} -ncn {} {} {} {} -Fsp {} -Fsa {} -Fsi {} -Flo {} {} {} {} -mno {}".format(wd, CIT_file, outfile, particulars, spec_num, loc_name, user, ncn, Z, peak_AF, ID, spec_outfile, samp_outfile, site_outfile, loc_outfile, replicate, dc_flag, dc_params, meas_n_orient)
# to run as module:
program_ran, error_message = cit_magic.convert(**options_dict)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
def on_helpButton(self, event):
pw.on_helpButton(text=cit_magic.do_help())
class convert_HUJI_files_to_MagIC(convert_files_to_MagIC):
""" """
def InitUI(self):
pnl = self.panel
TEXT = "HUJI format file"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
TEXT = "HUJI sample orientation data file (Optional)"
bSizer_infoA = wx.BoxSizer(wx.HORIZONTAL)
bSizer_infoA.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0A ----
self.bSizer0A = pw.choose_file(pnl, 'add', method = self.on_add_dat_file_button)
#---sizer 1 ----
self.bSizer1 = pw.labeled_text_field(pnl)
#---sizer 2 ----
exp_names=['AF Demag', 'Thermal (includes thellier but not trm)', 'NRM only', 'TRM acquisition', 'Anisotropy experiment', 'Cooling rate experiment']
self.bSizer2 = pw.experiment_type(pnl, exp_names)
#---sizer 2a ---
#for box in self.bSizer2.boxes:
# self.Bind(wx.EVT_CHECKBOX, self.on_select_protocol, box)
self.bSizer2a = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.HORIZONTAL )
text = 'Cooling Rate (required only for cooling rate type experiments)\nformat is xxx,yyy,zzz with no spaces '
self.cooling_rate = wx.TextCtrl(pnl)
self.bSizer2a.AddMany([wx.StaticText(pnl, label=text), self.cooling_rate])
#---sizer 3 ----
self.bSizer3 = pw.lab_field(pnl)
#---sizer 4 ---
TEXT = "specify number of characters to designate a specimen, default = 0"
self.bSizer4 = pw.labeled_text_field(pnl, TEXT)
#---sizer 5 ----
self.bSizer5 = pw.select_ncn(pnl)
#---sizer 6 ----
TEXT="Location name:"
self.bSizer6 = pw.labeled_text_field(pnl, TEXT)
#---sizer 7 ---
TEXT = "peak AF field (mT) if ARM: "
self.bSizer7 = pw.labeled_text_field(pnl, TEXT)
#---sizer 8 ---
self.bSizer8 = pw.replicate_measurements(pnl)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(bSizer_infoA, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0A, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.hbox_all= wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
self.bSizer2a.ShowItems(True)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
def on_add_dat_file_button(self,event):
text = "HUJI sample orientation data file (Optional)"
pw.on_add_file_button(self.bSizer0A, text)
def on_okButton(self, event):
"""
grab user input values, format them, and run huji_magic.py with the appropriate flags
"""
os.chdir(self.WD)
options = {}
HUJI_file = self.bSizer0.return_value()
if not HUJI_file:
pw.simple_warning("You must select a HUJI format file")
return False
options['magfile'] = HUJI_file
dat_file = self.bSizer0A.return_value()
if os.path.isfile(dat_file): options['datafile'] = dat_file
else: dat_file=""
magicoutfile=os.path.split(HUJI_file)[1]+".magic"
outfile=os.path.join(self.WD, magicoutfile)
options['meas_file'] = outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_specimens.txt"
spec_outfile=os.path.join(self.WD, magicoutfile)
options['spec_file'] = spec_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_samples.txt"
samp_outfile=os.path.join(self.WD, magicoutfile)
options['samp_file'] = samp_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_sites.txt"
site_outfile=os.path.join(self.WD, magicoutfile)
options['site_file'] = site_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_locations.txt"
loc_outfile=os.path.join(self.WD, magicoutfile)
options['loc_file'] = loc_outfile
user = self.bSizer1.return_value()
options['user'] = user
if user:
user = '-usr ' + user
experiment_type = self.bSizer2.return_value()
options['codelist'] = experiment_type
if not experiment_type:
pw.simple_warning("You must select an experiment type")
return False
cooling_rate = self.cooling_rate.GetValue() or 0
if cooling_rate:
experiment_type = experiment_type + " " + cooling_rate
lab_field = self.bSizer3.return_value()
if not lab_field:
lab_field = "0 0 0"
lab_field_list = lab_field.split()
options['labfield'] = lab_field_list[0]
options['phi'] = lab_field_list[1]
options['theta'] = lab_field_list[2]
lab_field = '-dc ' + lab_field
spc = self.bSizer4.return_value()
options['specnum'] = spc or 0
if not spc:
spc = '-spc 0'
else:
spc = '-spc ' + spc
ncn = self.bSizer5.return_value()
options['samp_con'] = ncn
loc_name = self.bSizer6.return_value()
options['location'] = loc_name
if loc_name:
loc_name = '-loc ' + loc_name
peak_AF = self.bSizer7.return_value()
options['peakfield'] = peak_AF
replicate = self.bSizer8.return_value()
if replicate:
options['noave'] = 0
replicate = ''
else:
options['noave'] = 1
replicate = '-A'
COMMAND = "huji_magic.py -f {} -fd {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} {} -LP {} {} -ncn {} {} {} {} {}".format(HUJI_file, dat_file, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF, replicate)
program_ran, error_message = huji_magic.convert(**options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
def on_helpButton(self, event):
pw.on_helpButton(text=huji_magic.do_help())
class convert_2g_binary_files_to_MagIC(convert_files_to_MagIC):
def InitUI(self):
pnl = self.panel
TEXT = "Folder containing one or more 2g-binary format files"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
#self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
self.bSizer0 = pw.choose_dir(pnl, btn_text = 'add', method = self.on_add_dir_button)
#---sizer 1 ----
self.bSizer1 = pw.sampling_particulars(pnl)
#---sizer 2 ----
ncn_keys = ['XXXXY', 'XXXX-YY', 'XXXX.YY', 'XXXX[YYY] where YYY is sample designation, enter number of Y', 'sample name=site name', 'Site is entered under a separate column', '[XXXX]YYY where XXXX is the site name, enter number of X']
self.bSizer2 = pw.select_ncn(pnl, ncn_keys)
#---sizer 3 ----
TEXT = "specify number of characters to designate a specimen, default = 0"
self.bSizer3 = pw.labeled_text_field(pnl, TEXT)
#---sizer 4 ----
self.bSizer4 = pw.select_specimen_ocn(pnl)
#---sizer 5 ----
TEXT="Location name:"
self.bSizer5 = pw.labeled_text_field(pnl, TEXT)
#---sizer 6 ---
TEXT="Instrument name (optional):"
self.bSizer6 = pw.labeled_text_field(pnl, TEXT)
#---sizer 7 ----
self.bSizer7 = pw.replicate_measurements(pnl)
#---sizer 8 ----
self.bSizer8 = pw.site_lat_lon(pnl)
#---buttons ---
hboxok = pw.btn_panel(self, pnl) # creates ok, cancel, help buttons and binds them to appropriate methods
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Centre()
self.Show()
#---button methods ---
def on_okButton(self, event):
os.chdir(self.WD)
options_dict = {}
WD = self.WD
options_dict['dir_path'] = WD
directory = self.bSizer0.return_value()
options_dict['ID'] = directory
if not directory:
pw.simple_warning('You must select a directory containing 2g binary files')
return False
files = os.listdir(directory)
files = [str(f) for f in files if str(f).endswith('.dat')]
if not files:
pw.simple_warning('No .dat files found in {}'.format(directory))
return False
ID = "-ID " + directory
if self.bSizer1.return_value():
particulars = self.bSizer1.return_value()
options_dict['gmeths'] = particulars
mcd = '-mcd ' + particulars
else:
mcd = ''
ncn = self.bSizer2.return_value()
options_dict['samp_con'] = ncn
spc = self.bSizer3.return_value()
options_dict['specnum'] = spc or 0
if not spc:
spc = '-spc 1'
else:
spc = '-spc ' + spc
ocn = self.bSizer4.return_value()
options_dict['or_con'] = ocn
loc_name = self.bSizer5.return_value()
options_dict['location_name'] = loc_name
if loc_name:
loc_name = "-loc " + loc_name
try: lat,lon = self.bSizer8.return_value().split()
except ValueError: lat,lon = '',''
options_dict['lat'] = lat
options_dict['lon'] = lon
instrument = self.bSizer6.return_value()
options_dict['inst'] = instrument
if instrument:
instrument = "-ins " + instrument
replicate = self.bSizer7.return_value()
if replicate:
replicate = '-a'
options_dict['noave'] = 0
else:
replicate = ''
options_dict['noave'] = 1
spec_outfile = files[0][:files[0].find('.')] + "_" + files[-1][:files[-1].find('.')] + "_specimens.txt"
options_dict['spec_file'] = spec_outfile
samp_outfile = files[0][:files[0].find('.')] + "_" + files[-1][:files[-1].find('.')] + "_samples.txt"
options_dict['samp_file'] = samp_outfile
sites_outfile = files[0][:files[0].find('.')] + "_" + files[-1][:files[-1].find('.')] + "_sites.txt"
options_dict['site_file'] = sites_outfile
loc_outfile = files[0][:files[0].find('.')] + "_" + files[-1][:files[-1].find('.')] + "_locations.txt"
options_dict['loc_file'] = loc_outfile
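# run the converter once per .dat file; the success window is only shown after the last file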
for f in files:
file_2g_bin = f
outfile = file_2g_bin + ".magic"
options_dict['meas_file'] = outfile
options_dict['mag_file'] = f
COMMAND = "_2g_bin_magic.py -WD {} -f {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} -ncn {} {} {} -ocn {} {} {} {} {} -lat {} -lon {}".format(WD, file_2g_bin, outfile, spec_outfile, samp_outfile, sites_outfile, loc_outfile, ncn, mcd, spc, ocn, loc_name, replicate, ID, instrument,lat,lon)
if files.index(f) == (len(files) - 1): # terminate process on last file call
# to run as module:
if _2g_bin_magic.convert(**options_dict):
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning()
else:
print("Running equivalent of python command: ", COMMAND)
if _2g_bin_magic.convert(**options_dict):
pass # success, continue on to next file
else:
pw.simple_warning()
def on_helpButton(self, event):
# to run as module:
pw.on_helpButton(text=_2g_bin_magic.do_help())
# to run as command line:
#pw.on_helpButton("_2g_bin_magic.py -h")
class convert_LDEO_files_to_MagIC(convert_files_to_MagIC):
""" """
def InitUI(self):
pnl = self.panel
TEXT = "LDEO format file"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
self.bSizer1 = pw.labeled_text_field(pnl)
#---sizer 2 ---
exp_names=['AF Demag', 'Thermal (includes thellier but not trm)', 'Shaw method', 'IRM (acquisition)', 'NRM only', 'TRM acquisition', 'double AF demag', 'triple AF demag (GRM protocol)', 'Anisotropy experiment']
self.bSizer2 = pw.experiment_type(pnl, exp_names)
#---sizer 2a ---
# add conditional boxsizer for Shaw experiments
# if arm_labfield and trm_peakT are properly added into ldeo_magic
#---sizer 3 ----
self.bSizer3 = pw.lab_field(pnl)
#---sizer 4 ----
self.bSizer4 = pw.select_ncn(pnl)
#---sizer 5 ----
TEXT = "specify number of characters to designate a specimen, default = 0"
self.bSizer5 = pw.labeled_text_field(pnl, TEXT)
#---sizer 6 ---
TEXT="Location name:"
self.bSizer6 = pw.labeled_text_field(pnl, TEXT)
#---sizer 7 ---
TEXT="Instrument name (optional):"
self.bSizer7 = pw.labeled_text_field(pnl, TEXT)
#---sizer 8 ---
self.bSizer8 = pw.replicate_measurements(pnl)
#---sizer 9 ----
TEXT = "peak AF field (mT) if ARM: "
self.bSizer9 = pw.labeled_text_field(pnl, TEXT)
#---sizer 10 ---
TEXT = "Coil number for ASC impulse coil (if treatment units in Volts): "
self.bSizer10 = pw.labeled_text_field(pnl, TEXT)
#---sizer 11 ---
self.bSizer11 = pw.mass_or_volume_buttons(pnl)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
hbox0 = wx.BoxSizer(wx.HORIZONTAL)
hbox0.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.RIGHT, border=5)
hbox0.Add(self.bSizer7, flag=wx.ALIGN_LEFT)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(self.bSizer9, flag=wx.ALIGN_LEFT|wx.RIGHT, border=5)
hbox1.Add(self.bSizer10, flag=wx.ALIGN_LEFT)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer11, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER|wx.BOTTOM, border=20)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_okButton(self, event):
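# derive all MagIC output file names from the LDEO input file name, then collect the remaining options and run ldeo_magic.convert()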
os.chdir(self.WD)
options_dict = {}
LDEO_file = self.bSizer0.return_value()
if not LDEO_file:
pw.simple_warning("You must provide a LDEO format file")
return False
options_dict['magfile'] = LDEO_file
magicoutfile=os.path.split(LDEO_file)[1]+".magic"
outfile=os.path.join(self.WD, magicoutfile)
options_dict['meas_file'] = outfile
magicoutfile=os.path.split(LDEO_file)[1]+"_specimens.txt"
spec_outfile=os.path.join(self.WD, magicoutfile)
options_dict['spec_file'] = spec_outfile
magicoutfile=os.path.split(LDEO_file)[1]+"_samples.txt"
samp_outfile=os.path.join(self.WD, magicoutfile)
options_dict['samp_file'] = samp_outfile
magicoutfile=os.path.split(LDEO_file)[1]+"_sites.txt"
site_outfile=os.path.join(self.WD, magicoutfile)
options_dict['site_file'] = site_outfile
magicoutfile=os.path.split(LDEO_file)[1]+"_locations.txt"
loc_outfile=os.path.join(self.WD, magicoutfile)
options_dict['loc_file'] = loc_outfile
user = self.bSizer1.return_value()
options_dict['user'] = user
if user:
user = "-usr " + user
experiment_type = self.bSizer2.return_value()
options_dict['codelist'] = experiment_type
if experiment_type:
experiment_type = "-LP " + experiment_type
lab_field = self.bSizer3.return_value()
if lab_field:
options_dict['labfield'], options_dict['phi'], options_dict['theta'] = lab_field.split()
lab_field = "-dc " + lab_field
ncn = self.bSizer4.return_value()
options_dict['samp_con'] = ncn
spc = self.bSizer5.return_value()
options_dict['specnum'] = spc or 0
if spc:
spc = "-spc " + spc
else:
spc = "-spc 0"
loc_name = self.bSizer6.return_value()
options_dict['location'] = loc_name
if loc_name:
loc_name = "-loc " + loc_name
instrument = self.bSizer7.return_value()
options_dict['inst'] = instrument
if instrument:
instrument = "-ins " + instrument
replicate = self.bSizer8.return_value()
if replicate:
replicate = ""
options_dict['noave'] = 0 # do average
else:
replicate = "-A"
options_dict['noave'] = 1 # don't average
AF_field = self.bSizer9.return_value()
options_dict['peakfield'] = AF_field or 0
if AF_field:
AF_field = "-ac " + AF_field
coil_number = self.bSizer10.return_value()
options_dict['coil'] = coil_number
if coil_number:
coil_number = "-V " + coil_number
mv = self.bSizer11.return_value()
options_dict['mv'] = mv
COMMAND = "ldeo_magic.py -f {0} -F {1} -Fsp {2} -Fsa {3} -Fsi {4} -Flo {5} {6} {7} {8} -ncn {9} {10} {11} {12} {13} {14} {15} -mv {16}".format(LDEO_file, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, user, experiment_type, lab_field, ncn, spc, loc_name, instrument, replicate, AF_field, coil_number, mv)
# to run as module:
program_ran, error_message = ldeo_magic.convert(**options_dict)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
def on_helpButton(self, event):
pw.on_helpButton(text=ldeo_magic.do_help())
class convert_IODP_files_to_MagIC(convert_files_to_MagIC):
""" """
def InitUI(self):
pnl = self.panel
TEXT = "IODP csv format file"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0a ---
TEXT = "IODP file type"
label1 = "SRM section"
label2 = "SRM discrete"
self.bSizer0a = pw.labeled_yes_or_no(pnl, TEXT, label1, label2)
#self.bSizer0a = pw.radio_buttons(pnl, ['old format', 'srm', 'discrete'], 'IODP file type')
#---sizer 0b ---
TEXT = "If you don't choose a file, Pmag GUI will try to import any .csv files in your working directory into one MagIC format file"
self.bSizer0b = pw.simple_text(pnl, TEXT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
self.bSizer1 = pw.site_lat_lon(pnl)
#---sizer 2 ----
self.bSizer2 = pw.replicate_measurements(pnl)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0b, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.AddSpacer(10)
#vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_okButton(self, event):
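# shared options are built once; the SRM section vs. discrete radio choice decides whether iodp_srm_magic or iodp_dscr_magic is run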
os.chdir(self.WD)
wait = wx.BusyInfo("Please wait, working...")
options = {}
wd = self.WD
options['dir_path'] = wd
is_section = self.bSizer0a.return_value()
full_file = self.bSizer0.return_value()
ID, IODP_file = os.path.split(full_file)
if not ID:
ID = '.'
options['csv_file'] = IODP_file
options['input_dir_path'] = ID
outfile = IODP_file + ".magic"
options['meas_file'] = outfile
spec_outfile = IODP_file[:IODP_file.find('.')] + "_specimens.txt"
options['spec_file'] = spec_outfile
samp_outfile = IODP_file[:IODP_file.find('.')] + "_samples.txt"
options['samp_file'] = samp_outfile
site_outfile = IODP_file[:IODP_file.find('.')] + "_sites.txt"
options['site_file'] = site_outfile
loc_outfile = IODP_file[:IODP_file.find('.')] + "_locations.txt"
options['loc_file'] = loc_outfile
replicate = self.bSizer2.return_value()
if replicate: # do average
replicate = ''
options['noave'] = 0
else: # don't average
replicate = "-A"
options['noave'] = 1
try: lat,lon = self.bSizer1.return_value().split()
except ValueError: lat,lon = '',''
options['lat'] = lat
options['lon'] = lon
lat_with_flag,lon_with_flag = '-lat '+lat,'-lon '+lon
COMMAND = "iodp_srm_magic.py -WD {0} -f {1} -F {2} {3} -ID {4} -Fsp {5} -Fsa {6} -Fsi {7} -Flo {8} {9} {10}".format(wd, IODP_file, outfile, replicate, ID, spec_outfile, samp_outfile, site_outfile, loc_outfile, lat_with_flag, lon_with_flag)
if is_section: # SRM section
program_ran, error_message = iodp_srm_magic.convert(**options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
else: # SRM discrete
program_ran, error_message = iodp_dscr_magic.convert(**options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
del wait
def on_helpButton(self, event):
is_section = self.bSizer0a.return_value()
if is_section:
pw.on_helpButton(text=iodp_srm_magic.do_help())
else:
pw.on_helpButton(text=iodp_dscr_magic.do_help())
class convert_PMD_files_to_MagIC(convert_files_to_MagIC):
""" """
def InitUI(self):
pnl = self.panel
TEXT = "Folder containing one or more PMD format files"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_dir(pnl, 'add', method = self.on_add_dir_button)
#---sizer 2 ----
ncn_keys = ['XXXXY', 'XXXX-YY', 'XXXX.YY', 'XXXX[YYY] where YYY is sample designation, enter number of Y', 'sample name=site name', 'Site is entered under a separate column', '[XXXX]YYY where XXXX is the site name, enter number of X']
self.bSizer2 = pw.select_ncn(pnl, ncn_keys)
#---sizer 3 ---
# TEXT = "specify number of characters to designate a specimen, default = 0"
# self.bSizer3 = pw.labeled_text_field(pnl, TEXT)
self.bSizer3 = pw.specimen_n(pnl)
#---sizer 4 ----
TEXT="Location name:"
self.bSizer4 = pw.labeled_text_field(pnl, TEXT)
#---sizer 5 ----
self.bSizer5 = pw.sampling_particulars(pnl)
#---sizer 6 ---
self.bSizer6 = pw.replicate_measurements(pnl)
#---sizer 7 ---
self.bSizer7 = pw.site_lat_lon(pnl)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_okButton(self, event):
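# gather the shared options once; per-file output names are set inside the loop below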
os.chdir(self.WD)
options = {}
WD = self.WD
options['dir_path'] = WD
directory = self.bSizer0.return_value() or '.'
options['input_dir_path'] = directory
files = os.listdir(directory)
files = [str(f) for f in files if str(f).endswith('.pmd')]
if files:
samp_outfile = files[0][:files[0].find('.')] + files[-1][:files[-1].find('.')] + "_samples.txt"
options['samp_file'] = samp_outfile
else:
#raise Exception("No pmd files found in {}, try a different directory".format(WD))
pw.simple_warning("No pmd files found in {}, try a different directory".format(WD))
ID = "-ID " + directory
ncn = self.bSizer2.return_value()
options['samp_con'] = ncn
spc = self.bSizer3.return_value() or 0
options['specnum'] = spc
loc_name = self.bSizer4.return_value()
options['location'] = loc_name
if loc_name:
location = loc_name
loc_name = "-loc " + loc_name
particulars = self.bSizer5.return_value()
options['meth_code'] = particulars
if particulars:
particulars = "-mcd " + particulars
try: lat,lon = self.bSizer7.return_value().split()
except ValueError: lat,lon = '',''
options['lat'] = lat
options['lon'] = lon
lat = '-lat ' + lat
lon = '-lon ' + lon
replicate = self.bSizer6.return_value()
if replicate: # average replicate measurements
options['noave'] = 0
replicate = ''
else: # don't average
options['noave'] = 1
replicate = '-A'
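# convert each .pmd file; output file names are derived from the individual input file names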
for f in files:
options['mag_file'] = f
outfile = f + ".magic"
options['meas_file'] = outfile
spec_outfile = f[:f.find('.')] + "_specimens.txt"
options['spec_file'] = spec_outfile
samp_outfile = f[:f.find('.')] + "_samples.txt"
options['samp_file'] = samp_outfile
site_outfile = f[:f.find('.')] + "_sites.txt"
options['site_file'] = site_outfile
loc_outfile = f[:f.find('.')] + "_locations.txt"
options['loc_file'] = loc_outfile
COMMAND = "pmd_magic.py -WD {} -f {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} -ncn {} {} -spc {} {} {} {} {} {}".format(WD, f, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, ncn, particulars, spc, replicate, ID, loc_name, lat, lon)
program_ran, error_message = pmd_magic.convert(**options)
if not program_ran:
pw.simple_warning(error_message)
return False
elif files.index(f) == len(files) -1:
pw.close_window(self, COMMAND, outfile)
else:
print("Just ran equivalent of Python command: ", COMMAND)
def on_helpButton(self, event):
# to run as module:
pw.on_helpButton(text=pmd_magic.do_help())
class convert_JR6_files_to_MagIC(wx.Frame):
""" """
title = "PmagPy JR6 file conversion"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "JR6 format file (currently .txt format only)"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0a ----
TEXT = "JR6 file Type"
label1 = ".txt format"
label2 = ".jr6 format"
self.bSizer0a = pw.labeled_yes_or_no(pnl, TEXT, label1, label2)
#---sizer 0b ---
self.bSizer0b = pw.check_box(pnl, 'Joides Resolution')
self.Bind(wx.EVT_CHECKBOX, self.on_check_joides, self.bSizer0b.cb)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, btn_text='add measurement file', method = self.on_add_file_button)
#---sizer 1b ----
TEXT="User (Optional):"
self.bSizer1b = pw.labeled_text_field(pnl, TEXT)
#---sizer 1c ----
TEXT="Expedition (i.e. 312)"
self.bSizer1c = pw.labeled_text_field(pnl, TEXT)
self.bSizer1c.ShowItems(False)
#---sizer 1d ----
TEXT="Hole name (i.e. U1456A)"
self.bSizer1d = pw.labeled_text_field(pnl, TEXT)
self.bSizer1d.ShowItems(False)
#---sizer 1 ----
self.bSizer1 = pw.sampling_particulars(pnl)
#---sizer 1a ---
self.bSizer1a = pw.labeled_text_field(pnl, 'Specimen volume, default is 2.5 cc.\nPlease provide volume in cc.')
#---sizer 2 ---
self.bSizer2 = pw.specimen_n(pnl)
#---sizer 3 ----
ncn_keys = ['XXXXY', 'XXXX-YY', 'XXXX.YY', 'XXXX[YYY] where YYY is sample designation, enter number of Y', 'sample name=site name']
self.bSizer3 = pw.select_ncn(pnl, ncn_keys)
#---sizer 4 ----
TEXT="Location name:"
self.bSizer4 = pw.labeled_text_field(pnl, TEXT)
#---sizer 6 ----
self.bSizer6 = pw.site_lat_lon(pnl)
#---sizer 5 ----
self.bSizer5 = pw.replicate_measurements(pnl)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
hbox0 = wx.BoxSizer(wx.HORIZONTAL)
hbox0.AddMany([(self.bSizer0a,wx.ALIGN_LEFT|wx.TOP), (self.bSizer0b,wx.ALIGN_LEFT|wx.TOP)])
vbox.AddSpacer(10)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1d, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1c, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1b, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_check_joides(self, event):
if self.bSizer0b.cb.IsChecked():
self.bSizer0a.ShowItems(False)
self.bSizer1.ShowItems(False)
self.bSizer1a.ShowItems(False)
self.bSizer2.ShowItems(False)
self.bSizer3.ShowItems(False)
self.bSizer4.ShowItems(False)
self.bSizer1b.ShowItems(True)
self.bSizer1c.ShowItems(True)
self.bSizer1d.ShowItems(True)
else:
self.bSizer1b.ShowItems(False)
self.bSizer1c.ShowItems(False)
self.bSizer1d.ShowItems(False)
self.bSizer0a.ShowItems(True)
self.bSizer1.ShowItems(True)
self.bSizer1a.ShowItems(True)
self.bSizer2.ShowItems(True)
self.bSizer3.ShowItems(True)
self.bSizer4.ShowItems(True)
self.panel.Layout()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_add_sampfile_button(self, event):
text = "choose er_samples type file"
pw.on_add_file_button(self.bSizer0c, text)
def on_okButton(self, event):
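# determine the input format (.txt, .jr6, or IODP Joides Resolution), collect and validate the options, then run the matching converter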
samp_file = ''
options = {}
input_format = self.bSizer0a.return_value()
JR = self.bSizer0b.return_value()
if input_format:
input_format = 'txt'
else:
input_format = 'jr6'
output_dir_path = self.WD
options['dir_path'] = str(output_dir_path)
input_dir_path, mag_file = os.path.split(self.bSizer0.return_value())
if not mag_file:
pw.simple_warning("You must select a JR6 format file")
return False
options['input_dir_path'], options['mag_file'] = str(input_dir_path), str(mag_file)
meas_file = os.path.split(mag_file)[1]+".magic"
options['meas_file'] = str(meas_file)
spec_file = os.path.split(mag_file)[1]+"_specimens.txt"
options['spec_file'] = str(spec_file)
samp_file = os.path.split(mag_file)[1]+"_samples.txt"
options['samp_file'] = str(samp_file)
site_file = os.path.split(mag_file)[1]+"_sites.txt"
options['site_file'] = str(site_file)
loc_file = os.path.split(mag_file)[1]+"_locations.txt"
options['loc_file'] = str(loc_file)
specnum = self.bSizer2.return_value()
options['specnum'] = specnum
samp_con = self.bSizer3.return_value()
options['samp_con'] = samp_con
user = self.bSizer1b.return_value()
options['user'] = str(user)
location = self.bSizer4.return_value()
if location!='':
options['location'] = str(location)
expedition = self.bSizer1c.return_value()
options['expedition'] = str(expedition)
site = self.bSizer1d.return_value()
options['site'] = str(site)
average = self.bSizer5.return_value()
if average:
noave = 0
else:
noave = 1
options['noave'] = noave
meth_code = self.bSizer1.return_value()
options['meth_code'] = meth_code
try: lat,lon = self.bSizer6.return_value().split()
except ValueError: lat,lon = '',''
options['lat'] = lat
options['lon'] = lon
lat,lon = '-lat '+str(lat), '-lon '+str(lon)
volume = self.bSizer1a.return_value()
os.chdir(self.WD)
COMMAND = ""
# validate arguments;
if volume!='':
try:
volume = float(volume)
options['volume'] = volume
except:
pw.simple_warning("You must provide a valid quanity for volume, or no volume")
return False
# validate file type and run jr6_magic:
if not JR:
if 'jr6' in input_format and 'jr6' not in mag_file.lower():
pw.simple_warning("You must provide a .jr6 format file")
return False
elif 'txt' in input_format and 'txt' not in mag_file.lower():
pw.simple_warning("You must provide a .txt format file")
return False
if input_format == 'txt': # .txt format
program_ran, error_message = jr6_txt_magic.convert(**options)
if program_ran:
COMMAND = "options={}\njr6_txt_magic.convert(**options)".format(str(options))
pw.close_window(self, COMMAND, meas_file)
else:
pw.simple_warning(error_message)
else:
program_ran, error_message = jr6_jr6_magic.convert(**options)
if program_ran:
COMMAND = "options={}\njr6_jr6_magic.convert(**options)".format(str(options))
pw.close_window(self, COMMAND, meas_file)
else:
pw.simple_warning(error_message)
else: # Joides Resolution
if not mag_file:
pw.simple_warning('You must provide a valid IODP JR6 file')
program_ran, error_message = iodp_jr6_magic.convert(**options)
if program_ran:
COMMAND = "options={}\niodp_jr6_magic.convert(**options)".format(str(options))
pw.close_window(self, COMMAND, meas_file)
else:
pw.simple_warning(error_message)
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
input_format = self.bSizer0a.return_value()
if input_format:
input_format = 'txt'
else:
input_format = 'jr6'
if input_format == 'txt': # .txt format
pw.on_helpButton(text=jr6_txt_magic.do_help())
else:
pw.on_helpButton(text=jr6_jr6_magic.do_help())
class convert_BGC_files_to_magic(wx.Frame):
""" """
title = "PmagPy BGC file conversion"
def __init__(self, parent, WD, title):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
text = "convert Berkeley Geochronology Center file to MagIC format"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=text), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1a ----
self.bSizer1a = pw.labeled_text_field(pnl, 'User (Optional):')
#---sizer 1 ----
self.bSizer1 = pw.labeled_text_field(pnl, 'Location name:')
#---sizer 2 ----
self.bSizer2 = pw.labeled_text_field(pnl, 'Site name (if using the naming convention below, leave blank):')
# sitename
#---sizer 3 ----
self.bSizer3 = pw.sampling_particulars(pnl)
# meth codes
#---sizer 4 ----
self.bSizer4 = pw.replicate_measurements(pnl)
# average replicates
#---sizer 5 ---
self.bSizer5 = pw.labeled_text_field(pnl, 'Provide specimen volume in cubic centimeters\nNote: the volume given in the data file will be used unless it equals 0.0')
#---sizer 6 ----
self.bSizer6 = pw.select_ncn(pnl)
#---sizer 7 ----
TEXT = "specify number of characters to designate a specimen, default = 0"
self.bSizer7 = pw.specimen_n(pnl)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.AddSpacer(10)
#vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
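# assemble the BGC converter options from the GUI widgets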
os.chdir(self.WD)
options = {}
full_file = self.bSizer0.return_value()
ID, infile = os.path.split(full_file)
options['dir_path'] = self.WD
options['input_dir_path'] = ID
options['mag_file'] = infile
outfile = infile + ".magic"
options['meas_file'] = outfile
spec_outfile = infile + "_specimens.txt"
options['spec_file'] = spec_outfile
samp_outfile = infile + "_samples.txt"
options['samp_file'] = samp_outfile
site_outfile = infile + "_sites.txt"
options['site_file'] = site_outfile
loc_outfile = infile + "_locations.txt"
options['loc_file'] = loc_outfile
user = str(self.bSizer1a.return_value())
options['user'] = str(user)
loc_name = str(self.bSizer1.return_value())
options['location'] = str(loc_name)
site_name = self.bSizer2.return_value()
if site_name!='': options['site'] = str(site_name)
spec_num = self.bSizer7.return_value()
options['specnum'] = spec_num
if spec_num:
spec_num = "-spc " + str(spec_num)
else:
spec_num = "-spc 0" # defaults to 0 if user doesn't choose number
ncn = self.bSizer6.return_value()
options['samp_con'] = ncn
meth_code = self.bSizer3.return_value()
options['meth_code'] = meth_code
average = self.bSizer4.return_value()
options['noave'] = average
volume = self.bSizer5.return_value()
if volume:
try:
options['volume'] = float(volume)
except ValueError:
pw.simple_warning('You must provide a valid numerical value for specimen volume')
return False
for key, value in list(options.items()):
print(key, value)
COMMAND = "options = {}\nbgc_magic.convert(**options)".format(str(options))
if infile=='':
all_files=[f for f in os.listdir('.') if os.path.isfile(f)]
outfiles=[]
for infile in all_files:
options['mag_file'] = infile
outfile = infile + ".magic"
options['meas_file'] = outfile
spec_outfile = infile + "_specimens.txt"
options['spec_file'] = spec_outfile
samp_outfile = infile + "_samples.txt"
options['samp_file'] = samp_outfile
site_outfile = infile + "_sites.txt"
options['site_file'] = site_outfile
loc_outfile = infile + "_locations.txt"
options['loc_file'] = loc_outfile
try: program_ran, error_message = bgc_magic.convert(**options)
except IndexError: continue
if program_ran: outfiles.append(outfile)
outfile=str(outfiles)
else: program_ran, error_message = bgc_magic.convert(**options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=bgc_magic.do_help())
class convert_Utrecht_files_to_MagIC(convert_files_to_MagIC):
"""
A GUI which allows easy input of metadata required to convert Utrecht
Magnetometer files into MagIC format for analysis or contribution to the
EarthRef MagIC Archive.
"""
def InitUI(self):
"""
Override of InitUI in parent class convert_files_to_MagIC.
Creates UI for input of relevant data to convert Utrecht files to MagIC.
"""
pnl = self.panel
TEXT = "Convert Utrecht Magnetometer file format"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
self.bSizer1 = pw.sampling_particulars(pnl)
#---sizer 2 ----
self.bSizer2 = pw.select_ncn(pnl)
#---sizer 3 ----
TEXT = "specify number of characters to designate a specimen, default = 0"
self.bSizer3 = pw.specimen_n(pnl)
#---sizer 4 ----
TEXT="Location name:"
self.bSizer4 = pw.labeled_text_field(pnl, TEXT)
#---sizer 5 ---
self.bSizer5 = pw.replicate_measurements(pnl)
#---sizer 6 ----
self.bSizer6 = pw.lab_field(pnl)
#---sizer 7 ---
TEXT= "use the European date format (dd/mm/yyyy)"
self.bSizer7 = pw.check_box(pnl, TEXT)
#---sizer 8 ---
self.bSizer8 = pw.site_lat_lon(pnl)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_okButton(self, event):
"""
Compiles information entered in the GUI into a kwargs dictionary which can
be passed into the utrecht_magic script and run to output MagIC files
"""
os.chdir(self.WD)
options_dict = {}
wd = self.WD
options_dict['dir_path'] = wd
full_file = self.bSizer0.return_value()
if not full_file:
pw.simple_warning('You must provide a Utrecht format file')
return False
input_directory, Utrecht_file = os.path.split(full_file)
options_dict['mag_file'] = Utrecht_file
options_dict['input_dir_path'] = input_directory
if input_directory:
ID = "-ID " + input_directory
else:
ID = ''
outfile = Utrecht_file + ".magic"
options_dict['meas_file'] = outfile
spec_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_specimens.txt"
options_dict['spec_file'] = spec_outfile
samp_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_samples.txt"
options_dict['samp_file'] = samp_outfile
site_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_sites.txt"
options_dict['site_file'] = site_outfile
loc_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_locations.txt"
options_dict['loc_file'] = loc_outfile
dc_flag,dc_params = '',''
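# optional DC lab field is entered as 'magnitude phi theta'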
if self.bSizer6.return_value() != '':
dc_params = list(map(float,self.bSizer6.return_value().split()))
options_dict['lab_field'] = dc_params[0]
options_dict['phi'] = dc_params[1]
options_dict['theta'] = dc_params[2]
dc_flag = '-dc ' + self.bSizer6.return_value()
spec_num = self.bSizer3.return_value()
options_dict['specnum'] = spec_num
if spec_num:
spec_num = "-spc " + str(spec_num)
else:
spec_num = "-spc 0" # defaults to 0 if user doesn't choose number
loc_name = self.bSizer4.return_value()
options_dict['location'] = loc_name
if loc_name:
loc_name = "-loc " + loc_name
ncn = self.bSizer2.return_value()
options_dict['samp_con'] = ncn
particulars = self.bSizer1.return_value()
options_dict['meth_code'] = particulars
if particulars:
particulars = "-mcd " + particulars
euro_date = self.bSizer7.return_value()
if euro_date: options_dict['dmy_flag'] = True; dmy_flag='-dmy'
else: options_dict['dmy_flag'] = False; dmy_flag=''
try: lat,lon = self.bSizer8.return_value().split()
except ValueError: lat,lon = '',''
options_dict['lat'] = lat
options_dict['lon'] = lon
replicate = self.bSizer5.return_value()
if replicate:
options_dict['avg'] = False
replicate = ''
else:
options_dict['avg'] = True
replicate = '-A'
COMMAND = "utrecht_magic.py -WD {} -f {} -F {} {} {} {} -ncn {} {} -Fsp {} -Fsa {} -Fsi {} -Flo {} {} {} {} -lat {} -lon {}".format(wd, Utrecht_file, outfile, particulars, spec_num, loc_name, ncn, ID, spec_outfile, samp_outfile, site_outfile, loc_outfile, replicate, dc_flag, dmy_flag, lon, lat)
# to run as module:
program_ran, error_message = utrecht_magic.convert(**options_dict)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
def on_helpButton(self, event):
"""
Displays utrecht_magic scripts help message
"""
pw.on_helpButton(text=utrecht_magic.do_help())
# template for an import window
class something(wx.Frame):
""" """
def InitUI(self):
pnl = self.panel
text = "Hello here is a bunch of text"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=text), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
#---sizer 2 ----
#---sizer 3 ----
#---sizer 4 ----
#---sizer 5 ---
#---sizer 6 ----
#---sizer 7 ---
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.AddSpacer(10)
#vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all= wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
hbox_all.AddSpacer(20)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Centre()
self.Show()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, self.WD, event, text)
def on_okButton(self, event):
os.chdir(self.WD)
COMMAND = ""
pw.run_command_and_close_window(self, COMMAND, outfile)
def on_helpButton(self, event):
pw.on_helpButton(text='')
#=================================================================
# demag_orient:
# read/write demag_orient.txt
# calculate sample orientation
#=================================================================
class OrientFrameGrid3(wx.Frame):
def __init__(self, parent, id, title, WD, contribution, size):
wx.Frame.__init__(self, parent, -1, title, size=size,
name='calculate geographic directions')
#--------------------
# initialize stuff
#--------------------
if sys.platform in ['win32', 'win64']:
self.panel = wx.ScrolledWindow(self, style=wx.SIMPLE_BORDER|wx.ALWAYS_SHOW_SB)
else:
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.WD = WD
#self.Data_hierarchy = Data_hierarchy
self.contribution = contribution
# contribution has already propagated measurement data...
if 'samples' not in self.contribution.tables:
print('-E- No sample data available')
samples_name_list = []
else:
samples_name_list = self.contribution.tables['samples'].df.index.unique()
self.orient_data = {}
try:
fname = os.path.join(self.WD, "demag_orient.txt")
self.orient_data, dtype, keys = pmag.magic_read_dict(fname, sort_by_this_name="sample_name",
return_keys=True)
except Exception as ex:
print("-W-", ex)
#pass
# self.headers is a list of two-item tuples.
#the first is the proper column name as understood by orientation_magic.py
# the second is the name for display in the GUI
self.header_display_names = ["sample_name", "sample_orientation_flag", "mag_azimuth",
"field_dip", "bedding_dip_direction", "bedding_dip",
"shadow_angle", "latitude", "longitude", "mm/dd/yy",
"hh:mm", "GPS_baseline", "GPS_Az", "magic_method_codes"]
self.header_names = ["sample_name", "sample_orientation_flag", "mag_azimuth",
"field_dip", "bedding_dip_direction", "bedding_dip",
"shadow_angle", "lat", "long", "date",
"hhmm", "GPS_baseline", "GPS_Az", "magic_method_codes"]
self.headers = list(zip(self.header_names, self.header_display_names))
# get sample table and convert relevant headers to orient.txt format
if (not self.orient_data) and ('samples' in self.contribution.tables):
print("-I- Couldn't find demag_orient.txt, trying to extract information from samples table")
samp_container = self.contribution.tables['samples']
raw_orient_data = samp_container.convert_to_pmag_data_list("dict")
# convert from 3.0. headers to orient.txt headers
self.orient_data = {}
for key, rec in list(raw_orient_data.items()):
self.orient_data[key] = map_magic.mapping(rec, map_magic.magic3_2_orient_magic_map)
self.create_sheet()
TEXT = """
A template for a file named 'demag_orient.txt', which contains sample orientation data, was created in the MagIC working directory.
You can view/modify demag_orient.txt using this Python frame, or you can use Excel/Open Office.
If you use Excel, save the file as 'tab delimited' and then use the 'Import Orientation File' button below to import the data into Pmag GUI.
If you use the Python frame, you can edit all the values in a column by clicking on the column header and then entering your desired value.
After orientation data is filled in, you can Calculate sample orientations.
"""
label = wx.StaticText(self.panel, label=TEXT)
btn_box = wx.BoxSizer(wx.HORIZONTAL)
save_btn = wx.Button(self.panel, wx.ID_ANY, "Save Orientation File")
self.Bind(wx.EVT_BUTTON, self.on_m_save_file, save_btn)
import_btn = wx.Button(self.panel, wx.ID_ANY, "Import Orientation File")
self.Bind(wx.EVT_BUTTON, self.on_m_open_file, import_btn)
calculate_btn = wx.Button(self.panel, wx.ID_ANY, "Calculate Sample Orientations")
self.Bind(wx.EVT_BUTTON, self.on_m_calc_orient, calculate_btn)
btn_box.Add(save_btn)
btn_box.Add(import_btn, flag=wx.LEFT, border=5)
btn_box.Add(calculate_btn, flag=wx.LEFT, border=5)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(label, flag=wx.CENTRE)
self.vbox.Add(btn_box, flag=wx.CENTRE)
self.vbox.Add(self.grid, flag=wx.ALL, border=20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.Add(self.vbox)
if sys.platform in ['win32', 'win64']:
self.panel.SetScrollbars(20, 20, 50, 50)
self.panel.SetSizer(self.hbox_all)
self.hbox_all.Fit(self)
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
# save the template
self.on_m_save_file(None)
self.Centre()
self.Show()
def create_sheet(self):
'''
create an editable grid showing demag_orient.txt
'''
#--------------------------------
# orient.txt supports many other headers
# but we will only initialize with
# the essential headers for
# sample orientation and headers present
# in existing demag_orient.txt file
#--------------------------------
#--------------------------------
# create the grid
#--------------------------------
samples_list = list(self.orient_data.keys())
samples_list.sort()
self.samples_list = [sample for sample in samples_list if sample != ""]
#self.headers.extend(self.add_extra_headers(samples_list))
display_headers = [header[1] for header in self.headers]
self.grid = magic_grid.MagicGrid(self.panel, 'orient grid',
self.samples_list, display_headers)
self.grid.InitUI()
#--------------------------------
# color the columns by groups
#--------------------------------
for i in range(len(self.samples_list)):
self.grid.SetCellBackgroundColour(i, 0, "LIGHT GREY")
self.grid.SetCellBackgroundColour(i, 1, "LIGHT STEEL BLUE")
self.grid.SetCellBackgroundColour(i, 2, "YELLOW")
self.grid.SetCellBackgroundColour(i, 3, "YELLOW")
self.grid.SetCellBackgroundColour(i, 4, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 5, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 6, "KHAKI")
self.grid.SetCellBackgroundColour(i, 7, "KHAKI")
self.grid.SetCellBackgroundColour(i, 8, "KHAKI")
self.grid.SetCellBackgroundColour(i, 9, "KHAKI")
self.grid.SetCellBackgroundColour(i, 10, "KHAKI")
self.grid.SetCellBackgroundColour(i, 11, "LIGHT MAGENTA")
self.grid.SetCellBackgroundColour(i, 12, "LIGHT MAGENTA")
#--------------------------------
# fill data from self.orient_data
#--------------------------------
headers = [header[0] for header in self.headers]
for sample in self.samples_list:
for key in list(self.orient_data[sample].keys()):
if key in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(key)
val = str(self.orient_data[sample][key])
# if it's a pmag_object, use its name
try:
val = val.name
except AttributeError:
pass
self.grid.SetCellValue(sample_index, i, val)
#--------------------------------
#--------------------------------
# fill in some default values
#--------------------------------
for row in range(self.grid.GetNumberRows()):
col = 1
if not self.grid.GetCellValue(row, col):
self.grid.SetCellValue(row, col, 'g')
#--------------------------------
# temporary trick to get drop-down-menus to work
self.grid.changes = {'a'}
self.grid.AutoSize()
#self.drop_down_menu = drop_down_menus.Menus("orient", self, self.grid, '')
self.drop_down_menu = drop_down_menus3.Menus("orient", self.contribution, self.grid)
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
def update_sheet(self):
self.grid.Destroy()
self.create_sheet()
self.vbox.Add(self.grid, flag=wx.ALL, border=20)
#self.Hide()
#self.Show()
self.hbox_all.Fit(self.panel)
#self.panel.Refresh()
self.Hide()
self.Show()
def onLeftClickLabel(self, event):
"""
When user clicks on a grid label, determine if it is a row label or a col label.
Pass along the event to the appropriate function.
(It will either highlight a column for editing all values, or highlight a row for deletion).
"""
#if event.Col == -1 and event.Row == -1:
# pass
#elif event.Col < 0:
# self.onSelectRow(event)
if event.Row < 0:
self.drop_down_menu.on_label_click(event)
def on_m_open_file(self,event):
'''
open orient.txt
read the data
display the data from the file in a new grid
'''
dlg = wx.FileDialog(
self, message="choose orient file",
defaultDir=self.WD,
defaultFile="",
style=wx.OPEN | wx.CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
orient_file = dlg.GetPath()
dlg.Destroy()
new_data, dtype, keys = pmag.magic_read_dict(orient_file,
sort_by_this_name="sample_name",
return_keys=True)
if len(new_data) > 0:
self.orient_data={}
self.orient_data=new_data
#self.create_sheet()
self.update_sheet()
print("-I- If you don't see a change in the spreadsheet, you may need to manually re-size the window")
def on_m_save_file(self,event):
'''
save demag_orient.txt
(only the columns that appear on the grid frame)
'''
fout = open(os.path.join(self.WD, "demag_orient.txt"), 'w')
STR = "tab\tdemag_orient\n"
fout.write(STR)
headers = [header[0] for header in self.headers]
STR = "\t".join(headers) + "\n"
fout.write(STR)
for sample in self.samples_list:
STR = ""
for header in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(header)
value = self.grid.GetCellValue(sample_index, i)
STR = STR + value + "\t"
fout.write(STR[:-1] + "\n")
        if event is not None:
dlg1 = wx.MessageDialog(None,caption="Message:", message="data saved in file demag_orient.txt" ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
def on_m_calc_orient(self,event):
'''
        This function does exactly what the 'import orientation' function does in MagIC.py:
        after some dialog boxes, the function calls orientation_magic.py
'''
        # first save the current grid contents to demag_orient.txt
self.on_m_save_file(None)
orient_convention_dia = orient_convention(None)
orient_convention_dia.Center()
#orient_convention_dia.ShowModal()
if orient_convention_dia.ShowModal() == wx.ID_OK:
ocn_flag = orient_convention_dia.ocn_flag
dcn_flag = orient_convention_dia.dcn_flag
gmt_flags = orient_convention_dia.gmt_flags
orient_convention_dia.Destroy()
else:
return
or_con = orient_convention_dia.ocn
dec_correction_con = int(orient_convention_dia.dcn)
try:
hours_from_gmt = float(orient_convention_dia.gmt)
except:
hours_from_gmt = 0
try:
dec_correction = float(orient_convention_dia.correct_dec)
except:
dec_correction = 0
method_code_dia=method_code_dialog(None)
method_code_dia.Center()
if method_code_dia.ShowModal() == wx.ID_OK:
bedding_codes_flags=method_code_dia.bedding_codes_flags
methodcodes_flags=method_code_dia.methodcodes_flags
method_code_dia.Destroy()
else:
print("-I- Canceling calculation")
return
method_codes = method_code_dia.methodcodes
average_bedding = method_code_dia.average_bedding
bed_correction = method_code_dia.bed_correction
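        # build the equivalent command line for logging purposes only;
        # the actual calculation below is done by calling ipmag.orientation_magic directly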
command_args=['orientation_magic.py']
command_args.append("-WD %s"%self.WD)
command_args.append("-Fsa er_samples_orient.txt")
command_args.append("-Fsi er_sites_orient.txt ")
command_args.append("-f %s"%"demag_orient.txt")
command_args.append(ocn_flag)
command_args.append(dcn_flag)
command_args.append(gmt_flags)
command_args.append(bedding_codes_flags)
command_args.append(methodcodes_flags)
commandline = " ".join(command_args)
print("-I- executing command: %s" %commandline)
os.chdir(self.WD)
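        # append to er_samples.txt / er_sites.txt if they already exist in the working directory,
        # otherwise create new files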
if os.path.exists(os.path.join(self.WD, 'er_samples.txt')) or os.path.exists(os.path.join(self.WD, 'er_sites.txt')):
append = True
else:
append = False
samp_file = "er_samples.txt"
site_file = "er_sites.txt"
success, error_message = ipmag.orientation_magic(or_con, dec_correction_con, dec_correction,
bed_correction, hours_from_gmt=hours_from_gmt,
method_codes=method_codes, average_bedding=average_bedding,
orient_file='demag_orient.txt', samp_file=samp_file,
site_file=site_file, input_dir_path=self.WD,
output_dir_path=self.WD, append=append, data_model=3)
if not success:
dlg1 = wx.MessageDialog(None,caption="Message:", message="-E- ERROR: Error in running orientation_magic.py\n{}".format(error_message) ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
print("-E- ERROR: Error in running orientation_magic.py")
return
else:
dlg2 = wx.MessageDialog(None,caption="Message:", message="-I- Successfully ran orientation_magic", style=wx.OK|wx.ICON_INFORMATION)
dlg2.ShowModal()
dlg2.Destroy()
self.Parent.Show()
self.Parent.Raise()
self.Destroy()
self.contribution.add_magic_table('samples')
return
def OnCloseWindow(self,event):
dlg1 = wx.MessageDialog(self,caption="Message:", message="Save changes to demag_orient.txt?\n " ,style=wx.OK|wx.CANCEL)
result = dlg1.ShowModal()
if result == wx.ID_OK:
self.on_m_save_file(None)
dlg1.Destroy()
self.Parent.Show()
self.Parent.Raise()
self.Destroy()
if result == wx.ID_CANCEL:
dlg1.Destroy()
self.Parent.Show()
self.Parent.Raise()
self.Destroy()
class OrientFrameGrid(wx.Frame):
def __init__(self, parent, id, title, WD, ErMagic, size):
wx.Frame.__init__(self, parent, -1, title, size=size, name='calculate geographic directions')
#--------------------
# initialize stuff
#--------------------
if sys.platform in ['win32', 'win64']:
self.panel = wx.ScrolledWindow(self, style=wx.SIMPLE_BORDER|wx.ALWAYS_SHOW_SB)
else:
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.WD = WD
#self.Data_hierarchy = Data_hierarchy
self.er_magic_data = ErMagic
self.grid = None
#--------------------
# get the orientation data
# 1) from file demag_orient.txt
# 2) from Data_hierarchy
# and save it to self.orient_data={}
#--------------------
# self.headers is a list of two-item tuples.
#the first is the proper column name as understood by orientation_magic.py
# the second is the name for display in the GUI
self.header_display_names = ["sample_name", "sample_orientation_flag", "mag_azimuth",
"field_dip", "bedding_dip_direction", "bedding_dip",
"shadow_angle", "latitude", "longitude", "mm/dd/yy",
"hh:mm", "GPS_baseline", "GPS_Az", "magic_method_codes"]
self.header_names = ["sample_name", "sample_orientation_flag", "mag_azimuth",
"field_dip", "bedding_dip_direction", "bedding_dip",
"shadow_angle", "lat", "long", "date",
"hhmm", "GPS_baseline", "GPS_Az", "magic_method_codes"]
self.headers = list(zip(self.header_names, self.header_display_names))
empty = True
self.er_magic_data.get_data()
samples_name_list = self.er_magic_data.make_name_list(self.er_magic_data.samples)
self.orient_data = {}
try:
self.orient_data = self.er_magic_data.read_magic_file(os.path.join(self.WD, "demag_orient.txt"), "sample_name")[0]
except Exception as ex:
print("-W-", ex)
#pass
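        # make sure every sample known to the data hierarchy has an entry,
        # even if it is missing from demag_orient.txt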
for sample_name in samples_name_list:
if sample_name not in list(self.orient_data.keys()):
sample = self.er_magic_data.find_by_name(sample_name, self.er_magic_data.samples)
self.orient_data[sample_name]={}
self.orient_data[sample_name]["sample_name"] = sample_name
if sample:
val = sample.site
# if it is a pmag_object, use its name
try:
val = val.name
except AttributeError:
pass
self.orient_data[sample_name]["site_name"] = val
else:
self.orient_data[sample_name]["site_name"] = ''
#--------------------
# create the grid sheet
#--------------------
self.create_sheet()
TEXT = """
A template for a file named 'demag_orient.txt', which contains sample orientation data, was created in the MagIC working directory.
You can view/modify demag_orient.txt using this Python frame, or you can use Excel/Open Office.
If you use Excel, save the file as 'tab delimited' and then use the 'Import Orientation File' button below to import the data into Pmag GUI.
If you use the Python frame, you can edit all the values in a column by clicking on the column header and then entering your desired value.
After orientation data is filled in, you can Calculate sample orientations.
"""
label = wx.StaticText(self.panel, label=TEXT)
btn_box = wx.BoxSizer(wx.HORIZONTAL)
save_btn = wx.Button(self.panel, wx.ID_ANY, "Save Orientation File")
self.Bind(wx.EVT_BUTTON, self.on_m_save_file, save_btn)
import_btn = wx.Button(self.panel, wx.ID_ANY, "Import Orientation File")
self.Bind(wx.EVT_BUTTON, self.on_m_open_file, import_btn)
calculate_btn = wx.Button(self.panel, wx.ID_ANY, "Calculate Sample Orientations")
self.Bind(wx.EVT_BUTTON, self.on_m_calc_orient, calculate_btn)
btn_box.Add(save_btn)
btn_box.Add(import_btn, flag=wx.LEFT, border=5)
btn_box.Add(calculate_btn, flag=wx.LEFT, border=5)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(label, flag=wx.CENTRE)
self.vbox.Add(btn_box, flag=wx.CENTRE)
self.vbox.Add(self.grid, flag=wx.ALL, border=20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.Add(self.vbox)
if sys.platform in ['win32', 'win64']:
self.panel.SetScrollbars(20, 20, 50, 50)
self.panel.SetSizer(self.hbox_all)
self.hbox_all.Fit(self)
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
# save the template
self.on_m_save_file(None)
self.Centre()
self.Show()
def add_extra_headers(self, sample_names):
"""
If there are samples, add any additional keys they might use
to supplement the default headers.
        Return the headers to add, in the format:
[(header_name, header_display_name), ....]
"""
if not sample_names:
return []
full_headers = list(self.orient_data[sample_names[0]].keys())
add_ons = []
for head in full_headers:
if head not in self.header_names:
add_ons.append((head, head))
return add_ons
def create_sheet(self):
'''
create an editable grid showing demag_orient.txt
'''
#--------------------------------
# orient.txt supports many other headers
# but we will only initialize with
# the essential headers for
# sample orientation and headers present
# in existing demag_orient.txt file
#--------------------------------
#--------------------------------
# create the grid
#--------------------------------
#print "self.orient_data", self.orient_data
samples_list = list(self.orient_data.keys())
samples_list.sort()
        self.samples_list = [sample for sample in samples_list if sample != ""]
self.headers.extend(self.add_extra_headers(samples_list))
display_headers = [header[1] for header in self.headers]
self.grid = magic_grid.MagicGrid(self.panel, 'orient grid',
self.samples_list, display_headers)
self.grid.InitUI()
#--------------------------------
# color the columns by groups
#--------------------------------
for i in range(len(self.samples_list)):
self.grid.SetCellBackgroundColour(i, 0, "LIGHT GREY")
self.grid.SetCellBackgroundColour(i, 1, "LIGHT STEEL BLUE")
self.grid.SetCellBackgroundColour(i, 2, "YELLOW")
self.grid.SetCellBackgroundColour(i, 3, "YELLOW")
self.grid.SetCellBackgroundColour(i, 4, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 5, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 6, "KHAKI")
self.grid.SetCellBackgroundColour(i, 7, "KHAKI")
self.grid.SetCellBackgroundColour(i, 8, "KHAKI")
self.grid.SetCellBackgroundColour(i, 9, "KHAKI")
self.grid.SetCellBackgroundColour(i, 10, "KHAKI")
self.grid.SetCellBackgroundColour(i, 11, "LIGHT MAGENTA")
self.grid.SetCellBackgroundColour(i, 12, "LIGHT MAGENTA")
#--------------------------------
# fill data from self.orient_data
#--------------------------------
headers = [header[0] for header in self.headers]
for sample in self.samples_list:
for key in list(self.orient_data[sample].keys()):
if key in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(key)
val = str(self.orient_data[sample][key])
# if it's a pmag_object, use its name
try:
val = val.name
except AttributeError:
pass
self.grid.SetCellValue(sample_index, i, val)
#--------------------------------
#--------------------------------
# fill in some default values
#--------------------------------
for row in range(self.grid.GetNumberRows()):
col = 1
if not self.grid.GetCellValue(row, col):
self.grid.SetCellValue(row, col, 'g')
#--------------------------------
# temporary trick to get drop-down-menus to work
self.grid.changes = {'a'}
self.grid.AutoSize()
self.drop_down_menu = drop_down_menus.Menus("orient", self, self.grid, '')
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
def update_sheet(self):
self.grid.Destroy()
self.create_sheet()
self.vbox.Add(self.grid, flag=wx.ALL, border=20)
#self.Hide()
#self.Show()
self.hbox_all.Fit(self.panel)
#self.panel.Refresh()
self.Hide()
self.Show()
def onLeftClickLabel(self, event):
"""
When user clicks on a grid label, determine if it is a row label or a col label.
Pass along the event to the appropriate function.
(It will either highlight a column for editing all values, or highlight a row for deletion).
"""
#if event.Col == -1 and event.Row == -1:
# pass
#elif event.Col < 0:
# self.onSelectRow(event)
if event.Row < 0:
self.drop_down_menu.on_label_click(event)
def on_m_open_file(self,event):
'''
open orient.txt
read the data
display the data from the file in a new grid
'''
dlg = wx.FileDialog(
self, message="choose orient file",
defaultDir=self.WD,
defaultFile="",
style=wx.OPEN | wx.CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
orient_file = dlg.GetPath()
dlg.Destroy()
new_data = self.er_magic_data.read_magic_file(orient_file, "sample_name")[0]
if len(new_data) > 0:
self.orient_data={}
self.orient_data=new_data
#self.create_sheet()
self.update_sheet()
print("-I- If you don't see a change in the spreadsheet, you may need to manually re-size the window")
def on_m_save_file(self,event):
'''
save demag_orient.txt
(only the columns that appear on the grid frame)
'''
fout = open(os.path.join(self.WD, "demag_orient.txt"), 'w')
STR = "tab\tdemag_orient\n"
fout.write(STR)
headers = [header[0] for header in self.headers]
STR = "\t".join(headers) + "\n"
fout.write(STR)
for sample in self.samples_list:
STR = ""
for header in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(header)
value = self.grid.GetCellValue(sample_index, i)
STR = STR + value + "\t"
fout.write(STR[:-1] + "\n")
        if event is not None:
dlg1 = wx.MessageDialog(None,caption="Message:", message="data saved in file demag_orient.txt" ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
def on_m_calc_orient(self,event):
'''
        This function does exactly what the 'import orientation' function does in MagIC.py:
        after some dialog boxes, the function calls orientation_magic.py
'''
        # first save the current grid contents to demag_orient.txt
self.on_m_save_file(None)
orient_convention_dia = orient_convention(None)
orient_convention_dia.Center()
#orient_convention_dia.ShowModal()
if orient_convention_dia.ShowModal() == wx.ID_OK:
ocn_flag = orient_convention_dia.ocn_flag
dcn_flag = orient_convention_dia.dcn_flag
gmt_flags = orient_convention_dia.gmt_flags
orient_convention_dia.Destroy()
else:
return
or_con = orient_convention_dia.ocn
dec_correction_con = int(orient_convention_dia.dcn)
try:
hours_from_gmt = float(orient_convention_dia.gmt)
except:
hours_from_gmt = 0
try:
dec_correction = float(orient_convention_dia.correct_dec)
except:
dec_correction = 0
method_code_dia=method_code_dialog(None)
method_code_dia.Center()
if method_code_dia.ShowModal() == wx.ID_OK:
bedding_codes_flags=method_code_dia.bedding_codes_flags
methodcodes_flags=method_code_dia.methodcodes_flags
method_code_dia.Destroy()
else:
print("-I- Canceling calculation")
return
method_codes = method_code_dia.methodcodes
average_bedding = method_code_dia.average_bedding
bed_correction = method_code_dia.bed_correction
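        # build the equivalent command line for logging purposes only;
        # the actual calculation below is done by calling ipmag.orientation_magic directly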
command_args=['orientation_magic.py']
command_args.append("-WD %s"%self.WD)
command_args.append("-Fsa er_samples_orient.txt")
command_args.append("-Fsi er_sites_orient.txt ")
command_args.append("-f %s"%"demag_orient.txt")
command_args.append(ocn_flag)
command_args.append(dcn_flag)
command_args.append(gmt_flags)
command_args.append(bedding_codes_flags)
command_args.append(methodcodes_flags)
commandline = " ".join(command_args)
print("-I- executing command: %s" %commandline)
os.chdir(self.WD)
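        # append to er_samples.txt / er_sites.txt if they already exist in the working directory,
        # otherwise create new files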
if os.path.exists(os.path.join(self.WD, 'er_samples.txt')) or os.path.exists(os.path.join(self.WD, 'er_sites.txt')):
append = True
else:
append = False
samp_file = "er_samples.txt"
site_file = "er_sites.txt"
        ran_successfully, error_message = ipmag.orientation_magic(or_con, dec_correction_con, dec_correction,
                                                                   bed_correction, hours_from_gmt=hours_from_gmt,
                                                                   method_codes=method_codes, average_bedding=average_bedding,
                                                                   orient_file='demag_orient.txt', samp_file=samp_file,
                                                                   site_file=site_file, input_dir_path=self.WD,
                                                                   output_dir_path=self.WD, append=append)
if not ran_successfully:
dlg1 = wx.MessageDialog(None,caption="Message:", message="-E- ERROR: Error in running orientation_magic.py\n{}".format(error_message) ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
print("-E- ERROR: Error in running orientation_magic.py")
return
else:
dlg2 = wx.MessageDialog(None,caption="Message:", message="-I- Successfully ran orientation_magic", style=wx.OK|wx.ICON_INFORMATION)
dlg2.ShowModal()
dlg2.Destroy()
self.Parent.Show()
self.Parent.Raise()
self.Destroy()
return
def OnCloseWindow(self,event):
dlg1 = wx.MessageDialog(self,caption="Message:", message="Save changes to demag_orient.txt?\n " ,style=wx.OK|wx.CANCEL)
result = dlg1.ShowModal()
if result == wx.ID_OK:
self.on_m_save_file(None)
dlg1.Destroy()
self.Parent.Show()
self.Parent.Raise()
self.Destroy()
if result == wx.ID_CANCEL:
dlg1.Destroy()
self.Parent.Show()
self.Parent.Raise()
self.Destroy()
class orient_convention(wx.Dialog):
def __init__(self, *args, **kw):
super(orient_convention, self).__init__(*args, **kw)
self.InitUI()
#self.SetSize((250, 200))
self.SetTitle("set orientation convention")
def InitUI(self):
pnl = wx.Panel(self)
vbox=wx.BoxSizer(wx.VERTICAL)
#-----------------------
# orientation convention
#-----------------------
sbs = wx.StaticBoxSizer( wx.StaticBox( pnl, wx.ID_ANY, 'orientation convention' ), wx.VERTICAL )
sbs.AddSpacer(5)
self.oc_rb1 = wx.RadioButton(pnl, -1,label='Pomeroy: Lab arrow azimuth = mag_azimuth; Lab arrow dip=-field_dip (field_dip is hade)',name='1', style=wx.RB_GROUP)
sbs.Add(self.oc_rb1)
sbs.AddSpacer(5)
self.oc_rb2 = wx.RadioButton(pnl, -1, label='Lab arrow azimuth = mag_azimuth-90 (mag_azimuth is strike); Lab arrow dip = -field_dip', name='2')
sbs.Add(self.oc_rb2)
sbs.AddSpacer(5)
self.oc_rb3 = wx.RadioButton(pnl, -1, label='Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip (field_dip is inclination of lab arrow)', name='3')
sbs.Add(self.oc_rb3)
sbs.AddSpacer(5)
self.oc_rb4 = wx.RadioButton(pnl, -1, label='Lab arrow azimuth and dip are same as mag_azimuth, field_dip', name='4')
sbs.Add(self.oc_rb4)
sbs.AddSpacer(5)
self.oc_rb5 = wx.RadioButton(pnl, -1, label='ASC: Lab arrow azimuth and dip are mag_azimuth, field_dip-90 (field arrow is inclination of specimen Z direction)',name='5')
sbs.Add(self.oc_rb5)
sbs.AddSpacer(5)
self.oc_rb6 = wx.RadioButton(pnl, -1, label='Lab arrow azimuth = mag_azimuth-90 (mag_azimuth is strike); Lab arrow dip = 90-field_dip', name='6')
sbs.Add(self.oc_rb6)
sbs.AddSpacer(5)
#-----------------------
# declination correction
#-----------------------
sbs2 = wx.StaticBoxSizer( wx.StaticBox( pnl, wx.ID_ANY, 'declination correction' ), wx.VERTICAL )
hbox_dc1 = wx.BoxSizer(wx.HORIZONTAL)
sbs2.AddSpacer(5)
self.dc_rb1 = wx.RadioButton(pnl, -1, 'Use the IGRF DEC value at the lat/long and date supplied', (10, 50), style=wx.RB_GROUP)
self.dc_rb2 = wx.RadioButton(pnl, -1, 'Use this DEC:', (10, 50))
self.dc_tb2 = wx.TextCtrl(pnl,style=wx.CENTER)
self.dc_rb3 = wx.RadioButton(pnl, -1, 'DEC=0, mag_az is already corrected in file', (10, 50))
sbs2.Add(self.dc_rb1)
sbs2.AddSpacer(5)
hbox_dc1.Add(self.dc_rb2)
hbox_dc1.AddSpacer(5)
hbox_dc1.Add(self.dc_tb2)
sbs2.Add(hbox_dc1)
sbs2.AddSpacer(5)
sbs2.Add(self.dc_rb3)
sbs2.AddSpacer(5)
#-----------------------
        # orientation priority
#-----------------------
sbs3 = wx.StaticBoxSizer( wx.StaticBox( pnl, wx.ID_ANY, 'orientation priority' ), wx.VERTICAL )
sbs3.AddSpacer(5)
self.op_rb1 = wx.RadioButton(pnl, -1, label='1) sun compass 2) differential GPS 3) magnetic compass',
name='1', style=wx.RB_GROUP)
sbs3.Add(self.op_rb1)
sbs3.AddSpacer(5)
self.op_rb2 = wx.RadioButton(pnl, -1, label='1) differential GPS 2) magnetic compass 3) sun compass ',
name='2')
sbs3.Add(self.op_rb2)
sbs3.AddSpacer(5)
#-----------------------
# add local time for GMT
#-----------------------
sbs4 = wx.StaticBoxSizer( wx.StaticBox( pnl, wx.ID_ANY, 'add local time' ), wx.HORIZONTAL )
#hbox_alt = wx.BoxSizer(wx.HORIZONTAL)
sbs4.AddSpacer(5)
self.dc_alt = wx.TextCtrl(pnl,style=wx.CENTER)
alt_txt = wx.StaticText(pnl, label="Hours to SUBTRACT from local time for GMT, default is 0",
style=wx.TE_CENTER)
sbs4.Add(alt_txt)
sbs4.AddSpacer(5)
sbs4.Add(self.dc_alt)
#-----------------------
# OK button
#-----------------------
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(pnl, wx.ID_OK, "&OK")
self.Bind(wx.EVT_BUTTON, self.OnOK, self.okButton)
hbox2.Add(self.okButton)
self.cancelButton = wx.Button(pnl, wx.ID_CANCEL, "&Cancel")
self.Bind(wx.EVT_BUTTON, self.OnCancel, self.cancelButton)
hbox2.Add(self.cancelButton)
#-----------------------
# design the frame
#-----------------------
vbox.AddSpacer(10)
vbox.Add(sbs)
vbox.AddSpacer(10)
vbox.Add(sbs2)
vbox.AddSpacer(10)
vbox.Add(sbs3)
vbox.AddSpacer(10)
vbox.Add(sbs4)
vbox.AddSpacer(10)
vbox.Add(hbox2)
vbox.AddSpacer(10)
hbox1=wx.BoxSizer(wx.HORIZONTAL)
hbox1.AddSpacer(10)
hbox1.Add(vbox)
hbox1.AddSpacer(10)
pnl.SetSizer(hbox1)
hbox1.Fit(self)
#-----------------------
        # initialize default values
#-----------------------
self.oc_rb4.SetValue(True)
self.dc_rb1.SetValue(True)
self.op_rb1.SetValue(True)
def OnCancel(self, e):
self.EndModal(wx.ID_CANCEL)
def OnOK(self, e):
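        # record which orientation convention radio button (1-6) was selected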
self.ocn = ""
if self.oc_rb1.GetValue() == True:
self.ocn = "1"
if self.oc_rb2.GetValue() == True:
self.ocn="2"
if self.oc_rb3.GetValue() == True:
self.ocn="3"
if self.oc_rb4.GetValue() == True:
self.ocn = "4"
if self.oc_rb5.GetValue() == True:
self.ocn="5"
if self.oc_rb6.GetValue() == True:
self.ocn="6"
self.dcn = ""
self.correct_dec = ""
if self.dc_rb1.GetValue() == True:
self.dcn = "1"
if self.dc_rb2.GetValue() == True:
self.dcn="2"
try:
self.correct_dec = float(self.dc_tb2.GetValue())
except:
dlg1 = wx.MessageDialog(None, caption="Error:", message="Add declination", style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
if self.dc_rb3.GetValue()==True:
self.dcn = "3"
if self.op_rb1.GetValue() == True:
self.op = "1"
if self.op_rb2.GetValue() == True:
self.op = "2"
if self.dc_alt.GetValue() != "":
try:
self.gmt = float(self.dc_alt.GetValue())
gmt_flags = "-gmt " + self.dc_alt.GetValue()
except:
gmt_flags=""
else:
self.gmt = ""
gmt_flags = ""
#-------------
self.ocn_flag = "-ocn "+ self.ocn
self.dcn_flag = "-dcn "+ self.dcn
self.gmt_flags = gmt_flags
self.EndModal(wx.ID_OK)
#self.Close()
class method_code_dialog(wx.Dialog):
def __init__(self, *args, **kw):
super(method_code_dialog, self).__init__(*args, **kw)
self.InitUI()
self.SetTitle("additional required information")
def InitUI(self):
pnl = wx.Panel(self)
vbox=wx.BoxSizer(wx.VERTICAL)
#-----------------------
# MagIC codes
#-----------------------
sbs1 = wx.StaticBoxSizer( wx.StaticBox( pnl, wx.ID_ANY, 'MagIC codes' ), wx.VERTICAL )
self.cb1 = wx.CheckBox(pnl, -1, 'FS-FD: field sampling done with a drill')
self.cb2 = wx.CheckBox(pnl, -1, 'FS-H: field sampling done with hand sample')
self.cb3 = wx.CheckBox(pnl, -1, 'FS-LOC-GPS: field location done with GPS')
self.cb4 = wx.CheckBox(pnl, -1, 'FS-LOC-MAP: field location done with map')
self.cb5 = wx.CheckBox(pnl, -1, 'SO-POM: a Pomeroy orientation device was used')
self.cb6 = wx.CheckBox(pnl, -1, 'SO-ASC: an ASC orientation device was used')
self.cb7 = wx.CheckBox(pnl, -1, 'SO-MAG: magnetic compass used for all orientations')
self.cb8 = wx.CheckBox(pnl, -1, 'SO-SUN: sun compass used for all orientations')
self.cb9 = wx.CheckBox(pnl, -1, 'SO-SM: either magnetic or sun used on all orientations ')
self.cb10 = wx.CheckBox(pnl, -1, 'SO-SIGHT: orientation from sighting')
for cb in [self.cb1, self.cb2, self.cb3, self.cb4, self.cb5,
self.cb6, self.cb7, self.cb8, self.cb9, self.cb10]:
sbs1.Add(cb, flag=wx.BOTTOM, border=5)
#-----------------------
# Bedding convention
#-----------------------
sbs2 = wx.StaticBoxSizer(wx.StaticBox(pnl, wx.ID_ANY, 'bedding convention'), wx.VERTICAL)
self.bed_con1 = wx.CheckBox(pnl, -1, 'Take fisher mean of bedding poles?')
self.bed_con2 = wx.CheckBox(pnl, -1, "Don't correct bedding dip direction with declination - already correct")
sbs2.Add(self.bed_con1, flag=wx.BOTTOM, border=5)
sbs2.Add(self.bed_con2, flag=wx.BOTTOM, border=5)
#-----------------------
# OK button
#-----------------------
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(pnl, wx.ID_OK, "&OK")
self.Bind(wx.EVT_BUTTON, self.OnOK, self.okButton)
hbox2.Add(self.okButton)
self.cancelButton = wx.Button(pnl, wx.ID_CANCEL, "&Cancel")
self.Bind(wx.EVT_BUTTON, self.OnCancel, self.cancelButton)
hbox2.Add(self.cancelButton)
#-----------------------
# design the frame
#-----------------------
vbox.Add(sbs1)
vbox.AddSpacer(5)
vbox.Add(sbs2)
vbox.AddSpacer(5)
vbox.Add(hbox2)
vbox.AddSpacer(10)
hbox1=wx.BoxSizer(wx.HORIZONTAL)
hbox1.AddSpacer(10)
hbox1.Add(vbox)
hbox1.AddSpacer(10)
pnl.SetSizer(hbox1)
hbox1.Fit(self)
def OnCancel(self, e):
self.EndModal(wx.ID_CANCEL)
def OnOK(self, e):
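        # collect a MagIC method code for each checked box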
methodcodes=[]
if self.cb1.GetValue() == True:
methodcodes.append('FS-FD')
if self.cb2.GetValue() == True:
methodcodes.append('FS-H')
if self.cb3.GetValue() == True:
methodcodes.append('FS-LOC-GPS')
if self.cb4.GetValue() == True:
methodcodes.append('FS-LOC-MAP')
if self.cb5.GetValue() == True:
methodcodes.append('SO-POM')
if self.cb6.GetValue() == True:
methodcodes.append('SO-ASC')
if self.cb7.GetValue() == True:
methodcodes.append('SO-MAG')
if self.cb8.GetValue() == True:
methodcodes.append('SO-SUN')
if self.cb9.GetValue() == True:
methodcodes.append('SO-SM')
if self.cb10.GetValue() == True:
methodcodes.append('SO-SIGHT')
if methodcodes == []:
self.methodcodes_flags=""
self.methodcodes = ""
else:
self.methodcodes_flags = "-mcd " + ":".join(methodcodes)
self.methodcodes = ":".join(methodcodes)
bedding_codes=[]
if self.bed_con1.GetValue() == True:
bedding_codes.append("-a")
self.average_bedding = True
else:
self.average_bedding = False
if self.bed_con2.GetValue() ==True:
bedding_codes.append("-BCN")
self.bed_correction = False
else:
self.bed_correction = True
self.bedding_codes_flags = " ".join(bedding_codes)
self.EndModal(wx.ID_OK)
#self.Close()
| bsd-3-clause |