repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
googleapis/python-api-core | tests/unit/test_client_info.py | 1 | 2488 | # Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.api_core import client_info
def test_constructor_defaults():
info = client_info.ClientInfo()
assert info.python_version is not None
assert info.grpc_version is not None
assert info.api_core_version is not None
assert info.gapic_version is None
assert info.client_library_version is None
assert info.rest_version is None
def test_constructor_options():
info = client_info.ClientInfo(
python_version="1",
grpc_version="2",
api_core_version="3",
gapic_version="4",
client_library_version="5",
user_agent="6",
rest_version="7",
)
assert info.python_version == "1"
assert info.grpc_version == "2"
assert info.api_core_version == "3"
assert info.gapic_version == "4"
assert info.client_library_version == "5"
assert info.user_agent == "6"
assert info.rest_version == "7"
def test_to_user_agent_minimal():
info = client_info.ClientInfo(
python_version="1", api_core_version="2", grpc_version=None
)
user_agent = info.to_user_agent()
assert user_agent == "gl-python/1 gax/2"
def test_to_user_agent_full():
info = client_info.ClientInfo(
python_version="1",
grpc_version="2",
api_core_version="3",
gapic_version="4",
client_library_version="5",
user_agent="app-name/1.0",
)
user_agent = info.to_user_agent()
assert user_agent == "app-name/1.0 gl-python/1 grpc/2 gax/3 gapic/4 gccl/5"
def test_to_user_agent_rest():
info = client_info.ClientInfo(
python_version="1",
grpc_version=None,
rest_version="2",
api_core_version="3",
gapic_version="4",
client_library_version="5",
user_agent="app-name/1.0",
)
user_agent = info.to_user_agent()
assert user_agent == "app-name/1.0 gl-python/1 rest/2 gax/3 gapic/4 gccl/5"
| apache-2.0 | -3,570,916,829,049,336,300 | 27.272727 | 79 | 0.64791 | false | 3.450763 | false | false | false |
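# A rough sketch of the user-agent format implied by the assertions in the
# tests above; this is an illustration only, not google.api_core's actual
# implementation of ClientInfo.to_user_agent().
def build_user_agent_sketch(python_version, api_core_version, grpc_version=None,
                            rest_version=None, gapic_version=None,
                            client_library_version=None, user_agent=None):
    parts = []
    if user_agent:
        parts.append(user_agent)
    parts.append("gl-python/{}".format(python_version))
    if grpc_version:
        parts.append("grpc/{}".format(grpc_version))
    if rest_version:
        parts.append("rest/{}".format(rest_version))
    parts.append("gax/{}".format(api_core_version))
    if gapic_version:
        parts.append("gapic/{}".format(gapic_version))
    if client_library_version:
        parts.append("gccl/{}".format(client_library_version))
    return " ".join(parts)
assert build_user_agent_sketch("1", "2") == "gl-python/1 gax/2"
assert build_user_agent_sketch("1", "3", grpc_version="2", gapic_version="4",
                               client_library_version="5",
                               user_agent="app-name/1.0") == \
    "app-name/1.0 gl-python/1 grpc/2 gax/3 gapic/4 gccl/5"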
ItsCalebJones/SpaceLaunchNow-Server | api/v330/spacestation/views.py | 1 | 1860 | from rest_framework.viewsets import ModelViewSet
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from api.models import *
from api.permission import HasGroupPermission
from api.v330.spacestation.serializers import SpaceStationDetailedSerializer, SpaceStationSerializer
class SpaceStationViewSet(ModelViewSet):
"""
API endpoint that allows Space Stations to be viewed.
GET:
Return a list of all the existing space stations.
FILTERS:
    Parameters - 'name', 'status', 'owners', 'orbit', 'type', 'owners__name', 'owners__abbrev'
Example - /api/3.3.0/spacestation/?status=Active
SEARCH EXAMPLE:
Example - /api/3.3.0/spacestation/?search=ISS
Searches through 'name', 'owners__name', 'owners__abbrev'
ORDERING:
Fields - 'id', 'status', 'type', 'founded', 'volume'
Example - /api/3.3.0/spacestation/?ordering=id
"""
def get_serializer_class(self):
mode = self.request.query_params.get("mode", "normal")
if self.action == 'retrieve' or mode == "detailed":
return SpaceStationDetailedSerializer
else:
return SpaceStationSerializer
queryset = SpaceStation.objects.all()
permission_classes = [HasGroupPermission]
permission_groups = {
'retrieve': ['_Public'], # retrieve can be accessed without credentials (GET 'site.com/api/foo/1')
        'list': ['_Public'] # list can also be accessed without credentials (GET 'site.com/api/foo')
}
filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
filter_fields = ('name', 'status', 'owners', 'orbit', 'type', 'owners__name', 'owners__abbrev')
search_fields = ('$name', 'owners__name', 'owners__abbrev')
ordering_fields = ('id', 'status', 'type', 'founded', 'volume') | apache-2.0 | -6,008,000,186,509,618,000 | 39.456522 | 114 | 0.687634 | false | 3.858921 | false | false | false |
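# A short client-side sketch for the endpoint documented above. The host name
# is a placeholder and the response handling assumes Django REST framework's
# default paginated layout ('results' key).
import requests
BASE = "https://example.com/api/3.3.0/spacestation/"   # placeholder host
params = {"status": "Active", "ordering": "id", "mode": "detailed"}
r = requests.get(BASE, params=params)
r.raise_for_status()
for station in r.json().get("results", []):
    print(station.get("name"))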
luan-th-nguyen/seisflows_ndt | seisflows/config.py | 1 | 6784 |
import copy_reg
import imp
import os
import re
import sys
import types
from importlib import import_module
from os.path import abspath, join, exists
from seisflows.tools import msg
from seisflows.tools.err import ParameterError
from seisflows.tools import unix
from seisflows.tools.tools import loadjson, loadobj, loadpy, savejson, saveobj
from seisflows.tools.tools import module_exists, package_exists
# SeisFlows consists of interacting 'system', 'preprocess', 'solver',
# 'postprocess', 'optimize', and 'workflow' objects. Each corresponds
# simultaneously to a module in the SeisFlows source code, a class that is
# instantiated and made accessible via sys.modules, and a parameter in a
# global dictionary. Once in memory, these objects can be thought of as
# comprising the complete 'state' of a SeisFlows session.
# The following list is one of the few hardwired aspects of the whole
# SeisFlows package. Any changes may result in circular imports or other
# problems.
names = []
names += ['system']
names += ['preprocess']
names += ['solver']
names += ['postprocess']
names += ['optimize']
names += ['workflow']
def config():
""" Instantiates SeisFlows objects and makes them globally accessible by
registering them in sys.modules
"""
# parameters and paths must already be loaded
# (normally this is done by sfsubmit)
assert 'seisflows_parameters' in sys.modules
assert 'seisflows_paths' in sys.modules
# check if objects already exist on disk
if exists(_output()):
print msg.WarningOverwrite
sys.exit()
# instantiate and register objects
for name in names:
sys.modules['seisflows_'+name] = custom_import(name)()
# error checking
for name in names:
sys.modules['seisflows_'+name].check()
if not hasattr(sys.modules['seisflows_parameters'], 'workflow'.upper()):
print msg.MissingParameter_Worfklow
sys.exit(-1)
if not hasattr(sys.modules['seisflows_parameters'], 'system'.upper()):
print msg.MissingParameter_System
sys.exit(-1)
def save():
""" Exports session to disk
"""
unix.mkdir(_output())
for name in ['parameters', 'paths']:
fullfile = join(_output(), 'seisflows_'+name+'.json')
savejson(fullfile, sys.modules['seisflows_'+name].__dict__)
for name in names:
fullfile = join(_output(), 'seisflows_'+name+'.p')
saveobj(fullfile, sys.modules['seisflows_'+name])
def load(path):
""" Imports session from disk
"""
for name in ['parameters', 'paths']:
fullfile = join(_full(path), 'seisflows_'+name+'.json')
sys.modules['seisflows_'+name] = Dict(loadjson(fullfile))
for name in names:
fullfile = join(_full(path), 'seisflows_'+name+'.p')
sys.modules['seisflows_'+name] = loadobj(fullfile)
class Dict(object):
""" Dictionary-like object for holding parameters or paths
"""
def __iter__(self):
return iter(sorted(self.__dict__.keys()))
def __getattr__(self, key):
return self.__dict__[key]
def __getitem__(self, key):
return self.__dict__[key]
def __setattr__(self, key, val):
if key in self.__dict__:
raise TypeError("Once defined, parameters cannot be changed.")
self.__dict__[key] = val
def __delattr__(self, key):
if key in self.__dict__:
raise TypeError("Once defined, parameters cannot be deleted.")
raise KeyError
def update(self, newdict):
super(Dict, self).__setattr__('__dict__', newdict)
def __init__(self, newdict):
self.update(newdict)
class Null(object):
""" Always and reliably does nothing
"""
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __nonzero__(self):
return False
def __getattr__(self, key):
return self
def __setattr__(self, key, val):
return self
def __delattr__(self, key):
return self
def custom_import(*args):
""" Imports SeisFlows module and extracts class of same name. For example,
custom_import('workflow', 'inversion')
imports 'seisflows.workflow.inversion' and, from this module, extracts
class 'inversion'.
"""
# parse input arguments
if len(args) == 0:
raise Exception(msg.ImportError1)
if args[0] not in names:
raise Exception(msg.ImportError2)
if len(args) == 1:
args += (_try(args[0]),)
if not args[1]:
return Null
# generate package list
packages = ['seisflows']
# does module exist?
_exists = False
for package in packages:
full_dotted_name = package+'.'+args[0]+'.'+args[1]
if module_exists(full_dotted_name):
_exists = True
break
if not _exists:
raise Exception(msg.ImportError3 %
(args[0], args[1], args[0].upper()))
# import module
module = import_module(full_dotted_name)
# extract class
if hasattr(module, args[1]):
return getattr(module, args[1])
else:
raise Exception(msg.ImportError4 %
(args[0], args[1], args[1]))
def tilde_expand(mydict):
""" Expands tilde character in path strings
"""
for key,val in mydict.items():
if type(val) not in [str, unicode]:
raise Exception
if val[0:2] == '~/':
mydict[key] = os.getenv('HOME') +'/'+ val[2:]
return mydict
# utility functions
def _par(key):
return sys.modules['seisflows_parameters'][key.upper()]
def _path(key):
return sys.modules['seisflows_paths'][key.upper()]
def _try(key):
try:
return _par(key)
except KeyError:
return None
def _output():
try:
return _full(_path('output'))
except:
return _full(join('.', 'output'))
def _full(path):
try:
return join(abspath(path), '')
except:
raise IOError
# The following code changes how instance methods are handled by pickle.
# Placing it here, in this module, ensures that pickle changes will be in
# effect for all SeisFlows workflows.
# For relevant discussion, see the stackoverflow thread "Can't pickle
# <type 'instancemethod'> when using python's multiprocessing Pool.map()"
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
| bsd-2-clause | -5,586,993,668,159,306,000 | 26.803279 | 406 | 0.627064 | false | 3.876571 | false | false | false |
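# A minimal sketch of how a session could be assembled with the module above.
# Normally the 'sfsubmit' launcher does this, and the parameter values below
# (WORKFLOW, SYSTEM, OUTPUT, ...) are placeholders rather than a working setup.
import sys
from seisflows.config import config, save, Dict, custom_import
sys.modules['seisflows_parameters'] = Dict({'WORKFLOW': 'inversion', 'SYSTEM': 'serial'})
sys.modules['seisflows_paths'] = Dict({'OUTPUT': './output'})
config()   # instantiates and registers seisflows_workflow, seisflows_system, ...
save()     # writes seisflows_*.json and seisflows_*.p into the output directory
Inversion = custom_import('workflow', 'inversion')   # fetch the 'inversion' class directly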
ChristophKirst/ClearMap | ClearMap/IO/NRRD.py | 1 | 20940 | #!/usr/bin/env python
# encoding: utf-8
"""
Interface to NRRD volumetric image data files.
The interface is based on nrrd.py, an all-python (and numpy)
implementation for reading and writing nrrd files.
See http://teem.sourceforge.net/nrrd/format.html for the specification.
Example:
>>> import os, numpy
>>> import ClearMap.Settings as settings
>>> import ClearMap.IO.NRRD as nrrd
>>> filename = os.path.join(settings.ClearMapPath, 'Test/Data/Nrrd/test.nrrd');
>>> data = nrrd.readData(filename);
>>> print data.shape
(20, 50, 10)
Author
"""
"""
Copyright 2011 Maarten Everts and David Hammond.
Modified to integrate into ClearMap framework by Christoph Kirst, The Rockefeller University, New York City, 2015
"""
import numpy as np
import gzip
import bz2
import os.path
from datetime import datetime
import ClearMap.IO as io
class NrrdError(Exception):
"""Exceptions for Nrrd class."""
pass
#This will help prevent loss of precision
#IEEE754-1985 standard says that 17 decimal digits is enough in all cases.
def _convert_to_reproducible_floatingpoint( x ):
if type(x) == float:
value = '{:.16f}'.format(x).rstrip('0').rstrip('.') # Remove trailing zeros, and dot if at end
else:
value = str(x)
return value
_TYPEMAP_NRRD2NUMPY = {
'signed char': 'i1',
'int8': 'i1',
'int8_t': 'i1',
'uchar': 'u1',
'unsigned char': 'u1',
'uint8': 'u1',
'uint8_t': 'u1',
'short': 'i2',
'short int': 'i2',
'signed short': 'i2',
'signed short int': 'i2',
'int16': 'i2',
'int16_t': 'i2',
'ushort': 'u2',
'unsigned short': 'u2',
'unsigned short int': 'u2',
'uint16': 'u2',
'uint16_t': 'u2',
'int': 'i4',
'signed int': 'i4',
'int32': 'i4',
'int32_t': 'i4',
'uint': 'u4',
'unsigned int': 'u4',
'uint32': 'u4',
'uint32_t': 'u4',
'longlong': 'i8',
'long long': 'i8',
'long long int': 'i8',
'signed long long': 'i8',
'signed long long int': 'i8',
'int64': 'i8',
'int64_t': 'i8',
'ulonglong': 'u8',
'unsigned long long': 'u8',
'unsigned long long int': 'u8',
'uint64': 'u8',
'uint64_t': 'u8',
'float': 'f4',
'double': 'f8',
'block': 'V'
}
_TYPEMAP_NUMPY2NRRD = {
'i1': 'int8',
'u1': 'uint8',
'i2': 'int16',
'u2': 'uint16',
'i4': 'int32',
'u4': 'uint32',
'i8': 'int64',
'u8': 'uint64',
'f4': 'float',
'f8': 'double',
'V': 'block'
}
_NUMPY2NRRD_ENDIAN_MAP = {
'<': 'little',
'L': 'little',
'>': 'big',
'B': 'big'
}
def parse_nrrdvector(inp):
"""Parse a vector from a nrrd header, return a list."""
assert inp[0] == '(', "Vector should be enclosed by parenthesis."
assert inp[-1] == ')', "Vector should be enclosed by parenthesis."
return [_convert_to_reproducible_floatingpoint(x) for x in inp[1:-1].split(',')]
def parse_optional_nrrdvector(inp):
"""Parse a vector from a nrrd header that can also be none."""
if (inp == "none"):
return inp
else:
return parse_nrrdvector(inp)
_NRRD_FIELD_PARSERS = {
'dimension': int,
'type': str,
'sizes': lambda fieldValue: [int(x) for x in fieldValue.split()],
'endian': str,
'encoding': str,
'min': float,
'max': float,
'oldmin': float,
'old min': float,
'oldmax': float,
'old max': float,
'lineskip': int,
'line skip': int,
'byteskip': int,
'byte skip': int,
'content': str,
'sample units': str,
'datafile': str,
'data file': str,
'spacings': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'thicknesses': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axis mins': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axismins': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axis maxs': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axismaxs': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'centerings': lambda fieldValue: [str(x) for x in fieldValue.split()],
'labels': lambda fieldValue: [str(x) for x in fieldValue.split()],
'units': lambda fieldValue: [str(x) for x in fieldValue.split()],
'kinds': lambda fieldValue: [str(x) for x in fieldValue.split()],
'space': str,
'space dimension': int,
'space units': lambda fieldValue: [str(x) for x in fieldValue.split()],
'space origin': parse_nrrdvector,
'space directions': lambda fieldValue:
[parse_optional_nrrdvector(x) for x in fieldValue.split()],
'measurement frame': lambda fieldValue:
[parse_nrrdvector(x) for x in fieldValue.split()],
}
_NRRD_REQUIRED_FIELDS = ['dimension', 'type', 'encoding', 'sizes']
# The supported field values
_NRRD_FIELD_ORDER = [
'type',
'dimension',
'space dimension',
'space',
'sizes',
'space directions',
'kinds',
'endian',
'encoding',
'min',
'max',
'oldmin',
'old min',
'oldmax',
'old max',
'content',
'sample units',
'spacings',
'thicknesses',
'axis mins',
'axismins',
'axis maxs',
'axismaxs',
'centerings',
'labels',
'units',
'space units',
'space origin',
'measurement frame',
'data file']
def _determine_dtype(fields):
"""Determine the numpy dtype of the data."""
# Check whether the required fields are there
for field in _NRRD_REQUIRED_FIELDS:
if field not in fields:
raise NrrdError('Nrrd header misses required field: "%s".' % (field))
# Process the data type
np_typestring = _TYPEMAP_NRRD2NUMPY[fields['type']]
if np.dtype(np_typestring).itemsize > 1:
if 'endian' not in fields:
raise NrrdError('Nrrd header misses required field: "endian".')
if fields['endian'] == 'big':
np_typestring = '>' + np_typestring
elif fields['endian'] == 'little':
np_typestring = '<' + np_typestring
return np.dtype(np_typestring)
def _read_data(fields, filehandle, filename=None):
"""Read the actual data into a numpy structure."""
data = np.zeros(0)
# Determine the data type from the fields
dtype = _determine_dtype(fields)
# determine byte skip, line skip, and data file (there are two ways to write them)
lineskip = fields.get('lineskip', fields.get('line skip', 0))
byteskip = fields.get('byteskip', fields.get('byte skip', 0))
datafile = fields.get("datafile", fields.get("data file", None))
datafilehandle = filehandle
if datafile is not None:
# If the datafile path is absolute, don't muck with it. Otherwise
# treat the path as relative to the directory in which the detached
# header is in
if os.path.isabs(datafile):
datafilename = datafile
else:
datafilename = os.path.join(os.path.dirname(filename), datafile)
datafilehandle = open(datafilename,'rb')
numPixels=np.array(fields['sizes']).prod()
totalbytes = dtype.itemsize * numPixels
if fields['encoding'] == 'raw':
if byteskip == -1: # This is valid only with raw encoding
datafilehandle.seek(-totalbytes, 2)
else:
for _ in range(lineskip):
datafilehandle.readline()
datafilehandle.read(byteskip)
data = np.fromfile(datafilehandle, dtype)
elif fields['encoding'] == 'gzip' or\
fields['encoding'] == 'gz':
gzipfile = gzip.GzipFile(fileobj=datafilehandle)
# Again, unfortunately, np.fromfile does not support
# reading from a gzip stream, so we'll do it like this.
# I have no idea what the performance implications are.
data = np.fromstring(gzipfile.read(), dtype)
elif fields['encoding'] == 'bzip2' or\
fields['encoding'] == 'bz2':
        # bz2.BZ2File cannot wrap an already open file object (unlike
        # gzip.GzipFile), and np.fromfile cannot read from a compressed
        # stream, so decompress the remaining bytes and parse the buffer.
        data = np.fromstring(bz2.decompress(datafilehandle.read()), dtype)
else:
raise NrrdError('Unsupported encoding: "%s"' % fields['encoding'])
if numPixels != data.size:
raise NrrdError('ERROR: {0}-{1}={2}'.format(numPixels,data.size,numPixels-data.size))
# dkh : eliminated need to reverse order of dimensions. nrrd's
# data layout is same as what numpy calls 'Fortran' order,
shape_tmp = list(fields['sizes'])
data = np.reshape(data, tuple(shape_tmp), order='F')
return data
def _validate_magic_line(line):
"""For NRRD files, the first four characters are always "NRRD", and
remaining characters give information about the file format version
"""
if not line.startswith('NRRD'):
raise NrrdError('Missing magic "NRRD" word. Is this an NRRD file?')
try:
if int(line[4:]) > 5:
raise NrrdError('NRRD file version too new for this library.')
except:
raise NrrdError('Invalid NRRD magic line: %s' % (line,))
return len(line)
def readHeader(filename):
"""Parse the fields in the nrrd header
nrrdfile can be any object which supports the iterator protocol and
returns a string each time its next() method is called — file objects and
list objects are both suitable. If csvfile is a file object, it must be
opened with the ‘b’ flag on platforms where that makes a difference
(e.g. Windows)
>>> readHeader(("NRRD0005", "type: float", "dimension: 3"))
{'type': 'float', 'dimension': 3, 'keyvaluepairs': {}}
>>> readHeader(("NRRD0005", "my extra info:=my : colon-separated : values"))
{'keyvaluepairs': {'my extra info': 'my : colon-separated : values'}}
"""
if isinstance(filename, basestring):
nrrdfile = open(filename,'rb');
else:
nrrdfile = filename;
# Collect number of bytes in the file header (for seeking below)
headerSize = 0
it = iter(nrrdfile)
headerSize += _validate_magic_line(next(it).decode('ascii'))
header = { 'keyvaluepairs': {} }
for raw_line in it:
headerSize += len(raw_line)
raw_line = raw_line.decode('ascii')
# Trailing whitespace ignored per the NRRD spec
line = raw_line.rstrip()
# Comments start with '#', no leading whitespace allowed
if line.startswith('#'):
continue
# Single blank line separates the header from the data
if line == '':
break
# Handle the <key>:=<value> lines first since <value> may contain a
# ': ' which messes up the <field>: <desc> parsing
key_value = line.split(':=', 1)
        if len(key_value) == 2:
key, value = key_value
# TODO: escape \\ and \n ??
# value.replace(r'\\\\', r'\\').replace(r'\n', '\n')
header['keyvaluepairs'][key] = value
continue
# Handle the "<field>: <desc>" lines.
field_desc = line.split(': ', 1)
        if len(field_desc) == 2:
field, desc = field_desc
## preceeding and suffixing white space should be ignored.
field = field.rstrip().lstrip()
desc = desc.rstrip().lstrip()
if field not in _NRRD_FIELD_PARSERS:
raise NrrdError('Unexpected field in nrrd header: "%s".' % field)
if field in header.keys():
raise NrrdError('Duplicate header field: "%s"' % field)
header[field] = _NRRD_FIELD_PARSERS[field](desc)
continue
# Should not reach here
raise NrrdError('Invalid header line: "%s"' % line)
# line reading was buffered; correct file pointer to just behind header:
nrrdfile.seek(headerSize)
return header
def readData(filename, **args):
"""Read nrrd file image data
Arguments:
filename (str): file name as regular expression
x,y,z (tuple): data range specifications
Returns:
array: image data
"""
with open(filename,'rb') as filehandle:
header = readHeader(filehandle)
#print header
data = _read_data(header, filehandle, filename)
#return (data, header)
#return data.transpose([1,0,2]);
data = io.readData(data, **args);
return data;
def _format_nrrd_list(fieldValue) :
return ' '.join([_convert_to_reproducible_floatingpoint(x) for x in fieldValue])
def _format_nrrdvector(v) :
return '(' + ','.join([_convert_to_reproducible_floatingpoint(x) for x in v]) + ')'
def _format_optional_nrrdvector(v):
if (v == 'none') :
return 'none'
else :
return _format_nrrdvector(v)
_NRRD_FIELD_FORMATTERS = {
'dimension': str,
'type': str,
'sizes': _format_nrrd_list,
'endian': str,
'encoding': str,
'min': str,
'max': str,
'oldmin': str,
'old min': str,
'oldmax': str,
'old max': str,
'lineskip': str,
'line skip': str,
'byteskip': str,
'byte skip': str,
'content': str,
'sample units': str,
'datafile': str,
'data file': str,
'spacings': _format_nrrd_list,
'thicknesses': _format_nrrd_list,
'axis mins': _format_nrrd_list,
'axismins': _format_nrrd_list,
'axis maxs': _format_nrrd_list,
'axismaxs': _format_nrrd_list,
'centerings': _format_nrrd_list,
'labels': _format_nrrd_list,
'units': _format_nrrd_list,
'kinds': _format_nrrd_list,
'space': str,
'space dimension': str,
'space units': _format_nrrd_list,
'space origin': _format_nrrdvector,
'space directions': lambda fieldValue: ' '.join([_format_optional_nrrdvector(x) for x in fieldValue]),
'measurement frame': lambda fieldValue: ' '.join([_format_optional_nrrdvector(x) for x in fieldValue]),
}
def _write_data(data, filehandle, options):
# Now write data directly
#rawdata = data.transpose([2,0,1]).tostring(order = 'C')
rawdata = data.transpose([2,1,0]).tostring(order = 'C');
if options['encoding'] == 'raw':
filehandle.write(rawdata)
elif options['encoding'] == 'gzip':
gzfileobj = gzip.GzipFile(fileobj = filehandle)
gzfileobj.write(rawdata)
gzfileobj.close()
elif options['encoding'] == 'bz2':
        # bz2.BZ2File cannot wrap an already open file object, so compress
        # the buffer in memory and write the result to the file handle.
        filehandle.write(bz2.compress(rawdata))
else:
raise NrrdError('Unsupported encoding: "%s"' % options['encoding'])
def writeData(filename, data, options={}, separateHeader=False, x = all, y = all, z = all):
"""Write data to nrrd file
Arguments:
filename (str): file name as regular expression
data (array): image data
options (dict): options dictionary
separateHeader (bool): write a separate header file
Returns:
str: nrrd output file name
To sample data use `options['spacings'] = [s1, s2, s3]` for
3d data with sampling deltas `s1`, `s2`, and `s3` in each dimension.
"""
data = io.dataToRange(data, x = x, y = y, z = z);
# Infer a number of fields from the ndarray and ignore values
# in the options dictionary.
options['type'] = _TYPEMAP_NUMPY2NRRD[data.dtype.str[1:]]
if data.dtype.itemsize > 1:
options['endian'] = _NUMPY2NRRD_ENDIAN_MAP[data.dtype.str[:1]]
# if 'space' is specified 'space dimension' can not. See http://teem.sourceforge.net/nrrd/format.html#space
if 'space' in options.keys() and 'space dimension' in options.keys():
del options['space dimension']
options['dimension'] = data.ndim
dsize = list(data.shape);
#dsize[0:2] = [dsize[1], dsize[0]];
options['sizes'] = dsize;
# The default encoding is 'gzip'
if 'encoding' not in options:
options['encoding'] = 'gzip'
# A bit of magic in handling options here.
# If *.nhdr filename provided, this overrides `separate_header=False`
# If *.nrrd filename provided AND separate_header=True, separate files
# written.
# For all other cases, header & data written to same file.
if filename[-5:] == '.nhdr':
separate_header = True
if 'data file' not in options:
datafilename = filename[:-4] + str('raw')
if options['encoding'] == 'gzip':
datafilename += '.gz'
options['data file'] = datafilename
else:
datafilename = options['data file']
elif filename[-5:] == '.nrrd' and separate_header:
separate_header = True
datafilename = filename
filename = filename[:-4] + str('nhdr')
else:
# Write header & data as one file
datafilename = filename;
separate_header = False;
with open(filename,'wb') as filehandle:
filehandle.write(b'NRRD0005\n')
filehandle.write(b'# This NRRD file was generated by pynrrd\n')
filehandle.write(b'# on ' +
datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S').encode('ascii') +
b'(GMT).\n')
filehandle.write(b'# Complete NRRD file format specification at:\n');
filehandle.write(b'# http://teem.sourceforge.net/nrrd/format.html\n');
# Write the fields in order, this ignores fields not in _NRRD_FIELD_ORDER
for field in _NRRD_FIELD_ORDER:
if field in options:
outline = (field + ': ' +
_NRRD_FIELD_FORMATTERS[field](options[field]) +
'\n').encode('ascii')
filehandle.write(outline)
d = options.get('keyvaluepairs', {})
for (k,v) in sorted(d.items(), key=lambda t: t[0]):
outline = (str(k) + ':=' + str(v) + '\n').encode('ascii')
filehandle.write(outline)
# Write the closing extra newline
filehandle.write(b'\n')
# If a single file desired, write data
if not separate_header:
_write_data(data, filehandle, options)
# If separate header desired, write data to different file
if separate_header:
with open(datafilename, 'wb') as datafilehandle:
_write_data(data, datafilehandle, options)
return filename;
def dataSize(filename, **args):
"""Read data size from nrrd image
Arguments:
filename (str): file name as regular expression
x,y,z (tuple): data range specifications
Returns:
tuple: data size
"""
header = readHeader(filename);
dims = header['sizes'];
#dims[0:2] = [dims[1], dims[0]];
return io.dataSizeFromDataRange(dims, **args);
def dataZSize(filename, z = all, **args):
"""Read data z size from nrrd image
Arguments:
filename (str): file name as regular expression
z (tuple): z data range specification
Returns:
int: z data size
"""
header = readHeader(filename);
dims = header['sizes'];
if len(dims) > 2:
return io.toDataSize(dims[2], r = z);
else:
return None;
def copyData(source, sink):
"""Copy an nrrd file from source to sink
Arguments:
source (str): file name pattern of source
sink (str): file name pattern of sink
Returns:
str: file name of the copy
Notes:
Todo: dealt with nrdh header files!
"""
io.copyFile(source, sink);
def test():
"""Test NRRD IO module"""
import ClearMap.IO.NRRD as self
reload(self)
from ClearMap.Settings import ClearMapPath
import os
import numpy
"""Test NRRD module"""
basedir = ClearMapPath;
fn = os.path.join(basedir, 'Test/Data/Nrrd/test.nrrd')
data = numpy.random.rand(20,50,10);
data[5:15, 20:45, 2:9] = 0;
reload(self)
print "writing nrrd image to: " + fn;
self.writeData(fn, data);
ds = self.dataSize(fn);
print "dataSize: %s" % str(ds);
print "Loading raw image from: " + fn;
img = self.readData(fn);
print "Image size: " + str(img.shape)
diff = img - data;
print (diff.max(), diff.min())
#some uint type
print "writing raw image to: " + fn;
udata = data * 10;
udata = udata.astype('uint16');
self.writeData(fn, udata);
print "Loading raw image from: " + fn;
img = self.readData(fn);
print "Image size: " + str(img.shape)
diff = img - udata;
print (diff.max(), diff.min())
#dataSize
print "dataSize is %s" % str(self.dataSize(fn))
print "dataZSize is %s" % str(self.dataZSize(fn))
if __name__ == "__main__":
test();
| gpl-3.0 | -2,766,026,941,562,460,000 | 31.009174 | 117 | 0.596542 | false | 3.508884 | false | false | false |
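# A short usage sketch for the writer above, illustrating the 'spacings'
# option and the detached *.nhdr header discussed in the writeData docstring.
# The file paths are placeholders.
import numpy
import ClearMap.IO.NRRD as nrrd
data = numpy.random.rand(20, 50, 10)
# sampling deltas of 1 x 1 x 2 along the three axes, gzip-encoded by default
nrrd.writeData('/tmp/test.nrrd', data, options={'spacings': [1, 1, 2]})
# a *.nhdr name triggers a detached header plus a separate (gzipped) raw file
nrrd.writeData('/tmp/test.nhdr', data)
print(nrrd.dataSize('/tmp/test.nrrd'))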
mdevaev/slog | src/remote.py | 1 | 1202 | # -*- mode: python; coding: utf-8; -*-
import dbus
import dbus.service, dbus.mainloop.glib
class Remote:
def __init__(self):
bus = dbus.SessionBus()
slog_obj = bus.get_object("org.LightLang.SLog", "/SLog")
self.iface = dbus.Interface(slog_obj, "org.LightLang.SLogInterface")
def __spy_toggle(self):
self.iface.spy_toggle()
def __window_toggle(self):
self.iface.toggle()
def __show(self):
self.iface.show()
def execute(self, cmd):
if cmd == "toggle":
self.__window_toggle()
elif cmd == "spy-toggle":
self.__spy_toggle()
elif cmd == "show":
self.__show()
class SLogDBus(dbus.service.Object):
def __init__(self, interface, obj_path = "/SLog"):
self.interface = interface
bus = dbus.SessionBus()
bus_name = dbus.service.BusName("org.LightLang.SLog", bus)
dbus.service.Object.__init__(self, bus_name, obj_path)
@dbus.service.method("org.LightLang.SLogInterface")
def spy_toggle(self):
self.interface.spy_action.activate()
@dbus.service.method("org.LightLang.SLogInterface")
def toggle(self):
self.interface.window_toggle()
@dbus.service.method("org.LightLang.SLogInterface")
def show(self):
self.interface.hide()
self.interface.app_show()
| gpl-2.0 | -6,164,688,821,272,691,000 | 24.041667 | 70 | 0.682196 | false | 2.855107 | false | false | false |
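# A small usage sketch for the two classes above; the module name 'remote'
# mirrors src/remote.py, and the commented server-side lines are an
# assumption, since SLog wires up the GLib main loop elsewhere.
from remote import Remote, SLogDBus
# client side: ask an already running SLog instance to toggle its window
Remote().execute("toggle")
# server side (inside SLog itself) would look roughly like:
# import gobject, dbus.mainloop.glib
# dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
# service = SLogDBus(main_window_interface)
# gobject.MainLoop().run()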
jabumaho/MNIST-neural-network | network.py | 1 | 3424 | import numpy as np
def sgm(x, derivative=False):
if not derivative:
return 1/(1+np.exp(-x))
else:
return sgm(x) * (1 - sgm(x))
def linear(x, derivative=False):
if not derivative:
return x
else:
return 1
class NeuralNetwork:
layerCount = 0
shape = None
weights = []
layerTransferFunc = []
def __init__(self, layerSize, layerTransferFunc=None):
self.layerCount = len(layerSize) - 1
self.shape = layerSize
self._layerInput = []
self._layerOutput = []
self._previousWeightDelta = []
for (l1, l2) in zip(layerSize[:-1], layerSize[1:]):
self.weights.append(np.random.normal(scale=0.1, size=(l2, l1 + 1)))
self._previousWeightDelta.append(np.zeros(shape=(l2, l1 + 1)))
if layerTransferFunc is None:
layerTransferFunc = []
for i in range(self.layerCount):
if i == self.layerCount - 1:
layerTransferFunc.append(sgm)
else:
layerTransferFunc.append(sgm)
else:
if len(layerTransferFunc) != len(layerSize):
raise ValueError("Incompatible no of transfer functions.")
elif layerTransferFunc[0] is not None:
raise ValueError("no transfer functions for input layer.")
else:
layerTransferFunc = layerTransferFunc[1:]
self.layerTransferFunc = layerTransferFunc
def run(self, inputr):
lnCases = inputr.shape[0]
self._layerInput = []
self._layerOutput = []
for i in range(self.layerCount):
if i == 0:
layerInput = self.weights[0].dot(np.vstack([inputr.T, np.ones([1, lnCases])]))
else:
layerInput = self.weights[i].dot(np.vstack([self._layerOutput[-1], np.ones([1, lnCases])]))
self._layerInput.append(layerInput)
self._layerOutput.append(self.layerTransferFunc[i](layerInput))
return self._layerOutput[-1].T
def trainEpoch(self, inputt, target, trainingRate=0.5, momentum=0.5):
delta = []
lnCases = inputt.shape[0]
self.run(inputt)
for i in reversed(range(self.layerCount)):
if i == self.layerCount - 1:
output_delta = self._layerOutput[i] - target.T
error = 0.5 * np.sum(output_delta**2)
delta.append(output_delta * self.layerTransferFunc[i](self._layerInput[i], True))
else:
deltaPullback = self.weights[i + 1].T.dot(delta[-1])
delta.append(deltaPullback[:-1, :] * self.layerTransferFunc[i](self._layerInput[i], True))
for i in range(self.layerCount):
deltaIndex = self.layerCount - 1 - i
if i == 0:
layerOutput = np.vstack([inputt.T, np.ones([1, lnCases])])
else:
layerOutput = np.vstack([self._layerOutput[i - 1], np.ones([1, self._layerOutput[i - 1].shape[1]])])
currentweightDelta = np.sum(layerOutput[None, :, :].transpose(2, 0, 1) * delta[deltaIndex][None, :, :].transpose(2, 1, 0), axis=0)
weightDelta = trainingRate * currentweightDelta + momentum * self._previousWeightDelta[i]
self.weights[i] -= weightDelta
self._previousWeightDelta[i] = weightDelta
return error
def test_network(self, inputtest, target):
self.run(inputtest)
output_delta = self._layerOutput[self.layerCount - 1] - target.T
return 0.5 * np.sum(output_delta**2)
def nudge(self, scale):
for i in xrange(len(self.weights)):
for j in xrange(len(self.weights[i])):
for k in xrange(len(self.weights[i][j])):
w = self.weights[i][j][k]
w *= scale
u = np.random.normal(scale=abs(w))
self.weights[i][j][k] += u
| gpl-3.0 | -1,273,426,215,257,752,800 | 28.035088 | 133 | 0.645736 | false | 2.990393 | false | false | false |
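# A quick usage sketch of the class above on a toy XOR problem; layer sizes
# and the training schedule are arbitrary choices for illustration.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
net = NeuralNetwork((2, 5, 1))
err = 0.0
for epoch in range(20000):
	err = net.trainEpoch(X, y, trainingRate=0.2, momentum=0.5)
print("final training error: {}".format(err))
print(net.run(X))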
arulalant/mmDiagnosis | diagnosis1/extra/dirty/MULTIPLE PLOTS/por_landscape_2x3.py | 1 | 2203 | import cdms2
import cdutil
import numpy
import numpy.ma
import vcs
import os
import sys
import por_template_2x3_landscape as por_lanscape_2x3
x = por_lanscape_2x3.x
iso=x.createisofill('new1', 'ASD')
iso.levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
#([1, 5, 10, 15, 20, 25, 35, 45, 55, 60, 65, 70, 80])
#iso.levels=vcs.mkscale(0.,80.)
iso.fillareacolors = (246, 255, 252, 253, 254, 251, 140, 5, 171,
248, 249, 242, 239)
#iso.fillareacolors=vcs.getcolors(iso.levels)
iso.ext_1='y'
iso.ext_2='y'
iso.level_1=0
iso.level_2=1
hours=[24, 48, 72, 96, 120]
score_name= ['ts', 'pod', 'pofd', 'hr', 'far']
th_list=[0.1, 0.6, 1. , 3. , 5. , 7.]
file_name='/NCMRWF/Process_Files/T254/StatiScore/2010/Season/jjas/24/stati_spatial_distribution_score_24hr_jjas_2010_T254.nc'
f=cdms2.open(file_name)
for j in xrange(len(score_name)):
score_name_capital = score_name[j].upper()
for k in range(6):
score=TS=f(score_name[j], threshold = th_list[k])
title_plot='T254 D-01 %s %s THRESHOLD JJAS 2010' %(score_name_capital, str(th_list[k]))
if (k == 0):
x.plot(score, por_lanscape_2x3.leftOfTop_lscp, iso, title=title_plot, continents=1, bg=1)
elif (k == 1):
x.plot(score, por_lanscape_2x3.midOfTop_lscp, iso, title=title_plot, continents=1, bg=1)
elif (k == 2):
x.plot(score, por_lanscape_2x3.rightOfTop_lscp, iso, title=title_plot, continents=1, bg=1)
elif(k==3):
x.plot(score, por_lanscape_2x3.leftOfBot_lscp, iso, title=title_plot, continents=1, bg=1)
elif(k==4):
x.plot(score, por_lanscape_2x3.midOfBot_lscp, iso, title=title_plot, continents=1, bg=1)
elif(k==5):
x.plot(score, por_lanscape_2x3.rightOfBot_lscp, iso, title=title_plot, continents=1, bg=1)
else:
pass
out_f_name='/home/arulalan/Desktop/%s_2010_obs.png' %(score_name_capital)
x.png(out_f_name)
x.clear()
| gpl-3.0 | -1,482,070,405,860,495,400 | 31.880597 | 125 | 0.552429 | false | 2.641487 | false | false | false |
cedricpradalier/vrep_ros_ws | src/ar_loc_base/src/ar_loc_base/rover_pf.py | 1 | 4147 | import roslib; roslib.load_manifest('ar_loc_base')
import rospy
from numpy import *
from numpy.linalg import pinv, inv
from math import pi, sin, cos
from geometry_msgs.msg import *
import tf
import bisect
import threading
from rover_kinematics import *
class RoverPF(RoverKinematics):
def __init__(self, initial_pose, initial_uncertainty):
RoverKinematics.__init__(self)
self.initial_uncertainty = initial_uncertainty
self.lock = threading.Lock()
self.X = mat(vstack(initial_pose))
# Initialisation of the particle cloud around the initial position
self.N = 500
self.particles = [self.X + self.drawNoise(initial_uncertainty) for i in range(0,self.N)]
self.pa_pub = rospy.Publisher("~particles",PoseArray,queue_size=1)
def getRotation(self, theta):
R = mat(zeros((2,2)))
R[0,0] = cos(theta); R[0,1] = -sin(theta)
R[1,0] = sin(theta); R[1,1] = cos(theta)
return R
# Draw a vector uniformly around [0,0,0], scaled by norm
def drawNoise(self, norm):
if type(norm)==list:
return mat(vstack(norm)*(2*random.rand(3,1)-vstack([1,1,1])))
else:
return mat(multiply(norm,((2*random.rand(3,1)-vstack([1,1,1])))))
def predict(self, motor_state, drive_cfg, encoder_precision):
self.lock.acquire()
# The first time, we need to initialise the state
if self.first_run:
self.motor_state.copy(motor_state)
self.first_run = False
self.lock.release()
return
# Prepare odometry matrices (check rover_odo.py for usage)
iW = self.prepare_inversion_matrix(drive_cfg)
S = self.prepare_displacement_matrix(self.motor_state,motor_state,drive_cfg)
self.motor_state.copy(motor_state)
# Apply the particle filter prediction step here
# TODO
# self.particles = ...
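        # One possible sketch (assuming, as in rover_odo.py, that iW*S gives the
        # body-frame displacement [dx, dy, dtheta]): move every particle by that
        # displacement rotated into its own heading, plus noise scaled by the
        # encoder precision, e.g.
        # dX = iW * S
        # self.particles = [x + vstack([self.getRotation(x[2, 0]) * dX[0:2, 0],
        #                               dX[2, 0]])
        #                     + self.drawNoise([encoder_precision, encoder_precision,
        #                                       encoder_precision / 10.])
        #                   for x in self.particles]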
self.lock.release()
def update_ar(self, Z, L, Uncertainty):
self.lock.acquire()
print "Update: L="+str(L.T)
# Implement particle filter update using landmarks here
# Note: the function bisect.bisect_left could be useful to implement
# the resampling process efficiently
# TODO
# self.particles = ...
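        # One possible sketch (assuming Z is the landmark measured in the robot
        # frame and L the same landmark in world coordinates): weight each
        # particle by the measurement likelihood, then resample from the
        # cumulative weights with bisect, e.g.
        # weights = []
        # for x in self.particles:
        #     Zpred = self.getRotation(x[2, 0]).T * (L[0:2, 0] - x[0:2, 0])
        #     e = Z[0:2, 0] - Zpred
        #     weights.append(float(exp(-0.5 * (e.T * e) / Uncertainty ** 2)))
        # cumw = list(cumsum(weights) / sum(weights))
        # self.particles = [self.particles[bisect.bisect_left(cumw, random.rand())]
        #                   + self.drawNoise([0.02, 0.02, 0.01])
        #                   for _ in range(self.N)]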
self.lock.release()
def update_compass(self, angle, Uncertainty):
self.lock.acquire()
print "Update: C="+str(angle)
# Implement particle filter update using landmarks here
# Note: the function bisect.bisect_left could be useful to implement
# the resampling process efficiently
# TODO
# self.particles = ...
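        # One possible sketch, mirroring update_ar: weight each particle by the
        # wrapped difference between its heading and the compass angle, then
        # resample from the cumulative weights, e.g.
        # weights = []
        # for x in self.particles:
        #     dtheta = (angle - x[2, 0] + pi) % (2 * pi) - pi
        #     weights.append(float(exp(-0.5 * dtheta ** 2 / Uncertainty ** 2)))
        # cumw = list(cumsum(weights) / sum(weights))
        # self.particles = [self.particles[bisect.bisect_left(cumw, random.rand())]
        #                   + self.drawNoise([0.02, 0.02, 0.01])
        #                   for _ in range(self.N)]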
self.lock.release()
def updateMean(self):
X = mat(zeros((3,1)))
for x in self.particles:
X += x
self.X = X / len(self.particles)
return self.X
def publish(self, pose_pub, target_frame, stamp):
# Only compute the mean for plotting
self.updateMean()
pose = PoseStamped()
pose.header.frame_id = target_frame
pose.header.stamp = stamp
pose.pose.position.x = self.X[0,0]
pose.pose.position.y = self.X[1,0]
pose.pose.position.z = 0.0
Q = tf.transformations.quaternion_from_euler(0, 0, self.X[2,0])
pose.pose.orientation.x = Q[0]
pose.pose.orientation.y = Q[1]
pose.pose.orientation.z = Q[2]
pose.pose.orientation.w = Q[3]
pose_pub.publish(pose)
pa = PoseArray()
pa.header = pose.header
for p in self.particles:
po = Pose()
po.position.x = p[0,0]
po.position.y = p[1,0]
q = tf.transformations.quaternion_from_euler(0, 0, p[2,0])
po.orientation = Quaternion(*q)
pa.poses.append(po)
self.pa_pub.publish(pa)
def broadcast(self,br, target_frame, stamp):
br.sendTransform((self.X[0,0], self.X[1,0], 0),
tf.transformations.quaternion_from_euler(0, 0, self.X[2,0]),
stamp, "/%s/ground"%self.name, target_frame)
| bsd-3-clause | 2,330,022,787,027,257,000 | 32.991803 | 96 | 0.592235 | false | 3.499578 | false | false | false |
nblago/utils | src/model/BBFit.py | 1 | 66521 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 10:57:34 2018
Class that fits a black body function to a set of magnitudes.
@author: nadiablago
@version: 0.22
"""
from __future__ import print_function
import matplotlib
from matplotlib import pylab as plt
import corner
from astropy import units as u
import astropy.constants as cnt
import os, sys
import numpy as np
import emcee
from scipy import stats
import extinction
from astropy.cosmology import FlatLambdaCDM
import warnings
#If PYSYN_CDBS is not defined, it adds the environment variable which points to the
#filter response files for the bands we are interested in.
if not 'PYSYN_CDBS' in os.environ.keys():
print ("Adding the Pysynphot environment:")
os.environ['PYSYN_CDBS'] = "/Users/USER/SOMEWHERE/pysynphot_files"
print ('PYSYN_CDBS environment variable set to: ', os.environ['PYSYN_CDBS'])
'''os.environ['PYSYN_CDBS'] = "/scratch/Software/pysynphot_files/cdbs/"
# Add the environment variable which points to the filter response files for the bands we are interested in.
if not 'PYSYN_CDBS' in os.environ.keys():
print("Adding the Pysynphot environment:")
os.environ['PYSYN_CDBS'] = "/scratch/Software/pysynphot_files/cdbs/"
print('PYSYN_CDBS environment variable set to: ', os.environ['PYSYN_CDBS'])'''
os.environ['PYSYN_CDBS'] = "/Users/nadiablago/Documents/Software/pysynphot_files/"
import pysynphot as ps
class BBFit:
def __init__(self):
'''
Constructor initializes all the parameters to
defaults.
'''
#Some predefined constants in the units we need them
self.c = cnt.c.to(u.cm/u.s).value #2.99792458e+10 #cm / s
self.h = cnt.h.to(u.erg * u.s).value #6.62607004e-27 #erg s
self.k_B = cnt.k_B.to(u.erg / u.K).value#1.38064852e-16 #erg / K
#Source parameters
self.av_host = 0
self.av_mw = 0
self.law = "Fitzpatrick"
self.law_mw = "Fitzpatrick"
#Black body models
self.initT1 = 10000 #K
self.initR1 = 1 # Rsun
self.initT2 = 3000 #K
self.initR2 = 1 # Rsun
self.z = None
self.distMpc = None #in Mpc
self.mjd = 0
#Power law models
self.alpha = 0.75
self.alphaerr1 = 0
self.alphaerr2 = 0
self.scale = 1
self.scaleerr1 = 0.1
self.scaleerr2 = 0.1
#Disk model (scale is already in the power law model)
#Stellar mass, radius, log accretion mass per year, outer radius of accretion disk
self.Mstar = 1
self.Mstarerr1 = 0.1
self.Mstarerr2 = 0.1
self.Rstar = 1
self.Rstarerr1 = 0.1
self.rstarerr2 = 0.1
self.logMacc = -8
self.logMaccerr1 = -9
self.logMaccerr2 = -9
self.R_out = 3
self.R_outerr1 = 1
self.R_outerr2 = 1
#Location for plots
self.plotdir = "../../data/plots"
#Location for fit results
self.resdir = "../../data/modelfits"
self.resfile = "fit_results.txt"
#MCMC parameters
self.method = 'ensemble' #or HA for Hastings
self.mhtune = True # tuning of the Metropolis-Hastings
self.niterations = 10000
self.burnin = 5000
self.threads = 10
self.nwalkers = 20
self.sampler = None
self.model = "BlackBody" #others are "BlackBody_Av" or "BlackBody2_Av", "PowerLaw", "PowerLaw_BlackBody"
#Input data parameters.
#The fitter will run either with magnitudes or with fluxes
self.mags = None
self.magerrs = None
self.bands = None
#Indicates whether the magnitude is in AB or Vega
self.photsys = None
self.wls = None
self.fluxes = None
self.fluxerrs = None
#Output
self.T = None
self.Terr1 = None
self.Terr2 = None
self.R = None
self.Rerr1 = None
self.Rerr2 = None
self.L = None
self.Lerr1 = None
self.Lerr2 = None
#Output for the secondary star
self.Tsec = None
self.Tsecerr1 = None
self.Tsecerr2 = None
self.Rsec = None
self.Rsecerr1 = None
self.Rsecerr2 = None
self.Lsec = None
self.Lsecerr1 = None
self.Lsecerr2 = None
self.cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
#Set the plotting characteristics
self._matplotlib_init()
self.banddic = {"Y": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/ctio_y_andicam.dat"),
"J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_j_002.fits"),
"H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_h_002.fits"),
"K": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_k_002.fits"),
"keck,J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.J.dat"),
"keck,H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.H.dat"),
"keck,Ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.Ks.dat"),
"keck,K": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.K.dat"),
"spitzer,3.6": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac1_3.6.dat"),
"spitzer,4.5": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac2_4.5.dat"),
"spitzer,5.8": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac3_5.8.dat"),
"spitzer,8.0": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac4_8.0.dat"),
"wise,w1": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W1.dat"),
"wise,w2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W2.dat"),
"wise,w3": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W3.dat"),
"wise,w4": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W4.dat"),
"swift,uvw2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvw2_uvot.dat"),
"swift,uvm2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvm2_uvot.dat"),
"swift,uvw1": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvw1_uvot.dat"),
"swift,u": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_u_uvot.dat"),
"swift,b": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_b_uvot.dat"),
"swift,v": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_v_uvot.dat"),
"paranal,Y": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Y.dat"),
"paranal,Z": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Z.dat"),
"paranal,J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.J.dat"),
"paranal,H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.H.dat"),
"paranal,Ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Ks.dat"),
"omegacam,u": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.u_SDSS.dat"),
"omegacam,g": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.g_SDSS.dat"),
"omegacam,r": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.r_SDSS.dat"),
"omegacam,i": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.i_SDSS.dat"),
"omegacam,z": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.z_SDSS.dat"),
"omegacam,Halpha": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.Halpha.dat"),
"nirc2,j": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.J.dat"),
"nirc2,h": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.H.dat"),
"nirc2,ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.Ks.dat")
}
def _matplotlib_init(self):
'''
Set up preferences on matplotlib plot appearance.
'''
matplotlib.rcParams['xtick.minor.size'] = 6
matplotlib.rcParams['xtick.major.size'] = 6
matplotlib.rcParams['ytick.major.size'] = 6
matplotlib.rcParams['xtick.minor.size'] = 4
matplotlib.rcParams['ytick.minor.size'] = 4
matplotlib.rcParams['lines.linewidth'] = 0.5
matplotlib.rcParams['axes.linewidth'] = 1.5
matplotlib.rcParams['font.size']= 14.0
matplotlib.rcParams['font.family']= 'sans-serif'
matplotlib.rcParams['xtick.major.width']= 2.
matplotlib.rcParams['ytick.major.width']= 2.
matplotlib.rcParams['ytick.direction']='in'
matplotlib.rcParams['xtick.direction']='in'
def _band2flux(self):
'''
Will transform the magnitude measurement into a flux measurement.
'''
wls = np.array([])
fluxes = np.array([])
fluxerr = np.array([])
#Create a black body spectrum with an arbitrary value
lam = np.linspace(100, 120000, 10000)
sp = ps.BlackBody(10000)
sp.convert('flam')
sp2 = self._model_2(lam, 10000, 1)
sp2 = sp2 * np.max(sp.flux) / np.max(sp2)
sp = ps.ArraySpectrum(lam, sp2)
for b, m, me, psys in zip(self.bands, self.mags, self.magerrs, self.photsys):
print ("Band,",b)
#Create the observation bandpass
try:
band = ps.ObsBandpass(b)
except ValueError:
#The band is not in the standard list
#We need to go to the dictionary to retrieve the transmission function.
band = ps.FileBandpass(self.banddic[b])
#band.waveunits.convert("angstrom")
#else:
# band.waveunits = ps.units.Angstrom
            #Obtain the effective (average) wavelength
effwave = band.avgwave()
#Correct for Milky Way extinction
m = m - extinction.fitzpatrick99(np.array([effwave]), a_v=self.av_mw, unit='aa')[0]
#Normalize the spectrum to the magnitude of the observation
sp_norm = sp.renorm(m, psys, band, force="extrap")
#Observe with the band
obs = ps.Observation(sp_norm, band)
#Get the flux
flux = obs.effstim('flam')
wls = np.append(wls, effwave)
fluxes = np.append(fluxes, flux)
#Compute the error bars
flux_high = flux * 10**(0.4*me)
flux_low = flux * 10**(-0.4*me)
fluxerr = np.append(fluxerr, np.average([flux - flux_low, flux_high-flux]))
return wls, fluxes, fluxerr
def _model(self, lam, p):
'''
Returns the flux for the single BlackBody model for the wavelength introduced.
lam is in A.
p = (T, R)
'''
lam = lam * u.Angstrom
T = p[0] * u.K
R = (p[1] * u.Rsun).to(u.cm)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
Area = np.pi * (4 * np.pi * R**2)
flam = Area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
return flam.to(u.erg/u.s/u.Angstrom).value
def _model_2(self, lam, T, R):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
lam = lam * u.Angstrom
T = T * u.K
R = (R * u.Rsun).to(u.cm)
Area = np.pi * (4 * np.pi * R**2)
flam = Area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
return flam.to(u.erg/u.s/u.Angstrom).value
def _model_av_r(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
T = p[0] * u.K
R = (p[1] * u.Rsun).to(u.cm)
a_v = p[2]
if a_v < 0:
return lam * np.inf
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
lam = lam * u.Angstrom
area = np.pi * (4 * np.pi * R**2)
flam = area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
#Apply the reddening
flam = flam.to(u.erg/u.s/u.Angstrom).value * flux_red
return flam
def _model_av_r_2(self, lam, T, R, a_v):
'''
Return units: erg s-1 A-1
'''
return self._model_av_r(lam, (T, R, a_v))
def _model2_av(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
T1 = p[0] * u.K
R1 = (p[1] * u.Rsun).to(u.cm)
a_v = p[2]
T2 = p[3] * u.K
R2 = (p[4] * u.Rsun).to(u.cm)
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
lam = lam * u.Angstrom
area1 = np.pi * (4 * np.pi * R1**2)
area2 = np.pi * (4 * np.pi * R2**2)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = area1 * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T1))-1)
flam2 = area2 * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T2))-1)
flam = flam1 + flam2
#Apply the reddening
flam = flam.to(u.erg/u.s/u.Angstrom).value * flux_red
return flam
def _model2_av_2(self, lam, T1, R1, a_v, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_av(lam, (T1, R1, a_v, T2, R2))
def _model2_av_r(self, lam, p):
'''
Return units: erg s-1 A-1
'''
T1 = p[0] #In K
R1 = p[1]*69570000000.0 #From Rsun to cm
a_v = p[2]
T2 = p[3]
R2 = p[4]*69570000000.0 #From Rsun to cm
lam = lam * 1e-8 #To cm
if a_v < 0:
return lam * np.inf
#We need an extra pi as it is integrated across all steradians
#The second factor is the surface of the black body
#The third ones is the Plank law
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = np.pi * (4 * np.pi * R1**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T1))-1)
flam2 = np.pi * (4 * np.pi * R2**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T2))-1)
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam*1e8, a_v, unit='aa'))
flam = (flam1 + flam2) * flux_red *1e-8 #to erg / s / A
#Apply the reddening and transform to erg /s/ A from cm
return flam
def _model2_av_r_2(self, lam, T1, R1, a_v, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_av_r(lam, (T1, R1, a_v, T2, R2))
def _model2_r(self, lam, p):
'''
Return units: erg s-1 A-1
'''
T1 = p[0] #In K
R1 = p[1]*69570000000.0 #From Rsun to cm
T2 = p[2]
R2 = p[3]*69570000000.0 #From Rsun to cm
lam = lam * 1e-8 #To cm
#We need an extra pi as it is integrated across all steradians
#The second factor is the surface of the black body
#The third ones is the Plank law
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = np.pi * (4 * np.pi * R1**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T1))-1)
flam2 = np.pi * (4 * np.pi * R2**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T2))-1)
flam = (flam1 + flam2)*1e-8 #to erg / s / A
return flam
def _model2_r_2(self, lam, T1, R1, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_r(lam, (T1, R1, T2, R2))
def _model_powerlaw(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
lam = lam * u.Angstrom
        w0 = 4000 #p[0] #Reference wavelength
alpha = p[0]
scale = p[1]
a_v = p[2]
f = ps.PowerLaw(w0, alpha)
f.convert('flam')
flam = np.interp(lam, f.wave, f.flux)
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
area = 10**scale
return area * flam * flux_red #.to(u.erg/u.s/u.Angstrom).value
def _model_powerlaw_2(self, lam, alpha, scale, a_v):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_powerlaw(lam, (alpha, scale, a_v))
def _model_powerlaw_bb(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
        w0 = 4000 #p[0] #Reference wavelength
alpha = p[0]
scale = p[1]
T_bb = p[2]
R_bb = p[3]
bb_flux = self._model_2(lam, T_bb, R_bb)
lam = lam * u.Angstrom
f = ps.PowerLaw(w0, alpha)
f.convert('flam')
flam = np.interp(lam, f.wave, f.flux)
area = 10**scale
return area * flam + bb_flux
def _model_powerlaw_bb_2(self, lam, alpha, scale, T_bb, R_bb):
'''
Return units: erg s-1 A-1
'''
return self._model_powerlaw_bb(lam, (alpha, scale, T_bb, R_bb))
def _model_accretion_disk_old2(self, lam, Mstar, Rstar, logMacc, scale, R_out):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_accretion_disk_old(lam, (Mstar, Rstar, logMacc, scale, R_out))
def _model_accretion_disk_old(self, lam, p):
'''
Equation 1 from Kenyon, Hartmann, Hewett 1988.
'''
Mstar = p[0]
Rstar = p[1]
Macc = p[2]
scale = p[3]
R_out = p[4]
if Mstar<0 or Macc<-12 or Rstar<0.001 or scale<0 or R_out < Rstar:
return np.ones(len(lam))*np.inf
Macc = 10**Macc
R = np.linspace(Rstar,R_out,20)
dR = R[1] - R[0]
F_r = (3 * cnt.G * Mstar * u.Msun * Macc * u.Msun/u.year / 8 / np.pi / (u.Rsun*Rstar)**3) * (Rstar/R)**3 * (1 - (Rstar/R)**0.5)
F_r = F_r.to(u.erg/u.cm**2/u.s)
T_r = ((F_r / cnt.sigma_sb)**0.25).to(u.K)
T_max = 13000 * u.K *(Mstar)**0.25 * (Macc / 1e-5)**0.25 * (Rstar)**-0.75
        #Create the disk model
#For each differential radii, we compute the black body spectra corresponding
# to the temperature at that radius, and scale it by the flux expected at that
# radius.
disk_model = []
for i, ri in enumerate(R):
if ri>Rstar and ri<=1.5*Rstar:
sp = ps.BlackBody(T_max.value)
#sp = ps.BlackBody(T_r[i].value)
else:
sp = ps.BlackBody(T_r[i].value)
sp.convert('flam')
tot_flux = sp.trapezoidIntegration(sp.wave, sp.flux)
#Compute the total emitted flux for the spherical area.
#Adopt the outer radius as the
dist_flux_fac = np.pi * ((ri+dR)**2 - ri**2) * (u.Rsun.to(u.cm))**2
scaled_flux = sp.flux / tot_flux * F_r[i].value #* dist_flux_fac
disk_model.append(scaled_flux)
disk = np.array(disk_model)
disk = np.nansum(disk, axis=0)
sp = ps.ArraySpectrum(sp.wave, disk)
#int_flux = sp.trapezoidIntegration(sp.wave, sp.flux)
int_flux = np.max(sp.flux)
#Normalize (recover) the integral flux from 1kpc
flux_norm= sp.flux #/int_flux
#sp_norm = ps.ArraySpectrum(sp.wave, flux_norm)
flux_norm = np.interp(lam, sp.wave, flux_norm)
#flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
return flux_norm #* scale #* flux_red
def _model_disk_T(self, R, Mstar, Rstar, logMacc):
F_r = (3 * cnt.G * Mstar * 10**float(logMacc) * (u.Msun**2/u.year)) \
/ (8 * np.pi * (u.Rsun*R)**3) \
* (1 - (Rstar/R)**0.5)
T_r = ((F_r / cnt.sigma_sb)**0.25).to(u.K)
#print (F_r, T_r)
mask = (R>=Rstar) * (R<=1.5*Rstar)
if np.count_nonzero(mask)>0:
T_max = 13000 * u.K *(Mstar)**0.25 * (10**float(logMacc) / 1e-5)**0.25 * (Rstar)**-0.75
T_r[mask] = T_max
#print (mask, "Tmax", T_max, np.count_nonzero(mask))
return T_r.value
def _model_accretion_disk2(self, lam, Mstar, Rstar, logMacc, R_out):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_accretion_disk(lam, (Mstar, Rstar, logMacc, R_out))
def _model_accretion_disk(self, lam, p):
Mstar = np.maximum(1e-6, p[0])
Rstar = np.maximum(1e-6, p[1])
logMacc = np.maximum(-12, np.minimum(-7, p[2]))
R_out = np.maximum(1e-6, p[3])
i = 45.0
#Deg to radians
i = np.deg2rad(i%360)
d = self.distMpc*(u.Mpc).to(u.cm)
R = np.linspace(Rstar, R_out, 30)*u.Rsun
nu = (cnt.c / (lam*u.Angstrom)).to(u.Hz)
T_r = self._model_disk_T(R.value, Mstar, Rstar, logMacc)
F_nu_arr = []
for ni in nu:
I_nu_r = R / (np.exp(cnt.h * ni/(cnt.k_B*T_r*u.K)) - 1)
I_flux = np.trapz(I_nu_r, R)
F_nu = (4 * np.pi * cnt.h * np.cos(i)*ni**3)/(cnt.c**2 * d**2) * I_flux
F_nu_arr.append(F_nu.to(u.erg/u.s/u.Hz).value)
F_nu_arr = np.array(F_nu_arr)
s = ps.ArraySpectrum(lam, F_nu_arr, fluxunits='fnu', waveunits='Angstrom')
s.convert('flam')
fluxFactor = 4*np.pi*d**2
return s.flux*fluxFactor
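    # The loop above evaluates, for every frequency nu,
    #   F_nu = (4 pi h cos(i) nu^3) / (c^2 d^2) * Int_{R*}^{R_out} R dR / (exp(h nu / (k T(R))) - 1),
    # i.e. a multi-temperature blackbody disk seen at a fixed inclination of 45 deg,
    # then converts F_nu to F_lambda and rescales by 4 pi d^2 to recover a luminosity.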
def _get_Qnu(self, a, lam, wavedusttype="silicate"):
        '''
        Interpolates (bilinearly) the dust emission efficiency Q_nu for a given
        grain size a and wavelength lam from a coarse tabulated grid.
        '''
from scipy import interpolate
x = np.array([0.001, 0.01, 0.1, 1]) #size
y = np.array([0.01, 0.06, 0.2, 7, 10 ]) #wavelength
#--> size
# | wave
# v
z = np.array([[0.02, 0.2, 0.85, 0.85],
[0.02, 0.7, 0.7, 0.7],
[0.001, 0.01, 0.7, 0.7],
[0.00007, 0.001, 0.01, 0.1],
[0.001, 0.01, 0.1, 1]])
f = interpolate.interp2d(x, y, z, kind='linear')
return f(a, lam)
def _get_knu(self, a, wave, rho=1, ):
'''
Returns the values for the dust mass absorption coefficient
for the Spitzer bands for the given grain size and wavelength.
        k_nu = (3. / (4 * np.pi * rho * a**3)) * (np.pi * a**2 * Q_nu(a))
        '''
        k_nu = (3. / (4 * np.pi * rho * a**3)) * (np.pi * a**2 * self._get_Qnu(a, wave))
return k_nu
    def _model_dust(self, Md, Td, a, wave):
        '''
        Using the dust modelling approach from Fox et al. 2010.
        The assumption is that the dust is optically thin and that there is only one size and
        one dust composition.
        The opacities are taken from their Figure 4 values.
        F_nu = M_d B_nu(T_d) k_nu(a) / d**2
        '''
        # wave: wavelength grid (Angstrom) at which the dust model is evaluated.
        Bnu = ps.BlackBody(Td)
        Bnu.convert('fnu')
        knu = self._get_knu(a, wave) * u.cm**2 / u.g
        Fnu = Md * u.Msun * np.interp(wave, Bnu.wave, Bnu.flux) * knu / (self.distMpc * u.Mpc)**2
        return Fnu
#likelihood function
def _like(self, p, xdat, ydat, errdat, debug=False):
'''
        p: model parameters
        xdat, ydat, errdat: wavelengths, fluxes and flux errors of the data
        (a negative error flags an upper limit).
'''
if self.model == "BlackBody":
ymod = self._model(xdat, p)
elif self.model == "BlackBody_Av":
ymod = self._model_av_r(xdat, p)
elif self.model == "BlackBody2_Av":
ymod = self._model2_av_r(xdat, p)
elif self.model == "BlackBody2":
ymod = self._model2_r(xdat, p)
elif self.model == "PowerLaw":
ymod = self._model_powerlaw(xdat, p)
elif self.model == "PowerLaw_BlackBody":
ymod = self._model_powerlaw_bb(xdat, p)
elif self.model == "Disk":
ymod = self._model_accretion_disk(xdat, p)
else:
print ("Unknown model", self.model)
return np.nan
#Discard models which exceed the upper limits
if (np.any(ymod[errdat<0] > ydat[errdat<0])):
prob = 1e-320
#Compute the likelihood with only valid datapoints.
else:
prob = stats.norm.pdf(ydat[errdat>0] , ymod[errdat>0] , errdat[errdat>0] )
# log probabilities
# we add tiny number to avoid NaNs
mylike = np.log(prob + 1e-320).sum()
return mylike
def _logposterior(self, p, xdat, ydat, errdat):
'''
Returns the posterior of the observations. In essence the likelihood and the prior:
#log(likelihood) + log(prior)
'''
lp = self._logprior(p)
if (not np.isinf(lp)):
lp= self._like(p, xdat, ydat, errdat) + lp
return lp
def _logprior(self, p):
'''
Returns the prior probability distribution for each model.
'''
if self.model == "BlackBody":
T1 = p[0]
R1 = p[1]
if T1 < 0 or R1 < 0:
return -np.inf
logp = stats.uniform.logpdf(T1, 10, 15000)
logp = logp + stats.uniform.logpdf(R1, 1, 50000)
if self.model =="BlackBody_Av":
T1 = p[0]
R1 = p[1]
av = p[2]
if T1 < 0 or R1 < 0 or av < 0:
return -np.inf
else:
logp = stats.uniform.logpdf(T1, 10, 15000)
logp = logp + stats.uniform.logpdf(R1, 10000, 120000)
logp = logp + stats.uniform.logpdf(av, 0, 3)
elif self.model == "BlackBody2":
T1 = p[0]
R1 = p[1]
T2 = p[2]
R2 = p[3]
if T1 < 0 or T2 > T1 or T2 < 0 or R1 < 0 or R2<0:
return - np.inf
else:
logp = stats.uniform.logpdf(T1, 100, 10000)
logp = logp + stats.uniform.logpdf(R1, 10, 12000)
logp = logp + stats.uniform.logpdf(T2, 10, 5000)
logp = logp + stats.uniform.logpdf(R2, 10, 12000)
elif self.model == "BlackBody2_Av":
T1 = p[0]
R1 = p[1]
av = p[2]
T2 = p[3]
R2 = p[4]
if T1 < 0 or T2 > T1 or T2 < 0 or av < 0 or av > 10:
return - np.inf
else:
logp = stats.uniform.logpdf(T1, 100, 1000)
logp = logp + stats.uniform.logpdf(R1, 10000, 120000)
logp = logp + stats.uniform.logpdf(av, 0, 3)
logp = logp + stats.uniform.logpdf(T2, 100, 1000)
logp = logp + stats.uniform.logpdf(R2, 10000, 120000)
elif self.model == "PowerLaw":
alpha = p[0]
scale = p[1]
av = p[2]
if av < 0:
logp = -np.inf
else:
logp = stats.uniform.logpdf(alpha, 0, 3)
logp = logp + stats.uniform.logpdf(scale, 0.1, 100)
logp = logp + stats.uniform.logpdf(av, 0, 3)
elif self.model == "PowerLaw_BlackBody":
alpha = p[0]
scale = p[1]
T1 = p[2]
R1 = p[3]
if R1 < 0 or T1 < 0 or alpha < 0:
logp = -np.inf
else:
logp = stats.uniform.logpdf(alpha, 0, 3)
logp = logp + stats.uniform.logpdf(scale, 0.1, 100)
logp = logp + stats.uniform.logpdf(T1, 500, 20000)
logp = logp + stats.uniform.logpdf(R1, 0, 500)
elif self.model == "Disk":
Mstar = p[0]
Rstar = p[1]
logMacc = p[2]
R_out = p[3]
if Rstar < 0 or Mstar < 0 or logMacc < -12 or R_out<0 or R_out < Rstar:
logp = -np.inf
else:
logp = stats.uniform.logpdf(Mstar, 0, 1.44)
logp = logp + stats.uniform.logpdf(Rstar, 0, 10)
logp = logp + stats.uniform.logpdf(logMacc, -12, 7)
logp = logp + stats.uniform.logpdf(R_out, 0, 50)
return logp
def _get_max_and_intervals(self, x):
'''
        Provided a chain of samples, returns the 34th, 50th (median) and 66th
        percentiles of the distribution.
'''
return np.percentile(x, 34), np.percentile(x, 50), np.percentile(x, 66)
#return percent1, maxp, percent2
def _area2rsun(self, A):
'''
Given the area of the black body in cm2 returns the radius for the object in solar radius.
'''
Aream2 = A * u.cm**2 # add units
Rad = np.sqrt(Aream2/(4*(np.pi)**2)).to(u.Rsun) #in Rsun
return Rad.value
def _fill_output(self):
'''
Computes the confidence intervals from the MCMC distribution.
        Transforms the temperature and radius into a black body luminosity.
'''
if self.model.startswith("BlackBody"):
T1, T, T2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
self.T = T
self.Terr1 = T - T1
self.Terr2 = T2 - T
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
self.L = self._get_bol_lum(T, R)
self.Lerr1 = self.L - self._get_bol_lum(T1, R1)
self.Lerr2 = self._get_bol_lum(T2, R2) - self.L
if self.model == "BlackBody_Av":
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
elif self.model == "BlackBody2_Av":
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
Tsec1, Tsec, Tsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
Rsec1, Rsec, Rsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,4])
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
self.Tsec = Tsec
self.Tsecerr1 = Tsec - Tsec1
self.Tsecerr2 = Tsec2 - Tsec
self.Rsec = Rsec
self.Rsecerr1 = Rsec - Rsec1
self.Rsecerr2 = Rsec2 - Rsec
elif self.model == "BlackBody2":
Tsec1, Tsec, Tsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
Rsec1, Rsec, Rsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.Tsec = Tsec
self.Tsecerr1 = Tsec - Tsec1
self.Tsecerr2 = Tsec2 - Tsec
self.Rsec = Rsec
self.Rsecerr1 = Rsec - Rsec1
self.Rsecerr2 = Rsec2 - Rsec
self.Lsec = self._get_bol_lum(Tsec, Rsec)
self.Lsecerr1 = self.Lsec - self._get_bol_lum(Tsec1, Rsec1)
self.Lsecerr2 = self._get_bol_lum(Tsec2, Rsec2) - self.Lsec
elif self.model=="PowerLaw":
alpha1, alpha, alpha2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
self.alpha = alpha
self.alphaerr1 = alpha - alpha1
self.alphaerr2 = alpha2 - alpha
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
elif self.model=="PowerLaw_BlackBody":
alpha1, alpha, alpha2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
scale1, scale, scale2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
T1, T, T2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.alpha = alpha
self.alphaerr1 = alpha - alpha1
self.alphaerr2 = alpha2 - alpha
self.scale = scale
self.scaleerr1 = scale - scale1
self.scaleerr2 = scale2 - scale
self.T = T
self.Terr1 = T - T1
self.Terr2 = T2 - T
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
elif self.model=="Disk":
Mstar1, Mstar, Mstar2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
Rstar1, Rstar, Rstar2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
logMacc1, logMacc, logMacc2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
R_out1, R_out, R_out2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
#scale1, scale, scale2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.Mstar = Mstar
self.Mstarerr1 = Mstar - Mstar1
self.Mstarerr2 = Mstar2 - Mstar
self.Rstar = Rstar
self.Rstarerr1 = Rstar - Rstar1
self.Rstarerr2 = Rstar2 - Rstar
self.logMacc = logMacc
self.logMaccerr1 = logMacc - logMacc1
self.logMaccerr2 = logMacc2 - logMacc
self.R_out = R_out
self.R_outerr1 = R_out - R_out1
self.R_outerr2 = R_out2 - R_out
def _save_output(self):
'''
Saves in a results file.
'''
exists = os.path.isfile(self.resfile)
with open(self.resfile, 'a') as outfile:
print ("Saving results to %s"%self.resfile)
if self.model == "BlackBody":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, self.L, self.Lerr1, self.Lerr2, self.av_mw))
elif self.model == "BlackBody_Av":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av Averr1 Averr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2, self.Av, self.Averr1, self.Averr2, self.av_mw))
elif self.model == "BlackBody2":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Tsec Tsecerr1 Tsecerr2 Rsec Rsecerr1 Rsecerr2 Lsec Lsecerr1 Lsecerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f \n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2,
self.Tsec, self.Tsecerr1, self.Tsecerr2, self.Rsec, self.Rsecerr1, self.Rsecerr2, \
self.Lsec, self.Lsecerr1, self.Lsecerr2, self.av_mw))
elif self.model == "BlackBody2_Av":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av Averr1 Averr2 Tsec Tsecerr1 Tsecerr2 Rsec Rsecerr1 Rsecerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2, self.Av, self.Averr1, self.Averr2,\
self.Tsec, self.Tsecerr1, self.Tsecerr2, self.Rsec, self.Rsecerr1, self.Rsecerr2, self.av_mw))
elif self.model == "PowerLaw":
if not exists:
outfile.write("mjd alpha alphaerr1 alphaerr2 scale scaleerr1 scaleerr2 Av Averr1 Averr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.alpha, self.alphaerr1, self.alphaerr2, self.scale, self.scaleerr1, self.scaleerr2, \
self.Av, self.Averr1, self.Averr2, self.av_mw))
elif self.model == "PowerLaw_BlackBody":
if not exists:
outfile.write("mjd alpha alphaerr1 alphaerr2 scale scaleerr1 scaleerr2 T Terr1 Terr2 R Rerr1 Rerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.alpha, self.alphaerr1, self.alphaerr2, self.scale, self.scaleerr1, self.scaleerr2, \
self.T, self.Terr1, self.Terr2, \
self.R, self.Rerr1, self.Rerr2, \
self.av_mw))
elif self.model == "Disk":
if not exists:
outfile.write("mjd M Merr1 Merr2 Rstar Rerr1 Rerr2 Macc Maccerr1 Maccerr2 R_out R_outerr1 R_outerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3e %.3e %.3e %.3f\n"%\
                (self.mjd, self.Mstar, self.Mstarerr1, self.Mstarerr2, \
self.Rstar, self.Rstarerr1, self.Rstarerr2,\
self.logMacc, self.logMaccerr1, self.logMaccerr2,\
#self.scale, self.scaleerr1, self.scaleerr2, \
self.R_out, self.R_outerr1, self.R_outerr2,\
self.av_mw))
else:
print ("Unknown model! %s"%self.model)
def _get_bol_lum(self, T, R):
'''
T is in K
R in R_sun.
Gives the Lbol in Lsun
'''
L = cnt.sigma_sb * (T * u.K)**4 * 4 * np.pi * (R*u.Rsun)**2
return (L.to(u.Lsun)).value
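    # Quick sanity check (Stefan-Boltzmann law): _get_bol_lum(5772., 1.) should
    # return roughly 1.0, i.e. one solar luminosity for solar temperature and radius.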
def _get_save_path(self, savefile, plot_name=""):
'''
Checks what savefile name has been given.
        If there is a value, then it just stores it in the plot directory provided.
        If there is no name, then it creates a filename with the suffix provided.
        It also checks if there is already a file named like that, and if that is the case,
it increases the suffix so that it has a higher number, avoiding collision.
'''
#If there is a given name to store the file, then we use that one
if (not savefile is None):
if os.path.dirname(savefile) == "":
name = os.path.join(self.plotdir, os.path.basename(savefile))
#If there is no name, then we will save the plots in the plot directory
#with an automatic name.
# This name will increase a count if the name exists already.
else:
i = 0
name = os.path.join(self.plotdir, "%s_%.1f_%d.pdf"%(plot_name, self.mjd, i))
while (os.path.isfile(name)):
i = i+1
name = os.path.join(self.plotdir, "%s_%.1f_%d.pdf"%(plot_name, self.mjd, i))
return name
def _initialize_parameters(self, plot=False):
'''
        Runs the least squares optimization routine to find the best initial parameters
to start the MCMC with.
'''
lam = np.linspace(np.min(self.wls)*0.9, np.max(self.wls)*1.1, 2000)
a_v_wls = extinction.fitzpatrick99(self.wls, a_v=self.av_mw, unit='aa')
reddening = 10**(0.4*a_v_wls)
if self.model == "BlackBody":
flux_ini = self._model_2(lam, self.initT1, self.initR1)
p0 = (self.initT1, self.initR1)
print ("Initial parameters given:", p0)
#Perform a LSQ fit
#params, covar = curve_fit(self._model_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model_2(lam, *params)
if plot:
plt.clf()
mask_lims = self.fluxerrs<0
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls[~mask_lims], self.fluxes[~mask_lims], yerr=self.fluxerrs[~mask_lims], marker="o", color="b", lw=0, label="Measurements")
plt.errorbar(self.wls[mask_lims], self.fluxes[mask_lims], yerr=self.fluxes[mask_lims]*0.2, fmt="o", color="b", uplims=True)
plt.xlabel("Wavelength [A]")
plt.ylabel("$F_{\\lambda}$ [erg/s/cm2/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
plt.yscale("log")
name = self._get_save_path(None, "fluxes_obs_bb")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody_Av":
flux_ini = self._model_av_r_2(lam, self.initT1, self.initR1, self.av_host)
p0 = (self.initT1, self.initR1, self.av_host)
print ("Initial ", p0)
#params, covar = curve_fit(self._model_av_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model_av_r_2(lam, *params)
if plot:
plt.clf()
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs_bb_av")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody2_Av":
flux_ini = self._model2_av_r_2(lam, self.initT1, self.initR1, self.av_host, self.initT2, self.initR2)
p0 = (self.initT1, self.initR1, self.av_host, self.initT2, self.initR2)
print ("Initial ", p0)
#params, covar = curve_fit(self._model2_av_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model2_av_r_2(lam, *params)
if plot:
plt.clf()
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody2":
flux_ini = self._model2_r_2(lam, self.initT1, self.initR1, self.initT2, self.initR2)
p0 = (self.initT1, self.initR1, self.initT2, self.initR2)
print ("Initial ", p0)
#params, covar = curve_fit(self._model2_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model2_r_2(lam, *params)
#flux_1 = self._model_2(lam, *params[0:2])
#flux_2 = self._model_2(lam, *params[2:])
if plot:
plt.clf()
plt.figure(figsize=(6,4))
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
#plt.plot(lam, flux_1, label="BB1")
#plt.plot(lam, flux_2, label="BB2")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.legend(loc="best", fontsize=10)
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.yscale("log")
name = self._get_save_path(None, "fluxes_obs_2bb")
plt.savefig(name, dpi=200)
elif self.model == "PowerLaw":
#params, covar = curve_fit(self._model_powerlaw_2, self.wls , self.fluxes, \
#p0=(self.alpha, self.initR1, self.av_host), sigma=self.fluxerrs, absolute_sigma=True, maxfev = 10000)
lam = np.linspace(3000, 25000, 2000)
fluxpw = self._model_powerlaw_2(lam, self.alpha, self.scale, self.av_host)
if plot:
plt.clf()
plt.plot(lam, fluxpw, label="Fit initial parameters")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs_powerlaw")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
elif self.model == "PowerLaw_BlackBody":
#params, covar = curve_fit(self._model_powerlaw_2, self.wls , self.fluxes, \
#p0=(self.alpha, self.initR1, self.av_host), sigma=self.fluxerrs, absolute_sigma=True, maxfev = 10000)
lam = np.linspace(3000, 25000, 2000)
fluxpw = self._model_powerlaw_bb_2(lam, self.alpha, self.scale, self.initT1, self.initR1)
if plot:
plt.clf()
plt.plot(lam, fluxpw, label="Fit initial parameters")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="MW ext. corr")
plt.errorbar(self.wls, self.fluxes/reddening, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes/reddening), 1.2*np.max(self.fluxes))
plt.legend(loc="best")
name = self._get_save_path(None, "fluxes_obs_powerlaw_bb")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
if self.model == 'Disk':
#params = (0.5, 0.2, 5e-9, 1, 2)
p0 = (self.Mstar, self.Rstar, self.logMacc, self.R_out)
#params, covar = curve_fit(self._model_accretion_disk2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#print ("LSQ fit: Mstar:", params[0], " Rstar", params[1], "logMacc ", \
# params[2], "R_out", params[3])
lam = np.linspace(3000, 25000, 2000)
#flux_disk = self._model_accretion_disk2(lam, params[0], params[1], params[2], params[3])
if plot:
plt.clf()
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
#plt.plot(lam, flux_disk, lw=3)
plt.xlabel("Wavelength [$\\mu$m]")
plt.ylabel("Flux [erg/cm$^2$/s]")
plt.ylim(np.nanmin(self.fluxes)*0.9, np.nanmax(self.fluxes)*1.2)
plt.legend()
name = self._get_save_path(None, "fluxes_obs_disk")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
def initialize(self, plot=False):
'''
Will transform the magnitudes to fluxes and use the distance to the object to
calculate the luminosity at each wavelength.
'''
if (not os.path.isdir(self.plotdir)):
os.makedirs(self.plotdir)
print ("Created plot directory %s"%self.plotdir)
#Directory where to store the results
if (not os.path.isdir(self.resdir)):
os.makedirs(self.resdir)
print ("Created result directory %s"%(self.resdir))
self.resfile = os.path.join(self.resdir, self.model + os.path.basename(self.resfile))
# generate the data
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.wls, self.fluxes, self.fluxerrs = self._band2flux()
#Plot the raw fluxes before correcting them.
'''if (plot):
plt.figure(figsize=(8,6))
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0)
for i in range(len(self.wls)):
plt.text(self.wls[i], self.fluxes[i]*1.01, self.bands[i].split(",")[-1], alpha=.4)
name = self._get_save_path(None, "fluxes_observed")
plt.yscale("log")
plt.xlabel("Wavelength [A]")
plt.ylabel("log (Flux/[erg/cm2/s])")
plt.tight_layout()
plt.savefig(name, dpi=200)'''
if not self.distMpc is None and self.distMpc !=0:
print ("Using distance to the source of %.1e Mpc"%self.distMpc)
fluxFactor = (4*np.pi*((self.distMpc*u.Mpc).to(u.cm) )**2).value
elif (self.distMpc is None or self.distMpc==0 )and (not self.z is None and self.z != 0):
self.distMpc = self.cosmo.luminosity_distance(self.z)
#Compute the flux multiplication factor for the object if it is at distance distMpc
#We transform that to cm, as the flux is in erg cm-2 s-1
fluxFactor = (4*np.pi*(self.distMpc.to(u.cm) )**2).value
else: # self.distMpc is None and self.z is None:
#Here we do not use any multiplication flux factor
print ("Warning: no redshift or distance provided!")
fluxFactor = 1
self.fluxes = self.fluxes * fluxFactor
self.fluxerrs = self.fluxerrs * fluxFactor
self._initialize_parameters(plot)
def run(self):
'''
Runs the main MCMC process.
Retrieves the priors, the likelihood process and computes the posterior probability.
'''
xs = self.wls
ys = self.fluxes
errs = self.fluxerrs
if self.model == "BlackBody":
p0 = np.array([ self.initT1, self.initR1])
sigs = np.array([self.initT1*0.2, self.initR1*0.2])
elif self.model == "BlackBody_Av":
p0 = np.array([ self.initT1, self.initR1, self.av_host])
sigs = np.array([2000, 10, 0.5])
elif self.model == "BlackBody2":
p0 = np.array([ self.initT1, self.initR1, self.initT2, self.initR2])
sigs = np.array([self.initT1*0.2, self.initR1*0.2, self.initT2*0.2, self.initR2*0.2])
elif self.model == "BlackBody2_Av":
p0 = np.array([ self.initT1, self.initR1, self.av_host, self.initT2, self.initR2])
sigs = np.array([2000, 5, 1, 2000, 5])
elif self.model == "PowerLaw":
p0 = np.array([ self.alpha, self.scale, self.av_host])
sigs = np.array([2, 3, 2])
elif self.model == "PowerLaw_BlackBody":
p0 = np.array([ self.alpha, self.scale, self.initT1, self.initR1])
sigs = np.array([2, 3, 2000, 2])
elif self.model == "Disk":
p0 = np.array([ self.Mstar, self.Rstar, self.logMacc, self.R_out])
sigs = np.array([0.1, 0.01, 1, 0.1])
print ("Initialized with p0", p0, " and sigmas ", sigs)
else:
print ("-------------------CRITICAL ERROR!----------------------")
print ("-------------------UNKNOWN model! %s----------------------"%self.model)
print ("-------------------CRITICAL ERROR!----------------------")
sys.exit()
ndim = len(p0)
# emsemble MCMC
p0s = emcee.utils.sample_ball(p0, sigs, self.nwalkers)
# initialize the ball of initial conditions
#Supports the threads=X argument for parallelization
sampler = emcee.EnsembleSampler(self.nwalkers, ndim, self._logposterior,\
args=(xs, ys, errs), threads=10)
pos, lnprob, state = sampler.run_mcmc(p0s, self.burnin)
print ("Burning phase finished")
sampler.reset()
pos, lnprob, state = sampler.run_mcmc(pos, self.niterations)
print ('Acceptance ratio', sampler.acceptance_fraction)
self.sampler = sampler
print ("MCMC main phase finished")
self._fill_output()
self._save_output()
def plot_corner_posteriors(self, savefile=None):
'''
Plots the corner plot of the MCMC results.
'''
if self.model == "BlackBody2":
labels=["T1", "R1", "T2", "R2"]
elif self.model.startswith("BlackBody"):
labels=["T1", "R1", "Av", "T2", "R2"]
elif self.model == "PowerLaw":
labels=["alpha", "scale", "Av"]
elif self.model == "PowerLaw_BlackBody":
labels = ["alpha", "scale", "T", "R"]
elif self.model == "Disk":
labels = ["Mstar", "Rstar", "logMacc", "R_out"]
ndim = len(self.sampler.flatchain[0,:])
chain = self.sampler
samples = chain.flatchain
samples = samples[:,0:ndim]
plt.figure(figsize=(8,8))
fig = corner.corner(samples, labels=labels[0:ndim], quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_kwargs={"fontsize": 12})
fig.suptitle("MJD: %.2f"%self.mjd)
name = self._get_save_path(savefile, "mcmc_posteriors")
plt.savefig(name)
plt.close("all")
plt.figure(figsize=(8,ndim*3))
for n in range(ndim):
plt.subplot(ndim,1,n+1)
chain = self.sampler.chain[:,:,n]
nwalk, nit = chain.shape
for i in np.arange(nwalk):
plt.plot(chain[i], lw=0.1)
plt.ylabel(labels[n])
plt.xlabel("Iteration")
name_walkers = self._get_save_path(savefile, "mcmc_walkers")
plt.tight_layout()
plt.savefig(name_walkers)
plt.close("all")
def plot_fit(self, lambdaFlambda=False):
'''
Plots the best fit model to the data.
'''
lam = np.linspace( np.min(self.wls) -1500 , np.max(self.wls) + 1500, 1000)
plt.clf()
plt.figure(figsize=(8,6))
mask_lims = self.fluxerrs<0
if lambdaFlambda:
factor_obs=self.wls
else:
factor_obs=np.ones_like(self.wls)
plt.errorbar(self.wls[~mask_lims], self.fluxes[~mask_lims]*factor_obs[~mask_lims], yerr=self.fluxerrs[~mask_lims]*factor_obs[~mask_lims], marker="o", color="b", lw=0, label="Measurements")
plt.errorbar(self.wls[mask_lims], self.fluxes[mask_lims]*factor_obs[mask_lims], yerr=self.fluxes[mask_lims]*0.2*factor_obs[mask_lims], fmt="o", color="b", uplims=True)
for i in range(len(self.wls)):
plt.text(self.wls[i], self.fluxes[i]*1.01*factor_obs[i], self.bands[i], alpha=.4, fontsize=8)
if self.model == "BlackBody":
fluxbb = self._model(lam, (self.T, self.R))
if lambdaFlambda:
factor = lam
else:
factor = np.ones_like(lam)
plt.plot(lam, fluxbb*factor, "k-", label="BB fit")
plt.title("T: %d K R:%d R$_{\odot}$ Lumiosity %.2e L$_{\odot}$"%(self.T, self.R, self.L))
elif self.model == "BlackBody_Av":
fluxbb = self._model(lam, (self.T, self.R))
fluxbb_red = self._model_av_r(lam, (self.T, self.R, self.Av))
plt.plot(lam, fluxbb, "k-", label="BB fit")
plt.plot(lam, fluxbb_red, "red", label="BB fit + reddening")
plt.title("T: %.1f K R:%.1f R$_{\odot}$ Lumiosity %.1e L$_{\odot}$ Av: %.2f"%(np.round(self.T,0), np.round(self.R,0), np.round(self.L,1), self.Av))
elif self.model == "BlackBody2_Av":
fluxbb_red = self._model2_av(lam, (self.T, self.R, self.Av))
fluxbb_secondary_red = self._model2_av(lam, (self.Tsec, self.Rsec, self.Av))
fluxbb_with_seconday = self._model2_av(lam, (self.T, self.R, self.Av, self.Tsec, self.Rsec))
plt.plot(lam, fluxbb_red, "k-", label="BB1 fit + reddening")
plt.plot(lam, fluxbb_secondary_red, "k--", label="BB2 fit + reddening")
plt.plot(lam, fluxbb_with_seconday, "green", label="BB1 + BB2")
plt.title("T: %.1f K R:%.1f R$_{\odot}$ Lumiosity %.1e L$_{\odot}$ Av: %.2f\n T2: %.1f R2: %.1f"%(self.T, \
self.R, self.L, self.Av, self.Tsec, self.Rsec))
elif self.model == "BlackBody2":
fluxbb_primary = self._model(lam, (self.T, self.R))
fluxbb_secondary = self._model(lam, (self.Tsec, self.Rsec))
fluxbb_with_seconday = self._model2_r(lam, (self.T, self.R, self.Tsec, self.Rsec))
plt.plot(lam, fluxbb_primary, "k-", label="BB1 fit")
plt.plot(lam, fluxbb_secondary, "k--", label="BB2 fit")
plt.plot(lam, fluxbb_with_seconday, "green", label="BB1 + BB2")
plt.title("T: %d K R:%d R$_{\odot}$ T2: %d R2: %d"%( self.T, \
self.R, self.Tsec, self.Rsec))
elif self.model == "PowerLaw":
flux = self._model_powerlaw(lam, (self.alpha, self.scale, self.Av))
plt.plot(lam, flux, "k-", label="PowerLaw + reddening")
plt.title("$\\alpha$: %.1f Av: %.2f"%(self.alpha, self.Av))
elif self.model == "PowerLaw_BlackBody":
flux = self._model_powerlaw_bb(lam, (self.alpha, self.scale, self.T, self.R))
flux_pw = self._model_powerlaw(lam, (self.alpha, self.scale, 0))
flux_bb = self._model(lam, (self.T, self.R))
plt.plot(lam, flux, "k-", label="PowerLaw + BlackBody")
plt.plot(lam, flux_pw, "b--", label="PowerLaw")
plt.plot(lam, flux_bb, "g:", label="BlackBody")
plt.title("$\\alpha$: %.1f scale: %.2e T: %.1f R:%.1f"%(self.alpha, self.scale, self.T, self.R))
elif self.model == "Disk":
fluxdisk = self._model_accretion_disk(lam, (self.Mstar, self.Rstar, self.logMacc, self.R_out))
plt.plot(lam, fluxdisk, "k-", label="Disk fit")
plt.title("M:%.3f M$_{\\odot}$ R:%.3f R$_{\odot}$ M$_{acc}$:%.2f R_out: %.2f"%(self.Mstar, self.Rstar, self.logMacc, self.R_out))
ymin, ymax = plt.ylim()
#plt.ylim(np.max([ymin, np.min(self.fluxes)*0.01]), ymax)
plt.xlabel("Wavelength [$\\AA$]")
if (lambdaFlambda):
plt.ylabel("$\\lambda F_{\\lambda}$ [erg/s]")
plt.ylim(ymin=np.min(self.fluxes*factor_obs) * 0.1)
else:
plt.ylabel("$F_{\\lambda}$ [erg/s/$\\AA$]")
plt.ylim(ymin=np.min(self.fluxes) * 0.1)
plt.yscale("log")
plt.legend()
name = self._get_save_path(None, "mcmc_best_fit_model")
plt.savefig(name)
plt.close("all")
def write_fit_params(self):
'''
Write the best fit parameters of the model to the standard output.
'''
if self.model.startswith("BlackBody"):
#Prints the best parameters
print ('''
Temperature: \t %.3f -%.3f +%.3f K
Radius: \t\t %.2e -%.2e +%.2e R$_{\odot}$
Luminosity: \t %.3e -%.3e +%.3e L$_{\odot}$'''%(\
self.T, self.Terr1, self.Terr2, \
self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2))
if self.model == "BlackBody_Av":
print (" Av: \t\t\t %.1f -%.1f +%.1f mag"%(self.Av, self.Averr1, self.Averr2))
if self.model == "BlackBody2":
print (" Temperature2: %.1f -%.1f +%.1f K"%(self.Tsec, self.Tsecerr1, self.Tsecerr2))
print (" Radius2: %.2e -%.2e +%.2e R$_{\odot}$"%(self.Rsec, self.Rsecerr1, self.Rsecerr2))
print (" Luminosity2 %.3e -%.3e +%.3e L$_{\odot}$"%(self.Lsec, self.Lsecerr1, self.Lsecerr2))
if self.model == "BlackBody2_Av":
print (" Av: %.1f -%.1f +%.1f mag"%(self.Av, self.Averr1, self.Averr2))
print (" Temperature2: %.1f -%.1f +%.1f K"%(self.Tsec, self.Tsecerr1, self.Tsecerr2))
print (" Radius2: %.1f -%.1f +%.1f R$_{\odot}$"%(self.Rsec, self.Rsecerr1, self.Rsecerr2))
if (self.model == "PowerLaw"):
print ('''
alpha: %.2f -%.2f +%.2f
Scale : %.2e -%.2e +%.2e
Av %.2f -%.2f +%.2f'''%(\
self.alpha, self.alphaerr1, self.alphaerr2, \
self.scale, self.scaleerr1, self.scaleerr2, \
self.Av, self.Averr1, self.Averr2))
if (self.model == "PowerLaw_BlackBody"):
print ('''
alpha: %.2f -%.2f +%.2f
Scale (R): %.2e -%.2e +%.2e
T %.2f -%.2f +%.2f
R %.2f -%.2f +%.2f '''%(\
self.alpha, self.alphaerr1, self.alphaerr2, \
self.scale, self.scaleerr1, self.scaleerr2,\
self.T, self.Terr1, self.Terr2,\
self.R, self.Rerr1, self.Rerr2 ))
if (self.model == "Disk"):
print ('''
Mstar: %.3f$_{-%.3f}^{+%.3f}$
Rstar (10^8 cm): %.3f -%.3f +%.3f
logMacc %.3f$_{-%.3f}^{+%.3f}$
R_out %.3f$_{-%.3f}^{+%.3f}$ '''%(\
self.Mstar, self.Mstarerr1, self.Mstarerr2, \
self.Rstar*(u.Rsun.to(u.cm))/1e8, self.Rstarerr1*(u.Rsun.to(u.cm))/1e8, self.Rstarerr2*(u.Rsun.to(u.cm))/1e8,\
self.logMacc, self.logMaccerr1, self.logMaccerr2,\
self.R_out, self.R_outerr1, self.R_outerr2 ))
| mit | -9,118,717,144,658,973,000 | 39.41373 | 196 | 0.504743 | false | 3.123932 | false | false | false |
disco-framework/disco | priv/general/components/gui/gui.py | 1 | 18070 | #!/usr/bin/python
import sys
from PyQt4 import QtCore, QtGui
from ui_mainview import Ui_MainWindow
import json
from jsonreader import JsonReader
##################################################
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# status bar
self.labelProblemSpec = QtGui.QLabel()
self.labelProblemTime = QtGui.QLabel()
self.labelCurrentRound = QtGui.QLabel()
self.labelWorkerInput = QtGui.QLabel()
self.ui.statusbar.addWidget(self.labelProblemSpec, 1)
self.ui.statusbar.addWidget(self.labelProblemTime, 1)
self.ui.statusbar.addWidget(self.labelCurrentRound, 1)
self.ui.statusbar.addWidget(self.labelWorkerInput, 1)
# set menu shortcuts
self.ui.actionLoadGameState.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+O")))
self.ui.actionSaveGameState.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+S")))
self.ui.actionQuit.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+Q")))
self.ui.actionStartRound.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+R")))
self.ui.actionAddScores.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+A")))
self.ui.actionKillAllWorkers.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+K")))
self.DataCollector = JsonReader(self)
self.connect(self.DataCollector, QtCore.SIGNAL("received_data"), self.received)
self.connect(self.DataCollector, QtCore.SIGNAL("worker_updated"), self.update_worker)
self.connect(self.DataCollector, QtCore.SIGNAL("round_started"), self.start_round)
self.connect(self.DataCollector, QtCore.SIGNAL("round_ended"), self.end_round)
self.connect(self.DataCollector, QtCore.SIGNAL("worker_input_changed"), self.update_worker_input)
self.connect(self.DataCollector, QtCore.SIGNAL("problem_chosen"), self.choose_problem)
self.connect(self.DataCollector, QtCore.SIGNAL("all_data"), self.update_all)
self.connect(self.DataCollector, QtCore.SIGNAL("save_game_state_reply"), self.save_game_state_reply)
self.connect(self.DataCollector, QtCore.SIGNAL("load_game_state_reply"), self.load_game_state_reply)
self.DataCollector.start()
self.problemAnswerTime = 0
self.roundTimerRemaining = 0
self.roundTimer = QtCore.QTimer()
QtCore.QObject.connect(self.roundTimer, QtCore.SIGNAL("timeout()"), self.roundTimer_tick)
# file menu
QtCore.QObject.connect(self.ui.actionLoadGameState, QtCore.SIGNAL("triggered()"), self.btnLoadGameState_clicked)
QtCore.QObject.connect(self.ui.actionSaveGameState, QtCore.SIGNAL("triggered()"), self.btnSaveGameState_clicked)
QtCore.QObject.connect(self.ui.actionReloadAllData, QtCore.SIGNAL("triggered()"), self.btnReloadAllData_clicked)
QtCore.QObject.connect(self.ui.actionQuit, QtCore.SIGNAL("triggered()"), self.btnQuit_clicked)
# round menu
QtCore.QObject.connect(self.ui.actionStartRound, QtCore.SIGNAL("triggered()"), self.btnStartRound_clicked)
QtCore.QObject.connect(self.ui.actionAddScores, QtCore.SIGNAL("triggered()"), self.btnAddScores_clicked)
QtCore.QObject.connect(self.ui.actionKillAllWorkers, QtCore.SIGNAL("triggered()"), self.btnKillAllWorkers_clicked)
# worker tab
self.ui.tableWorker.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.ui.tableWorker.customContextMenuRequested.connect(self.tableWorker_requestContextMenu)
# io tab
QtCore.QObject.connect(self.ui.btnSend, QtCore.SIGNAL("clicked()"), self.btnSend_clicked)
QtCore.QObject.connect(self.ui.edtSend, QtCore.SIGNAL("returnPressed()"), self.btnSend_clicked)
# worker table header
thh = self.ui.tableWorker.horizontalHeader()
thh.setVisible(True)
thh.resizeSection(0, 50) # ranking group
thh.resizeSection(1, 60) # id
thh.resizeSection(2, 170) # name
thh.resizeSection(3, 230) # proposition
thh.resizeSection(4, 100) # points
thh.resizeSection(5, 50) # processed points
thh.resizeSection(6, 100) # problem points (accumulated over all rounds on this problem)
thh.setSortIndicator(1, QtCore.Qt.AscendingOrder)
tvh = self.ui.tableWorker.verticalHeader()
tvh.setVisible(True)
tvh.setResizeMode(QtGui.QHeaderView.Fixed)
self.reset_problem_list([])
self.worker_blocked = {}
def closeEvent(self, e):
self.send(json.dumps({'action': 'quit program'}))
self.DataCollector.terminate() # TODO: "This function is dangerous and its use is discouraged"
self.DataCollector.wait()
e.accept()
app.exit()
###############################
## main menu / buttons ##
###############################
## file menu
def btnLoadGameState_clicked(self):
fileName = str(QtGui.QFileDialog.getOpenFileName())
if fileName != "":
self.send(json.dumps({'action': 'load game state', 'file path': fileName}))
def btnSaveGameState_clicked(self):
fileName = str(QtGui.QFileDialog.getSaveFileName())
if fileName != "":
self.send(json.dumps({'action': 'save game state', 'file path': fileName}))
def btnReloadAllData_clicked(self):
self.send(json.dumps({'action': 'get all data'}))
def btnQuit_clicked(self):
self.close()
## problems menu
def btnChooseProblem_clicked(self, idx, action, oldChecked):
action.setChecked(oldChecked) # undo auto check
self.send(json.dumps({'action': 'choose problem', 'problem idx': idx}))
## round menu
def btnStartRound_clicked(self):
self.send(json.dumps({'action': 'start round'}))
def btnAddScores_clicked(self):
self.send(json.dumps({'action': 'add scores'}))
self.ui.actionAddScores.setEnabled(False)
def btnKillAllWorkers_clicked(self):
self.send(json.dumps({'action': 'kill all workers'}))
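    # The handlers above (and below) drive the backend with one-line JSON commands,
    # e.g. {"action": "start round"}, {"action": "add scores"},
    # {"action": "kill all workers"}, {"action": "choose problem", "problem idx": 0},
    # {"action": "save game state", "file path": "/tmp/state.json"}
    # (the index and file path values here are only illustrative).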
## worker tab
def tableWorker_requestContextMenu(self, position):
workerId = str(self.ui.tableWorker.item(self.ui.tableWorker.currentRow(), 1).text())
# create menu
menu = QtGui.QMenu()
actApply = menu.addAction("&Apply proposition")
actBlock = None
actUnblock = None
if self.worker_blocked[workerId]:
actUnblock = menu.addAction("Un&block worker '" + workerId + "'")
else:
actBlock = menu.addAction("&Block worker '" + workerId + "'")
# execute menu synchronously
action = menu.exec_(self.ui.tableWorker.viewport().mapToGlobal(position))
if action != None:
if action == actApply:
if QtGui.QMessageBox.information(self, "Apply proposition", "Really apply proposition from " + workerId + "?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
self.send(json.dumps({'action': 'apply proposition', 'worker id': workerId}))
elif action == actBlock:
self.send(json.dumps({'action': 'block worker', 'worker id': workerId}))
elif action == actUnblock:
self.send(json.dumps({'action': 'unblock worker', 'worker id': workerId}))
## io tab
def btnSend_clicked(self):
msg = self.ui.edtSend.text()
self.send(msg)
self.ui.edtSend.clear()
#######################
## Round timer ##
#######################
def roundTimer_tick(self):
self.roundTimerRemaining -= self.roundTimer.interval()
if self.roundTimerRemaining <= 0:
self.roundTimer.stop()
self.roundTimerRemaining = 0
self.labelProblemTime.setText("Answer time remaining\n " +
str(self.roundTimerRemaining/1000) + "s")
#######################
## JSON events ##
#######################
def update_worker(self, id, proposition, caption, score, processedScore, problemScore, blocked, working):
row = self.get_worker_table_row(id)
if proposition == None:
proposition = ""
if row != None:
self.update_worker_by_row(row, id, proposition, caption, score, processedScore, problemScore, blocked, working)
def start_round(self, round):
self.ui.actionStartRound.setEnabled(False)
self.ui.menuProblems.setEnabled(False)
self.ui.actionAddScores.setEnabled(False)
self.labelCurrentRound.setText("Round (running)\n " + str(round))
self.roundTimerRemaining = self.problemAnswerTime
self.roundTimer.start(100)
def end_round(self, round):
self.ui.actionStartRound.setEnabled(True)
self.ui.menuProblems.setEnabled(True)
self.ui.actionAddScores.setEnabled(True)
self.labelCurrentRound.setText("Round\n " + str(round))
self.roundTimerRemaining = 0
self.roundTimer_tick()
def update_worker_input(self, workerInput):
def format_wi_line(line): return shorten_string(28, line)
wiString = "\n".join(list(map(format_wi_line, workerInput)))
self.labelWorkerInput.setText("Worker input for next round:\n" + wiString)
def choose_problem(self, problemIdx):
self.roundTimer.stop()
self.reset_problem_list(self.problemList, problemIdx)
probDesc, probSpec, answerTime, startState = self.problemList[problemIdx]
self.labelProblemSpec.setText("Problem\n " + probDesc)
self.labelProblemTime.setText("Answer time\n " + str(answerTime/1000.0) + "s")
self.problemAnswerTime = answerTime
self.labelCurrentRound.setText("")
def update_all(self, running, workerList, problemList, problemIdx, round, workerInput, problemState):
self.clear_worker_table()
for id, name, group, proposition, caption, score, processedScore, problemScore, blocked, working in workerList:
self.add_worker(id, name, group, proposition, caption, score, processedScore, problemScore, blocked, working)
self.update_worker_input(workerInput)
if running:
self.start_round(round)
else:
self.end_round(round)
self.problemList = problemList
self.choose_problem(problemIdx)
def save_game_state_reply(self, result):
if result == "ok":
msg = "Game state successfully saved."
QtGui.QMessageBox.information(self, "Game state saved", msg, QtGui.QMessageBox.Ok)
else:
if result == "enoent" : msg = "No such file or directory!"
elif result == "enotdir": msg = "Not a directory!"
elif result == "enospc" : msg = "No space left on device!"
elif result == "eacces" : msg = "Permission denied!"
elif result == "eisdir" : msg = "Illegal operation on a directory!"
else : msg = "Unknown error: " + result
QtGui.QMessageBox.warning(self, "Error saving game state", msg, QtGui.QMessageBox.Ok)
def load_game_state_reply(self, result):
if result == "ok":
msg = "Game state successfully loaded."
QtGui.QMessageBox.information(self, "Game state loaded", msg, QtGui.QMessageBox.Ok)
else:
if result == "eformat": msg = "Invalid file format!"
elif result == "enoent" : msg = "No such file or directory!"
elif result == "enotdir": msg = "Not a directory!"
elif result == "eacces" : msg = "Permission denied!"
elif result == "eisdir" : msg = "Illegal operation on a directory!"
else : msg = "Unknown error: " + result
QtGui.QMessageBox.warning(self, "Error loading game state", msg, QtGui.QMessageBox.Ok)
#############################
## private functions ##
#############################
def send(self, msg):
self.ui.txtRecv.appendHtml("<span style='font-weight:bold;color:red'>send:</span> "
+ escape_html(msg).rstrip("\n").replace("\n","<br />"))
print(msg)
sys.stdout.flush()
def received(self, msg):
self.ui.txtRecv.appendHtml("<span style='font-weight:bold;color:blue'>recv:</span> "
+ escape_html(msg).rstrip("\n").replace("\n","<br />"))
def get_worker_table_row(self, id):
for row in range(0, self.ui.tableWorker.rowCount()):
if self.ui.tableWorker.item(row, 1).text() == id:
return row
return None
def clear_worker_table(self):
self.worker_blocked = {}
self.ui.tableWorker.clearContents()
self.ui.tableWorker.setRowCount(0)
def add_worker(self, id, name, group, proposition, propCaption, score, processedScore, problemScore, blocked, working):
if proposition == None:
proposition = ""
self.worker_blocked[id] = blocked != "no"
row = self.ui.tableWorker.rowCount()
self.ui.tableWorker.setRowCount(row + 1)
self.ui.tableWorker.setSortingEnabled(False)
item = QtGui.QTableWidgetItem()
item.setText(group)
self.ui.tableWorker.setItem(row, 0, item)
item = QtGui.QTableWidgetItem()
item.setText(id)
self.ui.tableWorker.setItem(row, 1, item)
item = QtGui.QTableWidgetItem()
item.setText(name)
self.ui.tableWorker.setItem(row, 2, item)
item = QtGui.QTableWidgetItem()
self.ui.tableWorker.setItem(row, 3, item)
item = CustomTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.ui.tableWorker.setItem(row, 4, item)
item = CustomTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.ui.tableWorker.setItem(row, 5, item)
item = CustomTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.ui.tableWorker.setItem(row, 6, item)
self.update_worker_by_row(row, id, proposition, propCaption, score, processedScore, problemScore, blocked, working)
self.ui.tableWorker.setSortingEnabled(True)
def update_worker_by_row(self, row, id, proposition, propCaption, score, processedScore, problemScore, blocked, working):
isBlocked = blocked != "no"
blockedIdx = blocked["idx"] if "idx" in blocked else 0
self.worker_blocked[id] = isBlocked
self.ui.tableWorker.setSortingEnabled(False)
brush = QtGui.QBrush(QtGui.QColor(190, 190, 190))
if self.worker_blocked[id]:
brush.setStyle(QtCore.Qt.SolidPattern)
else:
brush.setStyle(QtCore.Qt.NoBrush)
self.ui.tableWorker.item(row, 0).setBackground(brush)
self.ui.tableWorker.item(row, 1).setBackground(brush)
self.ui.tableWorker.item(row, 2).setBackground(brush)
item = self.ui.tableWorker.item(row, 3)
item.setText(propCaption)
item.setBackground(brush)
item = self.ui.tableWorker.item(row, 4)
item.setText(str(score))
item.setCustomSortData(isBlocked, {False: int(score), True: blockedIdx}[isBlocked])
item.setBackground(brush)
item = self.ui.tableWorker.item(row, 5)
item.setText(str(processedScore))
item.setCustomSortData(isBlocked, {False: int(processedScore), True: blockedIdx}[isBlocked])
item.setBackground(brush)
item = self.ui.tableWorker.item(row, 6)
item.setText(str(problemScore))
item.setCustomSortData(isBlocked, {False: int(problemScore), True: blockedIdx}[isBlocked])
item.setBackground(brush)
if self.ui.tableWorker.cellWidget(row, 2) == None:
if working:
self.ui.tableWorker.setCellWidget(row, 2, WorkingWidget(self))
else:
if not working:
self.ui.tableWorker.removeCellWidget(row, 2)
self.ui.tableWorker.setSortingEnabled(True)
def reset_problem_list(self, lst, checkedIdx=None):
self.problemList = lst
self.ui.menuProblems.clear()
if lst == []:
action = QtGui.QAction(self)
action.setText("--- no problems ---")
action.setEnabled(False)
self.ui.menuProblems.addAction(action)
else:
for idx, (description, spec, answerTime, state) in enumerate(lst):
action = QtGui.QAction(self)
action.setText(description + "\t" + str(answerTime/1000.0) + "s")
action.setCheckable(True)
if checkedIdx == idx:
action.setChecked(True)
QtCore.QObject.connect(action, QtCore.SIGNAL("triggered()"),
lambda i=idx, a=action, chk=(checkedIdx==idx):
self.btnChooseProblem_clicked(i, a, chk))
self.ui.menuProblems.addAction(action)
##################################################
class WorkingWidget(QtGui.QLabel):
def __init__(self, parent=None):
super(WorkingWidget, self).__init__(parent)
self.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
movie = QtGui.QMovie("./gears.gif")
self.setMovie(movie)
movie.start()
##################################################
class CustomTableWidgetItem(QtGui.QTableWidgetItem):
def __init__(self):
# call custom constructor with item type 'UserType'
QtGui.QTableWidgetItem.__init__(self, QtGui.QTableWidgetItem.UserType)
self.blocked = False
self.sortKey = 0
def setCustomSortData(self, blocked, sortKey):
self.blocked = blocked
self.sortKey = sortKey
# override the 'less than' operator
def __lt__(self, other):
if self.blocked == other.blocked:
return self.sortKey > other.sortKey
else:
return self.blocked < other.blocked
##################################################
def shorten_string(chars, string):
return (string[:(chars-3)] + '...') if len(string) > chars else string
def escape_html(str):
return str.replace("&","&").replace(">",">").replace("<","<")
##################################################
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
win = MainWindow()
win.show()
sys.exit(app.exec_())
| apache-2.0 | -6,686,924,166,606,951,000 | 38.714286 | 124 | 0.639236 | false | 3.791439 | false | false | false |
au9ustine/elrond | elrond/aws/s3.py | 1 | 3432 | import os
import json
import sys
import threading
import boto3
from boto3.s3.transfer import S3Transfer
from elrond.crypto import get_file_digest
DEFAULT_CHUNK_SIZE = 64 * 1024 * 1024
ELROND_S3_SINGLETON_CLIENT = None
ELROND_S3_SUPPORTED_REGIONS = [
'EU',
'eu-west-1',
'us-west-1',
'us-west-2',
'ap-south-1',
'ap-southeast-1',
'ap-southeast-2',
'ap-northeast-1',
'sa-east-1',
'cn-north-1',
'eu-central-1'
]
def analyse(file_path):
res_st = os.stat(file_path, follow_symlinks=True)
return {
'mode': res_st.st_mode,
'atime': res_st.st_atime,
'mtime': res_st.st_mtime,
'ctime': res_st.st_ctime,
'size': res_st.st_size,
'digest': {
'algorithm': 'sha256',
'value': get_file_digest(file_path)
}
}
def update_metadata(file_path, metadata):
os.chmod(file_path, metadata['mode'])
os.utime(file_path, (metadata['atime'], metadata['mtime']))
def metadata2str(metadata):
return json.dumps(metadata,separators=(',', ':'))
def str2metadata(metadata_str):
return json.loads(metadata_str)
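# Round-trip example (illustrative values): analyse('/tmp/data.bin') returns a dict
# such as {'mode': 33188, 'atime': ..., 'mtime': ..., 'ctime': ..., 'size': 1024,
# 'digest': {'algorithm': 'sha256', 'value': '...'}}, and metadata2str() /
# str2metadata() convert it to and from a compact JSON string.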
def chunk(stream, chunk_size=DEFAULT_CHUNK_SIZE):
    for block in iter(lambda: stream.read(chunk_size), b''):
yield block
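# Example usage (hypothetical file name): read a large file in 64 MiB blocks.
#
#   with open('/tmp/archive.tar', 'rb') as fp:
#       for block in chunk(fp):
#           handle(block)  # 'handle' stands for whatever consumes the data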
def get_client():
global ELROND_S3_SINGLETON_CLIENT
if ELROND_S3_SINGLETON_CLIENT is None:
ELROND_S3_SINGLETON_CLIENT = boto3.client('s3')
return ELROND_S3_SINGLETON_CLIENT
def get_buckets():
client = get_client()
return [bucket['Name'] for bucket in client.list_buckets()['Buckets']]
def get_bucket(bucket_name):
client = get_client()
if bucket_name in get_buckets():
location = client.get_bucket_location(
Bucket=bucket_name
)['LocationConstraint']
return (bucket_name, location)
else:
location = os.environ['AWS_DEFAULT_REGION']
assert location in ELROND_S3_SUPPORTED_REGIONS
res = client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': location
}
)
return (bucket_name, location)
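# Example (hypothetical bucket name): get_bucket('elrond-backups') returns
# ('elrond-backups', region); if the bucket does not exist yet, it is created in
# the region given by AWS_DEFAULT_REGION.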
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
def upload(bucket_name, file_path, key_name):
file_metadata = analyse(file_path)
    multipart_mode = file_metadata['size'] > 100 * 1024 * 1024
client = get_client()
if multipart_mode:
pass
else:
transfer = S3Transfer(client)
transfer.upload_file(
file_path, bucket_name, key_name,
extra_args={
'ACL': 'private',
'Metadata': metadata2str(file_metadata),
'ContentType': 'application/octet-stream'
},
callback=ProgressPercentage(file_path))
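# Example call (hypothetical names): upload('elrond-backups', '/tmp/archive.tar',
# 'backups/archive.tar') uploads the file privately, attaching the stat/digest
# metadata gathered by analyse() and reporting progress on stdout.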
| mit | 4,340,258,530,075,880,400 | 27.363636 | 74 | 0.586247 | false | 3.473684 | false | false | false |
cerrno/neurokernel | examples/timing/run_gpu_slow.py | 2 | 1898 | #!/usr/bin/env python
"""
Run timing test (GPU) scaled over number of ports.
"""
import csv
import glob
import multiprocessing as mp
import os
import re
import subprocess
import sys
import numpy as np
from neurokernel.tools.misc import get_pids_open
try:
from subprocess import DEVNULL
except ImportError:
import os
DEVNULL = open(os.devnull, 'wb')
out_file = sys.argv[1]
script_name = 'timing_demo_gpu_slow.py'
trials = 3
lpus = 2
def check_and_print_output(*args):
for i in xrange(5):
# CUDA < 7.0 doesn't properly clean up IPC-related files; since
# these can cause problems, we manually remove them before launching
# each job:
ipc_files = glob.glob('/dev/shm/cuda.shm*')
for ipc_file in ipc_files:
# Only remove files that are not being held open by any processes:
if not get_pids_open(ipc_file):
try:
os.remove(ipc_file)
except:
pass
try:
out = subprocess.check_output(*args, env=os.environ, stderr=DEVNULL)
except Exception as e:
out = e.output
if 'error' not in out:
break
print out,
return out
pool = mp.Pool(1)
results = []
for spikes in np.linspace(50, 15000, 25, dtype=int):
for i in xrange(trials):
r = pool.apply_async(check_and_print_output,
[['srun', '-n', '1', '-c', str(lpus+2),
'-p', 'huxley',
'--gres=gpu:%s' % lpus,
'python', script_name,
'-u', str(lpus), '-s', str(spikes),
'-g', '0', '-m', '50']])
results.append(r)
f = open(out_file, 'w', 0)
w = csv.writer(f)
for r in results:
w.writerow(r.get().strip('[]\n\"').split(', '))
f.close()
| bsd-3-clause | 5,892,850,443,696,471,000 | 26.507246 | 80 | 0.53372 | false | 3.560976 | false | false | false |
hsoft/pluginbuilder | pluginbuilder/util.py | 1 | 12467 | import os, sys, zipfile, time
from modulegraph.find_modules import PY_SUFFIXES
from modulegraph.modulegraph import os_listdir
import macholib.util
def os_path_islink(path):
"""
os.path.islink with zipfile support.
    Luckily zipfiles cannot contain symlinks, therefore the implementation is
trivial.
"""
return os.path.islink(path)
def os_readlink(path):
"""
os.readlink with zipfile support.
    Luckily zipfiles cannot contain symlinks, therefore the implementation is
trivial.
"""
return os.readlink(path)
def os_path_isdir(path):
"""
os.path.isdir that understands zipfiles.
    Assumes that you're checking a path that is the result of os_listdir and
might give false positives otherwise.
"""
while path.endswith('/') and path != '/':
path = path[:-1]
zf, zp = path_to_zip(path)
if zf is None:
return os.path.isdir(zp)
else:
zip = zipfile.ZipFile(zf)
try:
info = zip.getinfo(zp)
except KeyError:
return True
else:
            # Not quite true, you can store information about directories in
            # zipfiles, but those have a slash at the end of the filename
return False
def copy_resource(source, destination, dry_run=0):
"""
Copy a resource file into the application bundle
"""
if os.path.isdir(source):
# XXX: This is wrong, need to call ourselves recursively
if not dry_run:
if not os.path.exists(destination):
os.mkdir(destination)
for fn in os_listdir(source):
copy_resource(os.path.join(source, fn),
os.path.join(destination, fn), dry_run=dry_run)
else:
copy_file_data(source, destination, dry_run=dry_run)
def copy_file_data(source, destination, dry_run=0):
zf, zp = path_to_zip(source)
if zf is None:
data = open(zp,'rb').read()
else:
data = get_zip_data(zf, zp)
if not dry_run:
fp = open(destination, 'wb')
fp.write(data)
fp.close()
def get_zip_data(path_to_zip, path_in_zip):
zf = zipfile.ZipFile(path_to_zip)
return zf.read(path_in_zip)
def path_to_zip(path):
"""
Returns (pathtozip, pathinzip). If path isn't in a zipfile pathtozip
will be None
"""
orig_path = path
from distutils.errors import DistutilsFileError
if os.path.exists(path):
return (None, path)
else:
rest = ''
while not os.path.exists(path):
path, r = os.path.split(path)
if not path:
raise DistutilsFileError("File doesn't exist: %s"%(orig_path,))
rest = os.path.join(r, rest)
if not os.path.isfile(path):
# Directory really doesn't exist
raise DistutilsFileError("File doesn't exist: %s"%(orig_path,))
try:
zf = zipfile.ZipFile(path)
except zipfile.BadZipfile:
raise DistutilsFileError("File doesn't exist: %s"%(orig_path,))
if rest.endswith('/'):
rest = rest[:-1]
return path, rest
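# Example: for a path such as '/tmp/plugins.zip/pkg/mod.py' (hypothetical),
# path_to_zip() returns ('/tmp/plugins.zip', 'pkg/mod.py'); for a path that exists
# on disk it returns (None, path).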
def get_mtime(path, mustExist=True):
"""
Get mtime of a path, even if it is inside a zipfile
"""
try:
return os.stat(path).st_mtime
except os.error:
from distutils.errors import DistutilsFileError
try:
path, rest = path_to_zip(path)
except DistutilsFileError:
if not mustExist:
return -1
raise
zf = zipfile.ZipFile(path)
info = zf.getinfo(rest)
return time.mktime(info.date_time + (0, 0, 0))
def newer(source, target):
"""
distutils.dep_utils.newer with zipfile support
"""
msource = get_mtime(source)
mtarget = get_mtime(target, mustExist=False)
return msource > mtarget
def is_python_package(path):
"""Returns whether `path` is a python package (has a __init__.py(c|o) file).
"""
if os_path_isdir(path):
for p in os_listdir(path):
if p.startswith('__init__.') and p[8:] in {'.py', '.pyc', '.pyo'}:
return True
return False
def make_exec(path):
mask = os.umask(0)
os.umask(mask)
os.chmod(path, os.stat(path).st_mode | (0o111 & ~mask))
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
def mergecopy(src, dest):
return macholib.util.mergecopy(src, dest)
def mergetree(src, dst, condition=None, copyfn=mergecopy):
"""Recursively merge a directory tree using mergecopy()."""
return macholib.util.mergetree(src, dst, condition=condition, copyfn=copyfn)
def move(src, dst):
return macholib.util.move(src, dst)
LOADER = """
def __load():
import imp, os, sys, os.path
ext = %r
library_path = os.environ['LIBRARYPATH']
dynload_path = os.path.join(library_path, 'lib-dynload')
ext = os.path.join(dynload_path, ext)
if os.path.exists(ext):
mod = imp.load_dynamic(__name__, ext)
else:
raise ImportError(repr(ext) + " not found")
__load()
del __load
"""
def make_loader(fn):
return LOADER % fn
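# Example: make_loader('foo.so') (hypothetical extension name) returns the source of
# a stub module that, on import, loads lib-dynload/foo.so from the directory named
# by the LIBRARYPATH environment variable via imp.load_dynamic().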
def byte_compile(py_files, optimize=0, force=0,
target_dir=None, verbose=1, dry_run=0,
direct=None):
if direct is None:
direct = (__debug__ and optimize == 0)
# "Indirect" byte-compilation: write a temporary script and then
# run it with the appropriate flags.
if not direct:
from tempfile import mktemp
from distutils.util import execute, spawn
script_name = mktemp(".py")
if verbose:
print("writing byte-compilation script '%s'" % script_name)
if not dry_run:
script = open(script_name, "w")
script.write("""
from pluginbuilder.util import byte_compile
from modulegraph.modulegraph import *
files = [
""")
for f in py_files:
script.write(repr(f) + ",\n")
script.write("]\n")
script.write("""
byte_compile(files, optimize=%r, force=%r,
target_dir=%r,
verbose=%r, dry_run=0,
direct=1)
""" % (optimize, force, target_dir, verbose))
script.close()
cmd = [sys.executable, script_name]
if optimize == 1:
cmd.insert(1, "-O")
elif optimize == 2:
cmd.insert(1, "-OO")
spawn(cmd, verbose=verbose, dry_run=dry_run)
execute(os.remove, (script_name,), "removing %s" % script_name,
verbose=verbose, dry_run=dry_run)
else:
from py_compile import compile
from distutils.dir_util import mkpath
for mod in py_files:
# Terminology from the py_compile module:
# cfile - byte-compiled file
# dfile - purported source filename (same as 'file' by default)
if mod.filename == mod.identifier:
cfile = os.path.basename(mod.filename)
dfile = cfile + (__debug__ and 'c' or 'o')
else:
cfile = mod.identifier.replace('.', os.sep)
if mod.packagepath:
dfile = cfile + os.sep + '__init__.py' + (__debug__ and 'c' or 'o')
else:
dfile = cfile + '.py' + (__debug__ and 'c' or 'o')
if target_dir:
cfile = os.path.join(target_dir, dfile)
if force or newer(mod.filename, cfile):
if verbose:
print("byte-compiling %s to %s" % (mod.filename, dfile))
if not dry_run:
mkpath(os.path.dirname(cfile))
suffix = os.path.splitext(mod.filename)[1]
if suffix in ('.py', '.pyw'):
zfile, pth = path_to_zip(mod.filename)
if zfile is None:
compile(mod.filename, cfile, dfile)
else:
fn = dfile + '.py'
open(fn, 'wb').write(get_zip_data(zfile, pth))
compile(mod.filename, cfile, dfile)
os.unlink(fn)
elif suffix in PY_SUFFIXES:
# Minor problem: This will happily copy a file
# <mod>.pyo to <mod>.pyc or <mod>.pyc to
# <mod>.pyo, but it does seem to work.
copy_file_data(mod.filename, cfile)
else:
raise RuntimeError \
("Don't know how to handle %r" % mod.filename)
else:
if verbose:
print("skipping byte-compilation of %s to %s" % \
(mod.filename, dfile))
SCMDIRS = {'CVS', '.svn', '.hg', '.git'}
def skipscm(ofn):
fn = os.path.basename(ofn)
if fn in SCMDIRS:
return False
return True
def iter_platform_files(path, is_platform_file=macholib.util.is_platform_file):
"""
Iterate over all of the platform files in a directory
"""
for root, dirs, files in os.walk(path):
for fn in files:
fn = os.path.join(root, fn)
if is_platform_file(fn):
yield fn
def copy_tree(src, dst,
preserve_mode=1,
preserve_times=1,
preserve_symlinks=0,
update=0,
verbose=0,
dry_run=0,
condition=None):
"""
Copy an entire directory tree 'src' to a new location 'dst'. Both
'src' and 'dst' must be directory names. If 'src' is not a
directory, raise DistutilsFileError. If 'dst' does not exist, it is
created with 'mkpath()'. The end result of the copy is that every
file in 'src' is copied to 'dst', and directories under 'src' are
recursively copied to 'dst'. Return the list of files that were
copied or might have been copied, using their output name. The
return value is unaffected by 'update' or 'dry_run': it is simply
the list of all files under 'src', with the names changed to be
under 'dst'.
'preserve_mode' and 'preserve_times' are the same as for
'copy_file'; note that they only apply to regular files, not to
directories. If 'preserve_symlinks' is true, symlinks will be
copied as symlinks (on platforms that support them!); otherwise
(the default), the destination of the symlink will be copied.
'update' and 'verbose' are the same as for 'copy_file'.
"""
assert isinstance(src, str), repr(src)
assert isinstance(dst, str), repr(dst)
from distutils.dir_util import mkpath
from distutils.file_util import copy_file
from distutils.dep_util import newer
from distutils.errors import DistutilsFileError
from distutils import log
if condition is None:
condition = skipscm
if not dry_run and not os_path_isdir(src):
raise DistutilsFileError("cannot copy tree '%s': not a directory" % src)
try:
names = os_listdir(src)
    except os.error as exc:
        (errno, errstr) = exc.args
if dry_run:
names = []
else:
raise DistutilsFileError("error listing files in '%s': %s" % (src, errstr))
if not dry_run:
mkpath(dst)
outputs = []
for n in names:
src_name = os.path.join(src, n)
dst_name = os.path.join(dst, n)
if (condition is not None) and (not condition(src_name)):
continue
if preserve_symlinks and os_path_islink(src_name):
link_dest = os_readlink(src_name)
log.info("linking %s -> %s", dst_name, link_dest)
if not dry_run:
if update and not newer(src, dst_name):
pass
else:
if os_path_islink(dst_name):
os.remove(dst_name)
os.symlink(link_dest, dst_name)
outputs.append(dst_name)
elif os_path_isdir(src_name):
outputs.extend(
copy_tree(src_name, dst_name, preserve_mode,
preserve_times, preserve_symlinks, update,
dry_run=dry_run, condition=condition))
else:
copy_file(src_name, dst_name, preserve_mode,
preserve_times, update, dry_run=dry_run)
outputs.append(dst_name)
return outputs
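def _copy_tree_example():
    # Illustrative sketch only (never called): 'src_dir' and 'dest_dir' are
    # hypothetical paths, and dry_run=1 means nothing is written to disk.
    return copy_tree('src_dir', 'dest_dir',
                     preserve_symlinks=1,
                     condition=skipscm,   # skip CVS/.svn/.hg/.git directories
                     dry_run=1)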
| mit | 2,080,846,094,289,265,200 | 30.722646 | 87 | 0.562365 | false | 3.843095 | false | false | false |
CliMT/climt-future | climt/_lib/rrtmg_lw/setup.py | 1 | 2948 | from setuptools import setup, Extension
from Cython.Distutils import build_ext
# This line only needed if building with NumPy in Cython file.
from numpy import get_include
from os import system
import os
# compile the fortran modules without linking
module_list = [
'parkind.f90',
'parrrtm.f90',
'rrlw_cld.f90',
'rrlw_con.f90',
'rrlw_kg01.f90',
'rrlw_kg02.f90',
'rrlw_kg03.f90',
'rrlw_kg04.f90',
'rrlw_kg05.f90',
'rrlw_kg06.f90',
'rrlw_kg07.f90',
'rrlw_kg08.f90',
'rrlw_kg09.f90',
'rrlw_kg10.f90',
'rrlw_kg11.f90',
'rrlw_kg12.f90',
'rrlw_kg13.f90',
'rrlw_kg14.f90',
'rrlw_kg15.f90',
'rrlw_kg16.f90',
'rrlw_ncpar.f90',
'rrlw_ref.f90',
'rrlw_tbl.f90',
'rrlw_vsn.f90',
'rrlw_wvn.f90']
sources_list = [
'rrtmg_lw_cldprop.f90',
'rrtmg_lw_cldprmc.f90',
'rrtmg_lw_rtrn.f90',
'rrtmg_lw_rtrnmr.f90',
'rrtmg_lw_rtrnmc.f90',
'rrtmg_lw_setcoef.f90',
'rrtmg_lw_taumol.f90',
'rrtmg_lw_rad.nomcica.f90',
'mcica_random_numbers.f90',
'rrtmg_lw_init.f90',
'mcica_subcol_gen_lw.f90',
'rrtmg_lw_rad.f90',
'rrtmg_lw_c_binder.f90']
unoptimised_sources_list = [
'rrtmg_lw_k_g.f90',
]
object_file_list = []
fc = os.getenv('FC', 'gfortran ')
fflags = os.getenv('FFLAGS', ' -fPIC -fno-range-check ')
cflags = os.getenv('CFLAGS', '-fPIC')
f_opt_flags = os.getenv('CLIMT_OPTIMIZE_FLAG', '-O3')
f_no_opt_flags = os.getenv('CLIMT_NO_OPTIMIZE_FLAG', ' -O0 ')
ldflags = os.getenv('LDFLAGS', '-lgfortran')
print('Compiling Modules')
for module in module_list:
output_file = module[:-3]+'o'
object_file_list.append(output_file)
compilation_command = fc+module+' -c -o '+output_file+' '+f_opt_flags+fflags
print(compilation_command)
system(compilation_command)
print('Compiling Sources')
for source in sources_list:
output_file = source[:-3]+'o'
object_file_list.append(output_file)
compilation_command = fc+source+' -c -o '+output_file+' '+f_opt_flags+fflags
print(compilation_command)
system(compilation_command)
print('Compiling k coefficient tables')
for source in unoptimised_sources_list:
output_file = source[:-3]+'o'
object_file_list.append(output_file)
compilation_command = fc+source+' -c -o '+output_file+f_no_opt_flags+fflags
print(compilation_command)
system(compilation_command)
link_args_list = object_file_list + [ldflags]
ext_modules = [
Extension( # module name:
'_rrtm_lw',
# source file:
['_rrtm_lw.pyx'],
# other compile args for gcc
extra_compile_args=[cflags, f_opt_flags, ldflags],
# other files to link to
extra_link_args=link_args_list)]
setup(name='_rrtm_lw',
cmdclass={'build_ext': build_ext},
# Needed if building with NumPy.
# This includes the NumPy headers when compiling.
include_dirs=[get_include()],
ext_modules=ext_modules)
| bsd-3-clause | -8,972,440,418,118,426,000 | 25.8 | 80 | 0.637042 | false | 2.615794 | false | false | false |
hachreak/invenio-accounts | invenio_accounts/models.py | 1 | 5487 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Database models for accounts."""
from __future__ import absolute_import, print_function
from datetime import datetime
from flask import current_app, session
from flask_security import RoleMixin, UserMixin
from invenio_db import db
from sqlalchemy.orm import validates
from sqlalchemy_utils import IPAddressType, Timestamp
userrole = db.Table(
'accounts_userrole',
db.Column('user_id', db.Integer(), db.ForeignKey(
'accounts_user.id', name='fk_accounts_userrole_user_id')),
db.Column('role_id', db.Integer(), db.ForeignKey(
'accounts_role.id', name='fk_accounts_userrole_role_id')),
)
"""Relationship between users and roles."""
class Role(db.Model, RoleMixin):
"""Role data model."""
__tablename__ = "accounts_role"
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
"""Role name."""
description = db.Column(db.String(255))
"""Role description."""
def __str__(self):
"""Return the name and description of the role."""
return '{0.name} - {0.description}'.format(self)
class User(db.Model, UserMixin):
"""User data model."""
__tablename__ = "accounts_user"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
"""User email."""
password = db.Column(db.String(255))
"""User password."""
active = db.Column(db.Boolean(name='active'))
"""Flag to say if the user is active or not ."""
confirmed_at = db.Column(db.DateTime)
"""When the user confirmed the email address."""
last_login_at = db.Column(db.DateTime)
"""When the user logged-in for the last time."""
current_login_at = db.Column(db.DateTime)
"""When user logged into the current session."""
last_login_ip = db.Column(IPAddressType, nullable=True)
"""Last user IP address."""
current_login_ip = db.Column(IPAddressType, nullable=True)
"""Current user IP address."""
login_count = db.Column(db.Integer)
"""Count how many times the user logged in."""
roles = db.relationship('Role', secondary=userrole,
backref=db.backref('users', lazy='dynamic'))
"""List of the user's roles."""
@validates('last_login_ip', 'current_login_ip')
def validate_ip(self, key, value):
"""Hack untrackable IP addresses."""
# NOTE Flask-Security stores 'untrackable' value to IPAddressType
# field. This incorrect value causes ValueError on loading
# user object.
if value == 'untrackable': # pragma: no cover
value = None
return value
def __str__(self):
"""Representation."""
return 'User <id={0.id}, email={0.email}>'.format(self)
class SessionActivity(db.Model, Timestamp):
"""User Session Activity model.
Instances of this model correspond to a session belonging to a user.
"""
__tablename__ = "accounts_user_session_activity"
sid_s = db.Column(db.String(255), primary_key=True)
"""Serialized Session ID. Used as the session's key in the kv-session
store employed by `flask-kvsession`.
Named here as it is in `flask-kvsession` to avoid confusion.
"""
user_id = db.Column(db.Integer, db.ForeignKey(
User.id, name='fk_accounts_session_activity_user_id'))
"""ID of user to whom this session belongs."""
user = db.relationship(User, backref='active_sessions')
ip = db.Column(db.String(80), nullable=True)
"""IP address."""
country = db.Column(db.String(3), nullable=True)
"""Country name."""
browser = db.Column(db.String(80), nullable=True)
"""User browser."""
browser_version = db.Column(db.String(30), nullable=True)
"""Browser version."""
os = db.Column(db.String(80), nullable=True)
"""User operative system name."""
device = db.Column(db.String(80), nullable=True)
"""User device."""
@classmethod
def query_by_expired(cls):
"""Query to select all expired sessions."""
lifetime = current_app.permanent_session_lifetime
expired_moment = datetime.utcnow() - lifetime
return cls.query.filter(cls.created < expired_moment)
@classmethod
def query_by_user(cls, user_id):
"""Query to select user sessions."""
return cls.query.filter_by(user_id=user_id)
@classmethod
def is_current(cls, sid_s):
"""Check if the session is the current one."""
return session.sid_s == sid_s
| gpl-2.0 | 6,014,488,001,262,512,000 | 31.087719 | 76 | 0.659194 | false | 3.839748 | false | false | false |
khertan/KhtNotes | khtnotes/merge3/merge3.py | 1 | 18192 | # Copyright (C) 2005-2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#from __future__ import absolute_import
# mbp: "you know that thing where cvs gives you conflict markers?"
# s: "i hate that."
import errors
import patiencediff
import textfile
def intersect(ra, rb):
"""Given two ranges return the range where they intersect or None.
>>> intersect((0, 10), (0, 6))
(0, 6)
>>> intersect((0, 10), (5, 15))
(5, 10)
>>> intersect((0, 10), (10, 15))
>>> intersect((0, 9), (10, 15))
>>> intersect((0, 9), (7, 15))
(7, 9)
"""
# preconditions: (ra[0] <= ra[1]) and (rb[0] <= rb[1])
sa = max(ra[0], rb[0])
sb = min(ra[1], rb[1])
if sa < sb:
return sa, sb
else:
return None
def compare_range(a, astart, aend, b, bstart, bend):
"""Compare a[astart:aend] == b[bstart:bend], without slicing.
"""
if (aend - astart) != (bend - bstart):
return False
for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
if a[ia] != b[ib]:
return False
else:
return True
class Merge3(object):
"""3-way merge of texts.
Given BASE, OTHER, THIS, tries to produce a combined text
incorporating the changes from both BASE->OTHER and BASE->THIS.
All three will typically be sequences of lines."""
def __init__(self, base, a, b, is_cherrypick=False, allow_objects=False):
"""Constructor.
:param base: lines in BASE
:param a: lines in A
:param b: lines in B
:param is_cherrypick: flag indicating if this merge is a cherrypick.
When cherrypicking b => a, matches with b and base do not conflict.
:param allow_objects: if True, do not require that base, a and b are
plain Python strs. Also prevents BinaryFile from being raised.
Lines can be any sequence of comparable and hashable Python
objects.
"""
if not allow_objects:
textfile.check_text_lines(base)
textfile.check_text_lines(a)
textfile.check_text_lines(b)
self.base = base
self.a = a
self.b = b
self.is_cherrypick = is_cherrypick
def merge_lines(self,
name_a=None,
name_b=None,
name_base=None,
start_marker='<<<<<<<',
mid_marker='=======',
end_marker='>>>>>>>',
base_marker=None,
reprocess=False):
"""Return merge in cvs-like form.
"""
newline = '\n'
if len(self.a) > 0:
if self.a[0].endswith('\r\n'):
newline = '\r\n'
elif self.a[0].endswith('\r'):
newline = '\r'
if base_marker and reprocess:
raise errors.CantReprocessAndShowBase()
if name_a:
start_marker = start_marker + ' ' + name_a
if name_b:
end_marker = end_marker + ' ' + name_b
if name_base and base_marker:
base_marker = base_marker + ' ' + name_base
merge_regions = self.merge_regions()
if reprocess is True:
merge_regions = self.reprocess_merge_regions(merge_regions)
for t in merge_regions:
what = t[0]
if what == 'unchanged':
for i in range(t[1], t[2]):
yield self.base[i]
elif what == 'a' or what == 'same':
for i in range(t[1], t[2]):
yield self.a[i]
elif what == 'b':
for i in range(t[1], t[2]):
yield self.b[i]
elif what == 'conflict':
yield start_marker + newline
for i in range(t[3], t[4]):
yield self.a[i]
if base_marker is not None:
yield base_marker + newline
for i in range(t[1], t[2]):
yield self.base[i]
yield mid_marker + newline
for i in range(t[5], t[6]):
yield self.b[i]
yield end_marker + newline
else:
raise ValueError(what)
def merge(self):
"""Return merge"""
for t in self.merge_regions():
what = t[0]
if what == 'unchanged':
for i in range(t[1], t[2]):
yield self.base[i]
elif what == 'a' or what == 'same':
for i in range(t[1], t[2]):
yield self.a[i]
elif what == 'b':
for i in range(t[1], t[2]):
yield self.b[i]
elif what == 'conflict':
for i in range(t[3], t[4]):
yield self.a[i]
for i in range(t[5], t[6]):
yield self.b[i]
else:
raise ValueError(what)
def merge_annotated(self):
"""Return merge with conflicts, showing origin of lines.
Most useful for debugging merge.
"""
for t in self.merge_regions():
what = t[0]
if what == 'unchanged':
for i in range(t[1], t[2]):
yield 'u | ' + self.base[i]
elif what == 'a' or what == 'same':
for i in range(t[1], t[2]):
yield what[0] + ' | ' + self.a[i]
elif what == 'b':
for i in range(t[1], t[2]):
yield 'b | ' + self.b[i]
elif what == 'conflict':
yield '<<<<\n'
for i in range(t[3], t[4]):
yield 'A | ' + self.a[i]
yield '----\n'
for i in range(t[5], t[6]):
yield 'B | ' + self.b[i]
yield '>>>>\n'
else:
raise ValueError(what)
def merge_groups(self):
"""Yield sequence of line groups. Each one is a tuple:
'unchanged', lines
Lines unchanged from base
'a', lines
Lines taken from a
'same', lines
Lines taken from a (and equal to b)
'b', lines
Lines taken from b
'conflict', base_lines, a_lines, b_lines
Lines from base were changed to either a or b and conflict.
"""
for t in self.merge_regions():
what = t[0]
if what == 'unchanged':
yield what, self.base[t[1]:t[2]]
elif what == 'a' or what == 'same':
yield what, self.a[t[1]:t[2]]
elif what == 'b':
yield what, self.b[t[1]:t[2]]
elif what == 'conflict':
yield (what,
self.base[t[1]:t[2]],
self.a[t[3]:t[4]],
self.b[t[5]:t[6]])
else:
raise ValueError(what)
def merge_regions(self):
"""Return sequences of matching and conflicting regions.
This returns tuples, where the first value says what kind we
have:
'unchanged', start, end
Take a region of base[start:end]
'same', astart, aend
b and a are different from base but give the same result
'a', start, end
Non-clashing insertion from a[start:end]
Method is as follows:
The two sequences align only on regions which match the base
        and both descendants. These are found by doing a two-way diff
of each one against the base, and then finding the
intersections between those regions. These "sync regions"
are by definition unchanged in both and easily dealt with.
The regions in between can be in any of three cases:
conflicted, or changed on only one side.
"""
# section a[0:ia] has been disposed of, etc
iz = ia = ib = 0
for zmatch, zend, amatch, aend, \
bmatch, bend in self.find_sync_regions():
matchlen = zend - zmatch
# invariants:
# matchlen >= 0
# matchlen == (aend - amatch)
# matchlen == (bend - bmatch)
len_a = amatch - ia
len_b = bmatch - ib
#len_base = zmatch - iz
# invariants:
# assert len_a >= 0
# assert len_b >= 0
# assert len_base >= 0
#print 'unmatched a=%d, b=%d' % (len_a, len_b)
if len_a or len_b:
# try to avoid actually slicing the lists
same = compare_range(self.a, ia, amatch,
self.b, ib, bmatch)
if same:
yield 'same', ia, amatch
else:
equal_a = compare_range(self.a, ia, amatch,
self.base, iz, zmatch)
equal_b = compare_range(self.b, ib, bmatch,
self.base, iz, zmatch)
if equal_a and not equal_b:
yield 'b', ib, bmatch
elif equal_b and not equal_a:
yield 'a', ia, amatch
elif not equal_a and not equal_b:
if self.is_cherrypick:
for node in self._refine_cherrypick_conflict(
iz, zmatch, ia, amatch,
ib, bmatch):
yield node
else:
yield 'conflict', \
iz, zmatch, ia, amatch, ib, bmatch
else:
raise AssertionError(
"can't handle a=b=base but unmatched")
ia = amatch
ib = bmatch
iz = zmatch
# if the same part of the base was deleted on both sides
# that's OK, we can just skip it.
if matchlen > 0:
# invariants:
# assert ia == amatch
# assert ib == bmatch
# assert iz == zmatch
yield 'unchanged', zmatch, zend
iz = zend
ia = aend
ib = bend
def _refine_cherrypick_conflict(self, zstart,
zend, astart, aend, bstart, bend):
"""When cherrypicking b => a, ignore matches with b and base."""
# Do not emit regions which match, only regions which do not match
matches = patiencediff.PatienceSequenceMatcher(None,
self.base[zstart:zend], self.b[bstart:bend]).get_matching_blocks()
        last_base_idx = 0
        last_b_idx = 0
        yielded_a = False
for base_idx, b_idx, match_len in matches:
#conflict_z_len = base_idx - last_base_idx
conflict_b_len = b_idx - last_b_idx
if conflict_b_len == 0: # There are no lines in b which conflict,
# so skip it
pass
else:
if yielded_a:
yield ('conflict',
zstart + last_base_idx, zstart + base_idx,
aend, aend, bstart + last_b_idx, bstart + b_idx)
else:
# The first conflict gets the a-range
yielded_a = True
yield ('conflict', zstart + last_base_idx, zstart +
base_idx,
astart, aend, bstart + last_b_idx, bstart + b_idx)
last_base_idx = base_idx + match_len
last_b_idx = b_idx + match_len
if last_base_idx != zend - zstart or last_b_idx != bend - bstart:
if yielded_a:
yield ('conflict', zstart + last_base_idx, zstart + base_idx,
aend, aend, bstart + last_b_idx, bstart + b_idx)
else:
# The first conflict gets the a-range
yielded_a = True
yield ('conflict', zstart + last_base_idx, zstart + base_idx,
astart, aend, bstart + last_b_idx, bstart + b_idx)
if not yielded_a:
yield ('conflict', zstart, zend, astart, aend, bstart, bend)
def reprocess_merge_regions(self, merge_regions):
"""Where there are conflict regions, remove the agreed lines.
Lines where both A and B have made the same changes are
eliminated.
"""
for region in merge_regions:
if region[0] != "conflict":
yield region
continue
type, iz, zmatch, ia, amatch, ib, bmatch = region
a_region = self.a[ia:amatch]
b_region = self.b[ib:bmatch]
matches = patiencediff.PatienceSequenceMatcher(
None, a_region, b_region).get_matching_blocks()
next_a = ia
next_b = ib
for region_ia, region_ib, region_len in matches[:-1]:
region_ia += ia
region_ib += ib
reg = self.mismatch_region(next_a, region_ia, next_b,
region_ib)
if reg is not None:
yield reg
yield 'same', region_ia, region_len + region_ia
next_a = region_ia + region_len
next_b = region_ib + region_len
reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
if reg is not None:
yield reg
@staticmethod
def mismatch_region(next_a, region_ia, next_b, region_ib):
if next_a < region_ia or next_b < region_ib:
return 'conflict', None, None, next_a, region_ia, next_b, region_ib
def find_sync_regions(self):
        Return a list of sync regions, where both descendants match the base.
Generates a list of (base1, base2, a1, a2, b1, b2). There is
always a zero-length sync region at the end of all the files.
"""
ia = ib = 0
amatches = patiencediff.PatienceSequenceMatcher(
None, self.base, self.a).get_matching_blocks()
bmatches = patiencediff.PatienceSequenceMatcher(
None, self.base, self.b).get_matching_blocks()
len_a = len(amatches)
len_b = len(bmatches)
sl = []
while ia < len_a and ib < len_b:
abase, amatch, alen = amatches[ia]
bbase, bmatch, blen = bmatches[ib]
# there is an unconflicted block at i; how long does it
# extend? until whichever one ends earlier.
i = intersect((abase, abase + alen), (bbase, bbase + blen))
if i:
intbase = i[0]
intend = i[1]
intlen = intend - intbase
# found a match of base[i[0], i[1]]; this may be less than
# the region that matches in either one
# assert intlen <= alen
# assert intlen <= blen
# assert abase <= intbase
# assert bbase <= intbase
asub = amatch + (intbase - abase)
bsub = bmatch + (intbase - bbase)
aend = asub + intlen
bend = bsub + intlen
# assert self.base[intbase:intend] == self.a[asub:aend], \
# (self.base[intbase:intend], self.a[asub:aend])
# assert self.base[intbase:intend] == self.b[bsub:bend]
sl.append((intbase, intend,
asub, aend,
bsub, bend))
# advance whichever one ends first in the base text
if (abase + alen) < (bbase + blen):
ia += 1
else:
ib += 1
intbase = len(self.base)
abase = len(self.a)
bbase = len(self.b)
sl.append((intbase, intbase, abase, abase, bbase, bbase))
return sl
def find_unconflicted(self):
"""Return a list of ranges in base that are not conflicted."""
am = patiencediff.PatienceSequenceMatcher(
None, self.base, self.a).get_matching_blocks()
bm = patiencediff.PatienceSequenceMatcher(
None, self.base, self.b).get_matching_blocks()
unc = []
while am and bm:
# there is an unconflicted block at i; how long does it
# extend? until whichever one ends earlier.
a1 = am[0][0]
a2 = a1 + am[0][2]
b1 = bm[0][0]
b2 = b1 + bm[0][2]
i = intersect((a1, a2), (b1, b2))
if i:
unc.append(i)
if a2 < b2:
del am[0]
else:
del bm[0]
return unc
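def _merge3_example():
    # Illustrative sketch only (inline strings rather than the files that main()
    # below reads): A edits line 1 of the base text, B edits line 3.
    base = ["a\n", "b\n", "c\n"]
    a = ["A\n", "b\n", "c\n"]
    b = ["a\n", "b\n", "C\n"]
    m3 = Merge3(base, a, b)
    # Edits in different regions combine cleanly to "A\nb\nC\n"; overlapping
    # edits would instead be wrapped in <<<<<<< / ======= / >>>>>>> markers.
    return ''.join(m3.merge_lines(name_a='A', name_b='B'))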
def main(argv):
# as for diff3 and meld the syntax is "MINE BASE OTHER"
a = file(argv[1], 'rt').readlines()
base = file(argv[2], 'rt').readlines()
b = file(argv[3], 'rt').readlines()
m3 = Merge3(base, a, b)
#for sr in m3.find_sync_regions():
# print sr
# sys.stdout.writelines(m3.merge_lines(name_a=argv[1], name_b=argv[3]))
sys.stdout.writelines(m3.merge())
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| gpl-3.0 | -2,084,280,794,020,240,600 | 34.952569 | 79 | 0.484883 | false | 3.930856 | false | false | false |
tschalch/pyTray | src/setup.py | 1 | 6933 | #!/usr/bin/env python
#this installer script uses InnoSetup to generate a complete Installer
from distutils.core import setup
import py2exe
import os, os.path, sys
import glob
#adding lib directory to module search path
libpath = os.path.abspath(os.path.dirname(sys.argv[0])) + "/lib"
sys.path.append(os.path.abspath(libpath))
includes = ["encodings", "encodings.latin_1",]
#options = {"py2exe": {"compressed": 1,
# "optimize": 2,
# "ascii": 1,
# "bundle_files": 1,
# "includes":includes}},
################################################################
# A program using wxPython
# The manifest will be inserted as resource into test_wx.exe. This
# gives the controls the Windows XP appearance (if run on XP ;-)
#
# Another option would be to store if in a file named
# test_wx.exe.manifest, and probably copy it with the data_files
# option.
#
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
################################################################
# arguments for the setup() call
pyTray = dict(
script = "pytray.py",
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="pyTray"))],
dest_base = r"pyTray",
icon_resources = [(1,"files/images/icon.ico")])
zipfile = r"lib\shardlib"
options = {"py2exe": {"compressed": 1,
"optimize": 2}}
################################################################
import os
class InnoScript:
def __init__(self,
name,
lib_dir,
dist_dir,
windows_exe_files = [],
lib_files = [],
version = "1.0"):
self.lib_dir = lib_dir
self.dist_dir = dist_dir
if not self.dist_dir[-1] in "\\/":
self.dist_dir += "\\"
self.name = name
self.version = version
self.windows_exe_files = [self.chop(p) for p in windows_exe_files]
self.lib_files = [self.chop(p) for p in lib_files]
def chop(self, pathname):
assert pathname.startswith(self.dist_dir)
return pathname[len(self.dist_dir):]
def create(self, pathname="dist\\pytray.iss"):
self.pathname = pathname
ofi = self.file = open(pathname, "w")
print >> ofi, "; WARNING: This script has been created by py2exe. Changes to this script"
print >> ofi, "; will be overwritten the next time py2exe is run!"
print >> ofi, r"[Setup]"
print >> ofi, r"AppName=%s" % self.name
print >> ofi, r"AppVerName=%s %s" % (self.name, self.version)
print >> ofi, r"DefaultDirName={pf}\%s" % self.name
print >> ofi, r"DefaultGroupName=%s" % self.name
print >> ofi
print >> ofi, r"[Files]"
for path in self.windows_exe_files + self.lib_files:
print >> ofi, r'Source: "%s"; DestDir: "{app}\%s"; Flags: ignoreversion' % (path, os.path.dirname(path))
print >> ofi
print >> ofi, r"[Icons]"
for path in self.windows_exe_files:
print >> ofi, r'Name: "{group}\%s"; Filename: "{app}\%s"' % \
(self.name, path)
print >> ofi, 'Name: "{group}\Uninstall %s"; Filename: "{uninstallexe}"' % self.name
def compile(self):
try:
import ctypes
except ImportError:
try:
import win32api
except ImportError:
import os
os.startfile(self.pathname)
else:
print "Ok, using win32api."
win32api.ShellExecute(0, "compile",
self.pathname,
None,
None,
0)
else:
print "Cool, you have ctypes installed."
res = ctypes.windll.shell32.ShellExecuteA(0, "compile",
self.pathname,
None,
None,
0)
if res < 32:
raise RuntimeError, "ShellExecute failed, error %d" % res
################################################################
from py2exe.build_exe import py2exe
class build_installer(py2exe):
# This class first builds the exe file(s), then creates a Windows installer.
# You need InnoSetup for it.
def run(self):
# First, let py2exe do it's work.
py2exe.run(self)
lib_dir = self.lib_dir
dist_dir = self.dist_dir
# create the Installer, using the files py2exe has created.
script = InnoScript("pytray",
lib_dir,
dist_dir,
self.windows_exe_files,
self.lib_files)
print "*** creating the inno setup script***"
script.create()
print "*** compiling the inno setup script***"
script.compile()
# Note: By default the final setup.exe will be in an Output subdirectory.
################################################################
setup(
    description='Crystallization Management Software',
options = options,
# The lib directory contains everything except the executables and the python dll.
zipfile = zipfile,
windows = [pyTray],
# use out build_installer class as extended py2exe build command
cmdclass = {"py2exe": build_installer},
data_files=[(r"files", glob.glob(r"files/*.*")),
(r"files/test", glob.glob(r"files/test/*.*")),
(r"files/Dtd", glob.glob(r"files/Dtd/*.*")),
(r"files/fonts", glob.glob(r"files/fonts/*.*")),
(r"files/images", glob.glob(r"files/images/*.*")),
],
author='Thomas Schalch',
author_email='[email protected]',
packages = ["gui","dataStructures","util","test"],
)
| bsd-3-clause | 2,843,598,152,450,568,700 | 34.298429 | 116 | 0.492572 | false | 4.078235 | false | false | false |
tdegeus/GooseEYE | docs/examples/clusters_dilate_periodic.py | 1 | 2926 | r'''
Plot and/or check.
Usage:
script [options]
Options:
-s, --save Save output for later check.
-c, --check Check against earlier results.
-p, --plot Plot.
-h, --help Show this help.
'''
# <snippet>
import numpy as np
import GooseEYE
# generate image
I = np.zeros((21, 21), dtype='bool')
I[4, 4] = True
I[18, 19] = True
I[19, 19] = True
I[20, 19] = True
I[19, 18] = True
I[19, 20] = True
# clusters
C = GooseEYE.Clusters(I).labels()
# dilate
CD = GooseEYE.dilate(C)
# </snippet>
if __name__ == '__main__':
import docopt
args = docopt.docopt(__doc__)
if args['--save']:
import h5py
with h5py.File('clusters_dilate_periodic.h5', 'w') as data:
data['I'] = I
data['C'] = C
data['CD'] = CD
if args['--check']:
import h5py
with h5py.File('clusters_dilate_periodic.h5', 'r') as data:
assert np.all(np.equal(data['I'][...], I))
assert np.all(np.equal(data['C'][...], C))
assert np.all(np.equal(data['CD'][...], CD))
if args['--plot']:
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
# color-scheme: modify such that the background is white
# N.B. for a transparent background -> 4th column == 1.
cmap = cm.jet(range(256))
cmap[0, :3] = 1.0
cmap = mpl.colors.ListedColormap(cmap)
try:
plt.style.use(['goose', 'goose-latex'])
except:
pass
fig, axes = plt.subplots(figsize=(18, 6), nrows=1, ncols=3)
ax = axes[0]
im = ax.imshow(I, clim=(0, 1), cmap=mpl.colors.ListedColormap(cm.gray([0, 255])))
ax.xaxis.set_ticks([0, 20])
ax.yaxis.set_ticks([0, 20])
ax.set_xlim([-0.5, 20.5])
ax.set_ylim([-0.5, 20.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'image')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.set_ticks([0, 1])
ax = axes[1]
im = ax.imshow(CD, clim=(0, np.max(C) + 1), cmap=cmap)
ax.xaxis.set_ticks([0, 20])
ax.yaxis.set_ticks([0, 20])
ax.set_xlim([-0.5, 20.5])
ax.set_ylim([-0.5, 20.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'clusters + dilate')
ax = axes[2]
im = ax.imshow(np.tile(CD, (3, 3)), clim=(0, np.max(C) + 1), cmap=cmap)
ax.xaxis.set_ticks([0, 60])
ax.yaxis.set_ticks([0, 60])
ax.set_xlim([-0.5, 60.5])
ax.set_ylim([-0.5, 60.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'periodic copy')
plt.savefig('clusters_dilate_periodic.svg')
| gpl-3.0 | -889,297,396,052,922,000 | 25.125 | 89 | 0.520164 | false | 2.89703 | false | false | false |
drupdates/Slack | __init__.py | 1 | 1072 | """ Send report using Slack. """
from drupdates.settings import Settings
from drupdates.utils import Utils
from drupdates.constructors.reports import Report
import json, os
class Slack(Report):
""" Slack report plugin. """
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
settings_file = current_dir + '/settings/default.yaml'
self.settings = Settings()
self.settings.add(settings_file)
def send_message(self, report_text):
""" Post the report to a Slack channel or DM a specific user."""
url = self.settings.get('slackURL')
user = self.settings.get('slackUser')
payload = {}
payload['text'] = report_text
payload['new-bot-name'] = user
direct = self.settings.get('slackRecipient')
channel = self.settings.get('slackChannel')
if direct:
payload['channel'] = '@' + direct
elif channel:
payload['channel'] = '#' + direct
Utils.api_call(url, 'Slack', 'post', data=json.dumps(payload))
| mit | 81,357,146,827,252,450 | 33.580645 | 72 | 0.616604 | false | 4 | false | false | false |
RudolfCardinal/pythonlib | cardinal_pythonlib/wsgi/headers_mw.py | 1 | 4487 | #!/usr/bin/env python
# cardinal_pythonlib/headers_mw.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal ([email protected]).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**WSGI middleware to add arbitrary HTTP headers.**
"""
import logging
from cardinal_pythonlib.wsgi.constants import (
TYPE_WSGI_APP,
TYPE_WSGI_APP_RESULT,
TYPE_WSGI_ENVIRON,
TYPE_WSGI_EXC_INFO,
TYPE_WSGI_RESPONSE_HEADERS,
TYPE_WSGI_START_RESPONSE,
TYPE_WSGI_START_RESP_RESULT,
TYPE_WSGI_STATUS,
)
log = logging.getLogger(__name__)
class HeaderModifyMode(object):
"""
Options for
:class:`cardinal_pythonlib.wsgi.headers_mw.AddHeadersMiddleware`.
"""
ADD = 0
ADD_IF_ABSENT = 1
class AddHeadersMiddleware(object):
"""
WSGI middleware to add arbitrary HTTP headers.
See e.g. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers for a
list of possible HTTP headers.
Note:
- HTTP headers are case-insensitive. However, the canonical form is
hyphenated camel case;
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers.
- You can specify the same HTTP header multiple times; apart from
Set-Cookie, this should have the effect of the browser treating them as
concatenated in a CSV format.
https://stackoverflow.com/questions/3096888;
https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
"""
def __init__(self,
app: TYPE_WSGI_APP,
headers: TYPE_WSGI_RESPONSE_HEADERS,
method: int = HeaderModifyMode.ADD) -> None:
"""
Args:
app:
The WSGI app to which to apply the middleware.
headers:
A list of tuples, each of the form ``(key, value)``.
"""
assert isinstance(headers, list)
for key_value_tuple in headers:
assert isinstance(key_value_tuple, tuple)
assert len(key_value_tuple) == 2
assert isinstance(key_value_tuple[0], str)
assert isinstance(key_value_tuple[1], str)
assert method in [
HeaderModifyMode.ADD,
HeaderModifyMode.ADD_IF_ABSENT,
]
self.app = app
self.headers = headers
self.method = method
def __call__(self,
environ: TYPE_WSGI_ENVIRON,
start_response: TYPE_WSGI_START_RESPONSE) \
-> TYPE_WSGI_APP_RESULT:
"""
Called every time the WSGI app is used.
"""
def add(status: TYPE_WSGI_STATUS,
headers: TYPE_WSGI_RESPONSE_HEADERS,
exc_info: TYPE_WSGI_EXC_INFO = None) \
-> TYPE_WSGI_START_RESP_RESULT:
# Add headers. If they were present already, there will be
# several versions now. See above.
return start_response(status, headers + self.headers, exc_info)
def add_if_absent(status: TYPE_WSGI_STATUS,
headers: TYPE_WSGI_RESPONSE_HEADERS,
exc_info: TYPE_WSGI_EXC_INFO = None) \
-> TYPE_WSGI_START_RESP_RESULT:
# Add headers, but not if that header was already present.
# Note case-insensitivity.
header_keys_lower = [kv[0].lower() for kv in headers]
new_headers = [x for x in self.headers
if x[0].lower() not in header_keys_lower]
return start_response(status, headers + new_headers, exc_info)
method = self.method
if method == HeaderModifyMode.ADD:
custom_start_response = add
else:
custom_start_response = add_if_absent
return self.app(environ, custom_start_response)
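def _example_wrap_app(app: TYPE_WSGI_APP) -> TYPE_WSGI_APP:
    """
    Illustrative sketch only: wrap a WSGI app so every response carries an extra
    header unless the application already set it. The header shown here is an
    arbitrary example, not a recommendation made by this module.
    """
    return AddHeadersMiddleware(
        app,
        headers=[("X-Content-Type-Options", "nosniff")],
        method=HeaderModifyMode.ADD_IF_ABSENT,
    )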
| apache-2.0 | 8,220,256,760,036,851,000 | 32.992424 | 79 | 0.592378 | false | 4.197381 | false | false | false |
aep124/TwitterAnalyticsTools | textonly.py | 1 | 2405 | # this is a script to retrieve and process text-only data for classification
# This process includes five main tasks
# 1) getting raw tweets
# 2) applying labels (this step can be conducted at any time)
# 3) filtering those tweets (e.g., according to CMU POS tagger)
# 4) deriving a set of features (a.k.a. word list)
# 5) writing the feature vectors to an arff file
import tools4pgs
import tools4parsing
import tools4fv
import tools4labeling
import pickle
import copy
import numpy as np
import pandas as pd
# dividing into two dataframe because tweet info is fixed, but features are flexible
# tweet info data frame columns:
# NAME DATATYPE
# twtid ....... string (of digits)
# raw ......... string
# filtered .... string
# userid ...... string (of digits)
# handle ...... string
# label ....... string
# imgurl ...... string
# tweet features data frame columns
# twtid ....... string (of digits)
# feature 1 ... TF score for word 1
# feature 2 ... TF score for word 2
# :
# feature n ... TF score for word n
# label ....... string
############### (1) Get Tweets ################
# TODO: modify query handling to accommodate the column names that databases use, as well as subsets of query variables
# (this is written for robbery database)
query = 'SELECT id,text,user_id FROM tweets'
condition = "WHERE text like '%bears%'"
tools4pgs.writetwtinfo(query, condition, 'twtinfo.p')
############### (2) Apply Labels ###############
labelmap = tools4labeling.getlabelmap('labelsystem')
tools4labeling.writelabels('twtinfo.p', labelmap)
################# (3) Filter ################
keepset = tools4parsing.getkeepset('POS2keep')
tools4parsing.writefiltered('twtinfo.p', keepset)
# TODO: add functionality for reply tweets (conversations) ????????
############## (4) Derive Features ##############
wordmap = tools4fv.getwordmap('twtinfo.p')
wordlist = wordmap.keys()
# specify threshold directly :
# freq_threshold = 2
# could also specify threshold by number of words (e.g., 500):
# freq_threshold = sorted(wordmap.values())[-500]
# wordlist = [w for w in wordmap.keys() if wordmap[w] >= freq_threshold]
tools4fv.writetf('twtinfo.p','twtfeatures.p', wordlist)
tools4fv.synclabels('twtinfo.p','twtfeatures.p')
############### (5) Make ARFF File ###############
#tools4fv.writearff('twtfeatures.p')
| mit | 2,456,822,204,111,772,700 | 24.585106 | 116 | 0.642827 | false | 3.358939 | false | false | false |
daicang/Leetcode-solutions | 146-lru-cache.py | 1 | 1792 | class DLNode(object):
def __init__(self):
self.key = None
self.value = None
self.prev = None
self.next = None
class LRUCache(object):
def __init__(self, capacity):
"""
:type capacity: int
"""
        self.head = DLNode()  # sentinel on the most-recently-used end
        self.tail = DLNode()  # sentinel on the least-recently-used end
        self.capacity = capacity
        self.size = 0
        self.cache = {}  # key -> DLNode, for O(1) lookup
        self.head.next = self.tail
        self.tail.prev = self.head
    def _move_to_head(self, node):
        # Splice the node in right after the head sentinel (most recently used).
        node.prev = self.head
        node.next = self.head.next
        node.prev.next = node
        node.next.prev = node
    def _unlink_node(self, node):
        # Detach the node from the list; the cache dict is untouched.
        node.prev.next = node.next
        node.next.prev = node.prev
def get(self, key):
"""
:type key: int
:rtype: int
"""
node = self.cache.get(key)
if node is None:
return -1
self._unlink_node(node)
self._move_to_head(node)
return node.value
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: None
"""
node = self.cache.get(key)
if node:
node.value = value
self._unlink_node(node)
self._move_to_head(node)
return
node = DLNode()
node.key = key
node.value = value
self.cache[key] = node
self._move_to_head(node)
self.size += 1
        if self.size > self.capacity:
            # Evict the least-recently-used entry: the node just before the tail sentinel.
            outdated = self.tail.prev
            self._unlink_node(outdated)
            del self.cache[outdated.key]
            self.size -= 1
c = LRUCache(2)
c.put(1, 1)
c.put(2, 2)
print c.get(1)
c.put(3, 3)
print c.get(2)
c.put(4, 4)
print c.get(1)
print c.get(3)
print c.get(4)
| mit | 3,490,341,262,473,513,000 | 19.363636 | 40 | 0.497768 | false | 3.419847 | false | false | false |
brettchien/LeetCode | 9_PalindromeNumber.py | 1 | 3651 | class Solution:
# @param {integer} x
# @return {boolean}
    def isPalindrome(self, x):
        # Negative numbers are never palindromes.
        if x < 0:
            return False
        # A non-zero number ending in 0 cannot be a palindrome.
        if x > 0 and x % 10 == 0:
            return False
        # Build the reversed lower half of the digits until it meets the upper half.
        reverse = 0
        while x > reverse:
            reverse = reverse * 10 + x % 10
            x /= 10
        # Even digit count: halves must match; odd: drop the middle digit via reverse / 10.
        return (x == reverse) or (x == reverse / 10)
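    # Worked example (illustration only): x = 1221
    #   loop: reverse=1, x=122  ->  reverse=12, x=12  ->  stop (x == reverse)
    #   so 1221 is a palindrome; for 12321 the loop ends with x=12, reverse=123,
    #   and the `x == reverse / 10` branch discards the middle digit.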
def cisPalindrome(self, x):
if x < 0:
return False
if x < 10:
return True
if x < 100:
hi = x / 10
lo = x % 10
return hi == lo
pivot = 1
count = 0
while pivot <= x:
count += 1
pivot *= 10
digits = count / 2
first = x / (10 ** (digits + (count % 2)))
second = x % (10 ** digits)
print x, first, second
while digits >= 1:
print first, second
if digits == 1:
return first == second
lo = second % 10
hi = first / (10 ** (digits-1))
print hi, lo
if hi != lo:
return False
else:
first = first % (10 ** (digits-1))
second = second / 10
digits -= 1
def bisPalindrome(self, x):
if x < 0:
return False
if x < 10:
return True
if x < 100:
hi = x / 10
lo = x % 10
return hi == lo
pivot = 1
count = 1
while pivot <= x:
count += 1
pivot *= 10
count -= 1
odd = (count % 2 == 1)
print x, pivot, count
while x > 0:
print x
digit = x % 10
pivot /= 100
x /= 10
hiDigit = x / pivot
print pivot, x, digit, hiDigit
if hiDigit != digit:
return False
x -= digit * pivot
if x == 0:
return True
print x
if odd:
if pivot == 10:
return True
else:
if pivot == 100:
hi = x / 10
lo = x % 10
return hi == lo
def aisPalindrome(self, x):
if x < 0:
return False
if x < 10:
return True
if x == 10:
return False
pivot = 1
count = 1
while pivot <= x:
count += 1
pivot *= 10
count -= 1
print x, pivot, count
while x > 0:
print x
digit = x % 10
pivot /= 100
x /= 10
if digit == 0 and pivot > x:
continue
if count % 2 == 0: #even numbers of digits
if pivot == 10:
return x == digit
else: # odd numbers of digits
if pivot == 1:
return True
check = x - digit * pivot
print pivot, x, digit, check
if check == 0:
return True
elif check < 0 or check >= digit * pivot:
return False
else:
x -= digit * pivot
if __name__ == "__main__":
sol = Solution()
print sol.isPalindrome(121) == True
print sol.isPalindrome(101) == True
print sol.isPalindrome(100) == False
print sol.isPalindrome(9999) == True
print sol.isPalindrome(99999) == True
print sol.isPalindrome(999999) == True
print sol.isPalindrome(1000110001) == True
print sol.isPalindrome(1000021) == False
| mit | 3,103,546,097,456,922,600 | 26.870229 | 54 | 0.398247 | false | 4.474265 | false | false | false |
felipemontefuscolo/bitme | get_bitmex_candles.py | 1 | 4122 | #!/usr/bin/env python
import sys
import time
import swagger_client
from swagger_client.rest import ApiException
from utils.utils import smart_open
import argparse
import pandas as pd
MAX_NUM_CANDLES_BITMEX = 500
def print_file(file_or_stdout, api_instance, bin_size, partial, symbol, reverse, start_time, end_time):
chunks = split_in_chunks(start_time, end_time, MAX_NUM_CANDLES_BITMEX, bin_size)
with smart_open(file_or_stdout) as fh:
print("time,open,high,low,close,volume", file=fh)
num_pages = len(chunks)
for i in range(num_pages):
chunk = chunks[i]
s = chunk[0]
e = chunk[1]
count = (e - s) / pd.Timedelta(bin_size)
page = api_instance.trade_get_bucketed(
bin_size=bin_size,
partial=partial,
symbol=symbol,
count=count,
start=0.0,
reverse=reverse,
start_time=s,
end_time=e)
print("from {} to {}: {} candles downloaded".format(s, e, len(page)))
# TODO: bitmex has a bug where the high is not the highest value !!!!!
for line in reversed(page):
print(','.join([line.timestamp.strftime('%Y-%m-%dT%H:%M:%S'),
str(line.open),
str(max(line.high, line.open)),
str(min(line.low, line.open)),
str(line.close),
str(line.volume)]), file=fh)
sys.stdout.write(
"progress: completed %d out of %d pages (%.2f%%) \r" %
(i + 1, num_pages, 100 * float(i + 1) / num_pages))
sys.stdout.flush()
time.sleep(1.001)
print("")
def split_in_chunks(start: pd.Timedelta, end: pd.Timedelta, chunk_size: int, bucket_size: str):
i = start
r = []
dt = chunk_size * pd.Timedelta(bucket_size)
while i <= end:
r += [(i, min(end, i + dt))]
i += dt
return r
def get_args(args=None, namespace=None):
parser = argparse.ArgumentParser(description="Get bitmex data")
parser.add_argument('-b', '--begin-time', type=pd.Timestamp, required=True, help="Example: '2018-04-01T00:00:01'")
parser.add_argument('-e', '--end-time', type=pd.Timestamp, required=True, help="Example: '2018-04-02T00:00:01'")
parser.add_argument('-s', '--symbol', type=str, default='XBTUSD',
help='Instrument symbol. Send a bare series (e.g. XBU) to get data for the nearest expiring'
'contract in that series. You can also send a timeframe, e.g. `XBU:monthly`. '
'Timeframes are `daily`, `weekly`, `monthly`, `quarterly`, and `biquarterly`. (optional)')
parser.add_argument('-z', '--bin-size', choices=('1m', '5m', '1h', '1d'), default='1m', type=str,
help='Time interval to bucket by')
parser.add_argument('-o', '--file-or-stdout', type=str, required=True, help='Output filename or "-" for stdout')
parser.add_argument('--partial', action='store_true', default=False, )
args = parser.parse_args(args, namespace)
return args
def main():
args = get_args()
# create an instance of the API class
configuration = swagger_client.Configuration()
configuration.host = 'https://www.bitmex.com/api/v1'
api_instance = swagger_client.TradeApi(swagger_client.ApiClient(configuration))
print("print to file " + (args.file_or_stdout if args.file_or_stdout is not '-' else 'std output'))
try:
print_file(file_or_stdout=args.file_or_stdout,
api_instance=api_instance,
bin_size=args.bin_size, partial=args.partial, symbol=args.symbol,
reverse=False,
start_time=args.begin_time, end_time=args.end_time)
except ApiException as e:
print("Exception when calling TradeApi->trade_get_bucketed: %s\n" % e)
return 0
if __name__ == "__main__":
sys.exit(main())
| mpl-2.0 | -6,723,906,315,588,768,000 | 35.157895 | 119 | 0.562591 | false | 3.587467 | false | false | false |
pengkobe/leetcode | questions/Regular_Expression_Matching.py | 1 | 6170 | # -*- coding: utf-8 -*-
# Difficulty: ★★★
# Implement a regular-expression engine that supports '.' and '*', where:
# . matches any single character
# * matches zero or more of the preceding character
# The match must cover the entire input, not just part of it; the signature is:
# bool isMatch(const char *s, const char *p)
# For example:
# isMatch('aa', 'a') // false
# isMatch('aa', 'aa') // true
# isMatch('aaa', 'aa') // false
# isMatch('aa', 'a*') // true
# isMatch('aa', '.*') // true
# isMatch('ab', '.*') // true
# isMatch('aab', 'c*a*b') // true
# Reference solution: https://github.com/barretlee/daily-algorithms/blob/master/answers/6.md
# Wrong Answer 1
# def isMatch(_str,patt):
# if not _str and not patt:
# return True;
# if not _str and not patt.replace("*",""):
# return True;
# if not _str or not patt:
# return False;
#    This is where it deviates from what the problem requires
# if patt and patt[0]=="*":
# return isMatch(_str[1:],patt) or isMatch(_str,patt[1:]);
# else:
# return (_str[0]==patt[0] or patt[0] ==".") and isMatch(_str[1:],patt[1:]);
# if __name__ == '__main__':
# assert isMatch('aa', 'a') == False
# assert isMatch('aa', 'aa') == True
# assert isMatch('aaa', 'aaa') == True
# assert isMatch('aaa', '.a') == False
# assert isMatch('aa', '.*') == True
# assert isMatch('aab', '*') == True
# assert isMatch('b', '.*.') == False
# assert isMatch('aab', 'c*a*b') == True
# Backup of submitted Solution 1
# class Solution(object):
# def isMatch(self, _str, patt):
# """
# :type s: str
# :type p: str
# :rtype: bool
# """
# if len(patt)==0:
# return len(_str)==0
# if len(patt)>1 and patt[1]=="*":
# i = 0;
# if len(_str) ==0:
# if self.isMatch(_str[0:],patt[2:]):
# return True;
# while i < len(_str):
# if i == 0 and self.isMatch(_str[0:],patt[2:]):
# return True;
# if _str[i] ==patt[0] or patt[0] ==".":
# if self.isMatch(_str[i+1:],patt[2:]):
# return True;
# else:
# break;
# i = i +1;
# return False;
# else:
# if _str and (_str[0]==patt[0] or patt[0] =="."):
# return self.isMatch(_str[1:],patt[1:]);
# else:
# return False;
# Solution 1
def isMatch2(_str,patt):
if len(patt)==0:
return len(_str)==0
if len(patt)>1 and patt[1]=="*":
i = 0;
if len(_str) ==0:
if isMatch2(_str[0:],patt[2:]):
return True;
while i < len(_str):
if i == 0 and isMatch2(_str[0:],patt[2:]):
return True;
if _str[i] == patt[0] or patt[0] ==".":
if isMatch2(_str[i+1:],patt[2:]):
return True;
else:
break;
i = i +1;
return False;
else:
print('else',_str[0:]);
if _str and (_str[0]==patt[0] or patt[0] =="."):
return isMatch2(_str[1:],patt[1:]);
else:
return False;
if __name__ == '__main__':
assert isMatch2('aa', 'a') == False
assert isMatch2('aa', 'aa') == True
assert isMatch2('aaa', 'aaa') == True
assert isMatch2('aaa', '.a') == False
assert isMatch2('ab', '.*') == True
assert isMatch2('aa', '.*') == True
assert isMatch2('b', '.*.') == True
assert isMatch2('aab', 'c*a*b') == True
assert isMatch2('aaba', 'ab*a*c*a') == False
assert isMatch2('a', '.*..a*') == False
assert isMatch2('a', 'ab*') == True
assert isMatch2('abcd', 'd*') == False
assert isMatch2('ab', '.*c') == False
## Solution 1 (for reference)
# def isMatch3( s, p):
# if len(p)==0:
# return len(s)==0
# if len(p)==1 or p[1]!='*':
# if len(s)==0 or (s[0]!=p[0] and p[0]!='.'):
# return False
# return isMatch3(s[1:],p[1:])
# else:
# i=-1;
# length=len(s)
# while i<length and (i<0 or p[0]=='.' or p[0]==s[i]):
# print(length,i+1,s[i+1:]);
# if isMatch3(s[i+1:],p[2:]):
# return True
# i+=1
# return False
## Dynamic-programming solution
## Reasoning outline
# 1. Initialize everything to False first; the 2-D array dp[i][j] records whether the first i characters of s match the first j characters of p
# 2. dp[0][0] = True: the empty string against the empty pattern always matches
# 3. When s is empty, handle the "x*" case; note that, per the problem, '*' must be preceded by a non-'*' character
# 4. Fill in the table, by case on the current pattern character:
#    1. it is '.'
#    2. it is '*' (the tricky case)
#    3. it is an ordinary character
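# Worked mini-example (illustrative): s = "ab", p = ".*"
#   dp[0][2] = True via rule 3 (".*" can match the empty string);
#   dp[1][2] = True because '.' matches 'a' and dp[0][2] holds;
#   dp[2][2] = True because '.' matches 'b' and dp[1][2] holds, so "ab" matches ".*".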
# @return a boolean
def isMatch4(s, p):
s_len = len(s);
p_len = len(p);
dp = [[False for j in range(p_len+1)] for i in range(s_len+1)];
dp[0][0] = True;
for i in range(2,p_len+1):
if p[i-1] == "*":
dp[0][i] = dp[0][i-2];
for i in range(1,s_len+1):
for j in range(1,p_len+1):
if p[j-1] == ".":
dp[i][j] = dp[i-1][j-1];
elif p[j-1] == "*":
                # Pitfall 1: p[i-2]=="."
                # Pitfall 2: dp[i-1][j-1] --> dp[i-1][j]
dp[i][j] = dp[i][j-1] or dp[i][j-2] or ((s[i-1] == p[j-2] or p[j-2]==".") and dp[i-1][j]);
else:
dp[i][j] = dp[i-1][j-1] and (s[i-1] == p[j -1]);
return dp[s_len][p_len];
if __name__ == '__main__':
assert isMatch4('aa', 'a') == False
assert isMatch4('aa', 'aa') == True
assert isMatch4('aaa', '.a') == False
assert isMatch4('ab', '.*') == True
assert isMatch4('aa', '.*') == True
assert isMatch4('b', '.*.') == True
assert isMatch4('aab', 'c*a*b') == True
assert isMatch4('aaba', 'ab*a*c*a') == False
assert isMatch4('a', '.*..a*') == False
assert isMatch4('a', 'ab*') == True
assert isMatch4('abcd', 'd*') == False
assert isMatch4('ab', '.*c') == False
assert isMatch4('abc', 'a*c') == False
    # dp[i-1][j-1] --> dp[i-1][j], e.g. 'aa' vs '.*'
assert isMatch4('aaa', '.*') == True
| gpl-3.0 | -772,837,806,286,783,600 | 29.136126 | 106 | 0.442669 | false | 2.41443 | false | false | false |
MisanthropicBit/bibpy | examples/requirements_check.py | 1 | 1795 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Example of checking the requirements of bibtext and biblatex."""
import bibpy
from bibpy.tools import get_abspath_for
def format_requirements_check(required, optional):
s = ""
if required:
s = "required field(s) " + ", ".join(map(str, required))
if optional:
if required:
s += " and "
temp = ["/".join(map(str, opt)) for opt in optional]
s += "optional field(s) " + ", ".join(temp)
return s
if __name__ == '__main__':
filename = get_abspath_for(
__file__,
'../tests/data/biblatex_missing_requirements.bib'
)
entries = bibpy.read_file(filename, format='biblatex').entries
# Collect all results for which a requirements check failed into a list of
# pairs. There is also bibpy.requirements.check for checking individual
# entries
checked = bibpy.requirements.collect(entries, format='biblatex')
print("* Using bibpy.requirements.collect:")
for (entry, (required, optional)) in checked:
if required or optional:
# Either a missing required or optional field for this entry
print("{0}:{1} is missing {2}"
.format(entry.bibtype, entry.bibkey,
format_requirements_check(required, optional)))
# Requirements checks can also be performed on individual entries.
# Use Entry.validate(format) to throw a RequiredFieldError instead of
# returning a bool
entry = entries[2]
print()
print("* {0} for {1}:{2} = {3}".format("entry.valid('biblatex')",
entry.bibtype,
entry.bibkey,
entry.valid('biblatex')))
| mit | 8,251,867,451,367,847,000 | 31.636364 | 78 | 0.578273 | false | 4.243499 | false | false | false |
syci/ingadhoc-odoo-addons | partner_views_fields/res_config.py | 1 | 1176 | # -*- coding: utf-8 -*-
from openerp import fields, models
class partner_configuration(models.TransientModel):
_inherit = 'base.config.settings'
group_ref = fields.Boolean(
"Show Reference On Partners Tree View",
implied_group='partner_views_fields.group_ref',
)
group_user_id = fields.Boolean(
"Show Commercial On Partners Tree View",
implied_group='partner_views_fields.group_user_id',
)
group_city = fields.Boolean(
"Show City On Partners Tree and Search Views",
implied_group='partner_views_fields.group_city',
)
group_state_id = fields.Boolean(
"Show State On Partners Tree and Search Views",
implied_group='partner_views_fields.group_state_id',
)
group_country_id = fields.Boolean(
"Show Country On Partners Tree and Search Views",
implied_group='partner_views_fields.group_country_id',
)
group_function = fields.Boolean(
"Show Function On Partners Tree and Search Views",
implied_group='partner_views_fields.group_function',
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,898,531,089,247,119,000 | 34.636364 | 65 | 0.654762 | false | 3.89404 | false | false | false |
malt1/lutris | tests/test_installer.py | 1 | 1700 | from unittest import TestCase
from lutris.installer import ScriptInterpreter, ScriptingError
class MockInterpreter(ScriptInterpreter):
""" a script interpreter mock """
script = {'runner': 'linux'}
def is_valid(self):
return True
class TestScriptInterpreter(TestCase):
def test_script_with_correct_values_is_valid(self):
script = {
'runner': 'foo',
'installer': 'bar',
'name': 'baz',
'game_slug': 'baz',
}
interpreter = ScriptInterpreter(script, None)
self.assertFalse(interpreter.errors)
self.assertTrue(interpreter.is_valid())
def test_move_requires_src_and_dst(self):
script = {
'foo': 'bar',
'installer': {},
'name': 'missing_runner',
'game_slug': 'missing-runner'
}
with self.assertRaises(ScriptingError):
interpreter = ScriptInterpreter(script, None)
interpreter._get_move_paths({})
def test_get_command_returns_a_method(self):
interpreter = MockInterpreter({}, None)
command, params = interpreter._map_command({'move': 'whatever'})
self.assertIn("bound method MockInterpreter.move", str(command))
self.assertEqual(params, "whatever")
def test_get_command_doesnt_return_private_methods(self):
""" """
interpreter = MockInterpreter({}, None)
with self.assertRaises(ScriptingError) as ex:
command, params = interpreter._map_command(
{'_substitute': 'foo'}
)
self.assertEqual(ex.exception.message,
"The command substitute does not exists")
| gpl-3.0 | -5,782,265,075,609,909,000 | 33 | 72 | 0.594118 | false | 4.381443 | true | false | false |
jossgray/zyrecffi | zyrecffi/_cffi.py | 1 | 3231 | from cffi import FFI
import os, sys
ffi = FFI()
ffi.cdef('''
// zsock.h
typedef struct _zsock_t zsock_t;
// zmsg.h
typedef struct _zmsg_t zmsg_t;
int zmsg_addstr (zmsg_t* self, const char* string);
char* zmsg_popstr (zmsg_t* self);
// zyre.h
typedef struct _zyre_t zyre_t;
zyre_t* zyre_new (const char *name);
void zyre_destroy (zyre_t **self_p);
const char* zyre_uuid (zyre_t *self);
const char *zyre_name (zyre_t *self);
void zyre_set_header (zyre_t *self, const char *name, const char *format, ...);
void zyre_set_verbose (zyre_t *self);
void zyre_set_port (zyre_t *self, int port_nbr);
void zyre_set_interval (zyre_t *self, size_t interval);
void zyre_set_interface (zyre_t *self, const char *value);
int zyre_set_endpoint (zyre_t *self, const char *format, ...);
void zyre_gossip_bind (zyre_t *self, const char *format, ...);
void zyre_gossip_connect (zyre_t *self, const char *format, ...);
int zyre_start (zyre_t *self);
void zyre_stop (zyre_t *self);
int zyre_join (zyre_t *self, const char *group);
int zyre_leave (zyre_t *self, const char *group);
zmsg_t* zyre_recv (zyre_t *self);
int zyre_whisper (zyre_t *self, const char *peer, zmsg_t **msg_p);
int zyre_shout (zyre_t *self, const char *group, zmsg_t **msg_p);
int zyre_whispers (zyre_t *self, const char *peer, const char *format, ...);
int zyre_shouts (zyre_t *self, const char *group, const char *format, ...);
zsock_t* zyre_socket (zyre_t *self);
void zyre_dump (zyre_t *self);
void zyre_version (int *major, int *minor, int *patch);
void zyre_test (bool verbose);
// zhash.h
typedef struct _zhash_t zhash_t;
// zyre_event.h
typedef struct _zyre_event_t zyre_event_t;
typedef enum {
ZYRE_EVENT_ENTER = 1,
ZYRE_EVENT_JOIN = 2,
ZYRE_EVENT_LEAVE = 3,
ZYRE_EVENT_EXIT = 4,
ZYRE_EVENT_WHISPER = 5,
ZYRE_EVENT_SHOUT = 6
} zyre_event_type_t;
zyre_event_t* zyre_event_new (zyre_t *self);
void zyre_event_destroy (zyre_event_t **self_p);
zyre_event_type_t zyre_event_type (zyre_event_t *self);
char * zyre_event_sender (zyre_event_t *self);
char * zyre_event_name (zyre_event_t *self);
char * zyre_event_address (zyre_event_t *self);
char * zyre_event_header (zyre_event_t *self, char *name);
char * zyre_event_group (zyre_event_t *self);
zmsg_t * zyre_event_msg (zyre_event_t *self);
zhash_t * zyre_event_headers (zyre_event_t *self);
// zsys.h
const char * zsys_interface ();
// zsock_option.h
int zsock_fd (zsock_t *self);
// zpoller.h
typedef struct _zpoller_t zpoller_t;
zpoller_t * zpoller_new (void *reader, ...);
void zpoller_destroy (zpoller_t **self_p);
void * zpoller_wait (zpoller_t *self, int timeout);
int zpoller_add (zpoller_t *self, void *reader);
''')
os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.abspath(os.path.join(os.path.dirname(__file__)))
_zyre_lib_name, _czmq_lib_name = 'zyre', 'czmq'
if sys.platform == 'win32':
_zyre_lib_name, _czmq_lib_name = 'zyre.dll', 'czmq.dll'
zyre_lib = ffi.dlopen(_zyre_lib_name)
czmq_lib = ffi.dlopen(_czmq_lib_name)
new_int_ptr = lambda val: ffi.new('int*', val)
new_void_ptr = lambda val: ffi.new('void*', val)
c_string_to_py = lambda s: ffi.string(s) if s else None
check_null = lambda val: val if val else None | gpl-3.0 | 5,003,882,334,878,248,000 | 21.444444 | 104 | 0.665738 | false | 2.317791 | false | false | false |
cwisecarver/osf.io | api/base/serializers.py | 1 | 58011 | import collections
import re
import furl
from django.core.urlresolvers import resolve, reverse, NoReverseMatch
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from rest_framework import exceptions, permissions
from rest_framework import serializers as ser
from rest_framework.fields import SkipField
from rest_framework.fields import get_attribute as get_nested_attributes
from api.base import utils
from api.base.exceptions import InvalidQueryStringError
from api.base.exceptions import Conflict
from api.base.exceptions import JSONAPIException
from api.base.exceptions import TargetNotSupportedError
from api.base.exceptions import RelationshipPostMakesNoChanges
from api.base.settings import BULK_SETTINGS
from api.base.utils import absolute_reverse, extend_querystring_params, get_user_auth, extend_querystring_if_key_exists
from framework.auth import core as auth_core
from osf.models import AbstractNode as Node
from website import settings
from website import util as website_utils
from website.util.sanitize import strip_html
from website.project.model import has_anonymous_link
def format_relationship_links(related_link=None, self_link=None, rel_meta=None, self_meta=None):
"""
Properly handles formatting of self and related links according to JSON API.
Removes related or self link, if none.
"""
ret = {'links': {}}
if related_link:
ret['links'].update({
'related': {
'href': related_link or {},
'meta': rel_meta or {}
}
})
if self_link:
ret['links'].update({
'self': {
'href': self_link or {},
'meta': self_meta or {}
}
})
return ret
def is_anonymized(request):
if hasattr(request, '_is_anonymized'):
return request._is_anonymized
private_key = request.query_params.get('view_only', None)
request._is_anonymized = website_utils.check_private_key_for_anonymized_link(private_key)
return request._is_anonymized
class ShowIfVersion(ser.Field):
"""
Skips the field if the specified request version is not after a feature's earliest supported version,
or not before the feature's latest supported version.
"""
def __init__(self, field, min_version, max_version, **kwargs):
super(ShowIfVersion, self).__init__(**kwargs)
self.field = field
self.required = field.required
self.read_only = field.read_only
self.min_version = min_version
self.max_version = max_version
self.help_text = 'This field is deprecated as of version {}'.format(self.max_version) or kwargs.get('help_text')
def get_attribute(self, instance):
request = self.context.get('request')
if request and utils.is_deprecated(request.version, self.min_version, self.max_version):
raise SkipField
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(ShowIfVersion, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
class HideIfRegistration(ser.Field):
"""
If node is a registration, this field will return None.
"""
def __init__(self, field, **kwargs):
super(HideIfRegistration, self).__init__(**kwargs)
self.field = field
self.source = field.source
self.required = field.required
self.read_only = field.read_only
def get_attribute(self, instance):
if instance.is_registration:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(HideIfRegistration, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
class HideIfDisabled(ser.Field):
"""
If the user is disabled, returns None for attribute fields, or skips
if a RelationshipField.
"""
def __init__(self, field, **kwargs):
super(HideIfDisabled, self).__init__(**kwargs)
self.field = field
self.source = field.source
self.required = field.required
self.read_only = field.read_only
def get_attribute(self, instance):
if instance.is_disabled:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(HideIfDisabled, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
class HideIfWithdrawal(HideIfRegistration):
"""
If registration is withdrawn, this field will return None.
"""
def get_attribute(self, instance):
if instance.is_retracted:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
class AllowMissing(ser.Field):
def __init__(self, field, **kwargs):
super(AllowMissing, self).__init__(**kwargs)
self.field = field
def to_representation(self, value):
return self.field.to_representation(value)
def bind(self, field_name, parent):
super(AllowMissing, self).bind(field_name, parent)
self.field.bind(field_name, self)
def get_attribute(self, instance):
"""
Overwrite the error message to return a blank value is if there is no existing value.
This allows the display of keys that do not exist in the DB (gitHub on a new OSF account for example.)
"""
try:
return self.field.get_attribute(instance)
except SkipField:
return ''
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def _url_val(val, obj, serializer, request, **kwargs):
"""Function applied by `HyperlinksField` to get the correct value in the
schema.
"""
url = None
if isinstance(val, Link): # If a Link is passed, get the url value
url = val.resolve_url(obj, request)
elif isinstance(val, basestring): # if a string is passed, it's a method of the serializer
if getattr(serializer, 'field', None):
serializer = serializer.parent
url = getattr(serializer, val)(obj) if obj is not None else None
else:
url = val
if not url and url != 0:
raise SkipField
else:
return url
class DateByVersion(ser.DateTimeField):
"""
Custom DateTimeField that forces dates into the ISO-8601 format with timezone information in version 2.2.
"""
def to_representation(self, value):
request = self.context.get('request')
if request:
if request.version >= '2.2':
self.format = '%Y-%m-%dT%H:%M:%S.%fZ'
else:
self.format = '%Y-%m-%dT%H:%M:%S.%f' if value.microsecond else '%Y-%m-%dT%H:%M:%S'
return super(DateByVersion, self).to_representation(value)
class IDField(ser.CharField):
"""
ID field that validates that 'id' in the request body is the same as the instance 'id' for single requests.
"""
def __init__(self, **kwargs):
kwargs['label'] = 'ID'
super(IDField, self).__init__(**kwargs)
# Overrides CharField
def to_internal_value(self, data):
request = self.context.get('request')
if request:
if request.method in utils.UPDATE_METHODS and not utils.is_bulk_request(request):
id_field = self.get_id(self.root.instance)
if id_field != data:
raise Conflict(detail=('The id you used in the URL, "{}", does not match the id you used in the json body\'s id field, "{}". The object "{}" exists, otherwise you\'d get a 404, so most likely you need to change the id field to match.'.format(id_field, data, id_field)))
return super(IDField, self).to_internal_value(data)
def get_id(self, obj):
return getattr(obj, self.source, '_id')
class TypeField(ser.CharField):
"""
Type field that validates that 'type' in the request body is the same as the Meta type.
Also ensures that type is write-only and required.
"""
def __init__(self, **kwargs):
kwargs['write_only'] = True
kwargs['required'] = True
super(TypeField, self).__init__(**kwargs)
# Overrides CharField
def to_internal_value(self, data):
if isinstance(self.root, JSONAPIListSerializer):
type_ = self.root.child.Meta.type_
else:
type_ = self.root.Meta.type_
if type_ != data:
raise Conflict(detail=('This resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the resource\'s type.'.format(type_, data)))
return super(TypeField, self).to_internal_value(data)
class TargetTypeField(ser.CharField):
"""
Enforces that the related resource has the correct type
"""
def __init__(self, **kwargs):
kwargs['write_only'] = True
kwargs['required'] = True
self.target_type = kwargs.pop('target_type')
super(TargetTypeField, self).__init__(**kwargs)
def to_internal_value(self, data):
if self.target_type != data:
raise Conflict(detail=('The target resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the target resource\'s type.'.format(self.target_type, data)))
return super(TargetTypeField, self).to_internal_value(data)
class JSONAPIListField(ser.ListField):
def to_internal_value(self, data):
if not isinstance(data, list):
self.fail('not_a_list', input_type=type(data).__name__)
return super(JSONAPIListField, self).to_internal_value(data)
class AuthorizedCharField(ser.CharField):
"""
Passes auth of the logged-in user to the object's method
defined as the field source.
Example:
content = AuthorizedCharField(source='get_content')
"""
def __init__(self, source, **kwargs):
self.source = source
super(AuthorizedCharField, self).__init__(source=self.source, **kwargs)
def get_attribute(self, obj):
user = self.context['request'].user
auth = auth_core.Auth(user)
field_source_method = getattr(obj, self.source)
return field_source_method(auth=auth)
class AnonymizedRegexField(AuthorizedCharField):
"""
Performs a regex replace on the content of the authorized object's
source field when an anonymous view is requested.
Example:
content = AnonymizedRegexField(source='get_content', regex='\[@[^\]]*\]\([^\) ]*\)', replace='@A User')
"""
def __init__(self, source, regex, replace, **kwargs):
self.source = source
self.regex = regex
self.replace = replace
super(AnonymizedRegexField, self).__init__(source=self.source, **kwargs)
def get_attribute(self, obj):
value = super(AnonymizedRegexField, self).get_attribute(obj)
if value:
user = self.context['request'].user
auth = auth_core.Auth(user)
if 'view_only' in self.context['request'].query_params:
auth.private_key = self.context['request'].query_params['view_only']
if has_anonymous_link(obj.node, auth):
value = re.sub(self.regex, self.replace, value)
return value
class RelationshipField(ser.HyperlinkedIdentityField):
"""
RelationshipField that permits the return of both self and related links, along with optional
meta information. ::
children = RelationshipField(
related_view='nodes:node-children',
related_view_kwargs={'node_id': '<_id>'},
self_view='nodes:node-node-children-relationship',
self_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_count'}
)
The lookup field must be surrounded in angular brackets to find the attribute on the target. Otherwise, the lookup
field will be returned verbatim. ::
wiki_home = RelationshipField(
related_view='addon:addon-detail',
related_view_kwargs={'node_id': '<_id>', 'provider': 'wiki'},
)
'_id' is enclosed in angular brackets, but 'wiki' is not. 'id' will be looked up on the target, but 'wiki' will not.
The serialized result would be '/nodes/abc12/addons/wiki'.
Field can handle nested attributes: ::
wiki_home = RelationshipField(
related_view='wiki:wiki-detail',
related_view_kwargs={'node_id': '<_id>', 'wiki_id': '<wiki_pages_current.home>'}
)
Field can handle a filter_key, which operates as the source field (but
is named differently to not interfere with HyperLinkedIdentifyField's source
The ``filter_key`` argument defines the Mongo key (or ODM field name) to filter on
when using the ``FilterMixin`` on a view. ::
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
)
Field can include optional filters:
Example:
replies = RelationshipField(
self_view='nodes:node-comments',
self_view_kwargs={'node_id': '<node._id>'},
filter={'target': '<_id>'})
)
"""
json_api_link = True # serializes to a links object
def __init__(self, related_view=None, related_view_kwargs=None, self_view=None, self_view_kwargs=None,
self_meta=None, related_meta=None, always_embed=False, filter=None, filter_key=None, **kwargs):
related_view = related_view
self_view = self_view
related_kwargs = related_view_kwargs
self_kwargs = self_view_kwargs
self.views = {'related': related_view, 'self': self_view}
self.view_kwargs = {'related': related_kwargs, 'self': self_kwargs}
self.related_meta = related_meta
self.self_meta = self_meta
self.always_embed = always_embed
self.filter = filter
self.filter_key = filter_key
assert (related_view is not None or self_view is not None), 'Self or related view must be specified.'
if related_view:
assert related_kwargs is not None, 'Must provide related view kwargs.'
if not callable(related_kwargs):
assert isinstance(related_kwargs,
dict), "Related view kwargs must have format {'lookup_url_kwarg: lookup_field}."
if self_view:
assert self_kwargs is not None, 'Must provide self view kwargs.'
assert isinstance(self_kwargs, dict), "Self view kwargs must have format {'lookup_url_kwarg: lookup_field}."
view_name = related_view
if view_name:
lookup_kwargs = related_kwargs
else:
view_name = self_view
lookup_kwargs = self_kwargs
if kwargs.get('lookup_url_kwarg', None):
lookup_kwargs = kwargs.pop('lookup_url_kwarg')
super(RelationshipField, self).__init__(view_name, lookup_url_kwarg=lookup_kwargs, **kwargs)
# Allow a RelationshipField to be modified if explicitly set so
if kwargs.get('read_only') is not None:
self.read_only = kwargs['read_only']
def resolve(self, resource, field_name, request):
"""
Resolves the view when embedding.
"""
lookup_url_kwarg = self.lookup_url_kwarg
if callable(lookup_url_kwarg):
lookup_url_kwarg = lookup_url_kwarg(getattr(resource, field_name))
kwargs = {attr_name: self.lookup_attribute(resource, attr) for (attr_name, attr) in lookup_url_kwarg.items()}
kwargs.update({'version': request.parser_context['kwargs']['version']})
view = self.view_name
if callable(self.view_name):
view = view(getattr(resource, field_name))
return resolve(
reverse(
view,
kwargs=kwargs
)
)
def process_related_counts_parameters(self, params, value):
"""
Processes related_counts parameter.
Can either be a True/False value for fetching counts on all fields, or a comma-separated list for specifying
individual fields. Ensures field for which we are requesting counts is a relationship field.
"""
if utils.is_truthy(params) or utils.is_falsy(params):
return params
field_counts_requested = [val for val in params.split(',')]
countable_fields = {field for field in self.parent.fields if
getattr(self.parent.fields[field], 'json_api_link', False) or
getattr(getattr(self.parent.fields[field], 'field', None), 'json_api_link', None)}
for count_field in field_counts_requested:
# Some fields will hide relationships, e.g. HideIfWithdrawal
# Ignore related_counts for these fields
fetched_field = self.parent.fields.get(count_field)
hidden = fetched_field and isinstance(fetched_field, HideIfWithdrawal) and getattr(value, 'is_retracted', False)
if not hidden and count_field not in countable_fields:
raise InvalidQueryStringError(
detail="Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got '{0}'".format(
params),
parameter='related_counts'
)
return field_counts_requested
def get_meta_information(self, meta_data, value):
"""
For retrieving meta values, otherwise returns {}
"""
meta = {}
for key in meta_data or {}:
if key == 'count' or key == 'unread':
show_related_counts = self.context['request'].query_params.get('related_counts', False)
if self.context['request'].parser_context.get('kwargs'):
if self.context['request'].parser_context['kwargs'].get('is_embedded'):
show_related_counts = False
field_counts_requested = self.process_related_counts_parameters(show_related_counts, value)
if utils.is_truthy(show_related_counts):
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
elif utils.is_falsy(show_related_counts):
continue
elif self.field_name in field_counts_requested:
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
else:
continue
elif key == 'projects_in_common':
if not get_user_auth(self.context['request']).user:
continue
if not self.context['request'].query_params.get('show_projects_in_common', False):
continue
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
else:
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
return meta
def lookup_attribute(self, obj, lookup_field):
"""
Returns attribute from target object unless attribute surrounded in angular brackets where it returns the lookup field.
Also handles the lookup of nested attributes.
"""
bracket_check = _tpl(lookup_field)
if bracket_check:
source_attrs = bracket_check.split('.')
# If you are using a nested attribute for lookup, and you get the attribute wrong, you will not get an
# error message, you will just not see that field. This allows us to have slightly more dynamic use of
# nested attributes in relationship fields.
try:
return_val = get_nested_attributes(obj, source_attrs)
except KeyError:
return None
return return_val
return lookup_field
def kwargs_lookup(self, obj, kwargs_dict):
"""
For returning kwargs dictionary of format {"lookup_url_kwarg": lookup_value}
"""
if callable(kwargs_dict):
kwargs_dict = kwargs_dict(obj)
kwargs_retrieval = {}
for lookup_url_kwarg, lookup_field in kwargs_dict.items():
try:
lookup_value = self.lookup_attribute(obj, lookup_field)
except AttributeError as exc:
raise AssertionError(exc)
if lookup_value is None:
return None
kwargs_retrieval[lookup_url_kwarg] = lookup_value
return kwargs_retrieval
# Overrides HyperlinkedIdentityField
def get_url(self, obj, view_name, request, format):
urls = {}
for view_name, view in self.views.items():
if view is None:
urls[view_name] = {}
else:
kwargs = self.kwargs_lookup(obj, self.view_kwargs[view_name])
if kwargs is None:
urls[view_name] = {}
else:
if callable(view):
view = view(getattr(obj, self.field_name))
kwargs.update({'version': request.parser_context['kwargs']['version']})
url = self.reverse(view, kwargs=kwargs, request=request, format=format)
if self.filter:
formatted_filters = self.format_filter(obj)
if formatted_filters:
for filter in formatted_filters:
url = extend_querystring_params(
url,
{'filter[{}]'.format(filter['field_name']): filter['value']}
)
else:
url = None
url = extend_querystring_if_key_exists(url, self.context['request'], 'view_only')
urls[view_name] = url
if not urls['self'] and not urls['related']:
urls = None
return urls
def to_esi_representation(self, value, envelope='data'):
relationships = self.to_representation(value)
try:
href = relationships['links']['related']['href']
except KeyError:
raise SkipField
else:
if href and not href == '{}':
if self.always_embed:
envelope = 'data'
query_dict = dict(format=['jsonapi', ], envelope=[envelope, ])
if 'view_only' in self.parent.context['request'].query_params.keys():
query_dict.update(view_only=[self.parent.context['request'].query_params['view_only']])
esi_url = extend_querystring_params(href, query_dict)
return '<esi:include src="{}"/>'.format(esi_url)
def format_filter(self, obj):
""" Take filters specified in self.filter and format them in a way that can be easily parametrized
:param obj: RelationshipField object
:return: list of dictionaries with 'field_name' and 'value' for each filter
"""
filter_fields = self.filter.keys()
filters = []
for field_name in filter_fields:
try:
# check if serializer method passed in
serializer_method = getattr(self.parent, self.filter[field_name])
except AttributeError:
value = self.lookup_attribute(obj, self.filter[field_name])
else:
value = serializer_method(obj)
if not value:
continue
filters.append({'field_name': field_name, 'value': value})
return filters if filters else None
# Overrides HyperlinkedIdentityField
def to_representation(self, value):
request = self.context.get('request', None)
format = self.context.get('format', None)
assert request is not None, (
'`%s` requires the request in the serializer'
" context. Add `context={'request': request}` when instantiating "
'the serializer.' % self.__class__.__name__
)
# By default use whatever format is given for the current context
# unless the target is a different type to the source.
#
# Eg. Consider a HyperlinkedIdentityField pointing from a json
# representation to an html property of that representation...
#
# '/snippets/1/' should link to '/snippets/1/highlight/'
# ...but...
# '/snippets/1/.json' should link to '/snippets/1/highlight/.html'
if format and self.format and self.format != format:
format = self.format
# Return the hyperlink, or error if incorrectly configured.
try:
url = self.get_url(value, self.view_name, request, format)
except NoReverseMatch:
msg = (
'Could not resolve URL for hyperlinked relationship using '
'view name "%s". You may have failed to include the related '
'model in your API, or incorrectly configured the '
'`lookup_field` attribute on this field.'
)
if value in ('', None):
value_string = {'': 'the empty string', None: 'None'}[value]
msg += (
' WARNING: The value of the field on the model instance '
"was %s, which may be why it didn't match any "
'entries in your URL conf.' % value_string
)
raise ImproperlyConfigured(msg % self.view_name)
if url is None:
raise SkipField
related_url = url['related']
related_meta = self.get_meta_information(self.related_meta, value)
self_url = url['self']
self_meta = self.get_meta_information(self.self_meta, value)
return format_relationship_links(related_url, self_url, related_meta, self_meta)
class FileCommentRelationshipField(RelationshipField):
def get_url(self, obj, view_name, request, format):
if obj.kind == 'folder':
raise SkipField
return super(FileCommentRelationshipField, self).get_url(obj, view_name, request, format)
class TargetField(ser.Field):
"""
Field that returns a nested dict with the url (constructed based
on the object's type), optional meta information, and link_type.
Example:
target = TargetField(link_type='related', meta={'type': 'get_target_type'})
"""
json_api_link = True # serializes to a links object
view_map = {
'node': {
'view': 'nodes:node-detail',
'lookup_kwarg': 'node_id'
},
'comment': {
'view': 'comments:comment-detail',
'lookup_kwarg': 'comment_id'
},
'nodewikipage': {
'view': None,
'lookup_kwarg': None
}
}
def __init__(self, **kwargs):
self.meta = kwargs.pop('meta', {})
self.link_type = kwargs.pop('link_type', 'url')
super(TargetField, self).__init__(read_only=True, **kwargs)
def resolve(self, resource, field_name, request):
"""
Resolves the view for target node or target comment when embedding.
"""
view_info = self.view_map.get(resource.target.referent._name, None)
if not view_info:
raise TargetNotSupportedError('{} is not a supported target type'.format(
resource.target._name
))
if not view_info['view']:
return None, None, None
embed_value = resource.target._id
return resolve(
reverse(
view_info['view'],
kwargs={
view_info['lookup_kwarg']: embed_value,
'version': request.parser_context['kwargs']['version']
}
)
)
def to_esi_representation(self, value, envelope='data'):
href = value.get_absolute_url()
if href:
esi_url = extend_querystring_params(href, dict(envelope=[envelope, ], format=['jsonapi', ]))
return '<esi:include src="{}"/>'.format(esi_url)
return self.to_representation(value)
def to_representation(self, value):
"""
Returns nested dictionary in format {'links': {'self.link_type': ... }
If no meta information, self.link_type is equal to a string containing link's URL. Otherwise,
the link is represented as a links object with 'href' and 'meta' members.
"""
meta = website_utils.rapply(self.meta, _url_val, obj=value, serializer=self.parent, request=self.context['request'])
return {'links': {self.link_type: {'href': value.referent.get_absolute_url(), 'meta': meta}}}
class LinksField(ser.Field):
"""Links field that resolves to a links object. Used in conjunction with `Link`.
If the object to be serialized implements `get_absolute_url`, then the return value
of that method is used for the `self` link.
Example: ::
links = LinksField({
'html': 'absolute_url',
'children': {
'related': Link('nodes:node-children', node_id='<_id>'),
'count': 'get_node_count'
},
'contributors': {
'related': Link('nodes:node-contributors', node_id='<_id>'),
'count': 'get_contrib_count'
},
'registrations': {
'related': Link('nodes:node-registrations', node_id='<_id>'),
'count': 'get_registration_count'
},
})
"""
def __init__(self, links, *args, **kwargs):
ser.Field.__init__(self, read_only=True, *args, **kwargs)
self.links = links
def get_attribute(self, obj):
# We pass the object instance onto `to_representation`,
# not just the field attribute.
return obj
def extend_absolute_url(self, obj):
return extend_querystring_if_key_exists(obj.get_absolute_url(), self.context['request'], 'view_only')
def to_representation(self, obj):
ret = {}
for name, value in self.links.iteritems():
try:
url = _url_val(value, obj=obj, serializer=self.parent, request=self.context['request'])
except SkipField:
continue
else:
ret[name] = url
if hasattr(obj, 'get_absolute_url') and 'self' not in self.links:
ret['self'] = self.extend_absolute_url(obj)
return ret
class ListDictField(ser.DictField):
def __init__(self, **kwargs):
super(ListDictField, self).__init__(**kwargs)
def to_representation(self, value):
"""
Ensure the value of each key in the Dict to be a list
"""
res = {}
for key, val in value.items():
if isinstance(self.child.to_representation(val), list):
res[six.text_type(key)] = self.child.to_representation(val)
else:
if self.child.to_representation(val):
res[six.text_type(key)] = [self.child.to_representation(val)]
else:
res[six.text_type(key)] = []
return res
_tpl_pattern = re.compile(r'\s*<\s*(\S*)\s*>\s*')
def _tpl(val):
"""Return value within ``< >`` if possible, else return ``None``."""
match = _tpl_pattern.match(val)
if match:
return match.groups()[0]
return None
def _get_attr_from_tpl(attr_tpl, obj):
attr_name = _tpl(str(attr_tpl))
if attr_name:
attribute_value = obj
for attr_segment in attr_name.split('.'):
attribute_value = getattr(attribute_value, attr_segment, ser.empty)
if attribute_value is not ser.empty:
return attribute_value
elif attr_name in obj:
return obj[attr_name]
else:
raise AttributeError(
'{attr_name!r} is not a valid '
'attribute of {obj!r}'.format(
attr_name=attr_name, obj=obj,
))
else:
return attr_tpl
# TODO: Make this a Field that is usable on its own?
class Link(object):
"""Link object to use in conjunction with Links field. Does reverse lookup of
URLs given an endpoint name and attributed enclosed in `<>`. This includes
complex key strings like 'user.id'
"""
def __init__(self, endpoint, args=None, kwargs=None, query_kwargs=None, **kw):
self.endpoint = endpoint
self.kwargs = kwargs or {}
self.args = args or tuple()
self.reverse_kwargs = kw
self.query_kwargs = query_kwargs or {}
def resolve_url(self, obj, request):
kwarg_values = {key: _get_attr_from_tpl(attr_tpl, obj) for key, attr_tpl in self.kwargs.items()}
kwarg_values.update({'version': request.parser_context['kwargs']['version']})
arg_values = [_get_attr_from_tpl(attr_tpl, obj) for attr_tpl in self.args]
query_kwarg_values = {key: _get_attr_from_tpl(attr_tpl, obj) for key, attr_tpl in self.query_kwargs.items()}
# Presumably, if you have are expecting a value but the value is empty, then the link is invalid.
for item in kwarg_values:
if kwarg_values[item] is None:
raise SkipField
return utils.absolute_reverse(
self.endpoint,
args=arg_values,
kwargs=kwarg_values,
query_kwargs=query_kwarg_values,
**self.reverse_kwargs
)
class WaterbutlerLink(Link):
"""Link object to use in conjunction with Links field. Builds a Waterbutler URL for files.
"""
def __init__(self, must_be_file=None, must_be_folder=None, **kwargs):
self.kwargs = kwargs
self.must_be_file = must_be_file
self.must_be_folder = must_be_folder
def resolve_url(self, obj, request):
"""Reverse URL lookup for WaterButler routes
"""
if self.must_be_folder is True and not obj.path.endswith('/'):
raise SkipField
if self.must_be_file is True and obj.path.endswith('/'):
raise SkipField
url = website_utils.waterbutler_api_url_for(obj.node._id, obj.provider, obj.path, **self.kwargs)
if not url:
raise SkipField
else:
return url
class NodeFileHyperLinkField(RelationshipField):
def __init__(self, kind=None, never_embed=False, **kws):
self.kind = kind
self.never_embed = never_embed
super(NodeFileHyperLinkField, self).__init__(**kws)
def get_url(self, obj, view_name, request, format):
if self.kind and obj.kind != self.kind:
raise SkipField
return super(NodeFileHyperLinkField, self).get_url(obj, view_name, request, format)
class JSONAPIListSerializer(ser.ListSerializer):
def to_representation(self, data):
enable_esi = self.context.get('enable_esi', False)
envelope = self.context.update({'envelope': None})
# Don't envelope when serializing collection
errors = {}
bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
if isinstance(data, collections.Mapping):
errors = data.get('errors', None)
data = data.get('data', None)
if enable_esi:
ret = [
self.child.to_esi_representation(item, envelope=None) for item in data
]
else:
ret = [
self.child.to_representation(item, envelope=envelope) for item in data
]
if errors and bulk_skip_uneditable:
ret.append({'errors': errors})
return ret
# Overrides ListSerializer which doesn't support multiple update by default
def update(self, instance, validated_data):
# avoiding circular import
from api.nodes.serializers import ContributorIDField
# if PATCH request, the child serializer's partial attribute needs to be True
if self.context['request'].method == 'PATCH':
self.child.partial = True
bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
if not bulk_skip_uneditable:
if len(instance) != len(validated_data):
raise exceptions.ValidationError({'non_field_errors': 'Could not find all objects to update.'})
id_lookup = self.child.fields['id'].source
data_mapping = {item.get(id_lookup): item for item in validated_data}
if isinstance(self.child.fields['id'], ContributorIDField):
instance_mapping = {self.child.fields['id'].get_id(item): item for item in instance}
else:
instance_mapping = {getattr(item, id_lookup): item for item in instance}
ret = {'data': []}
for resource_id, resource in instance_mapping.items():
data = data_mapping.pop(resource_id, None)
ret['data'].append(self.child.update(resource, data))
# If skip_uneditable in request, add validated_data for nodes in which the user did not have edit permissions to errors
if data_mapping and bulk_skip_uneditable:
ret.update({'errors': data_mapping.values()})
return ret
# overrides ListSerializer
def run_validation(self, data):
meta = getattr(self, 'Meta', None)
bulk_limit = getattr(meta, 'bulk_limit', BULK_SETTINGS['DEFAULT_BULK_LIMIT'])
num_items = len(data)
if num_items > bulk_limit:
raise JSONAPIException(source={'pointer': '/data'},
detail='Bulk operation limit is {}, got {}.'.format(bulk_limit, num_items))
return super(JSONAPIListSerializer, self).run_validation(data)
# overrides ListSerializer: Add HTML-sanitization similar to that used by APIv1 front-end views
def is_valid(self, clean_html=True, **kwargs):
"""
After validation, scrub HTML from validated_data prior to saving (for create and update views)
Exclude 'type' from validated_data.
"""
ret = super(JSONAPIListSerializer, self).is_valid(**kwargs)
if clean_html is True:
self._validated_data = website_utils.rapply(self.validated_data, strip_html)
for data in self._validated_data:
data.pop('type', None)
return ret
class SparseFieldsetMixin(object):
def parse_sparse_fields(self, allow_unsafe=False, **kwargs):
request = kwargs.get('context', {}).get('request', None)
if request and (allow_unsafe or request.method in permissions.SAFE_METHODS):
sparse_fieldset_query_param = 'fields[{}]'.format(self.Meta.type_)
if sparse_fieldset_query_param in request.query_params:
fieldset = request.query_params[sparse_fieldset_query_param].split(',')
for field_name in self.fields.fields.copy().keys():
if field_name in ('id', 'links', 'type'):
# MUST return these fields
continue
if field_name not in fieldset:
self.fields.pop(field_name)
class BaseAPISerializer(ser.Serializer, SparseFieldsetMixin):
def __init__(self, *args, **kwargs):
self.parse_sparse_fields(**kwargs)
super(BaseAPISerializer, self).__init__(*args, **kwargs)
self.model_field_names = [name if field.source == '*' else field.source
for name, field in self.fields.iteritems()]
class JSONAPISerializer(BaseAPISerializer):
"""Base serializer. Requires that a `type_` option is set on `class Meta`. Also
allows for enveloping of both single resources and collections. Looks to nest fields
according to JSON API spec. Relational fields must set json_api_link=True flag.
Self/html links must be nested under "links".
"""
# Don't serialize relationships that use these views
# when viewing thru an anonymous VOL
views_to_hide_if_anonymous = {
'users:user-detail',
'nodes:node-registrations',
}
# overrides Serializer
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls(*args, **kwargs)
return JSONAPIListSerializer(*args, **kwargs)
def invalid_embeds(self, fields, embeds):
fields_check = fields[:]
for index, field in enumerate(fields_check):
if getattr(field, 'field', None):
fields_check[index] = field.field
invalid_embeds = set(embeds.keys()) - set(
[f.field_name for f in fields_check if getattr(f, 'json_api_link', False)])
return invalid_embeds
def to_esi_representation(self, data, envelope='data'):
href = None
query_params_blacklist = ['page[size]']
href = self.get_absolute_url(data)
if href and href != '{}':
esi_url = furl.furl(href).add(args=dict(self.context['request'].query_params)).remove(
args=query_params_blacklist).remove(args=['envelope']).add(args={'envelope': envelope}).url
return '<esi:include src="{}"/>'.format(esi_url)
# failsafe, let python do it if something bad happened in the ESI construction
return super(JSONAPISerializer, self).to_representation(data)
# overrides Serializer
def to_representation(self, obj, envelope='data'):
"""Serialize to final representation.
:param obj: Object to be serialized.
:param envelope: Key for resource object.
"""
ret = {}
meta = getattr(self, 'Meta', None)
type_ = getattr(meta, 'type_', None)
assert type_ is not None, 'Must define Meta.type_'
self.parse_sparse_fields(allow_unsafe=True, context=self.context)
data = {
'id': '',
'type': type_,
'attributes': {},
'relationships': {},
'embeds': {},
'links': {},
}
embeds = self.context.get('embed', {})
context_envelope = self.context.get('envelope', envelope)
if context_envelope == 'None':
context_envelope = None
enable_esi = self.context.get('enable_esi', False)
is_anonymous = is_anonymized(self.context['request'])
to_be_removed = set()
if is_anonymous and hasattr(self, 'non_anonymized_fields'):
# Drop any fields that are not specified in the `non_anonymized_fields` variable.
allowed = set(self.non_anonymized_fields)
existing = set(self.fields.keys())
to_be_removed = existing - allowed
fields = [field for field in self.fields.values() if
not field.write_only and field.field_name not in to_be_removed]
invalid_embeds = self.invalid_embeds(fields, embeds)
invalid_embeds = invalid_embeds - to_be_removed
if invalid_embeds:
raise InvalidQueryStringError(parameter='embed',
detail='The following fields are not embeddable: {}'.format(
', '.join(invalid_embeds)))
for field in fields:
try:
attribute = field.get_attribute(obj)
except SkipField:
continue
nested_field = getattr(field, 'field', None)
if attribute is None:
# We skip `to_representation` for `None` values so that
# fields do not have to explicitly deal with that case.
data['attributes'][field.field_name] = None
else:
try:
if hasattr(attribute, 'all'):
representation = field.to_representation(attribute.all())
else:
representation = field.to_representation(attribute)
except SkipField:
continue
if getattr(field, 'json_api_link', False) or getattr(nested_field, 'json_api_link', False):
# If embed=field_name is appended to the query string or 'always_embed' flag is True, directly embed the
# results in addition to adding a relationship link
if embeds and (field.field_name in embeds or getattr(field, 'always_embed', None)):
if enable_esi:
try:
result = field.to_esi_representation(attribute, envelope=envelope)
except SkipField:
continue
else:
try:
# If a field has an empty representation, it should not be embedded.
result = self.context['embed'][field.field_name](obj)
except SkipField:
result = None
if result:
data['embeds'][field.field_name] = result
else:
data['embeds'][field.field_name] = {'error': 'This field is not embeddable.'}
try:
if not (is_anonymous and
hasattr(field, 'view_name') and
field.view_name in self.views_to_hide_if_anonymous):
data['relationships'][field.field_name] = representation
except SkipField:
continue
elif field.field_name == 'id':
data['id'] = representation
elif field.field_name == 'links':
data['links'] = representation
else:
data['attributes'][field.field_name] = representation
if not data['relationships']:
del data['relationships']
if not data['embeds']:
del data['embeds']
if context_envelope:
ret[context_envelope] = data
if is_anonymous:
ret['meta'] = {'anonymous': True}
else:
ret = data
return ret
def get_absolute_url(self, obj):
raise NotImplementedError()
def get_absolute_html_url(self, obj):
return extend_querystring_if_key_exists(obj.absolute_url, self.context['request'], 'view_only')
# overrides Serializer: Add HTML-sanitization similar to that used by APIv1 front-end views
def is_valid(self, clean_html=True, **kwargs):
"""
After validation, scrub HTML from validated_data prior to saving (for create and update views)
Exclude 'type' and '_id' from validated_data.
"""
ret = super(JSONAPISerializer, self).is_valid(**kwargs)
if clean_html is True:
self._validated_data = self.sanitize_data()
self._validated_data.pop('type', None)
self._validated_data.pop('target_type', None)
if self.context['request'].method in utils.UPDATE_METHODS:
self._validated_data.pop('_id', None)
return ret
def sanitize_data(self):
return website_utils.rapply(self.validated_data, strip_html)
class JSONAPIRelationshipSerializer(BaseAPISerializer):
"""Base Relationship serializer. Requires that a `type_` option is set on `class Meta`.
Provides a simplified serialization of the relationship, allowing for simple update request
bodies.
"""
id = ser.CharField(required=False, allow_null=True)
type = TypeField(required=False, allow_null=True)
def to_representation(self, obj):
meta = getattr(self, 'Meta', None)
type_ = getattr(meta, 'type_', None)
assert type_ is not None, 'Must define Meta.type_'
relation_id_field = self.fields['id']
attribute = relation_id_field.get_attribute(obj)
relationship = relation_id_field.to_representation(attribute)
data = {'type': type_, 'id': relationship} if relationship else None
return data
def DevOnly(field):
"""Make a field only active in ``DEV_MODE``. ::
experimental_field = DevMode(CharField(required=False))
"""
return field if settings.DEV_MODE else None
class RestrictedDictSerializer(ser.Serializer):
def to_representation(self, obj):
data = {}
fields = [field for field in self.fields.values() if not field.write_only]
for field in fields:
try:
attribute = field.get_attribute(obj)
except ser.SkipField:
continue
if attribute is None:
# We skip `to_representation` for `None` values so that
# fields do not have to explicitly deal with that case.
data[field.field_name] = None
else:
data[field.field_name] = field.to_representation(attribute)
return data
def relationship_diff(current_items, new_items):
"""
To be used in POST and PUT/PATCH relationship requests, as, by JSON API specs,
in update requests, the 'remove' items' relationships would be deleted, and the
'add' would be added, while for create requests, only the 'add' would be added.
:param current_items: The current items in the relationship
:param new_items: The items passed in the request
:return:
"""
return {
'add': {k: new_items[k] for k in (set(new_items.keys()) - set(current_items.keys()))},
'remove': {k: current_items[k] for k in (set(current_items.keys()) - set(new_items.keys()))}
}
class AddonAccountSerializer(JSONAPISerializer):
id = ser.CharField(source='_id', read_only=True)
provider = ser.CharField(read_only=True)
profile_url = ser.CharField(required=False, read_only=True)
display_name = ser.CharField(required=False, read_only=True)
links = links = LinksField({
'self': 'get_absolute_url',
})
class Meta:
type_ = 'external_accounts'
def get_absolute_url(self, obj):
kwargs = self.context['request'].parser_context['kwargs']
kwargs.update({'account_id': obj._id})
return absolute_reverse(
'users:user-external_account-detail',
kwargs=kwargs
)
return obj.get_absolute_url()
class LinkedNode(JSONAPIRelationshipSerializer):
id = ser.CharField(source='_id', required=False, allow_null=True)
class Meta:
type_ = 'linked_nodes'
class LinkedRegistration(JSONAPIRelationshipSerializer):
id = ser.CharField(source='_id', required=False, allow_null=True)
class Meta:
type_ = 'linked_registrations'
class LinkedNodesRelationshipSerializer(BaseAPISerializer):
data = ser.ListField(child=LinkedNode())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return obj['self'].linked_nodes_self_url
def get_related_url(self, obj):
return obj['self'].linked_nodes_related_url
class Meta:
type_ = 'linked_nodes'
def get_pointers_to_add_remove(self, pointers, new_pointers):
diff = relationship_diff(
current_items={pointer._id: pointer for pointer in pointers},
new_items={val['_id']: val for val in new_pointers}
)
nodes_to_add = []
for node_id in diff['add']:
node = Node.load(node_id)
if not node:
raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_id))
nodes_to_add.append(node)
return nodes_to_add, diff['remove'].values()
def make_instance_obj(self, obj):
# Convenience method to format instance based on view's get_object
return {'data': [
pointer for pointer in
obj.linked_nodes.filter(is_deleted=False, type='osf.node')
], 'self': obj}
def update(self, instance, validated_data):
collection = instance['self']
auth = utils.get_user_auth(self.context['request'])
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
for pointer in remove:
collection.rm_pointer(pointer, auth)
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
def create(self, validated_data):
instance = self.context['view'].get_object()
auth = utils.get_user_auth(self.context['request'])
collection = instance['self']
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
if not len(add):
raise RelationshipPostMakesNoChanges
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
class LinkedRegistrationsRelationshipSerializer(BaseAPISerializer):
data = ser.ListField(child=LinkedRegistration())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return obj['self'].linked_registrations_self_url
def get_related_url(self, obj):
return obj['self'].linked_registrations_related_url
class Meta:
type_ = 'linked_registrations'
def get_pointers_to_add_remove(self, pointers, new_pointers):
diff = relationship_diff(
current_items={pointer._id: pointer for pointer in pointers},
new_items={val['_id']: val for val in new_pointers}
)
nodes_to_add = []
for node_id in diff['add']:
node = Node.load(node_id)
if not node:
raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_id))
nodes_to_add.append(node)
return nodes_to_add, diff['remove'].values()
def make_instance_obj(self, obj):
# Convenience method to format instance based on view's get_object
return {'data': [
pointer for pointer in
obj.linked_nodes.filter(is_deleted=False, type='osf.registration')
], 'self': obj}
def update(self, instance, validated_data):
collection = instance['self']
auth = utils.get_user_auth(self.context['request'])
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
for pointer in remove:
collection.rm_pointer(pointer, auth)
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
def create(self, validated_data):
instance = self.context['view'].get_object()
auth = utils.get_user_auth(self.context['request'])
collection = instance['self']
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
if not len(add):
raise RelationshipPostMakesNoChanges
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
| apache-2.0 | 3,039,951,152,254,714,000 | 37.725634 | 289 | 0.597076 | false | 4.176157 | false | false | false |
yakky/djangocms-text-ckeditor | djangocms_text_ckeditor/forms.py | 1 | 3464 | # -*- coding: utf-8 -*-
from django import forms
from django.core import signing
from django.core.signing import BadSignature
from django.forms.models import ModelForm
from django.template import RequestContext
from django.utils.translation import ugettext
from cms.models import CMSPlugin
from .models import Text
from .utils import _render_cms_plugin, plugin_tags_to_id_list, plugin_to_tag
class ActionTokenValidationForm(forms.Form):
token = forms.CharField(required=True)
def get_id_from_token(self, session_id):
payload = self.cleaned_data['token']
signer = signing.Signer(salt=session_id)
try:
return signer.unsign(payload)
except BadSignature:
return False
class RenderPluginForm(forms.Form):
plugin = forms.ModelChoiceField(
queryset=CMSPlugin.objects.none(),
required=True,
)
def __init__(self, *args, **kwargs):
self.text_plugin = kwargs.pop('text_plugin')
super(RenderPluginForm, self).__init__(*args, **kwargs)
self.fields['plugin'].queryset = self.get_child_plugins()
def get_child_plugins(self):
return self.text_plugin.get_descendants()
def render_plugin(self, request):
plugin = self.cleaned_data['plugin']
context = RequestContext(request)
context['request'] = request
rendered_content = _render_cms_plugin(plugin, context)
return plugin_to_tag(plugin, content=rendered_content, admin=True)
class DeleteOnCancelForm(forms.Form):
child_plugins = forms.ModelMultipleChoiceField(
queryset=CMSPlugin.objects.none(),
required=False,
)
def __init__(self, *args, **kwargs):
self.text_plugin = kwargs.pop('text_plugin')
super(DeleteOnCancelForm, self).__init__(*args, **kwargs)
self.fields['child_plugins'].queryset = self.get_child_plugins()
def clean(self):
children = self.cleaned_data.get('child_plugins')
if not children and self.text_plugin.get_plugin_instance()[0]:
# This check prevents users from using a cancel token
# to delete just any text plugin.
# Only non-saved text plugins can be deleted.
message = ugettext("Can't delete a saved plugin.")
raise forms.ValidationError(message, code='invalid')
return self.cleaned_data
def get_child_plugins(self):
# We use this queryset to limit the plugins
# a user can delete to only plugins that have not
# been saved in text and are descendants of the text plugin.
instance = self.text_plugin.get_plugin_instance()[0]
if instance:
# Only non-saved children can be deleted.
excluded_plugins = plugin_tags_to_id_list(instance.body)
else:
excluded_plugins = []
queryset = self.text_plugin.get_descendants()
if excluded_plugins:
queryset = queryset.exclude(pk__in=excluded_plugins)
return queryset
def delete(self):
child_plugins = self.cleaned_data.get('child_plugins')
if child_plugins:
child_plugins.delete()
else:
self.text_plugin.delete()
class TextForm(ModelForm):
body = forms.CharField()
class Meta:
model = Text
exclude = (
'page',
'position',
'placeholder',
'language',
'plugin_type',
)
| bsd-3-clause | -7,001,191,991,926,625,000 | 29.928571 | 76 | 0.633661 | false | 4.214112 | false | false | false |
SanPen/GridCal | src/research/PTDF/ACPTDF_research2.py | 1 | 14022 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
import numba as nb
import time
from warnings import warn
import scipy.sparse as sp
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse import hstack as hs, vstack as vs
from scipy.sparse.linalg import factorized, spsolve, inv
from matplotlib import pyplot as plt
from GridCal.Engine import *
def SysMat(Y, Ys, pq, pvpq):
"""
Computes the system Jacobian matrix in polar coordinates
Args:
        Y: Complete admittance matrix (Ybus)
        Ys: Series admittance matrix (without the shunt elements)
pq: Array with the indices of the PQ buses
pvpq: Array with the indices of the PV and PQ buses
Returns:
The system Jacobian matrix
"""
A11 = -Ys.imag[np.ix_(pvpq, pvpq)]
A12 = Y.real[np.ix_(pvpq, pq)]
A21 = -Ys.real[np.ix_(pq, pvpq)]
A22 = -Y.imag[np.ix_(pq, pq)]
Asys = sp.vstack([sp.hstack([A11, A12]),
sp.hstack([A21, A22])], format="csc")
return Asys
def compute_acptdf(Ybus, Yseries, Yf, Yt, Cf, V, pq, pv, distribute_slack):
"""
Compute the AC-PTDF
    :param Ybus: complete admittance matrix
    :param Yseries: series admittance matrix (without the shunt elements)
    :param Yf: admittance matrix of the branches' "from" side
    :param Yt: admittance matrix of the branches' "to" side
    :param Cf: connectivity matrix branch - bus "from"
    :param V: array of nodal voltages
    :param pq: array of pq node indices
    :param pv: array of pv node indices
    :param distribute_slack: boolean, distribute the slack among all the buses?
    :return: AC-PTDF matrix (branches, buses)
"""
n = len(V)
pvpq = np.r_[pv, pq]
npq = len(pq)
# compute the Jacobian
J = SysMat(Ybus, Yseries, pq, pvpq)
if distribute_slack:
dP = np.ones((n, n)) * (-1 / (n - 1))
for i in range(n):
dP[i, i] = 1.0
else:
dP = np.eye(n, n)
    # compose the compatible right-hand side array (the Q increments are considered zero)
dQ = np.zeros((npq, n))
# dQ = np.eye(n, n)[pq, :]
dS = np.r_[dP[pvpq, :], dQ]
# solve the voltage increments
dx = spsolve(J, dS)
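    # each column of dx holds the voltage angle increments at the pv+pq buses followed by
    # the voltage module increments at the pq buses for the corresponding bus power increment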
# compute branch derivatives
If = Yf * V
E = V / np.abs(V)
Vdiag = sp.diags(V)
Vdiag_conj = sp.diags(np.conj(V))
Ediag = sp.diags(E)
Ediag_conj = sp.diags(np.conj(E))
If_diag_conj = sp.diags(np.conj(If))
Yf_conj = Yf.copy()
Yf_conj.data = np.conj(Yf_conj.data)
Yt_conj = Yt.copy()
Yt_conj.data = np.conj(Yt_conj.data)
dSf_dVa = 1j * (If_diag_conj * Cf * Vdiag - sp.diags(Cf * V) * Yf_conj * Vdiag_conj)
dSf_dVm = If_diag_conj * Cf * Ediag - sp.diags(Cf * V) * Yf_conj * Ediag_conj
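    # dSf_dVa and dSf_dVm are the derivatives of the "from" side branch power flows
    # with respect to the voltage angles and the voltage modules respectively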
# compose the final AC-PTDF
dPf_dVa = dSf_dVa.real[:, pvpq]
dPf_dVm = dSf_dVm.real[:, pq]
PTDF = sp.hstack((dPf_dVa, dPf_dVm)) * dx
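    # every column of the PTDF holds the branch active power increments caused by
    # a unit active power increment at the corresponding bus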
return PTDF
def make_lodf(circuit: SnapshotCircuit, PTDF, correct_values=True):
"""
    Compute the Line Outage Distribution Factors (LODF) matrix
    :param circuit: SnapshotCircuit instance
    :param PTDF: PTDF matrix in numpy array form
    :param correct_values: clip the LODF values to the [-1, 1] range?
    :return: LODF matrix (branches, branches)
"""
nl = circuit.nbr
# compute the connectivity matrix
Cft = circuit.C_branch_bus_f - circuit.C_branch_bus_t
H = PTDF * Cft.T
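    # H[m, c] is the sensitivity of the flow at the monitored branch m to a power
    # transfer between the terminal buses of the contingency branch c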
# old code
# h = sp.diags(H.diagonal())
# LODF = H / (np.ones((nl, nl)) - h * np.ones(nl))
# divide each row of H by the vector 1 - H.diagonal
# LODF = H / (1 - H.diagonal())
# replace possible nan and inf
# LODF[LODF == -np.inf] = 0
# LODF[LODF == np.inf] = 0
# LODF = np.nan_to_num(LODF)
# this loop avoids the divisions by zero
# in those cases the LODF column should be zero
LODF = np.zeros((nl, nl))
div = 1 - H.diagonal()
for j in range(H.shape[1]):
if div[j] != 0:
LODF[:, j] = H[:, j] / div[j]
# replace the diagonal elements by -1
# old code
# LODF = LODF - sp.diags(LODF.diagonal()) - sp.eye(nl, nl), replaced by:
for i in range(nl):
LODF[i, i] = - 1.0
if correct_values:
i1, j1 = np.where(LODF > 1)
for i, j in zip(i1, j1):
LODF[i, j] = 1
i2, j2 = np.where(LODF < -1)
for i, j in zip(i2, j2):
LODF[i, j] = -1
return LODF
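# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): column c of the LODF
# gives the fraction of the outaged branch's pre-contingency flow picked up by
# every other branch, so an N-1 estimate is a rank-1 update of the base flows
# (same formula used in check_lodf further below). The helper is hypothetical.
def _example_n_minus_1_flows(flows_n, LODF, c):
    """
    Estimate all branch flows after the outage of branch c
    """
    return flows_n + LODF[:, c] * flows_n[c]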
def get_branch_time_series(circuit: TimeCircuit, PTDF):
"""
:param grid:
:return:
"""
# option 2: call the power directly
P = circuit.Sbus.real
Pbr = np.dot(PTDF, P).T * circuit.Sbase
return Pbr
def multiple_failure_old(flows, LODF, beta, delta, alpha):
"""
:param flows: array of all the pre-contingency flows
:param LODF: Line Outage Distribution Factors Matrix
:param beta: index of the first failed line
:param delta: index of the second failed line
:param alpha: index of the line where you want to see the effects
:return: post contingency flow in the line alpha
"""
# multiple contingency matrix
M = np.ones((2, 2))
M[0, 1] = -LODF[beta, delta]
M[1, 0] = -LODF[delta, beta]
# normal flows of the lines beta and delta
F = flows[[beta, delta]]
    # contingency flows after failing the lines beta and delta
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines beta and delta
L = LODF[alpha, :][[beta, delta]]
dFf_alpha = np.dot(L, Ff)
    return flows[alpha] + dFf_alpha
def multiple_failure(flows, LODF, failed_idx):
"""
From the paper:
Multiple Element Contingency Screening
IEEE TRANSACTIONS ON POWER SYSTEMS, VOL. 26, NO. 3, AUGUST 2011
C. Matthew Davis and Thomas J. Overbye
:param flows: array of all the pre-contingency flows (the base flows)
:param LODF: Line Outage Distribution Factors Matrix
:param failed_idx: indices of the failed lines
:return: all post contingency flows
"""
# multiple contingency matrix
M = -LODF[np.ix_(failed_idx, failed_idx)]
for i in range(len(failed_idx)):
M[i, i] = 1.0
# normal flows of the failed lines indicated by failed_idx
F = flows[failed_idx]
# Affected flows after failing the lines indicated by failed_idx
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines indicated by failed_idx
L = LODF[:, failed_idx]
dFf_alpha = np.dot(L, Ff)
# return the final contingency flow as the base flow plus the contingency flow delta
return flows + dFf_alpha
def get_n_minus_1_flows(circuit: MultiCircuit):
opt = PowerFlowOptions()
branches = circuit.get_branches()
m = circuit.get_branch_number()
Pmat = np.zeros((m, m)) # monitored, contingency
for c, branch in enumerate(branches):
if branch.active:
branch.active = False
pf = PowerFlowDriver(circuit, opt)
pf.run()
Pmat[:, c] = pf.results.Sbranch.real
branch.active = True
return Pmat
def check_lodf(grid: MultiCircuit):
flows_n1_nr = get_n_minus_1_flows(grid)
# assume 1 island
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0]
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yseries=circuit.Yseries,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=True)
LODF = make_lodf(circuit, PTDF)
Pbus = circuit.get_injections(False).real
flows_n = np.dot(PTDF, Pbus)
nl = circuit.nbr
flows_n1 = np.zeros((nl, nl))
for c in range(nl): # branch that fails (contingency)
# for m in range(nl): # branch to monitor
# flows_n1[m, c] = flows_n[m] + LODF[m, c] * flows_n[c]
flows_n1[:, c] = flows_n[:] + LODF[:, c] * flows_n[c]
return flows_n, flows_n1_nr, flows_n1
def test_ptdf(grid):
"""
    AC-PTDF computation test
:param grid:
:return:
"""
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0] # pick the first island
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yseries=circuit.Yseries,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=False)
print('PTDF:')
print(PTDF)
if __name__ == '__main__':
from GridCal.Engine import FileOpen
import pandas as pd
np.set_printoptions(threshold=sys.maxsize, linewidth=200000000)
# np.set_printoptions(linewidth=2000, suppress=True)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/lynn5buspv.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 118.xlsx'
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
# fname = 'helm_data1.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14 PQ only.gridcal'
# fname = 'IEEE 14 PQ only full.gridcal'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case5.m'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case30.m'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/PGOC_6bus.gridcal'
grid_ = FileOpen(fname).open()
test_ptdf(grid_)
name = os.path.splitext(fname.split(os.sep)[-1])[0]
method = 'ACPTDF (No Jacobian, V=Vpf)'
nc_ = compile_snapshot_circuit(grid_)
islands_ = split_into_islands(nc_)
circuit_ = islands_[0]
pf_driver_ = PowerFlowDriver(grid_, PowerFlowOptions())
pf_driver_.run()
H_ = compute_acptdf(Ybus=circuit_.Ybus,
Yseries=circuit_.Yseries,
Yf=circuit_.Yf,
Yt=circuit_.Yt,
Cf=circuit_.C_branch_bus_f,
V=pf_driver_.results.voltage,
pq=circuit_.pq,
pv=circuit_.pv,
distribute_slack=False)
LODF_ = make_lodf(circuit_, H_)
if H_.shape[0] < 50:
print('PTDF:\n', H_)
print('LODF:\n', LODF_)
flows_n_, flows_n1_nr_, flows_n1_ = check_lodf(grid_)
# in the case of the grid PGOC_6bus
flows_multiple = multiple_failure(flows=flows_n_,
LODF=LODF_,
failed_idx=[1, 5]) # failed lines 2 and 6
Pn1_nr_df = pd.DataFrame(data=flows_n1_nr_, index=nc_.branch_names, columns=nc_.branch_names)
flows_n1_df = pd.DataFrame(data=flows_n1_, index=nc_.branch_names, columns=nc_.branch_names)
# plot N-1
fig = plt.figure(figsize=(12, 8))
title = 'N-1 with ' + method + ' (' + name + ')'
fig.suptitle(title)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
Pn1_nr_df.plot(ax=ax1, legend=False)
flows_n1_df.plot(ax=ax2, legend=False)
diff = Pn1_nr_df - flows_n1_df
diff.plot(ax=ax3, legend=False)
ax1.set_title('Newton-Raphson N-1 flows')
ax2.set_title('PTDF N-1 flows')
ax3.set_title('Difference')
fig.savefig(title + '.png')
# ------------------------------------------------------------------------------------------------------------------
# Perform real time series
# ------------------------------------------------------------------------------------------------------------------
if grid_.time_profile is not None:
grid_.ensure_profiles_exist()
nc_ts = compile_time_circuit(grid_)
islands_ts = split_time_circuit_into_islands(nc_ts)
circuit_ts = islands_ts[0]
pf_options = PowerFlowOptions()
ts_driver = TimeSeries(grid=grid_, options=pf_options)
ts_driver.run()
Pbr_nr = ts_driver.results.Sbranch.real
df_Pbr_nr = pd.DataFrame(data=Pbr_nr, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
# Compute the PTDF based flows
Pbr_ptdf = get_branch_time_series(circuit=circuit_ts, PTDF=H_)
df_Pbr_ptdf = pd.DataFrame(data=Pbr_ptdf, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
# plot
fig = plt.figure(figsize=(12, 8))
title = 'Flows with ' + method + ' (' + name + ')'
fig.suptitle(title)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
df_Pbr_nr.plot(ax=ax1, legend=False)
df_Pbr_ptdf.plot(ax=ax2, legend=False)
diff = df_Pbr_nr - df_Pbr_ptdf
diff.plot(ax=ax3, legend=False)
ax1.set_title('Newton-Raphson flows')
ax2.set_title('PTDF flows')
ax3.set_title('Difference')
fig.savefig(title + '.png')
plt.show()
| gpl-3.0 | 2,332,068,375,098,212,400 | 31.234483 | 120 | 0.590857 | false | 3.160243 | false | false | false |
Yubico/yubikey-manager | ykman/cli/fido.py | 1 | 24461 | # Copyright (c) 2018 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from fido2.ctap import CtapError
from fido2.ctap1 import ApduError
from fido2.ctap2 import (
Ctap2,
ClientPin,
CredentialManagement,
FPBioEnrollment,
CaptureError,
)
from fido2.pcsc import CtapPcscDevice
from yubikit.core.fido import FidoConnection
from yubikit.core.smartcard import SW
from time import sleep
from .util import (
click_postpone_execution,
click_prompt,
click_force_option,
ykman_group,
prompt_timeout,
)
from .util import cli_fail
from ..fido import is_in_fips_mode, fips_reset, fips_change_pin, fips_verify_pin
from ..hid import list_ctap_devices
from ..device import is_fips_version
from ..pcsc import list_devices as list_ccid
from smartcard.Exceptions import NoCardException, CardConnectionException
from typing import Optional
import click
import logging
logger = logging.getLogger(__name__)
FIPS_PIN_MIN_LENGTH = 6
PIN_MIN_LENGTH = 4
@ykman_group(FidoConnection)
@click.pass_context
@click_postpone_execution
def fido(ctx):
"""
Manage the FIDO applications.
Examples:
\b
Reset the FIDO (FIDO2 and U2F) applications:
$ ykman fido reset
\b
Change the FIDO2 PIN from 123456 to 654321:
$ ykman fido access change-pin --pin 123456 --new-pin 654321
"""
conn = ctx.obj["conn"]
try:
ctx.obj["ctap2"] = Ctap2(conn)
except (ValueError, CtapError) as e:
logger.info("FIDO device does not support CTAP2: %s", e)
@fido.command()
@click.pass_context
def info(ctx):
"""
Display general status of the FIDO2 application.
"""
conn = ctx.obj["conn"]
ctap2 = ctx.obj.get("ctap2")
if is_fips_version(ctx.obj["info"].version):
click.echo("FIPS Approved Mode: " + ("Yes" if is_in_fips_mode(conn) else "No"))
elif ctap2:
client_pin = ClientPin(ctap2) # N.B. All YubiKeys with CTAP2 support PIN.
if ctap2.info.options["clientPin"]:
if ctap2.info.force_pin_change:
click.echo(
"NOTE: The FIDO PID is disabled and must be changed before it can "
"be used!"
)
pin_retries, power_cycle = client_pin.get_pin_retries()
if pin_retries:
click.echo(f"PIN is set, with {pin_retries} attempt(s) remaining.")
if power_cycle:
click.echo(
"PIN is temporarily blocked. "
"Remove and re-insert the YubiKey to unblock."
)
else:
click.echo("PIN is set, but has been blocked.")
else:
click.echo("PIN is not set.")
bio_enroll = ctap2.info.options.get("bioEnroll")
if bio_enroll:
uv_retries, _ = client_pin.get_uv_retries()
if uv_retries:
click.echo(
f"Fingerprints registered, with {uv_retries} attempt(s) "
"remaining."
)
else:
click.echo(
"Fingerprints registered, but blocked until PIN is verified."
)
elif bio_enroll is False:
click.echo("No fingerprints have been registered.")
always_uv = ctap2.info.options.get("alwaysUv")
if always_uv is not None:
click.echo(
"Always Require User Verification is turned "
+ ("on." if always_uv else "off.")
)
else:
click.echo("PIN is not supported.")
@fido.command("reset")
@click_force_option
@click.pass_context
def reset(ctx, force):
"""
Reset all FIDO applications.
This action will wipe all FIDO credentials, including FIDO U2F credentials,
on the YubiKey and remove the PIN code.
The reset must be triggered immediately after the YubiKey is
inserted, and requires a touch on the YubiKey.
"""
conn = ctx.obj["conn"]
if isinstance(conn, CtapPcscDevice): # NFC
readers = list_ccid(conn._name)
if not readers or readers[0].reader.name != conn._name:
logger.error(f"Multiple readers matched: {readers}")
cli_fail("Unable to isolate NFC reader.")
dev = readers[0]
logger.debug(f"use: {dev}")
is_fips = False
def prompt_re_insert():
click.echo(
"Remove and re-place your YubiKey on the NFC reader to perform the "
"reset..."
)
removed = False
while True:
sleep(0.5)
try:
with dev.open_connection(FidoConnection):
if removed:
sleep(1.0) # Wait for the device to settle
break
except CardConnectionException:
pass # Expected, ignore
except NoCardException:
removed = True
return dev.open_connection(FidoConnection)
else: # USB
n_keys = len(list_ctap_devices())
if n_keys > 1:
cli_fail("Only one YubiKey can be connected to perform a reset.")
is_fips = is_fips_version(ctx.obj["info"].version)
ctap2 = ctx.obj.get("ctap2")
if not is_fips and not ctap2:
cli_fail("This YubiKey does not support FIDO reset.")
def prompt_re_insert():
click.echo("Remove and re-insert your YubiKey to perform the reset...")
removed = False
while True:
sleep(0.5)
keys = list_ctap_devices()
if not keys:
removed = True
if removed and len(keys) == 1:
return keys[0].open_connection(FidoConnection)
if not force:
if not click.confirm(
"WARNING! This will delete all FIDO credentials, including FIDO U2F "
"credentials, and restore factory settings. Proceed?",
err=True,
):
ctx.abort()
if is_fips:
destroy_input = click_prompt(
"WARNING! This is a YubiKey FIPS device. This command will also "
"overwrite the U2F attestation key; this action cannot be undone and "
"this YubiKey will no longer be a FIPS compliant device.\n"
'To proceed, please enter the text "OVERWRITE"',
default="",
show_default=False,
)
if destroy_input != "OVERWRITE":
cli_fail("Reset aborted by user.")
conn = prompt_re_insert()
try:
with prompt_timeout():
if is_fips:
fips_reset(conn)
else:
Ctap2(conn).reset()
except CtapError as e:
logger.error("Reset failed", exc_info=e)
if e.code == CtapError.ERR.ACTION_TIMEOUT:
cli_fail(
"Reset failed. You need to touch your YubiKey to confirm the reset."
)
elif e.code in (CtapError.ERR.NOT_ALLOWED, CtapError.ERR.PIN_AUTH_BLOCKED):
cli_fail(
"Reset failed. Reset must be triggered within 5 seconds after the "
"YubiKey is inserted."
)
else:
cli_fail(f"Reset failed: {e.code.name}")
except ApduError as e: # From fips_reset
logger.error("Reset failed", exc_info=e)
if e.code == SW.COMMAND_NOT_ALLOWED:
cli_fail(
"Reset failed. Reset must be triggered within 5 seconds after the "
"YubiKey is inserted."
)
else:
cli_fail("Reset failed.")
except Exception as e:
logger.error(e)
cli_fail("Reset failed.")
def _fail_pin_error(ctx, e, other="%s"):
if e.code == CtapError.ERR.PIN_INVALID:
cli_fail("Wrong PIN.")
elif e.code == CtapError.ERR.PIN_AUTH_BLOCKED:
cli_fail(
"PIN authentication is currently blocked. "
"Remove and re-insert the YubiKey."
)
elif e.code == CtapError.ERR.PIN_BLOCKED:
cli_fail("PIN is blocked.")
else:
cli_fail(other % e.code)
@fido.group("access")
def access():
"""
Manage the PIN for FIDO.
"""
@access.command("change-pin")
@click.pass_context
@click.option("-P", "--pin", help="Current PIN code.")
@click.option("-n", "--new-pin", help="A new PIN.")
@click.option(
"-u", "--u2f", is_flag=True, help="Set FIDO U2F PIN instead of FIDO2 PIN."
)
def change_pin(ctx, pin, new_pin, u2f):
"""
Set or change the PIN code.
The FIDO2 PIN must be at least 4 characters long, and supports any type
of alphanumeric characters.
On YubiKey FIPS, a PIN can be set for FIDO U2F. That PIN must be at least
6 characters long.
"""
is_fips = is_fips_version(ctx.obj["info"].version)
if is_fips and not u2f:
cli_fail("This is a YubiKey FIPS. To set the U2F PIN, pass the --u2f option.")
if u2f and not is_fips:
cli_fail(
"This is not a YubiKey FIPS, and therefore does not support a U2F PIN. "
"To set the FIDO2 PIN, remove the --u2f option."
)
if is_fips:
conn = ctx.obj["conn"]
else:
ctap2 = ctx.obj.get("ctap2")
if not ctap2:
cli_fail("PIN is not supported on this YubiKey.")
client_pin = ClientPin(ctap2)
def prompt_new_pin():
return click_prompt(
"Enter your new PIN",
default="",
hide_input=True,
show_default=False,
confirmation_prompt=True,
)
def change_pin(pin, new_pin):
if pin is not None:
_fail_if_not_valid_pin(ctx, pin, is_fips)
try:
if is_fips:
try:
# Failing this with empty current PIN does not cost a retry
fips_change_pin(conn, pin or "", new_pin)
except ApduError as e:
if e.code == SW.WRONG_LENGTH:
pin = _prompt_current_pin()
_fail_if_not_valid_pin(ctx, pin, is_fips)
fips_change_pin(conn, pin, new_pin)
else:
raise
else:
client_pin.change_pin(pin, new_pin)
except CtapError as e:
logger.error("Failed to change PIN", exc_info=e)
if e.code == CtapError.ERR.PIN_POLICY_VIOLATION:
cli_fail("New PIN doesn't meet policy requirements.")
else:
_fail_pin_error(ctx, e, "Failed to change PIN: %s")
except ApduError as e:
logger.error("Failed to change PIN", exc_info=e)
if e.code == SW.VERIFY_FAIL_NO_RETRY:
cli_fail("Wrong PIN.")
elif e.code == SW.AUTH_METHOD_BLOCKED:
cli_fail("PIN is blocked.")
else:
cli_fail(f"Failed to change PIN: SW={e.code:04x}")
def set_pin(new_pin):
_fail_if_not_valid_pin(ctx, new_pin, is_fips)
try:
client_pin.set_pin(new_pin)
except CtapError as e:
logger.error("Failed to set PIN", exc_info=e)
if e.code == CtapError.ERR.PIN_POLICY_VIOLATION:
cli_fail("PIN is too long.")
else:
cli_fail(f"Failed to set PIN: {e.code}")
if not is_fips:
if ctap2.info.options.get("clientPin"):
if not pin:
pin = _prompt_current_pin()
else:
if pin:
cli_fail("There is no current PIN set. Use --new-pin to set one.")
if not new_pin:
new_pin = prompt_new_pin()
if is_fips:
_fail_if_not_valid_pin(ctx, new_pin, is_fips)
change_pin(pin, new_pin)
else:
if len(new_pin) < ctap2.info.min_pin_length:
cli_fail("New PIN is too short.")
if ctap2.info.options.get("clientPin"):
change_pin(pin, new_pin)
else:
set_pin(new_pin)
def _require_pin(ctx, pin, feature="This feature"):
ctap2 = ctx.obj.get("ctap2")
if not ctap2:
cli_fail(f"{feature} is not supported on this YubiKey.")
if not ctap2.info.options.get("clientPin"):
cli_fail(f"{feature} requires having a PIN. Set a PIN first.")
if ctap2.info.force_pin_change:
cli_fail("The FIDO PIN is blocked. Change the PIN first.")
if pin is None:
pin = _prompt_current_pin(prompt="Enter your PIN")
return pin
@access.command("verify-pin")
@click.pass_context
@click.option("-P", "--pin", help="Current PIN code.")
def verify(ctx, pin):
"""
Verify the FIDO PIN against a YubiKey.
For YubiKeys supporting FIDO2 this will reset the "retries" counter of the PIN.
For YubiKey FIPS this will unlock the session, allowing U2F registration.
"""
ctap2 = ctx.obj.get("ctap2")
if ctap2:
pin = _require_pin(ctx, pin)
client_pin = ClientPin(ctap2)
try:
# Get a PIN token to verify the PIN.
client_pin.get_pin_token(
pin, ClientPin.PERMISSION.GET_ASSERTION, "ykman.example.com"
)
except CtapError as e:
logger.error("PIN verification failed", exc_info=e)
cli_fail(f"Error: {e}")
elif is_fips_version(ctx.obj["info"].version):
_fail_if_not_valid_pin(ctx, pin, True)
try:
fips_verify_pin(ctx.obj["conn"], pin)
except ApduError as e:
logger.error("PIN verification failed", exc_info=e)
if e.code == SW.VERIFY_FAIL_NO_RETRY:
cli_fail("Wrong PIN.")
elif e.code == SW.AUTH_METHOD_BLOCKED:
cli_fail("PIN is blocked.")
elif e.code == SW.COMMAND_NOT_ALLOWED:
cli_fail("PIN is not set.")
else:
cli_fail(f"PIN verification failed: {e.code.name}")
else:
cli_fail("This YubiKey does not support a FIDO PIN.")
click.echo("PIN verified.")
def _prompt_current_pin(prompt="Enter your current PIN"):
return click_prompt(prompt, default="", hide_input=True, show_default=False)
def _fail_if_not_valid_pin(ctx, pin=None, is_fips=False):
min_length = FIPS_PIN_MIN_LENGTH if is_fips else PIN_MIN_LENGTH
if not pin or len(pin) < min_length:
ctx.fail(f"PIN must be over {min_length} characters long")
def _gen_creds(credman):
data = credman.get_metadata()
if data.get(CredentialManagement.RESULT.EXISTING_CRED_COUNT) == 0:
return # No credentials
for rp in credman.enumerate_rps():
for cred in credman.enumerate_creds(rp[CredentialManagement.RESULT.RP_ID_HASH]):
yield (
rp[CredentialManagement.RESULT.RP]["id"],
cred[CredentialManagement.RESULT.CREDENTIAL_ID],
cred[CredentialManagement.RESULT.USER]["id"],
cred[CredentialManagement.RESULT.USER]["name"],
)
def _format_cred(rp_id, user_id, user_name):
return f"{rp_id} {user_id.hex()} {user_name}"
@fido.group("credentials")
def creds():
"""
Manage discoverable (resident) credentials.
This command lets you manage credentials stored on your YubiKey.
Credential management is only available when a FIDO PIN is set on the YubiKey.
\b
Examples:
\b
List credentials (providing PIN via argument):
$ ykman fido credentials list --pin 123456
\b
Delete a credential by user name (PIN will be prompted for):
$ ykman fido credentials delete example_user
"""
def _init_credman(ctx, pin):
pin = _require_pin(ctx, pin, "Credential Management")
ctap2 = ctx.obj.get("ctap2")
client_pin = ClientPin(ctap2)
try:
token = client_pin.get_pin_token(pin, ClientPin.PERMISSION.CREDENTIAL_MGMT)
except CtapError as e:
logger.error("Ctap error", exc_info=e)
_fail_pin_error(ctx, e, "PIN error: %s")
return CredentialManagement(ctap2, client_pin.protocol, token)
@creds.command("list")
@click.pass_context
@click.option("-P", "--pin", help="PIN code.")
def creds_list(ctx, pin):
"""
List credentials.
"""
creds = _init_credman(ctx, pin)
for (rp_id, _, user_id, user_name) in _gen_creds(creds):
click.echo(_format_cred(rp_id, user_id, user_name))
@creds.command("delete")
@click.pass_context
@click.argument("query")
@click.option("-P", "--pin", help="PIN code.")
@click.option("-f", "--force", is_flag=True, help="Confirm deletion without prompting")
def creds_delete(ctx, query, pin, force):
"""
Delete a credential.
\b
    QUERY A unique substring match of a credential's RP ID, user ID (hex) or name,
or credential ID.
"""
credman = _init_credman(ctx, pin)
hits = [
(rp_id, cred_id, user_id, user_name)
for (rp_id, cred_id, user_id, user_name) in _gen_creds(credman)
if query.lower() in user_name.lower()
or query.lower() in rp_id.lower()
or user_id.hex().startswith(query.lower())
or query.lower() in _format_cred(rp_id, user_id, user_name)
]
if len(hits) == 0:
cli_fail("No matches, nothing to be done.")
elif len(hits) == 1:
(rp_id, cred_id, user_id, user_name) = hits[0]
if force or click.confirm(
f"Delete credential {_format_cred(rp_id, user_id, user_name)}?"
):
try:
credman.delete_cred(cred_id)
except CtapError as e:
logger.error("Failed to delete resident credential", exc_info=e)
cli_fail("Failed to delete resident credential.")
else:
cli_fail("Multiple matches, make the query more specific.")
@fido.group("fingerprints")
def bio():
"""
Manage fingerprints.
Requires a YubiKey with fingerprint sensor.
Fingerprint management is only available when a FIDO PIN is set on the YubiKey.
\b
Examples:
\b
Register a new fingerprint (providing PIN via argument):
$ ykman fido fingerprints add "Left thumb" --pin 123456
\b
List already stored fingerprints (providing PIN via argument):
$ ykman fido fingerprints list --pin 123456
\b
Delete a stored fingerprint with ID "f691" (PIN will be prompted for):
$ ykman fido fingerprints delete f691
"""
def _init_bio(ctx, pin):
ctap2 = ctx.obj.get("ctap2")
if not ctap2 or "bioEnroll" not in ctap2.info.options:
cli_fail("Biometrics is not supported on this YubiKey.")
pin = _require_pin(ctx, pin, "Biometrics")
client_pin = ClientPin(ctap2)
try:
token = client_pin.get_pin_token(pin, ClientPin.PERMISSION.BIO_ENROLL)
except CtapError as e:
logger.error("Ctap error", exc_info=e)
_fail_pin_error(ctx, e, "PIN error: %s")
return FPBioEnrollment(ctap2, client_pin.protocol, token)
def _format_fp(template_id, name):
return f"{template_id.hex()}{f' ({name})' if name else ''}"
@bio.command("list")
@click.pass_context
@click.option("-P", "--pin", help="PIN code.")
def bio_list(ctx, pin):
"""
    List registered fingerprints.
Lists fingerprints by ID and (if available) label.
"""
bio = _init_bio(ctx, pin)
for t_id, name in bio.enumerate_enrollments().items():
click.echo(f"ID: {_format_fp(t_id, name)}")
@bio.command("add")
@click.pass_context
@click.argument("name")
@click.option("-P", "--pin", help="PIN code.")
def bio_enroll(ctx, name, pin):
"""
Add a new fingerprint.
\b
NAME A short readable name for the fingerprint (eg. "Left thumb").
"""
if len(name.encode()) > 15:
ctx.fail("Fingerprint name must be a maximum of 15 characters")
bio = _init_bio(ctx, pin)
enroller = bio.enroll()
template_id = None
while template_id is None:
click.echo("Place your finger against the sensor now...")
try:
template_id = enroller.capture()
remaining = enroller.remaining
if remaining:
click.echo(f"{remaining} more scans needed.")
except CaptureError as e:
logger.error(f"Capture error: {e.code}")
click.echo("Capture failed. Re-center your finger, and try again.")
except CtapError as e:
logger.error("Failed to add fingerprint template", exc_info=e)
if e.code == CtapError.ERR.FP_DATABASE_FULL:
cli_fail(
"Fingerprint storage full. "
"Remove some fingerprints before adding new ones."
)
elif e.code == CtapError.ERR.USER_ACTION_TIMEOUT:
cli_fail("Failed to add fingerprint due to user inactivity.")
cli_fail(f"Failed to add fingerprint: {e.code.name}")
click.echo("Capture complete.")
bio.set_name(template_id, name)
@bio.command("rename")
@click.pass_context
@click.argument("template_id", metavar="ID")
@click.argument("name")
@click.option("-P", "--pin", help="PIN code.")
def bio_rename(ctx, template_id, name, pin):
"""
Set the label for a fingerprint.
\b
ID The ID of the fingerprint to rename (as shown in "list").
NAME A short readable name for the fingerprint (eg. "Left thumb").
"""
if len(name) >= 16:
ctx.fail("Fingerprint name must be a maximum of 15 characters")
bio = _init_bio(ctx, pin)
enrollments = bio.enumerate_enrollments()
key = bytes.fromhex(template_id)
if key not in enrollments:
cli_fail(f"No fingerprint matching ID={template_id}.")
bio.set_name(key, name)
@bio.command("delete")
@click.pass_context
@click.argument("template_id", metavar="ID")
@click.option("-P", "--pin", help="PIN code.")
@click.option("-f", "--force", is_flag=True, help="Confirm deletion without prompting")
def bio_delete(ctx, template_id, pin, force):
"""
Delete a fingerprint.
Delete a fingerprint from the YubiKey by its ID, which can be seen by running the
"list" subcommand.
"""
bio = _init_bio(ctx, pin)
enrollments = bio.enumerate_enrollments()
try:
key: Optional[bytes] = bytes.fromhex(template_id)
except ValueError:
key = None
if key not in enrollments:
# Match using template_id as NAME
matches = [k for k in enrollments if enrollments[k] == template_id]
if len(matches) == 0:
cli_fail(f"No fingerprint matching ID={template_id}")
elif len(matches) > 1:
cli_fail(
f"Multiple matches for NAME={template_id}. "
"Delete by template ID instead."
)
key = matches[0]
name = enrollments[key]
if force or click.confirm(f"Delete fingerprint {_format_fp(key, name)}?"):
try:
bio.remove_enrollment(key)
except CtapError as e:
logger.error("Failed to delete fingerprint template", exc_info=e)
cli_fail(f"Failed to delete fingerprint: {e.code.name}")
| bsd-2-clause | -6,043,912,827,052,858,000 | 32.010796 | 88 | 0.589346 | false | 3.633002 | false | false | false |
kobotoolbox/kobocat | onadata/apps/logger/tests/test_briefcase_client.py | 1 | 6934 | # coding: utf-8
import os.path
from io import StringIO, BytesIO
from urllib.parse import urljoin
import requests
from django.contrib.auth import authenticate
from django.core.files.storage import get_storage_class
from django.core.files.uploadedfile import UploadedFile
from django.urls import reverse
from django.test import RequestFactory
from django_digest.test import Client as DigestClient
from httmock import urlmatch, HTTMock
from onadata.apps.logger.models import Instance, XForm
from onadata.apps.logger.views import formList, download_xform, xformsManifest
from onadata.apps.main.models import MetaData
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.main.views import profile, download_media_data
from onadata.libs.utils.briefcase_client import BriefcaseClient
from onadata.libs.utils.storage import delete_user_storage
storage = get_storage_class()()
@urlmatch(netloc=r'(.*\.)?testserver$')
def form_list_xml(url, request, **kwargs):
response = requests.Response()
factory = RequestFactory()
req = factory.get(url.path)
req.user = authenticate(username='bob', password='bob')
req.user.profile.require_auth = False
req.user.profile.save()
id_string = 'transportation_2011_07_25'
if url.path.endswith('formList'):
res = formList(req, username='bob')
elif url.path.endswith('form.xml'):
res = download_xform(req, username='bob', id_string=id_string)
elif url.path.find('xformsManifest') > -1:
res = xformsManifest(req, username='bob', id_string=id_string)
elif url.path.find('formid-media') > -1:
data_id = url.path[url.path.rfind('/') + 1:]
res = download_media_data(
req, username='bob', id_string=id_string, data_id=data_id)
response._content = get_streaming_content(res)
else:
res = formList(req, username='bob')
response.status_code = 200
if not response._content:
response._content = res.content
return response
def get_streaming_content(res):
tmp = BytesIO()
for chunk in res.streaming_content:
tmp.write(chunk)
content = tmp.getvalue()
tmp.close()
return content
@urlmatch(netloc=r'(.*\.)?testserver$')
def instances_xml(url, request, **kwargs):
response = requests.Response()
client = DigestClient()
client.set_authorization('bob', 'bob', 'Digest')
res = client.get('%s?%s' % (url.path, url.query))
if res.status_code == 302:
res = client.get(res['Location'])
response.encoding = res.get('content-type')
response._content = get_streaming_content(res)
else:
response._content = res.content
response.status_code = 200
return response
class TestBriefcaseClient(TestBase):
def setUp(self):
TestBase.setUp(self)
self._publish_transportation_form()
self._submit_transport_instance_w_attachment()
src = os.path.join(self.this_directory, "fixtures",
"transportation", "screenshot.png")
uf = UploadedFile(file=open(src, 'rb'), content_type='image/png')
count = MetaData.objects.count()
MetaData.media_upload(self.xform, uf)
self.assertEqual(MetaData.objects.count(), count + 1)
url = urljoin(
self.base_url,
reverse(profile, kwargs={'username': self.user.username})
)
self._logout()
self._create_user_and_login('deno', 'deno')
self.bc = BriefcaseClient(
username='bob', password='bob',
url=url,
user=self.user
)
def test_download_xform_xml(self):
"""
Download xform via briefcase api
"""
with HTTMock(form_list_xml):
self.bc.download_xforms()
is_local = storage.__class__.__name__ == 'FileSystemStorage'
forms_folder_path = os.path.join('deno',
'briefcase',
'forms',
self.xform.id_string)
forms_path = os.path.join(forms_folder_path,
'%s.xml' % self.xform.id_string)
form_media_path = os.path.join(forms_folder_path, 'form-media')
media_path = os.path.join(form_media_path, 'screenshot.png')
if is_local:
does_root_folder_exist = storage.exists(forms_folder_path)
does_media_folder_exist = storage.exists(form_media_path)
else:
# `django-storage.exists()` does not work with folders on AWS
sub_folders, files = storage.listdir(forms_folder_path)
does_root_folder_exist = bool(sub_folders or files)
does_media_folder_exist = 'form-media' in sub_folders
self.assertTrue(does_root_folder_exist)
self.assertTrue(storage.exists(forms_path))
self.assertTrue(does_media_folder_exist)
self.assertTrue(storage.exists(media_path))
"""
Download instance xml
"""
with HTTMock(instances_xml):
self.bc.download_instances(self.xform.id_string)
instance_folder_path = os.path.join(forms_folder_path, 'instances')
if is_local:
does_instances_folder_exist = storage.exists(instance_folder_path)
else:
sub_folders, _ = storage.listdir(forms_folder_path)
does_instances_folder_exist = 'instances' in sub_folders
self.assertTrue(does_instances_folder_exist)
instance = Instance.objects.all()[0]
instance_path = os.path.join(
instance_folder_path, 'uuid%s' % instance.uuid, 'submission.xml')
self.assertTrue(storage.exists(instance_path))
media_file = "1335783522563.jpg"
media_path = os.path.join(
instance_folder_path, 'uuid%s' % instance.uuid, media_file)
self.assertTrue(storage.exists(media_path))
def test_push(self):
with HTTMock(form_list_xml):
self.bc.download_xforms()
with HTTMock(instances_xml):
self.bc.download_instances(self.xform.id_string)
XForm.objects.all().delete()
xforms = XForm.objects.filter(
user=self.user, id_string=self.xform.id_string)
self.assertTrue(xforms.count() == 0)
instances = Instance.objects.filter(
xform__user=self.user, xform__id_string=self.xform.id_string)
self.assertTrue(instances.count() == 0)
self.bc.push()
xforms = XForm.objects.filter(
user=self.user, id_string=self.xform.id_string)
self.assertTrue(xforms.count() == 1)
instances = Instance.objects.filter(
xform__user=self.user, xform__id_string=self.xform.id_string)
self.assertTrue(instances.count() == 1)
def tearDown(self):
# remove media files
for username in ['bob', 'deno']:
delete_user_storage(username)
| bsd-2-clause | 5,599,805,507,777,540,000 | 37.098901 | 78 | 0.62792 | false | 3.760304 | true | false | false |
mmlab/eice | EiCGraphAlgo/core/typeahead.py | 1 | 4270 | '''
Created on 17-sep.-2012
@author: ldevocht
'''
import urllib.parse, lxml.objectify, logging, configparser, re, ujson, requests
from core.resourceretriever import Resourceretriever
from core import resourceretriever, config_search
config = resourceretriever.config
mappings = resourceretriever.mappings
logger = logging.getLogger('pathFinder')
lookup_server = config.get('services', 'lookup_index')
#lookup_solr = Solr(lookup_server)
class TypeAhead:
def __init__(self):
self.session = requests.session()
def dbPediaPrefix(self, prefix):
server = config.get('services', 'lookup')
gateway = '{0}/api/search.asmx/PrefixSearch?MaxHits=7&QueryString={1}'.format(server,prefix)
requestUrl = urllib.parse.quote(gateway, ':/=?<>"*&')
logger.debug('Request %s' % requestUrl)
#rq = grequests.get(requestUrl)
#response = grequests.map([rq])
#raw_output = response[0].content
#raw_output = urllib.request.urlopen(requestUrl,timeout=2).read()
#s = requests.Session()
#s.headers.update({'Connection': 'close'})
r = self.session.get(requestUrl)
#(s.headers)
#print(r.headers)
raw_output = r.content
root = lxml.objectify.fromstring(raw_output)
results = list()
if hasattr(root, 'Result'):
logger.debug('Found %s results' % len(root.Result))
for result in root.Result:
if prefix.lower() in result.Label[0].text.lower() and hasattr(result.Classes, 'Class'):
klasses = result.Classes.Class
if hasattr(klasses, 'Label'):
klasse = klasses
else:
klasse = klasses[0]
item = dict()
item['label'] = result.Label[0].text
item['category']=klasse.Label.text.capitalize()
item['uri']=result.URI[0].text
logger.debug('Fetching local hits for %s' % len(item['uri']))
local_hits = Resourceretriever().getResource(item['uri'].strip("<>"),False)
if local_hits:
logger.debug('Found %s hits' % len(local_hits))
n_hits = 0
if local_hits:
for triple in local_hits:
if local_hits[triple][1] not in config_search.blacklist:
n_hits += 1
if n_hits > 8:
results.append(item)
else:
logger.debug('Found nothing for prefix %s' % prefix)
return results
def prefix(self, prefix,lookup_server=lookup_server):
results = list()
if len(prefix) > 2:
logger.debug('looking up %s on dbpedia lookup' % prefix)
results += self.dbPediaPrefix(prefix)
logger.debug('looking up %s on local index' % prefix)
if config.has_option('services','lookup_index'):
#query={'q':'lookup:"{0}*"'.format(re.escape(prefix).lower()),'fl':'url label type','timeAllowed':'100','rows':'7'}
#response = lookup_solr.search(**query)
query = '%sselect?q=lookup:"%s*"&fl=url label type&wt=json' % (lookup_server,re.escape(prefix).lower())
rsp = self.session.get(query)
#response = grequests.map([rq])
response = ujson.decode(rsp.content)['response']
if len(response['docs']) > 0:
for doc in response['docs']:
item = dict()
item['category']=doc['type'].split(' ')[0].rsplit('/')[-1].rsplit('#')[-1].strip('<>".')
if item['category'] == 'Agent':
item['category'] = 'Author'
item['uri']=doc['url']
item['label']=(doc['label'].split('.')[0].split('"^^')[0]).strip('\" <>.')
results.append(item)
logger.debug('done finding matches for %s' % prefix)
return results
#print(TypeAhead().prefix('Selver'))
#print(TypeAhead().dbPediaPrefix('Selver')) | agpl-3.0 | -1,328,228,545,254,396,400 | 45.423913 | 131 | 0.52623 | false | 4.14161 | true | false | false |
gaeun/open-event-orga-server | app/api/helpers/utils.py | 1 | 7209 | import json
from hashlib import md5
from flask import request
from flask.ext.restplus import Resource as RestplusResource
from flask_restplus import Model, fields, reqparse
from app.helpers.data import update_version
from app.models.event import Event as EventModel
from .error_docs import (
notfound_error_model,
notauthorized_error_model,
validation_error_model,
invalidservice_error_model,
)
from .helpers import get_object_list, get_object_or_404, get_object_in_event, \
create_model, validate_payload, delete_model, update_model, \
handle_extra_payload, get_paginated_list, fix_attribute_names
DEFAULT_PAGE_START = 1
DEFAULT_PAGE_LIMIT = 20
POST_RESPONSES = {
400: ('Validation error', validation_error_model),
401: ('Authentication failure', notauthorized_error_model),
404: ('Event does not exist', notfound_error_model),
201: 'Resource created successfully'
}
PUT_RESPONSES = {
400: ('Validation Error', validation_error_model),
401: ('Authentication failure', notauthorized_error_model),
404: ('Object/Event not found', notfound_error_model)
}
SERVICE_RESPONSES = {
404: ('Service not found', notfound_error_model),
400: ('Service does not belong to event', invalidservice_error_model),
}
# Parameters for a paginated response
PAGE_PARAMS = {
'start': {
'description': 'Serial number to start from',
'type': int,
'default': DEFAULT_PAGE_START
},
'limit': {
'description': 'Limit on the number of results',
'type': int,
'default': DEFAULT_PAGE_LIMIT
},
}
# ETag Header (required=False by default)
ETAG_HEADER_DEFN = [
'If-None-Match', 'ETag saved by client for cached resource'
]
# Base Api Model for a paginated response
PAGINATED_MODEL = Model('PaginatedModel', {
'start': fields.Integer,
'limit': fields.Integer,
'count': fields.Integer,
'next': fields.String,
'previous': fields.String
})
# Custom Resource Class
class Resource(RestplusResource):
def dispatch_request(self, *args, **kwargs):
resp = super(Resource, self).dispatch_request(*args, **kwargs)
# ETag checking.
if request.method == 'GET':
old_etag = request.headers.get('If-None-Match', '')
# Generate hash
data = json.dumps(resp)
new_etag = md5(data).hexdigest()
if new_etag == old_etag:
# Resource has not changed
return '', 304
else:
# Resource has changed, send new ETag value
return resp, 200, {'ETag': new_etag}
elif request.method == 'POST':
# Grab just the response data
# Exclude status code and headers
resp_data = resp[0]
data = json.dumps(resp_data)
etag = md5(data).hexdigest()
# Add ETag to response headers
resp[2].update({'ETag': etag})
return resp
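# --- Illustrative sketch (assumption, not part of the original module) -------
# How a client is expected to use the ETag handling implemented above: cache
# the ETag from the first GET and send it back in If-None-Match; a 304 reply
# means the cached representation is still valid. The helper below is a
# hypothetical example using the `requests` library.
def _example_conditional_get(url):
    import requests
    first = requests.get(url)
    cached_etag = first.headers.get('ETag')
    second = requests.get(url, headers={'If-None-Match': cached_etag})
    return second.status_code  # 304 when the resource has not changed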
# Base class for Paginated Resource
class PaginatedResourceBase():
"""
Paginated Resource Helper class
This includes basic properties used in the class
"""
parser = reqparse.RequestParser()
parser.add_argument('start', type=int, default=DEFAULT_PAGE_START)
parser.add_argument('limit', type=int, default=DEFAULT_PAGE_LIMIT)
# DAO for Models
class BaseDAO:
"""
DAO for a basic independent model
"""
version_key = None
is_importing = False # temp key to set to True when an import operation is underway
def __init__(self, model, post_api_model=None, put_api_model=None):
self.model = model
self.post_api_model = post_api_model
self.put_api_model = put_api_model if put_api_model else post_api_model
def get(self, id_):
return get_object_or_404(self.model, id_)
def list(self, **kwargs):
return get_object_list(self.model, **kwargs)
def paginated_list(self, url=None, args={}, **kwargs):
return get_paginated_list(self.model, url=url, args=args, **kwargs)
def create(self, data, validate=True):
if validate:
data = self.validate(data, self.post_api_model)
item = create_model(self.model, data)
self.update_version(item.id)
return item
def update(self, id_, data, validate=True):
if validate:
data = self.validate_put(data, self.put_api_model)
item = update_model(self.model, id_, data)
self.update_version(id_)
return item
def delete(self, id_):
item = delete_model(self.model, id_)
self.update_version(id_)
return item
def validate(self, data, model=None, check_required=True):
if not model:
model = self.post_api_model
if model:
data = handle_extra_payload(data, model)
validate_payload(data, model, check_required=check_required)
data = fix_attribute_names(data, model)
return data
def validate_put(self, data, model=None):
"""
Abstraction over validate with check_required set to False
"""
return self.validate(data, model=model, check_required=False)
def update_version(self, event_id):
"""
Update version of the component of the event
"""
if self.version_key:
update_version(event_id, False, self.version_key)
# Helper functions
def _del(self, data, fields):
"""
Safe delete fields from payload
"""
data_copy = data.copy()
for field in fields:
if field in data:
del data_copy[field]
return data_copy
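# --- Illustrative sketch (assumption, not part of the original module) -------
# A minimal, hypothetical example of how BaseDAO is meant to be specialised:
# a concrete DAO only supplies the database model, the API models used for
# validation and, optionally, a version key. The arguments below are
# placeholders, not real project objects.
def _example_dao_usage(session_model, session_post_model, payload):
    class SessionDAO(BaseDAO):
        version_key = 'sessions_ver'  # hypothetical version key
    dao = SessionDAO(session_model, post_api_model=session_post_model)
    # validates the payload against the API model, saves it and bumps the version
    return dao.create(payload)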
# DAO for Service Models
class ServiceDAO(BaseDAO):
"""
Data Access Object for service models like microlocations,
speakers and so.
"""
def get(self, event_id, sid):
return get_object_in_event(self.model, sid, event_id)
def list(self, event_id, **kwargs):
# Check if an event with `event_id` exists
get_object_or_404(EventModel, event_id)
return get_object_list(self.model, event_id=event_id, **kwargs)
def paginated_list(self, url=None, args={}, **kwargs):
return get_paginated_list(self.model, url=url, args=args, **kwargs)
def create(self, event_id, data, url, validate=True):
if validate:
data = self.validate(data)
item = create_model(self.model, data, event_id=event_id)
self.update_version(event_id)
# Return created resource with a 201 status code and its Location
# (url) in the header.
resource_location = url + '/' + str(item.id)
return item, 201, {'Location': resource_location}
def update(self, event_id, service_id, data, validate=True):
if validate:
data = self.validate_put(data)
item = update_model(self.model, service_id, data, event_id)
self.update_version(event_id)
return item
def delete(self, event_id, service_id):
item = delete_model(self.model, service_id, event_id=event_id)
self.update_version(event_id)
return item
# store task results in case of testing
# state and info
TASK_RESULTS = {}
| gpl-3.0 | 3,571,529,669,562,688,000 | 30.207792 | 88 | 0.625468 | false | 3.80623 | false | false | false |
wradlib/wradlib | wradlib/tests/test_adjust.py | 1 | 7916 | #!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import numpy as np
import pytest
from wradlib import adjust
class Data:
# Arguments to be used throughout all test classes
raw_x, raw_y = np.meshgrid(np.arange(4).astype("f4"), np.arange(4).astype("f4"))
raw_coords = np.vstack((raw_x.ravel(), raw_y.ravel())).T
obs_coords = np.array([[1.0, 1.0], [2.0, 1.0], [1.0, 3.5], [3.5, 3.0]])
raw = np.array(
[
[
1.0,
2.0,
1.0,
0.0,
1.0,
2.0,
1.0,
2.0,
1.0,
0.0,
0.0,
3.0,
4.0,
0.0,
4.0,
0.0,
],
[
1.0,
2.0,
1.0,
0.0,
1.0,
2.0,
1.0,
2.0,
1.0,
0.0,
0.0,
3.0,
4.0,
0.0,
4.0,
0.0,
],
]
).T
obs = np.array([[2.0, 3, 0.0, 4.0], [2.0, 3, 0.0, 4.0]]).T
nnear_raws = 2
mingages = 3
class TestAdjustBase(Data):
def test___init__(self):
pass
def test__checkip(self):
pass
def test__check_shape(self):
pass
def test___call__(self):
pass
def test__get_valid_pairs(self):
pass
def test_xvalidate(self):
pass
class TestAdjustAddTest(Data):
def test_AdjustAdd_1(self):
adj = adjust.AdjustAdd(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array(
[
[1.62818784, 1.62818784],
[2.75926679, 2.75926679],
[2.09428144, 2.09428144],
[1.1466651, 1.1466651],
[1.51948941, 1.51948941],
[2.5, 2.5],
[2.5, 2.5],
[3.27498305, 3.27498305],
[1.11382822, 1.11382822],
[0.33900645, 0.33900645],
[0.89999998, 0.89999998],
[4.52409637, 4.52409637],
[3.08139533, 3.08139533],
[0.0, 0.0],
[3.99180328, 3.99180328],
[2.16913891, 2.16913891],
]
)
assert np.allclose(res, shouldbe)
# test in case only one dataset is passed
res = adj(self.obs[:, 0], self.raw[:, 0])
assert np.allclose(res, shouldbe[:, 0])
class TestAdjustMultiplyTest(Data):
def test_AdjustMultiply_1(self):
adj = adjust.AdjustMultiply(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array(
[
[1.44937706, 1.44937706],
[3.04539442, 3.04539442],
[1.74463618, 1.74463618],
[0.0, 0.0],
[1.37804615, 1.37804615],
[2.66666675, 2.66666675],
[2.0, 2.0],
[3.74106812, 3.74106812],
[1.17057478, 1.17057478],
[0.0, 0.0],
[0.0, 0.0],
[6.14457822, 6.14457822],
[2.43439031, 2.43439031],
[0.0, 0.0],
[4.60765028, 4.60765028],
[0.0, 0.0],
]
)
assert np.allclose(res, shouldbe)
# test in case only one dataset is passed
res = adj(self.obs[:, 0], self.raw[:, 0])
assert np.allclose(res, shouldbe[:, 0])
class TestAdjustMixed(Data):
def test_AdjustMixed_1(self):
adj = adjust.AdjustMixed(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array(
[
[1.51427719, 1.51427719],
[2.95735525, 2.95735525],
[1.85710269, 1.85710269],
[0.36806121, 0.36806121],
[1.43181512, 1.43181512],
[2.61538471, 2.61538471],
[2.15384617, 2.15384617],
[3.59765723, 3.59765723],
[1.18370627, 1.18370627],
[0.15027952, 0.15027952],
[0.30825174, 0.30825174],
[5.63558862, 5.63558862],
[2.49066845, 2.49066845],
[-0.29200733, -0.29200733],
[4.31646909, 4.31646909],
[0.67854041, 0.67854041],
]
)
assert np.allclose(res, shouldbe)
# test in case only one dataset is passed
res = adj(self.obs[:, 0], self.raw[:, 0])
assert np.allclose(res, shouldbe[:, 0])
class TestAdjustMFB(Data):
raw_coords = np.array([[0.0, 0.0], [1.0, 1.0]])
obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
raw = np.array([2.0, 2.0])
obs = np.array([4.0, 4.0])
mingages = 0
mfb_args = dict(method="mean")
def test_AdjustMFB_1(self):
adj = adjust.AdjustMFB(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
mfb_args=self.mfb_args,
)
res = adj(self.obs, self.raw)
shouldbe = np.array([4.0, 4.0])
assert np.allclose(res, shouldbe)
adj = adjust.AdjustMFB(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
mfb_args=dict(method="median"),
)
adj(self.obs, self.raw)
adj = adjust.AdjustMFB(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
mfb_args=dict(method="linregr", minslope=1.0, minr="0.7", maxp=0.5),
)
adj(self.obs, self.raw)
class TestAdjustNone(Data):
raw_coords = np.array([[0.0, 0.0], [1.0, 1.0]])
obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
raw = np.array([2.0, 2.0])
obs = np.array([4.0, 4.0])
mingages = 0
mfb_args = dict(method="mean")
def test_AdjustNone_1(self):
adj = adjust.AdjustNone(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array([2.0, 2.0])
assert np.allclose(res, shouldbe)
class TestGageOnly(Data):
raw_coords = np.array([[0.0, 0.0], [1.0, 1.0]])
obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
raw = np.array([2.0, 2.0])
obs = np.array([4.0, 4.0])
mingages = 0
mfb_args = dict(method="mean")
def test_GageOnly_1(self):
adj = adjust.GageOnly(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array([4.0, 4.0])
assert np.allclose(res, shouldbe)
class TestAdjustHelper:
def test__get_neighbours_ix(self):
pass
def test__get_statfunc(self):
adjust._get_statfunc("median")
adjust._get_statfunc("best")
with pytest.raises(NameError):
adjust._get_statfunc("wradlib")
def test_best(self):
x = 7.5
y = np.array([0.0, 1.0, 0.0, 1.0, 0.0, 7.7, 8.0, 8.0, 8.0, 8.0])
assert adjust.best(x, y) == 7.7
| mit | 893,179,589,196,588,400 | 27.47482 | 84 | 0.456544 | false | 3.07776 | true | false | false |
maxivanoff/fftoolbox-app | q/fftoolbox/multipole.py | 1 | 12213 | import logging
import numpy as np
from copy import deepcopy
from numpy.linalg import norm
from scipy.special import sph_harm as Y
mult_logger = logging.getLogger('multipole')
def Rlm(l, m, r, theta, phi):
return r**l * np.sqrt(4 * np.pi / (2 * l + 1)) * Y(m, l, theta, phi)
def Rlmc(l, m, r, theta, phi):
return r**l * np.sqrt(4 * np.pi / (2 * l + 1)) * Ylmc(l, m, theta, phi)
def Rlms(l, m, r, theta, phi):
return r**l * np.sqrt(4 * np.pi / (2 * l + 1)) * Ylms(l, m, theta, phi)
def Ylmc(l, m, theta, phi):
#v = np.sqrt(0.5) * (np.conj(Y(m, l, theta, phi)) + Y(m, l, theta, phi))
v = np.sqrt(0.5) * (Y(-m, l, theta, phi) + (-1)**m*Y(m, l, theta, phi))
#v = np.sqrt(0.5) * ((-1)**m*Y(-m, l, theta, phi) + Y(m, l, theta, phi))
if abs(v.imag) > 0.0001: raise ValueError("Non-zero imaginary part in Ylmc")
return v.real
def Ylms(l, m, theta, phi):
#v = 1j * np.sqrt(0.5) * (np.conj(Y(m, l, theta, phi)) - Y(m, l, theta, phi))
#v = 1j * np.sqrt(0.5) * (Y(-m, l, theta, phi) - (-1)**m*Y(m, l, theta, phi))
v = 1j * np.sqrt(0.5) * (-(-1)**m*Y(-m, l, theta, phi) + Y(m, l, theta, phi))
if abs(v.imag) > 0.0001: raise ValueError("Non-zero imaginary part in Ylms")
return v.real
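# --- Illustrative sketch (assumption, not part of the original module) -------
# Quick numerical sanity check of the conventions used above: scipy's sph_harm
# takes (m, l, azimuth, polar), so Y(0, 1, theta, phi) must equal
# sqrt(3/(4*pi)) * cos(phi). The angle values below are arbitrary examples.
def _example_harmonics_check(theta=0.3, phi=1.1):
    y10 = Y(0, 1, theta, phi)
    expected = np.sqrt(3.0 / (4.0 * np.pi)) * np.cos(phi)
    return abs(y10.real - expected) < 1e-12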
class GroupOfAtoms(object):
def __init__(self, name=None):
self.name = name
self.atoms = list()
self.i = -1
    def build_Pymol_rep(self, filename, vmax=1., r_sphere=0.2):
        s = 'from pymol.cgo import *\nfrom pymol import cmd\nobj = [ BEGIN, LINES, ]\n'
        for site in self.sites:
            q = self.molecule.ff.charges[site.name]
            if q is None:
                s_color = 'x = 0.0\ncolor = [COLOR, 1-x, 1-x, 1]\n'
            elif q >= 0:
                s_color = 'x = %f\ncolor = [COLOR, 1, 1-x, 1-x]\n' % (q/vmax)
            else:
                s_color = 'x = %f\ncolor = [COLOR, 1-x, 1-x, 1]\n' % (-q/vmax)
            s_sphere = 'sphere = [ SPHERE, %f, %f, %f, %f]\n' % (site.x, site.y, site.z, r_sphere)
            s = s + s_color + s_sphere + 'obj += color+sphere\n'
        s = s + 'obj.append(END)\ncmd.load_cgo(obj,"cgo01")\n'
        file = open(filename, 'w')
        file.write(s)
        file.close()
def set_sym_sites(self):
sites = {}
self.sym_sites = []
for i, name in enumerate(self.sites_names_eq):
if not name in sites:
sites[name] = i
self.sym_sites.append(sites[name])
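        # e.g. for equivalent site names ['O', 'H', 'H'] this yields
        # sym_sites = [0, 1, 1]: every symmetry-equivalent site points to the
        # first index that carries the same name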
def get_coordinates(self):
crds = np.zeros((len(self.sites), 3))
for i, s in enumerate(self.sites):
crds[i][:] = s.coordinates[:]
return crds
def get_sites(self, name):
return filter(lambda s: s.name==name, self.sites)
def get_atoms_by_element(self, element):
return filter(lambda a: a.element==element, self.atoms)
def get_atom(self, index):
return next(a for a in self.atoms if a.index==index)
@property
def atoms_names_noneq(self):
return [a.name for a in self.atoms_noneq]
@property
def atoms_names_eq(self):
return [a.name for a in self.atoms]
@property
def sites_names_noneq(self):
return [s.name for s in self.sites_noneq]
@property
def sites_names(self):
return self.sites_names_noneq
@property
def sites_names_eq(self):
return [s.name for s in self.sites]
@property
def sites(self):
sites = []
for atom in self:
sites += atom.sites
return sites
@property
def sites_noneq(self):
sites = []
for s in self.sites:
if not s.name in [ss.name for ss in sites]:
sites.append(s)
return sites
@property
def atoms_noneq(self):
atoms = []
for a in self.atoms:
if not a.name in [aa.name for aa in atoms]:
atoms.append(a)
return atoms
def __iter__(self):
return self
def next(self):
if self.i < len(self.atoms)-1:
self.i += 1
return self.atoms[self.i]
else:
self.i = -1
raise StopIteration
class Multipole(GroupOfAtoms):
"""
This is Multipole
"""
def __init__(self, name=None, origin=None):
GroupOfAtoms.__init__(self, name)
self.origin = origin
def set_multipole_matrix(self, multipoles=('cartesian', 2)):
if multipoles[0] == 'cartesian':
multipole = Cartesian(multipoles[1], self.get_coordinates(), self.sym_sites, self.origin)
elif multipoles[0] == 'spherical':
multipole = Spherical(multipoles[1], self.get_coordinates(), self.sym_sites, self.origin)
self.l = multipoles[1]
self.multipoles_names = multipole.names
self.QtoM = multipole.rotation_matrix_direct
self.QtoM_normed = np.zeros(self.QtoM.shape)
for i, u in enumerate(self.QtoM):
self.QtoM_normed[i,:] = u/np.linalg.norm(u)
self.MtoQ = multipole.rotation_matrix_inverse
def charges_to_multipoles(self, charges):
Q = np.array([])
for name in self.sites_names_noneq:
Q = np.append(Q, charges[name])
M = np.dot(self.QtoM, Q)
multipoles = {}
for multipole, m_value in zip(self.multipoles_names, M):
multipoles[multipole] = m_value
return multipoles
def multipoles_to_charges(self, multipoles):
if self.MtoQ is None:
raise ValueError('Cannot convert multipoles to charges')
M = np.array([])
for multipole in self.multipoles_names:
M = np.append(M, multipoles[multipole])
Q = np.dot(self.MtoQ, M)
charges = {}
for name, q_value in zip(self.sites_names_noneq, Q):
charges[name] = q_value
return charges
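# --- Illustrative sketch (assumption, not part of the original module) -------
# Round-trip check for the conversions above: charges -> multipoles -> charges
# should reproduce the input whenever the inverse matrix MtoQ exists. `mp` is
# any Multipole whose matrix was set up with set_multipole_matrix().
def _example_charge_multipole_round_trip(mp, charges):
    multipoles = mp.charges_to_multipoles(charges)
    recovered = mp.multipoles_to_charges(multipoles)
    return all(abs(charges[name] - recovered[name]) < 1e-8 for name in charges)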
class MultipoleMatrix(object):
def __init__(self, sym_sites=None, formula=None):
# build matrix
rotation_matrix = np.zeros((len(self.names), len(sym_sites)))
for i, m_name in enumerate(self.names):
rotation_matrix[i][:] = formula.u(m_name).real
# reduce matrix
self.rotation_matrix_direct = np.zeros((len(self.names), max(sym_sites)+1))
for i, _ in enumerate(self.names):
self.rotation_matrix_direct[i] = np.bincount(sym_sites, weights=rotation_matrix[i])
try:
self.rotation_matrix_inverse = np.linalg.inv(self.rotation_matrix_direct)
except np.linalg.LinAlgError:
self.rotation_matrix_inverse = None
mult_logger.debug("Multipole conversion matrix is set up.\nmultipoles = %s; total number of components: %i \nQ to M matrix: %s" % (self.names, len(self.names), self.rotation_matrix_direct.shape))
class Spherical(MultipoleMatrix):
def __init__(self, l=None, coordinates=None, sym_sites=None, origin=None):
try:
self.names = []
for ll in xrange(l):
for mm in xrange(ll+1):
if mm==0:
self.names.append('%i%i' % (ll, mm))
else:
self.names.append('%i%ic' % (ll, mm))
self.names.append('%i%is' % (ll, mm))
except TypeError:
self.names = l
#cartesian to spherical (r, theta, phi) = (r, azimuth, polar)
def arctan(a,b):
if a==b==0:
return 0.
if b==0:
return (-1)*np.pi*np.sign(a)/2
else:
return np.arctan(a/b)
spherical = np.zeros(coordinates.shape)
x, y, z = coordinates[:,0], coordinates[:,1], coordinates[:,2]
#r = np.sqrt(x**2 + y**2 + z**2)
#phi = np.arccos(z/r)
#theta = np.array([])
#for xx, yy in zip(x,y):
# if yy>=0 and xx>0:
# s = 0
# if xx<=0:
# s = np.pi
# if xx>0 and yy<0:
# s = 2*np.pi
# if xx==0 and yy==0:
# s = 0
# theta = np.append(theta, arctan(yy,xx) + s)
#spherical[:,0] = r
#spherical[:,1] = theta
#spherical[:,2] = phi
xy2 = x**2 + y**2 # x2 + y2
spherical[:,0] = np.sqrt(xy2 + z**2) # r2 = x2 + y2 + z2
spherical[:,1] = np.arctan2(y, x) # theta = arctan(y/x)
spherical[:,2] = np.arctan2(np.sqrt(xy2), z) # phi = arctan(xy/z)
formula = SphericalFormulas(spherical, origin)
MultipoleMatrix.__init__(self, sym_sites, formula)
class Cartesian(MultipoleMatrix):
def __init__(self, l=None, coordinates=None, sym_sites=None, origin=None):
self.names = []
for i in xrange(l+1):
self.names += self.l_to_names(i)
formula = CartesianFormulas(coordinates, origin)
MultipoleMatrix.__init__(self, sym_sites, formula)
def l_to_names(self, l):
if l == 0: return ['charge']
if l == 1: return 'X Y Z'.split()
if l == 2: return 'XX YY ZZ XY XZ YZ'.split()
class Formulas(dict):
def __init__(self, coordinates=None, origin=None):
self.coordinates = coordinates
if origin == None:
self.origin = np.zeros(3)
else:
self.origin = origin
dict.__init__(self)
class SphericalFormulas(Formulas):
def __init__(self, coordinates=None, origin=None):
Formulas.__init__(self, coordinates, origin)
self[0] = Rlm
self['c'] = Rlmc
self['s'] = Rlms
def u(self, m_name):
l, m = [int(t) for t in m_name[:2]]
try:
x = m_name[2]
except IndexError:
x = 0
u = np.array([])
for crds in self.coordinates:
r, theta, phi = crds
u = np.append(u, self[x](l, m, r, theta, phi))
return u
class CartesianFormulas(Formulas):
def __init__(self, coordinates=None, origin=None):
Formulas.__init__(self, coordinates, origin)
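        # keys are the number of Cartesian indices in a component name (0 for 'charge', 1 for dipole, ...)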
self[0] = self.total_charge
self[1] = self.dipole
self[2] = self.quadrupole
        self[3] = self.octapole
        self[4] = self.hexadecapole
def name_to_num(self, m_name):
def convert(a):
if a == 'X': return 0
if a == 'Y': return 1
if a == 'Z': return 2
if m_name == 'charge':
return
else:
return [convert(a) for a in m_name]
def u(self, m_name):
components = self.name_to_num(m_name)
if m_name == 'charge': c = 0
else: c = len(m_name)
u = np.array([])
for crds in self.coordinates:
u = np.append(u, self[c](crds, components))
return u
def total_charge(self, crds, components):
return 1.
def dipole(self, crds, components):
c = components[0]
return crds[c] - self.origin[c]
def quadrupole(self, crds, components):
a2 = np.sum(crds**2)
m, n = components
am = crds[m] - self.origin[m]
an = crds[n] - self.origin[n]
return 3.0 / 2.0 * am * an - 0.5 * a2 * self.delta(m,n)
def octapole(self, crds, components):
m, n, k = components
a2 = np.sum(crds**2)
am = crds[m] - self.origin[m]
an = crds[n] - self.origin[n]
ak = crds[k] - self.origin[k]
        return 5. / 2. * am * an * ak - 0.5 * a2 * (am * self.delta(n,k) + an * self.delta(m,k) + ak * self.delta(m,n))
def hexadecapole(self, crds, components):
        m, n, k, l = components
        a2 = np.sum(crds**2)
am = crds[m] - self.origin[m]
an = crds[n] - self.origin[n]
ak = crds[k] - self.origin[k]
al = crds[l] - self.origin[l]
return 1. / (1. * 2. * 3. * 4.) * (105. * am * an * ak * al - 15. * a2 * (am * an * self.delta(k,l) + am * ak * self.delta(n,l) + am * al * self.delta(n,k) + an * ak * self.delta(m,l) + an * al * self.delta(m,k) + ak * al * self.delta(m,n)) + 3. * a2**2 * (self.delta(m,n) * self.delta(k,l) + self.delta(m,k) * self.delta(n,l) + self.delta(m,l) * self.delta(n,k)))
def delta(self, i, j):
if i==j: return 1
else: return 0
| gpl-2.0 | 6,099,882,326,194,920,000 | 33.794872 | 372 | 0.524032 | false | 3.106843 | false | false | false |
woobe/h2o | py/testdir_multi_jvm/test_rf_1ktrees_job_cancel_many_fvec.py | 1 | 2219 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_jobs, h2o_rf
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(3)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_1ktrees_job_cancel_many_fvec(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
# always match the run below!
# just using one file for now
for x in [1000]:
shCmdString = "perl " + h2o.find_file("syn_scripts/parity.pl") + " 128 4 "+ str(x) + " quad " + SYNDATASETS_DIR
h2o.spawn_cmd_and_wait('parity.pl', shCmdString.split(),4)
csvFilename = "parity_128_4_" + str(x) + "_quad.data"
csvFilename = "parity_128_4_" + str(1000) + "_quad.data"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
hex_key = csvFilename + ".hex"
parseResult = h2o_cmd.parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30)
print "kick off jobs, then cancel them"
for trial in range (1,5):
# random 0 or 1 delay
delay = random.uniform(0,1)
time.sleep(delay)
h2o.verboseprint("Trial", trial)
start = time.time()
h2o_cmd.runRF(parseResult=parseResult, trees=trial, max_depth=50, rfView=False, noPoll=True, timeoutSecs=30, retryDelaySecs=0.25)
print "RF #", trial, "started on ", csvFilename, 'took', time.time() - start, 'seconds'
### h2o_jobs.cancelAllJobs(timeoutSecs=10)
h2o.check_sandbox_for_errors()
# do one last good one
rfView = h2o_cmd.runRF(parseResult=parseResult, trees=trial, max_depth=50, timeoutSecs=600, retryDelaySecs=3)
(classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView, ntree=trial)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | -7,085,951,831,473,397,000 | 37.258621 | 141 | 0.604777 | false | 3.143059 | false | false | false |
juliosmelo/soldo | utils/settings.py | 1 | 1331 | PGS_TOKEN = 'C888EE7F420841CF92D0B0063EDDFC7D'
PGS_EMAIL = '[email protected]'
# from datetime import datetime
# from datetime import date
# from datetime import timedelta
# dates = [d0]
# dates_two = list()
# def date_paginator(x, y):
# print x, y
# if pages == 1 and pages_mods == 0:
# _date = d0 + timedelta(days=30)
# date_paginator(d0, _date)
# else:
# for i in range(pages):
# _date = d0 + timedelta(days=30 * (i + 1))
# dates.append(_date)
# if pages_mods > 0 and pages_mods < 30:
# new_date = dates[-1:][0] + timedelta(days=pages_mods)
# dates.append(new_date)
# if dates:
# for i in range(len(dates) - 1):
# date_paginator(dates[i], dates[i + 1])
# class DateRangePagination:
# """docstring for DateRangePagination"""
# def __init__(self, initial_date):
# self.initial_date = datetime.strptime(initial_date, "%Y-%m-%d").date()
# self.dates = [self.initial_date]
# self.date_limit = datetime.now().date()
# def get_ranges(self):
# print self.initial_date
# def set_ranges():
# d0 = date(2008, 8, 18)
# d1 = date(2008, 11, 18)
# delta = d1 - d0
# pages = delta.days / 30
# pages_mods = delta.days % 30
# pass
# def get_days(self,):
# pass | mit | 1,225,591,867,755,594,800 | 23.666667 | 80 | 0.574005 | false | 2.819915 | false | false | false |
MichaelAnckaert/Hermes | message.py | 1 | 1631 | """Message functionality for Hermes"""
from datetime import datetime
import json
__author__ = "Michael Anckaert"
__copyright__ = "Copyright 2012, Michael Anckaert"
__credits__ = ["Michael Anckaert"]
__license__ = "GPLv3"
__version__ = "0.0.1"
__maintainer__ = "Michael Anckaert"
__email__ = "[email protected]"
__status__ = "Development"
class MessageType(object):
types = {}
def __init__(self, name):
if name in MessageType.types:
print " W: Message type '{0}' already exists".format(name)
raise ValueError("Message type '{}' already exists.".format(name))
self.name = name
self.rest_enabled = False
        MessageType.types[name] = self
def enable_rest(self):
self.rest_enabled = True
def disable_rest(self):
self.rest_enabled = False
    @staticmethod
    def get_message_type(name):
        if name in MessageType.types:
            return MessageType.types[name]
        else:
            return None
class Message(object):
def __init__(self, type, content):
if MessageType.get_message_type(type):
self.type = type
self.content = content
self.id = None
self.status = "UNKNOWN"
self.received = datetime.now().strftime("%d-%m-%Y %H:%M")
self.response = None
return
print " W: Unknown message type '{0}' ".format(type)
raise ValueError("Wrong message type!")
def __str__(self):
return json.dumps({'message': {'id': self.id, 'status': self.status, 'received': self.received, 'response': self.response}})
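# Minimal usage sketch ('note' is a hypothetical message type registered at runtime):
#   MessageType('note')
#   msg = Message('note', 'hello world')
#   print msg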
| gpl-3.0 | 2,294,136,335,230,464,300 | 27.12069 | 132 | 0.591048 | false | 3.828638 | false | false | false |
DedMemez/ODS-August-2017 | suit/SuitDNA.py | 1 | 7589 | # toontown.suit.SuitDNA
from panda3d.core import Datagram, DatagramIterator, VBase4
import random
from direct.directnotify.DirectNotifyGlobal import *
from toontown.toonbase import TTLocalizer, ToontownGlobals
import random
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
notify = directNotify.newCategory('SuitDNA')
suitHeadTypes = ['f',
'p',
'ym',
'mm',
'ds',
'hh',
'cr',
'tbc',
'bf',
'b',
'dt',
'ac',
'bs',
'sd',
'le',
'bw',
'sc',
'pp',
'tw',
'bc',
'nc',
'mb',
'ls',
'rb',
'cc',
'tm',
'nd',
'gh',
'ms',
'tf',
'm',
'mh',
'sk',
'cm',
'vp',
'db',
'kc',
'ss',
'iw',
'ru']
suitATypes = ['ym',
'hh',
'tbc',
'dt',
'bs',
'le',
'bw',
'pp',
'nc',
'rb',
'nd',
'tf',
'm',
'mh',
'vp',
'ss',
'ru']
suitBTypes = ['p',
'ds',
'b',
'ac',
'sd',
'bc',
'ls',
'tm',
'ms',
'kc',
'iw']
suitCTypes = ['f',
'mm',
'cr',
'bf',
'sc',
'tw',
'mb',
'cc',
'gh',
'sk',
'cm',
'db']
suitDepts = ['c',
'l',
'm',
's',
't']
suitDeptZones = [ToontownGlobals.BossbotHQ,
ToontownGlobals.LawbotHQ,
ToontownGlobals.CashbotHQ,
ToontownGlobals.SellbotHQ,
ToontownGlobals.TechbotHQ]
suitDeptFullnames = {'c': TTLocalizer.Bossbot,
'l': TTLocalizer.Lawbot,
'm': TTLocalizer.Cashbot,
's': TTLocalizer.Sellbot,
't': TTLocalizer.Techbot}
suitDeptFullnamesP = {'c': TTLocalizer.BossbotP,
'l': TTLocalizer.LawbotP,
'm': TTLocalizer.CashbotP,
's': TTLocalizer.SellbotP,
't': TTLocalizer.TechbotP}
suitDeptFilenames = {'c': 'boss',
'l': 'law',
'm': 'cash',
's': 'sell',
't': 'tech'}
suitDeptModelPaths = {'c': '**/CorpIcon',
0: '**/CorpIcon',
'l': '**/LegalIcon',
1: '**/LegalIcon',
'm': '**/MoneyIcon',
2: '**/MoneyIcon',
's': '**/SalesIcon',
3: '**/SalesIcon',
't': '**/TechIcon',
4: '**/TechIcon'}
corpPolyColor = VBase4(0.95, 0.75, 0.75, 1.0)
legalPolyColor = VBase4(0.75, 0.75, 0.95, 1.0)
moneyPolyColor = VBase4(0.65, 0.95, 0.85, 1.0)
salesPolyColor = VBase4(0.95, 0.75, 0.95, 1.0)
techPolyColor = VBase4(0.6, 0.48, 0.7, 1.0)
suitDeptColors = {'c': corpPolyColor,
'l': legalPolyColor,
'm': moneyPolyColor,
's': salesPolyColor,
't': techPolyColor}
suitsPerLevel = [1,
1,
1,
1,
1,
1,
1,
1]
suitsPerDept = 8
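# suitHeadTypes is grouped by department: 8 consecutive head types per entry of suitDepts, ordered by level within each group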
goonTypes = ['pg', 'sg', 'fg1']
def getSuitBodyType(name):
if name in suitATypes:
return 'a'
if name in suitBTypes:
return 'b'
if name in suitCTypes:
return 'c'
print 'Unknown body type for suit name: ', name
def getSuitDept(name):
index = suitHeadTypes.index(name)
for dept in xrange(len(suitDepts)):
if index < suitsPerDept * (dept + 1):
return suitDepts[dept]
print 'Unknown dept for suit name: ', name
def getDeptFullname(dept):
return suitDeptFullnames[dept]
def getDeptFullnameP(dept):
return suitDeptFullnamesP[dept]
def getSuitDeptFullname(name):
return suitDeptFullnames[getSuitDept(name)]
def getSuitType(name):
index = suitHeadTypes.index(name)
return index % suitsPerDept + 1
def getSuitName(deptIndex, typeIndex):
return suitHeadTypes[suitsPerDept * deptIndex + typeIndex]
def getRandomSuitType(level, rng = random):
    return rng.randint(max(level - 4, 1), min(level, 8))
def getRandomIndexByDept(dept):
return suitsPerDept * suitDepts.index(dept) + random.randint(0, suitsPerDept - 1)
def getRandomSuitByDept(dept):
return suitHeadTypes[getRandomIndexByDept(dept)]
def getSuitsInDept(dept):
start = dept * suitsPerDept
end = start + suitsPerDept
return suitHeadTypes[start:end]
def getLevelByIndex(index):
return index % suitsPerDept + 1
class SuitDNA:
def __init__(self, str = None, type = None, dna = None, r = None, b = None, g = None):
if str != None:
self.makeFromNetString(str)
elif type != None:
if type == 's':
self.newSuit()
else:
self.type = 'u'
return
def __str__(self):
if self.type == 's':
return 'type = %s\nbody = %s, dept = %s, name = %s' % ('suit',
self.body,
self.dept,
self.name)
elif self.type == 'b':
return 'type = boss cog\ndept = %s' % self.dept
else:
return 'type undefined'
def makeNetString(self):
dg = PyDatagram()
dg.addFixedString(self.type, 1)
if self.type == 's':
dg.addFixedString(self.name, 3)
dg.addFixedString(self.dept, 1)
elif self.type == 'b':
dg.addFixedString(self.dept, 1)
elif self.type == 'u':
notify.error('undefined avatar')
else:
notify.error('unknown avatar type: ', self.type)
return dg.getMessage()
def makeFromNetString(self, string):
dg = PyDatagram(string)
dgi = PyDatagramIterator(dg)
self.type = dgi.getFixedString(1)
if self.type == 's':
self.name = dgi.getFixedString(3)
self.dept = dgi.getFixedString(1)
self.body = getSuitBodyType(self.name)
elif self.type == 'b':
self.dept = dgi.getFixedString(1)
else:
notify.error('unknown avatar type: ', self.type)
return None
def __defaultGoon(self):
self.type = 'g'
self.name = goonTypes[0]
def __defaultSuit(self):
self.type = 's'
self.name = 'ds'
self.dept = getSuitDept(self.name)
self.body = getSuitBodyType(self.name)
def newSuit(self, name = None):
if name == None:
self.__defaultSuit()
else:
self.type = 's'
self.name = name
self.dept = getSuitDept(self.name)
self.body = getSuitBodyType(self.name)
return
def newBossCog(self, dept):
self.type = 'b'
self.dept = dept
def newSuitRandom(self, level = None, dept = None):
self.type = 's'
if level == None:
level = random.choice(range(1, len(suitsPerLevel)))
elif level < 0 or level > len(suitsPerLevel):
notify.error('Invalid suit level: %d' % level)
if dept == None:
dept = random.choice(suitDepts)
self.dept = dept
index = suitDepts.index(dept)
base = index * suitsPerDept
offset = 0
if level > 1:
for i in xrange(1, level):
offset = offset + suitsPerLevel[i - 1]
bottom = base + offset
top = bottom + suitsPerLevel[level - 1]
self.name = suitHeadTypes[random.choice(range(bottom, top))]
self.body = getSuitBodyType(self.name)
return
def newGoon(self, name = None):
        if name == None:
self.__defaultGoon()
else:
self.type = 'g'
if name in goonTypes:
self.name = name
else:
notify.error('unknown goon type: ', name)
return
def getType(self):
if self.type == 's':
type = 'suit'
elif self.type == 'b':
type = 'boss'
else:
notify.error('Invalid DNA type: ', self.type)
return type | apache-2.0 | 5,145,229,808,531,475,000 | 21.501548 | 90 | 0.548557 | false | 2.940333 | false | false | false |
tswast/google-cloud-python | videointelligence/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py | 1 | 14088 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.videointelligence.v1 VideoIntelligenceService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import grpc
from google.cloud.videointelligence_v1.gapic import enums
from google.cloud.videointelligence_v1.gapic import (
video_intelligence_service_client_config,
)
from google.cloud.videointelligence_v1.gapic.transports import (
video_intelligence_service_grpc_transport,
)
from google.cloud.videointelligence_v1.proto import video_intelligence_pb2
from google.cloud.videointelligence_v1.proto import video_intelligence_pb2_grpc
from google.longrunning import operations_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-videointelligence"
).version
class VideoIntelligenceServiceClient(object):
"""Service that implements Google Cloud Video Intelligence API."""
SERVICE_ADDRESS = "videointelligence.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.videointelligence.v1.VideoIntelligenceService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.VideoIntelligenceServiceGrpcTransport,
Callable[[~.Credentials, type], ~.VideoIntelligenceServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = video_intelligence_service_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=video_intelligence_service_grpc_transport.VideoIntelligenceServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = video_intelligence_service_grpc_transport.VideoIntelligenceServiceGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def annotate_video(
self,
input_uri=None,
input_content=None,
features=None,
video_context=None,
output_uri=None,
location_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Performs asynchronous video annotation. Progress and results can be
retrieved through the ``google.longrunning.Operations`` interface.
``Operation.metadata`` contains ``AnnotateVideoProgress`` (progress).
``Operation.response`` contains ``AnnotateVideoResponse`` (results).
Example:
>>> from google.cloud import videointelligence_v1
>>> from google.cloud.videointelligence_v1 import enums
>>>
>>> client = videointelligence_v1.VideoIntelligenceServiceClient()
>>>
>>> input_uri = 'gs://cloud-samples-data/video/cat.mp4'
>>> features_element = enums.Feature.LABEL_DETECTION
>>> features = [features_element]
>>>
>>> response = client.annotate_video(input_uri=input_uri, features=features)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
input_uri (str): Input video location. Currently, only `Google Cloud
Storage <https://cloud.google.com/storage/>`__ URIs are supported, which
must be specified in the following format: ``gs://bucket-id/object-id``
(other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For
more information, see `Request
URIs <https://cloud.google.com/storage/docs/reference-uris>`__. A video
URI may include wildcards in ``object-id``, and thus identify multiple
videos. Supported wildcards: '\*' to match 0 or more characters; '?' to
match 1 character. If unset, the input video should be embedded in the
request as ``input_content``. If set, ``input_content`` should be unset.
input_content (bytes): The video data bytes. If unset, the input video(s) should be specified
via ``input_uri``. If set, ``input_uri`` should be unset.
features (list[~google.cloud.videointelligence_v1.types.Feature]): Required. Requested video annotation features.
video_context (Union[dict, ~google.cloud.videointelligence_v1.types.VideoContext]): Additional video context and/or feature-specific parameters.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.videointelligence_v1.types.VideoContext`
output_uri (str): Optional. Location where the output (in JSON format) should be stored.
Currently, only `Google Cloud
Storage <https://cloud.google.com/storage/>`__ URIs are supported, which
must be specified in the following format: ``gs://bucket-id/object-id``
(other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For
more information, see `Request
URIs <https://cloud.google.com/storage/docs/reference-uris>`__.
location_id (str): Optional. Cloud region where annotation should take place. Supported
cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``,
``asia-east1``. If no region is specified, a region will be determined
based on video file location.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.videointelligence_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "annotate_video" not in self._inner_api_calls:
self._inner_api_calls[
"annotate_video"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.annotate_video,
default_retry=self._method_configs["AnnotateVideo"].retry,
default_timeout=self._method_configs["AnnotateVideo"].timeout,
client_info=self._client_info,
)
request = video_intelligence_pb2.AnnotateVideoRequest(
input_uri=input_uri,
input_content=input_content,
features=features,
video_context=video_context,
output_uri=output_uri,
location_id=location_id,
)
operation = self._inner_api_calls["annotate_video"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
video_intelligence_pb2.AnnotateVideoResponse,
metadata_type=video_intelligence_pb2.AnnotateVideoProgress,
)
| apache-2.0 | -8,513,084,120,351,419,000 | 45.039216 | 156 | 0.626704 | false | 4.681954 | true | false | false |
BeeeOn/server | t/restui/t1007-types-list-detail.py | 1 | 10281 | #! /usr/bin/env python3
import config
config.import_libs()
import unittest
import socket
import json
from rest import GET, POST, PUT, DELETE
class TestTypesListDetail(unittest.TestCase):
"""
Create a session for testing.
"""
def setUp(self):
req = POST(config.ui_host, config.ui_port, "/auth")
req.body(config.PERMIT_LOGIN)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
self.session = result["data"]["id"]
def tearDown(self):
req = DELETE(config.ui_host, config.ui_port, "/auth")
req.authorize(self.session)
response, content = req()
self.assertEqual(204, response.status)
"""
List all available types.
"""
def test1_list_all(self):
req = GET(config.ui_host, config.ui_port, "/types")
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
self.assertEqual(30, len(result["data"]))
def test2_detail_of_non_existing(self):
req = GET(config.ui_host, config.ui_port, "/types/12904232")
req.authorize(self.session)
response, content = req()
self.assertEqual(404, response.status)
result = json.loads(content)
self.assertEqual("error", result["status"])
self.assertEqual("requested resource does not exist", result["message"])
def test3_detail_of_battery(self):
req = GET(config.ui_host, config.ui_port, "/types/battery")
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
self.assertEqual("battery", result["data"]["name"])
self.assertEqual("%", result["data"]["unit"])
def assure_range(self, id, name, min, max, step):
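        # fetch the type detail and assert its range metadata; a bound of None means that key must be absent from the response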
req = GET(config.ui_host, config.ui_port, "/types/" + id)
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
type = result["data"]
self.assertEqual(name, type["name"])
self.assertTrue("range" in type)
if min is not None:
self.assertTrue("min" in type["range"])
self.assertEqual(min, type["range"]["min"])
else:
self.assertFalse("min" in type["range"])
if max is not None:
self.assertTrue("max" in type["range"])
self.assertEqual(max, type["range"]["max"])
else:
self.assertFalse("max" in type["range"])
if step is not None:
self.assertTrue("step" in type["range"])
self.assertEqual(step, type["range"]["step"])
else:
self.assertFalse("step" in type["range"])
def test4_check_types_with_ranges(self):
self.assure_range("battery", "battery", 0, 100, 1)
self.assure_range("brightness", "brightness", 0, 100, 1)
self.assure_range("co2", "CO2", 0, 1000000, 1)
self.assure_range("humidity", "humidity", 0, 100, 1)
self.assure_range("luminance", "luminance", 0, 1000000, 1)
self.assure_range("noise", "noise", 0, 200, 1)
self.assure_range("performance", "performance", 0, 100, 1)
self.assure_range("pressure", "pressure", 800, 1100, 1)
self.assure_range("rssi", "signal", 0, 100, 1)
self.assure_range("temperature", "temperature", -273.15, 200, 0.01)
self.assure_range("ultraviolet", "UV", 0, 11, 0.1)
self.assure_range("color_temperature", "color temperature", 1700, 27000, 1)
self.assure_range("color", "color", 0, 16777215, 1)
def assure_values(self, id, name, values):
req = GET(config.ui_host, config.ui_port, "/types/" + id)
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
type = result["data"]
self.assertEqual(name, type["name"])
self.assertTrue("values" in type)
self.assertEqual(len(values), len(type["values"]))
for key in values:
self.assertTrue(key in type["values"])
self.assertEqual(values[key], type["values"][key])
def test5_check_types_with_values(self):
self.assure_values("availability", "availability", {"0": "unavailable", "1": "available"})
self.assure_values("fire", "fire", {"0": "no fire", "1": "fire"})
self.assure_values("motion", "motion", {"0": "no motion", "1": "motion"})
self.assure_values("open_close", "open/close", {"0": "closed", "1": "open"})
self.assure_values("on_off", "on/off", {"0": "off", "1": "on"})
self.assure_values("security_alert", "security alert", {"0": "ease", "1": "alert"})
self.assure_values("shake", "shake", {"0": "ease", "1": "shake"})
def assure_levels(self, id, name, levels):
req = GET(config.ui_host, config.ui_port, "/types/" + id)
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
type = result["data"]
self.assertEqual(name, type["name"])
self.assertTrue("levels" in type)
self.assertEqual(len(levels), len(type["levels"]))
for i in range(len(levels)):
if levels[i][0] is None:
self.assertFalse("min" in type["levels"][i])
else:
self.assertTrue("min" in type["levels"][i])
self.assertEqual(levels[i][0], type["levels"][i]["min"])
if levels[i][1] is None:
self.assertFalse("max" in type["levels"][i])
else:
self.assertTrue("max" in type["levels"][i])
self.assertEqual(levels[i][1], type["levels"][i]["max"])
if levels[i][2] is None:
self.assertFalse("attention" in type["levels"][i])
else:
self.assertTrue("attention" in type["levels"][i])
self.assertEqual(levels[i][2], type["levels"][i]["attention"])
if levels[i][3] is None:
self.assertFalse("name" in type["levels"][i])
else:
self.assertTrue("name" in type["levels"][i])
self.assertEqual(levels[i][3], type["levels"][i]["name"])
def test6_check_types_with_levels(self):
self.assure_levels("battery", "battery", [
(0, 10, "single", "critical"),
(11, 25, "single", "low"),
(26, 80, None, "medium"),
(81, 100, None, "high")
])
self.assure_levels("co2", "CO2", [
(None, 450, None, "normal outdoor"),
(451, 1000, None, "normal indoor"),
(1001, 2500, None, "poor air"),
(2501, 5000, "single", "adverse health effects"),
(5001, 10000, "repeat", "dangerous after few hours"),
(10001, 30000, "repeat" , "dangerous after several minutes"),
(30001, None, "alert", "extreme and dangerous")
])
self.assure_levels("fire", "fire", [
(1, 1, "alert", None)
])
self.assure_levels("motion", "motion", [
(1, 1, "single", None)
])
self.assure_levels("noise", "noise", [
(None, 80, None, "normal"),
(81, 90, None, "acceptable"),
(91, 99, "single", "loud"),
(100, 111, "repeat", "dangerous for several minutes stay"),
(112, 139, "repeat", "dangerous for few minutes stay"),
(140, None, "alert", "immediate nerve damage possible"),
])
self.assure_levels("performance", "performance", [
(0, 0, None, "idle"),
(95, None, None, "high load")
])
self.assure_levels("rssi", "signal", [
(None, 25, None, "poor"),
(26, 80, None, "good"),
(81, 100, None, "high")
])
self.assure_levels("security_alert", "security alert", [
(1, 1, "alert", None)
])
self.assure_levels("ultraviolet", "UV", [
(None, 2.9, None, "low"),
(3, 5.9, None, "moderate"),
(6, 7.9, "single", "high"),
(8, 10.9, "single", "very high"),
(11, None, "single", "extreme")
])
def test7_check_enums(self):
req = GET(config.ui_host, config.ui_port, "/types/enum/MOD_BOILER_STATUS")
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
enum = result["data"]
self.assertEqual("boiler status", enum["name"])
self.assertTrue("values" in enum)
self.assertEqual(5, len(enum["values"]))
self.assertEqual("undefined", enum["values"]["0"])
self.assertEqual("heating", enum["values"]["1"])
self.assertEqual("heating water", enum["values"]["2"])
self.assertEqual("failure", enum["values"]["3"])
self.assertEqual("shutdown", enum["values"]["4"])
def test8_check_bitmap_with_flags(self):
req = GET(config.ui_host, config.ui_port, "/types/bitmap/MOD_CURRENT_BOILER_OT_FAULT_FLAGS")
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
bitmap = result["data"]
self.assertEqual("OT Fault Flags", bitmap["name"])
self.assertTrue("flags" in bitmap)
self.assertEqual(6, len(bitmap["flags"]))
self.assertEqual("service request", bitmap["flags"]["0"]["name"])
self.assertEqual("lockout reset enabled", bitmap["flags"]["1"]["name"])
self.assertEqual("low water pressure", bitmap["flags"]["2"]["name"])
self.assertEqual("gas/flame fault", bitmap["flags"]["3"]["name"])
self.assertEqual("air pressure fault", bitmap["flags"]["4"]["name"])
self.assertEqual("water overheated", bitmap["flags"]["5"]["name"])
def test9_check_bitmap_with_group(self):
req = GET(config.ui_host, config.ui_port, "/types/bitmap/MOD_CURRENT_BOILER_OT_OEM_FAULTS")
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
bitmap = result["data"]
self.assertEqual("OT OEM Faults", bitmap["name"])
self.assertTrue("groups" in bitmap)
self.assertEqual(1, len(bitmap["groups"]))
self.assertEqual("OEM specific", bitmap["groups"][0]["name"])
self.assertEqual(8, len(bitmap["groups"][0]["mapping"]))
self.assertEqual(0, bitmap["groups"][0]["mapping"][0])
self.assertEqual(1, bitmap["groups"][0]["mapping"][1])
self.assertEqual(2, bitmap["groups"][0]["mapping"][2])
self.assertEqual(3, bitmap["groups"][0]["mapping"][3])
self.assertEqual(4, bitmap["groups"][0]["mapping"][4])
self.assertEqual(5, bitmap["groups"][0]["mapping"][5])
self.assertEqual(6, bitmap["groups"][0]["mapping"][6])
self.assertEqual(7, bitmap["groups"][0]["mapping"][7])
if __name__ == '__main__':
import sys
import taprunner
unittest.main(testRunner=taprunner.TAPTestRunner(stream = sys.stdout))
| bsd-3-clause | 227,601,853,684,804,400 | 32.271845 | 94 | 0.654897 | false | 3.010542 | true | false | false |
fusionbox/django-darkknight | darkknight/forms.py | 1 | 4549 | import re
import os
from django import forms
from django.db import transaction
from django.utils.translation import ugettext as _
from django.forms.formsets import BaseFormSet
from localflavor.us.us_states import US_STATES
from django_countries import countries
from OpenSSL import crypto
from darkknight.models import CertificateSigningRequest, SSLKey
from darkknight.signals import key_created
KEY_SIZE = 2048
WWW = 'www.'
def creat(filename, mode):
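    # exclusive create (O_CREAT | O_EXCL): fails if the file already exists; mode sets the initial permissions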
fd = os.open(filename, os.O_CREAT | os.O_WRONLY | os.O_EXCL, mode)
return os.fdopen(fd, 'w')
class GenerateForm(forms.Form):
countryName = forms.ChoiceField(
choices=countries,
label=_("Country Name"),
initial='US',
)
stateOrProvinceName = forms.CharField(
label=_("State or province name"),
help_text=_("Enter its full name"),
)
localityName = forms.CharField(
label=_("Locality name"),
help_text=_("eg, city name"),
)
organizationName = forms.CharField(
label=_("Organisation Name"),
help_text=_("eg, company name"),
)
organizationalUnitName = forms.CharField(
label=_("Organisation Unit"),
help_text=_("Section, Department, ... eg, IT Departement"),
required=False,
)
commonName = forms.CharField(
label=_("Common Name"),
help_text=_("Domain name, including 'www.' if applicable. "
"eg, www.example.com")
)
emailAddress = forms.EmailField(
label=_("Email address"),
required=False,
)
subjectAlternativeNames = forms.CharField(
label=_('Subject Alternative Names (SAN)'),
required=False,
help_text=_('Please put one domain name per line'),
widget=forms.Textarea,
)
def clean_countryName(self):
country = self.cleaned_data['countryName']
if not re.match('^[a-z]{2}$', country, flags=re.IGNORECASE):
raise forms.ValidationError(_("Please enter a two-letters code"))
return country.upper()
def clean_subjectAlternativeNames(self):
sans = list(filter(bool, (
domain.strip() for domain in self.cleaned_data['subjectAlternativeNames'].splitlines()
)))
return sans
def clean(self):
cd = super(GenerateForm, self).clean()
if cd.get('countryName') == 'US':
try:
if cd['stateOrProvinceName'] not in set(i[1] for i in US_STATES):
self.add_error('stateOrProvinceName', 'State should be the full state name, eg "Colorado"')
except KeyError:
pass
return cd
class GenerateBaseFormSet(BaseFormSet):
def __init__(self, *args, **kwargs):
super(GenerateBaseFormSet, self).__init__(*args, **kwargs)
for form in self.forms:
form.empty_permitted = False
def generate(self):
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, KEY_SIZE)
key_obj = SSLKey()
csr_list = [self._generate_csr(pkey, key_obj, data) for data in self.cleaned_data]
with transaction.atomic():
key = crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)
assert not os.path.exists(key_obj.key_path)
with creat(key_obj.key_path, 0000) as f:
f.write(key)
key_obj.save()
CertificateSigningRequest.objects.bulk_create(csr_list)
key_created.send(sender=self, instance=key_obj, private_key=key)
return key_obj
def _generate_csr(self, pkey, key_obj, cleaned_data):
req = crypto.X509Req()
req.set_pubkey(pkey)
subject = req.get_subject()
for attr, value in cleaned_data.items():
if value:
if attr == 'subjectAlternativeNames':
req.add_extensions([
crypto.X509Extension('subjectAltName', False, ", ".join(
"DNS.{i}:{domain}".format(i=i, domain=domain)
for i, domain in enumerate(value)
))
])
else:
setattr(subject, attr, value)
cn = cleaned_data['commonName']
# Strip www. from the common name
if cn.startswith(WWW):
cn = cn[len(WWW):]
req.sign(pkey, "sha256")
csr = crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)
csr_obj = CertificateSigningRequest(domain=cn, key=key_obj, content=csr)
return csr_obj
| bsd-2-clause | -862,029,955,554,943,000 | 31.963768 | 111 | 0.591778 | false | 4.043556 | false | false | false |
debian789/suescunet | apps/codigos/admin.py | 1 | 1478 | from django.contrib import admin
from models import mdl_codigos
from apps.elementos_comunes.models import mdl_lenguaje,mdl_sistema_operativo
from actions import export_as_csv
## builds the list of options shown in the Django admin
class codigosAdmin(admin.ModelAdmin):
list_display = ('titulo','lenguaje','archivo','imagen_azul','esta_publicado','url')
list_filter = ('publicado','so','lenguaje')
search_fields = ('titulo','codigo')
list_editable = ('archivo',)
actions = [export_as_csv]
raw_id_fields = ('lenguaje',)
filter_horizontal = ('so',)
def imagen_azul(self,obj):
url = obj.imagen_azul_publicado()
tag = '<img src="%s">'% url
return tag
    imagen_azul.allow_tags = True  # allow the returned HTML tag to be rendered
    imagen_azul.admin_order_field = 'publicado'  # allow ordering this column by 'publicado'
class CodigosInline(admin.StackedInline):
model = mdl_codigos
extra = 1
class LenguajesAdmin(admin.ModelAdmin):
actions = [export_as_csv]
inlines = [CodigosInline]
#class SitemaOperativoAdmin(admin.ModelAdmin):
# fiter_vertical = ('so',)
#class AgregadorAdmin(admin.ModelAdmin):
# filter_vertical = ('enlaces',)
admin.site.register(mdl_sistema_operativo)
#admin.site.register(Agregador,AgregadorAdmin)
#admin.site.register(mdl_sistema_operativo)
#admin.site.register(mdl_lenguaje,LenguajesAdmin)
admin.site.register(mdl_lenguaje)
admin.site.register(mdl_codigos,codigosAdmin)
#admin.site.register(soAdmin)
#admin.site.register(mdl_lenguaje,LenguajesAdmin) | gpl-2.0 | 5,772,595,188,927,756,000 | 31.152174 | 85 | 0.747632 | false | 2.672694 | false | false | false |
jeremiah-c-leary/vhdl-style-guide | vsg/tests/sequential/test_rule_006.py | 1 | 1193 |
import os
import unittest
from vsg.rules import sequential
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_006_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_006_test_input.fixed.vhd'), lExpected)
class test_sequential_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_006(self):
oRule = sequential.rule_006()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'sequential')
self.assertEqual(oRule.identifier, '006')
lExpected = [19, 21]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_006(self):
oRule = sequential.rule_006()
oRule.fixable = True
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
| gpl-3.0 | 3,324,378,810,418,496,500 | 24.934783 | 106 | 0.678122 | false | 3.304709 | true | false | false |
acjones617/k-means | lib/exec.py | 1 | 1524 | import normalize as n
import cluster as c
import jsonpickle as j
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument('matrix')
parser.add_argument('options')
args = parser.parse_args()
matrix = ast.literal_eval(args.matrix)
options = ast.literal_eval(args.options)
# steps:
# 1. normalize data
# 2. randomly pick center points
# 3. assign points to a cluster
# 4. re-pick cluster center points
# 5. repeat
# 6. assign clusters to original data
# 7. send back to node
# steps:
# 1. normalize data
normal_matrix = n.normalize(matrix)
# 2. randomly pick cluster center points
cluster_centers = c.init(normal_matrix, options['clusters'])
# 3. assign points to a cluster
# 4. re-pick cluster center points
# 5. repeat steps 3 and 4
clusters = []
has_converged = False
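# k-means iterations: assign points to the current centers, stop early on convergence, otherwise re-pick centers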
for i in range(options['iterations']):
old_clusters = clusters
cluster_points, clusters = c.assign_points(normal_matrix, cluster_centers)
if c.converged(old_clusters, clusters):
has_converged = True
break
cluster_centers = c.reselect_centers(cluster_points, options['clusters'])
# final assignment of points if never converged
if (not has_converged):
cluster_points, clusters = c.assign_points(normal_matrix, cluster_centers)
# 6. assign clusters to original data
final_matrix = n.reassign(matrix, cluster_points)
# 7. send back to node - need to convert cluster centers to list first
print j.encode({
'finalMatrix' : final_matrix,
'clusterCenters': cluster_centers
})
| mit | 4,464,520,530,550,727,700 | 24.4 | 78 | 0.727034 | false | 3.544186 | false | false | false |
mlperf/training_results_v0.7 | Google/benchmarks/ssd/implementations/ssd-research-TF-tpu-v4-512/ssd_main.py | 1 | 11062 | # Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for SSD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import multiprocessing
import sys
import threading
from absl import app
import tensorflow.compat.v1 as tf
from REDACTED.mlp_log import mlp_log
from REDACTED.ssd import coco_metric
from REDACTED.ssd import dataloader
from REDACTED.ssd import ssd_constants
from REDACTED.ssd import ssd_model
from REDACTED.util import train_and_eval_runner
# copybara:strip_begin
from REDACTED.REDACTED.multiprocessing import REDACTEDprocess
# copybara:strip_end
tf.flags.DEFINE_string(
'resnet_checkpoint',
'/REDACTED/mb-d/home/tpu-perf-team/ssd_checkpoint/resnet34_bs2048_2',
'Location of the ResNet checkpoint to use for model '
'initialization.')
tf.flags.DEFINE_string('hparams', '',
'Comma separated k=v pairs of hyperparameters.')
tf.flags.DEFINE_integer(
'num_shards', default=8, help='Number of shards (TPU cores) for '
'training.')
tf.flags.DEFINE_integer('train_batch_size', 64, 'training batch size')
tf.flags.DEFINE_integer('eval_batch_size', 1, 'evaluation batch size')
tf.flags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '
'evaluation.')
tf.flags.DEFINE_integer(
'iterations_per_loop', 1000, 'Number of iterations per TPU training loop')
tf.flags.DEFINE_string(
'training_file_pattern',
'REDACTEDtrain*',
'Glob for training data files (e.g., COCO train - minival set)')
tf.flags.DEFINE_string(
'validation_file_pattern',
'REDACTEDval*',
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
tf.flags.DEFINE_bool(
'use_fake_data', False,
'Use fake data to reduce the input preprocessing overhead (for unit tests)')
tf.flags.DEFINE_string(
'val_json_file',
'REDACTEDinstances_val2017.json',
'COCO validation JSON containing golden bounding boxes.')
tf.flags.DEFINE_integer('num_examples_per_epoch', 118287,
'Number of examples in one epoch')
tf.flags.DEFINE_integer('num_epochs', 64, 'Number of epochs for training')
tf.flags.DEFINE_multi_integer(
'input_partition_dims',
default=None,
help=('Number of partitions on each dimension of the input. Each TPU core'
' processes a partition of the input image in parallel using spatial'
' partitioning.'))
tf.flags.DEFINE_bool('run_cocoeval', True, 'Whether to run cocoeval')
FLAGS = tf.flags.FLAGS
_STOP = -1
def construct_run_config(iterations_per_loop):
"""Construct the run config."""
# Parse hparams
hparams = ssd_model.default_hparams()
hparams.parse(FLAGS.hparams)
return dict(
hparams.values(),
num_shards=FLAGS.num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
resnet_checkpoint=FLAGS.resnet_checkpoint,
val_json_file=FLAGS.val_json_file,
model_dir=FLAGS.model_dir,
iterations_per_loop=iterations_per_loop,
steps_per_epoch=FLAGS.num_examples_per_epoch // FLAGS.train_batch_size,
eval_samples=FLAGS.eval_samples,
transpose_input=False if FLAGS.input_partition_dims is not None else True,
use_spatial_partitioning=True
if FLAGS.input_partition_dims is not None else False,
)
# copybara:strip_begin
def REDACTED_predict_post_processing():
"""REDACTED batch-processes the predictions."""
q_in, q_out = REDACTEDprocess.get_user_data()
predict_post_processing(q_in, q_out)
# copybara:strip_end
def predict_post_processing(q_in, q_out):
"""Run post-processing on CPU for predictions."""
coco_gt = coco_metric.create_coco(FLAGS.val_json_file, use_cpp_extension=True)
current_step, predictions = q_in.get()
while current_step != _STOP and q_out is not None:
q_out.put((current_step,
coco_metric.compute_map(
predictions,
coco_gt,
use_cpp_extension=True,
nms_on_tpu=True)))
current_step, predictions = q_in.get()
def main(argv):
del argv # Unused.
params = construct_run_config(FLAGS.iterations_per_loop)
mlp_log.mlperf_print(key='cache_clear', value=True)
mlp_log.mlperf_print(key='init_start', value=None)
mlp_log.mlperf_print('global_batch_size', FLAGS.train_batch_size)
mlp_log.mlperf_print('opt_base_learning_rate', params['base_learning_rate'])
mlp_log.mlperf_print(
'opt_learning_rate_decay_boundary_epochs',
[params['first_lr_drop_epoch'], params['second_lr_drop_epoch']])
mlp_log.mlperf_print('opt_weight_decay', params['weight_decay'])
mlp_log.mlperf_print(
'model_bn_span', FLAGS.train_batch_size // FLAGS.num_shards *
params['distributed_group_size'])
mlp_log.mlperf_print('max_samples', ssd_constants.NUM_CROP_PASSES)
mlp_log.mlperf_print('train_samples', FLAGS.num_examples_per_epoch)
mlp_log.mlperf_print('eval_samples', FLAGS.eval_samples)
params['batch_size'] = FLAGS.train_batch_size // FLAGS.num_shards
input_partition_dims = FLAGS.input_partition_dims
train_steps = FLAGS.num_epochs * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
eval_steps = int(math.ceil(FLAGS.eval_samples / FLAGS.eval_batch_size))
runner = train_and_eval_runner.TrainAndEvalRunner(FLAGS.iterations_per_loop,
train_steps, eval_steps,
FLAGS.num_shards)
train_input_fn = dataloader.SSDInputReader(
FLAGS.training_file_pattern,
params['transpose_input'],
is_training=True,
use_fake_data=FLAGS.use_fake_data,
params=params)
eval_input_fn = dataloader.SSDInputReader(
FLAGS.validation_file_pattern,
is_training=False,
use_fake_data=FLAGS.use_fake_data,
distributed_eval=True,
count=eval_steps * FLAGS.eval_batch_size,
params=params)
def init_fn():
tf.train.init_from_checkpoint(params['resnet_checkpoint'], {
'resnet/': 'resnet%s/' % ssd_constants.RESNET_DEPTH,
})
if FLAGS.run_cocoeval:
# copybara:strip_begin
q_in, q_out = REDACTEDprocess.get_user_data()
processes = [
REDACTEDprocess.Process(target=REDACTED_predict_post_processing) for _ in range(4)
]
# copybara:strip_end_and_replace_begin
# q_in = multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE)
# q_out = multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE)
# processes = [
# multiprocessing.Process(
# target=predict_post_processing, args=(q_in, q_out))
# for _ in range(self.num_multiprocessing_workers)
# ]
# copybara:replace_end
for p in processes:
p.start()
def log_eval_results_fn():
"""Print out MLPerf log."""
result = q_out.get()
success = False
while result[0] != _STOP:
if not success:
steps_per_epoch = (
FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
epoch = (result[0] + FLAGS.iterations_per_loop) // steps_per_epoch
mlp_log.mlperf_print(
'eval_accuracy',
result[1]['COCO/AP'],
metadata={'epoch_num': epoch})
mlp_log.mlperf_print('eval_stop', None, metadata={'epoch_num': epoch})
if result[1]['COCO/AP'] > ssd_constants.EVAL_TARGET:
success = True
mlp_log.mlperf_print(
'run_stop', None, metadata={'status': 'success'})
result = q_out.get()
if not success:
mlp_log.mlperf_print('run_stop', None, metadata={'status': 'abort'})
log_eval_result_thread = threading.Thread(target=log_eval_results_fn)
log_eval_result_thread.start()
runner.initialize(train_input_fn, eval_input_fn,
functools.partial(ssd_model.ssd_model_fn,
params), FLAGS.train_batch_size,
FLAGS.eval_batch_size, input_partition_dims, init_fn)
mlp_log.mlperf_print('init_stop', None)
mlp_log.mlperf_print('run_start', None)
def eval_init_fn(cur_step):
"""Executed before every eval."""
steps_per_epoch = FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
mlp_log.mlperf_print(
'block_start',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': FLAGS.iterations_per_loop // steps_per_epoch
})
mlp_log.mlperf_print(
'eval_start',
None,
metadata={
'epoch_num': epoch + FLAGS.iterations_per_loop // steps_per_epoch
})
def eval_finish_fn(cur_step, eval_output, _):
steps_per_epoch = FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
mlp_log.mlperf_print(
'block_stop',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': FLAGS.iterations_per_loop // steps_per_epoch
})
if FLAGS.run_cocoeval:
q_in.put((cur_step, eval_output['detections']))
runner.train_and_eval(eval_init_fn, eval_finish_fn)
if FLAGS.run_cocoeval:
for _ in processes:
q_in.put((_STOP, None))
for p in processes:
try:
p.join(timeout=10)
except Exception: # pylint: disable=broad-except
pass
q_out.put((_STOP, None))
log_eval_result_thread.join()
# Clear out all the queues to avoid deadlock.
while not q_out.empty():
q_out.get()
while not q_in.empty():
q_in.get()
if __name__ == '__main__':
# copybara:strip_begin
user_data = (multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE),
multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE))
in_compile_test = False
for arg in sys.argv:
if arg == '--xla_jf_exit_process_on_compilation_success=true':
in_compile_test = True
break
if in_compile_test:
# Exiting from XLA's C extension skips REDACTEDprocess's multiprocessing clean
# up. Don't use REDACTED process when xla is in compilation only mode.
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
else:
with REDACTEDprocess.main_handler(user_data=user_data):
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
# copybara:strip_end
# copybara:insert tf.logging.set_verbosity(tf.logging.INFO)
# copybara:insert app.run(main)
| apache-2.0 | 66,469,063,065,002,300 | 35.388158 | 90 | 0.653679 | false | 3.437539 | false | false | false |
openwebcc/ba | maintenance/rawdata/als/hef/071011_hef14_fix_ala.py | 1 | 2755 | #!/usr/bin/python
#
# fix incorrect syntax for echoes in .ala files of 071011_hef14
#
import re
import os
import sys
sys.path.append('/home/institut/rawdata/www/lib')
from Laser.Util import las
if __name__ == '__main__':
""" fix incorrect return number and number of returns for given pulse syntax """
import argparse
parser = argparse.ArgumentParser(description='fix incorrect syntax for echoes in .ala files')
parser.add_argument('--ala', dest='ala', required=True, help='path to input .ala file')
parser.add_argument('--out', dest='out', required=True, help='path to cleaned output file')
args = parser.parse_args()
# init utility library
util = las.rawdata()
# open output file
o = open(args.out,'w')
# loop through input file, read pairs of line and clean up
with open(args.ala) as f:
prev_line = None
curr_line = None
for line in f:
if not prev_line:
prev_line = util.parse_line(line)
continue
else:
curr_line = util.parse_line(line)
# alter previous and current line in one turn if gpstimes are the same
if prev_line[0] == curr_line[0]:
# set return numbers of previous echo
prev_line[-2] = '1'
prev_line[-1] = '2'
# set return numbers of current echo
curr_line[-2] = '2'
curr_line[-1] = '2'
# write out both lines
o.write('%s\n' % ' '.join(prev_line))
o.write('%s\n' % ' '.join(curr_line))
# set previous line to None
prev_line = None
continue
else:
# write previous line with 1 1 as no second echo is present
prev_line[-2] = '1'
prev_line[-1] = '1'
o.write('%s\n' % ' '.join(prev_line))
# assign current line as next previous line
prev_line = curr_line[:]
# write last record from loop if any
if prev_line:
o.write('%s\n' % ' '.join(prev_line))
# create log file
with open("%s.txt" % args.out, "w") as log:
log.write("the corresponding file was created with %s\n" % __file__)
log.write("it contains fixed return numbers for first and second returns\n")
log.write("\n")
log.write("input file with incorrect return numbers: %s\n" % re.sub("raw/str/ala","raw/bad/ala",args.ala[:-4]) )
log.write("output file with correct return numbers: %s\n" % args.out)
log.write("\n")
# close cleaned output file
o.close()
| gpl-3.0 | -4,989,434,933,116,422,000 | 33.873418 | 120 | 0.536116 | false | 3.969741 | false | false | false |
google/retiming | models/networks.py | 1 | 16657 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from third_party.models.networks import init_net
###############################################################################
# Helper Functions
###############################################################################
def define_LNR(nf=64, texture_channels=16, texture_res=16, n_textures=25, gpu_ids=[]):
"""Create a layered neural renderer.
Parameters:
nf (int) -- the number of channels in the first/last conv layers
texture_channels (int) -- the number of channels in the neural texture
texture_res (int) -- the size of each individual texture map
n_textures (int) -- the number of individual texture maps
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a layered neural rendering model.
"""
net = LayeredNeuralRenderer(nf, texture_channels, texture_res, n_textures)
return init_net(net, gpu_ids)
def define_kp2uv(nf=64, gpu_ids=[]):
"""Create a keypoint-to-UV model.
Parameters:
nf (int) -- the number of channels in the first/last conv layers
Returns a keypoint-to-UV model.
"""
net = kp2uv(nf)
return init_net(net, gpu_ids)
def cal_alpha_reg(prediction, lambda_alpha_l1, lambda_alpha_l0):
"""Calculate the alpha regularization term.
Parameters:
prediction (tensor) - - composite of predicted alpha layers
lambda_alpha_l1 (float) - - weight for the L1 regularization term
lambda_alpha_l0 (float) - - weight for the L0 regularization term
Returns the alpha regularization loss term
"""
assert prediction.max() <= 1.
assert prediction.min() >= 0.
loss = 0.
if lambda_alpha_l1 > 0:
loss += lambda_alpha_l1 * torch.mean(prediction)
if lambda_alpha_l0 > 0:
# Pseudo L0 loss using a squished sigmoid curve.
l0_prediction = (torch.sigmoid(prediction * 5.0) - 0.5) * 2.0
loss += lambda_alpha_l0 * torch.mean(l0_prediction)
return loss
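# Illustrative use of cal_alpha_reg (a sketch only; the tensor and the two
# lambda weights below are placeholders, not values used by the project):
#
#   alpha_composite = torch.rand(1, 1, 64, 64)  # predicted alphas in [0, 1]
#   reg_loss = cal_alpha_reg(alpha_composite,
#                            lambda_alpha_l1=0.01, lambda_alpha_l0=0.005)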
##############################################################################
# Classes
##############################################################################
class MaskLoss(nn.Module):
"""Define the loss which encourages the predicted alpha matte to match the mask (trimap)."""
def __init__(self):
super(MaskLoss, self).__init__()
self.loss = nn.L1Loss(reduction='none')
def __call__(self, prediction, target):
"""Calculate loss given predicted alpha matte and trimap.
Balance positive and negative regions. Exclude 'unknown' region from loss.
Parameters:
prediction (tensor) - - predicted alpha
target (tensor) - - trimap
Returns: the computed loss
"""
mask_err = self.loss(prediction, target)
pos_mask = F.relu(target)
neg_mask = F.relu(-target)
pos_mask_loss = (pos_mask * mask_err).sum() / (1 + pos_mask.sum())
neg_mask_loss = (neg_mask * mask_err).sum() / (1 + neg_mask.sum())
loss = .5 * (pos_mask_loss + neg_mask_loss)
return loss
class ConvBlock(nn.Module):
"""Helper module consisting of a convolution, optional normalization and activation, with padding='same'."""
def __init__(self, conv, in_channels, out_channels, ksize=4, stride=1, dil=1, norm=None, activation='relu'):
"""Create a conv block.
Parameters:
conv (convolutional layer) - - the type of conv layer, e.g. Conv2d, ConvTranspose2d
in_channels (int) - - the number of input channels
            out_channels (int) - - the number of output channels
ksize (int) - - the kernel size
stride (int) - - stride
dil (int) - - dilation
norm (norm layer) - - the type of normalization layer, e.g. BatchNorm2d, InstanceNorm2d
activation (str) -- the type of activation: relu | leaky | tanh | none
"""
super(ConvBlock, self).__init__()
self.k = ksize
self.s = stride
self.d = dil
self.conv = conv(in_channels, out_channels, ksize, stride=stride, dilation=dil)
if norm is not None:
self.norm = norm(out_channels)
else:
self.norm = None
if activation == 'leaky':
self.activation = nn.LeakyReLU(0.2)
elif activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'tanh':
self.activation = nn.Tanh()
else:
self.activation = None
def forward(self, x):
"""Forward pass. Compute necessary padding and cropping because pytorch doesn't have pad=same."""
height, width = x.shape[-2:]
if isinstance(self.conv, nn.modules.ConvTranspose2d):
desired_height = height * self.s
desired_width = width * self.s
pady = 0
padx = 0
else:
# o = [i + 2*p - k - (k-1)*(d-1)]/s + 1
# padding = .5 * (stride * (output-1) + (k-1)(d-1) + k - input)
desired_height = height // self.s
desired_width = width // self.s
pady = .5 * (self.s * (desired_height - 1) + (self.k - 1) * (self.d - 1) + self.k - height)
padx = .5 * (self.s * (desired_width - 1) + (self.k - 1) * (self.d - 1) + self.k - width)
x = F.pad(x, [int(np.floor(padx)), int(np.ceil(padx)), int(np.floor(pady)), int(np.ceil(pady))])
x = self.conv(x)
if x.shape[-2] != desired_height or x.shape[-1] != desired_width:
cropy = x.shape[-2] - desired_height
cropx = x.shape[-1] - desired_width
x = x[:, :, int(np.floor(cropy / 2.)):-int(np.ceil(cropy / 2.)),
int(np.floor(cropx / 2.)):-int(np.ceil(cropx / 2.))]
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class ResBlock(nn.Module):
"""Define a residual block."""
def __init__(self, channels, ksize=4, stride=1, dil=1, norm=None, activation='relu'):
"""Initialize the residual block, which consists of 2 conv blocks with a skip connection."""
super(ResBlock, self).__init__()
self.convblock1 = ConvBlock(nn.Conv2d, channels, channels, ksize=ksize, stride=stride, dil=dil, norm=norm,
activation=activation)
self.convblock2 = ConvBlock(nn.Conv2d, channels, channels, ksize=ksize, stride=stride, dil=dil, norm=norm,
activation=None)
def forward(self, x):
identity = x
x = self.convblock1(x)
x = self.convblock2(x)
x += identity
return x
class kp2uv(nn.Module):
"""UNet architecture for converting keypoint image to UV map.
Same person UV map format as described in https://arxiv.org/pdf/1802.00434.pdf.
"""
def __init__(self, nf=64):
        super(kp2uv, self).__init__()
self.encoder = nn.ModuleList([
ConvBlock(nn.Conv2d, 3, nf, ksize=4, stride=2),
ConvBlock(nn.Conv2d, nf, nf * 2, ksize=4, stride=2, norm=nn.InstanceNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 2, nf * 4, ksize=4, stride=2, norm=nn.InstanceNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=2, norm=nn.InstanceNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=2, norm=nn.InstanceNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=3, stride=1, norm=nn.InstanceNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=3, stride=1, norm=nn.InstanceNorm2d, activation='leaky')])
self.decoder = nn.ModuleList([
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 4, ksize=4, stride=2, norm=nn.InstanceNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 4, ksize=4, stride=2, norm=nn.InstanceNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 2, ksize=4, stride=2, norm=nn.InstanceNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 2 * 2, nf, ksize=4, stride=2, norm=nn.InstanceNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 2, nf, ksize=4, stride=2, norm=nn.InstanceNorm2d)])
# head to predict body part class (25 classes - 24 body parts, 1 background.)
self.id_pred = ConvBlock(nn.Conv2d, nf + 3, 25, ksize=3, stride=1, activation='none')
# head to predict UV coordinates for every body part class
self.uv_pred = ConvBlock(nn.Conv2d, nf + 3, 2 * 24, ksize=3, stride=1, activation='tanh')
def forward(self, x):
"""Forward pass through UNet, handling skip connections.
Parameters:
x (tensor) - - rendered keypoint image, shape [B, 3, H, W]
Returns:
x_id (tensor): part id class probabilities
x_uv (tensor): uv coordinates for each part id
"""
skips = [x]
for i, layer in enumerate(self.encoder):
x = layer(x)
if i < 5:
skips.append(x)
for layer in self.decoder:
x = torch.cat((x, skips.pop()), 1)
x = layer(x)
x = torch.cat((x, skips.pop()), 1)
x_id = self.id_pred(x)
x_uv = self.uv_pred(x)
return x_id, x_uv
class LayeredNeuralRenderer(nn.Module):
"""Layered Neural Rendering model for video decomposition.
Consists of neural texture, UNet, upsampling module.
"""
def __init__(self, nf=64, texture_channels=16, texture_res=16, n_textures=25):
        super(LayeredNeuralRenderer, self).__init__()
"""Initialize layered neural renderer.
Parameters:
nf (int) -- the number of channels in the first/last conv layers
texture_channels (int) -- the number of channels in the neural texture
texture_res (int) -- the size of each individual texture map
n_textures (int) -- the number of individual texture maps
"""
# Neural texture is implemented as 'n_textures' concatenated horizontally
self.texture = nn.Parameter(torch.randn(1, texture_channels, texture_res, n_textures * texture_res))
# Define UNet
self.encoder = nn.ModuleList([
ConvBlock(nn.Conv2d, texture_channels + 1, nf, ksize=4, stride=2),
ConvBlock(nn.Conv2d, nf, nf * 2, ksize=4, stride=2, norm=nn.BatchNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 2, nf * 4, ksize=4, stride=2, norm=nn.BatchNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=2, norm=nn.BatchNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=2, norm=nn.BatchNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=1, dil=2, norm=nn.BatchNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=1, dil=2, norm=nn.BatchNorm2d, activation='leaky')])
self.decoder = nn.ModuleList([
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 4, ksize=4, stride=2, norm=nn.BatchNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 4, ksize=4, stride=2, norm=nn.BatchNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 2, ksize=4, stride=2, norm=nn.BatchNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 2 * 2, nf, ksize=4, stride=2, norm=nn.BatchNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 2, nf, ksize=4, stride=2, norm=nn.BatchNorm2d)])
self.final_rgba = ConvBlock(nn.Conv2d, nf, 4, ksize=4, stride=1, activation='tanh')
# Define upsampling block, which outputs a residual
upsampling_ic = texture_channels + 5 + nf
self.upsample_block = nn.Sequential(
ConvBlock(nn.Conv2d, upsampling_ic, nf, ksize=3, stride=1, norm=nn.InstanceNorm2d),
ResBlock(nf, ksize=3, stride=1, norm=nn.InstanceNorm2d),
ResBlock(nf, ksize=3, stride=1, norm=nn.InstanceNorm2d),
ResBlock(nf, ksize=3, stride=1, norm=nn.InstanceNorm2d),
ConvBlock(nn.Conv2d, nf, 4, ksize=3, stride=1, activation='none'))
def render(self, x):
"""Pass inputs for a single layer through UNet.
Parameters:
x (tensor) - - sampled texture concatenated with person IDs
Returns RGBA for the input layer and the final feature maps.
"""
skips = [x]
for i, layer in enumerate(self.encoder):
x = layer(x)
if i < 5:
skips.append(x)
for layer in self.decoder:
x = torch.cat((x, skips.pop()), 1)
x = layer(x)
rgba = self.final_rgba(x)
return rgba, x
def forward(self, uv_map, id_layers, uv_map_upsampled=None, crop_params=None):
"""Forward pass through layered neural renderer.
Steps:
1. Sample from the neural texture using uv_map
2. Input uv_map and id_layers into UNet
2a. If doing upsampling, then pass upsampled inputs and results through upsampling module
3. Composite RGBA outputs.
Parameters:
uv_map (tensor) - - UV maps for all layers, with shape [B, (2*L), H, W]
id_layers (tensor) - - person ID for all layers, with shape [B, L, H, W]
uv_map_upsampled (tensor) - - upsampled UV maps to input to upsampling module (if None, skip upsampling)
            crop_params (tuple) - - optional (starty, endy, startx, endx) crop applied to the upsampled outputs
"""
b_sz = uv_map.shape[0]
n_layers = uv_map.shape[1] // 2
texture = self.texture.repeat(b_sz, 1, 1, 1)
composite = None
layers = []
sampled_textures = []
for i in range(n_layers):
# Get RGBA for this layer.
uv_map_i = uv_map[:, i * 2:(i + 1) * 2, ...]
uv_map_i = uv_map_i.permute(0, 2, 3, 1)
sampled_texture = F.grid_sample(texture, uv_map_i, mode='bilinear', padding_mode='zeros')
inputs = torch.cat([sampled_texture, id_layers[:, i:i + 1]], 1)
rgba, last_feat = self.render(inputs)
if uv_map_upsampled is not None:
uv_map_up_i = uv_map_upsampled[:, i * 2:(i + 1) * 2, ...]
uv_map_up_i = uv_map_up_i.permute(0, 2, 3, 1)
sampled_texture_up = F.grid_sample(texture, uv_map_up_i, mode='bilinear', padding_mode='zeros')
id_layers_up = F.interpolate(id_layers[:, i:i + 1], size=sampled_texture_up.shape[-2:],
mode='bilinear')
inputs_up = torch.cat([sampled_texture_up, id_layers_up], 1)
upsampled_size = inputs_up.shape[-2:]
rgba = F.interpolate(rgba, size=upsampled_size, mode='bilinear')
last_feat = F.interpolate(last_feat, size=upsampled_size, mode='bilinear')
if crop_params is not None:
starty, endy, startx, endx = crop_params
rgba = rgba[:, :, starty:endy, startx:endx]
last_feat = last_feat[:, :, starty:endy, startx:endx]
inputs_up = inputs_up[:, :, starty:endy, startx:endx]
rgba_residual = self.upsample_block(torch.cat((rgba, inputs_up, last_feat), 1))
rgba += .01 * rgba_residual
rgba = torch.clamp(rgba, -1, 1)
sampled_texture = sampled_texture_up
# Update the composite with this layer's RGBA output
if composite is None:
composite = rgba
else:
alpha = rgba[:, 3:4] * .5 + .5
composite = rgba * alpha + composite * (1.0 - alpha)
layers.append(rgba)
sampled_textures.append(sampled_texture)
outputs = {
'reconstruction': composite,
'layers': torch.stack(layers, 1),
'sampled texture': sampled_textures, # for debugging
}
return outputs
| apache-2.0 | 2,665,017,518,263,958,500 | 44.386921 | 117 | 0.576514 | false | 3.506737 | false | false | false |
samirelanduk/pygtop | pygtop/targets.py | 1 | 14667 | """Contains target-specific objects and functions."""
from . import gtop
from . import pdb
from .interactions import Interaction, get_interaction_by_id
from .exceptions import NoSuchTargetError, NoSuchTargetFamilyError
from .shared import DatabaseLink, Gene, strip_html
def get_target_by_id(target_id):
"""Returns a Target object of the target with the given ID.
:param int target_id: The GtoP ID of the Target desired.
:rtype: :py:class:`Target`
:raises: :class:`.NoSuchTargetError`: if no such target exists in the database"""
if not isinstance(target_id, int):
raise TypeError("target_id must be int, not '%s'" % str(target_id))
json_data = gtop.get_json_from_gtop("targets/%i" % target_id)
if json_data:
return Target(json_data)
else:
raise NoSuchTargetError("There is no target with ID %i" % target_id)
def get_all_targets():
"""Returns a list of all targets in the Guide to PHARMACOLOGY database. This
can take a few seconds.
:returns: list of :py:class:`Target` objects"""
json_data = gtop.get_json_from_gtop("targets")
return [Target(t) for t in json_data]
def get_targets_by(criteria):
"""Get all targets which specify the criteria dictionary.
:param dict criteria: A dictionary of `field=value` pairs. See the\
`GtoP target web services page <http://www.guidetopharmacology.org/\
webServices.jsp#targets>`_ for key/value pairs which can be supplied.
:returns: list of :py:class:`Target` objects."""
if not isinstance(criteria, dict):
raise TypeError("criteria must be dict, not '%s'" % str(criteria))
search_string = "&".join(["%s=%s" % (key, criteria[key]) for key in criteria])
json_data = gtop.get_json_from_gtop("targets?%s" % search_string)
if json_data:
return [Target(t) for t in json_data]
else:
return []
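# Illustrative usage (a sketch only; calling these performs live GtoP
# web-service queries, and the criteria shown are just an example key/value
# pair of the kind documented by the GtoP target web services):
#
#   targets = get_targets_by({"type": "GPCR"})
#   target = get_target_by_id(1)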
def get_target_by_name(name):
"""Returns the target which matches the name given.
:param str name: The name of the target to search for. Note that synonyms \
will not be searched.
:rtype: :py:class:`Target`
:raises: :class:`.NoSuchTargetError`: if no such target exists in the database."""
if not isinstance(name, str):
raise TypeError("name must be str, not '%s'" % str(name))
targets = get_targets_by({"name": name})
if targets:
return targets[0]
else:
raise NoSuchTargetError("There is no target with name %s" % name)
def get_target_family_by_id(family_id):
"""Returns a TargetFamily object of the family with the given ID.
:param int family_id: The GtoP ID of the TargetFamily desired.
:rtype: :py:class:`TargetFamily`
:raises: :class:`.NoSuchTargetFamilyError`: if no such family exists in the database"""
if not isinstance(family_id, int):
raise TypeError("family_id must be int, not '%s'" % str(family_id))
json_data = gtop.get_json_from_gtop("targets/families/%i" % family_id)
if json_data:
return TargetFamily(json_data)
else:
raise NoSuchTargetFamilyError("There is no Target Family with ID %i" % family_id)
def get_all_target_families():
"""Returns a list of all target families in the Guide to PHARMACOLOGY database.
:returns: list of :py:class:`TargetFamily` objects"""
json_data = gtop.get_json_from_gtop("targets/families")
return [TargetFamily(f) for f in json_data]
class Target:
"""A Guide to PHARMACOLOGY target object.
:param json_data: A dictionary obtained from the web services."""
def __init__(self, json_data):
self.json_data = json_data
self._target_id = json_data["targetId"]
self._name = json_data["name"]
self._abbreviation = json_data["abbreviation"]
self._systematic_name = json_data["systematicName"]
self._target_type = json_data["type"]
self._family_ids = json_data["familyIds"]
self._subunit_ids = json_data["subunitIds"]
self._complex_ids = json_data["complexIds"]
def __repr__(self):
return "<Target %i (%s)>" % (self._target_id, self._name)
def target_id(self):
"""Returns the target's GtoP ID.
:rtype: int"""
return self._target_id
@strip_html
def name(self):
"""Returns the target's name.
:param bool strip_html: If ``True``, the name will have HTML entities stripped.
:rtype: str"""
return self._name
@strip_html
def abbreviation(self):
"""Returns the target's abbreviated name.
:param bool strip_html: If ``True``, the abbreviation will have HTML entities stripped.
:rtype: str"""
return self._abbreviation
@strip_html
def systematic_name(self):
"""Returns the target's systematic name.
:param bool strip_html: If ``True``, the name will have HTML entities stripped.
:rtype: str"""
return self._systematic_name
def target_type(self):
"""Returns the target's type.
:rtype: str"""
return self._target_type
def family_ids(self):
"""Returns the the family IDs of any families this target is a member of.
:returns: list of ``int``"""
return self._family_ids
def families(self):
"""Returns a list of all target families of which this target is a member.
:returns: list of :py:class:`TargetFamily` objects"""
return [get_target_family_by_id(i) for i in self._family_ids]
def subunit_ids(self):
"""Returns the the target IDs of all targets which are subunits of this
target.
:returns: list of ``int``"""
return self._subunit_ids
def subunits(self):
"""Returns a list of all targets which are subunits of this target.
:returns: list of :py:class:`Target` objects"""
return [get_target_by_id(id_) for id_ in self._subunit_ids]
def complex_ids(self):
"""Returns the the target IDs of all targets of which this target is a
subunit.
:returns: list of ``int``"""
return self._complex_ids
def complexes(self):
"""Returns a list of all targets of which this target is a subunit.
:returns: list of :py:class:`Target` objects"""
return [get_target_by_id(id_) for id_ in self._complex_ids]
@strip_html
def synonyms(self):
"""Returns any synonyms for this target.
:param bool strip_html: If ``True``, the synonyms will have HTML entities stripped.
:returns: list of str"""
return [synonym["name"] for synonym in self._get_synonym_json()]
def database_links(self, species=None):
"""Returns any database links for this target.
:param str species: If given, only links belonging to this species will be returned.
:returns: list of :class:`.DatabaseLink` objects."""
if species:
return [DatabaseLink(link_json) for link_json in self._get_database_json()
if link_json["species"] and link_json["species"].lower() == species.lower()]
else:
return [DatabaseLink(link_json) for link_json in self._get_database_json()]
def genes(self, species=None):
"""Returns any genes for this target.
:param str species: If given, only genes belonging to this species will be returned.
:returns: list of :class:`.Gene` objects."""
if species:
return [Gene(gene_json) for gene_json in self._get_gene_json()
if gene_json["species"] and gene_json["species"].lower() == species.lower()]
else:
return [Gene(gene_json) for gene_json in self._get_gene_json()]
def interactions(self, species=None):
"""Returns any interactions for this target.
:param str species: If given, only interactions belonging to this species will be returned.
:returns: list of :class:`.Interaction` objects."""
if species:
return [Interaction(interaction_json) for interaction_json in self._get_interactions_json()
if interaction_json["targetSpecies"] and interaction_json["targetSpecies"].lower() == species.lower()]
else:
return [Interaction(interaction_json) for interaction_json in self._get_interactions_json()]
get_interaction_by_id = get_interaction_by_id
"""Returns an Interaction object of a given ID belonging to the target.
:param int interaction_id: The interactions's ID.
:rtype: :py:class:`.Interaction`
:raises: :class:`.NoSuchInteractionError`: if no such interaction exists in the database."""
def ligands(self, species=None):
"""Returns any ligands that this target interacts with.
:param str species: If given, only ligands belonging to this species will be returned.
:returns: list of :class:`.DatabaseLink` objects."""
ligands = []
for interaction in self.interactions(species=species):
ligand = interaction.ligand()
if ligand not in ligands:
ligands.append(ligand)
return ligands
@pdb.ask_about_molecupy
def gtop_pdbs(self, species=None):
"""Returns a list of PDBs which the Guide to PHARMACOLOGY says contain
this target.
:param bool as_molecupy: Returns the PDBs as \
`molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
:returns: list of ``str`` PDB codes"""
if species is None:
return [pdb["pdbCode"] for pdb in self._get_pdb_json() if pdb["pdbCode"]]
else:
return [pdb["pdbCode"] for pdb in self._get_pdb_json()
if pdb["pdbCode"] and pdb["species"].lower() == species.lower()]
@pdb.ask_about_molecupy
def uniprot_pdbs(self, species=None):
"""Queries the RSCB PDB database with the targets's uniprot accessions.
:param bool as_molecupy: Returns the PDBs as \
`molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
:param str species: If given, only PDBs belonging to this species will be returned.
:returns: list of ``str`` PDB codes"""
uniprot_accessions = [
link.accession() for link in self.database_links(species=species)
if link.database() == "UniProtKB"
]
if uniprot_accessions:
results = pdb.query_rcsb_advanced("UpAccessionIdQuery", {
"accessionIdList": ",".join(uniprot_accessions)
})
return [result.split(":")[0] for result in results] if results else []
else:
return []
@pdb.ask_about_molecupy
def all_pdbs(self, species=None):
"""Get a list of PDB codes using all means available - annotated and
external.
:param bool as_molecupy: Returns the PDBs as \
`molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
:param str species: If given, only PDBs belonging to this species will be returned.
:returns: list of ``str`` PDB codes"""
return list(set(
self.gtop_pdbs(species=species) +
self.uniprot_pdbs(species=species)
))
def _get_synonym_json(self):
json_object = gtop.get_json_from_gtop(
"targets/%i/synonyms" % self._target_id
)
return json_object if json_object else []
def _get_database_json(self):
json_object = gtop.get_json_from_gtop(
"targets/%i/databaseLinks" % self._target_id
)
return json_object if json_object else []
def _get_gene_json(self):
json_object = gtop.get_json_from_gtop(
"targets/%i/geneProteinInformation" % self._target_id
)
return json_object if json_object else []
def _get_interactions_json(self):
json_object = gtop.get_json_from_gtop(
"targets/%i/interactions" % self._target_id
)
return json_object if json_object else []
def _get_pdb_json(self):
json_object = gtop.get_json_from_gtop(
"targets/%i/pdbStructure" % self._target_id
)
return json_object if json_object else []
class TargetFamily:
"""A Guide to PHARMACOLOGY target family object.
:param json_data: A dictionary obtained from the web services."""
def __init__(self, json_data):
self.json_data = json_data
self._family_id = json_data["familyId"]
self._name = json_data["name"]
self._target_ids = json_data["targetIds"]
self._parent_family_ids = json_data["parentFamilyIds"]
self._sub_family_ids = json_data["subFamilyIds"]
def __repr__(self):
return "<'%s' TargetFamily>" % self._name
def family_id(self):
"""Returns the family's GtoP ID.
:rtype: int"""
return self._family_id
@strip_html
def name(self):
"""Returns the family's name.
:param bool strip_html: If ``True``, the name will have HTML entities stripped.
:rtype: str"""
return self._name
def target_ids(self):
"""Returns the the target IDs of all targets in this family. Note that only
immediate children are shown - if a family has subfamilies then it will
not return any targets here - you must look in the sub-families.
:returns: list of ``int``"""
return self._target_ids
def targets(self):
"""Returns a list of all targets in this family. Note that only
immediate children are shown - if a family has subfamilies then it will
not return any targets here - you must look in the sub-families.
:returns: list of :py:class:`Target` objects"""
return [get_target_by_id(i) for i in self._target_ids]
def parent_family_ids(self):
"""Returns the the target IDs of all target families of which this
family is a member.
:returns: list of ``int``"""
return self._parent_family_ids
def parent_families(self):
"""Returns a list of all target families of which this family is a member.
:returns: list of :py:class:`TargetFamily` objects"""
return [get_target_family_by_id(i) for i in self._parent_family_ids]
def sub_family_ids(self):
"""Returns the the target IDs of all arget families which are a member
of this family.
:returns: list of ``int``"""
return self._sub_family_ids
def sub_families(self):
"""Returns a list of all target families which are a member of this family.
:returns: list of :py:class:`TargetFamily` objects"""
return [get_target_family_by_id(i) for i in self._sub_family_ids]
| mit | 6,307,949,579,907,811,000 | 30.678186 | 115 | 0.628281 | false | 3.83752 | false | false | false |
easyw/kicad-3d-models-in-freecad | cadquery/FCAD_script_generator/Fuse/main_generator_SMD.py | 1 | 13644 | # -*- coding: utf8 -*-
#!/usr/bin/python
#
# This is derived from a cadquery script for generating PDIP models in X3D format
#
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
# This is a parametric model generator script.
# Dimensions are from Microchip's Packaging Specification document
# DS00000049BY. Body drawing is the same as the QFP generator.
## requirements
## cadquery FreeCAD plugin
## https://github.com/jmwright/cadquery-freecad-module
## to run the script just do: freecad make_gwexport_fc.py modelName
## e.g. c:\freecad\bin\freecad make_gw_export_fc.py SOIC_8
## the script will generate STEP and VRML parametric models
## to be used with kicad StepUp script
#* These are a FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* *
#* cadquery script for generating QFP/SOIC/SSOP/TSSOP models in STEP AP214 *
#* Copyright (c) 2015 *
#* Maurice https://launchpad.net/~easyw *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
__title__ = "make chip Resistors 3D models"
__author__ = "maurice"
__Comment__ = 'make chip Resistors 3D models exported to STEP and VRML for Kicad StepUP script'
___ver___ = "1.3.2 10/02/2017"
# thanks to Frank Severinsen Shack for including vrml materials
# maui import cadquery as cq
# maui from Helpers import show
from math import tan, radians, sqrt
from collections import namedtuple
global save_memory
save_memory = False #reducing memory consuming for all generation params
import sys, os
import datetime
from datetime import datetime
sys.path.append("../_tools")
import exportPartToVRML as expVRML
import shaderColors
body_color_key = "white body"
body_color = shaderColors.named_colors[body_color_key].getDiffuseFloat()
pins_color_key = "metal grey pins"
pins_color = shaderColors.named_colors[pins_color_key].getDiffuseFloat()
top_color_key = "resistor black body"
top_color = shaderColors.named_colors[top_color_key].getDiffuseFloat()
# maui start
import FreeCAD, Draft, FreeCADGui
import ImportGui
import FreeCADGui as Gui
import yaml
#from Gui.Command import *
outdir=os.path.dirname(os.path.realpath(__file__)+"/../_3Dmodels")
scriptdir=os.path.dirname(os.path.realpath(__file__))
sys.path.append(outdir)
sys.path.append(scriptdir)
if FreeCAD.GuiUp:
from PySide import QtCore, QtGui
# Licence information of the generated models.
#################################################################################################
STR_licAuthor = "kicad StepUp"
STR_licEmail = "ksu"
STR_licOrgSys = "kicad StepUp"
STR_licPreProc = "OCC"
STR_licOrg = "FreeCAD"
LIST_license = ["",]
#################################################################################################
try:
# Gui.SendMsgToActiveView("Run")
#from Gui.Command import *
Gui.activateWorkbench("CadQueryWorkbench")
import cadquery as cq
from Helpers import show
# CadQuery Gui
except Exception as e: # catch *all* exceptions
print(e)
msg="missing CadQuery 0.3.0 or later Module!\r\n\r\n"
msg+="https://github.com/jmwright/cadquery-freecad-module/wiki\n"
reply = QtGui.QMessageBox.information(None,"Info ...",msg)
# maui end
# Import cad_tools
from cqToolsExceptions import *
# Import cad_tools
import cq_cad_tools
# Explicitly load all needed functions
from cq_cad_tools import FuseObjs_wColors, GetListOfObjects, restore_Main_Tools, \
exportSTEP, close_CQ_Example, exportVRML, saveFCdoc, z_RotateObject, Color_Objects, \
CutObjs_wColors, checkRequirements
#checking requirements
checkRequirements(cq)
# Sphinx workaround #1
try:
QtGui
except NameError:
QtGui = None
#
try:
close_CQ_Example(App, Gui)
except: # catch *all* exceptions
print("CQ 030 doesn't open example file")
def make_chip(model, all_params):
# dimensions for chip capacitors
length = all_params[model]['length'] # package length
width = all_params[model]['width'] # package width
height = all_params[model]['height'] # package height
pin_band = all_params[model]['pin_band'] # pin band
pin_thickness = all_params[model]['pin_thickness'] # pin thickness
if pin_thickness == 'auto':
pin_thickness = height/10.
edge_fillet = all_params[model]['edge_fillet'] # fillet of edges
if edge_fillet == 'auto':
edge_fillet = pin_thickness
# Create a 3D box based on the dimension variables above and fillet it
case = cq.Workplane("XY").workplane(offset=pin_thickness). \
box(length-2*pin_thickness, width, height-2*pin_thickness,centered=(True, True, False))
top = cq.Workplane("XY").workplane(offset=height-pin_thickness).box(length-2*pin_band, width, pin_thickness,centered=(True, True, False))
# Create a 3D box based on the dimension variables above and fillet it
pin1 = cq.Workplane("XY").box(pin_band, width, height)
pin1.edges("|Y").fillet(edge_fillet)
pin1=pin1.translate((-length/2+pin_band/2,0,height/2)).rotate((0,0,0), (0,0,1), 0)
pin2 = cq.Workplane("XY").box(pin_band, width, height)
pin2.edges("|Y").fillet(edge_fillet)
pin2=pin2.translate((length/2-pin_band/2,0,height/2)).rotate((0,0,0), (0,0,1), 0)
pins = pin1.union(pin2)
#body_copy.ShapeColor=result.ShapeColor
# extract case from pins
# case = case.cut(pins)
pins = pins.cut(case)
return (case, top, pins)
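# Illustrative parameter set for make_chip (a sketch only; real values are
# loaded from cq_parameters_SMD.yaml and the model name here is made up):
#
#   all_params = {'Fuse_1206': {'length': 3.2, 'width': 1.6, 'height': 0.7,
#                               'pin_band': 0.5, 'pin_thickness': 'auto',
#                               'edge_fillet': 'auto', 'rotation': 0}}
#   case, top, pins = make_chip('Fuse_1206', all_params)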
#import step_license as L
import add_license as Lic
if __name__ == "__main__" or __name__ == "main_generator_SMD":
destination_dir = '/Fuse.3dshapes'
expVRML.say(expVRML.__file__)
FreeCAD.Console.PrintMessage('\r\nRunning...\r\n')
full_path=os.path.realpath(__file__)
expVRML.say(full_path)
scriptdir=os.path.dirname(os.path.realpath(__file__))
expVRML.say(scriptdir)
sub_path = full_path.split(scriptdir)
expVRML.say(sub_path)
sub_dir_name =full_path.split(os.sep)[-2]
expVRML.say(sub_dir_name)
sub_path = full_path.split(sub_dir_name)[0]
expVRML.say(sub_path)
models_dir=sub_path+"_3Dmodels"
#expVRML.say(models_dir)
#stop
try:
with open('cq_parameters_SMD.yaml', 'r') as f:
all_params = yaml.load(f)
except yaml.YAMLError as exc:
print(exc)
from sys import argv
models = []
if len(sys.argv) < 3:
FreeCAD.Console.PrintMessage('No variant name is given! building:\n')
model_to_build = list(all_params.keys())[0]
print(model_to_build)
else:
model_to_build = sys.argv[2]
if model_to_build == "all":
models = all_params
save_memory=True
else:
models = [model_to_build]
for model in models:
if not model in all_params.keys():
print("Parameters for %s doesn't exist in 'all_params', skipping." % model)
continue
ModelName = model
CheckedModelName = ModelName.replace('.', '').replace('-', '_').replace('(', '').replace(')', '')
Newdoc = App.newDocument(CheckedModelName)
App.setActiveDocument(CheckedModelName)
Gui.ActiveDocument=Gui.getDocument(CheckedModelName)
        # make_chip() returns (case, top, pins); keep that order when unpacking
        body, top, pins = make_chip(model, all_params)
        show(body)
        show(top)
        show(pins)
doc = FreeCAD.ActiveDocument
objs = GetListOfObjects(FreeCAD, doc)
Color_Objects(Gui,objs[0],body_color)
Color_Objects(Gui,objs[1],top_color)
Color_Objects(Gui,objs[2],pins_color)
col_body=Gui.ActiveDocument.getObject(objs[0].Name).DiffuseColor[0]
col_top=Gui.ActiveDocument.getObject(objs[1].Name).DiffuseColor[0]
col_pin=Gui.ActiveDocument.getObject(objs[2].Name).DiffuseColor[0]
material_substitutions={
col_body[:-1]:body_color_key,
col_pin[:-1]:pins_color_key,
col_top[:-1]:top_color_key
}
expVRML.say(material_substitutions)
del objs
objs=GetListOfObjects(FreeCAD, doc)
FuseObjs_wColors(FreeCAD, FreeCADGui, doc.Name, objs[0].Name, objs[1].Name)
objs=GetListOfObjects(FreeCAD, doc)
FuseObjs_wColors(FreeCAD, FreeCADGui, doc.Name, objs[0].Name, objs[1].Name)
doc.Label = CheckedModelName
objs=GetListOfObjects(FreeCAD, doc)
objs[0].Label = CheckedModelName
restore_Main_Tools()
#rotate if required
rotation = all_params[model]['rotation']
if (rotation!=0):
z_RotateObject(doc, rotation)
#out_dir=destination_dir+all_params[variant].dest_dir_prefix+'/'
script_dir=os.path.dirname(os.path.realpath(__file__))
#models_dir=script_dir+"/../_3Dmodels"
expVRML.say(models_dir)
out_dir=models_dir+destination_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
#out_dir="./generated_qfp/"
# export STEP model
exportSTEP(doc, ModelName, out_dir)
if LIST_license[0]=="":
LIST_license=Lic.LIST_int_license
LIST_license.append("")
Lic.addLicenseToStep(out_dir+'/', ModelName+".step", LIST_license,\
STR_licAuthor, STR_licEmail, STR_licOrgSys, STR_licOrg, STR_licPreProc)
# scale and export Vrml model
scale=1/2.54
#exportVRML(doc,ModelName,scale,out_dir)
objs=GetListOfObjects(FreeCAD, doc)
expVRML.say("######################################################################")
expVRML.say(objs)
expVRML.say("######################################################################")
export_objects, used_color_keys = expVRML.determineColors(Gui, objs, material_substitutions)
export_file_name=out_dir+os.sep+ModelName+'.wrl'
colored_meshes = expVRML.getColoredMesh(Gui, export_objects , scale)
expVRML.writeVRMLFile(colored_meshes, export_file_name, used_color_keys, LIST_license)
# Save the doc in Native FC format
if save_memory == False:
Gui.SendMsgToActiveView("ViewFit")
Gui.activeDocument().activeView().viewAxometric()
# Save the doc in Native FC format
saveFCdoc(App, Gui, doc, ModelName,out_dir, False)
check_Model=True
if save_memory == True or check_Model==True:
doc=FreeCAD.ActiveDocument
FreeCAD.closeDocument(doc.Name)
step_path=os.path.join(out_dir,ModelName+u'.step')
if check_Model==True:
#ImportGui.insert(step_path,ModelName)
ImportGui.open(step_path)
docu = FreeCAD.ActiveDocument
if cq_cad_tools.checkUnion(docu) == True:
FreeCAD.Console.PrintMessage('step file is correctly Unioned\n')
else:
FreeCAD.Console.PrintError('step file is NOT Unioned\n')
stop
FC_majorV=int(FreeCAD.Version()[0])
FC_minorV=int(FreeCAD.Version()[1][0:FreeCAD.Version()[1].find('.')])
print("Minor version: "+str(FC_minorV))
if FC_majorV == 0 and FC_minorV >= 17:
for o in docu.Objects:
if hasattr(o,'Shape'):
chks=cq_cad_tools.checkBOP(o.Shape)
print ('chks ',chks)
print (cq_cad_tools.mk_string(o.Label))
if chks != True:
msg='shape \''+o.Name+'\' \''+cq_cad_tools.mk_string(o.Label)+'\' is INVALID!\n'
FreeCAD.Console.PrintError(msg)
FreeCAD.Console.PrintWarning(chks[0])
stop
else:
msg='shape \''+o.Name+'\' \''+cq_cad_tools.mk_string(o.Label)+'\' is valid\n'
FreeCAD.Console.PrintMessage(msg)
else:
FreeCAD.Console.PrintError('BOP check requires FC 0.17+\n')
# Save the doc in Native FC format
saveFCdoc(App, Gui, docu, ModelName,out_dir, False)
doc=FreeCAD.ActiveDocument
FreeCAD.closeDocument(doc.Name)
| gpl-2.0 | -5,632,057,004,259,708,000 | 38.547826 | 141 | 0.591469 | false | 3.632588 | false | false | false |
kalyptorisk/daversy | src/daversy/db/oracle/index.py | 1 | 3580 | from daversy.utils import *
from daversy.db.object import Index, IndexColumn
class IndexColumnBuilder(object):
""" Represents a builder for a column in an index. """
DbClass = IndexColumn
XmlTag = 'index-column'
Query = """
SELECT c.column_name, lower(c.descend) AS sort, i.index_name,
i.table_name, c.column_position AS position,
e.column_expression AS expression
FROM sys.user_indexes i, sys.user_ind_columns c,
sys.user_ind_expressions e
WHERE i.index_name = c.index_name
AND i.table_name = c.table_name
AND c.index_name = e.index_name (+)
AND c.column_position = e.column_position (+)
ORDER BY i.index_name, c.column_position
"""
PropertyList = odict(
('COLUMN_NAME', Property('name')),
('SORT', Property('sort')),
('EXPRESSION', Property('expression', exclude=True)),
('INDEX_NAME', Property('index-name', exclude=True)),
('TABLE_NAME', Property('table-name', exclude=True)),
('POSITION', Property('position', exclude=True)),
)
@staticmethod
def addToState(state, column):
table = state.tables.get(column['table-name'])
real = table and table.columns.get(column.name)
if column.expression and not real: # function-based columns have no name
column.name = column.expression
index = state.indexes.get(column['index-name'])
if index:
index.columns[column.name] = column
class IndexBuilder(object):
""" Represents a builder for a index on a table. """
DbClass = Index
XmlTag = 'index'
Query = """
SELECT i.index_name, i.table_name,
decode(i.uniqueness, 'UNIQUE', 'true', 'false') AS is_unique,
decode(i.index_type, 'BITMAP', 'true') AS is_bitmap,
DECODE(i.compression, 'ENABLED', i.prefix_length) AS "COMPRESS"
FROM sys.user_indexes i
WHERE i.index_type IN ('NORMAL', 'FUNCTION-BASED NORMAL', 'BITMAP')
ORDER BY i.index_name
"""
PropertyList = odict(
('INDEX_NAME', Property('name')),
('IS_UNIQUE', Property('unique')),
('IS_BITMAP', Property('bitmap')),
('TABLE_NAME', Property('table-name')),
('COMPRESS', Property('compress'))
)
@staticmethod
def addToState(state, index):
# ensure that the table exists and the index is not for a PK/UK
table = state.tables.get(index['table-name'])
if table:
if table.primary_keys.has_key(index.name) or table.unique_keys.has_key(index.name):
return
state.indexes[index.name] = index
@staticmethod
def isAllowed(state, index):
return state.tables.get(index['table-name'])
@staticmethod
def createSQL(index):
sql = "CREATE %(unique)s %(bitmap)s INDEX %(name)s ON %(table-name)s (\n" \
" %(column_sql)s\n)%(suffix)s\n/\n"
column_def = ["%(name)-30s %(sort)s" % column for column
in index.columns.values()]
column_sql = ",\n ".join(column_def)
unique = index.unique == 'true' and 'UNIQUE' or ''
bitmap = index.bitmap == 'true' and 'BITMAP' or ''
suffix = ''
if index.compress:
suffix = ' COMPRESS '+index.compress
return render(sql, index, unique=unique, bitmap=bitmap,
suffix=suffix, column_sql=column_sql)
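# For reference, createSQL() renders statements of roughly this shape
# (illustrative only; the index, table and column names are placeholders):
#
#   CREATE UNIQUE  INDEX IX_EMP_NAME ON EMPLOYEE (
#     LAST_NAME                      asc,
#     FIRST_NAME                     asc
#   )
#   /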
| gpl-2.0 | 1,298,916,235,426,265,300 | 35.907216 | 95 | 0.569832 | false | 3.824786 | false | false | false |
patrick91/pycon | backend/notifications/aws.py | 1 | 2184 | import typing
from urllib.parse import urljoin
import boto3
from django.conf import settings
from newsletters.exporter import Endpoint
from users.models import User
def _get_client():
return boto3.client("pinpoint", region_name="eu-central-1")
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i : i + n] # noqa
def send_endpoints_to_pinpoint(endpoints: typing.Iterable[Endpoint]):
    # the batch API only supports 100 endpoints at a time
endpoint_chunks = chunks(list(endpoints), 100)
for endpoints_chunk in endpoint_chunks:
data = {"Item": [endpoint.to_item() for endpoint in endpoints_chunk]}
client = _get_client()
client.update_endpoints_batch(
ApplicationId=settings.PINPOINT_APPLICATION_ID, EndpointBatchRequest=data
)
def send_notification(
template_name: str,
users: typing.List[User],
substitutions: typing.Dict[str, typing.List[str]],
):
client = _get_client()
client.send_users_messages(
ApplicationId=settings.PINPOINT_APPLICATION_ID,
SendUsersMessageRequest={
"MessageConfiguration": {
"EmailMessage": {
"FromAddress": "[email protected]",
"Substitutions": substitutions,
}
},
"TemplateConfiguration": {"EmailTemplate": {"Name": template_name}},
"Users": {str(user.id): {} for user in users},
},
)
# TODO: validate that it has been sent correctly
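# Illustrative call (a sketch only; the template name and substitution keys
# are hypothetical and must match a template configured in Pinpoint):
#
#   send_notification(
#       template_name="example-template",
#       users=[some_user],
#       substitutions={"example_key": ["example value"]},
#   )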
def send_comment_notification(comment):
submission = comment.submission
users: typing.Set[User] = set([submission.speaker])
# also send notification to all other commenters
users = users.union(set([comment.author for comment in submission.comments.all()]))
# don't notify current user
users.discard(comment.author)
if not users:
return
submission_url = urljoin(
settings.FRONTEND_URL, f"/en/submission/{submission.hashid}"
)
substitutions = {
"submission_url": [submission_url],
"submission": [submission.title],
}
send_notification("pycon-11-new-comment-on-submission", users, substitutions)
| mit | 7,613,541,312,755,594,000 | 27.736842 | 87 | 0.642399 | false | 4.044444 | false | false | false |
jcushman/pywb | pywb/rewrite/cookie_rewriter.py | 1 | 4929 | from Cookie import SimpleCookie, CookieError
#=================================================================
class WbUrlBaseCookieRewriter(object):
""" Base Cookie rewriter for wburl-based requests.
"""
def __init__(self, url_rewriter):
self.url_rewriter = url_rewriter
def rewrite(self, cookie_str, header='Set-Cookie'):
results = []
cookie = SimpleCookie()
try:
cookie.load(cookie_str)
except CookieError:
return results
for name, morsel in cookie.iteritems():
morsel = self.rewrite_cookie(name, morsel)
if morsel:
path = morsel.get('path')
if path:
inx = path.find(self.url_rewriter.rel_prefix)
if inx > 0:
morsel['path'] = path[inx:]
results.append((header, morsel.OutputString()))
return results
def _remove_age_opts(self, morsel):
# remove expires as it refers to archived time
if morsel.get('expires'):
del morsel['expires']
# don't use max-age, just expire at end of session
if morsel.get('max-age'):
del morsel['max-age']
# for now, also remove secure to avoid issues when
# proxying over plain http (TODO: detect https?)
if morsel.get('secure'):
del morsel['secure']
#=================================================================
class RemoveAllCookiesRewriter(WbUrlBaseCookieRewriter):
def rewrite(self, cookie_str, header='Set-Cookie'):
return []
#=================================================================
class MinimalScopeCookieRewriter(WbUrlBaseCookieRewriter):
"""
Attempt to rewrite cookies to minimal scope possible
If path present, rewrite path to current rewritten url only
If domain present, remove domain and set to path prefix
"""
def rewrite_cookie(self, name, morsel):
# if domain set, no choice but to expand cookie path to root
if morsel.get('domain'):
del morsel['domain']
morsel['path'] = self.url_rewriter.rel_prefix
# else set cookie to rewritten path
elif morsel.get('path'):
morsel['path'] = self.url_rewriter.rewrite(morsel['path'])
self._remove_age_opts(morsel)
return morsel
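# Illustrative usage (a sketch only; 'url_rewriter' stands for whatever
# rewriter object pywb supplies, exposing rel_prefix and rewrite() as used
# above):
#
#   rewriter = MinimalScopeCookieRewriter(url_rewriter)
#   headers = rewriter.rewrite('name=value; Path=/; Domain=.example.com')
#   # -> list of ('Set-Cookie', ...) tuples with the domain dropped and the
#   #    path narrowed to the rewrite prefix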
#=================================================================
class HostScopeCookieRewriter(WbUrlBaseCookieRewriter):
"""
    Attempt to rewrite cookies to the current host URL.
If path present, rewrite path to current host. Only makes sense in live
proxy or no redirect mode, as otherwise timestamp may change.
If domain present, remove domain and set to path prefix
"""
def rewrite_cookie(self, name, morsel):
# if domain set, expand cookie to host prefix
if morsel.get('domain'):
del morsel['domain']
morsel['path'] = self.url_rewriter.rewrite('/')
# set cookie to rewritten path
elif morsel.get('path'):
morsel['path'] = self.url_rewriter.rewrite(morsel['path'])
self._remove_age_opts(morsel)
return morsel
#=================================================================
class ExactPathCookieRewriter(WbUrlBaseCookieRewriter):
"""
Rewrite cookies only using exact path, useful for live rewrite
without a timestamp and to minimize cookie pollution
If path or domain present, simply remove
"""
def rewrite_cookie(self, name, morsel):
if morsel.get('domain'):
del morsel['domain']
# else set cookie to rewritten path
if morsel.get('path'):
del morsel['path']
self._remove_age_opts(morsel)
return morsel
#=================================================================
class RootScopeCookieRewriter(WbUrlBaseCookieRewriter):
"""
Sometimes it is necessary to rewrite cookies to root scope
in order to work across time boundaries and modifiers
This rewriter simply sets all cookies to be in the root
"""
def rewrite_cookie(self, name, morsel):
# get root path
morsel['path'] = self.url_rewriter.root_path
# remove domain
if morsel.get('domain'):
del morsel['domain']
self._remove_age_opts(morsel)
return morsel
#=================================================================
def get_cookie_rewriter(cookie_scope):
if cookie_scope == 'root':
return RootScopeCookieRewriter
elif cookie_scope == 'exact':
return ExactPathCookieRewriter
elif cookie_scope == 'host':
return HostScopeCookieRewriter
elif cookie_scope == 'removeall':
return RemoveAllCookiesRewriter
elif cookie_scope == 'coll':
return MinimalScopeCookieRewriter
else:
return HostScopeCookieRewriter
| gpl-3.0 | -1,616,446,436,483,166,000 | 31.006494 | 75 | 0.564212 | false | 4.526171 | false | false | false |
colloquium/spacewalk | client/solaris/smartpm/smart/interfaces/gtk/interactive.py | 1 | 31919 | #
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.transaction import INSTALL, REMOVE, UPGRADE, REINSTALL, KEEP, FIX
from smart.transaction import Transaction, ChangeSet, checkPackages
from smart.transaction import PolicyInstall, PolicyRemove, PolicyUpgrade
from smart.interfaces.gtk.channels import GtkChannels, GtkChannelSelector
from smart.interfaces.gtk.mirrors import GtkMirrors
from smart.interfaces.gtk.flags import GtkFlags
from smart.interfaces.gtk.priorities import GtkPriorities, GtkSinglePriority
from smart.interfaces.gtk.packageview import GtkPackageView
from smart.interfaces.gtk.packageinfo import GtkPackageInfo
from smart.interfaces.gtk.interface import GtkInterface
from smart.interfaces.gtk import getPixbuf
from smart.const import NEVER, VERSION
from smart.searcher import Searcher
from smart.cache import Package
from smart import *
import shlex, re
import fnmatch
import gtk
UI = """
<ui>
<menubar>
<menu action="file">
<menuitem action="update-selected-channels"/>
<menuitem action="update-channels"/>
<separator/>
<menuitem action="rebuild-cache"/>
<separator/>
<menuitem action="exec-changes"/>
<separator/>
<menuitem action="quit"/>
</menu>
<menu action="edit">
<menuitem action="undo"/>
<menuitem action="redo"/>
<menuitem action="clear-changes"/>
<separator/>
<menuitem action="upgrade-all"/>
<menuitem action="fix-all-problems"/>
<separator/>
<menuitem action="check-installed-packages"/>
<menuitem action="check-uninstalled-packages"/>
<menuitem action="check-all-packages"/>
<separator/>
<menuitem action="find"/>
<separator/>
<menuitem action="edit-channels"/>
<menuitem action="edit-mirrors"/>
<menuitem action="edit-flags"/>
<menuitem action="edit-priorities"/>
</menu>
<menu action="view">
<menuitem action="hide-non-upgrades"/>
<menuitem action="hide-installed"/>
<menuitem action="hide-uninstalled"/>
<menuitem action="hide-unmarked"/>
<menuitem action="hide-old"/>
<separator/>
<menuitem action="expand-all"/>
<menuitem action="collapse-all"/>
<separator/>
<menu action="tree-style">
<menuitem action="tree-style-groups"/>
<menuitem action="tree-style-channels"/>
<menuitem action="tree-style-channels-groups"/>
<menuitem action="tree-style-none"/>
</menu>
<separator/>
<menuitem action="summary-window"/>
<menuitem action="log-window"/>
</menu>
</menubar>
<toolbar>
<toolitem action="update-channels"/>
<separator/>
<toolitem action="exec-changes"/>
<separator/>
<toolitem action="undo"/>
<toolitem action="redo"/>
<toolitem action="clear-changes"/>
<separator/>
<toolitem action="upgrade-all"/>
<separator/>
<toolitem action="find"/>
</toolbar>
</ui>
"""
ACTIONS = [
("file", None, _("_File")),
("update-selected-channels", "gtk-refresh", _("Update _Selected Channels..."), None,
_("Update given channels"), "self.updateChannels(True)"),
("update-channels", "gtk-refresh", _("_Update Channels"), None,
_("Update channels"), "self.updateChannels()"),
("rebuild-cache", None, _("_Rebuild Cache"), None,
_("Reload package information"), "self.rebuildCache()"),
("exec-changes", "gtk-execute", _("_Execute Changes..."), "<control>c",
_("Apply marked changes"), "self.applyChanges()"),
("quit", "gtk-quit", _("_Quit"), "<control>q",
_("Quit application"), "gtk.main_quit()"),
("edit", None, _("_Edit")),
("undo", "gtk-undo", _("_Undo"), "<control>z",
_("Undo last change"), "self.undo()"),
("redo", "gtk-redo", _("_Redo"), "<control><shift>z",
_("Redo last undone change"), "self.redo()"),
("clear-changes", "gtk-clear", _("Clear Marked Changes"), None,
_("Clear all changes"), "self.clearChanges()"),
("check-installed-packages", None, _("Check Installed Packages..."), None,
_("Check installed packages"), "self.checkPackages()"),
("check-uninstalled-packages", None, _("Check Uninstalled Packages..."), None,
_("Check uninstalled packages"), "self.checkPackages(uninstalled=True)"),
("check-all-packages", None, _("Check All Packages..."), None,
_("Check all packages"), "self.checkPackages(all=True)"),
("upgrade-all", "gtk-go-up", _("Upgrade _All..."), None,
_("Upgrade all packages"), "self.upgradeAll()"),
("fix-all-problems", None, _("Fix All _Problems..."), None,
_("Fix all problems"), "self.fixAllProblems()"),
("find", "gtk-find", _("_Find..."), "<control>f",
_("Find packages"), "self.toggleSearch()"),
("edit-channels", None, _("_Channels"), None,
_("Edit channels"), "self.editChannels()"),
("edit-mirrors", None, _("_Mirrors"), None,
_("Edit mirrors"), "self.editMirrors()"),
("edit-flags", None, _("_Flags"), None,
_("Edit package flags"), "self.editFlags()"),
("edit-priorities", None, _("_Priorities"), None,
_("Edit package priorities"), "self.editPriorities()"),
("view", None, _("_View")),
("tree-style", None, _("_Tree Style")),
("expand-all", "gtk-open", _("_Expand All"), None,
_("Expand all items in the tree"), "self._pv.getTreeView().expand_all()"),
("collapse-all", "gtk-close", _("_Collapse All"), None,
_("Collapse all items in the tree"), "self._pv.getTreeView().collapse_all()"),
("summary-window", None, _("_Summary Window"), "<control>s",
_("Show summary window"), "self.showChanges()"),
("log-window", None, _("_Log Window"), None,
_("Show log window"), "self._log.show()"),
]
def compileActions(actions, globals):
newactions = []
for action in actions:
if len(action) > 5:
action = list(action)
code = compile(action[5], "<callback>", "exec")
def callback(action, code=code, globals=globals):
globals["action"] = action
exec code in globals
action[5] = callback
newactions.append(tuple(action))
return newactions
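# Illustrative use of compileActions (a sketch only): 6-element ACTIONS tuples
# get their callback strings compiled and wrapped so the result can be passed
# straight to gtk.ActionGroup.add_actions(), e.g.:
#
#   actions = gtk.ActionGroup("Actions")
#   actions.add_actions(compileActions(ACTIONS, {"self": iface, "gtk": gtk}))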
class GtkInteractiveInterface(GtkInterface):
def __init__(self, ctrl):
GtkInterface.__init__(self, ctrl)
self._changeset = None
self._window = gtk.Window()
self._window.set_title("Smart Package Manager %s" % VERSION)
self._window.set_position(gtk.WIN_POS_CENTER)
self._window.set_geometry_hints(min_width=640, min_height=480)
self._window.connect("destroy", lambda x: gtk.main_quit())
self._log.set_transient_for(self._window)
self._progress.set_transient_for(self._window)
self._hassubprogress.set_transient_for(self._window)
self._watch = gtk.gdk.Cursor(gtk.gdk.WATCH)
self._undo = []
self._redo = []
self._topvbox = gtk.VBox()
self._topvbox.show()
self._window.add(self._topvbox)
globals = {"self": self, "gtk": gtk}
self._actions = gtk.ActionGroup("Actions")
self._actions.add_actions(compileActions(ACTIONS, globals))
self._filters = {}
for name, label in [("hide-non-upgrades", _("Hide Non-upgrades")),
("hide-installed", _("Hide Installed")),
("hide-uninstalled", _("Hide Uninstalled")),
("hide-unmarked", _("Hide Unmarked")),
("hide-old", _("Hide Old"))]:
action = gtk.ToggleAction(name, label, "", "")
action.connect("toggled", lambda x, y: self.toggleFilter(y), name)
self._actions.add_action(action)
treestyle = sysconf.get("package-tree")
lastaction = None
for name, label in [("groups", _("Groups")),
("channels", _("Channels")),
("channels-groups", _("Channels & Groups")),
("none", _("None"))]:
action = gtk.RadioAction("tree-style-"+name, label, "", "", 0)
if name == treestyle:
action.set_active(True)
if lastaction:
action.set_group(lastaction)
lastaction = action
action.connect("toggled", lambda x, y: self.setTreeStyle(y), name)
self._actions.add_action(action)
self._ui = gtk.UIManager()
self._ui.insert_action_group(self._actions, 0)
self._ui.add_ui_from_string(UI)
self._menubar = self._ui.get_widget("/menubar")
self._topvbox.pack_start(self._menubar, False)
self._toolbar = self._ui.get_widget("/toolbar")
self._toolbar.set_style(gtk.TOOLBAR_ICONS)
self._topvbox.pack_start(self._toolbar, False)
self._window.add_accel_group(self._ui.get_accel_group())
self._execmenuitem = self._ui.get_action("/menubar/file/exec-changes")
self._execmenuitem.set_property("sensitive", False)
self._clearmenuitem = self._ui.get_action("/menubar/edit/clear-changes")
self._clearmenuitem.set_property("sensitive", False)
self._undomenuitem = self._ui.get_action("/menubar/edit/undo")
self._undomenuitem.set_property("sensitive", False)
self._redomenuitem = self._ui.get_action("/menubar/edit/redo")
self._redomenuitem.set_property("sensitive", False)
# Search bar
self._searchbar = gtk.Alignment()
self._searchbar.set(0, 0, 1, 1)
self._searchbar.set_padding(3, 3, 0, 0)
self._topvbox.pack_start(self._searchbar, False)
searchvp = gtk.Viewport()
searchvp.set_shadow_type(gtk.SHADOW_OUT)
searchvp.show()
self._searchbar.add(searchvp)
searchtable = gtk.Table(1, 1)
searchtable.set_row_spacings(5)
searchtable.set_col_spacings(5)
searchtable.set_border_width(5)
searchtable.show()
searchvp.add(searchtable)
label = gtk.Label(_("Search:"))
label.show()
searchtable.attach(label, 0, 1, 0, 1, 0, 0)
self._searchentry = gtk.Entry()
self._searchentry.connect("activate", lambda x: self.refreshPackages())
self._searchentry.show()
searchtable.attach(self._searchentry, 1, 2, 0, 1)
button = gtk.Button()
button.set_relief(gtk.RELIEF_NONE)
button.connect("clicked", lambda x: self.refreshPackages())
button.show()
searchtable.attach(button, 2, 3, 0, 1, 0, 0)
image = gtk.Image()
image.set_from_stock("gtk-find", gtk.ICON_SIZE_BUTTON)
image.show()
button.add(image)
align = gtk.Alignment()
align.set(1, 0, 0, 0)
align.set_padding(0, 0, 10, 0)
align.show()
searchtable.attach(align, 3, 4, 0, 1, gtk.FILL, gtk.FILL)
button = gtk.Button()
button.set_size_request(20, 20)
button.set_relief(gtk.RELIEF_NONE)
button.connect("clicked", lambda x: self.toggleSearch())
button.show()
align.add(button)
image = gtk.Image()
image.set_from_stock("gtk-close", gtk.ICON_SIZE_MENU)
image.show()
button.add(image)
hbox = gtk.HBox()
hbox.set_spacing(10)
hbox.show()
searchtable.attach(hbox, 1, 2, 1, 2)
self._searchname = gtk.RadioButton(None, _("Automatic"))
self._searchname.set_active(True)
self._searchname.connect("clicked", lambda x: self.refreshPackages())
self._searchname.show()
hbox.pack_start(self._searchname, False)
self._searchdesc = gtk.RadioButton(self._searchname, _("Description"))
self._searchdesc.connect("clicked", lambda x: self.refreshPackages())
self._searchdesc.show()
hbox.pack_start(self._searchdesc, False)
# Packages and information
self._vpaned = gtk.VPaned()
self._vpaned.show()
self._topvbox.pack_start(self._vpaned)
self._pv = GtkPackageView()
self._pv.show()
self._vpaned.pack1(self._pv, True)
self._pi = GtkPackageInfo()
self._pi.show()
self._pv.connect("package_selected",
lambda x, y: self._pi.setPackage(y))
self._pv.connect("package_activated",
lambda x, y: self.actOnPackages(y))
self._pv.connect("package_popup", self.packagePopup)
self._vpaned.pack2(self._pi, False)
self._status = gtk.Statusbar()
self._status.show()
self._topvbox.pack_start(self._status, False)
def showStatus(self, msg):
self._status.pop(0)
self._status.push(0, msg)
while gtk.events_pending():
gtk.main_iteration()
def hideStatus(self):
self._status.pop(0)
while gtk.events_pending():
gtk.main_iteration()
def run(self, command=None, argv=None):
self.setCatchExceptions(True)
self._window.set_icon(getPixbuf("smart"))
self._window.show()
self._ctrl.reloadChannels()
self._changeset = ChangeSet(self._ctrl.getCache())
self._pi.setChangeSet(self._changeset)
self._progress.hide()
self.refreshPackages()
gtk.main()
self.setCatchExceptions(False)
# Non-standard interface methods:
def getChangeSet(self):
return self._changeset
def updateChannels(self, selected=False, channels=None):
if selected:
aliases = GtkChannelSelector().show()
channels = [channel for channel in self._ctrl.getChannels()
if channel.getAlias() in aliases]
if not channels:
return
state = self._changeset.getPersistentState()
self._ctrl.reloadChannels(channels, caching=NEVER)
self._changeset.setPersistentState(state)
self.refreshPackages()
def rebuildCache(self):
state = self._changeset.getPersistentState()
self._ctrl.reloadChannels()
self._changeset.setPersistentState(state)
self.refreshPackages()
def applyChanges(self):
transaction = Transaction(self._ctrl.getCache(),
changeset=self._changeset)
if self._ctrl.commitTransaction(transaction):
del self._undo[:]
del self._redo[:]
self._redomenuitem.set_property("sensitive", False)
self._undomenuitem.set_property("sensitive", False)
self._changeset.clear()
self._ctrl.reloadChannels()
self.refreshPackages()
self.changedMarks()
self._progress.hide()
def clearChanges(self):
self.saveUndo()
self._changeset.clear()
self.changedMarks()
def showChanges(self):
return self._changes.showChangeSet(self._changeset)
def toggleFilter(self, filter):
if filter in self._filters:
del self._filters[filter]
else:
self._filters[filter] = True
self.refreshPackages()
def upgradeAll(self):
transaction = Transaction(self._ctrl.getCache())
transaction.setState(self._changeset)
for pkg in self._ctrl.getCache().getPackages():
if pkg.installed:
transaction.enqueue(pkg, UPGRADE)
transaction.setPolicy(PolicyUpgrade)
transaction.run()
changeset = transaction.getChangeSet()
if changeset != self._changeset:
if self.confirmChange(self._changeset, changeset):
self.saveUndo()
self._changeset.setState(changeset)
self.changedMarks()
if self.askYesNo(_("Apply marked changes now?"), True):
self.applyChanges()
else:
self.showStatus(_("No interesting upgrades available!"))
def actOnPackages(self, pkgs, op=None):
cache = self._ctrl.getCache()
transaction = Transaction(cache, policy=PolicyInstall)
transaction.setState(self._changeset)
changeset = transaction.getChangeSet()
if op is None:
if not [pkg for pkg in pkgs if pkg not in changeset]:
op = KEEP
else:
for pkg in pkgs:
if not pkg.installed:
op = INSTALL
break
else:
op = REMOVE
if op is REMOVE:
transaction.setPolicy(PolicyRemove)
policy = transaction.getPolicy()
for pkg in pkgs:
if op is KEEP:
transaction.enqueue(pkg, op)
elif op in (REMOVE, REINSTALL, FIX):
if pkg.installed:
transaction.enqueue(pkg, op)
if op is REMOVE:
for _pkg in cache.getPackages(pkg.name):
if not _pkg.installed:
policy.setLocked(_pkg, True)
elif op is INSTALL:
if not pkg.installed:
transaction.enqueue(pkg, op)
transaction.run()
if op is FIX:
expected = 0
else:
expected = 1
if self.confirmChange(self._changeset, changeset, expected):
self.saveUndo()
self._changeset.setState(changeset)
self.changedMarks()
def packagePopup(self, packageview, pkgs, event):
menu = gtk.Menu()
hasinstalled = bool([pkg for pkg in pkgs if pkg.installed
and self._changeset.get(pkg) is not REMOVE])
hasnoninstalled = bool([pkg for pkg in pkgs if not pkg.installed
and self._changeset.get(pkg) is not INSTALL])
image = gtk.Image()
image.set_from_pixbuf(getPixbuf("package-install"))
item = gtk.ImageMenuItem(_("Install"))
item.set_image(image)
item.connect("activate", lambda x: self.actOnPackages(pkgs, INSTALL))
if not hasnoninstalled:
item.set_sensitive(False)
menu.append(item)
image = gtk.Image()
image.set_from_pixbuf(getPixbuf("package-reinstall"))
item = gtk.ImageMenuItem(_("Reinstall"))
item.set_image(image)
item.connect("activate", lambda x: self.actOnPackages(pkgs, REINSTALL))
if not hasinstalled:
item.set_sensitive(False)
menu.append(item)
image = gtk.Image()
image.set_from_pixbuf(getPixbuf("package-remove"))
item = gtk.ImageMenuItem(_("Remove"))
item.set_image(image)
item.connect("activate", lambda x: self.actOnPackages(pkgs, REMOVE))
if not hasinstalled:
item.set_sensitive(False)
menu.append(item)
image = gtk.Image()
if not hasinstalled:
image.set_from_pixbuf(getPixbuf("package-available"))
else:
image.set_from_pixbuf(getPixbuf("package-installed"))
item = gtk.ImageMenuItem(_("Keep"))
item.set_image(image)
item.connect("activate", lambda x: self.actOnPackages(pkgs, KEEP))
if not [pkg for pkg in pkgs if pkg in self._changeset]:
item.set_sensitive(False)
menu.append(item)
image = gtk.Image()
image.set_from_pixbuf(getPixbuf("package-broken"))
item = gtk.ImageMenuItem(_("Fix problems"))
item.set_image(image)
item.connect("activate", lambda x: self.actOnPackages(pkgs, FIX))
if not hasinstalled:
item.set_sensitive(False)
menu.append(item)
inconsistent = False
thislocked = None
alllocked = None
names = pkgconf.getFlagTargets("lock")
if [pkg for pkg in pkgs if pkg in self._changeset]:
inconsistent = True
else:
for pkg in pkgs:
if (names and pkg.name in names and
("=", pkg.version) in names[pkg.name]):
newthislocked = True
newalllocked = len(names[pkg.name]) > 1
else:
newthislocked = False
newalllocked = pkgconf.testFlag("lock", pkg)
if (thislocked is not None and thislocked != newthislocked or
alllocked is not None and alllocked != newalllocked):
inconsistent = True
break
thislocked = newthislocked
alllocked = newalllocked
image = gtk.Image()
if thislocked:
item = gtk.ImageMenuItem(_("Unlock this version"))
if not hasnoninstalled:
image.set_from_pixbuf(getPixbuf("package-installed"))
else:
image.set_from_pixbuf(getPixbuf("package-available"))
def unlock_this(x):
for pkg in pkgs:
pkgconf.clearFlag("lock", pkg.name, "=", pkg.version)
self._pv.queue_draw()
self._pi.setPackage(pkgs[0])
item.connect("activate", unlock_this)
else:
item = gtk.ImageMenuItem(_("Lock this version"))
if not hasnoninstalled:
image.set_from_pixbuf(getPixbuf("package-installed-locked"))
else:
image.set_from_pixbuf(getPixbuf("package-available-locked"))
def lock_this(x):
for pkg in pkgs:
pkgconf.setFlag("lock", pkg.name, "=", pkg.version)
self._pv.queue_draw()
self._pi.setPackage(pkgs[0])
item.connect("activate", lock_this)
item.set_image(image)
if inconsistent:
item.set_sensitive(False)
menu.append(item)
image = gtk.Image()
if alllocked:
item = gtk.ImageMenuItem(_("Unlock all versions"))
if not hasnoninstalled:
image.set_from_pixbuf(getPixbuf("package-installed"))
else:
image.set_from_pixbuf(getPixbuf("package-available"))
def unlock_all(x):
for pkg in pkgs:
pkgconf.clearFlag("lock", pkg.name)
self._pv.queue_draw()
self._pi.setPackage(pkgs[0])
item.connect("activate", unlock_all)
else:
item = gtk.ImageMenuItem(_("Lock all versions"))
if not hasnoninstalled:
image.set_from_pixbuf(getPixbuf("package-installed-locked"))
else:
image.set_from_pixbuf(getPixbuf("package-available-locked"))
def lock_all(x):
for pkg in pkgs:
pkgconf.setFlag("lock", pkg.name)
self._pv.queue_draw()
self._pi.setPackage(pkgs[0])
item.connect("activate", lock_all)
item.set_image(image)
if inconsistent:
item.set_sensitive(False)
menu.append(item)
item = gtk.MenuItem(_("Priority"))
def priority(x):
GtkSinglePriority(self._window).show(pkgs[0])
self._pi.setPackage(pkgs[0])
item.connect("activate", priority)
if len(pkgs) != 1:
item.set_sensitive(False)
menu.append(item)
menu.show_all()
menu.popup(None, None, None, event.button, event.time)
def checkPackages(self, all=False, uninstalled=False):
cache = self._ctrl.getCache()
if checkPackages(cache, cache.getPackages(), report=True,
all=all, uninstalled=uninstalled):
self.info(_("All checked packages have correct relations."))
def fixAllProblems(self):
self.actOnPackages([pkg for pkg in self._ctrl.getCache().getPackages()
if pkg.installed], FIX)
def undo(self):
if self._undo:
state = self._undo.pop(0)
if not self._undo:
self._undomenuitem.set_property("sensitive", False)
self._redo.insert(0, self._changeset.getPersistentState())
self._redomenuitem.set_property("sensitive", True)
self._changeset.setPersistentState(state)
self.changedMarks()
def redo(self):
if self._redo:
state = self._redo.pop(0)
if not self._redo:
self._redomenuitem.set_property("sensitive", False)
self._undo.insert(0, self._changeset.getPersistentState())
self._undomenuitem.set_property("sensitive", True)
self._changeset.setPersistentState(state)
self.changedMarks()
def saveUndo(self):
self._undo.insert(0, self._changeset.getPersistentState())
del self._redo[:]
del self._undo[20:]
self._undomenuitem.set_property("sensitive", True)
self._redomenuitem.set_property("sensitive", False)
def setTreeStyle(self, mode):
if mode != sysconf.get("package-tree"):
sysconf.set("package-tree", mode)
self.refreshPackages()
def editChannels(self):
if GtkChannels(self._window).show():
self.rebuildCache()
def editMirrors(self):
GtkMirrors(self._window).show()
def editFlags(self):
GtkFlags(self._window).show()
def editPriorities(self):
GtkPriorities(self._window).show()
def setBusy(self, flag):
if flag:
self._window.window.set_cursor(self._watch)
while gtk.events_pending():
gtk.main_iteration()
else:
self._window.window.set_cursor(None)
def changedMarks(self):
if "hide-unmarked" in self._filters:
self.refreshPackages()
else:
self._pv.queue_draw()
self._execmenuitem.set_property("sensitive", bool(self._changeset))
self._clearmenuitem.set_property("sensitive", bool(self._changeset))
def toggleSearch(self):
visible = not self._searchbar.get_property('visible')
self._searchbar.set_property('visible', visible)
self.refreshPackages()
if visible:
self._searchentry.grab_focus()
def refreshPackages(self):
if not self._ctrl:
return
self.setBusy(True)
tree = sysconf.get("package-tree", "groups")
ctrl = self._ctrl
changeset = self._changeset
if self._searchbar.get_property("visible"):
searcher = Searcher()
dosearch = False
if self._searchdesc.get_active():
text = self._searchentry.get_text().strip()
if text:
dosearch = True
searcher.addDescription(text)
searcher.addSummary(text)
else:
try:
tokens = shlex.split(self._searchentry.get_text())
except ValueError:
pass
else:
if tokens:
dosearch = True
for tok in tokens:
searcher.addAuto(tok)
packages = []
if dosearch:
self._ctrl.getCache().search(searcher)
for ratio, obj in searcher.getResults():
if isinstance(obj, Package):
packages.append(obj)
else:
packages.extend(obj.packages)
else:
packages = ctrl.getCache().getPackages()
filters = self._filters
if filters:
if "hide-non-upgrades" in filters:
newpackages = {}
for pkg in packages:
if pkg.installed:
upgpkgs = {}
try:
for prv in pkg.provides:
for upg in prv.upgradedby:
for upgpkg in upg.packages:
if upgpkg.installed:
raise StopIteration
upgpkgs[upgpkg] = True
except StopIteration:
pass
else:
newpackages.update(upgpkgs)
packages = newpackages.keys()
if "hide-uninstalled" in filters:
packages = [x for x in packages if x.installed]
if "hide-unmarked" in filters:
packages = [x for x in packages if x in changeset]
if "hide-installed" in filters:
packages = [x for x in packages if not x.installed]
if "hide-old" in filters:
packages = pkgconf.filterByFlag("new", packages)
if tree == "groups":
groups = {}
done = {}
for pkg in packages:
lastgroup = None
for loader in pkg.loaders:
info = loader.getInfo(pkg)
group = info.getGroup()
donetuple = (group, pkg)
if donetuple not in done:
done[donetuple] = True
if group in groups:
groups[group].append(pkg)
else:
groups[group] = [pkg]
elif tree == "channels":
groups = {}
done = {}
for pkg in packages:
for loader in pkg.loaders:
channel = loader.getChannel()
group = channel.getName() or channel.getAlias()
donetuple = (group, pkg)
if donetuple not in done:
done[donetuple] = True
if group in groups:
groups[group].append(pkg)
else:
groups[group] = [pkg]
elif tree == "channels-groups":
groups = {}
done = {}
for pkg in packages:
for loader in pkg.loaders:
channel = loader.getChannel()
group = channel.getName() or channel.getAlias()
subgroup = loader.getInfo(pkg).getGroup()
donetuple = (group, subgroup, pkg)
if donetuple not in done:
done[donetuple] = True
if group in groups:
if subgroup in groups[group]:
groups[group][subgroup].append(pkg)
else:
groups[group][subgroup] = [pkg]
else:
groups[group] = {subgroup: [pkg]}
else:
groups = packages
self._pv.setPackages(groups, changeset, keepstate=True)
self.setBusy(False)
# vim:ts=4:sw=4:et
| gpl-2.0 | -5,670,332,782,027,367,000 | 37.135006 | 88 | 0.556534 | false | 4.217627 | false | false | false |
qbuat/tauperf | old/eff_tools/DecisionTool.py | 1 | 2103 | from ROOT import TMVA
from array import array
from rootpy.extern import ordereddict
import logging
log = logging.getLogger('DecisionTool')
class DecisionTool:
def __init__(self,tree,name,weight_file,var_file,cutval):
""" A class to handle the decision of the BDT"""
TMVA.Tools.Instance()
self._reader = TMVA.Reader()
self._tree = tree
self._variables = {}
self._cutvalue = -1
self._bdtscore = -9999
self._name = name
self._weight_file = weight_file
self._var_file = var_file
self.SetReader(self._name,self._weight_file,self._var_file)
self.SetCutValue(cutval)
# --------------------------
def SetCutValue(self,val):
self._cutvalue = val
# --------------------------------------------
def SetReader(self,name,weight_file,var_file):
self._variables = self.InitVariables(var_file)
for varName, var in self._variables.iteritems():
self._reader.AddVariable(varName,var[1])
self._reader.BookMVA(name,weight_file)
# ----------------------
    def InitVariables(self, var_file):
        variables = ordereddict.OrderedDict()
        # Each non-comment line of the variables file is "mva_name,branch_name";
        # keep a single-element float array per variable for the TMVA reader.
        with open(var_file, 'r') as var_list:
            for line in var_list:
                if "#" in line:
                    continue
                words = line.strip().split(',')
                variables[words[0]] = [words[1], array('f', [0.])]
        return variables
# -------------------------------------------------
def BDTScore(self):
for varName, var in self._variables.iteritems():
var[1][0] = getattr(self._tree,var[0])
log.info('{0}: {1}'.format(varName, var[1][0]))
return self._reader.EvaluateMVA(self._name)
# --------------------------------------------
def Decision(self):
self._bdtscore = self.BDTScore()
if self._bdtscore>=self._cutvalue:
return True
else:
return False
# ----------------------
def GetBDTScore(self):
self._bdtscore = self.BDTScore()
return self._bdtscore
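# Minimal usage sketch (assumptions: "tau_tree" is a ROOT TTree whose branch names
# appear in the second column of the variables file, and the weight/variable file
# paths below are placeholders, not part of this module). Each line of the
# variables file is "mva_name,branch_name".
def _example_decision(tau_tree):
    tool = DecisionTool(tau_tree, 'BDT',
                        'weights/TMVAClassification_BDT.weights.xml',
                        'variables.txt', cutval=0.5)
    return tool.Decision()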
| gpl-3.0 | -3,456,458,913,760,876,500 | 30.863636 | 67 | 0.514503 | false | 4.021033 | false | false | false |
bioconda/bioconda-utils | bioconda_utils/bot/chat.py | 1 | 5842 | """
Chat with the bot via Gitter
"""
import asyncio
import logging
from typing import Any, Dict, List
import aiohttp
from .. import gitter
from ..gitter import AioGitterAPI
from .commands import command_routes
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
"""
https://webhooks.gitter.im/e/b9e5fad23b9cf034879083a
POST
{ message: 'message', level='error|normal' }
"""
class GitterListener:
"""Listens to messages in a Gitter chat room
Args:
app: Web Server Application
api: Gitter API object
rooms: Map containing rooms and their respective github user/repo
"""
def __init__(self, app: aiohttp.web.Application, token: str, rooms: Dict[str, str],
session: aiohttp.ClientSession, ghappapi) -> None:
self.rooms = rooms
self._ghappapi = ghappapi
self._api = AioGitterAPI(app['client_session'], token)
self._user: gitter.User = None
self._tasks: List[Any] = []
self._session = session
app.on_startup.append(self.start)
app.on_shutdown.append(self.shutdown)
def __str__(self) -> str:
return f"{self.__class__.__name__}"
async def start(self, app: aiohttp.web.Application) -> None:
"""Start listeners"""
self._user = await self._api.get_user()
logger.debug("%s: User Info: %s", self, self._user)
for room in await self._api.list_rooms():
logger.debug("%s: Room Info: %s", self, room)
logger.debug("%s: Groups Info: %s", self, await self._api.list_groups())
self._tasks = [app.loop.create_task(self.listen(room))
for room in self.rooms]
async def shutdown(self, _app: aiohttp.web.Application) -> None:
"""Send cancel signal to listener"""
logger.info("%s: Shutting down listeners", self)
for task in self._tasks:
task.cancel()
for task in self._tasks:
await task
logger.info("%s: Shut down all listeners", self)
async def listen(self, room_name: str) -> None:
"""Main run loop"""
try:
user, repo = self.rooms[room_name].split('/')
logger.error("Listening in %s for repo %s/%s", room_name, user, repo)
message = None
while True:
try:
room = await self._api.get_room(room_name)
logger.info("%s: joining %s", self, room_name)
await self._api.join_room(self._user, room)
logger.info("%s: listening in %s", self, room_name)
async for message in self._api.iter_chat(room):
# getting a new ghapi object for every message because our
# creds time out. Ideally, the api class would take care of that.
ghapi = await self._ghappapi.get_github_api(False, user, repo)
await self.handle_msg(room, message, ghapi)
# on timeouts, we just run log into the room again
except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
pass
# http errors just get logged
except aiohttp.ClientResponseError as exc:
logger.exception("HTTP Error Code %s while listening to room %s",
exc.code, room_name)
# asyncio cancellation needs to be passed up
except asyncio.CancelledError: # pylint: disable=try-except-raise
raise
# the rest, we just log so that we remain online after an error
except Exception: # pylint: disable=broad-except
logger.exception("Unexpected exception caught. Last message: '%s'", message)
await asyncio.sleep(1)
except asyncio.CancelledError:
logger.error("%s: stopped listening in %s", self, room_name)
# we need a new session here as the one we got passed might have been
# closed already when we get cancelled
async with aiohttp.ClientSession() as session:
self._api._session = session
await self._api.leave_room(self._user, room)
logger.error("%s: left room %s", self, room_name)
async def handle_msg(self, room: gitter.Room, message: gitter.Message, ghapi) -> None:
"""Parse Gitter message and dispatch via command_routes"""
await self._api.mark_as_read(self._user, room, [message.id])
if self._user.id not in (m.userId for m in message.mentions):
if self._user.username.lower() in (m.screenName.lower() for m in message.mentions):
await self._api.send_message(room, "@%s - are you talking to me?",
message.fromUser.username)
return
        # str.lstrip() removes a *set* of characters rather than a prefix, so
        # strip the leading "@username" mention explicitly.
        text = message.text.strip()
        mention = '@' + self._user.username
        if not text.startswith(mention):
            await self._api.send_message(room, "Hmm? Someone talking about me?",
                                         message.fromUser.username)
            return
        command = text[len(mention):].strip()
cmd, *args = command.split()
issue_number = None
try:
if args[-1][0] == '#':
issue_number = int(args[-1][1:])
args.pop()
except (ValueError, IndexError):
pass
response = await command_routes.dispatch(cmd.lower(), ghapi, issue_number,
message.fromUser.username, *args)
if response:
await self._api.send_message(room, "@%s: %s", message.fromUser.username, response)
else:
await self._api.send_message(room, "@%s: command failed", message.fromUser.username)
| mit | 7,395,101,836,971,723,000 | 40.432624 | 96 | 0.566587 | false | 4.181818 | false | false | false |
reshanie/roblox.py | roblox/asset.py | 1 | 12222 | """
Copyright (c) 2017 James Patrick Dill, reshanie
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import logging
from collections import namedtuple
from json import JSONDecodeError
import faste
from . import utils, enums, errors
log = logging.getLogger("roblox")
class Asset(object):
"""Roblox Asset object.
Use :meth:`RobloxSession.get_asset` to get a specific asset.
Attributes
----------
name : str
Asset name
description : str
Asset description
id : int
Asset ID
product_id : Optional[int]
Product ID
asset_type : :class:`roblox.AssetType`
Asset type
created : :class:`datetime.datetime`
When the asset was first created
updated : :class:`datetime.datetime`
When the asset was last updated
price : Optional[int]
Price of the asset in ROBUX
sales : Optional[int]
Total sales of the asset
is_new : bool
Whether Roblox considers the asset 'new'
for_sale : bool
Whether asset can be taken/bought
public_domain : bool
If the asset is public domain / publicly viewable
limited : bool
If the asset is limited
    unique : bool
If the asset is limited and unique
remaining : Optional[int]
How many are remaining, if the asset is limited
membership_level: :class:`roblox.Membership`
Minimum Builders Club needed to take the asset
"""
def __init__(self, client, asset_id=0):
"""param client: client
:type client: roblox.RobloxSession
"""
self.client = client
self.id = asset_id
self._update_info()
def _update_info(self):
try:
product_info = self.client.http.product_info(self.id)
except JSONDecodeError:
raise errors.BadRequest("Invalid asset, possibly deleted")
self.product_id = product_info.get("ProductId")
self.name = product_info.get("Name")
self.description = product_info.get("Description")
self.asset_type = enums.AssetType(product_info.get("AssetTypeId"))
self.icon_image_asset_id = product_info.get("IconImageAssetId")
self.created = utils.get_datetime(product_info.get("Created"))
self.updated = utils.get_datetime(product_info.get("Updated"))
self.price = product_info.get("PriceInRobux")
self.sales = product_info.get("Sales")
self.is_new = product_info.get("IsNew")
self.for_sale = product_info.get("IsForSale")
self.public_domain = product_info.get("IsPublicDomain")
self.unique = product_info.get("IsLimitedUnique")
self.limited = product_info.get("IsLimited") or self.unique
self.remaining = product_info.get("Remaining")
self.membership_level = enums.Membership(product_info.get("MinimumMembershipLevel"))
self.creator_id = product_info["Creator"]["CreatorTargetId"]
self.creator_type = product_info["Creator"]["CreatorType"]
def __hash__(self):
return self.id
def __repr__(self):
return "<roblox.Asset {0.asset_type.name} name={0.name!r} id={0.id!r}>".format(self)
def __str__(self):
return self.name
def __eq__(self, other):
"""
Returns True if two asset objects are the same asset.
"""
if type(other) != Asset:
return False
return self.id == other.id
@property
@faste.decor.rr_cache()
def creator(self):
"""Asset creator
:returns: :class:`User` or :class:`Group`"""
if self.creator_type == "User":
return self.client.get_user(user_id=self.creator_id)
else:
return self.client.get_group(self.creator_id)
def buy(self):
"""
Takes/buys asset.
:returns: `True` if successful
"""
return self.client.http.buy_product(self.product_id,
self.price,
self.creator_id)
def remove_from_inventory(self):
"""
Deletes asset from inventory of client user.
:returns: `True` if successful
"""
return self.client.http.delete_from_inventory(self.id)
def post_comment(self, content):
"""
Posts comment on asset
:param str content: Comment text
:return: :class:`Comment`
"""
if not content:
raise errors.BadRequest("Comment must have text.")
comment = self.client.http.post_comment(self.id, content)
return Comment(self, content=comment["Text"], created=comment["PostedDate"], author=self.client.me)
def owned_by(self, user):
"""
Checks if asset is owned by user.
:param user: User
:type user: :class:`User`
:returns: `True` if user owns asset
"""
return self.client.http.user_owns_asset(user.id, self.id)
@property
@faste.decor.rr_cache()
def icon(self):
"""Asset for icon
:returns: Optional[:class:`Asset`]"""
if self.icon_image_asset_id == 0:
return None
return self.client.get_asset(self.icon_image_asset_id)
@property
def favorites(self):
"""Favorite count of asset
:returns: int"""
return self.client.http.asset_favorites(self.id)
def is_favorited(self):
"""Whether asset is favorited by client
:returns: bool"""
return self.client.http.is_favorited(self.id)
def favorite(self):
"""Favorites asset if it isn't favorited already.
:returns: return value of :meth:`is_favorited` (bool)"""
if self.is_favorited():
return True
return self.client.http.toggle_favorite(self.id)
def unfavorite(self):
"""Unfavorites asset if it's favorited.
:returns: return value of :meth:`is_favorited` (bool)"""
if not self.is_favorited():
return False
return not self.client.http.toggle_favorite(self.id)
def recent_average_price(self):
"""Gets RAP of asset, if it is a collectible.
:returns: Optional[`int`]"""
return self.client.http.get_sales_data(self.id).get("AveragePrice")
def RAP(self):
"""Alias for :meth:recent_average_pice"""
return self.recent_average_price()
def sales_chart(self):
"""Gets :class:`SalesChart` for asset, if it's a collectible."""
return SalesChart(self.client, self)
class Game(Asset):
pass
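# Illustrative sketch only: typical read-only use of an Asset obtained from a
# RobloxSession client (the "client" object and the asset id are assumptions).
def _example_asset_usage(client, asset_id=1818):
    asset = client.get_asset(asset_id)
    print(asset.name, asset.asset_type.name, asset.price)
    if asset.limited:
        # Collectibles expose sales data via recent_average_price()/sales_chart().
        print(asset.recent_average_price())
    return asset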
sales_point = namedtuple("sales_day", "date price volume")
class SalesChart(object):
"""Asset sales chart, representing user sales of a collectible.
You can also iterate over this object, and index it. ``SalesChart[0]`` will return the first sales point.
You can also use ::
>>> list(chart)
>>> reversed(chart)
>>> dict(chart)
>>> len(chart)
>>> datetime.date in chart
The dict version and list versions' values are namedtuples representing sales points, with ``sales_point.date``
, ``sales_point.price`` , and ``sales_point.volume``
The dict's keys are :class:`datetime.date`
Attributes
----------
asset : :class:`Asset`
Asset the sales chart belongs to
chart_dict : dict
dict version of the sales chart
"""
def __init__(self, client, asset):
self.client = client
self.asset = asset
self.chart_dict = self._chart_dict()
def _chart_dict(self):
sales_data = self.client.http.get_sales_data(self.asset.id)
if not sales_data:
raise ValueError("{!r} isn't a collectible and has no sales data".format(self.asset))
sales_chart = sales_data.get("HundredEightyDaySalesChart").split("|")
volume_chart = sales_data.get("HundredEightyDayVolumeChart").split("|")
sales_chart_dict = {}
for sale in sales_chart:
ts = sale.split(",")
if not ts[0]:
break
k = int(ts[0][:-3])
sales_chart_dict[k] = int(ts[1])
volume_chart_dict = {}
for vol in volume_chart:
tv = vol.split(",")
if not tv[0]:
break
k = int(tv[0][:-3])
volume_chart_dict[k] = int(tv[1])
rtd = {}
for timestamp in sales_chart_dict:
nts = datetime.date.fromtimestamp(timestamp)
rtd[nts] = sales_point(
date=nts,
price=sales_chart_dict.get(timestamp),
volume=volume_chart_dict.get(timestamp) or 0,
)
return rtd
def __dict__(self):
return self.chart_dict
    def __iter__(self):
        # __iter__ must return an iterator, not a list.
        return iter(self.chart_dict.values())
def __getitem__(self, index):
return list(self.chart_dict.values())[index]
def __len__(self):
return len(self.chart_dict)
def __reversed__(self):
return reversed(list(self.chart_dict.values()))
def __contains__(self, item):
if isinstance(item, datetime.date):
return item in self.chart_dict.keys()
elif isinstance(item, sales_point):
return item in self.chart_dict.values()
return False
def __repr__(self):
return "<roblox.SalesChart asset={0.asset.name!r}>".format(self)
class Comment(object):
"""Asset comment.
Attributes
----------
asset : :class:`Asset`
Asset the comment belongs to
content : str
Comment content
created : :class:`datetime.datetime`
When the comment was posted"""
__slots__ = ["asset", "content", "created", "_user", "_user_cache"]
def __init__(self, asset, content=None, created=None, author=None):
"""
:type asset: :class:`Asset`
"""
self.asset = asset
self.content = content
self.created = utils.get_datetime(created) if created else None
self._user = author
self._user_cache = None
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self.asset == other.asset and self.created == other.created and self.author == other.author
def __repr__(self):
return "<roblox.Comment asset={0.asset.name!r} author={0.author!r} created={0.created!r}>".format(self)
def __str__(self):
return self.content
@property
@faste.decor.rr_cache()
def author(self):
"""User who made the post.
:returns: :class:`User`"""
if type(self._user) == int:
return self.asset.client.get_user(user_id=self._user)
elif type(self._user) == str:
return self.asset.client.get_user(username=self._user)
return self._user
| mit | -1,756,698,657,007,532,500 | 28.708543 | 115 | 0.58722 | false | 4.090361 | false | false | false |
jcsp/manila | manila/tests/scheduler/fakes.py | 1 | 12546 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Scheduler tests.
"""
from oslo_utils import timeutils
import six
from manila.scheduler import filter_scheduler
from manila.scheduler import host_manager
SHARE_SERVICES_NO_POOLS = [
dict(id=1, host='host1', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=2, host='host2@back1', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=3, host='host2@back2', topic='share', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow()),
]
SERVICE_STATES_NO_POOLS = {
'host1': dict(share_backend_name='AAA',
total_capacity_gb=512, free_capacity_gb=200,
timestamp=None, reserved_percentage=0,
provisioned_capacity_gb=312,
max_over_subscription_ratio=1.0,
thin_provisioning=False,
driver_handles_share_servers=False),
'host2@back1': dict(share_backend_name='BBB',
total_capacity_gb=256, free_capacity_gb=100,
timestamp=None, reserved_percentage=0,
provisioned_capacity_gb=400,
max_over_subscription_ratio=2.0,
thin_provisioning=True,
driver_handles_share_servers=False),
'host2@back2': dict(share_backend_name='CCC',
total_capacity_gb=10000, free_capacity_gb=700,
timestamp=None, reserved_percentage=0,
provisioned_capacity_gb=50000,
max_over_subscription_ratio=20.0,
thin_provisioning=True,
driver_handles_share_servers=False),
}
SHARE_SERVICES_WITH_POOLS = [
dict(id=1, host='host1@AAA', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=2, host='host2@BBB', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=3, host='host3@CCC', topic='share', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow()),
dict(id=4, host='host4@DDD', topic='share', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
# service on host5 is disabled
dict(id=5, host='host5@EEE', topic='share', disabled=True,
availability_zone='zone4', updated_at=timeutils.utcnow()),
dict(id=5, host='host6@FFF', topic='share', disabled=True,
availability_zone='zone5', updated_at=timeutils.utcnow()),
]
SHARE_SERVICE_STATES_WITH_POOLS = {
'host1@AAA': dict(share_backend_name='AAA',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
snapshot_support=True,
pools=[dict(pool_name='pool1',
total_capacity_gb=51,
free_capacity_gb=41,
reserved_percentage=0,
provisioned_capacity_gb=10,
max_over_subscription_ratio=1.0,
thin_provisioning=False)]),
'host2@BBB': dict(share_backend_name='BBB',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
snapshot_support=True,
pools=[dict(pool_name='pool2',
total_capacity_gb=52,
free_capacity_gb=42,
reserved_percentage=0,
provisioned_capacity_gb=60,
max_over_subscription_ratio=2.0,
thin_provisioning=True)]),
'host3@CCC': dict(share_backend_name='CCC',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
snapshot_support=True,
pools=[dict(pool_name='pool3',
total_capacity_gb=53,
free_capacity_gb=43,
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=20.0,
thin_provisioning=True,
consistency_group_support='pool')]),
'host4@DDD': dict(share_backend_name='DDD',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
snapshot_support=True,
pools=[dict(pool_name='pool4a',
total_capacity_gb=541,
free_capacity_gb=441,
reserved_percentage=0,
provisioned_capacity_gb=800,
max_over_subscription_ratio=2.0,
thin_provisioning=True,
consistency_group_support='host'),
dict(pool_name='pool4b',
total_capacity_gb=542,
free_capacity_gb=442,
reserved_percentage=0,
provisioned_capacity_gb=2000,
max_over_subscription_ratio=10.0,
thin_provisioning=True,
consistency_group_support='host')]),
'host5@EEE': dict(share_backend_name='EEE',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
snapshot_support=True,
pools=[dict(pool_name='pool5a',
total_capacity_gb=551,
free_capacity_gb=451,
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False),
dict(pool_name='pool5b',
total_capacity_gb=552,
free_capacity_gb=452,
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False)]),
'host6@FFF': dict(share_backend_name='FFF',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
pools=[dict(pool_name='pool6a',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False),
dict(pool_name='pool6b',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False)]),
}
class FakeFilterScheduler(filter_scheduler.FilterScheduler):
def __init__(self, *args, **kwargs):
super(FakeFilterScheduler, self).__init__(*args, **kwargs)
self.host_manager = host_manager.HostManager()
class FakeHostManager(host_manager.HostManager):
def __init__(self):
super(FakeHostManager, self).__init__()
self.service_states = {
'host1': {'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
'thin_provisioning': False,
'reserved_percentage': 10,
'timestamp': None},
'host2': {'total_capacity_gb': 2048,
'free_capacity_gb': 300,
'allocated_capacity_gb': 1748,
'provisioned_capacity_gb': 1748,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'reserved_percentage': 10,
'timestamp': None},
'host3': {'total_capacity_gb': 512,
'free_capacity_gb': 256,
'allocated_capacity_gb': 256,
'provisioned_capacity_gb': 256,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': False,
'consistency_group_support': 'host',
'reserved_percentage': 0,
'timestamp': None},
'host4': {'total_capacity_gb': 2048,
'free_capacity_gb': 200,
'allocated_capacity_gb': 1848,
'provisioned_capacity_gb': 1848,
'max_over_subscription_ratio': 1.0,
'thin_provisioning': True,
'reserved_percentage': 5,
'timestamp': None},
'host5': {'total_capacity_gb': 2048,
'free_capacity_gb': 500,
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.5,
'thin_provisioning': True,
'reserved_percentage': 5,
'timestamp': None,
'consistency_group_support': 'pool'},
'host6': {'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,
'thin_provisioning': False,
'reserved_percentage': 5,
'timestamp': None},
}
class FakeHostState(host_manager.HostState):
def __init__(self, host, attribute_dict):
super(FakeHostState, self).__init__(host)
for (key, val) in six.iteritems(attribute_dict):
setattr(self, key, val)
def mock_host_manager_db_calls(mock_obj, disabled=None):
services = [
dict(id=1, host='host1', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=2, host='host2', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=3, host='host3', topic='share', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow()),
dict(id=4, host='host4', topic='share', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
dict(id=5, host='host5', topic='share', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
dict(id=6, host='host6', topic='share', disabled=False,
availability_zone='zone4', updated_at=timeutils.utcnow()),
]
if disabled is None:
mock_obj.return_value = services
else:
mock_obj.return_value = [service for service in services
if service['disabled'] == disabled]
| apache-2.0 | 4,470,534,704,318,130,700 | 48.588933 | 78 | 0.492428 | false | 4.650111 | false | false | false |
bepress/xavier | xavier/taskqueue.py | 1 | 2517 | """
Offline Manager for Xavier
"""
import logging
import jsonpickle
logger = logging.getLogger(__name__)
class Task(object):
def __init__(self, func):
self.func = func
self.path = '%s.%s' % (func.__name__, func.__module__)
self.publish_event = None
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def delay(self, *args, **kwargs):
event = jsonpickle.dumps((self.path, args, kwargs))
if not self.publish_event:
logger.error("This task has not yet been registered with a task queue")
return False
self.publish_event(event)
return True
def register_with_queue(self, publish_event):
self.publish_event = publish_event
def __repr__(self):
return self.__unicode__()
def __unicode__(self):
return "BackgroundTask(path='{}')".format(self.path)
class TaskQueue(object):
def __init__(self, publish_event):
self.functions = {}
self.publish_event = publish_event
self.schedules = {}
def process_event(self, event):
name, args, kwargs = jsonpickle.loads(event)
func = self.functions.get(name)
if not func:
logger.info("processing event - missing function name: %s", name)
raise Exception("Missing function")
        try:
            func(*args, **kwargs)
        except Exception:
            logger.exception("Task %s raised an exception", name)
            return False
return True
def process_schedule(self, schedule):
if schedule not in self.schedules:
logger.info("Trying to process schedule for unknown schedule: %s", schedule)
return
scheduled_functions = self.schedules[schedule]
logger.info("Running schedule %s registered functions: %s", schedule, scheduled_functions)
for func in scheduled_functions:
func.delay()
def register_task(self, task, schedules):
self.functions[task.path] = task
for schedule in schedules:
if schedule not in self.schedules:
self.schedules[schedule] = []
if task.path not in self.schedules[schedule]:
self.schedules[schedule].append(task)
task.register_with_queue(self.publish_event)
def task(self, schedules=None):
schedules = schedules if schedules else []
def wrapper(func):
func = Task(func)
self.register_task(func, schedules)
return func
return wrapper
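# Minimal usage sketch (not part of the module): events are published through the
# user-supplied callable and later handed back to process_event(); the schedule
# name "nightly" and the task body are arbitrary examples.
def _example_taskqueue_usage():
    published = []
    queue = TaskQueue(publish_event=published.append)

    @queue.task(schedules=["nightly"])
    def send_report(day="today"):
        logger.info("sending report for %s", day)

    send_report.delay("2017-01-01")    # serialize the call and publish it
    queue.process_event(published[0])  # worker side: deserialize and execute
    queue.process_schedule("nightly")  # re-enqueue every task on this schedule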
| mit | -8,391,339,796,450,315,000 | 26.064516 | 98 | 0.594358 | false | 4.237374 | false | false | false |
zestrada/nova-cs498cc | nova/cells/manager.py | 1 | 15722 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Service Manager
"""
import datetime
import time
from oslo.config import cfg
from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova import manager
from nova.openstack.common import importutils
from nova.openstack.common import timeutils
cell_manager_opts = [
cfg.StrOpt('driver',
default='nova.cells.rpc_driver.CellsRPCDriver',
help='Cells communication driver to use'),
cfg.IntOpt("instance_updated_at_threshold",
default=3600,
help="Number of seconds after an instance was updated "
"or deleted to continue to update cells"),
cfg.IntOpt("instance_update_num_instances",
default=1,
help="Number of instances to update per periodic task run")
]
CONF = cfg.CONF
CONF.register_opts(cell_manager_opts, group='cells')
class CellsManager(manager.Manager):
"""The nova-cells manager class. This class defines RPC
methods that the local cell may call. This class is NOT used for
messages coming from other cells. That communication is
driver-specific.
Communication to other cells happens via the messaging module. The
MessageRunner from that module will handle routing the message to
the correct cell via the communications driver. Most methods below
create 'targeted' (where we want to route a message to a specific cell)
or 'broadcast' (where we want a message to go to multiple cells)
messages.
Scheduling requests get passed to the scheduler class.
"""
RPC_API_VERSION = '1.6'
def __init__(self, *args, **kwargs):
# Mostly for tests.
cell_state_manager = kwargs.pop('cell_state_manager', None)
super(CellsManager, self).__init__(*args, **kwargs)
if cell_state_manager is None:
cell_state_manager = cells_state.CellStateManager
self.state_manager = cell_state_manager()
self.msg_runner = messaging.MessageRunner(self.state_manager)
cells_driver_cls = importutils.import_class(
CONF.cells.driver)
self.driver = cells_driver_cls()
self.instances_to_heal = iter([])
def post_start_hook(self):
"""Have the driver start its consumers for inter-cell communication.
Also ask our child cells for their capacities and capabilities so
we get them more quickly than just waiting for the next periodic
update. Receiving the updates from the children will cause us to
update our parents. If we don't have any children, just update
our parents immediately.
"""
# FIXME(comstud): There's currently no hooks when services are
# stopping, so we have no way to stop consumers cleanly.
self.driver.start_consumers(self.msg_runner)
ctxt = context.get_admin_context()
if self.state_manager.get_child_cells():
self.msg_runner.ask_children_for_capabilities(ctxt)
self.msg_runner.ask_children_for_capacities(ctxt)
else:
self._update_our_parents(ctxt)
@manager.periodic_task
def _update_our_parents(self, ctxt):
"""Update our parent cells with our capabilities and capacity
if we're at the bottom of the tree.
"""
self.msg_runner.tell_parents_our_capabilities(ctxt)
self.msg_runner.tell_parents_our_capacities(ctxt)
@manager.periodic_task
def _heal_instances(self, ctxt):
"""Periodic task to send updates for a number of instances to
parent cells.
On every run of the periodic task, we will attempt to sync
'CONF.cells.instance_update_num_instances' number of instances.
When we get the list of instances, we shuffle them so that multiple
nova-cells services aren't attempting to sync the same instances
in lockstep.
        If CONF.cells.instance_updated_at_threshold is set, only attempt
        to sync instances that have been updated recently.  The CONF
        setting defines the maximum number of seconds old the updated_at
        can be.  I.e., a threshold of 3600 means to only update instances
        that have been modified in the last hour.
"""
if not self.state_manager.get_parent_cells():
# No need to sync up if we have no parents.
return
info = {'updated_list': False}
def _next_instance():
try:
instance = self.instances_to_heal.next()
except StopIteration:
if info['updated_list']:
return
threshold = CONF.cells.instance_updated_at_threshold
updated_since = None
if threshold > 0:
updated_since = timeutils.utcnow() - datetime.timedelta(
seconds=threshold)
self.instances_to_heal = cells_utils.get_instances_to_sync(
ctxt, updated_since=updated_since, shuffle=True,
uuids_only=True)
info['updated_list'] = True
try:
instance = self.instances_to_heal.next()
except StopIteration:
return
return instance
rd_context = ctxt.elevated(read_deleted='yes')
for i in xrange(CONF.cells.instance_update_num_instances):
while True:
# Yield to other greenthreads
time.sleep(0)
instance_uuid = _next_instance()
if not instance_uuid:
return
try:
instance = self.db.instance_get_by_uuid(rd_context,
instance_uuid)
except exception.InstanceNotFound:
continue
self._sync_instance(ctxt, instance)
break
def _sync_instance(self, ctxt, instance):
"""Broadcast an instance_update or instance_destroy message up to
parent cells.
"""
if instance['deleted']:
self.instance_destroy_at_top(ctxt, instance)
else:
self.instance_update_at_top(ctxt, instance)
def schedule_run_instance(self, ctxt, host_sched_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s)
and forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.schedule_run_instance(ctxt, our_cell,
host_sched_kwargs)
def get_cell_info_for_neighbors(self, _ctxt):
"""Return cell information for our neighbor cells."""
return self.state_manager.get_cell_info_for_neighbors()
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
response = self.msg_runner.run_compute_api_method(ctxt,
cell_name,
method_info,
call)
if call:
return response.value_or_raise()
def instance_update_at_top(self, ctxt, instance):
"""Update an instance at the top level cell."""
self.msg_runner.instance_update_at_top(ctxt, instance)
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy an instance at the top level cell."""
self.msg_runner.instance_destroy_at_top(ctxt, instance)
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""This is used by API cell when it didn't know what cell
an instance was in, but the instance was requested to be
deleted or soft_deleted. So, we'll broadcast this everywhere.
"""
self.msg_runner.instance_delete_everywhere(ctxt, instance,
delete_type)
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top level cell."""
self.msg_runner.instance_fault_create_at_top(ctxt, instance_fault)
def bw_usage_update_at_top(self, ctxt, bw_update_info):
"""Update bandwidth usage at top level cell."""
self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info)
def sync_instances(self, ctxt, project_id, updated_since, deleted):
"""Force a sync of all instances, potentially by project_id,
and potentially since a certain date/time.
"""
self.msg_runner.sync_instances(ctxt, project_id, updated_since,
deleted)
def service_get_all(self, ctxt, filters):
"""Return services in this cell and in all child cells."""
responses = self.msg_runner.service_get_all(ctxt, filters)
ret_services = []
# 1 response per cell. Each response is a list of services.
for response in responses:
services = response.value_or_raise()
for service in services:
cells_utils.add_cell_to_service(service, response.cell_name)
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, ctxt, host_name):
"""Return a service entry for a compute host in a certain cell."""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
response = self.msg_runner.service_get_by_compute_host(ctxt,
cell_name,
host_name)
service = response.value_or_raise()
cells_utils.add_cell_to_service(service, response.cell_name)
return service
def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
"""Proxy an RPC message as-is to a manager."""
compute_topic = CONF.compute_topic
cell_and_host = topic[len(compute_topic) + 1:]
cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host)
response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name,
host_name, topic, rpc_message, call, timeout)
return response.value_or_raise()
def task_log_get_all(self, ctxt, task_name, period_beginning,
period_ending, host=None, state=None):
"""Get task logs from the DB from all cells or a particular
cell.
If 'host' is not None, host will be of the format 'cell!name@host',
with '@host' being optional. The query will be directed to the
appropriate cell and return all task logs, or task logs matching
the host if specified.
'state' also may be None. If it's not, filter by the state as well.
"""
if host is None:
cell_name = None
else:
cell_name, host = cells_utils.split_cell_and_item(host)
# If no cell name was given, assume that the host name is the
# cell_name and that the target is all hosts
if cell_name is None:
cell_name, host = host, cell_name
responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
task_name, period_beginning, period_ending,
host=host, state=state)
# 1 response per cell. Each response is a list of task log
# entries.
ret_task_logs = []
for response in responses:
task_logs = response.value_or_raise()
for task_log in task_logs:
cells_utils.add_cell_to_task_log(task_log,
response.cell_name)
ret_task_logs.append(task_log)
return ret_task_logs
def compute_node_get(self, ctxt, compute_id):
"""Get a compute node by ID in a specific cell."""
cell_name, compute_id = cells_utils.split_cell_and_item(
compute_id)
response = self.msg_runner.compute_node_get(ctxt, cell_name,
compute_id)
node = response.value_or_raise()
cells_utils.add_cell_to_compute_node(node, cell_name)
return node
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all cells."""
responses = self.msg_runner.compute_node_get_all(ctxt,
hypervisor_match=hypervisor_match)
# 1 response per cell. Each response is a list of compute_node
# entries.
ret_nodes = []
for response in responses:
nodes = response.value_or_raise()
for node in nodes:
cells_utils.add_cell_to_compute_node(node,
response.cell_name)
ret_nodes.append(node)
return ret_nodes
def compute_node_stats(self, ctxt):
"""Return compute node stats totals from all cells."""
responses = self.msg_runner.compute_node_stats(ctxt)
totals = {}
for response in responses:
data = response.value_or_raise()
for key, val in data.iteritems():
totals.setdefault(key, 0)
totals[key] += val
return totals
def actions_get(self, ctxt, cell_name, instance_uuid):
response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid)
return response.value_or_raise()
def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
request_id):
response = self.msg_runner.action_get_by_request_id(ctxt, cell_name,
instance_uuid,
request_id)
return response.value_or_raise()
def action_events_get(self, ctxt, cell_name, action_id):
response = self.msg_runner.action_events_get(ctxt, cell_name,
action_id)
return response.value_or_raise()
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
self.msg_runner.consoleauth_delete_tokens(ctxt, instance_uuid)
def validate_console_port(self, ctxt, instance_uuid, console_port,
console_type):
"""Validate console port with child cell compute node."""
instance = self.db.instance_get_by_uuid(ctxt, instance_uuid)
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
response = self.msg_runner.validate_console_port(ctxt,
instance['cell_name'], instance_uuid, console_port,
console_type)
return response.value_or_raise()
| apache-2.0 | -814,845,744,529,827,200 | 42.551247 | 78 | 0.596871 | false | 4.357539 | false | false | false |
ShaguptaS/moviepy | moviepy/video/fx/resize.py | 1 | 3276 |
resize_possible = True
try:
import cv2
resizer = lambda pic, newsize : cv2.resize(pic.astype('uint8'),
tuple(map(int, newsize)),
interpolation=cv2.INTER_AREA)
except ImportError:
try:
import Image
import numpy as np
def resizer(pic, newsize):
newsize = map(int, newsize)[::-1]
shape = pic.shape
newshape = (newsize[0],newsize[1],shape[2])
pilim = Image.fromarray(pic)
resized_pil = pilim.resize(newsize[::-1], Image.ANTIALIAS)
arr = np.fromstring(resized_pil.tostring(), dtype='uint8')
return arr.reshape(newshape)
except ImportError:
try:
import scipy.misc.imresize as imresize
resizer = lambda pic, newsize : imresize(pic,
map(int, newsize[::-1]))
except ImportError:
resize_possible = False
from moviepy.decorators import apply_to_mask
@apply_to_mask
def resize(clip, newsize=None, height=None, width=None):
"""
Returns a video clip that is a resized version of the clip.
:param newsize: can be either ``(height,width)`` in pixels or
a float representing a scaling factor. Or a function of time
returning one of these.
:param width: width of the new clip in pixel. The height is
then computed so that the width/height ratio is conserved.
:param height: height of the new clip in pixel. The width is
then computed so that the width/height ratio is conserved.
>>> myClip.resize( (460,720) ) # New resolution: (460,720)
>>> myClip.resize(0.6) # width and heigth multiplied by 0.6
>>> myClip.resize(width=800) # height computed automatically.
>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
"""
w, h = clip.size
if newsize != None:
def trans_newsize(ns):
if isinstance(ns, (int, float)):
return [ns * w, ns * h]
else:
return ns
if hasattr(newsize, "__call__"):
newsize2 = lambda t : trans_newsize(newsize(t))
if clip.ismask:
fl = lambda gf,t: 1.0*resizer((255 * gf(t)).astype('uint8'),
newsize2(t))/255
else:
fl = lambda gf,t: resizer(gf(t).astype('uint8'),newsize2(t))
return clip.fl(fl)
else:
newsize = trans_newsize(newsize)
elif height != None:
newsize = [w * height / h, height]
elif width != None:
newsize = [width, h * width / w]
if clip.ismask:
fl = lambda pic: 1.0*resizer((255 * pic).astype('uint8'),
newsize)/255
else:
fl = lambda pic: resizer(pic.astype('uint8'), newsize)
return clip.fl_image(fl)
if not resize_possible:
doc = resize.__doc__
def resize(clip, newsize=None, height=None, width=None):
raise ImportError("fx resize needs OpenCV or Scipy or PIL")
resize.__doc__ = doc
| mit | -3,920,737,535,064,460,300 | 31.117647 | 76 | 0.53083 | false | 3.995122 | false | false | false |
BrendanLeber/adventofcode | 2016/20-firewall_rules/firewall_rules.py | 1 | 1586 | # -*- coding: utf-8 -*-
import argparse
import pdb
import traceback
from typing import List, Tuple
def test_ip(ip: int, rules: List[Tuple[int, int]], max_addr: int) -> bool:
for (start, end) in rules:
if start <= ip <= end:
break
else:
if ip < max_addr:
return True
return False
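# Approach: every allowed address starts just after some blocked range, so the
# candidates are each range's end + 1. Part one is the first candidate no rule
# covers; part two walks forward from each valid candidate counting consecutive
# allowed addresses (this assumes the blocked ranges have distinct end points,
# so no gap is counted twice).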
def solve(rules: List[Tuple[int, int]], max_addr: int) -> Tuple[int, int]:
candidates = [rule[1] + 1 for rule in rules]
valids = [candidate for candidate in candidates if test_ip(candidate, rules, max_addr)]
one: int = valids[0]
two: int = 0
for ip in valids:
while test_ip(ip, rules, max_addr):
two += 1
ip += 1
return (one, two)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Advent of Code - 2016 - Day 20 - Firewall Rules.")
parser.add_argument(
"input",
type=str,
default="input.txt",
nargs="?",
help="The puzzle input. (Default %(default)s)",
)
parser.add_argument(
"max_addr",
type=int,
default=4294967296,
nargs="?",
help="The largest address. (Default %(default)s)",
)
args = parser.parse_args()
rules: List[Tuple[int, int]] = []
with open(args.input, "rt") as inf:
for line in inf:
parts = line.strip().split("-")
rules.append((int(parts[0]), int(parts[1])))
rules.sort()
try:
print(solve(rules, args.max_addr))
except Exception:
traceback.print_exc()
pdb.post_mortem()
| mit | 2,803,982,701,570,625,000 | 24.580645 | 100 | 0.552333 | false | 3.580135 | false | false | false |
uwosh/UWOshOIE | tests/testTransitionApproveForFA.py | 1 | 4225 | import os, sys
if __name__ == '__main__':
execfile(os.path.join(sys.path[0], 'framework.py'))
from Products.UWOshOIE.tests.uwoshoietestcase import UWOshOIETestCase
from Products.CMFCore.WorkflowCore import WorkflowException
class TestTransitionApproveForFA(UWOshOIETestCase):
"""Ensure product is properly installed"""
def createApplication(self):
self.login(self._default_user)
self.portal.invokeFactory(type_name="OIEStudentApplication", id="testapplication")
app = self.portal['testapplication']
self.fill_out_application(app)
app.setHoldApplication('HOLD')
self.portal_workflow.doActionFor(app, 'submit')
return app
def test_program_manager_should_be_able_to_do_action(self):
app = self.createApplication()
self.login('program_manager')
self.portal_workflow.doActionFor(app, 'approveForFA')
self.assertEquals('waitingForPrintMaterials', self.getState(app))
def test_front_line_advisor_should_be_able_to_do_action(self):
app = self.createApplication()
self.login('front_line_advisor')
self.portal_workflow.doActionFor(app, 'approveForFA')
self.assertEquals('waitingForPrintMaterials', self.getState(app))
def test_all_other_roles_should_not_be_able_able_to_perform_action(self):
        app = self.createApplication()
for user in self._all_users:
            if user != 'program_manager' and user != 'front_line_advisor':
self.login(user)
self.assertRaises(WorkflowException, self.portal_workflow.doActionFor, app, 'approveForFA')
self.logout()
def test_should_send_email_when_fired(self):
app = self.createApplication()
self.portal.MailHost.clearEmails()
self.login('program_manager')
self.portal_workflow.doActionFor(app, 'approveForFA')
self.assertEquals(1, self.portal.MailHost.getEmailCount())
    def test_should_send_correct_email_program_manager(self):
app = self.createApplication()
self.portal.MailHost.clearEmails()
self.login('program_manager')
self.portal_workflow.doActionFor(app, 'approveForFA')
to = self.portal.MailHost.getTo()
f = self.portal.MailHost.getFrom()
subject = self.portal.MailHost.getSubject()
message = self.portal.MailHost.getMessage()
self.assertEquals(['[email protected]', '[email protected]'], to)
self.assertEquals('[email protected]', f)
self.assertEquals('Your study abroad application update (UW Oshkosh Office of International Education)', subject)
self.assertEquals("\n\nYour UW Oshkosh Office of International Education study abroad application has been updated.\n\nName: John Doe\nProgram Name: test\nProgram Year: 2009\n\nTransition\n\n\n\nYou can view your application here: http://nohost/plone/testapplication\n\nComment: \n\n\n", message)
def test_should_send_correct_email_front_line_advisor(self):
app = self.createApplication()
self.portal.MailHost.clearEmails()
self.login('front_line_advisor')
self.portal_workflow.doActionFor(app, 'approveForFA')
to = self.portal.MailHost.getTo()
f = self.portal.MailHost.getFrom()
subject = self.portal.MailHost.getSubject()
message = self.portal.MailHost.getMessage()
self.assertEquals(['[email protected]', '[email protected]'], to)
self.assertEquals('[email protected]', f)
self.assertEquals('Your study abroad application update (UW Oshkosh Office of International Education)', subject)
self.assertEquals("\n\nYour UW Oshkosh Office of International Education study abroad application has been updated.\n\nName: John Doe\nProgram Name: test\nProgram Year: 2009\n\nTransition\n\n\n\nYou can view your application here: http://nohost/plone/testapplication\n\nComment: \n\n\n", message)
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestTransitionApproveForFA))
return suite
if __name__ == '__main__':
framework()
| gpl-2.0 | 7,559,430,899,610,069,000 | 41.676768 | 304 | 0.68213 | false | 3.648532 | true | false | false |
dpshelio/sunpy | sunpy/util/net.py | 2 | 6983 | """
This module provides general net utility functions.
"""
import os
import re
import sys
import shutil
from unicodedata import normalize
from email.parser import FeedParser
from urllib.parse import urljoin, urlparse
from urllib.request import urlopen
from sunpy.util import replacement_filename
__all__ = ['slugify', 'get_content_disposition', 'get_filename', 'get_system_filename',
'download_file', 'download_fileobj', 'check_download_file']
# Characters not allowed in slugified version.
_punct_re = re.compile(r'[:\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim='_'):
"""
Slugify given unicode text.
Parameters
----------
text : `str`
A `str` to slugify.
delim : `str`, optional
The delimiter for the input ``text``. Default is "_".
Returns
-------
`str` :
The slugify `str` name.
"""
text = normalize('NFKD', text)
period = '.'
name_and_extension = text.rsplit(period, 1)
name = name_and_extension[0]
name = str(delim).join(
filter(None, (word for word in _punct_re.split(name.lower()))))
if len(name_and_extension) == 2:
extension = name_and_extension[1]
return str(period).join([name, extension])
else:
return name
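# Illustrative example (assumed input, shown here rather than in the docstring):
#
#     >>> slugify('My Data File!.FITS')
#     'my_data_file.FITS'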
def get_content_disposition(content_disposition):
"""
Get the content disposition filename from given header.
**Do not include "Content-Disposition:".**
Parameters
----------
content_disposition : `str`
The content disposition header.
Returns
-------
`str` :
The content disposition filename.
"""
parser = FeedParser()
parser.feed('Content-Disposition: ' + content_disposition)
name = parser.close().get_filename()
if name and not isinstance(name, str):
name = name.decode('latin1', 'ignore')
return name
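# Illustrative example (assumed header value):
#
#     >>> get_content_disposition('attachment; filename="aia_171.fits"')
#     'aia_171.fits'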
def get_filename(sock, url):
"""
Get filename from given `~urllib.request.urlopen` object and URL.
First, tries the "Content-Disposition", if unavailable, extracts name from the URL.
Parameters
----------
sock : `~urllib.request.urlopen`
The `~urllib.request.urlopen` to parse for the filename.
url : `str`
The URL to parse for the filename.
Returns
-------
`str`:
The filename.
"""
name = None
cd = sock.headers.get('Content-Disposition', None)
if cd is not None:
try:
name = get_content_disposition(cd)
except IndexError:
pass
if not name:
parsed = urlparse(url)
name = parsed.path.rstrip('/').rsplit('/', 1)[-1]
return str(name)
def get_system_filename(sock, url, default="file"):
"""
Get filename from given `~urllib.request.urlopen` object and URL.
First, tries the "Content-Disposition", if unavailable, extracts name from the URL.
If this fails, the ``default`` keyword will be used.
Parameters
----------
sock : `~urllib.request.urlopen`
The `~urllib.request.urlopen` to parse for the filename.
url : `str`
The URL to parse for the filename.
default : `str`, optional
The name to use if the first two methods fail. Defaults to "file".
Returns
-------
`bytes`:
The filename in file system encoding.
"""
name = get_filename(sock, url)
if not name:
name = str(default)
return name.encode(sys.getfilesystemencoding(), 'ignore')
def download_fileobj(opn, directory, url='', default="file", overwrite=False):
"""
Download a file from a url into a directory.
Tries the "Content-Disposition", if unavailable, extracts name from the URL.
If this fails, the ``default`` keyword will be used.
Parameters
----------
opn : `~urllib.request.urlopen`
The `~urllib.request.urlopen` to download.
directory : `str`
The directory path to download the file in to.
url : `str`
The URL to parse for the filename.
default : `str`, optional
The name to use if the first two methods fail. Defaults to "file".
overwrite: `bool`, optional
If `True` will overwrite a file of the same name. Defaults to `False`.
Returns
-------
`str`:
The file path for the downloaded file.
"""
filename = get_system_filename(opn, url, default)
path = os.path.join(directory, filename.decode('utf-8'))
if overwrite and os.path.exists(path):
path = replacement_filename(path)
with open(path, 'wb') as fd:
shutil.copyfileobj(opn, fd)
return path
def download_file(url, directory, default="file", overwrite=False):
"""
Download a file from a url into a directory.
Tries the "Content-Disposition", if unavailable, extracts name from the URL.
If this fails, the ``default`` keyword will be used.
Parameters
----------
url : `str`
The file URL download.
directory : `str`
The directory path to download the file in to.
default : `str`, optional
The name to use if the first two methods fail. Defaults to "file".
overwrite: `bool`, optional
If `True` will overwrite a file of the same name. Defaults to `False`.
Returns
-------
`str`:
The file path for the downloaded file.
"""
opn = urlopen(url)
try:
path = download_fileobj(opn, directory, url, default, overwrite)
finally:
opn.close()
return path
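# Illustrative usage (placeholder URL and directory):
#
#     >>> download_file('https://example.com/aia_171.fits', '/tmp')  # doctest: +SKIP
#     '/tmp/aia_171.fits'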
def check_download_file(filename, remotepath, download_dir, remotename=None, replace=False):
"""
Downloads a file from a remotepath to a localpath if it isn't there.
This function checks whether a file with name ``filename`` exists in the user's local machine.
If it doesn't, it downloads the file from ``remotepath``.
Parameters
----------
filename : `str`
Name of file.
remotepath : `str`
URL of the remote location from which filename can be downloaded.
download_dir : `str`
The files directory.
remotename : `str`, optional
Filename under which the file is stored remotely.
Default is same as filename.
replace : `bool`, optional
If `True`, file will be downloaded whether or not file already exists locally.
Examples
--------
>>> from sunpy.util.net import check_download_file
>>> remotepath = "https://www.download_repository.com/downloads/"
>>> check_download_file("filename.txt", remotepath, download_dir='.') # doctest: +SKIP
"""
# Check if file already exists locally. If not, try downloading it.
if replace or not os.path.isfile(os.path.join(download_dir, filename)):
# set local and remote file names be the same unless specified
# by user.
if not isinstance(remotename, str):
remotename = filename
download_file(urljoin(remotepath, remotename),
download_dir, default=filename, overwrite=replace)
| bsd-2-clause | 134,006,119,759,047,330 | 28.340336 | 98 | 0.622798 | false | 4.117335 | false | false | false |
msherry/PyXB-1.1.4 | tests/bugs/test-200908271556.py | 1 | 1990 | import pyxb_114.binding.generate
import pyxb_114.binding.datatypes as xs
import pyxb_114.binding.basis
import pyxb_114.utils.domutils
import gc
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="instance">
<xs:complexType>
<xs:all>
<xs:element name="inner" maxOccurs="unbounded">
<xs:complexType>
<xs:all>
<xs:element name="text" type="xs:string"/>
<xs:element name="number" type="xs:integer"/>
</xs:all>
</xs:complexType>
</xs:element>
</xs:all>
</xs:complexType>
</xs:element>
</xs:schema>
'''
#file('schema.xsd', 'w').write(xsd)
code = pyxb_114.binding.generate.GeneratePython(schema_text=xsd)
#file('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb_114.exceptions_ import *
import unittest
import os
class TestBug_200908271556(unittest.TestCase):
# No, this isn't portable. No, I don't care.
__statm = file('/proc/%d/statm' % (os.getpid(),))
def __getMem (self):
self.__statm.seek(0)
return int(self.__statm.read().split()[0])
def testMemory (self):
xmls = '<instance><inner><text>text</text><number>45</number></inner></instance>'
base_at = 10
check_at = 20
growth_limit = 1.10
iter = 0
gc.collect()
while True:
iter += 1
if base_at == iter:
gc.collect()
base_mem = self.__getMem()
elif check_at == iter:
gc.collect()
check_mem = self.__getMem()
growth = check_mem - base_mem
self.assertTrue(0 == growth, 'growth %s' % (growth,))
break
instance = CreateFromDocument(xmls)
xmls = instance.toxml("utf-8", root_only=True)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 223,691,252,362,142,340 | 27.028169 | 89 | 0.554271 | false | 3.390119 | true | false | false |
deuscoin-org/deuscoin-core | qa/rpc-tests/test_framework/authproxy.py | 1 | 6097 |
"""
Copyright 2011 Jeff Garzik
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
try:
import http.client as httplib
except ImportError:
import httplib
import base64
import decimal
import json
import logging
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
USER_AGENT = "AuthServiceProxy/0.1"
HTTP_TIMEOUT = 30
log = logging.getLogger("DeuscoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
Exception.__init__(self)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return round(o, 8)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy(object):
__id_count = 0
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None):
self.__service_url = service_url
self._service_name = service_name
self.__url = urlparse.urlparse(service_url)
if self.__url.port is None:
port = 80
else:
port = self.__url.port
(user, passwd) = (self.__url.username, self.__url.password)
try:
user = user.encode('utf8')
except AttributeError:
pass
try:
passwd = passwd.encode('utf8')
except AttributeError:
pass
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = httplib.HTTPSConnection(self.__url.hostname, port,
None, None, False,
timeout)
else:
self.__conn = httplib.HTTPConnection(self.__url.hostname, port,
False, timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except httplib.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
def __call__(self, *args):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self._service_name,
json.dumps(args, default=EncodeDecimal)))
postdata = json.dumps({'version': '1.1',
'method': self._service_name,
'params': args,
'id': AuthServiceProxy.__id_count}, default=EncodeDecimal)
response = self._request('POST', self.__url.path, postdata)
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def _batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal)
log.debug("--> "+postdata)
return self._request('POST', self.__url.path, postdata)
def _get_response(self):
http_response = self.__conn.getresponse()
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
if "error" in response and response["error"] is None:
log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal)))
else:
log.debug("<-- "+responsedata)
return response
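# Illustrative usage (placeholder credentials, host and RPC methods):
#
#     service = AuthServiceProxy("http://user:[email protected]:8332")
#     block_count = service.getblockcount()
#     block_hash = service.getblockhash(block_count)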
| mit | -2,653,445,764,162,595,300 | 36.176829 | 105 | 0.599803 | false | 4.345688 | false | false | false |
JasonLai256/plumbca | plumbca/cache.py | 1 | 2429 | # -*- coding:utf-8 -*-
"""
plumbca.cache
~~~~~~~~~~~~~
CacheHandler for the collections control.
:copyright: (c) 2015 by Jason Lai.
:license: BSD, see LICENSE for more details.
"""
import asyncio
import logging
import re
import os
from .config import DefaultConf
from .collection import IncreaseCollection
from .backend import BackendFactory
actlog = logging.getLogger('activity')
err_logger = logging.getLogger('errors')
class CacheCtl(object):
def __init__(self):
self.collmap = {}
self.info = {}
self.bk = BackendFactory(DefaultConf['backend'])
loop = asyncio.get_event_loop()
loop.run_until_complete(self.bk.init_connection())
def get_collection(self, name):
if name not in self.collmap:
actlog.info("Collection %s not exists.", name)
return
return self.collmap[name]
async def ensure_collection(self, name, ctype, expire, **kwargs):
rv = await self.bk.get_collection_index(name)
if name not in self.collmap and not rv:
actlog.info("Ensure collection - not exists in plumbca and redis")
self.collmap[name] = globals()[ctype](name, expire=expire, **kwargs)
await self.bk.set_collection_index(name, self.collmap[name])
actlog.info("Ensure collection - not exists in plumbca and redis, "
"create it, `%s`.", self.collmap[name])
elif name not in self.collmap and rv:
actlog.info("Ensure collection - not exists in plumbca")
rv_name, rv_instance_name = rv
assert name == rv_name
assert rv_instance_name == globals()[ctype].__class__.__name__
self.collmap[name] = globals()[ctype](name, expire=expire, **kwargs)
actlog.info("Ensure collection - not exists in plumbca, "
"create it, `%s`.", self.collmap[name])
elif name in self.collmap and not rv:
actlog.info("Ensure collection - not exists in redis")
await self.bk.set_collection_index(name, self.collmap[name])
actlog.info("Ensure collection - not exists in redis, "
"create it, `%s`.", self.collmap[name])
else:
actlog.info("Ensure collection already exists, `%s`.",
self.collmap[name])
def info(self):
pass
CacheCtl = CacheCtl()
| bsd-3-clause | -7,146,317,080,230,869,000 | 31.386667 | 80 | 0.599012 | false | 3.968954 | false | false | false |
sdrogers/lda | code/lda_utilities.py | 1 | 14058 | import numpy as np
import pickle
import jsonpickle
from scipy.special import psi  # needed by decompose() below
def match_topics_across_dictionaries(lda1 = None,lda2 = None,file1 = None,file2 = None,
same_corpus = True,copy_annotations = False,copy_threshold = 0.5,summary_file = None,
new_file2 = None,mass_tol = 5.0):
# finds the closest topic matches from lda2 to lda1
if lda1 == None:
if file1 == None:
print "Must specify either an lda dictionary object or a dictionary file for lda1"
return
else:
with open(file1,'r') as f:
lda1 = pickle.load(f)
print "Loaded lda1 from {}".format(file1)
if lda2 == None:
if file2 == None:
print "Must specify either an lda dictionary object or a dictionary file for lda1"
return
else:
with open(file2,'r') as f:
lda2 = pickle.load(f)
print "Loaded lda2 from {}".format(file2)
word_index = lda1['word_index']
n_words = len(word_index)
n_topics1 = lda1['K']
n_topics2 = lda2['K']
# Put lda1's topics into a nice matrix
beta = np.zeros((n_topics1,n_words),np.float)
topic_pos = 0
topic_index1 = {}
for topic in lda1['beta']:
topic_index1[topic] = topic_pos
for word in lda1['beta'][topic]:
word_pos = word_index[word]
beta[topic_pos,word_pos] = lda1['beta'][topic][word]
topic_pos += 1
# Make the reverse index
ti = [(topic,topic_index1[topic]) for topic in topic_index1]
ti = sorted(ti,key = lambda x: x[1])
reverse1,_ = zip(*ti)
if not same_corpus:
fragment_masses = np.array([float(f.split('_')[1]) for f in word_index if f.startswith('fragment')])
fragment_names = [f for f in word_index if f.startswith('fragment')]
loss_masses = np.array([float(f.split('_')[1]) for f in word_index if f.startswith('loss')])
loss_names = [f for f in word_index if f.startswith('loss')]
beta /= beta.sum(axis=1)[:,None]
best_match = {}
temp_topics2 = {}
for topic2 in lda2['beta']:
temp_topics2[topic2] = {}
temp_beta = np.zeros((1,n_words))
if same_corpus:
total_probability = 0.0
for word in lda2['beta'][topic2]:
word_pos = word_index[word]
temp_beta[0,word_pos] = lda2['beta'][topic2][word]
temp_topics2[topic2][word] = lda2['beta'][topic2][word]
total_probability += temp_topics2[topic2][word]
for word in temp_topics2[topic2]:
temp_topics2[topic2][word] /= total_probability
temp_beta /= temp_beta.sum(axis=1)[:,None]
else:
# we need to match across corpus
total_probability = 0.0
for word in lda2['beta'][topic2]:
# try and match to a word in word_index
split_word = word.split('_')
word_mass = float(split_word[1])
if split_word[0].startswith('fragment'):
ppm_errors = 1e6*np.abs((fragment_masses - word_mass)/fragment_masses)
smallest_pos = ppm_errors.argmin()
if ppm_errors[smallest_pos] < mass_tol:
word1 = fragment_names[smallest_pos]
temp_topics2[topic2][word1] = lda2['beta'][topic2][word]
temp_beta[0,word_index[word1]] = lda2['beta'][topic2][word]
if split_word[0].startswith('loss'):
ppm_errors = 1e6*np.abs((loss_masses - word_mass)/loss_masses)
smallest_pos = ppm_errors.argmin()
if ppm_errors[smallest_pos] < 2*mass_tol:
word1 = loss_names[smallest_pos]
temp_topics2[topic2][word1] = lda2['beta'][topic2][word]
temp_beta[0,word_index[word1]] = lda2['beta'][topic2][word]
total_probability += lda2['beta'][topic2][word]
for word in temp_topics2[topic2]:
temp_topics2[topic2][word] /= total_probability
temp_beta /= total_probability
match_scores = np.dot(beta,temp_beta.T)
best_score = match_scores.max()
best_pos = match_scores.argmax()
topic1 = reverse1[best_pos]
w1 = lda1['beta'][topic1].keys()
if same_corpus:
w2 = lda2['beta'][topic2].keys()
else:
w2 = temp_topics2[topic2].keys()
union = set(w1) | set(w2)
intersect = set(w1) & set(w2)
p1 = 0.0
p2 = 0.0
for word in intersect:
word_pos = word_index[word]
p1 += beta[topic_index1[topic1],word_pos]
p2 += temp_topics2[topic2][word]
annotation = ""
if 'topic_metadata' in lda1:
if topic1 in lda1['topic_metadata']:
if type(lda1['topic_metadata'][topic1]) == str:
annotation = lda1['topic_metadata'][topic1]
else:
annotation = lda1['topic_metadata'][topic1].get('annotation',"")
best_match[topic2] = (topic1,best_score,len(union),len(intersect),p2,p1,annotation)
if summary_file:
with open(summary_file,'w') as f:
f.write('lda2_topic,lda1_topic,match_score,unique_words,shared_words,shared_p_lda2,shared_p_lda1,lda1_annotation\n')
for topic2 in best_match:
topic1 = best_match[topic2][0]
line = "{},{},{}".format(topic2,topic1,best_match[topic2][1])
line += ",{},{}".format(best_match[topic2][2],best_match[topic2][3])
line += ",{},{}".format(best_match[topic2][4],best_match[topic2][5])
line += ",{}".format(best_match[topic2][6])
f.write(line+'\n')
if copy_annotations and 'topic_metadata' in lda1:
print "Copying annotations"
if not 'topic_metadata' in lda2:
lda2['topic_metadata'] = {}
for topic2 in best_match:
lda2['topic_metadata'][topic2] = {'name':topic2}
topic1 = best_match[topic2][0]
p2 = best_match[topic2][4]
p1 = best_match[topic2][5]
if p1 >= copy_threshold and p2 >= copy_threshold:
annotation = best_match[topic2][6]
if len(annotation) > 0:
lda2['topic_metadata'][topic2]['annotation'] = annotation
if new_file2 == None:
with open(file2,'w') as f:
pickle.dump(lda2,f)
print "Dictionary with copied annotations saved to {}".format(file2)
else:
with open(new_file2,'w') as f:
pickle.dump(lda2,f)
print "Dictionary with copied annotations saved to {}".format(new_file2)
return best_match,lda2
def find_standards_in_dict(standards_file,lda_dict=None,lda_dict_file=None,mode='pos',mass_tol = 3,rt_tol = 12,new_lda_file = None):
if lda_dict == None:
if lda_dict_file == None:
print "Must provide either an lda dictionary or an lda dictionary file"
return
else:
with open(lda_dict_file,'r') as f:
lda_dict = pickle.load(f)
print "Loaded lda dictionary from {}".format(lda_dict_file)
# Load the standards
standard_molecules = []
found_heads = False
with open(standards_file,'r') as f:
for line in f:
if found_heads == False and line.startswith('Peak Num'):
found_heads = True
continue
elif found_heads == False:
continue
else:
split_line = line.rstrip().split(',')
if (mode == 'pos' and split_line[4] == '+') or (mode == 'neg' and split_line[3] == '-'):
# It's a keeper
name = split_line[2]
mz = split_line[6]
if mz == 'N':
continue
mz = float(mz)
rt = split_line[9]
if rt == '-':
continue
rt = float(rt)*60.0 # converted to seconds
formula = split_line[3]
standard_molecules.append((name,mz,rt,formula))
# mol = ()
print "Loaded {} molecules".format(len(standard_molecules))
doc_masses = np.array([float(d.split('_')[0]) for d in lda_dict['corpus']])
doc_names = [d for d in lda_dict['corpus']]
doc_rt = np.array([float(d.split('_')[1]) for d in lda_dict['corpus']])
hits = {}
for mol in standard_molecules:
mass_delta = mol[1]*mass_tol*1e-6
mass_hit = (doc_masses < mol[1] + mass_delta) & (doc_masses > mol[1] - mass_delta)
rt_hit = (doc_rt < mol[2] + rt_tol) & (doc_rt > mol[2] - rt_tol)
match = np.where(mass_hit & rt_hit)[0]
if len(match) > 0:
if len(match) == 1:
hits[mol] = doc_names[match[0]]
else:
# Multiple hits
min_dist = 1e6
best_match = match[0]
for individual_match in match:
match_mass = doc_masses[individual_match]
match_rt = doc_rt[individual_match]
dist = np.sqrt((match_rt - mol[2])**2 + (match_mass - mol[1])**2)
if dist < min_dist:
best_match = individual_match
hits[mol] = doc_names[best_match]
print "Found hits for {} standard molecules".format(len(hits))
# Add the hits to the lda_dict as document metadata
for mol in hits:
doc_name = hits[mol]
lda_dict['doc_metadata'][doc_name]['standard_mol'] = mol[0]
lda_dict['doc_metadata'][doc_name]['annotation'] = mol[0]
if new_lda_file:
with open(new_lda_file,'w') as f:
pickle.dump(lda_dict,f)
print "Wrote annotated dictionary to {}".format(new_lda_file)
return lda_dict
def alpha_report(vlda,overlap_scores = None,overlap_thresh = 0.3):
ta = []
for topic,ti in vlda.topic_index.items():
ta.append((topic,vlda.alpha[ti]))
ta = sorted(ta,key = lambda x: x[1],reverse = True)
for t,a in ta:
to = []
if overlap_scores:
for doc in overlap_scores:
if t in overlap_scores[doc]:
if overlap_scores[doc][t]>=overlap_thresh:
to.append((doc,overlap_scores[doc][t]))
print t,vlda.topic_metadata[t].get('SHORT_ANNOTATION',None),a
to = sorted(to,key = lambda x: x[1],reverse = True)
for t,o in to:
print '\t',t,o
def decompose(vlda,corpus):
# decompose the documents in corpus
# CHECK THE INTENSITY NORMALISATION
K = vlda.K
phi = {}
gamma_mat = {}
n_done = 0
n_total = len(corpus)
p_in = {}
for doc,spectrum in corpus.items():
intensity_in = 0.0
intensity_out = 0.0
max_i = 0.0
for word in spectrum:
if spectrum[word] > max_i:
max_i = spectrum[word]
if word in vlda.word_index:
intensity_in += spectrum[word]
else:
intensity_out += spectrum[word]
p_in[doc] = (1.0*intensity_in)/(intensity_in + intensity_out)
# print max_i
print "Decomposing document {} ({}/{})".format(doc,n_done,n_total)
phi[doc] = {}
# gamma_mat[doc] = np.zeros(K) + vlda.alpha
gamma_mat[doc] = np.ones(K)
for it in range(20):
# temp_gamma = np.zeros(K) + vlda.alpha
temp_gamma = np.ones(K)
for word in spectrum:
if word in vlda.word_index:
w = vlda.word_index[word]
log_phi_matrix = np.log(vlda.beta_matrix[:,w]) + psi(gamma_mat[doc])
log_phi_matrix = np.exp(log_phi_matrix - log_phi_matrix.max())
phi[doc][word] = log_phi_matrix/log_phi_matrix.sum()
temp_gamma += phi[doc][word]*spectrum[word]
gamma_mat[doc] = temp_gamma
n_done += 1
return gamma_mat,phi,p_in
def decompose_overlap(vlda,decomp_phi):
# computes the overlap score for a decomposition phi
o = {}
K = vlda.K
for doc in decomp_phi:
o[doc] = {}
os = np.zeros(K)
for word,phi_vec in decomp_phi[doc].items():
word_pos = vlda.word_index[word]
os += phi_vec*vlda.beta_matrix[:,word_pos]
for topic,pos in vlda.topic_index.items():
o[doc][topic] = os[pos]
return o
def decompose_from_dict(vlda_dict,corpus):
# step 1, get the betas into a matrix
K = vlda_dict['K']
skeleton = VariationalLDA({},K)
skeleton.word_index = vlda_dict['word_index']
skeleton.topic_index = vlda_dict['topic_index']
n_words = len(skeleton.word_index)
skeleton.beta_matrix = np.zeros((K,n_words),np.double) + 1e-6
beta_dict = vlda_dict['beta']
for topic in beta_dict:
topic_pos = skeleton.topic_index[topic]
for word,prob in beta_dict[topic].items():
word_pos = skeleton.word_index[word]
skeleton.beta_matrix[topic_pos,word_pos] = prob
# normalise
skeleton.beta_matrix /= skeleton.beta_matrix.sum(axis=1)[:,None]
g,phi,p_in = decompose(skeleton,corpus)
return g,phi,p_in,skeleton
def doc_feature_counts(vlda_dict,p_thresh = 0.01,o_thresh = 0.3):
theta = vlda_dict['theta']
decomp_gamma,decomp_phi,decomp_p_in,skeleton = decompose_from_dict(vlda_dict,vlda_dict['corpus'])
overlap_scores = decompose_overlap(skeleton,vlda_dict['corpus'])
phi_thresh = 0.5
motif_doc_counts = {}
motif_word_counts = {}
for doc in theta:
for motif in theta[doc]:
if theta[doc][motif] >= p_thresh and overlap_scores[doc][motif] >= o_thresh:
if not motif in motif_doc_counts:
motif_doc_counts[motif] = 0
motif_doc_counts[motif] += 1
for word,phi_vec in decomp_phi[doc].items():
motif_pos = vlda_dict['topic_index'][motif]
if phi_vec[motif_pos] >= phi_thresh:
if not motif in motif_word_counts:
motif_word_counts[motif] = {}
if not word in motif_word_counts[motif]:
motif_word_counts[motif][word] = 0
motif_word_counts[motif][word] += 1
word_total_counts = {}
for doc,spectrum in vlda_dict['corpus'].items():
for word,intensity in spectrum.items():
if not word in word_total_counts:
word_total_counts[word] = 0
word_total_counts[word] += 1
return motif_doc_counts,motif_word_counts,word_total_counts
def compute_overlap_scores(vlda):
import numpy as np
K = len(vlda.topic_index)
overlap_scores = {}
for doc in vlda.doc_index:
overlap_scores[doc] = {}
os = np.zeros(K)
pm = vlda.phi_matrix[doc]
for word,probs in pm.items():
word_index = vlda.word_index[word]
os += probs*vlda.beta_matrix[:,word_index]
for motif,m_pos in vlda.topic_index.items():
overlap_scores[doc][motif] = os[m_pos]
return overlap_scores
def write_csv(vlda,overlap_scores,filename,metadata,p_thresh=0.01,o_thresh=0.3):
import csv
probs = vlda.get_expect_theta()
motif_dict = {}
with open(filename,'w') as f:
writer = csv.writer(f)
heads = ['Document','Motif','Probability','Overlap Score','Precursor Mass','Retention Time','Document Annotation']
writer.writerow(heads)
all_rows = []
for doc,doc_pos in vlda.doc_index.items():
for motif,motif_pos in vlda.topic_index.items():
if probs[doc_pos,motif_pos] >= p_thresh and overlap_scores[doc][motif] >= o_thresh:
new_row = []
new_row.append(doc)
new_row.append(motif)
new_row.append(probs[doc_pos,motif_pos])
new_row.append(overlap_scores[doc][motif])
new_row.append(metadata[doc]['parentmass'])
new_row.append("None")
new_row.append(metadata[doc]['featid'])
all_rows.append(new_row)
motif_dict[motif] = True
all_rows = sorted(all_rows,key = lambda x:x[0])
for new_row in all_rows:
writer.writerow(new_row)
return motif_dict | gpl-3.0 | 7,522,224,093,427,851,000 | 33.374083 | 132 | 0.636862 | false | 2.75431 | false | false | false |
our-city-app/oca-backend | src/rogerthat/models/auth/acm.py | 1 | 2211 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from rogerthat.models.common import NdbModel
from google.appengine.ext import ndb
# DOCS https://authenticatie.vlaanderen.be/docs/beveiligen-van-toepassingen/integratie-methoden/oidc/
# T&I https://authenticatie-ti.vlaanderen.be/op/.well-known/openid-configuration
# PROD https://authenticatie.vlaanderen.be/op/.well-known/openid-configuration
class ACMSettings(NdbModel):
client_id = ndb.TextProperty()
client_secret = ndb.TextProperty()
openid_config_uri = ndb.TextProperty()
auth_redirect_uri = ndb.TextProperty()
logout_redirect_uri = ndb.TextProperty()
@classmethod
def create_key(cls, app_id):
return ndb.Key(cls, app_id)
class ACMLoginState(NdbModel):
creation_time = ndb.DateTimeProperty(auto_now_add=True)
app_id = ndb.TextProperty()
scope = ndb.TextProperty()
code_challenge = ndb.TextProperty()
token = ndb.JsonProperty()
id_token = ndb.JsonProperty()
@property
def state(self):
return self.key.id()
@classmethod
def create_key(cls, state):
return ndb.Key(cls, state)
@classmethod
def list_before_date(cls, date):
return cls.query(cls.creation_time < date)
class ACMLogoutState(NdbModel):
creation_time = ndb.DateTimeProperty(auto_now_add=True)
app_id = ndb.TextProperty()
@property
def state(self):
return self.key.id()
@classmethod
def create_key(cls, state):
return ndb.Key(cls, state)
@classmethod
def list_before_date(cls, date):
return cls.query(cls.creation_time < date) | apache-2.0 | 6,988,458,327,065,646,000 | 28.891892 | 101 | 0.703754 | false | 3.438569 | false | false | false |
cartertech/odoo-hr-ng | hr_report_payroll_attendance_summary/report/attendance_summary.py | 1 | 14914 | #-*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 One Click Software (http://oneclick.solutions)
# and Copyright (C) 2013 Michael Telahun Makonnen <[email protected]>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DATEFORMAT
from report import report_sxw
class Parser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_employee_data': self.get_employee_data,
'get_worked_days': self.get_worked_days,
'get_daily_ot': self.get_daily_ot,
'get_nightly_ot': self.get_nightly_ot,
'get_restday_ot': self.get_restday_ot,
'get_holiday_ot': self.get_holiday_ot,
'get_bunch_no': self.get_bunch_no,
'get_awol': self.get_awol,
'get_sickleave': self.get_sickleave,
'get_no': self.get_no,
'get_start': self.get_start,
'get_end': self.get_end,
'lose_bonus': self.lose_bonus,
'get_paid_leave': self.get_paid_leave,
'get_employee_list': self.get_employee_list,
'get_lu': self.get_lu,
'get_wage': self.get_adjusted_wage,
})
self.start_date = False
self.end_date = False
self.ee_lines = {}
self.no = 0
self.department_id = False
self.regular_hours = 8.0
self.get_employee_data_ids = []
self.get_employee_list_ids = []
def set_context(self, objects, data, ids, report_type=None):
if data.get('form', False) and data['form'].get('start_date', False):
self.start_date = data['form']['start_date']
if data.get('form', False) and data['form'].get('end_date', False):
self.end_date = data['form']['end_date']
return super(Parser, self).set_context(objects, data, ids, report_type=report_type)
def calculate_wage_by_ppf(self, dFullStart, dFullEnd, contracts_list):
full_days = 0
d = dFullStart
while d <= dFullEnd:
full_days += 1
d += relativedelta(days= +1)
wage = 0.0
for line in contracts_list:
ppf = 0.0
dates = line[0]
contract_wage = line[1]
ppf = float(relativedelta(dates[1], dates[0]).days + 1) / float(full_days)
wage += (contract_wage * ppf)
return wage
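    # Worked example (illustrative figures): for the period 2015-06-01..2015-06-30
    # (30 days), a contract covering the first 14 days at a wage of 3000.00 and a
    # second covering the remaining 16 days at 2000.00 give a pro-rated wage of
    # 3000 * 14/30 + 2000 * 16/30 = 1400.00 + 1066.67 = 2466.67.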
def get_adjusted_wage(self, ee_id):
con_obj = self.pool.get('hr.contract')
dS = datetime.strptime(self.start_date, OE_DATEFORMAT).date()
dE = datetime.strptime(self.end_date, OE_DATEFORMAT).date()
wage = 0
# Get contract in effect on first day of period (or first contract date for new employees)
cons= []
d = dS
while d <= dE:
con_ids = con_obj.search(self.cr, self.uid,
[('employee_id', '=', ee_id),
('date_start', '<=', d.strftime(OE_DATEFORMAT)),
'|', ('date_end', '=', False),
('date_end', '>=', d.strftime(OE_DATEFORMAT))])
if len(con_ids) > 0:
con = con_obj.browse(self.cr, self.uid, con_ids[0])
_seen = False
for c in cons:
if con.id == c[2]:
_seen = True
break
if _seen:
d += relativedelta(days= +1)
continue
dTempStart = dS
dTempEnd = dE
dConStart = datetime.strptime(con.date_start, OE_DATEFORMAT).date()
if dConStart > dS:
dTempStart = dConStart
if con.date_end:
dConEnd = datetime.strptime(con.date_end, OE_DATEFORMAT).date()
if dConEnd < dE:
dTempEnd = dConEnd
cons.append([(dTempStart, dTempEnd), con.wage, con.id])
d += relativedelta(days= +1)
if len(cons) > 0:
wage = self.calculate_wage_by_ppf(dS, dE, cons)
return wage
def get_employee_ids(self, department_id, seen_ids):
c_obj = self.pool.get('hr.contract')
ee_obj = self.pool.get('hr.employee')
c_ids = c_obj.search(self.cr, self.uid, ['|', ('job_id.department_id', '=', department_id),
('end_job_id.department_id', '=', department_id),
('date_start', '<=', self.end_date),
'|', ('date_end', '=', False),
('date_end', '>=', self.start_date)])
ee_ids = []
cdata = c_obj.read(self.cr, self.uid, c_ids, ['employee_id'])
ee_ids = [data['employee_id'][0] for data in cdata if ((data['employee_id'][0] not in ee_ids) and (data['employee_id'][0] not in seen_ids))]
seen_ids += ee_ids
# re-order
return ee_obj.search(self.cr, self.uid, [('id', 'in', ee_ids),
'|', ('active', '=', True),
('active', '=', False)])
def get_employee_list(self, department_id):
ee_ids = self.get_employee_ids(department_id, self.get_employee_list_ids)
return self.pool.get('hr.employee').browse(self.cr, self.uid, ee_ids)
def get_employee_data(self, department_id):
payslip_obj = self.pool.get('hr.payslip')
ee_obj = self.pool.get('hr.employee')
dtStart = datetime.strptime(self.start_date, OE_DATEFORMAT).date()
dtEnd = datetime.strptime(self.end_date, OE_DATEFORMAT).date()
ee_ids = self.get_employee_ids(department_id, self.get_employee_data_ids)
for ee in ee_obj.browse(self.cr, self.uid, ee_ids):
datas = []
for c in ee.contract_ids:
dtCStart = False
dtCEnd = False
if c.date_start: dtCStart = datetime.strptime(c.date_start, OE_DATEFORMAT).date()
if c.date_end: dtCEnd = datetime.strptime(c.date_end, OE_DATEFORMAT).date()
if (dtCStart and dtCStart <= dtEnd) and ((dtCEnd and dtCEnd >= dtStart) or not dtCEnd):
datas.append({'contract_id': c.id,
'date_start': dtCStart > dtStart and dtCStart.strftime(OE_DATEFORMAT) or dtStart.strftime(OE_DATEFORMAT),
'date_end': (dtCEnd and dtCEnd < dtEnd) and dtCEnd.strftime(OE_DATEFORMAT) or dtEnd.strftime(OE_DATEFORMAT)})
wd_lines = []
for d in datas:
wd_lines += payslip_obj.get_worked_day_lines(self.cr, self.uid, [d['contract_id']],
d['date_start'], d['date_end'])
self.ee_lines.update({ee.id: wd_lines})
def get_start(self):
return datetime.strptime(self.start_date, OE_DATEFORMAT).strftime('%B %d, %Y')
def get_end(self):
return datetime.strptime(self.end_date, OE_DATEFORMAT).strftime('%B %d, %Y')
def get_no(self, department_id):
if not self.department_id or self.department_id != department_id:
self.department_id = department_id
self.no = 1
else:
self.no += 1
return self.no
def get_lu(self, employee_id):
data = self.pool.get('hr.employee').read(self.cr, self.uid, employee_id, ['is_labour_union'])
return data.get('is_labour_union', False) and 'Y' or 'N'
def get_employee_start_date(self, employee_id):
first_day = False
c_obj = self.pool.get('hr.contract')
c_ids = c_obj.search(self.cr, self.uid, [('employee_id', '=', employee_id)])
for contract in c_obj.browse(self.cr, self.uid, c_ids):
if not first_day or contract.date_start < first_day:
first_day = contract.date_start
return first_day
def get_worked_days(self, employee_id):
total = 0.0
hol = 0.0
maxw = 0.0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORK100']:
total += float(line['number_of_hours']) / self.regular_hours
            elif line['code'] in ['WORKHOL']:
hol += float(line['number_of_hours']) / self.regular_hours
            elif line['code'] in ['MAX']:
maxw += float(line['number_of_hours']) / self.regular_hours
total += hol + self.get_paid_leave(employee_id)
awol = self.get_awol(employee_id)
# Take care to identify and handle employee's who didn't work the
# full month: newly hired and terminated employees
#
hire_date = self.get_employee_start_date(employee_id)
term_ids = self.pool.get('hr.employee.termination').search(self.cr, self.uid,
[('name', '<=', self.end_date),
('name', '>=', self.start_date),
('employee_id', '=', employee_id),
('employee_id.status', 'in', ['pending_inactive', 'inactive']),
('state', 'not in', ['cancel'])])
if hire_date <= self.start_date and len(term_ids) == 0:
if total >= maxw:
total = 26
total = total - awol
return total
def get_paid_leave(self, employee_id):
total = 0
paid_leaves = ['LVANNUAL', 'LVBEREAVEMENT', 'LVCIVIC', 'LVMATERNITY',
'LVMMEDICAL', 'LVPTO', 'LVWEDDING', 'LVSICK']
for line in self.ee_lines[employee_id]:
if line['code'] in paid_leaves:
total += float(line['number_of_hours']) / self.regular_hours
return total
def get_daily_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTD']:
total += line['number_of_hours']
return total
def get_nightly_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTN']:
total += line['number_of_hours']
return total
def get_restday_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTR', 'WORKRST']:
total += line['number_of_hours']
return total
def get_holiday_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTH', 'WORKHOL']:
total += line['number_of_hours']
return total
def get_bunch_no(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['BUNCH']:
total += int(line['number_of_hours'])
return total
def get_awol(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['AWOL']:
total += float(line['number_of_hours']) / self.regular_hours
return total
def get_sickleave(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['LVSICK']:
total += float(line['number_of_hours']) / self.regular_hours
elif line['code'] in ['LVSICK50']:
total += float(line['number_of_hours']) * 0.5
return total
def lose_bonus(self, employee_id):
loseit = False
for line in self.ee_lines[employee_id]:
if line['code'] in ['AWOL', 'TARDY', 'NFRA', 'WARNW'] and line['number_of_hours'] > 0.01:
loseit = True
# Check if the employee's contract spans the full month
if not loseit:
dStart = False
dEnd = None
con_obj = self.pool.get('hr.contract')
con_ids = con_obj.search(self.cr, self.uid, [('employee_id', '=', employee_id),
('state', '!=', 'draft'),
('date_start', '<=', self.end_date),
'|', ('date_end', '=', False),
('date_end', '>=', self.start_date)])
for con in con_obj.browse(self.cr, self.uid, con_ids):
dTempStart = datetime.strptime(con.date_start, OE_DATEFORMAT).date()
dTempEnd = False
if con.date_end:
dTempEnd = datetime.strptime(con.date_end, OE_DATEFORMAT).date()
if not dStart or dTempStart < dStart:
dStart = dTempStart
if (dEnd == None) or (not dTempEnd or (dEnd and dTempEnd > dEnd)):
dEnd = dTempEnd
if dStart and dStart > datetime.strptime(self.start_date, OE_DATEFORMAT).date():
loseit = True
elif (dEnd != None) and dEnd and (dEnd < datetime.strptime(self.end_date, OE_DATEFORMAT).date()):
loseit = True
return loseit
| agpl-3.0 | -5,935,333,586,226,009,000 | 42.48105 | 148 | 0.495575 | false | 3.907257 | false | false | false |
adlius/osf.io | api/providers/urls.py | 1 | 5275 | from django.conf.urls import include, url
from api.providers import views
app_name = 'osf'
urlpatterns = [
url(
r'^preprints/', include(
(
[
url(r'^$', views.PreprintProviderList.as_view(), name=views.PreprintProviderList.view_name),
url(r'^(?P<provider_id>\w+)/$', views.PreprintProviderDetail.as_view(), name=views.PreprintProviderDetail.view_name),
url(r'^(?P<provider_id>\w+)/licenses/$', views.PreprintProviderLicenseList.as_view(), name=views.PreprintProviderLicenseList.view_name),
url(r'^(?P<provider_id>\w+)/preprints/$', views.PreprintProviderPreprintList.as_view(), name=views.PreprintProviderPreprintList.view_name),
url(r'^(?P<provider_id>\w+)/subjects/$', views.PreprintProviderSubjects.as_view(), name=views.PreprintProviderSubjects.view_name),
url(r'^(?P<provider_id>\w+)/subjects/highlighted/$', views.PreprintProviderHighlightedSubjectList.as_view(), name=views.PreprintProviderHighlightedSubjectList.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/$', views.PreprintProviderTaxonomies.as_view(), name=views.PreprintProviderTaxonomies.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/highlighted/$', views.PreprintProviderHighlightedTaxonomyList.as_view(), name=views.PreprintProviderHighlightedTaxonomyList.view_name),
url(r'^(?P<provider_id>\w+)/withdraw_requests/$', views.PreprintProviderWithdrawRequestList.as_view(), name=views.PreprintProviderWithdrawRequestList.view_name),
url(r'^(?P<provider_id>\w+)/moderators/$', views.PreprintProviderModeratorsList.as_view(), name=views.PreprintProviderModeratorsList.view_name),
url(r'^(?P<provider_id>\w+)/moderators/(?P<moderator_id>\w+)/$', views.PreprintProviderModeratorsDetail.as_view(), name=views.PreprintProviderModeratorsDetail.view_name),
], 'preprints',
),
namespace='preprint-providers',
),
),
url(
r'^collections/', include(
(
[
url(r'^$', views.CollectionProviderList.as_view(), name=views.CollectionProviderList.view_name),
url(r'^(?P<provider_id>\w+)/$', views.CollectionProviderDetail.as_view(), name=views.CollectionProviderDetail.view_name),
url(r'^(?P<provider_id>\w+)/licenses/$', views.CollectionProviderLicenseList.as_view(), name=views.CollectionProviderLicenseList.view_name),
url(r'^(?P<provider_id>\w+)/submissions/$', views.CollectionProviderSubmissionList.as_view(), name=views.CollectionProviderSubmissionList.view_name),
url(r'^(?P<provider_id>\w+)/subjects/$', views.CollectionProviderSubjects.as_view(), name=views.CollectionProviderSubjects.view_name),
url(r'^(?P<provider_id>\w+)/subjects/highlighted/$', views.CollectionProviderHighlightedSubjectList.as_view(), name=views.CollectionProviderHighlightedSubjectList.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/$', views.CollectionProviderTaxonomies.as_view(), name=views.CollectionProviderTaxonomies.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/highlighted/$', views.CollectionProviderHighlightedTaxonomyList.as_view(), name=views.CollectionProviderHighlightedTaxonomyList.view_name),
], 'collections',
),
namespace='collection-providers',
),
),
url(
r'^registrations/', include(
(
[
url(r'^$', views.RegistrationProviderList.as_view(), name=views.RegistrationProviderList.view_name),
url(r'^(?P<provider_id>\w+)/$', views.RegistrationProviderDetail.as_view(), name=views.RegistrationProviderDetail.view_name),
url(r'^(?P<provider_id>\w+)/licenses/$', views.RegistrationProviderLicenseList.as_view(), name=views.RegistrationProviderLicenseList.view_name),
url(r'^(?P<provider_id>\w+)/schemas/$', views.RegistrationProviderSchemaList.as_view(), name=views.RegistrationProviderSchemaList.view_name),
url(r'^(?P<provider_id>\w+)/submissions/$', views.RegistrationProviderSubmissionList.as_view(), name=views.RegistrationProviderSubmissionList.view_name),
url(r'^(?P<provider_id>\w+)/subjects/$', views.RegistrationProviderSubjects.as_view(), name=views.RegistrationProviderSubjects.view_name),
url(r'^(?P<provider_id>\w+)/subjects/highlighted/$', views.RegistrationProviderHighlightedSubjectList.as_view(), name=views.RegistrationProviderHighlightedSubjectList.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/$', views.RegistrationProviderTaxonomies.as_view(), name=views.RegistrationProviderTaxonomies.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/highlighted/$', views.RegistrationProviderHighlightedTaxonomyList.as_view(), name=views.RegistrationProviderHighlightedTaxonomyList.view_name),
], 'registrations',
),
namespace='registration-providers',
),
),
]
| apache-2.0 | 7,185,396,797,768,840,000 | 80.153846 | 202 | 0.653649 | false | 4.03596 | false | true | false |
akraft196/pyASC | examples/mplot1.py | 1 | 7267 | #! /usr/bin/env python
#
# quick and dirty processing of the MD All Sky images
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.misc import imsave
import numpy as np
import aplpy
import argparse as ap
import os.path
import logging
import time
def d(ff,box=[]):
#very specific for 16 bit data, since we want to keep the data in uint16
h = fits.open(ff, do_not_scale_image_data=True)
if len(box)==0:
return h[0].header, h[0].data
else:
# figure out 0 vs. 1 based offsets; box is 1 based
return h[0].header, h[0].data[box[1]:box[3], box[0]:box[2]]
def dsum(i0,i1,step = 1, box=[]):
""" for a range of fits files
compute the mean and dispersion from the mean
"""
for i in range(i0,i1+1,step):
ff = 'IMG%05d.FIT' % i
h1, d1 = d(ff,box)
#very specific for 16 bit data, since we want to keep the data in uint16
bzero = h1['BZERO']
bscale = h1['BSCALE']
if i == i0:
sum0 = 1.0
sum1 = d1*bscale+bzero
sum2 = sum1*sum1
#sum1 = d1
#sum2 = d1*d1
h = h1
nx = d1.shape[1]
ny = d1.shape[0]
nz = i1 + 1 - i0
c = np.zeros((nz, ny, nx))
c[0,:,:] = d1.reshape(ny,nx)
else:
sum0 = sum0 + 1.0
sum1 = sum1 + (d1 * bscale + bzero)
sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero)
#sum2 = sum2+d1*d1
c[i - i0,:,:] = d1.reshape(ny,nx)
sum1 = sum1 / sum0
sum2 = sum2 / sum0 - sum1*sum1
print type(sum1), type(sum2)
return h,sum1,np.sqrt(sum2),c
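# Illustrative call (assumed frame range and box corners):
#
#   h, mean_img, std_img, cube = dsum(1, 10, box=[100, 100, 200, 200])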
def show(sum):
""" some native matplotlib display,
doesn't show pointsources well at all
"""
ip = plt.imshow(sum)
plt.show()
def show2(sum):
""" aplpy is the better viewer clearly
"""
fig = aplpy.FITSFigure(sum)
#fig.show_grayscale()
fig.show_colorscale()
def show3(sum1,sum2):
""" aplpy is the better viewer clearly
"""
fig = aplpy.FITSFigure(sum1,subplot=(2,2,1))
#fig = aplpy.FITSFigure(sum2,subplot=(2,2,2),figure=1)
fig.show_grayscale()
# For some variations on this theme, e.g. time.time vs. time.clock, see
# http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python
#
class Dtime(object):
""" Class to help measuring the wall clock time between tagged events
Typical usage:
dt = Dtime()
...
dt.tag('a')
...
dt.tag('b')
"""
def __init__(self, label=".", report=True):
self.start = self.time()
self.init = self.start
self.label = label
self.report = report
self.dtimes = []
dt = self.init - self.init
if self.report:
logging.info("Dtime: %s ADMIT " % self.label + str(self.start))
logging.info("Dtime: %s BEGIN " % self.label + str(dt))
def reset(self, report=True):
self.start = self.time()
self.report = report
self.dtimes = []
def tag(self, mytag):
t0 = self.start
t1 = self.time()
dt = t1 - t0
self.dtimes.append((mytag, dt))
self.start = t1
if self.report:
logging.info("Dtime: %s " % self.label + mytag + " " + str(dt))
return dt
def show(self):
if self.report:
for r in self.dtimes:
logging.info("Dtime: %s " % self.label + str(r[0]) + " " + str(r[1]))
return self.dtimes
def end(self):
t0 = self.init
t1 = self.time()
dt = t1 - t0
if self.report:
logging.info("Dtime: %s END " % self.label + str(dt))
return dt
def time(self):
""" pick the actual OS routine that returns some kind of timer
time.time : wall clock time (include I/O and multitasking overhead)
time.clock : cpu clock time
"""
return np.array([time.clock(), time.time()])
if __name__ == '__main__':
logging.basicConfig(level = logging.INFO)
dt = Dtime("mplot1")
#--start, -s n
#--end, -e n
#--box x1 y1 x2 y2
parser = ap.ArgumentParser(description='Plotting .fits files.')
parser.add_argument('-f', '--frame', nargs = '*', type = int, help = 'Starting and ending parameters for the frames analyzed')
parser.add_argument('-b', '--box', nargs = 4, type = int, help = 'Coordinates for the bottom left corner and top right corner of a rectangle of pixels to be analyzed from the data. In the structure x1, y1, x2, y2 (1 based numbers)')
    parser.add_argument('-g', '--graphics', nargs = 1, type = int, default = [0], help = 'Controls whether to display or save graphics. 0: no graphics, 1: display graphics, 2: save graphics as .png')
args = vars(parser.parse_args())
if args['frame'] == None:
count = 0
start = None
end = None
step = 1
#while we have yet to find an end
while end == None:
filename = 'IMG%05d.FIT' % count
#if start has not been found yet, and this file exists
if start == None and os.path.isfile(filename):
start = count
#if start has been found and we finally found a file that doesn't exist, set end to the last file that existed (count - 1.FIT)
elif start != None and not os.path.isfile(filename):
end = count - 1
count += 1
elif len(args['frame']) >= 2 and len(args['frame']) <= 3:
start = args['frame'][0] # starting frame (IMGnnnnn.FIT)
end = args['frame'][1] # ending frame
if len(args['frame']) == 3:
            step = args['frame'][2]
else:
step = 1
else:
raise Exception,"-f needs 0, 2, or 3 arguments."
box = args['box'] # BLC and TRC
if box == None:
box = []
dt.tag("start")
# compute the average and dispersion of the series
    h1,sum1,sum2,cube = dsum(start,end,step,box=box)     # end can be uninitialized here; might that throw an error?
dt.tag("dsum")
nz = cube.shape[0]
# delta X and Y images
dsumy = sum1 - np.roll(sum1, 1, axis = 0) # change in the y axis
dsumx = sum1 - np.roll(sum1, 1, axis = 1) # change in the x axis
# write them to FITS
fits.writeto('dsumx.fits', dsumx, h1, clobber=True)
fits.writeto('dsumy.fits', dsumy, h1, clobber=True)
fits.writeto('sum1.fits', sum1, h1, clobber=True)
fits.writeto('sum2.fits', sum2, h1, clobber=True)
dt.tag("write2d")
# 3D cube to
h1['NAXIS'] = 3
h1['NAXIS3'] = nz
fits.writeto('cube.fits', cube, h1, clobber=True)
dt.tag("write3d")
if args['graphics'][0] == 1:
# plot the sum1 and sum2 correllation (glueviz should do this)
s1 = sum1.flatten()
s2 = sum2.flatten()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(s1,s2)
plt.show()
show2(sum1)
show2(sum2)
if args['graphics'][0] == 2:
imsave('sum1.png', sum1)
imsave('sum2.png', sum2)
dt.tag("done")
dt.end()
| mit | 8,728,276,588,416,666,000 | 31.734234 | 236 | 0.551534 | false | 3.260206 | false | false | false |
wooga/airflow | airflow/providers/google/cloud/operators/natural_language.py | 1 | 10959 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Cloud Language operators.
"""
from typing import Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.language_v1 import enums
from google.cloud.language_v1.types import Document
from google.protobuf.json_format import MessageToDict
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.natural_language import CloudNaturalLanguageHook
MetaData = Sequence[Tuple[str, str]]
class CloudNaturalLanguageAnalyzeEntitiesOperator(BaseOperator):
"""
Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeEntitiesOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.enums.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: str
"""
# [START natural_language_analyze_entities_template_fields]
template_fields = ("document", "gcp_conn_id")
# [END natural_language_analyze_entities_template_fields]
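    # Example use in a DAG (a sketch; the task_id and document content are illustrative):
    #   CloudNaturalLanguageAnalyzeEntitiesOperator(
    #       task_id="analyze_entities",
    #       document={"type": "PLAIN_TEXT", "content": "Apache Airflow"},
    #   )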
def __init__(
self,
document: Union[dict, Document],
encoding_type: Optional[enums.EncodingType] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[MetaData] = None,
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudNaturalLanguageHook(gcp_conn_id=self.gcp_conn_id)
self.log.info("Start analyzing entities")
        response = hook.analyze_entities(
            document=self.document, encoding_type=self.encoding_type, retry=self.retry, timeout=self.timeout, metadata=self.metadata
        )
self.log.info("Finished analyzing entities")
return MessageToDict(response)
class CloudNaturalLanguageAnalyzeEntitySentimentOperator(BaseOperator):
"""
Finds entities, similar to AnalyzeEntities in the text and analyzes sentiment associated with each
entity and its mentions.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeEntitySentimentOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.enums.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: str
    :rtype: google.cloud.language_v1.types.AnalyzeEntitySentimentResponse
"""
# [START natural_language_analyze_entity_sentiment_template_fields]
template_fields = ("document", "gcp_conn_id")
# [END natural_language_analyze_entity_sentiment_template_fields]
def __init__(
self,
document: Union[dict, Document],
encoding_type: Optional[enums.EncodingType] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[MetaData] = None,
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudNaturalLanguageHook(gcp_conn_id=self.gcp_conn_id)
self.log.info("Start entity sentiment analyze")
response = hook.analyze_entity_sentiment(
document=self.document,
encoding_type=self.encoding_type,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Finished entity sentiment analyze")
return MessageToDict(response)
class CloudNaturalLanguageAnalyzeSentimentOperator(BaseOperator):
"""
Analyzes the sentiment of the provided text.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeSentimentOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.enums.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: str
    :rtype: google.cloud.language_v1.types.AnalyzeSentimentResponse
"""
# [START natural_language_analyze_sentiment_template_fields]
template_fields = ("document", "gcp_conn_id")
# [END natural_language_analyze_sentiment_template_fields]
def __init__(
self,
document: Union[dict, Document],
encoding_type: Optional[enums.EncodingType] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[MetaData] = None,
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudNaturalLanguageHook(gcp_conn_id=self.gcp_conn_id)
self.log.info("Start sentiment analyze")
response = hook.analyze_sentiment(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished sentiment analyze")
return MessageToDict(response)
class CloudNaturalLanguageClassifyTextOperator(BaseOperator):
"""
Classifies a document into categories.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageClassifyTextOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: str
"""
# [START natural_language_classify_text_template_fields]
template_fields = ("document", "gcp_conn_id")
# [END natural_language_classify_text_template_fields]
def __init__(
self,
document: Union[dict, Document],
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[MetaData] = None,
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.document = document
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudNaturalLanguageHook(gcp_conn_id=self.gcp_conn_id)
self.log.info("Start text classify")
response = hook.classify_text(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished text classify")
return MessageToDict(response)
| apache-2.0 | 3,930,899,505,662,823,400 | 40.044944 | 102 | 0.686559 | false | 4.140159 | false | false | false |
goinnn/django-multiselectfield | example/app/urls.py | 1 | 1723 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 by Pablo Martín <[email protected]>
#
# This software is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
from django import VERSION
try:
from django.conf.urls import url
# Compatibility for Django > 1.8
def patterns(prefix, *args):
if VERSION < (1, 9):
from django.conf.urls import patterns as django_patterns
return django_patterns(prefix, *args)
elif prefix != '':
raise NotImplementedError("You need to update your URLConf for "
"Django 1.10, or tweak it to remove the "
"prefix parameter")
else:
return list(args)
except ImportError:  # Django < 1.4 (no django.conf.urls) or Django >= 4.0 (url removed)
if VERSION < (4, 0):
from django.conf.urls.defaults import patterns, url
else:
from django.urls import re_path as url
from .views import app_index
if VERSION < (1, 11):
urlpatterns = patterns(
'',
url(r'^$', app_index, name='app_index'),
)
else:
urlpatterns = [
url(r'^$', app_index, name='app_index'),
]
| lgpl-3.0 | 1,116,952,307,818,030,800 | 34.875 | 79 | 0.639373 | false | 4.109785 | false | false | false |
bunnylin/supersakura | translate.py | 1 | 4235 | #!/usr/bin/python
# CC0, 2017 :: Kirinn Bunnylin / Mooncore
# https://creativecommons.org/publicdomain/zero/1.0/
import sys, re, time, subprocess
#from subprocess import check_output
if len(sys.argv) < 2:
print("Usage: python translate.py inputfile.tsv >outputfile.tsv")
print("The input file should be a tsv. The leftmost column is preserved in")
print("the output as unique string IDs, and the rightmost column is taken")
print("as the text to be translated.")
print("The translated output is printed in stdout in tsv format. You should")
print('pipe it into a suitable file, for example "outputfile.tsv".')
sys.exit(1)
def GetTranslation(com):
# Handy retry loop with a timeout for easily invoking translate-shell.
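  # "com" is the argv list for translate-shell; returns its stdout split into lines,
  # or [""] if every attempt failed.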
tries = 8
while tries != 0:
tries -= 1
try:
transres = subprocess.check_output(com, timeout = 16)
transres = transres.decode(sys.stdout.encoding).split("\n")
except:
transres = [""]
    if transres != [""]: tries = 0
else: time.sleep(16)
return transres
# Read the constant substitutions list into memory. The file trans-subs.txt
# should contain one substitution per line, in the form "source/new text".
# Lines starting with a # are treated as comments.
sublist = []
with open("trans-subs.txt") as subfile:
for line in subfile:
if line[0] != "#":
line = line.rstrip()
if line != "":
splitline = line.split("/")
sublist.append({"from": splitline[0], "to": splitline[-1]})
# Print the output header.
print("String IDs\tOriginal\tPhonetic\tGoogle\tBing\tYandex")
sys.stdout.flush()
with open(sys.argv[1]) as infile:
for line in infile:
delaytime = time.time() + 1.024
# If this line has no tabs, the line as a whole is used as translatable
# input. Otherwise everything before the first tab is saved as the
# string ID, and everything after the last tab is used as the
# translatable input.
stringid = ""
splitline = line.rstrip().split("\t")
if len(splitline) > 1:
stringid = splitline[0]
line = splitline[-1]
# Output the string ID and translatable input.
linepart = stringid + "\t" + line + "\t"
sys.stdout.buffer.write(linepart.encode("utf-8"))
# Apply pre-translation substitutions.
for subitem in sublist:
line = line.replace(subitem["from"], subitem["to"])
# Replace backslashes with a double backslash. At least Bing sometimes
# drops backslashes if not doubled.
line = line.replace("\\", "\\\\")
# Google translate, wrapped in a retry loop.
transgoo = GetTranslation(["translate-shell","ja:en","-e","google",
"-no-ansi","-no-autocorrect","-show-alternatives","n",
"-show-languages","n","-show-prompt-message","n","--",line])
# transgoo is now expected to have the original on line 1, the phonetic
# on line 2 in brackets, and the translation on line 4.
trans0 = transgoo[1][1:-1]
trans1 = transgoo[3]
# Get the other translations.
trans2 = GetTranslation(["translate-shell","-b","ja:en","-e","bing",
"-no-ansi","--",line])[0]
trans3 = GetTranslation(["translate-shell","-b","ja:en","-e","yandex",
"-no-ansi","--",line])[0]
# A brief wait between requests is polite to the translation servers.
delaylen = delaytime - time.time()
if delaylen > 0: time.sleep(delaylen)
# Pack the translated strings in a single variable for post-processing.
# Delimit with tab characters.
transall = trans0 + "\t" + trans1 + "\t" + trans2 + "\t" + trans3 + "\n"
# If the output contains ": ", but the input doesn't, then the space was
# added unnecessarily and should be removed.
if transall.find(": ") != -1 and line.find(": ") == -1:
transall = transall.replace(": ", ":")
# The translators tend to add spaces after some backslashes, remove.
transall = transall.replace("\\ ", "\\")
# Change double-backslashes back to normal.
transall = transall.replace("\\\\", "\\")
# Some translators also add spaces after dollars, remove them.
transall = transall.replace("\\$ ", "\\$")
# Output the translated, processed strings.
sys.stdout.buffer.write(transall.encode("utf-8"))
sys.stdout.flush()
# end.
| gpl-3.0 | 9,078,087,450,414,523,000 | 35.508621 | 79 | 0.654782 | false | 3.576858 | false | false | false |
marcusmchale/breedcafs | app/cypher.py | 1 | 76573 | class Cypher:
def __init__(self):
pass
# user procedures
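	# each attribute below is a parameterised Cypher statement; parameters such as
	# $username, $email or $filename are supplied at run time, e.g. (a minimal sketch,
	# assuming the official neo4j Python driver and an open session):
	#   session.run(Cypher.user_find, username="jane", email="jane@example.com")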
allowed_emails = (
' MATCH '
' (e: Emails) '
' RETURN '
' e.allowed '
)
user_allowed_emails = (
' MATCH '
' (u:User) '
' WITH '
' COLLECT (DISTINCT u.email) as registered_emails '
' MATCH '
' (user:User {'
' username_lower : toLower(trim($username)) '
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(e: Emails) '
' RETURN '
' FILTER (n in e.allowed WHERE NOT n in registered_emails) as user_allowed '
)
email_find = (
' MATCH '
' (user: User { '
' email: toLower(trim($email)) '
' }) '
' RETURN '
' user '
)
confirm_email = (
' MATCH '
' (user: User { '
' email: toLower(trim($email)) '
' }) '
' SET '
' user.confirmed = true '
)
user_find = (
' MATCH '
' (user: User) '
' WHERE '
' user.username_lower = toLower($username) '
' OR '
' user.email = toLower(trim($email)) '
' RETURN '
' user '
)
username_find = (
' MATCH '
' (user: User { '
' username_lower: toLower($username)'
' }) '
' RETURN '
' user '
)
user_affiliations = (
' MATCH '
' (u: User { '
' username_lower: toLower($username) '
' }) '
' -[a: AFFILIATED]->(p: Partner) '
' OPTIONAL MATCH '
' (p)<-[: AFFILIATED {admin: true}]-(admin: User) '
' RETURN '
' p.name , '
' p.fullname , '
' a.confirmed as confirmed, '
' a.data_shared as data_shared , '
' admin.email as admin_email'
)
add_affiliations = (
' UNWIND '
' $partners as partner '
' MATCH '
' (u:User { '
' username_lower: toLower(trim($username)) '
' }), '
' (p:Partner { '
' name_lower: toLower(trim(partner)) '
' }) '
' MERGE '
' (u)-[a: AFFILIATED { '
' data_shared: false, '
' admin: false, '
' confirm_timestamp: [], '
' confirmed: false '
' }]->(p) '
' ON CREATE SET '
' a.add_timestamp = datetime.transaction().epochMillis '
' RETURN '
' p.name '
)
remove_affiliations = (
' UNWIND '
' $partners as partner '
' MATCH '
' (u:User { '
' username_lower: toLower(trim($username)) '
' }) '
' -[a:AFFILIATED { '
' data_shared: false '
' }]->(p: Partner {'
' name_lower: toLower(trim(partner)) '
' }) '
' WHERE '
		'	size(a.confirm_timestamp) = 0 '
' DELETE '
' a '
' RETURN p.name '
)
password_reset = (
' MATCH '
' (user: User { '
' email : toLower(trim($email)) '
' }) '
' SET user.password = $password '
)
user_register = (
		# uses MERGE rather than CREATE so an existing user profile is not overwritten if this is called in error
' MATCH '
' (partner:Partner {'
' name_lower: toLower(trim($partner)) '
' }) '
' MERGE '
' (user:User { '
' username_lower: toLower(trim($username)) '
' }) '
' ON CREATE SET '
' user.username = trim($username), '
' user.password = $password, '
' user.email = toLower(trim($email)), '
' user.name = $name, '
' user.time = datetime.transaction().epochMillis, '
' user.access = ["user"], '
' user.confirmed = false, '
' user.found = false '
' ON MATCH SET '
' user.found = TRUE '
' WITH '
' user, partner '
' WHERE '
' user.found = false '
' CREATE '
' (user)-[r: AFFILIATED { '
' data_shared: true, '
' confirmed: false, '
' confirm_timestamp: [], '
' admin: false '
' }]->(partner), '
' (user)-[: SUBMITTED]->(sub: Submissions), '
' (sub)-[: SUBMITTED]->(: Emails {allowed :[]}),'
' (sub)-[: SUBMITTED]->(locations: Locations), '
' (locations)-[: SUBMITTED]->(: Countries), '
' (locations)-[: SUBMITTED]->(: Regions), '
' (locations)-[: SUBMITTED]->(: Farms), '
' (sub)-[:SUBMITTED]->(items: Items), '
' (items)-[: SUBMITTED]->(: Fields), '
' (items)-[: SUBMITTED]->(: Blocks), '
' (items)-[: SUBMITTED]->(: Trees), '
' (items)-[: SUBMITTED]->(: Samples), '
' (sub)-[:SUBMITTED]->(: Records) '
)
add_allowed_email = (
' MATCH '
' (all: Emails) '
' WITH '
' all.allowed as allowed_emails '
' UNWIND '
' allowed_emails as email '
' WITH '
' COLLECT(DISTINCT email) as set '
' WHERE '
' NOT toLower(trim($email)) IN set '
' MATCH '
' (:User { '
' username_lower: toLower(trim($username)) '
' }) '
' -[:SUBMITTED]->(: Submissions) '
' -[:SUBMITTED]->(e: Emails) '
' SET e.allowed = e.allowed + [toLower(trim($email))] '
' RETURN toLower(trim($email)) '
)
remove_allowed_email = (
' MATCH '
' (:User { '
' username_lower: toLower(trim($username)) '
' }) '
' -[:SUBMITTED]->(: Submissions) '
' -[:SUBMITTED]->(e: Emails) '
' WITH e, extract(x in $email | toLower(trim(x))) as emails'
' SET e.allowed = FILTER (n in e.allowed WHERE NOT n IN emails) '
' RETURN emails '
)
user_del = (
' MATCH '
' (u:User { '
' email: toLower(trim($email)), '
' confirmed: false '
' }) '
' OPTIONAL MATCH '
' (u)-[:SUBMITTED*..3]->(n) '
' DETACH DELETE '
' u,n '
)
partner_admin_users = (
' MATCH '
' (:User { '
' username_lower: toLower(trim($username)) '
' }) '
' -[: AFFILIATED { '
' admin: true '
' }]->(p:Partner) '
' WITH p '
' MATCH '
' (p)<-[a:AFFILIATED]-(u:User) '
' RETURN { '
' Username: u.username, '
' Email: u.email, '
' Name: u.name, '
' Partner: p.name, '
' PartnerFullName: p.fullname, '
' Confirmed: a.confirmed '
' } '
)
global_admin_users = (
' MATCH '
' (u:User)-[a:AFFILIATED]->(p:Partner) '
' RETURN { '
' Username : u.username, '
' Email : u.email, '
' Name : u.name, '
' Partner : p.name, '
' PartnerFullName : p.fullname, '
' Confirmed : a.confirmed '
' } '
)
	# these statements toggle the confirmed status, so they handle both the confirm and un-confirm operations
partner_confirm_users = (
' MATCH '
' (user:User { '
' username_lower: toLower(trim($username)) '
' }) '
' -[:AFFILIATED {admin : true}]->(p:Partner) '
' WHERE '
' "partner_admin" in user.access'
' MATCH '
' (p)<-[a:AFFILIATED]-(u:User) '
' UNWIND '
' $confirm_list as confirm '
' WITH '
' p,a,u '
' WHERE '
' p.name_lower = toLower(trim(confirm["partner"])) '
' AND '
' u.username_lower = toLower(trim(confirm["username"])) '
' SET '
' a.confirmed = NOT a.confirmed, '
' a.confirm_timestamp = a.confirm_timestamp + datetime.transaction().epochMillis '
' RETURN u.name '
)
global_confirm_users = (
' MATCH '
' (p:Partner)<-[a:AFFILIATED]-(u:User) '
' UNWIND '
' $confirm_list as confirm '
' WITH '
' p,a,u '
' WHERE '
' p.name_lower = toLower(trim(confirm["partner"])) '
' AND '
' u.username_lower = toLower(trim(confirm["username"])) '
' SET '
' a.confirmed = NOT a.confirmed, '
' a.confirm_timestamp = a.confirm_timestamp + datetime.transaction().epochMillis '
' RETURN u.name '
)
partner_admins = (
' MATCH '
' (u:User)-[a:AFFILIATED]->(p:Partner) '
' RETURN { '
' Username : u.username, '
' Email : u.email, '
' Name : u.name, '
' Partner : p.name, '
' PartnerFullName : p.fullname, '
' Confirmed : a.admin '
' } '
)
confirm_admins = (
' MATCH '
' (p:Partner)<-[a:AFFILIATED]-(u:User) '
' UNWIND $admins as admin '
' WITH '
' p,a,u '
' WHERE '
' p.name_lower = toLower(trim(admin["partner"])) '
' AND '
' u.username_lower = toLower(trim(admin["username"])) '
' SET '
' a.admin = NOT a.admin '
' WITH u '
' MATCH (u)-[a:AFFILIATED]->(:Partner) '
' WITH u, collect(a.admin) as admin_rights '
' set u.access = CASE '
' WHEN true IN admin_rights '
' THEN ["user","partner_admin"] '
' ELSE ["user"] '
' END '
' RETURN '
' u.name '
)
# Upload procedures
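	# the upload statements below read rows with LOAD CSV from $filename
	# (upload_check_value is a reusable validation fragment spliced into each of them);
	# the *_check statements only report problems (unknown items or input variables,
	# invalid values, conflicts with existing records) while their counterparts
	# merge the records and return per-row feedback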
upload_check_value = (
		# validates each value against the format (and category list) of the relevant input variable
		# handles empty items and surrounding white space
		# coerces strings to lower case and numeric values to float/integer types
		# strips % symbols from percent values
		# ! the enclosing query must have "input" (an Input node) and "value" (from the file) in scope before this fragment is spliced in
' CASE '
' WHEN input.format = "multicat" '
' THEN CASE '
' WHEN size(FILTER (n in split(value, ":") WHERE size(n) > 0)) '
' = size(FILTER (n in split(value, ":") WHERE toLower(trim(n)) in '
' EXTRACT(item in input.category_list | toLower(item)))) '
' THEN trim(value) '
' ELSE Null '
' END '
' WHEN input.format = "categorical" '
' THEN [category IN input.category_list WHERE toLower(category) = toLower(trim(value)) | category][0] '
' WHEN input.format = "text" '
' THEN CASE '
' WHEN input.name_lower IN [ '
' "assign field sample to sample(s) by id", '
' "assign field sample to tree(s) by id", '
' "assign field sample to block(s) by id" '
' ] THEN CASE '
' WHEN size(split(value, "," )) = size( '
' filter(x in split(value, ",") WHERE '
' toInteger(trim(x)) IS NOT NULL '
' OR ( '
' size(split(x, "-")) = 2'
' AND toInteger(split(x, "-")[0]) IS NOT NULL '
' AND toInteger(split(x, "-")[1]) IS NOT NULL'
' ) '
' ) '
' ) '
' THEN value '
' ELSE Null '
' END '
' WHEN input.name_lower IN [ '
' "assign field sample to block by name", '
' "assign tree to block by name" '
' ] '
' THEN trim(value) '
' WHEN input.name contains "time" '
' THEN CASE '
' WHEN size(split(value, ":")) = 2 '
' AND size(split(value, ":")[0]) <= 2 '
' AND toInteger(trim(split(value, ":")[0])) <=24 '
' AND toInteger(trim(split(value, ":")[0])) >= 0 '
' AND size(split(value, ":")[1]) <= 2 '
' AND toInteger(trim(split(value, ":")[1])) < 60 '
' AND toInteger(trim(split(value, ":")[1])) >= 0 '
' THEN trim(value) '
' ELSE Null '
' END '
' ELSE '
' toString(value) '
' END '
' WHEN input.format = "percent" '
' THEN CASE '
' WHEN toFloat(replace(value, "%", "")) IS NOT NULL '
' THEN toFloat(replace(value, "%", "")) '
' ELSE Null '
' END '
' WHEN input.format = "counter" '
' THEN CASE '
' WHEN toInteger(value) IS NOT NULL '
' THEN toInteger(value) '
' ELSE '
' Null '
' END '
' WHEN input.format = "numeric" '
' THEN CASE '
' WHEN toFloat(value) IS NOT NULL '
' THEN toFloat(value) '
' ELSE Null '
' END '
' WHEN input.format = "boolean" '
' THEN CASE '
' WHEN toLower(value) in ["yes","y"] '
' THEN True '
' WHEN toLower(value) in ["no","n"] '
' THEN False '
' WHEN toBoolean(value) IS NOT NULL '
' THEN toBoolean(value) '
' ELSE Null '
' END '
' WHEN input.format = "location" '
' THEN CASE '
' WHEN size(split(value, ";")) = 2 '
' AND toFloat(trim(split(value, ";")[0])) IS NOT NULL '
' AND toFloat(trim(split(value, ";")[1])) IS NOT NULL '
' THEN trim(value) '
' ELSE Null '
' END '
' WHEN input.format = "date" '
' THEN CASE '
' WHEN size(split(value, "-")) = 3 '
' AND size(trim(split(value, "-")[0])) = 4 '
' AND size(trim(split(value, "-")[1])) <= 2 '
' AND size(trim(split(value, "-")[1])) >= 1 '
' AND toInteger(trim(split(value, "-")[1])) >= 1 '
' AND toInteger(trim(split(value, "-")[1])) <= 12 '
' AND size(trim(split(value, "-")[2])) <= 2 '
' AND size(trim(split(value, "-")[2])) >= 1 '
' AND toInteger(trim(split(value, "-")[2])) >= 1 '
' AND toInteger(trim(split(value, "-")[2])) <= 31 '
' THEN '
' trim(value) '
' ELSE '
' Null '
' END '
' ELSE Null '
' END '
)
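	# checks a Field Book (trait) export against the database without writing anything;
	# returns up to 50 rows with unknown UIDs or input variables, invalid values,
	# or conflicts with existing records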
upload_fb_check = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' toInteger(csvLine.row_index) as row_index, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 1) as replicate, '
' toLower(trim(csvLine.trait)) as input_name, '
' trim(csvLine.value) as value, '
' apoc.date.parse(csvLine.timestamp, "ms", "yyyy-MM-dd HH:mm:sszzz") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' WHERE trim(csvLine.value) <> "" '
' OPTIONAL MATCH '
' (item: Item { '
' uid: uid '
' }) '
' OPTIONAL MATCH '
' (:RecordType { '
' name_lower: "trait" '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(:ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' row_index, '
' input, '
' input_name, '
' item, replicate, '
' time, '
' value '
' OPTIONAL MATCH '
' (item)'
' <-[:FOR_ITEM]-(if: ItemInput)'
' -[:FOR_INPUT*..2]->(input), '
' (if)'
' <-[:RECORD_FOR]-(r: Record { '
' replicate: replicate, '
' time: time '
' }) '
' <-[s: SUBMITTED]-(: UserFieldInput) '
' <-[: SUBMITTED]-(: Records) '
' <-[: SUBMITTED]-(: Submissions) '
' <-[: SUBMITTED]-(u: User) '
' -[:AFFILIATED {data_shared: true}]->(p:Partner) '
' OPTIONAL MATCH '
' (p)<-[a: AFFILIATED]-(: User {username_lower: toLower($username)}) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' time, '
+ upload_check_value +
' AS value, '
' CASE '
' WHEN a.confirmed '
' THEN r.value '
' ELSE CASE '
' WHEN r IS NOT NULL '
' THEN "ACCESS DENIED" '
' ELSE null '
' END '
' END AS r_value, '
' s.time AS `submitted at`, '
' CASE WHEN a.confirmed THEN u.name ELSE p.name END AS user, '
' a.confirmed AS access '
' WHERE '
' ( '
' item IS NULL '
' OR '
' input IS NULL '
' OR '
' value IS NULL '
' OR '
' a.confirmed <> True '
' OR'
' r.value <> value '
' ) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' value, '
' COLLECT(DISTINCT({ '
' existing_value: toString(r_value), '
' `submitted at`: `submitted at`, '
' user: user, '
' access: access '
' })) as conflicts '
' RETURN { '
' row_index: row_index, '
' `Supplied input name`: input_name, '
' UID: item.uid, '
' Replicate: replicate, '
' `Input variable`: input.name, '
' Format: input.format, '
' `Category list`: input.category_list, '
' Value: value, '
' Conflicts: conflicts '
' } '
' ORDER BY row_index '
' LIMIT 50 '
)
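	# submits a Field Book (trait) export, merging one Record per item, input variable,
	# replicate and time, and returns per-row feedback including any pre-existing values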
upload_fb = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' trim(csvLine.timestamp) as text_time, '
' trim(csvLine.person) as person, '
' trim(csvLine.location) as location, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toInteger(split(csvLine.uid, "_")[0]) '
' END as field_uid, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 1) as replicate, '
' toLower(trim(csvLine.trait)) as input_name, '
' trim(csvLine.value) as value, '
' apoc.date.parse(csvLine.timestamp, "ms", "yyyy-MM-dd HH:mm:sszzz") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' WHERE trim(csvLine.value) <> "" '
# And identify the fields and inputs assessed
' MATCH '
' (field:Field { '
' uid: field_uid '
' }), '
' (item: Item { '
' uid: uid '
' }), '
' (:RecordType {'
' name_lower: "trait" '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(item_level: ItemLevel { '
' name_lower: level '
' }) '
		' FOREACH (n in CASE '
		'		WHEN level = "field" '
		'		THEN [1] ELSE [] END | '
		'	MERGE '
		'		(item)<-[: FOR_ITEM]-(:ItemInput :FieldInput)-[: FOR_INPUT]->(input) '
		' ) '
' FOREACH (n in CASE '
' WHEN level in ["block", "tree", "sample"] '
' THEN [1] ELSE [] END | '
' MERGE '
' (field)<-[: FROM_FIELD]-(field_input: FieldInput)-[: FOR_INPUT]->(input) '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput)-[: FOR_INPUT]->(field_input) '
' ) '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' location, '
' time, '
' replicate, '
' text_time, '
+ upload_check_value +
' AS value '
' WHERE value IS NOT NULL '
# get the user submission tracking nodes
' MATCH '
' (:User { '
' username_lower : toLower(trim($username))'
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(data_sub: Records) '
# and the item/input node
' MATCH '
' (item) '
' <-[: FOR_ITEM]-(item_input: ItemInput)'
' -[ :FOR_INPUT*..2]->(input) '
# and the field/input node
# todo consider the model here, this is an undirected match with two labels, not super happy with this one,
# todo would it be better to have a redundant ItemInput node for fields?
' MATCH (item) '
' -[:FOR_ITEM | FOR_INPUT*..2]-(field_input: FieldInput) '
' -[:FOR_INPUT]->(input) '
' MERGE '
' (r: Record { '
' time : time, '
' replicate: replicate '
' }) '
' -[:RECORD_FOR]->(item_input) '
' ON MATCH SET '
' r.found = True '
' ON CREATE SET '
' r.found = False, '
' r.person = person, '
' r.location = location, '
' r.value = CASE '
' WHEN input.format <> "multicat" THEN value '
' ELSE extract(i in FILTER (n in split(value, ":") WHERE size(n) > 0 )| toLower(trim(i)))'
' END '
		# additional statements that only run when the record is newly created
' FOREACH (n IN CASE '
' WHEN r.found = False '
' THEN [1] ELSE [] END | '
# track user submissions through /User/FieldInput container
' MERGE '
' (data_sub)'
' -[:SUBMITTED]->(uff:UserFieldInput) '
' -[:CONTRIBUTED]->(field_input) '
# then finally the data with a timestamp
' MERGE '
' (uff)-[s1:SUBMITTED]->(r) '
' ON CREATE SET '
' s1.time = datetime.transaction().epochMillis '
' ) '
' WITH '
' field, '
' item, '
' input, '
' value, '
' r '
' MATCH '
' (partner:Partner) '
' <-[:AFFILIATED {data_shared: True}]-(user:User) '
' -[:SUBMITTED]->(:Submissions) '
' -[:SUBMITTED]->(:Records) '
' -[:SUBMITTED]->(:UserFieldInput) '
' -[submitted:SUBMITTED]->(r) '
		# check permissions for records that already existed, so that feedback can be filtered
		# and the submission optionally rolled back where existing records overlap without confirmed access
' OPTIONAL MATCH '
' (partner)<-[access: AFFILIATED {confirmed: True}]-(:User {username_lower:toLower(trim($username))}) '
# And give the user feedback on their submission
' RETURN { '
' Found: r.found, '
' `Submitted by`: CASE WHEN access IS NOT NULL THEN user.name ELSE partner.name END, '
' `Submitted at`: submitted.time, '
' Value: CASE '
' WHEN NOT r.found '
' THEN r.value '
' WHEN access IS NOT NULL '
' THEN r.value '
' ELSE "ACCESS DENIED" '
' END, '
' `Uploaded value`: value, '
' Access: CASE WHEN access IS NULL THEN False ELSE True END, '
' Replicate: r.replicate, '
' Time: r.time, '
' UID: item.uid, '
' `Input variable`: input.name, '
' Partner: partner.name '
' } '
' ORDER BY input.name_lower, field.uid, item.id, r.replicate '
)
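	# checks property columns of a table upload; reports unknown UIDs or input
	# variables, invalid values and conflicts with existing property records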
upload_table_property_check = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' toInteger(csvLine.row_index) as row_index, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toUpper(csvLine.uid) '
' END as uid, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' OPTIONAL MATCH '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' OPTIONAL MATCH '
' (:RecordType {'
' name_lower: "property" '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(:ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' row_index, '
' input_name, '
' item, '
' input, '
' csvLine[input_name] as value '
' WHERE trim(csvLine[input_name]) <> ""'
' OPTIONAL MATCH '
' (item)'
' <-[:FOR_ITEM]-(if: ItemInput)'
' -[:FOR_INPUT*..2]->(input), '
' (if)'
' <-[:RECORD_FOR]-(r: Record) '
' <-[s: SUBMITTED]-(: UserFieldInput) '
' <-[: SUBMITTED]-(: Records) '
' <-[: SUBMITTED]-(: Submissions) '
' <-[: SUBMITTED]-(u: User) '
' -[:AFFILIATED {data_shared: true}]->(p:Partner) '
' OPTIONAL MATCH '
' (p)<-[a: AFFILIATED]-(: User {username_lower: toLower($username)}) '
' WITH '
' row_index, '
' input_name, '
' item, '
' input, '
+ upload_check_value +
' AS value, '
' CASE '
' WHEN a.confirmed '
' THEN r.value '
' ELSE CASE '
' WHEN r IS NOT NULL '
' THEN "ACCESS DENIED" '
' ELSE null '
' END '
' END AS r_value, '
' s.time AS `submitted at`, '
' CASE WHEN a.confirmed THEN u.name ELSE p.name END AS user, '
' a.confirmed AS access '
' WHERE '
' ( '
' item IS NULL '
' OR '
' input IS NULL '
' OR '
' value IS NULL '
' ) OR ( '
' a.confirmed <> True '
' OR'
' r.value <> value '
' ) '
' WITH '
' row_index, '
' item, '
' input, '
' input_name, '
' value, '
' COLLECT(DISTINCT({ '
' existing_value: toString(r_value), '
' `submitted at`: `submitted at`, '
' user: user, '
' access: access '
' })) as conflicts '
' RETURN { '
' row_index: row_index, '
' `Supplied input name`: input_name, '
' UID: item.uid, '
' `Input variable`: input.name, '
' Format: input.format, '
' `Category list`: input.category_list, '
' Value: value, '
' Conflicts: conflicts '
' } '
' ORDER BY row_index '
' LIMIT 50 '
)
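	# checks trait (or other timestamped) columns of a table upload, keyed by UID,
	# replicate, date and time; reports invalid values and conflicts with existing records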
upload_table_trait_check = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' toInteger(csvLine.row_index) as row_index, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.date, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.date, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.time, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.time, " ", "") '
' ELSE '
' "12:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' OPTIONAL MATCH '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' OPTIONAL MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(:ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' time, '
' csvLine[input_name] as value '
' WHERE trim(csvLine[input_name]) <> "" '
' OPTIONAL MATCH '
' (item)'
' <-[:FOR_ITEM]-(if: ItemInput)'
' -[:FOR_INPUT*..2]->(input), '
' (if)'
' <-[:RECORD_FOR]-(r: Record { '
' replicate: replicate, '
' time: time '
' }) '
' <-[s: SUBMITTED]-(: UserFieldInput) '
' <-[: SUBMITTED]-(: Records) '
' <-[: SUBMITTED]-(: Submissions) '
' <-[: SUBMITTED]-(u: User) '
' -[:AFFILIATED {data_shared: true}]->(p:Partner) '
' OPTIONAL MATCH '
' (p)<-[a: AFFILIATED]-(: User {username_lower: toLower($username)}) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' time, '
+ upload_check_value +
' AS value, '
' CASE '
' WHEN a.confirmed '
' THEN r.value '
' ELSE CASE '
' WHEN r IS NOT NULL '
' THEN "ACCESS DENIED" '
' ELSE null '
' END '
' END AS r_value, '
' s.time AS `submitted at`, '
' CASE WHEN a.confirmed THEN u.name ELSE p.name END AS user, '
' a.confirmed AS access '
' WHERE '
' ( '
' item IS NULL '
' OR '
' input IS NULL '
' OR '
' value IS NULL '
' OR '
' a.confirmed <> True '
' OR'
' r.value <> value '
' ) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' value, '
' COLLECT(DISTINCT({ '
' existing_value: toString(r_value), '
' `submitted at`: `submitted at`, '
' user: user, '
' access: access '
' })) as conflicts '
' RETURN { '
' row_index: row_index, '
' `Supplied input name`: input_name, '
' UID: item.uid, '
' Replicate: replicate, '
' `Input variable`: input.name, '
' Format: input.format, '
' `Category list`: input.category_list, '
' Value: value, '
' Conflicts: conflicts '
' } '
' ORDER BY row_index '
' LIMIT 50 '
)
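	# checks condition columns of a table upload; conditions are bounded by start/end
	# date and time, so conflicts are reported for overlapping periods where the value
	# differs or access to the existing record is not confirmed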
upload_table_condition_check = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' toInteger(csvLine.row_index) as row_index, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level, '
# start time from start date and start time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.`start date`, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.`start date`, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.`start time`, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.`start time`, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.`start time`, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.`start time`, " ", "") '
' ELSE '
' "00:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as start, '
# end time from end date and end time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.`end date`, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.`end date`, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.`end time`, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.`end time`, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.`end time`, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.`end time`, " ", "") '
' ELSE '
' "24:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as end '
' OPTIONAL MATCH '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' OPTIONAL MATCH '
' (:RecordType { '
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(:ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' start, end, '
' csvLine[input_name] as value '
' WHERE trim(csvLine[input_name]) <> "" '
' OPTIONAL MATCH '
' (item) '
' <-[:FOR_ITEM]-(if: ItemInput) '
' -[:FOR_INPUT*..2]->(input), '
' (if) '
' <-[:RECORD_FOR]-(r: Record { '
' replicate: replicate '
' }) '
' <-[s: SUBMITTED]-(: UserFieldInput) '
' <-[: SUBMITTED]-(: Records) '
' <-[: SUBMITTED]-(: Submissions) '
' <-[: SUBMITTED]-(u: User) '
' -[:AFFILIATED { '
' data_shared: true'
' }]->(p:Partner) '
' OPTIONAL MATCH '
' (p)<-[a: AFFILIATED]-(: User {username_lower: toLower($username)}) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' start, end, '
+ upload_check_value +
' AS value, '
' CASE WHEN r.start <> False THEN r.start ELSE Null END AS r_start, '
' CASE WHEN r.end <> False THEN r.end ELSE Null END AS r_end, '
' CASE '
' WHEN a.confirmed '
' THEN r.value '
' ELSE CASE '
' WHEN r IS NOT NULL '
' THEN "ACCESS DENIED" '
' ELSE null '
' END '
' END AS r_value, '
' s.time AS `submitted at`, '
' CASE WHEN a.confirmed THEN u.name ELSE p.name END AS user, '
' a.confirmed AS access '
' WHERE '
' ( '
' item IS NULL '
' OR '
' input IS NULL '
' OR '
' value IS NULL '
' ) OR ( '
# condition conflicts
' ( '
' a.confirmed <> True '
' OR '
' r.value <> value '
' ) AND ( '
# handle fully bound records
# - any overlapping records
' ( '
' r_start < end '
' AND '
' r_end > start '
' ) OR ( '
# - a record that has a lower bound in the bound period
' r_start >= start '
' AND '
' r_start < end '
' ) OR ( '
# - a record that has an upper bound in the bound period
' r_end > start '
' AND '
' r_end <= end '
' ) OR ( '
# now handle lower bound only records
' end IS NULL '
' AND ( '
' ( '
# - existing bound period includes start
' r_end > start '
' AND '
' r_start <= start '
# - record with same lower bound
' ) OR ( '
' r_start = start '
# - record with upper bound only greater than this lower bound
' ) OR ( '
' r_start IS NULL '
' AND '
' r_end > start '
' )'
' ) '
' ) OR ( '
# now handle upper bound only records
' start IS NULL '
' AND ( '
' ( '
# - existing bound period includes end
' r_end >= end '
' AND '
' r_start < end '
# - record with same upper bound
' ) OR ( '
' r_end = end '
# - record with lower bound only less than this upper bound
' ) OR ( '
' r_end IS NULL '
' AND '
' r_start < end '
' ) '
' )'
' ) OR ( '
# always conflict with unbound records
' r_end IS NULL '
' AND '
' r_start IS NULL '
' ) '
' ) '
' ) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' value, '
' COLLECT(DISTINCT({ '
' start: r_start, '
' end: r_end, '
' existing_value: toString(r_value), '
' `submitted at`: `submitted at`, '
' user: user, '
' access: access '
' })) as conflicts '
' RETURN { '
' row_index: row_index, '
' `Supplied input name`: input_name, '
' UID: item.uid, '
' Replicate: replicate, '
' `Input variable`: input.name, '
' Format: input.format, '
' `Category list`: input.category_list, '
' Value: value, '
' Conflicts: conflicts '
' } '
' ORDER BY row_index '
' LIMIT 50 '
)
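	# checks curve uploads, where x values are numeric column headers ($x_values) and
	# y values are the cell contents; conflicts are reported where stored x/y pairs
	# differ for the same item, replicate and time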
upload_table_curve_check = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' ['
' key in keys(csvLine) WHERE toFloat(key) in $x_values AND toFloat(csvLine[key]) <> "" '
' | [toFloat(key), toFloat(csvLine[key])]'
' ] as x_y_list '
' UNWIND x_y_list as x_y '
' WITH '
' csvLine, '
' x_y '
' ORDER BY x_y '
' WITH '
' csvLine, '
' collect(x_y[0]) as x_values, '
' collect(x_y[1]) as y_values '
' WITH '
' csvLine, '
' x_values, '
' y_values, '
' toInteger(csvLine.row_index) as row_index, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.date, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.date, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.time, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.time, " ", "") '
' ELSE '
' "12:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' OPTIONAL MATCH '
' (item: Item { '
' uid: uid '
' }) '
' OPTIONAL MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower($input_name) '
' })-[:AT_LEVEL]->(:ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' row_index, '
' item, replicate, '
' input, '
' time, '
' x_values,'
' y_values '
' WHERE size(y_values) > 0 '
' OPTIONAL MATCH '
' (item)'
' <-[:FOR_ITEM]-(if: ItemInput)'
' -[:FOR_INPUT*..2]->(input), '
' (if)'
' <-[:RECORD_FOR]-(r: Record { '
' replicate: replicate, '
' time: time '
' }) '
' <-[s: SUBMITTED]-(: UserFieldInput) '
' <-[: SUBMITTED]-(: Records) '
' <-[: SUBMITTED]-(: Submissions) '
' <-[: SUBMITTED]-(u: User) '
' -[:AFFILIATED {data_shared: true}]->(p:Partner) '
' WHERE '
# compare r.y_values with y_values
# list relevant [x_value, y_value] pairs
' [i IN range(0, size(x_values) - 1) WHERE x_values[i] in r.x_values | [x_values[i], y_values[i]]] <> '
# list of relevant [r.x_value, r.y_value] pairs
' [i IN range(0, size(r.x_values) - 1) WHERE r.x_values[i] in x_values | [r.x_values[i], r.y_values[i]]] '
' OPTIONAL MATCH '
' (p)<-[a: AFFILIATED]-(: User {username_lower: toLower($username)}) '
' WITH '
' row_index, '
' item, replicate, '
' input, '
' time, '
' x_values, '
' y_values, '
' CASE '
' WHEN a.confirmed '
' THEN r.y_values '
' ELSE CASE '
' WHEN r IS NOT NULL '
' THEN "ACCESS DENIED" '
' ELSE null '
' END '
' END AS r_y_values, '
' r.x_values as r_x_values, '
' s.time AS `submitted at`, '
' CASE WHEN a.confirmed THEN u.name ELSE p.name END AS user, '
' a.confirmed AS access '
' WHERE '
' ( '
' item IS NULL '
' OR '
' input IS NULL '
' OR '
' a.confirmed <> True '
' OR '
' r.y_values <> y_values '
' ) '
' WITH '
' row_index, '
' item, replicate, '
' input, '
' x_values, '
' y_values, '
' COLLECT(DISTINCT({ '
' existing_value: [i in range(0, size(r_x_values) - 1) | [r_x_values[i], r_y_values[i]]], '
' `submitted at`: `submitted at`, '
' user: user, '
' access: access '
' })) as conflicts '
' RETURN { '
' row_index: row_index, '
' `Supplied input name`: $input_name, '
' UID: item.uid, '
' Replicate: replicate, '
' `Input variable`: input.name, '
' Format: input.format, '
' `Category list`: input.category_list, '
' Value: [i in range(0, size(x_values) - 1) | [x_values[i], y_values[i]]], '
' Conflicts: conflicts '
' } '
' ORDER BY row_index '
' LIMIT 50 '
)
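	# submits property records from a table upload, merging a single un-timestamped
	# Record per item and input variable and returning per-row feedback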
upload_table_property = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' trim(csvLine.person) as person, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toInteger(split(csvLine.uid, "_")[0]) '
' END as field_uid, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toUpper(csvLine.uid) '
' END as uid, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' MATCH '
' (field:Field { '
' uid: field_uid '
' }), '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(item_level: ItemLevel { '
' name_lower: level '
' }) '
# Check for data in table
' WHERE trim(csvLine[input_name]) <> "" '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' csvLine[input_name] as value '
' FOREACH (n in CASE '
' WHEN level = "field" '
' THEN [1] ELSE [] END | '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput :FieldInput)-[: FOR_INPUT]->(input) '
' ) '
' FOREACH (n in CASE '
' WHEN level in ["block", "tree", "sample"] '
' THEN [1] ELSE [] END | '
' MERGE '
' (field)<-[: FROM_FIELD]-(field_input: FieldInput)-[: FOR_INPUT]->(input) '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput)-[: FOR_INPUT]->(field_input) '
' ) '
' WITH '
' field, '
' item, '
' input, '
' person, '
+ upload_check_value +
' AS value '
' WHERE value IS NOT NULL '
# get the user submission tracking nodes
' MATCH '
' (:User { '
' username_lower : toLower(trim($username))'
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(data_sub: Records) '
# and the item/input node
' MATCH '
' (item) '
' <-[: FOR_ITEM]-(item_input: ItemInput) '
' -[ :FOR_INPUT*..2]->(input) '
# and the field/input node
# todo consider the model here, this is an undirected match with two labels, not super happy with this one,
# todo would it be better to have a redundant ItemInput node for fields?
' MATCH (item) '
' -[:FOR_ITEM | FOR_INPUT*..2]-(field_input: FieldInput) '
' -[:FOR_INPUT]->(input) '
' MERGE '
' (r: Record) '
' -[: RECORD_FOR]->(item_input) '
' ON MATCH SET '
' r.found = True '
' ON CREATE SET '
' r.found = False, '
' r.person = person, '
' r.value = CASE '
' WHEN input.format <> "multicat" THEN value '
' ELSE extract(i in FILTER (n in split(value, ":") WHERE size(n) > 0 )| toLower(trim(i)))'
' END '
		# additional statements that only run when the record is newly created
' FOREACH (n IN CASE '
' WHEN r.found = False '
' THEN [1] ELSE [] END | '
# track user submissions through /User/FieldInput container
' MERGE '
' (data_sub)'
' -[:SUBMITTED]->(uff:UserFieldInput) '
' -[:CONTRIBUTED]->(field_input) '
# then finally the data with a timestamp
' MERGE '
' (uff)-[s1:SUBMITTED]->(r) '
' ON CREATE SET '
' s1.time = datetime.transaction().epochMillis '
' ) '
' WITH '
' field, '
' item, '
' input, '
' value, '
' r '
' MATCH '
' (partner:Partner) '
' <-[:AFFILIATED {data_shared: True}]-(user:User) '
' -[:SUBMITTED]->(:Submissions) '
' -[:SUBMITTED]->(:Records) '
' -[:SUBMITTED]->(:UserFieldInput) '
' -[submitted:SUBMITTED]->(r) '
		# check permissions for records that already existed, so that feedback can be filtered
		# and the submission optionally rolled back where existing records overlap without confirmed access
' OPTIONAL MATCH '
' (partner)<-[access: AFFILIATED {confirmed: True}]-(:User {username_lower:toLower(trim($username))}) '
# And give the user feedback on their submission
' RETURN { '
' Found: r.found, '
' `Submitted by`: CASE WHEN access IS NOT NULL THEN user.name ELSE partner.name END, '
' `Submitted at`: submitted.time, '
' Value: CASE '
' WHEN NOT r.found '
' THEN r.value '
' WHEN access IS NOT NULL '
' THEN r.value '
' ELSE "ACCESS DENIED" '
' END, '
' `Uploaded value`: value, '
' Access: CASE WHEN access IS NULL THEN False ELSE True END, '
' UID: item.uid, '
' `Input variable`: input.name, '
' Partner: partner.name '
' } '
' ORDER BY input.name_lower, field.uid, item.id '
)
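	# submits trait records from a table upload, merging one Record per item, input
	# variable, replicate and time and returning per-row feedback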
upload_table_trait = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' trim(csvLine.person) as person, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toInteger(split(csvLine.uid, "_")[0]) '
' END as field_uid, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
# time from date and time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.date, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.date, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.time, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.time, " ", "") '
' ELSE '
' "12:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
# And identify the fields and input variables assessed
' MATCH '
' (field:Field { '
' uid: field_uid '
' }), '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(item_level: ItemLevel { '
' name_lower: level '
' }) '
# Check for data in table
' WHERE trim(csvLine[input_name]) <> "" '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' time, '
' replicate, '
' csvLine[input_name] as value, '
# to allow differentiation of defaulted time and set time
' csvLine.time as text_time '
	# for trait data, drop the row if the date could not be parsed (a missing time alone defaults to 12:00)
' WHERE '
' time IS NOT NULL '
' FOREACH (n in CASE '
' WHEN level = "field" '
' THEN [1] ELSE [] END | '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput :FieldInput)-[: FOR_INPUT]->(input) '
' ) '
' FOREACH (n in CASE '
' WHEN level in ["block", "tree", "sample"] '
' THEN [1] ELSE [] END | '
' MERGE '
' (field)<-[: FROM_FIELD]-(field_input: FieldInput)-[: FOR_INPUT]->(input) '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput)-[: FOR_INPUT]->(field_input) '
' ) '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' time, '
' replicate, '
' text_time, '
+ upload_check_value +
' AS value '
' WHERE value IS NOT NULL '
# get the user submission tracking nodes
' MATCH '
' (:User { '
' username_lower : toLower(trim($username))'
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(data_sub: Records) '
# and the item/input node
' MATCH '
' (item) '
' <-[: FOR_ITEM]-(item_input: ItemInput)'
' -[ :FOR_INPUT*..2]->(input) '
# and the field/input node
# todo consider the model here, this is an undirected match with two labels, not super happy with this one,
# todo would it be better to have a redundant ItemInput node for fields?
' MATCH (item) '
' -[:FOR_ITEM | FOR_INPUT*..2]-(field_input: FieldInput) '
' -[:FOR_INPUT]->(input) '
' MERGE '
' (r: Record { '
' time : time, '
' replicate: replicate '
' }) '
' -[:RECORD_FOR]->(item_input) '
' ON MATCH SET '
' r.found = True '
' ON CREATE SET '
' r.found = False, '
' r.person = person, '
' r.text_time = text_time, '
' r.value = CASE '
' WHEN input.format <> "multicat" THEN value '
' ELSE extract(i in FILTER (n in split(value, ":") WHERE size(n) > 0 )| toLower(trim(i)))'
' END '
	# additional statements that only run when the record is newly created
' FOREACH (n IN CASE '
' WHEN r.found = False '
' THEN [1] ELSE [] END | '
# track user submissions through /User/FieldInput container
' MERGE '
' (data_sub)'
' -[:SUBMITTED]->(uff:UserFieldInput) '
' -[:CONTRIBUTED]->(field_input) '
# then finally the data with a timestamp
' MERGE '
' (uff)-[s1:SUBMITTED]->(r) '
' ON CREATE SET '
' s1.time = datetime.transaction().epochMillis '
' ) '
' WITH '
' field, '
' item, '
' input, '
' value, '
' r '
' MATCH '
' (partner:Partner) '
' <-[:AFFILIATED {data_shared: True}]-(user:User) '
' -[:SUBMITTED]->(:Submissions) '
' -[:SUBMITTED]->(:Records) '
' -[:SUBMITTED]->(:UserFieldInput) '
' -[submitted:SUBMITTED]->(r) '
# need to check for permissions for values that didn't merge to provide filtered feedback
# and optionally roll back if existing records overlap without access confirmed.
' OPTIONAL MATCH '
' (partner)<-[access: AFFILIATED {confirmed: True}]-(:User {username_lower:toLower(trim($username))}) '
# And give the user feedback on their submission
' RETURN { '
' Found: r.found, '
' `Submitted by`: CASE WHEN access IS NOT NULL THEN user.name ELSE partner.name END, '
' `Submitted at`: submitted.time, '
' Value: CASE '
' WHEN NOT r.found '
' THEN r.value '
' WHEN access IS NOT NULL '
' THEN r.value '
' ELSE "ACCESS DENIED" '
' END, '
' `Uploaded value`: value, '
' Access: CASE WHEN access IS NULL THEN False ELSE True END, '
' Replicate: r.replicate, '
' Time: r.time, '
' UID: item.uid, '
' `Input variable`: input.name, '
' Partner: partner.name '
' } '
' ORDER BY input.name_lower, field.uid, item.id, r.replicate '
)
upload_table_curve = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' [ '
' key in keys(csvLine) WHERE toFloat(key) in $x_values AND toFloat(csvLine[key]) <> "" '
' | [toFloat(key), toFloat(csvLine[key])]'
' ] as x_y_list '
' UNWIND x_y_list as x_y '
' WITH '
' csvLine, '
' x_y '
' ORDER BY x_y '
' WITH '
' csvLine, '
' collect(x_y[0]) as x_values, '
' collect(x_y[1]) as y_values '
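	# Illustrative sketch (column names and values are hypothetical): if $x_values is [10.0, 20.0]
	# and a row has csvLine["10"] = "1.3" and csvLine["20"] = "2.6", the collection above yields
	# x_values = [10.0, 20.0] and y_values = [1.3, 2.6], ordered by the x value.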
' WITH '
' csvLine, '
' x_values, '
' y_values, '
' trim(csvLine.person) as person, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toInteger(split(csvLine.uid, "_")[0]) '
' END as field_uid, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
# time from date and time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.date, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.date, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.time, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.time, " ", "") '
' ELSE '
' "12:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
# And identify the fields and input variable assessed
' MATCH '
' (field:Field { '
' uid: field_uid '
' }), '
' (item: Item { '
' uid: uid '
' }) '
' MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower($input_name) '
' })-[:AT_LEVEL]->(item_level: ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' time, '
' replicate, '
' x_values, '
' y_values, '
# to allow differentiation of defaulted time and set time
' csvLine.time as text_time '
	# for trait data, drop the row if the date could not be parsed (a missing time alone defaults to 12:00)
' WHERE '
' time IS NOT NULL '
' FOREACH (n in CASE '
' WHEN level = "field" '
' THEN [1] ELSE [] END | '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput :FieldInput)-[: FOR_INPUT]->(input) '
' ) '
' FOREACH (n in CASE '
' WHEN level in ["block", "tree", "sample"] '
' THEN [1] ELSE [] END | '
' MERGE '
' (field)<-[: FROM_FIELD]-(field_input: FieldInput)-[: FOR_INPUT]->(input) '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput)-[: FOR_INPUT]->(field_input) '
' ) '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' time, '
' replicate, '
' text_time, '
' x_values, '
' y_values '
' WHERE size(y_values) > 0 '
# get the user submission tracking nodes
' MATCH '
' (:User { '
' username_lower : toLower(trim($username))'
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(data_sub: Records) '
# and the item/input node
' MATCH '
' (item) '
' <-[: FOR_ITEM]-(item_input: ItemInput)'
' -[ :FOR_INPUT*..2]->(input) '
# and the field/input node
# todo consider the model here, this is an undirected match with two labels, not super happy with this one,
# todo would it be better to have a redundant ItemInput node for fields?
' MATCH (item) '
' -[:FOR_ITEM | FOR_INPUT*..2]-(field_input: FieldInput) '
' -[:FOR_INPUT]->(input) '
' MERGE '
' (r: Record { '
' time : time, '
' replicate: replicate, '
' x_values: x_values '
' }) '
' -[:RECORD_FOR]->(item_input) '
' ON MATCH SET '
' r.found = True '
' ON CREATE SET '
' r.found = False, '
' r.person = person, '
' r.text_time = text_time, '
' r.y_values = y_values '
	# additional statements that only run when the record is newly created
' FOREACH (n IN CASE '
' WHEN r.found = False '
' THEN [1] ELSE [] END | '
# track user submissions through /User/FieldInput container
' MERGE '
' (data_sub)'
' -[:SUBMITTED]->(uff:UserFieldInput) '
' -[:CONTRIBUTED]->(field_input) '
# then finally the data with a timestamp
' MERGE '
' (uff)-[s1:SUBMITTED]->(r) '
' ON CREATE SET '
' s1.time = datetime.transaction().epochMillis '
' ) '
' WITH '
' field, '
' item, '
' input, '
' x_values, '
' y_values, '
' r '
' MATCH '
' (partner:Partner) '
' <-[:AFFILIATED {data_shared: True}]-(user:User) '
' -[:SUBMITTED]->(:Submissions) '
' -[:SUBMITTED]->(:Records) '
' -[:SUBMITTED]->(:UserFieldInput) '
' -[submitted:SUBMITTED]->(r) '
# need to check for permissions for values that didn't merge to provide filtered feedback
# and optionally roll back if existing records overlap without access confirmed.
' OPTIONAL MATCH '
' (partner)<-[access: AFFILIATED {confirmed: True}]-(:User {username_lower:toLower(trim($username))}) '
# And give the user feedback on their submission
' RETURN { '
' Found: r.found, '
' `Submitted by`: CASE WHEN access IS NOT NULL THEN user.name ELSE partner.name END, '
' `Submitted at`: submitted.time, '
' Value: CASE '
' WHEN NOT r.found '
' THEN [i in range(0, size(r.x_values) - 1) | [r.x_values[i], r.y_values[i]]] '
' WHEN access IS NOT NULL '
' THEN [i in range(0, size(r.x_values) - 1) | [r.x_values[i], r.y_values[i]]] '
' ELSE "ACCESS DENIED" '
' END, '
' `Uploaded value`: [i in range(0, size(x_values) - 1) | [x_values[i], y_values[i]]], '
' Access: CASE WHEN access IS NULL THEN False ELSE True END, '
' Replicate: r.replicate, '
' Time: r.time, '
' UID: item.uid, '
' Input: input.name, '
' Partner: partner.name '
' } '
' ORDER BY input.name_lower, field.uid, item.id, r.replicate '
)
upload_table_condition = (
# load in the csv
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' trim(csvLine.person) as person, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toInteger(split(csvLine.uid, "_")[0]) '
' END as field_uid, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level, '
# start time from start date and start time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.`start date`, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.`start date`, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.`start time`, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.`start time`, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.`start time`, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.`start time`, " ", "") '
' ELSE '
' "00:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as start, '
# end time from end date and end time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.`end date`, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.`end date`, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.`end time`, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.`end time`, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.`end time`, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.`end time`, " ", "") '
' ELSE '
' "24:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as end '
# And identify the fields and inputs assessed
' MATCH '
' (field:Field { '
' uid: field_uid '
' }), '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(item_level:ItemLevel { '
' name_lower: level '
' }) '
# Check for data in table
' WHERE trim(csvLine[input_name]) <> "" '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' start, end, '
' csvLine[input_name] as value, '
# to allow differentiation of defaulted time and set time
' csvLine.`start time` as text_start_time, '
' csvLine.`end time` as text_end_time '
' FOREACH (n in CASE '
' WHEN level = "field" '
' THEN [1] ELSE [] END | '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput :FieldInput)-[: FOR_INPUT]->(input) '
' ) '
' FOREACH (n in CASE '
' WHEN level in ["block", "tree", "sample"] '
' THEN [1] ELSE [] END | '
' MERGE '
' (field)<-[: FROM_FIELD]-(field_input: FieldInput)-[: FOR_INPUT]->(input) '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput)-[: FOR_INPUT]->(field_input) '
' ) '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' start, end, '
' text_start_time, text_end_time, '
+ upload_check_value +
' AS value '
' WHERE value IS NOT NULL '
# get the user submission tracking nodes
' MATCH '
' (:User { '
' username_lower : toLower(trim($username))'
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(data_sub: Records) '
# and the item/input node
' MATCH '
' (item) '
' <-[: FOR_ITEM]-(item_input: ItemInput)'
' -[ :FOR_INPUT*..2]->(input) '
# and the field/input node
# todo consider the model here, this is an undirected match with two labels, not super happy with this one,
# todo would it be better to have a redundant ItemInput node for fields?
' MATCH (item) '
' -[:FOR_ITEM | FOR_INPUT*..2]-(field_input: FieldInput) '
' -[:FOR_INPUT]->(input) '
' MERGE '
' (r: Record { '
' start : CASE WHEN start IS NOT NULL THEN start ELSE False END, '
' end : CASE WHEN end IS NOT NULL THEN end ELSE False END '
' }) '
' -[:RECORD_FOR]->(item_input) '
' ON MATCH SET '
' r.found = True '
' ON CREATE SET '
' r.found = False, '
' r.person = person, '
' r.text_start_time = text_start_time, '
' r.text_end_time = text_end_time, '
' r.value = CASE '
' WHEN input.format <> "multicat" THEN value '
' ELSE extract(i in FILTER (n in split(value, ":") WHERE size(n) > 0 )| toLower(trim(i)))'
' END '
	# additional statements that only run when the record is newly created
' FOREACH (n IN CASE '
' WHEN r.found = False '
' THEN [1] ELSE [] END | '
# track user submissions through /User/FieldInput container
' MERGE '
' (data_sub)'
' -[:SUBMITTED]->(uff:UserFieldInput) '
' -[:CONTRIBUTED]->(field_input) '
# then finally the data with a timestamp
' MERGE '
' (uff)-[s1:SUBMITTED]->(r) '
' ON CREATE SET '
' s1.time = datetime.transaction().epochMillis '
' ) '
' WITH '
' field, '
' item, '
' input, '
' item_input, '
' value, '
' r, '
' start, end '
' MATCH '
' (partner:Partner) '
' <-[:AFFILIATED {data_shared: True}]-(user:User) '
' -[:SUBMITTED]->(:Submissions) '
' -[:SUBMITTED]->(:Records) '
' -[:SUBMITTED]->(:UserFieldInput) '
' -[submitted:SUBMITTED]->(r) '
# need to check for permissions for values that didn't merge to provide filtered feedback
# and optionally roll back if existing records overlap without access confirmed.
' OPTIONAL MATCH '
' (partner)<-[access: AFFILIATED {confirmed: True}]-(:User {username_lower:toLower(trim($username))}) '
# check again for conflicts - in case there have been concurrent submissions
# or there are conflicts within the uploaded table
' OPTIONAL MATCH '
' (r)'
' -[:RECORD_FOR]->(item_input) '
' <-[:RECORD_FOR]-(rr:Record) '
' <-[rr_sub:SUBMITTED]-(:UserFieldInput) '
' <-[:SUBMITTED]-(:Records) '
' <-[:SUBMITTED]-(:Submissions) '
' <-[:SUBMITTED]-(rr_user:User) '
' -[:AFFILIATED {data_shared: True}]->(rr_partner:Partner) '
' WHERE '
' ( '
# handle fully bound records
# - any overlapping records
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END < end '
' AND '
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END > start '
' ) OR ( '
# - a record that has a lower bound in the bound period
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END >= start '
' AND '
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END < end '
' ) OR ( '
# - a record that has an upper bound in the bound period
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END > start '
' AND '
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END <= end '
' ) OR ( '
# now handle lower bound only records
' end IS NULL '
' AND ( '
# - existing bound period includes start
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END > start '
' AND '
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END <= start '
# - record with same lower bound
' ) OR ( '
' rr.start = start '
# - record with upper bound only greater than this lower bound
' ) OR ( '
' rr.start = False '
' AND '
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END > start '
' ) '
' ) OR ( '
# now handle upper bound only records
' start IS NULL '
' AND ( '
# - existing bound period includes end
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END >= end '
' AND '
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END < end '
# - record with same upper bound
' ) OR ( '
' rr.end = end '
# - record with lower bound only less than this upper bound
' ) OR ( '
' rr.end = False '
' AND '
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END < end '
' ) '
' ) OR ( '
# always conflict with unbound records
' rr.end = False '
' AND '
' rr.start = False '
' )'
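	# A hedged numeric illustration of the overlap test above (the epoch values are hypothetical):
	# an existing record bounded by [1000, 2000] conflicts with an upload bounded by [1500, 2500]
	# because rr.start (1000) < end (2500) and rr.end (2000) > start (1500); records with both
	# bounds set to False (fully unbound) always conflict.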
' OPTIONAL MATCH '
' (rr_partner) '
' <-[rr_access: AFFILIATED {confirmed: True}]-(:User {username_lower: toLower(trim($username))}) '
	# If the user doesn't have access, or has access but the values don't match, then this is a potential conflict
# time parsing to allow various degrees of specificity in the relevant time range is below
' WITH '
' r, '
' access, '
' user, '
' partner, '
' value, '
' submitted, '
' item, '
' field, '
' input, '
' case WHEN rr IS NOT NULL AND (rr.value <> r.value OR rr_access IS NULL) THEN '
' collect(DISTINCT { '
' start: rr.start, '
' end: rr.end, '
' existing_value: CASE WHEN rr_access IS NOT NULL THEN toString(rr.value) ELSE "ACCESS DENIED" END, '
' `submitted at`: rr_sub.time, '
' user: CASE WHEN rr_access IS NOT NULL THEN rr_user.name ELSE rr_partner.name END, '
' access: CASE WHEN rr_access IS NOT NULL THEN True ELSE False END '
' }) '
' ELSE Null END as conflicts '
# And give the user feedback on their submission
' RETURN { '
' Found: r.found, '
' `Submitted by`: CASE WHEN access IS NOT NULL THEN user.name ELSE partner.name END, '
' `Submitted at`: submitted.time, '
' Value: CASE '
' WHEN NOT r.found '
' THEN r.value '
' WHEN access IS NOT NULL '
' THEN r.value '
' ELSE "ACCESS DENIED" '
' END, '
' `Uploaded value`: value, '
' Access: CASE WHEN access IS NULL THEN False ELSE True END, '
' Period: [r.start, r.end], '
' UID: item.uid, '
' `Input variable`: input.name, '
' Partner: partner.name, '
' Conflicts: conflicts '
' } '
' ORDER BY input.name_lower, field.uid, item.id '
)
get_fields_treecount = (
' MATCH (country:Country)<-[:IS_IN]-(region: Region) '
' OPTIONAL MATCH (region)<-[:IS_IN]-(farm: Farm) '
' OPTIONAL MATCH (farm)<-[:IS_IN]-(field: Field) '
' OPTIONAL MATCH '
' (field)'
' <-[:IS_IN]-(:FieldTrees)'
' <-[:FOR]-(field_tree_counter:Counter {name:"tree"}) '
' OPTIONAL MATCH '
' (field)'
' <-[:IS_IN*2]-(block:Block)'
' <-[:IS_IN]-(:BlockTrees)'
' <-[:FOR]-(block_tree_counter:Counter {name:"tree"}) '
' WITH '
' country, '
' region, '
' farm, '
' field, '
' field_tree_counter.count as field_trees, '
' {'
' name: block.name, '
' label:"Block", '
' treecount: block_tree_counter.count '
' } as blocks, '
' block_tree_counter.count as block_trees '
' WITH '
' country, '
' region, '
' farm, '
' { '
' name: field.name, '
' label:"Field", '
' treecount: field_trees - sum(block_trees), '
' children: FILTER(block IN collect(blocks) WHERE block["name"] IS NOT NULL)'
' } as fields '
' WITH '
' country, '
' region, '
' {'
' name: farm.name, '
' label: "Farm", '
' children: FILTER(field IN collect(fields) WHERE field["name"] IS NOT NULL)'
' } as farms '
' WITH '
' country, '
' {'
' name: region.name, '
' label:"Region", '
' children: FILTER(farm IN collect(farms) WHERE farm["name"] IS NOT NULL)'
' } as regions '
' WITH '
' {'
' name: country.name, '
' label:"Country", '
' children: FILTER(region IN collect (regions) WHERE region["name"] IS NOT NULL)'
' } as countries '
' RETURN countries '
)
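# The query above returns one nested map per country; a sketch of the shape (names are hypothetical):
# {name: "Country A", label: "Country", children: [{name: "Region 1", label: "Region", children:
# [{name: "Farm 1", label: "Farm", children: [{name: "Field 1", label: "Field", treecount: ...,
# children: [{name: "Block 1", label: "Block", treecount: ...}]}]}]}]}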
get_submissions_range = (
# first get all the data collections and link to a base node formed from field
' MATCH '
' (:User {username_lower: toLower($username)}) '
' -[:SUBMITTED*3]->(uff:UserFieldInput) '
' -[s:SUBMITTED]->(record: Record) '
' -[:RECORD_FOR]->(), '
' (uff)-[:CONTRIBUTED]->(ff:FieldInput) '
' -[:FROM_FIELD]->(field: Field), '
' (ff)-[:FOR_INPUT]->(input: Input) '
' WHERE s.time >= $starttime AND s.time <= $endtime '
' WITH '
' input, count(record) as record_count, field '
' RETURN '
' "Input" as d_label, '
' input.name + " (" + toString(record_count) + ")" as d_name, '
' id(field) + "_" + id(input) as d_id, '
' "Field" as n_label, '
' field.name as n_name,'
' id(field) as n_id, '
' "FROM" as r_type, '
' id(field) + "_" + id(input) + "_rel" as r_id, '
' id(field) + "_" + id(input) as r_start, '
' id(field) as r_end '
' UNION '
	# get the user's farm context
' MATCH '
' (:User {username_lower: toLower($username)}) '
' -[:SUBMITTED*3]->(:UserFieldInput) '
' -[:CONTRIBUTED]->(: FieldInput) '
' -[:FOR_ITEM | FROM_FIELD]->(field:Field) '
' -[:IS_IN]->(farm:Farm) '
' RETURN '
' "Field" as d_label, '
' field.name as d_name, '
' id(field) as d_id, '
' "Farm" as n_label, '
' farm.name as n_name, '
' id(farm) as n_id, '
' "IS_IN" as r_type, '
' (id(field) + "_" + id(farm)) as r_id, '
' id(field) as r_start, '
' id(farm) as r_end'
' UNION '
# link the above into region context
' MATCH '
' (:User {username_lower: toLower($username)}) '
' -[:SUBMITTED*3]->(:UserFieldInput) '
' -[:CONTRIBUTED]->(: FieldInput) '
' -[:FOR_ITEM | FROM_FIELD]->(:Field) '
' -[:IS_IN]->(farm: Farm) '
' -[:IS_IN]->(region: Region) '
' RETURN '
' "Farm" as d_label, '
' farm.name as d_name, '
' id(farm) as d_id, '
' "Region" as n_label, '
' region.name as n_name, '
' id(region) as n_id, '
' "IS_IN" as r_type, '
' (id(farm) + "_" + id(region)) as r_id, '
' id(farm) as r_start, '
' id(region) as r_end'
' UNION '
# link the above into country context
' MATCH '
' (:User {username_lower: toLower($username)}) '
' -[:SUBMITTED*3]->(:UserFieldInput) '
' -[:CONTRIBUTED]->(: FieldInput) '
' -[:FOR_ITEM | FROM_FIELD]->(: Field) '
' -[:IS_IN]->(: Farm) '
' -[:IS_IN]->(region: Region) '
' -[:IS_IN]->(country: Country) '
' RETURN '
' "Region" as d_label, '
' region.name as d_name, '
' id(region) as d_id, '
' "Country" as n_label, '
' country.name as n_name, '
' id(country) as n_id, '
' "IS_IN" as r_type, '
' (id(region) + "_" + id(country)) as r_id, '
' id(region) as r_start, '
' id(country) as r_end'
)
| gpl-3.0 | 6,791,217,323,624,032,000 | 29.951091 | 110 | 0.525838 | false | 2.351317 | false | false | false |
acgtun/acgtun.com | acgtun/leetcode/views.py | 1 | 1338 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import sys
from collections import OrderedDict
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.conf import settings
from . import db_table
from database.database import Database
sys.path.append(os.path.join(settings.BASE_DIR, 'ommon'))
sys.path.append(os.path.join(settings.BASE_DIR, 'database'))
db_path = os.path.join(settings.BASE_DIR, 'database')
def get_solution(response):
db = Database(os.path.join(db_path, 'db.sqlite3'))
solutions = db.query("SELECT id,problem,cpptime,cppcode,javatime,javacode,pythontime,pythoncode FROM {}".format(
db_table.leetcode_solution_table))
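    # Each row r follows the SELECT column order above:
    # (id, problem, cpptime, cppcode, javatime, javacode, pythontime, pythoncode),
    # so r[3], r[5] and r[7] below are the C++, Java and Python solutions respectively.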
problems = OrderedDict()
for r in solutions:
pn = r[1]
pn = pn.rstrip()
if pn not in problems.keys():
problems[pn] = OrderedDict()
problems[pn]['cpp'] = r[3]
problems[pn]['java'] = r[5]
problems[pn]['python'] = r[7]
problems = OrderedDict(sorted(problems.items(), key=lambda t: t[0]))
return response.write(render_to_string('leetcode/index.html', {'problems': problems}))
def index(request):
response = HttpResponse();
get_solution(response)
response.close()
return response
| gpl-2.0 | -7,404,087,127,536,536,000 | 28.733333 | 116 | 0.683857 | false | 3.558511 | false | false | false |
jdilallo/jdilallo-test | examples/dfp/v201311/label_service/create_labels.py | 1 | 1668 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new labels.
To determine which labels exist, run get_all_labels.py. This feature is only
available to DFP premium solution networks."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import uuid
# Import appropriate classes from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
label_service = client.GetService('LabelService', version='v201311')
# Create label objects.
labels = []
for _ in xrange(5):
label = {
'name': 'Label #%s' % uuid.uuid4(),
'isActive': 'true',
'types': ['COMPETITIVE_EXCLUSION']
}
labels.append(label)
# Add Labels.
labels = label_service.createLabels(labels)
# Display results.
for label in labels:
print ('Label with id \'%s\', name \'%s\', and types {%s} was found.'
% (label['id'], label['name'], ','.join(label['types'])))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 | -6,635,565,559,352,071,000 | 28.785714 | 77 | 0.681655 | false | 3.723214 | false | false | false |
cshinaver/cctools | umbrella/src/umbrella.py | 1 | 188703 | #!/usr/bin/env cctools_python
# CCTOOLS_PYTHON_VERSION 2.7 2.6
# All the vanilla python package dependencies of Umbrella can be satisfied by Python 2.6.
"""
Umbrella is a tool for specifying and materializing comprehensive execution environments, from the hardware all the way up to software and data. A user simply invokes Umbrella with the desired task, and Umbrella determines the minimum mechanism necessary to run the task, whether it be direct execution, a system container, a local virtual machine, or submission to a cloud or grid environment. We present the overall design of Umbrella and demonstrate its use to precisely execute a high energy physics application and a ray-tracing application across many platforms using a combination of Parrot, Chroot, Docker, VMware, Condor, and Amazon EC2.
Copyright (C) 2003-2004 Douglas Thain and the University of Wisconsin
Copyright (C) 2005- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
Implementation Logics of Different Execution Engines:
If the sandbox type is Parrot, create the mountlist file and set PARROT_MOUNT_FILE; set PATH; set PARROT_LDSO_PATH if a separate OS image is needed; parrotize the user's command into `parrot_run user_cmd`.
If the sandbox type is Docker, transfer the OS image into a Docker image; use volume to mount all the software and data dependencies into a container; set PATH; dockerize the user's command into `docker run user_cmd`. To use Docker, a separate OS image is needed.
If the sandbox type is chroot, create mountpoints for software and data dependencies inside the OS image directory and mount software and data into the OS image, set PATH, chrootize the user's command into `chroot user_cmd`.
Implementation Logic of Dependency Sources:
HTTP/HTTPS: Download the dependency into Umbrella local cache.
CVMFS: check whether the mountpoint already exists on the execution node; if yes, there is no need to set a mountpoint for this dependency and the next dependency is processed directly; if no, parrot will be used to deliver cvmfs for the application.
If Parrot is needed to access cvmfs, and the sandbox type is Parrot,
Do all the work mentioned above for Parrot execution engine + add SITEINFO into mountlist file.
If Parrot is needed to access cvmfs, and the sandbox type is Docker,
Do all the work mentioned above for Docker execution engine + add SITEINFO into mountlist file + parrotize the user's command. First parrotize the user's command, then dockerize the user's command.
If Parrot is needed to access cvmfs, and the sandbox type is chroot,
Do all the work mentioned above for chroot execution engine + add SITEINFO into mountlist file + parrotize the user's command. First parrotize the user's command, then chrootize the user's command.
ROOT: If the user expects the root file to be accessed at runtime without downloading, Umbrella does nothing when a ROOT file is needed through the ROOT protocol, because ROOT supports data access during runtime without downloading first. Inside the umbrella specification file, the user only needs to specify the mount_env attribute.
If the user expects the root file to be downloaded first, then the user needs to specify both the mount_env and mountpoint attributes inside the umbrella specification.
Git: If the user's application needs git to do `git clone <repo_url>; git checkout <branch_name/commit_id>`, then the user does not need to specify mountpoint attribute inside the umbrella specification.
If the user's application does not explicitly require git, but umbrella tries to pull some dependencies from a remote git repository, then the user needs to specify both the mount_env and mountpoint attributes inside the umbrella specification.
mount_env and mountpoint:
If only mountpoint is set to A in a specification, the dependency will be downloaded into the umbrella local cache with the file path of D, and a new mountpoint will be added into mount_dict (mount_dict[A] = D).
If only mount_env is set to B in a specification, the dependency will not be downloaded, meta_search will be executed to get one remote storage location, C, of the dependency, a new environment variable will be set (env_para_dict[B] = C).
If mountpoint is set to A and mount_env is set to B in a specification, the dependency will be downloaded into the umbrella local cache with the file path of D, and a new mountpoint will be added into mount_dict (mount_dict[A] = D) and a new environment variable will also be set (env_para_dict[B] = A).
Local path inside the umbrella local cache:
Case 1: the dependency is delivered as a git repository through http/https/git protocol.
dest = os.path.dirname(sandbox_dir) + "/cache/" + git_commit + '/' + repo_name
Note: git_commit is optional in the metadata database. If git_commit is not specified in the metadata database, then:
dest = os.path.dirname(sandbox_dir) + "/cache/" + repo_name
Case 2: the dependency is delivered not as a git repository through http/https protocol.
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
Note: checksum is required to be specified in the metadata database. If it is not specified, umbrella will complain and exit.
Case 3: SITECONF info necessary for CVMFS cms repository access through Parrot. For this case, we use a hard-coded path.
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/SITECONF"
"""
import sys
from stat import *
from pprint import pprint
import subprocess
import platform
import re
import tarfile
import StringIO
from optparse import OptionParser
import os
import hashlib
import difflib
import sqlite3
import shutil
import datetime
import time
import getpass
import grp
import logging
import multiprocessing
import resource
import tempfile
import urllib
import gzip
import imp
found_requests = None
try:
imp.find_module('requests')
found_requests = True
import requests
import requests.packages.urllib3
except ImportError:
found_requests = False
found_boto3 = None
try:
imp.find_module('boto3')
found_boto3 = True
import boto3
except ImportError:
found_boto3 = False
found_botocore = None
try:
imp.find_module('botocore')
found_botocore = True
import botocore
except ImportError:
found_botocore = False
s3_url = 'https://s3.amazonaws.com'
if sys.version_info >= (3,):
import urllib.request as urllib2
import urllib.parse as urlparse
else:
import urllib2
import urlparse
if sys.version_info > (2,6,):
import json
else:
import simplejson as json #json module is introduce in python 2.4.3
#Replacing the version of cctools inside umbrella is easy: just set cctools_binary_version.
cctools_binary_version = "5.2.0"
cctools_dest = ""
#set cms_siteconf_url to be the url for the siteconf your application depends on
#the url and format settings here should be consistent with the function set_cvmfs_cms_siteconf
cms_siteconf_url = "http://ccl.cse.nd.edu/research/data/hep-case-study/2efd5cbb3424fe6b4a74294c84d0fb43/SITECONF.tar.gz"
cms_siteconf_format = "tgz"
tempfile_list = [] #a list of temporary files created by umbrella that need to be removed before umbrella ends.
tempdir_list = [] #a list of temporary dirs created by umbrella that need to be removed before umbrella ends.
pac_manager = {
"yum": ("-y install", "info")
}
"""
ec2 metadata
the instance types provided by ec2 are undergoing changes as time goes by.
"""
ec2_json = {
"redhat-6.5-x86_64": {
"ami-2cf8901c": {
"ami": "ami-2cf8901c",
"root_device_type": "ebs",
"virtualization_type": "paravirtual",
"user": "ec2-user"
},
"ami-0b5f073b": {
"ami": "ami-0b5f073b",
"root_device_type": "ebs",
"virtualization_type": "paravirtual",
"user": "ec2-user"
}
},
"centos-6.6-x86_64": {
"ami-0b06483b": {
"ami": "ami-0b06483b",
"root_device_type": "ebs",
"virtualization_type": "paravirtual",
"user": "root"
}
},
"redhat-5.10-x86_64": {
"ami-d76a29e7": {
"ami": "ami-d76a29e7",
"root_device_type": "ebs",
"virtualization_type": "hvm",
"user": "root"
}
}
}
upload_count = 0
def subprocess_error(cmd, rc, stdout, stderr):
"""Print the command, return code, stdout, and stderr; and then directly exit.
Args:
cmd: the executed command.
rc: the return code.
stdout: the standard output of the command.
stderr: standard error of the command.
Returns:
directly exit the program.
"""
cleanup(tempfile_list, tempdir_list)
sys.exit("`%s` fails with the return code of %d, \nstdout: %s, \nstderr: %s\n" % (cmd, rc, stdout, stderr))
def func_call(cmd):
""" Execute a command and return the return code, stdout, stderr.
Args:
cmd: the command needs to execute using the subprocess module.
Returns:
a tuple including the return code, stdout, stderr.
"""
logging.debug("Start to execute command: %s", cmd)
p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = p.communicate()
rc = p.returncode
logging.debug("returncode: %d\nstdout: %s\nstderr: %s", rc, stdout, stderr)
return (rc, stdout, stderr)
def func_call_withenv(cmd, env_dict):
""" Execute a command with a special setting of the environment variables and return the return code, stdout, stderr.
Args:
cmd: the command needs to execute using the subprocess module.
env_dict: the environment setting.
Returns:
a tuple including the return code, stdout, stderr.
"""
logging.debug("Start to execute command: %s", cmd)
logging.debug("The environment variables for executing the command is:")
logging.debug(env_dict)
p = subprocess.Popen(cmd, env = env_dict, stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = p.communicate()
rc = p.returncode
logging.debug("returncode: %d\nstdout: %s\nstderr: %s", rc, stdout, stderr)
return (rc, stdout, stderr)
def which_exec(name):
"""The implementation of shell which command
Args:
name: the name of the executable to be found.
Returns:
If the executable is found, returns its fullpath.
If PATH is not set, directly exit.
Otherwise, returns None.
"""
if not os.environ.has_key("PATH"):
cleanup(tempfile_list, tempdir_list)
logging.critical("The environment variable PATH is not set!")
sys.exit("The environment variable PATH is not set!")
for path in os.environ["PATH"].split(":"):
fullpath = path + '/' + name
if os.path.exists(fullpath) and os.path.isfile(fullpath):
return fullpath
return None
def md5_cal(filename, block_size=2**20):
"""Calculate the md5sum of a file
Args:
filename: the name of the file
block_size: the size of each block
Returns:
If the calculation fails for any reason, directly exit.
Otherwise, return the md5 value of the content of the file
"""
try:
with open(filename, 'rb') as f:
md5 = hashlib.md5()
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
except Exception as e:
cleanup(tempfile_list, tempdir_list)
logging.critical("Computing the checksum of %s fails: %s.", filename, e)
sys.exit("md5_cal(" + filename + ") failed.\n" + e)
def url_download(url, dest):
""" Download url into dest
Args:
url: the url needed to be downloaded.
dest: the path where the content from the url should be put.
Returns:
If the url is downloaded successfully, return None;
Otherwise, directly exit.
"""
logging.debug("Start to download %s to %s ...." % (url, dest))
urllib.urlretrieve(url, dest)
def dependency_download(name, url, checksum, checksum_tool, dest, format_remote_storage, action):
"""Download a dependency from the url and verify its integrity.
Args:
		name: the file name of the dependency. If its format is plain text, then the filename is the same as the archived name. If its format is tgz, the filename should be the archived name with the trailing .tgz/.tar.gz removed.
url: the storage location of the dependency.
checksum: the checksum of the dependency.
checksum_tool: the tool used to calculate the checksum, such as md5sum.
dest: the destination of the dependency where the downloaded dependency will be put.
format_remote_storage: the file format of the dependency, such as .tgz.
action: the action on the downloaded dependency. Options: none, unpack. "none" leaves the downloaded dependency at it is. "unpack" uncompresses the dependency.
Returns:
If the url is a broken link or the integrity of the downloaded data is bad, directly exit.
Otherwise, return None.
"""
print "Download software from %s into the umbrella local cache (%s)" % (url, dest)
logging.debug("Download software from %s into the umbrella local cache (%s)", url, dest)
dest_dir = os.path.dirname(dest)
dest_uncompress = dest #dest_uncompress is the path of the uncompressed-version dependency
if format_remote_storage == "plain":
filename = name
elif format_remote_storage == "tgz":
filename = "%s.tar.gz" % name
dest = os.path.join(dest_dir, filename) #dest is the path of the compressed-version dependency
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
if not os.path.exists(dest):
#download the dependency from the url
		#this method currently will fail when the data size is larger than the memory size; using subprocess + wget could solve it
url_download(url, dest)
#if it exists, the uncompressed-version directory will be deleted first
if action == "unpack" and format_remote_storage != 'plain' and os.path.exists(dest_uncompress):
shutil.rmtree(dest_uncompress)
logging.debug("the uncompressed-version directory exists already, first delete it")
	#calculate the checksum of the compressed-version dependency
if checksum_tool == "md5sum":
local_checksum = md5_cal(dest)
logging.debug("The checksum of %s is: %s", dest, local_checksum)
if not local_checksum == checksum:
cleanup(tempfile_list, tempdir_list)
logging.critical("The version of %s is incorrect! Please first delete it and its unpacked directory!!", dest)
sys.exit("the version of " + dest + " is incorrect! Please first delete it and its unpacked directory!!\n")
elif not checksum_tool:
logging.debug("the checksum of %s is not provided!", url)
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s is not supported currently!", checksum_tool)
sys.exit(checksum_tool + "is not supported currently!")
#if the uncompressed-version dependency does not exist, uncompress the dependency
if action == "unpack" and (not os.path.exists(dest_uncompress)) and format_remote_storage == "tgz":
logging.debug("Uncompressing %s into %s ....", dest, dest_uncompress)
tfile = tarfile.open(dest, "r:gz")
tfile.extractall(dest_dir)
def extract_tar(src, dest, form):
"""Extract a tgz file from src to dest
Args:
src: the location of a tgz file
dest: the location where the uncompressed data will be put
form: the format the tarball. Such as: tar, tgz
Returns:
None
"""
if form == "tar":
tfile = tarfile.open(src, "r")
elif form == "tgz":
tfile = tarfile.open(src, "r:gz")
tfile.extractall(dest)
def meta_search(meta_json, name, id=None):
"""Search the metadata information of an dependency in the meta_json
First find all the items with the required name in meta_json.
Then find the right one whose id satisfied the requirement.
	If no id parameter is provided, then the first matched one will be returned.
Args:
meta_json: the json object including all the metadata of dependencies.
name: the name of the dependency.
id: the id attribute of the dependency. Defaults to None.
Returns:
If one item is found in meta_json, return the item, which is a dictionary.
If no item satisfied the requirement on meta_json, directly exit.
"""
if meta_json.has_key(name):
if not id:
for item in meta_json[name]:
return meta_json[name][item]
else:
if meta_json[name].has_key(id):
return meta_json[name][id]
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("meta_json does not has <%s> with the id <%s>", name, id)
sys.exit("meta_json does not has <%s> with the id <%s>" % (name, id))
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("meta_json does not include %s", name)
sys.exit("meta_json does not include %s\n" % name)
def attr_check(name, item, attr, check_len = 0):
"""Check and obtain the attr of an item.
Args:
name: the name of the dependency.
item: an item from the metadata database
attr: an attribute
check_len: if set to 1, also check whether the length of the attr is > 0; if set to 0, ignore the length checking.
Returns:
If the attribute check is successful, directly return the attribute.
Otherwise, directly exit.
"""
logging.debug("check the %s attr of the following item:", attr)
logging.debug(item)
if item.has_key(attr):
if check_len == 1:
if len(item[attr]) <= 0:
cleanup(tempfile_list, tempdir_list)
logging.debug("The %s attr of the item is empty.", attr)
sys.exit("The %s attr of the item (%s) is empty." % (item, attr))
#when multiple options are available, currently the first one will be picked.
#we can add filter here to control the choice.
if attr == 'source':
return source_filter(item[attr], ['osf', 's3'], name)
else:
return item[attr][0]
else:
return item[attr]
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("This item doesn not have %s attr!", attr)
sys.exit("the item (%s) does not have %s attr!" % (item, attr))
def source_filter(sources, filters, name):
"""Filter the download urls of a dependency.
	The reason why this filtering process is necessary is that some urls are not
	accessible by the current umbrella runtime. For example, some urls may point to
	OSF while the execution node has no requests python package installed. In this
	case, all the download urls pointing to OSF are ignored.
Args:
sources: a list of download urls
filters: a list of protocols which are not supported by the current umbrella runtime.
name: the name of the dependency.
Returns:
If all the download urls are not available, exit directly.
Otherwise, return the first available url.
"""
l = []
for s in sources:
filtered = 0
for item in filters:
if s[:len(item)] == item:
filtered = 1
break
if not filtered:
l.append(s)
if len(l) == 0:
return sources[0]
else:
return l[0]
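# Hedged usage sketch (the urls are made up): with filters ["osf", "s3"],
# source_filter(["osf+https://osf.io/abc/download", "http://example.com/pkg.tar.gz"], ["osf", "s3"], "pkg")
# skips the osf+ url and returns "http://example.com/pkg.tar.gz"; if every source is filtered out,
# the first source is returned unchanged.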
def cctools_download(sandbox_dir, hardware_platform, linux_distro, action):
"""Download cctools
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
linux_distro: the linux distro. For Example: redhat6, centos6.
action: the action on the downloaded dependency. Options: none, unpack. "none" leaves the downloaded dependency at it is. "unpack" uncompresses the dependency.
Returns:
the path of the downloaded cctools in the umbrella local cache. For example: /tmp/umbrella_test/cache/d19376d92daa129ff736f75247b79ec8/cctools-4.9.0-redhat6-x86_64
"""
name = "cctools-%s-%s-%s" % (cctools_binary_version, hardware_platform, linux_distro)
source = "http://ccl.cse.nd.edu/software/files/%s.tar.gz" % name
global cctools_dest
cctools_dest = os.path.dirname(sandbox_dir) + "/cache/" + name
dependency_download(name, source, None, None, cctools_dest, "tgz", "unpack")
return cctools_dest
def set_cvmfs_cms_siteconf(sandbox_dir):
"""Download cvmfs SITEINFO and set its mountpoint.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
Returns:
cvmfs_cms_siteconf_mountpoint: a string in the format of '/cvmfs/cms.cern.ch/SITECONF/local <SITEINFO dir in the umbrella local cache>/local'
"""
dest = os.path.dirname(sandbox_dir) + "/cache/cms_siteconf/SITECONF"
dependency_download("SITECONF.tar.gz", cms_siteconf_url, "", "", dest, cms_siteconf_format, "unpack")
cvmfs_cms_siteconf_mountpoint = '/cvmfs/cms.cern.ch/SITECONF/local %s/local' % dest
return cvmfs_cms_siteconf_mountpoint
def is_dir(path):
"""Judge whether a path is directory or not.
If the path is a dir, directly return. Otherwise, exit directly.
Args:
path: a path
Returns:
None
"""
if os.path.isdir(path):
pass
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("%s is not a directory!", path)
sys.exit("%s is not a directory!" % path)
def git_dependency_download(repo_url, dest, git_branch, git_commit):
"""Prepare a dependency from a git repository.
	First check whether dest exists or not: if dest exists, then check out git_branch and git_commit;
otherwise, git clone url, and then checkout to git_branch and git_commit.
Args:
repo_url: the url of the remote git repository
dest: the local directory where the git repository will be cloned into
git_branch: the branch name of the git repository
git_commit: the commit id of the repository
Returns:
dest: the local directory where the git repository is
"""
dest = remove_trailing_slashes(dest)
scheme, netloc, path, query, fragment = urlparse.urlsplit(repo_url)
repo_name = os.path.basename(path)
if repo_name[-4:] == '.git':
repo_name = repo_name[:-4]
dest = dest + '/' + repo_name
if os.path.exists(dest):
is_dir(dest)
else:
dir = os.path.dirname(dest)
if os.path.exists(dir):
is_dir(dir)
else:
os.makedirs(dir)
os.chdir(dir)
if dependency_check('git') == -1:
cleanup(tempfile_list, tempdir_list)
sys.exit("Git is not found!")
cmd = "git clone %s" % repo_url
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
os.chdir(dest)
if git_branch:
cmd = "git checkout %s" % git_branch
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
if git_commit:
cmd = "git checkout %s" % git_commit
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
return dest
def git_dependency_parser(item, repo_url, sandbox_dir):
"""Parse a git dependency
Args:
item: an item from the metadata database
repo_url: the url of the remote git repository
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
Returns:
dest: the path of the downloaded data dependency in the umbrella local cache.
"""
logging.debug("This dependency is stored as a git repository: ")
logging.debug(item)
git_branch = ''
if item.has_key("branch"):
git_branch = item["branch"]
git_commit = ''
if item.has_key("commit"):
git_commit = item["commit"]
dest = os.path.dirname(sandbox_dir) + "/cache/" + git_commit
dest = git_dependency_download(repo_url, dest, git_branch, git_commit)
return dest
def data_dependency_process(name, id, meta_json, sandbox_dir, action, osf_auth):
"""Download a data dependency
Args:
name: the item name in the data section
id: the id attribute of the processed dependency
meta_json: the json object including all the metadata of dependencies.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
action: the action on the downloaded dependency. Options: none, unpack. "none" leaves the downloaded dependency at it is. "unpack" uncompresses the dependency.
osf_auth: the osf authentication info including osf_username and osf_password.
Returns:
dest: the path of the downloaded data dependency in the umbrella local cache.
"""
item = meta_search(meta_json, name, id)
source = attr_check(name, item, "source", 1)
if source[:4] == 'git+':
dest = git_dependency_parser(item, source[4:], sandbox_dir)
elif source[:4] == 'osf+':
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
try:
logging.debug("Trying to download %s as a normal url ,,,", source)
dependency_download(name, source[4:], checksum, "md5sum", dest, form, action)
except:
logging.debug("Fails to download %s as a normal url ,,,", source)
if len(osf_auth) < 2:
cleanup(tempfile_list, tempdir_list)
logging.debug("Please use --osf_user and --osf_pass to specify your osf authentication info!")
sys.exit("Please use --osf_user and --osf_pass to specify your osf authentication info!")
if form == "tgz":
osf_download(osf_auth[0], osf_auth[1], source[4:], dest + ".tar.gz")
else:
osf_download(osf_auth[0], osf_auth[1], source[4:], dest)
dependency_download(name, dest, checksum, "md5sum", dest, form, action)
elif source[:3] == "s3+":
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
try:
logging.debug("Trying to download %s as a normal url ,,,", source)
dependency_download(name, source[3:], checksum, "md5sum", dest, form, action)
except:
logging.debug("Fails to download %s as a normal url ,,,", source)
if form == "tgz":
s3_download(source[3:], dest + ".tar.gz")
else:
s3_download(source[3:], dest)
dependency_download(name, dest, checksum, "md5sum", dest, form, action)
else:
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
dependency_download(name, source, checksum, "md5sum", dest, form, action)
return dest
def check_cvmfs_repo(repo_name):
""" Check whether a cvmfs repo is installed on the host or not
Args:
repo_name: a cvmfs repo name. For example: "/cvmfs/cms.cern.ch".
Returns:
If the cvmfs repo is installed, returns the string including the mountpoint of cvmfs cms repo. For example: "/cvmfs/cms.cern.ch".
Otherwise, return an empty string.
"""
logging.debug("Check whether a cvmfs repo is installed on the host or not")
cmd = "df -h|grep '^cvmfs'|grep "+ "'" + repo_name + "'" + "|rev| cut -d' ' -f1|rev"
rc, stdout, stderr = func_call(cmd)
if rc == 0:
return stdout
else:
return ''
def dependency_process(name, id, action, meta_json, sandbox_dir, osf_auth):
""" Process each explicit and implicit dependency.
Args:
name: the item name in the software section
id: the id attribute of the processed dependency
action: the action on the downloaded dependency. Options: none, unpack. "none" leaves the downloaded dependency at it is. "unpack" uncompresses the dependency.
meta_json: the json object including all the metadata of dependencies.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
osf_auth: the osf authentication info including osf_username and osf_password.
Returns:
mount_value: the actual storage path of one dependency.
"""
mount_value = ''
item = meta_search(meta_json, name, id)
source = attr_check(name, item, "source", 1)
logging.debug("%s is chosen to deliver %s", source, name)
if source[:4] == "git+":
dest = git_dependency_parser(item, source[4:], sandbox_dir)
mount_value = dest
cleanup(tempfile_list, tempdir_list)
sys.exit("this is git source, can not support")
elif source[:4] == "osf+":
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
#first download it as a normal url
try:
logging.debug("Trying to download %s as a normal url ,,,", source)
dependency_download(name, source[4:], checksum, "md5sum", dest, form, action)
except:
logging.debug("Fails to download %s as a normal url ,,,", source)
if len(osf_auth) < 2:
cleanup(tempfile_list, tempdir_list)
logging.debug("Please use --osf_user and --osf_pass to specify your osf authentication info!")
sys.exit("Please use --osf_user and --osf_pass to specify your osf authentication info!")
if form == "tgz":
osf_download(osf_auth[0], osf_auth[1], source[4:], dest + ".tar.gz")
else:
osf_download(osf_auth[0], osf_auth[1], source[4:], dest)
dependency_download(name, dest, checksum, "md5sum", dest, form, action)
mount_value = dest
elif source[:3] == "s3+":
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
try:
logging.debug("Trying to download %s as a normal url ,,,", source)
dependency_download(name, source[3:], checksum, "md5sum", dest, form, action)
except:
logging.debug("Fails to download %s as a normal url ,,,", source)
if form == "tgz":
s3_download(source[3:], dest + ".tar.gz")
else:
s3_download(source[3:], dest)
dependency_download(name, dest, checksum, "md5sum", dest, form, action)
mount_value = dest
elif source[:5] == "cvmfs":
pass
else:
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
dependency_download(name, source, checksum, "md5sum", dest, form, action)
mount_value = dest
return mount_value
def env_parameter_init(hardware_spec, kernel_spec, os_spec):
""" Set the environment parameters according to the specification file.
Args:
hardware_spec: the hardware section in the specification for the user's task.
kernel_spec: the kernel section in the specification for the user's task.
os_spec: the os section in the specification for the user's task.
Returns:
a tuple including the requirements for hardware, kernel and os.
"""
hardware_platform = attr_check("hardware", hardware_spec, "arch").lower()
cpu_cores = 1
if hardware_spec.has_key("cores"):
cpu_cores = hardware_spec["cores"].lower()
memory_size = "1GB"
if hardware_spec.has_key("memory"):
memory_size = hardware_spec["memory"].lower()
disk_size = "1GB"
if hardware_spec.has_key("disk"):
disk_size = hardware_spec["disk"].lower()
kernel_name = attr_check("kernel", kernel_spec, "name").lower()
kernel_version = attr_check("kernel", kernel_spec, "version").lower()
kernel_version = re.sub('\s+', '', kernel_version).strip()
distro_name = attr_check("os", os_spec, "name").lower()
distro_version = attr_check("os", os_spec, "version").lower()
os_id = ''
if os_spec.has_key("id"):
os_id = os_spec["id"]
index = distro_version.find('.')
linux_distro = distro_name + distro_version[:index] #example of linux_distro: redhat6
return (hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version, linux_distro, distro_name, distro_version, os_id)
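# A hedged illustration of the sections consumed above (hypothetical values, for illustration only):
#   hardware = {"arch": "x86_64", "cores": "2", "memory": "2GB", "disk": "3GB"}
#   kernel = {"name": "linux", "version": ">=2.6.18"}
#   osspec = {"name": "Redhat", "version": "6.5", "id": "some_os_id"}
#   env_parameter_init(hardware, kernel, osspec)
#   # -> ("x86_64", "2", "2gb", "3gb", "linux", ">=2.6.18", "redhat6", "redhat", "6.5", "some_os_id")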
def compare_versions(v1, v2):
""" Compare two versions, the format of version is: X.X.X
Args:
v1: a version.
v2: a version.
Returns:
0 if v1 == v2; 1 if v1 is newer than v2; -1 if v1 is older than v2.
"""
list1 = v1.split('.')
list2 = v2.split('.')
for i in range(len(list1)):
list1[i] = int(list1[i])
for i in range(len(list2)):
list2[i] = int(list2[i])
if list1[0] == list2[0]:
if list1[1] == list2[1]:
if list1[2] == list2[2]:
return 0
elif list1[2] > list2[2]:
return 1
else:
return -1
elif list1[1] > list2[1]:
return 1
else:
return -1
elif list1[0] > list2[0]:
return 1
else:
return -1
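# Hedged examples of the comparison semantics (versions must be in X.X.X form):
#   compare_versions("2.6.32", "2.6.18")  # -> 1
#   compare_versions("2.6.18", "2.6.18")  # -> 0
#   compare_versions("2.4.9", "2.6.18")   # -> -1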
def verify_kernel(host_kernel_name, host_kernel_version, kernel_name, kernel_version):
""" Check whether the kernel version of the host machine matches the requirement.
The kernel_version format supported for now includes: >=2.6.18; [2.6.18, 2.6.32].
Args:
host_kernel_name: the name of the OS kernel of the host machine.
host_kernel_version: the version of the kernel of the host machine.
kernel_name: the name of the required OS kernel (e.g., linux). Not case sensitive.
kernel_version: the version of the required kernel (e.g., 2.6.18).
Returns:
If the kernel version of the host machine matches the requirement, return None.
If the kernel version of the host machine does not match the requirement, directly exit.
"""
if host_kernel_name != kernel_name:
cleanup(tempfile_list, tempdir_list)
logging.critical("The required kernel name is %s, the kernel name of the host machine is %s!", kernel_name, host_kernel_name)
sys.exit("The required kernel name is %s, the kernel name of the host machine is %s!\n" % (kernel_name, host_kernel_name))
if kernel_version[0] == '[':
list1 = kernel_version[1:-1].split(',')
if compare_versions(host_kernel_version, list1[0]) >= 0 and compare_versions(host_kernel_version, list1[1]) <= 0:
logging.debug("The kernel version matches!")
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("The required kernel version is %s, the kernel version of the host machine is %s!", kernel_version, host_kernel_version)
sys.exit("The required kernel version is %s, the kernel version of the host machine is %s!\n" % (kernel_version, host_kernel_version))
elif kernel_version[0] == '>':
if compare_versions(host_kernel_version, kernel_version[2:]) >= 0:
logging.debug("The kernel version matches!")
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("The required kernel version is %s, the kernel version of the host machine is %s!", kernel_version, host_kernel_version)
sys.exit("The required kernel version is %s, the kernel version of the host machine is %s!\n" % (kernel_version, host_kernel_version))
elif kernel_version[0] == '<':
if compare_versions(host_kernel_version, kernel_version[2:]) <= 0:
logging.debug("The kernel version matches!")
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("The required kernel version is %s, the kernel version of the host machine is %s!", kernel_version, host_kernel_version)
sys.exit("The required kernel version is %s, the kernel version of the host machine is %s!\n" % (kernel_version, host_kernel_version))
else: #the kernel version is a single value
if compare_versions(host_kernel_version, kernel_version) == 0:
logging.debug("The kernel version matches!")
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("The required kernel version is %s, the kernel version of the host machine is %s!", kernel_version, host_kernel_version)
sys.exit("The required kernel version is %s, the kernel version of the host machine is %s!\n" % (kernel_version, host_kernel_version))
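# Hedged examples of the kernel_version formats handled above (host values are hypothetical):
#   verify_kernel("linux", "2.6.32", "linux", ">=2.6.18")        # range check, passes
#   verify_kernel("linux", "2.6.32", "linux", "[2.6.18,2.6.32]") # interval check, passes
#   verify_kernel("linux", "2.6.32", "linux", "2.6.32")          # exact match, passes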
def env_check(sandbox_dir, sandbox_mode, hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version):
""" Check the matching degree between the specification requirement and the host machine.
Currently checks the following items: sandbox_mode, hardware platform, kernel, OS, disk, memory, cpu cores.
Other things that could also be checked in the future: software and data.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
sandbox_mode: the execution engine.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
cpu_cores: the number of required cpus (e.g., 1).
memory_size: the memory size requirement (e.g., 2GB). Not case sensitive.
disk_size: the disk size requirement (e.g., 2GB). Not case sensitive.
kernel_name: the name of the required OS kernel (e.g., linux). Not case sensitive.
kernel_version: the version of the required kernel (e.g., 2.6.18).
Returns:
host_linux_distro: the linux distro of the host machine. For Example: redhat6, centos6.
"""
print "Execution environment checking ..."
if sandbox_mode not in ["docker", "destructive", "parrot"]:
cleanup(tempfile_list, tempdir_list)
logging.critical("Currently the local execution engine only supports three sandbox techniques: docker, destructive or parrot!")
sys.exit("Currently the local execution engine only supports three sandbox techniques: docker, destructive or parrot!\n")
uname_list = platform.uname() #format of uname_list: (system,node,release,version,machine,processor)
logging.debug("Hardware platform checking ...")
if hardware_platform != uname_list[4].lower():
cleanup(tempfile_list, tempdir_list)
logging.critical("The specification requires %s, but the local machine is %s", hardware_platform, uname_list[4].lower())
sys.exit("The specification requires " + hardware_platform + ", but the local machine is " + uname_list[4].lower() + "!\n")
logging.debug("CPU cores checking ...")
cpu_cores = int(cpu_cores)
host_cpu_cores = multiprocessing.cpu_count()
if cpu_cores > host_cpu_cores:
cleanup(tempfile_list, tempdir_list)
logging.critical("The specification requires %d cpu cores, but the local machine only has %d cores!", cpu_cores, host_cpu_cores)
sys.exit("The specification requires %d cpu cores, but the local machine only has %d cores!\n" % (cpu_cores, host_cpu_cores))
logging.debug("Memory size checking ...")
memory_size = re.sub('\s+', '', memory_size).strip()
memory_size = float(memory_size[:-2])
cmd = "free -tg|grep Total|sed 's/\s\+/ /g'|cut -d' ' -f2"
rc, stdout, stderr = func_call(cmd)
if rc != 0:
logging.critical("The return code is %d, memory check fail!", rc)
else:
host_memory_size = float(stdout)
if memory_size > host_memory_size:
cleanup(tempfile_list, tempdir_list)
logging.critical("The specification requires %.2f GB memory space, but the local machine only has %.2f GB free memory space!", memory_size, host_memory_size)
sys.exit("The specification requires %.2f GB memory space, but the local machine only has %.2f GB free memory space!" % (memory_size, host_memory_size))
logging.debug("Disk space checking ...")
disk_size = re.sub('\s+', '', disk_size).strip()
disk_size = float(disk_size[:-2])
st = os.statvfs(sandbox_dir)
free_disk = float(st.f_bavail * st.f_frsize) / (1024*1024*1024)
if disk_size > free_disk:
cleanup(tempfile_list, tempdir_list)
logging.critical("The specification requires %.2f GB disk space, but the local machine only has %.2f GB free disk space!", disk_size, free_disk)
sys.exit("The specification requires %.2f GB disk space, but the local machine only has %.2f GB free disk space!" % (disk_size, free_disk))
#check kernel
logging.debug("Kernel checking ...")
host_kernel_name = uname_list[0].lower()
index = uname_list[2].find('-')
host_kernel_version = uname_list[2][:index]
verify_kernel(host_kernel_name, host_kernel_version, kernel_name, kernel_version)
dist_list = platform.dist()
logging.debug("The hardware information of the local machine:")
logging.debug(dist_list)
#set host_linux_distro. Examples: redhat6, centos6.
#potential problem: maybe in the future, we need finer control over the host_linux_distro, like redhat6.5, centos6.5.
arch_index = uname_list[2].find('ARCH')
host_linux_distro = None
if arch_index != -1:
host_linux_distro = 'arch'
else:
redhat_index = uname_list[2].find('el')
centos_index = uname_list[2].find('centos')
if redhat_index != -1:
dist_version = uname_list[2][redhat_index + 2]
if centos_index != -1 or dist_list[0].lower() == 'centos':
host_linux_distro = 'centos' + dist_version
else:
host_linux_distro = 'redhat' + dist_version
logging.debug("The OS distribution information of the local machine: %s", host_linux_distro)
return host_linux_distro
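# A hedged usage sketch of env_check (requirements below are hypothetical):
#   host_linux_distro = env_check("/tmp/umbrella/sandbox", "parrot", "x86_64", "1", "2GB", "3GB", "linux", ">=2.6.18")
#   # returns something like "redhat6" or "centos6" on a matching host, or exits on any mismatch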
def parrotize_user_cmd(user_cmd, sandbox_dir, cwd_setting, linux_distro, hardware_platform, meta_json, cvmfs_http_proxy):
"""Modify the user's command into `parrot_run + the user's command`.
The cases when this function should be called: (1) sandbox_mode == parrot; (2) sandbox_mode != parrot and cvmfs is needed to deliver some dependencies not installed on the execution node.
Args:
user_cmd: the user's command.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
cwd_setting: the current working directory for the execution of the user's command.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
linux_distro: the linux distro. For Example: redhat6, centos6.
meta_json: the json object including all the metadata of dependencies.
cvmfs_http_proxy: HTTP_PROXY environment variable used to access CVMFS by Parrot
Returns:
None
"""
#Here we use the cctools meta from the local cache (which includes all the meta including cvmfs, globus, fuse and so on). Even if the user has installed cctools on the machine, the configuration of that local installation may not be what we want. For example, the user may have configured it with just `./configure --prefix ~/cctools`.
#cctools 4.4 does not support the --no-set-foreground feature.
#user_cmd[0] = dest + "/bin/parrot_run --no-set-foreground /bin/sh -c 'cd " + cwd_setting + "; " + user_cmd[0] + "'"
if cvmfs_http_proxy:
user_cmd[0] = "export HTTP_PROXY=" + cvmfs_http_proxy + "; " + cctools_dest + "/bin/parrot_run --no-set-foreground /bin/sh -c 'cd " + cwd_setting + "; " + user_cmd[0] + "'"
else:
user_cmd[0] = cctools_dest + "/bin/parrot_run --no-set-foreground /bin/sh -c 'cd " + cwd_setting + "; " + user_cmd[0] + "'"
logging.debug("The parrotized user_cmd: %s" % user_cmd[0])
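# A hedged illustration of the rewrite performed above (paths are hypothetical):
#   a user_cmd of ["python myscript.py"] becomes roughly
#   ["<cctools_dest>/bin/parrot_run --no-set-foreground /bin/sh -c 'cd <cwd_setting>; python myscript.py'"]
#   with an "export HTTP_PROXY=..." prefix when cvmfs_http_proxy is set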
def chrootize_user_cmd(user_cmd, cwd_setting):
"""Modify the user's command when the sandbox_mode is chroot. This check should be done after `parrotize_user_cmd`.
The cases when this function should be called: sandbox_mode == chroot
Args:
user_cmd: the user's command.
cwd_setting: the current working directory for the execution of the user's command.
Returns:
the modified version of the user's cmd.
"""
#By default, the directory of entering chroot is /. So before executing the user's command, first change the directory to the $PWD environment variable.
user_cmd[0] = 'chroot / /bin/sh -c "cd %s; %s"' %(cwd_setting, user_cmd[0])
return user_cmd
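# A hedged example (command and working directory are hypothetical):
#   chrootize_user_cmd(["ls -la"], "/tmp/cwd")
#   # -> ['chroot / /bin/sh -c "cd /tmp/cwd; ls -la"']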
def software_install(mount_dict, env_para_dict, software_spec, meta_json, sandbox_dir, pac_install_destructive, osf_auth, name=None):
""" Installation each software dependency specified in the software section of the specification.
Args:
mount_dict: a dict including each mounting item in the specification, whose key is the access path used by the user's task; whose value is the actual storage path.
env_para_dict: the environment variables which need to be set for the execution of the user's command.
software_spec: the software section of the specification
meta_json: the json object including all the metadata of dependencies.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
pac_install_destructive: whether this is to install packages through package manager in destructive mode
osf_auth: the osf authentication info including osf_username and osf_password.
name: if name is specified, then only the specified item will be installed. All the other items in the software section will be ignored.
Returns:
None.
"""
print "Installing software dependencies ..."
for item in software_spec:
if name and name != item:
continue
# always first check whether the attribute is set or not inside the umbrella specification file.
id = ''
if software_spec[item].has_key('id'):
id = software_spec[item]['id']
mountpoint = ''
if software_spec[item].has_key('mountpoint'):
mountpoint = software_spec[item]['mountpoint']
mount_env = ''
if software_spec[item].has_key('mount_env'):
mount_env = software_spec[item]['mount_env']
action = 'unpack'
if software_spec[item].has_key('action'):
action = software_spec[item]['action'].lower()
if mount_env and not mountpoint:
result = meta_search(meta_json, item, id)
env_para_dict[mount_env] = attr_check(item, result, "source", 1)
else:
if mount_env and mountpoint:
env_para_dict[mount_env] = mountpoint
mount_value = dependency_process(item, id, action, meta_json, sandbox_dir, osf_auth)
if len(mount_value) > 0:
logging.debug("Add mountpoint (%s:%s) into mount_dict", mountpoint, mount_value)
if pac_install_destructive:
parent_dir = os.path.dirname(mountpoint)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
elif not os.path.isdir(parent_dir):
cleanup(tempfile_list, tempdir_list)
logging.critical("%s is not a directory!\n", parent_dir)
sys.exit("%s is not a directory!\n" % parent_dir)
if not os.path.exists(mountpoint):
cmd = "mv -f %s %s/" % (mount_value, parent_dir)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
else:
mount_dict[mountpoint] = mount_value
def data_install(data_spec, meta_json, sandbox_dir, mount_dict, env_para_dict, osf_auth, name=None):
"""Process data section of the specification.
At the beginning of the function, mount_dict only includes items for software and os dependencies. After this function is done, all the items for data dependencies will be added into mount_dict.
Args:
data_spec: the data section of the specification.
meta_json: the json object including all the metadata of dependencies.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
mount_dict: a dict including each mounting item in the specification, whose key is the access path used by the user's task; whose value is the actual storage path.
env_para_dict: the environment variables which need to be set for the execution of the user's command.
osf_auth: the osf authentication info including osf_username and osf_password.
name: if name is specified, then only the specified item will be installed. All the other items in the software section will be ignored.
Returns:
None
"""
print "Installing data dependencies ..."
for item in data_spec:
if name and name != item:
continue
id = ''
if data_spec[item].has_key('id'):
id = data_spec[item]['id']
mountpoint = ''
if data_spec[item].has_key('mountpoint'):
mountpoint = data_spec[item]['mountpoint']
mount_env = ''
if data_spec[item].has_key('mount_env'):
mount_env = data_spec[item]['mount_env']
action = 'unpack'
if data_spec[item].has_key('action'):
action = data_spec[item]['action']
if mount_env and not mountpoint:
result = meta_search(meta_json, item, id)
env_para_dict[mount_env] = attr_check(item, result, "source", 1)
else:
mount_value = data_dependency_process(item, id, meta_json, sandbox_dir, action, osf_auth)
logging.debug("Add mountpoint (%s:%s) into mount_dict", mountpoint, mount_value)
mount_dict[mountpoint] = mount_value
if mount_env and mountpoint:
env_para_dict[mount_env] = mountpoint
def get_linker_path(hardware_platform, os_image_dir):
"""Return the path of ld-linux.so within the downloaded os image dependency
Args:
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
os_image_dir: the path of the OS image inside the umbrella local cache.
Returns:
If the dynamic linker is found within the OS image, return its fullpath.
Otherwise, returns None.
"""
#env_list is directly under the directory of the downloaded os image dependency
if hardware_platform == "x86_64":
p = os_image_dir + "/lib64/ld-linux-x86-64.so.2"
if os.path.exists(p):
return p
else:
return None
else:
return None
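# A hedged example (cache layout below is hypothetical):
#   get_linker_path("x86_64", "/tmp/umbrella/cache/<os_id>/redhat-6.5-x86_64")
#   # -> ".../redhat-6.5-x86_64/lib64/ld-linux-x86-64.so.2" if that file exists, otherwise None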
def construct_docker_volume(input_dict, mount_dict, output_f_dict, output_d_dict):
"""Construct the docker volume parameters based on mount_dict.
Args:
input_dict: the setting of input files specified by the --inputs option.
mount_dict: a dict including each mounting item in the specification, whose key is the access path used by the user's task; whose value is the actual storage path.
Returns:
volume_paras: all the `-v` options for the docker command.
"""
if "/" in mount_dict:
del mount_dict["/"] #remove "/" from the mount_dict to avoid messing the root directory of the host machine
volume_paras = ""
for key in mount_dict:
volume_paras = volume_paras + " -v " + mount_dict[key] + ":" + key + " "
for key in input_dict:
volume_paras = volume_paras + " -v " + input_dict[key] + ":" + key + " "
for key in output_f_dict:
volume_paras = volume_paras + " -v " + output_f_dict[key] + ":" + key + " "
for key in output_d_dict:
volume_paras = volume_paras + " -v " + output_d_dict[key] + ":" + key + " "
return volume_paras
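# A hedged illustration (mappings are hypothetical; note mount_dict maps access path -> storage path):
#   construct_docker_volume({"/tmp/in.txt": "/home/u/in.txt"}, {"/software/git": "/cache/git"}, {}, {})
#   # -> ' -v /cache/git:/software/git  -v /home/u/in.txt:/tmp/in.txt '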
def obtain_path(os_image_dir, sw_mount_dict):
"""Get the path environment variable from envfile and add the mountpoints of software dependencies into it
the envfile here is named env_list under the OS image.
Args:
os_image_dir: the path of the OS image inside the umbrella local cache.
sw_mount_dict: a dict only including all the software mounting items.
Returns:
path_env: the new value for PATH.
"""
path_env = ''
if os.path.exists(os_image_dir + "/env_list") and os.path.isfile(os_image_dir + "/env_list"):
with open(os_image_dir + "/env_list", "rb") as f:
for line in f:
if line[:5] == 'PATH=':
path_env = line[5:-1]
break
else:
path_env = '.:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin'
for key in sw_mount_dict:
path_env = key + "/bin:" + path_env
return path_env
def transfer_env_para_docker(env_para_dict):
"""Transfer the env_para_dict into the docker `-e` options.
Args:
env_para_dict: the environment variables which need to be set for the execution of the user's command.
Returns:
env_options: the docker `-e` options constructed from env_para_dict.
"""
env_options = ''
for key in env_para_dict:
if key:
env_options = env_options + ' -e "' + key + '=' + env_para_dict[key] + '" '
return env_options
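# A hedged example (environment variable below is hypothetical):
#   transfer_env_para_docker({"CMS_VERSION": "CMSSW_4_2_8"})
#   # -> ' -e "CMS_VERSION=CMSSW_4_2_8" '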
def collect_software_bin(host_cctools_path, sw_mount_dict):
"""Construct the path environment from the mountpoints of software dependencies.
Each software mountpoint has a bin subdir containing all its executables.
Args:
host_cctools_path: the path of cctools under the umbrella local cache.
sw_mount_dict: a dict only including all the software mounting items.
Returns:
extra_path: the paths which are extracted from sw_mount_dict and host_cctools_path, and needed to be added into PATH.
"""
extra_path = ""
for key in sw_mount_dict:
if key != '/':
extra_path += '%s/bin:' % key
if host_cctools_path:
extra_path += '%s/bin:' % host_cctools_path
return extra_path
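# A hedged example (paths are hypothetical):
#   collect_software_bin("/cache/cctools", {"/software/git": "/cache/git"})
#   # -> '/software/git/bin:/cache/cctools/bin:'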
def in_local_passwd():
"""Judge whether the current user exists in /etc/passwd.
Returns:
If the current user is inside /etc/passwd, returns 'yes'.
Otherwise, returns 'no'.
"""
user_name = getpass.getuser()
with open('/etc/passwd') as f:
for line in f:
if line[:len(user_name)] == user_name:
logging.debug("%s is included in /etc/passwd!", user_name)
return 'yes'
logging.debug("%s is not included in /etc/passwd!", user_name)
return 'no'
def in_local_group():
"""Judge whether the current user's group exists in /etc/group.
Returns:
If the current user's group exists in /etc/group, returns 'yes'.
Otherwise, returns 'no'.
"""
group_name = grp.getgrgid(os.getgid())[0]
with open('/etc/group') as f:
for line in f:
if line[:len(group_name)] == group_name:
logging.debug("%s is included in /etc/group!", group_name)
return 'yes'
logging.debug("%s is not included in /etc/group!", group_name)
return 'no'
def create_fake_mount(os_image_dir, sandbox_dir, mount_list, path):
For each ancestor dir B of path (including path itself), check whether it exists in the rootfs, whether it exists in the mount_list, and
whether it exists in the fake_mount directory inside the sandbox.
If B is inside the rootfs or the fake_mount dir, do nothing. Otherwise, create a fake directory inside the fake_mount.
Reason: we need to guarantee that every ancestor dir of a path exists somewhere because the `cd` shell builtin stats each level of
the ancestor dirs of a path. Without creating a mountpoint for every ancestor dir, `cd` would fail.
Args:
os_image_dir: the path of the OS image inside the umbrella local cache.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
mount_list: a list of mountpoints which already been inside the parrot mountlist file.
path: a dir path.
Returns:
mount_str: a string including the mount items which are needed to added into the parrot mount file.
"""
mount_str = ''
if not path: #if the path is NULL, directly return.
return
path_list = []
tmp_path = path
while tmp_path != '/':
path_list.insert(0, tmp_path)
tmp_path = remove_trailing_slashes(os.path.dirname(tmp_path))
for item in path_list:
logging.debug("Judge whether the following mountpoint exists: %s", item)
fake_mount_path = '%s/fake_mount%s' % (sandbox_dir, item)
#if item is under localdir, do nothing.
if item in remove_trailing_slashes(os.path.dirname(sandbox_dir)):
break
if not os.path.exists(os_image_dir + item) and item not in mount_list and not os.path.exists(fake_mount_path):
logging.debug("The mountpoint (%s) does not exist, create a fake mountpoint (%s) for it!", item, fake_mount_path)
os.makedirs(fake_mount_path)
mount_str += '%s %s\n' % (item, fake_mount_path)
else:
logging.debug("The mountpoint (%s) already exists, do nothing!", item)
return mount_str
def remove_trailing_slashes(path):
"""Remove the trailing slashes of a string
Args:
path: a path, which can be any string.
Returns:
path: the new path without any trailing slashes.
"""
while len(path) > 1 and path.endswith('/'):
path = path[:-1]
return path
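# Hedged examples:
#   remove_trailing_slashes("/a/b/")   # -> "/a/b"
#   remove_trailing_slashes("/a/b///") # -> "/a/b"
#   remove_trailing_slashes("/")       # -> "/"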
def construct_mountfile_full(sandbox_dir, os_image_dir, mount_dict, input_dict, output_f_dict, output_d_dict, cvmfs_cms_siteconf_mountpoint):
"""Create the mountfile if parrot is used to create a sandbox for the application and a separate rootfs is needed.
The trick here is the adding sequence does matter. The latter-added items will be checked first during the execution.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
os_image_dir: the path of the OS image inside the umbrella local cache.
mount_dict: all the mount items extracted from the specification file and possible implicit dependencies like cctools.
input_dict: the setting of input files specified by the --inputs option
cvmfs_cms_siteconf_mountpoint: a string in the format of '/cvmfs/cms.cern.ch/SITECONF/local <SITEINFO dir in the umbrella local cache>/local'
Returns:
the path of the mountfile.
"""
mount_list = []
mountfile_path = sandbox_dir + "/.__mountlist"
with open(mountfile_path, "wb") as mountfile:
new_root = mount_dict["/"]
mountfile.write("/ " + new_root + "\n")
mount_list.append('/')
del mount_dict["/"]
mountfile.write(new_root + " " + new_root + "\n") #this one is needed to avoid recursive path resolution.
mount_list.append(new_root)
mountfile.write("%s %s\n" % (os.path.dirname(sandbox_dir), os.path.dirname(sandbox_dir)))
mount_list.append(os.path.dirname(sandbox_dir))
logging.debug("Adding items from mount_dict into %s", mountfile_path)
for key in mount_dict:
#os.path.dirname('/a/b/') is '/a/b'. Therefore, before and after calling dirname, use remove_trailing_slashes to remove the trailing slashes.
key = remove_trailing_slashes(key)
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, remove_trailing_slashes(os.path.dirname(key)))
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mount_list.append(key)
mount_list.append(mount_dict[key])
mountfile.write(key + " " + mount_dict[key] + "\n")
mountfile.write(mount_dict[key] + " " + mount_dict[key] + "\n")
for key in output_f_dict:
mountfile.write(key + " " + output_f_dict[key] + "\n")
for key in output_d_dict:
mountfile.write(key + " " + output_d_dict[key] + "\n")
#common-mountlist includes all the common mountpoint (/proc, /dev, /sys, /mnt, /disc, /selinux)
if os.path.exists(os_image_dir + "/common-mountlist") and os.path.isfile(os_image_dir + "/common-mountlist"):
logging.debug("Adding items from %s/common-mountlist into %s", os_image_dir, mountfile_path)
with open(os_image_dir + "/common-mountlist", "rb") as f:
for line in f:
tmplist = line.split(' ')
item = remove_trailing_slashes(tmplist[0])
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, remove_trailing_slashes(os.path.dirname(item)))
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mount_list.append(tmplist[0])
mountfile.write(line)
logging.debug("Add sandbox_dir(%s) into %s", sandbox_dir, mountfile_path)
mountfile.write(sandbox_dir + ' ' + sandbox_dir + '\n')
mount_list.append(sandbox_dir)
logging.debug("Add /etc/hosts and /etc/resolv.conf into %s", mountfile_path)
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, '/etc')
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mountfile.write('/etc/hosts /etc/hosts\n')
mount_list.append('/etc/hosts')
mountfile.write('/etc/resolv.conf /etc/resolv.conf\n')
mount_list.append('/etc/resolv.conf')
#ND workstations use NSCD (Name Service Cache Daemon) to handle the passwd, group and hosts services. First check whether the current uid and gid are in /etc/passwd and /etc/group; if yes, use them. Otherwise, construct separate passwd and group files.
#If the current user name and group can not be found in /etc/passwd and /etc/group, a fake passwd and group file will be constructed under sandbox_dir.
existed_user = in_local_passwd()
if existed_user == 'yes':
logging.debug("Add /etc/passwd into %s", mountfile_path)
mountfile.write('/etc/passwd /etc/passwd\n')
else:
logging.debug("Construct a fake passwd file: .passwd, add .passwd into %s", mountfile_path)
with open('.passwd', 'w+') as f:
f.write('%s:x:%d:%d:unknown:%s:%s\n' % (getpass.getuser(), os.getuid(), os.getgid(), sandbox_dir + '/' + getpass.getuser(), os.environ['SHELL']))
mountfile.write('/etc/passwd %s/.passwd\n' % (sandbox_dir))
logging.debug("Construct a fake acl file: .__acl, add .__acl into %s", mountfile_path)
with open('.__acl', 'w+') as acl_file:
acl_file.write('%s rwlax\n' % getpass.getuser())
mount_list.append('/etc/passwd')
#getpass.getuser() returns the login name of the user
#os.makedirs(getpass.getuser()) #it is not really necessary to create this dir.
existed_group = in_local_group()
if existed_group == 'yes':
logging.debug("Add /etc/group into %s", mountfile_path)
mountfile.write('/etc/group /etc/group\n')
else:
logging.debug("Construct a fake group file: .group, add .group into %s", mountfile_path)
with open('.group', 'w+') as f:
f.write('%s:x:%d:%d\n' % (grp.getgrgid(os.getgid())[0], os.getgid(), os.getuid()))
mountfile.write('/etc/group %s/.group\n' % (sandbox_dir))
mount_list.append('/etc/group')
#add /var/run/nscd/socket into mountlist
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, '/var/run/nscd')
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mountfile.write('/var/run/nscd/socket ENOENT\n')
mount_list.append('/var/run/nscd/socket')
if os.path.exists(os_image_dir + "/special_files") and os.path.isfile(os_image_dir + "/special_files"):
logging.debug("Add %s/special_files into %s", os_image_dir, mountfile_path)
with open(os_image_dir + "/special_files", "rb") as f:
for line in f:
tmplist = line.split(' ')
item = remove_trailing_slashes(tmplist[0])
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, remove_trailing_slashes(os.path.dirname(item)))
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mount_list.append(tmplist[0])
mountfile.write(line)
#add the input_dict into mountflie
logging.debug("Add items from input_dict into %s", mountfile_path)
for key in input_dict:
key = remove_trailing_slashes(key)
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, remove_trailing_slashes(os.path.dirname(key)))
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mountfile.write(key + " " + input_dict[key] + "\n")
mount_list.append(key)
if cvmfs_cms_siteconf_mountpoint == '':
logging.debug('cvmfs_cms_siteconf_mountpoint is null')
else:
mountfile.write('/cvmfs /cvmfs\n')
mountfile.write(cvmfs_cms_siteconf_mountpoint + '\n')
logging.debug('cvmfs_cms_siteconf_mountpoint is not null: %s', cvmfs_cms_siteconf_mountpoint)
return mountfile_path
def construct_mountfile_cvmfs_cms_siteconf(sandbox_dir, cvmfs_cms_siteconf_mountpoint):
""" Create the mountfile if chroot or docker is used to execute a CMS application and the host machine does not have cvmfs installed.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
cvmfs_cms_siteconf_mountpoint: a string in the format of '/cvmfs/cms.cern.ch/SITECONF/local <SITEINFO dir in the umbrella local cache>/local'
Returns:
the path of the mountfile.
"""
mountfile_path = sandbox_dir + "/.__mountlist"
with open(mountfile_path, "wb") as f:
f.write(cvmfs_cms_siteconf_mountpoint + '\n')
logging.debug('cvmfs_cms_siteconf_mountpoint is not null: %s', cvmfs_cms_siteconf_mountpoint)
return mountfile_path
def construct_mountfile_easy(sandbox_dir, input_dict, output_f_dict, output_d_dict, mount_dict, cvmfs_cms_siteconf_mountpoint):
""" Create the mountfile if parrot is used to create a sandbox for the application and a separate rootfs is not needed.
The trick here is the adding sequence does matter. The latter-added items will be checked first during the execution.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
mount_dict: all the mount items extracted from the specification file and possible implicit dependencies like cctools.
input_dict: the setting of input files specified by the --inputs option
cvmfs_cms_siteconf_mountpoint: a string in the format of '/cvmfs/cms.cern.ch/SITECONF/local <SITEINFO dir in the umbrella local cache>/local'
Returns:
the path of the mountfile.
"""
mountfile_path = sandbox_dir + "/.__mountlist"
with open(mountfile_path, "wb") as f:
for key in input_dict:
f.write(key + " " + input_dict[key] + "\n")
for key in output_f_dict:
f.write(key + " " + output_f_dict[key] + "\n")
for key in output_d_dict:
f.write(key + " " + output_d_dict[key] + "\n")
for key in mount_dict:
f.write(key + " " + mount_dict[key] + "\n")
f.write(mount_dict[key] + " " + mount_dict[key] + "\n")
if cvmfs_cms_siteconf_mountpoint == '':
logging.debug('cvmfs_cms_siteconf_mountpoint is null')
else:
f.write(cvmfs_cms_siteconf_mountpoint + '\n')
logging.debug('cvmfs_cms_siteconf_mountpoint is not null: %s', cvmfs_cms_siteconf_mountpoint)
return mountfile_path
def construct_env(sandbox_dir, os_image_dir):
""" Read env_list inside an OS image and save all the environment variables into a dictionary.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
os_image_dir: the path of the OS image inside the umbrella local cache.
Returns:
env_dict: a dictionary which includes all the environment variables from env_list
"""
if os.path.exists(os_image_dir + "/env_list") and os.path.isfile(os_image_dir + "/env_list"):
with open(os_image_dir + "/env_list", "rb") as f:
env_dict = {}
for line in f:
index = line.find("=")
key = line[:index]
value = line[(index+1):-1]
env_dict[key] = value
return env_dict
return {}
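# A hedged illustration of the env_list format parsed above (hypothetical content of <os_image_dir>/env_list):
#   PATH=/usr/local/bin:/usr/bin:/bin
#   LANG=en_US.UTF-8
# construct_env would then return {"PATH": "/usr/local/bin:/usr/bin:/bin", "LANG": "en_US.UTF-8"}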
def has_docker_image(hardware_platform, distro_name, distro_version, tag):
"""Check whether the required docker image exists on the local machine or not.
Args:
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
distro_name: the name of the required OS (e.g., redhat).
distro_version: the version of the required OS (e.g., 6.5).
tag: the tag of the expected docker image. tag is os_id
Returns:
If the required docker image exists on the local machine, returns 'yes'.
Otherwise, returns 'no'.
"""
name = "%s-%s-%s" %(distro_name, distro_version, hardware_platform)
cmd = "docker images %s | awk '{print $2}'" % (name)
logging.debug("Start to run the command: %s", cmd)
p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = p.communicate()
rc = p.returncode
logging.debug("returncode: %d\nstdout: %s\nstderr: %s", rc, stdout, stderr)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
#str = "\n%s\s+" % (name)
if stdout.find(tag) == -1:
return 'no'
else:
return 'yes'
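# A hedged usage sketch (distro and tag below are hypothetical):
#   has_docker_image("x86_64", "redhat", "6.5", "some_os_id")
#   # runs `docker images redhat-6.5-x86_64` and returns 'yes' only if the tag column contains some_os_id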
def create_docker_image(sandbox_dir, hardware_platform, distro_name, distro_version, tag):
"""Create a docker image based on the cached os image directory.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
distro_name: the name of the required OS (e.g., redhat).
distro_version: the version of the required OS (e.g., 6.5).
tag: the tag of the expected docker image. tag is os_id
Returns:
If the docker image is imported from the tarball successfully, returns None.
Otherwise, directly exit.
"""
name = "%s-%s-%s" %(distro_name, distro_version, hardware_platform)
location = os.path.dirname(sandbox_dir) + '/cache/' + tag + '/' + name
#docker container runs as root user, so use the owner option of tar command to set the owner of the docker image
cmd = 'cd ' + location + '; tar --owner=root -c .|docker import - ' + name + ":" + tag + '; cd -'
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
def construct_chroot_mount_dict(sandbox_dir, output_dir, input_dict, need_separate_rootfs, os_image_dir, mount_dict, host_cctools_path):
"""Construct the directory mount list and the file mount list for chroot. chroot requires that each target mountpoint be created within the chroot jail.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
output_f_dict: the mappings of output files (key is the file path used by the application; value is the file path the user specifies.)
output_d_dict: the mappings of output dirs (key is the dir path used by the application; value is the dir path the user specified.)
input_dict: the setting of input files specified by the --inputs option.
need_separate_rootfs: whether a separate rootfs is needed to execute the user's command.
os_image_dir: the path of the OS image inside the umbrella local cache.
mount_dict: a dict including each mounting item in the specification, whose key is the access path used by the user's task; whose value is the actual storage path.
host_cctools_path: the path of cctools under the umbrella local cache.
Returns:
a tuple includes the directory mount list and the file mount list
"""
dir_dict = {}
file_dict = {}
logging.debug("need_separate_rootfs: %d", need_separate_rootfs)
if need_separate_rootfs == 1:
logging.debug("Add %s into dir_dict of chroot", os_image_dir + "/common-mountlist")
with open(os_image_dir + "/common-mountlist") as f:
for line in f:
index = line.find(' ')
item = line[:index]
dir_dict[item] = item
#special_files includes the paths of all files of special types (block, character, socket, pipe)
logging.debug("Add %s into dir_dict of chroot", os_image_dir + "/special_files")
with open(os_image_dir + "/special_files") as f:
for line in f:
index = line.find(' ')
item = line[:index]
if os.path.exists(item):
file_dict[item] = item
if host_cctools_path:
logging.debug("Add cctools binary (%s) into dir_dict of chroot", host_cctools_path)
dir_dict[host_cctools_path] = host_cctools_path
logging.debug("Add sandbox_dir and output_dir into dir_dict of chroot")
dir_dict[sandbox_dir] = sandbox_dir
dir_dict[output_dir] = output_dir
logging.debug("Add items from mount_dict into dir_dict of chroot")
for key in mount_dict:
if key != '/':
value = mount_dict[key]
mode = os.lstat(value).st_mode
if S_ISDIR(mode):
dir_dict[value] = key
else:
file_dict[value] = key
logging.debug("Add /etc/passwd /etc/group /etc/hosts /etc/resolv.conf into file_dict of chroot")
file_dict['/etc/passwd'] = '/etc/passwd'
file_dict['/etc/group'] = '/etc/group'
file_dict['/etc/hosts'] = '/etc/hosts'
file_dict['/etc/resolv.conf'] = '/etc/resolv.conf'
logging.debug("Add input_dict into file_dict of chroot")
for key in input_dict:
value = input_dict[key]
mode = os.lstat(value).st_mode
if S_ISDIR(mode):
dir_dict[value] = key
else:
file_dict[value] = key
logging.debug("dir_dict:")
logging.debug(dir_dict)
logging.debug("file_dict:")
logging.debug(file_dict)
return (dir_dict, file_dict)
def chroot_mount_bind(dir_dict, file_dict, sandbox_dir, need_separate_rootfs, hardware_platform, distro_name, distro_version):
"""Create each target mountpoint under the cached os image directory through `mount --bind`.
Args:
dir_dict: a dict including all the directory mountpoints needed to be created inside the OS image.
file_dict: a dict including all the file mountpoints needed to be created inside the OS image.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
need_separate_rootfs: whether a separate rootfs is needed to execute the user's command.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
distro_name: the name of the required OS (e.g., redhat).
distro_version: the version of the required OS (e.g., 6.5).
Returns:
If no error happens, returns None.
Otherwise, directly exit.
"""
logging.debug("Use mount --bind to redirect mountpoints")
if need_separate_rootfs == 1:
os_image_name = "%s-%s-%s" %(distro_name, distro_version, hardware_platform)
os_image_path = os.path.dirname(sandbox_dir) + '/cache/' + os_image_name
else:
os_image_path = '/'
#mount --bind -o ro hostdir sandboxdir
for key in dir_dict:
jaildir = '%s%s' % (os_image_path, dir_dict[key])
hostdir = key
#if jaildir and hostdir are the same, there is no need to do the mount.
if jaildir != hostdir:
if not os.path.exists(jaildir):
os.makedirs(jaildir)
cmd = 'mount --bind -o ro %s %s' % (hostdir, jaildir)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
for key in file_dict:
jailfile = '%s%s' % (os_image_path, file_dict[key])
hostfile = key
if jailfile != hostfile:
if not os.path.exists(jailfile):
d = os.path.dirname(jailfile)
if not os.path.exists(d):
os.makedirs(d)
with open(jailfile, 'w+') as f:
pass
cmd = 'mount --bind -o ro %s %s' % (hostfile, jailfile)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
def chroot_post_process(dir_dict, file_dict, sandbox_dir, need_separate_rootfs, hardware_platform, distro_name, distro_version):
"""Remove all the created target mountpoints within the cached os image directory.
It is not necessary to change the mode of the output dir, because only the root user can use the chroot method.
Args:
dir_dict: a dict including all the directory mountpoints needed to be created inside the OS image.
file_dict: a dict including all the file mountpoints needed to be created inside the OS image.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
need_separate_rootfs: whether a separate rootfs is needed to execute the user's command.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
distro_name: the name of the required OS (e.g., redhat).
distro_version: the version of the required OS (e.g., 6.5).
Returns:
If no error happens, returns None.
Otherwise, directly exit.
"""
logging.debug("post process of chroot")
if need_separate_rootfs == 1:
os_image_name = "%s-%s-%s" %(distro_name, distro_version, hardware_platform)
os_image_path = os.path.dirname(sandbox_dir) + '/cache/' + os_image_name
else:
os_image_path = '/'
#file_dict must be processed ahead of dir_dict, because we can not umount a directory if there are other mountpoints created for files under it.
for key in file_dict:
jailfile = '%s%s' % (os_image_path, file_dict[key])
hostfile = key
if jailfile != hostfile:
if os.path.exists(jailfile):
cmd = 'umount -f %s' % (jailfile)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
for key in dir_dict:
jaildir = '%s%s' % (os_image_path, dir_dict[key])
hostdir = key
if jaildir != hostdir:
if os.path.exists(jaildir):
cmd = 'umount -f %s' % (jaildir)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
#remove all the empty ancestor directories
parent_dir = jaildir
mode = os.lstat(parent_dir).st_mode
if S_ISDIR(mode):
while len(os.listdir(parent_dir)) == 0:
os.rmdir(parent_dir)
parent_dir = os.path.dirname(parent_dir)
def workflow_repeat(cwd_setting, sandbox_dir, sandbox_mode, output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, hardware_platform, host_linux_distro, distro_name, distro_version, need_separate_rootfs, os_image_dir, os_image_id, host_cctools_path, cvmfs_cms_siteconf_mountpoint, mount_dict, sw_mount_dict, meta_json, new_os_image_dir):
"""Run the user's task with the help of the sandbox techniques, which currently include chroot, parrot, docker.
Args:
cwd_setting: the current working directory for the execution of the user's command.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
sandbox_mode: the execution engine.
output_f_dict: the mappings of output files (key is the file path used by the application; value is the file path the user specifies.)
output_d_dict: the mappings of output dirs (key is the dir path used by the application; value is the dir path the user specified.)
input_dict: the setting of input files specified by the --inputs option.
env_para_dict: the environment variables which need to be set for the execution of the user's command.
user_cmd: the user's command.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
distro_name: the name of the required OS (e.g., redhat).
distro_version: the version of the required OS (e.g., 6.5).
need_separate_rootfs: whether a separate rootfs is needed to execute the user's command.
os_image_dir: the path of the OS image inside the umbrella local cache.
os_image_id: the id of the OS image.
host_cctools_path: the path of cctools under the umbrella local cache.
cvmfs_cms_siteconf_mountpoint: a string in the format of '/cvmfs/cms.cern.ch/SITECONF/local <SITEINFO dir in the umbrella local cache>/local'
mount_dict: a dict including each mounting item in the specification, whose key is the access path used by the user's task; whose value is the actual storage path.
sw_mount_dict: a dict only including all the software mounting items.
meta_json: the json object including all the metadata of dependencies.
new_os_image_dir: the path of the newly created OS image with all the packages installed by package manager.
Returns:
If no error happens, returns None.
Otherwise, directly exit.
"""
#sandbox_dir will be the home directory of the sandbox
print 'Executing the application ....'
if not os.path.exists(sandbox_dir):
os.makedirs(sandbox_dir)
logging.debug("chdir(%s)", sandbox_dir)
os.chdir(sandbox_dir) #here, we indeed want to chdir to sandbox_dir, not cwd_setting, to do preparation work like create mountlist file for Parrot.
#at this point, all the software should be under the cache dir, and all the mountpoints of the software should be in mount_dict
print "Execution engine: %s" % sandbox_mode
logging.debug("execution engine: %s", sandbox_mode)
logging.debug("need_separate_rootfs: %d", need_separate_rootfs)
if sandbox_mode == "destructive":
env_dict = os.environ
if cvmfs_cms_siteconf_mountpoint:
logging.debug("Create a parrot mountfile for the siteconf meta ...")
env_dict['PARROT_MOUNT_FILE'] = construct_mountfile_cvmfs_cms_siteconf(sandbox_dir, cvmfs_cms_siteconf_mountpoint)
logging.debug("Add env_para_dict into environment variables")
for key in env_para_dict:
env_dict[key] = env_para_dict[key]
logging.debug("Add software binary into PATH")
extra_path = collect_software_bin(host_cctools_path, sw_mount_dict)
if "PATH" not in env_dict:
env_dict['PATH'] = ""
env_dict['PATH'] = '%s:%s' % (env_dict['PATH'], extra_path[:-1])
#move software and data into the location
for key in mount_dict:
parent_dir = os.path.dirname(key)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
elif not os.path.isdir(parent_dir):
cleanup(tempfile_list, tempdir_list)
logging.critical("%s is not a directory!\n", parent_dir)
sys.exit("%s is not a directory!\n" % parent_dir)
if not os.path.exists(key):
cmd = "mv -f %s %s/" % (mount_dict[key], parent_dir)
rc, stdout, stderr = func_call_withenv(cmd, env_dict)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
print "Start executing the user's task: %s" % user_cmd[0]
cmd = "cd %s; %s" % (cwd_setting, user_cmd[0])
rc, stdout, stderr = func_call_withenv(cmd, env_dict)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
logging.debug("Moving the outputs to the expected locations ...")
print "Moving the outputs to the expected locations ..."
for key in output_f_dict:
cmd = "mv -f %s %s" % (key, output_f_dict[key])
rc, stdout, stderr = func_call_withenv(cmd, env_dict)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
for key in output_d_dict:
cmd = "mv -f %s %s" % (key, output_d_dict[key])
rc, stdout, stderr = func_call_withenv(cmd, env_dict)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
elif sandbox_mode == "docker":
if need_separate_rootfs == 1:
if has_docker_image(hardware_platform, distro_name, distro_version, os_image_id) == 'no':
logging.debug("Start to construct a docker image from the os image")
create_docker_image(sandbox_dir, hardware_platform, distro_name, distro_version, os_image_id)
logging.debug("Finish constructing a docker image from the os image")
if cvmfs_cms_siteconf_mountpoint:
item = cvmfs_cms_siteconf_mountpoint.split(' ')[1]
logging.debug("Adding the siteconf meta (%s) into mount_dict", item)
mount_dict[item] = item
logging.debug("Create a parrot mountfile for the siteconf meta (%s)", item)
env_para_dict['PARROT_MOUNT_FILE'] = construct_mountfile_cvmfs_cms_siteconf(sandbox_dir, cvmfs_cms_siteconf_mountpoint)
logging.debug("Add a volume item (%s:%s) for the sandbox_dir", sandbox_dir, sandbox_dir)
#-v /home/hmeng/umbrella_test/output:/home/hmeng/umbrella_test/output
volume_output = " -v %s:%s " % (sandbox_dir, sandbox_dir)
#-v /home/hmeng/umbrella_test/cache/git-x86_64-redhat5:/software/git-x86_64-redhat5/
logging.debug("Start to construct other volumes from input_dict")
volume_parameters = construct_docker_volume(input_dict, mount_dict, output_f_dict, output_d_dict)
#-e "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/software/git-x86_64-redhat5/bin"
logging.debug("Set the environment variables ....")
path_env = obtain_path(os_image_dir, sw_mount_dict)
other_envs = transfer_env_para_docker(env_para_dict)
docker_image_name = "%s-%s-%s" %(distro_name, distro_version, hardware_platform)
#by default, docker executes user_cmd as the root user, `chown` is used to change the owner of the output dir to be the user who calls `umbrella`
chown_cmd = 'chown -R %d:%d %s %s %s' % (os.getuid(), os.getgid(), sandbox_dir, ' '.join(output_f_dict), ' '.join(output_d_dict))
#to count the post processing time, this cmd is split into two commands
container_name = "umbrella_%s_%s_%s" % (docker_image_name, os_image_id, os.path.basename(sandbox_dir))
#do not enable the `-i` and `-t` options of Docker; they will cause failures when the condor execution engine is chosen.
#to allow the exit code of user_cmd to be transferred back, separate the user_cmd and the chown command.
cmd = 'docker run --name %s %s %s -e "PATH=%s" %s %s:%s /bin/sh -c "cd %s; %s"' % (container_name, volume_output, volume_parameters, path_env, other_envs, docker_image_name, os_image_id, cwd_setting, user_cmd[0])
print "Start executing the user's task: %s" % cmd
rc, stdout, stderr = func_call(cmd)
print "\n********** STDOUT of the command **********"
print stdout
print "\n********** STDERR of the command **********"
print stderr
#docker export container_name > tarball
if len(new_os_image_dir) > 0:
if not os.path.exists(new_os_image_dir):
os.makedirs(new_os_image_dir)
os_tar = new_os_image_dir + ".tar"
cmd = "docker export %s > %s" % (container_name, os_tar)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
#uncompress the tarball
cmd = "tar xf %s -C %s" % (os_tar, new_os_image_dir)
extract_tar(os_tar, new_os_image_dir, "tar")
#docker rm container_name
cmd = "docker rm %s" % (container_name)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
cmd1 = 'docker run --rm %s %s -e "PATH=%s" %s %s:%s %s' % (volume_output, volume_parameters, path_env, other_envs, docker_image_name, os_image_id, chown_cmd)
rc, stdout, stderr = func_call(cmd1)
if rc != 0:
subprocess_error(cmd1, rc, stdout, stderr)
else:
#if a separate rootfs is not needed to execute the user's cmd, another execution engine must be used to run the user cmd.
cleanup(tempfile_list, tempdir_list)
logging.debug("Docker execution engine can only be used when a separate rootfs is needed.")
sys.exit("Docker execution engine can only be used when a separate rootfs is needed.\n")
elif sandbox_mode == "parrot":
if need_separate_rootfs == 1:
logging.debug("Construct environment variables ....")
env_dict = construct_env(sandbox_dir, os_image_dir)
env_dict['PWD'] = cwd_setting
logging.debug("Construct mountfile ....")
env_dict['PARROT_MOUNT_FILE'] = construct_mountfile_full(sandbox_dir, os_image_dir, mount_dict, input_dict, output_f_dict, output_d_dict, cvmfs_cms_siteconf_mountpoint)
for key in env_para_dict:
env_dict[key] = env_para_dict[key]
#here, setting the linker will cause strange errors.
logging.debug("Construct dynamic linker path ....")
result = get_linker_path(hardware_platform, os_image_dir)
if not result:
cleanup(tempfile_list, tempdir_list)
logging.critical("Can not find the dynamic linker inside the os image (%s)!", os_image_dir)
sys.exit("Can not find the dynamic linker inside the os image (%s)!\n" % os_image_dir)
env_dict['PARROT_LDSO_PATH'] = result
env_dict['USER'] = getpass.getuser()
#env_dict['HOME'] = sandbox_dir + '/' + getpass.getuser()
logging.debug("Add software binary into PATH")
extra_path = collect_software_bin(host_cctools_path, sw_mount_dict)
if "PATH" not in env_dict:
env_dict['PATH'] = '.:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin'
env_dict['PATH'] = '%s%s' % (extra_path, env_dict['PATH'])
print "Start executing the user's task: %s" % user_cmd[0]
rc, stdout, stderr = func_call_withenv(user_cmd[0], env_dict)
print "\n********** STDOUT of the command **********"
print stdout
print "\n********** STDERR of the command **********"
print stderr
else:
env_dict = os.environ
env_dict['PARROT_MOUNT_FILE'] = construct_mountfile_easy(sandbox_dir, input_dict, output_f_dict, output_d_dict, mount_dict, cvmfs_cms_siteconf_mountpoint)
for key in env_para_dict:
env_dict[key] = env_para_dict[key]
if 'PATH' not in env_dict: #if we run umbrella on Condor, Condor will not set PATH by default.
env_dict['PATH'] = '.:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin'
logging.debug("PATH is empty, forcibly set it to %s", env_dict['PATH'])
else:
env_dict['PATH'] = '.:' + env_dict['PATH']
logging.debug("Forcibly add '.' into PATH")
logging.debug("Add software binary into PATH")
extra_path = collect_software_bin(host_cctools_path, sw_mount_dict)
env_dict['PATH'] = '%s%s' % (extra_path, env_dict['PATH'])
print "Start executing the user's task: %s" % user_cmd[0]
rc, stdout, stderr = func_call_withenv(user_cmd[0], env_dict)
print "\n********** STDOUT of the command **********"
print stdout
print "\n********** STDERR of the command **********"
print stderr
# logging.debug("Removing the parrot mountlist file and the parrot submit file from the sandbox")
# if os.path.exists(env_dict['PARROT_MOUNT_FILE']):
# os.remove(env_dict['PARROT_MOUNT_FILE'])
else:
pass
def condor_process(spec_path, spec_json, spec_path_basename, meta_path, sandbox_dir, output_dir, input_list_origin, user_cmd, cwd_setting, condorlog_path, cvmfs_http_proxy):
"""Process the specification when condor execution engine is chosen
Args:
spec_path: the absolute path of the specification.
spec_json: the json object including the specification.
spec_path_basename: the file name of the specification.
meta_path: the path of the json file including all the metadata information.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
output_dir: the output directory.
input_list_origin: the list of input file paths.
user_cmd: the user's command.
cwd_setting: the current working directory for the execution of the user's command.
condorlog_path: the path of the umbrella log executed on the remote condor execution node.
cvmfs_http_proxy: HTTP_PROXY environment variable used to access CVMFS by Parrot
Returns:
If no errors happen, return None;
Otherwise, directly exit.
"""
if not os.path.exists(sandbox_dir):
os.makedirs(sandbox_dir)
print "Checking the validity of the umbrella specification ..."
if spec_json.has_key("hardware") and spec_json["hardware"] and spec_json.has_key("kernel") and spec_json["kernel"] and spec_json.has_key("os") and spec_json["os"]:
logging.debug("Setting the environment parameters (hardware, kernel and os) according to the specification file ....")
(hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version, linux_distro, distro_name, distro_version, os_id) = env_parameter_init(spec_json["hardware"], spec_json["kernel"], spec_json["os"])
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("this specification is not complete! You must have a hardware section, a kernel section and an os section!")
sys.exit("this specification is not complete! You must have a hardware section, a kernel section and an os section!\n")
condor_submit_path = sandbox_dir + "/condor_task.submit"
print "Constructing Condor submission file according to the umbrella specification ..."
transfer_inputs = ''
new_input_options = ''
logging.debug("Transform input_list_origin into condor attributes ....")
for item in input_list_origin:
index_equal = item.find('=')
access_path = item[:index_equal]
actual_path = item[(index_equal+1):]
transfer_inputs += ',%s' % (actual_path)
new_input_options += '%s=%s,' % (access_path, os.path.basename(actual_path))
if new_input_options and new_input_options[-1] == ',':
new_input_options = new_input_options[:-1]
logging.debug("transfer_input_files: %s, %s", spec_path, transfer_inputs)
logging.debug("The new_input_options of Umbrella: %s", new_input_options)
condor_output_dir = tempfile.mkdtemp(dir=".")
condor_output_dir = os.path.abspath(condor_output_dir)
condor_log_path = sandbox_dir + '/condor_task.log'
umbrella_fullpath = which_exec("umbrella")
if umbrella_fullpath == None:
cleanup(tempfile_list, tempdir_list)
logging.critical("Failed to find the executable umbrella. Please modify your $PATH.")
sys.exit("Failed to find the executable umbrella. Please modify your $PATH.\n")
logging.debug("The full path of umbrella is: %s" % umbrella_fullpath)
#find cctools_python
cmd = 'which cctools_python'
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
cctools_python_path = stdout[:-1]
condor_submit_file = open(condor_submit_path, "w+")
condor_submit_file.write('universe = vanilla\n')
condor_submit_file.write('executable = %s\n' % cctools_python_path)
if cvmfs_http_proxy:
condor_submit_file.write('arguments = "./umbrella -s local --spec %s --cvmfs_http_proxy %s --meta %s -l condor_umbrella -i \'%s\' -o %s --log condor_umbrella.log run \'%s\'"\n' % (spec_path_basename, cvmfs_http_proxy, os.path.basename(meta_path), new_input_options, os.path.basename(condor_output_dir), user_cmd[0]))
else:
condor_submit_file.write('arguments = "./umbrella -s local --spec %s --meta %s -l condor_umbrella -i \'%s\' -o %s --log condor_umbrella.log run \'%s\'"\n' % (spec_path_basename, os.path.basename(meta_path), new_input_options, os.path.basename(condor_output_dir), user_cmd[0]))
# condor_submit_file.write('PostCmd = "echo"\n')
# condor_submit_file.write('PostArguments = "$?>%s/condor_rc"\n' % os.path.basename(condor_output_dir))
condor_submit_file.write('transfer_input_files = %s, %s, %s, %s%s\n' % (cctools_python_path, umbrella_fullpath, meta_path, spec_path, transfer_inputs))
condor_submit_file.write('transfer_output_files = %s, condor_umbrella.log\n' % os.path.basename(condor_output_dir))
condor_submit_file.write('transfer_output_remaps = "condor_umbrella.log=%s"\n' % condorlog_path)
#the python on the redhat5 machines in the ND condor pool is 2.4. However, umbrella requires python 2.6.* or 2.7*.
if linux_distro == "redhat5":
condor_submit_file.write('requirements = TARGET.Arch == "%s" && TARGET.OpSys == "%s" && TARGET.OpSysAndVer == "redhat6"\n' % (hardware_platform, kernel_name))
else:
#condor_submit_file.write('requirements = TARGET.Arch == "%s" && TARGET.OpSys == "%s" && TARGET.OpSysAndVer == "%s" && TARGET.has_docker == true\n' % (hardware_platform, kernel_name, linux_distro))
condor_submit_file.write('requirements = TARGET.Arch == "%s" && TARGET.OpSys == "%s" && TARGET.OpSysAndVer == "%s"\n' % (hardware_platform, kernel_name, linux_distro))
condor_submit_file.write('environment = PATH=.:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin\n')
condor_submit_file.write('output = %s/condor_stdout\n' % sandbox_dir)
condor_submit_file.write('error = %s/condor_stderr\n' % sandbox_dir)
condor_submit_file.write('log = %s\n' % condor_log_path)
condor_submit_file.write('should_transfer_files = yes\n')
condor_submit_file.write('when_to_transfer_output = on_exit\n')
condor_submit_file.write('queue\n')
condor_submit_file.close()
#submit condor job
print "Submitting the Condor job ..."
cmd = 'condor_submit ' + condor_submit_path
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
#keep track of whether the condor job is done
print "Waiting for the condor job to finish ..."
logging.debug("Waiting for the condor job to finish ...")
cmd = 'condor_wait %s' % condor_log_path
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
#check the content of condor log file to figure out the exit code of the remote executed umbrella
remote_rc = 500
with open(condor_log_path, 'rb') as f:
content = f.read()
str = "Normal termination (return value "
index1 = content.rfind(str)
index2 = content.find(')', index1)
remote_rc = int(content[(index1 + len(str)):index2])
print "The exit code of the remote executed umbrella found in the condor log file (%s) is %d!" % (condor_log_path, remote_rc)
logging.debug("The exit code of the remote executed umbrella found in the condor log file (%s) is %d!", condor_log_path, remote_rc)
if remote_rc == 500:
cleanup(tempfile_list, tempdir_list)
logging.critical("Can not find the exit code of the remote executed umbrella inside the condor log file (%s)!", condor_log_path)
sys.exit("Can not find the exit code of the remote executed umbrella inside the condor log file (%s)!" % condor_log_path)
elif remote_rc != 0:
cleanup(tempfile_list, tempdir_list)
logging.critical("The remote umbrella fails and the exit code is %d.", remote_rc)
sys.exit("The remote umbrella fails and the exit code is %d." % remote_rc)
logging.debug("the condor jos is done, put the output back into the output directory!")
print "the condor jobs is done, put the output back into the output directory!"
#check until the condor job is done, post-processing (put the output back into the output directory)
#the semantics of condor_output_files only supports transferring a dir from the execution node back to the current working dir (here it is condor_output_dir).
os.rename(condor_output_dir, output_dir)
print "move condor_stdout, condor_stderr and condor_task.log from sandbox_dir to output_dir."
logging.debug("move condor_stdout, condor_stderr and condor_task.log from sandbox_dir to output_dir.")
os.rename(sandbox_dir + '/condor_stdout', output_dir + '/condor_stdout')
os.rename(sandbox_dir + '/condor_stderr', output_dir + '/condor_stderr')
os.rename(sandbox_dir + '/condor_task.log', output_dir + '/condor_task.log')
print "Remove the sandbox dir"
logging.debug("Remove the sandbox_dir.")
shutil.rmtree(sandbox_dir)
print "The output has been put into the output dir: %s" % output_dir
def decide_instance_type(cpu_cores, memory_size, disk_size, instances):
""" Compare the required hardware configurations with each instance type, and return the first matched instance type, return 'no' if no matched instance type exist.
We can rank each instance type in the future, so that in the case of multiple matches exit, the closest matched instance type is returned.
Args:
cpu_cores: the number of required cpus (e.g., 1).
memory_size: the memory size requirement (e.g., 2GB). Not case sensitive.
disk_size: the disk size requirement (e.g., 2GB). Not case sensitive.
instances: the instances section of the ec2 json file.
Returns:
If there is no matched instance type, return 'no'.
Otherwise, returns the first matched instance type.
"""
cpu_cores = int(cpu_cores)
memory_size = int(memory_size[:-2])
disk_size = int(disk_size[:-2])
for item in instances:
j = instances[item]
inst_mem = int(float((j["memory"][:-2])))
inst_disk = int(j["disk"][:-2])
if cpu_cores <= int(j["cores"]) and memory_size <= inst_mem and disk_size <= inst_disk:
return item
return 'no'
def ec2_process(spec_path, spec_json, meta_option, meta_path, ssh_key, ec2_key_pair, ec2_security_group, ec2_instance_type, sandbox_dir, output_option, output_f_dict, output_d_dict, sandbox_mode, input_list, input_list_origin, env_option, env_para_dict, user_cmd, cwd_setting, ec2log_path, cvmfs_http_proxy):
"""
Args:
spec_path: the path of the specification.
spec_json: the json object including the specification.
meta_option: the --meta option.
meta_path: the path of the json file including all the metadata information.
ssh_key: the name the private key file to use when connecting to an instance.
ec2_key_pair: the path of the key-pair to use when launching an instance.
ec2_security_group: the security group within which the EC2 instance should be run.
ec2_instance_type: the type of an Amazon EC2 instance
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
output_f_dict: the mappings of output files (key is the file path used by the application; value is the file path the user specifies.)
output_d_dict: the mappings of output dirs (key is the dir path used by the application; value is the dir path the user specified.)
sandbox_mode: the execution engine.
input_list: a list including all the absolute path of the input files on the local machine.
input_list_origin: the list of input file paths.
env_para_dict: the environment variables which need to be set for the execution of the user's command.
user_cmd: the user's command.
cwd_setting: the current working directory for the execution of the user's command.
ec2log_path: the path of the umbrella log executed on the remote EC2 execution node.
cvmfs_http_proxy: HTTP_PROXY environment variable used to access CVMFS by Parrot
Returns:
If no errors happen, return None;
Otherwise, directly exit.
"""
print "Checking the validity of the umbrella specification ..."
if spec_json.has_key("hardware") and spec_json["hardware"] and spec_json.has_key("kernel") and spec_json["kernel"] and spec_json.has_key("os") and spec_json["os"]:
(hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version, linux_distro, distro_name, distro_version, os_id) = env_parameter_init(spec_json["hardware"], spec_json["kernel"], spec_json["os"])
else:
cleanup(tempfile_list, tempdir_list)
sys.exit("this spec has no hardware section!\n")
#According to the given specification file, the AMI and the instance type can be identified. os and arch can be used to decide the AMI; cores, memory and disk can be used to decide the instance type.
#decide the AMI according to (distro_name, distro_version, hardware_platform)
print "Deciding the AMI according to the umbrella specification ..."
name = '%s-%s-%s' % (distro_name, distro_version, hardware_platform)
if ec2_json.has_key(name):
if os_id[:4] != "ec2:":
for item in ec2_json[name]:
logging.debug("The AMI information is: ")
logging.debug(ec2_json[name][item])
ami = ec2_json[name][item]['ami']
user_name = ec2_json[name][item]['user']
break
else:
if ec2_json[name].has_key(os_id):
logging.debug("The AMI information is: ")
logging.debug(ec2_json[name][os_id])
ami = ec2_json[name][os_id]['ami']
user_name = ec2_json[name][os_id]['user']
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s with the id <%s> is not in the ec2 json file (%s).", name, os_id, ec2_path)
sys.exit("%s with the id <%s> is not in the ec2 json file (%s)." % (name, os_id, ec2_path))
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s is not in the ec2 json file (%s).", name, ec2_path)
sys.exit("%s is not in the ec2 json file (%s).\n" % (name, ec2_path))
#start the instance and obtain the instance id
print "Starting an Amazon EC2 instance ..."
instance_id = get_instance_id(ami, ec2_instance_type, ec2_key_pair, ec2_security_group)
logging.debug("Start the instance and obtain the instance id: %s", instance_id)
#get the public DNS of the instance_id
print "Obtaining the public DNS of the Amazon EC2 instance ..."
public_dns = get_public_dns(instance_id)
logging.debug("Get the public DNS of the instance_id: %s", public_dns)
'''
#instance_id = "<instance_id>"
#public_dns = "<public_dns>"
instance_id = "i-e61ad13c"
public_dns = "ec2-52-26-177-97.us-west-2.compute.amazonaws.com"
'''
#install wget on the instance
print "Installing wget on the EC2 instance ..."
logging.debug("Install wget on the instance")
#here we should judge the os type; yum is used by Fedora, CentOS, and RHEL.
if distro_name not in ["fedora", "centos", "redhat"]:
cleanup(tempfile_list, tempdir_list)
sys.exit("Currently the supported Linux distributions are redhat, centos and fedora.\n")
#ssh exit code 255: the remote node is down or unavailable
rc = 300
while rc != 0:
#without `-t` option of ssh, if the username is not root, `ssh + sudo` will get the following error: sudo: sorry, you must have a tty to run sudo.
cmd = 'ssh -t -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i %s %s@%s \'sudo yum -y install wget\'' % (ssh_key, user_name, public_dns)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
logging.debug("`%s` fails with the return code of %d, \nstdout: %s, \nstderr: %s" % (cmd, rc, stdout, stderr))
time.sleep(5)
#python: python is needed to execute umbrella itself on the instance
print "Installing python 2.6.9 on the instance ..."
logging.debug("Install python 2.6.9 on the instance.")
python_name = 'python-2.6.9-%s-%s' % (linux_distro, hardware_platform)
python_url = "http://ccl.cse.nd.edu/research/data/hep-case-study/python-2.6.9-%s-%s.tar.gz" % (linux_distro, hardware_platform)
scheme, netloc, path, query, fragment = urlparse.urlsplit(python_url)
python_url_filename = os.path.basename(path)
cmd = 'ssh -t -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i %s %s@%s \'sudo wget %s && sudo tar zxvf %s\'' % (ssh_key, user_name, public_dns, python_url, python_url_filename)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
#scp umbrella, meta.json and input files to the instance
print "Sending the umbrella task to the EC2 instance ..."
logging.debug("scp relevant files into the HOME dir of the instance.")
input_file_string = ''
for input_file in input_list:
input_file_string += input_file + ' '
#here meta_path may start with http, so it needs a special treatment
umbrella_fullpath = which_exec("umbrella")
if meta_option:
meta_option = " --meta ~%s/%s " % (user_name, os.path.basename(meta_path))
cmd = 'scp -i %s %s %s %s %s %s@%s:' % (ssh_key, umbrella_fullpath, spec_path, meta_path, input_file_string, user_name, public_dns)
else:
meta_option = ""
cmd = 'scp -i %s %s %s %s %s@%s:' % (ssh_key, umbrella_fullpath, spec_path, input_file_string, user_name, public_dns)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
#change the --inputs option to put all the inputs directory in the home dir of the instance
new_input_options = ''
if len(input_list_origin) > 0:
logging.debug("change the --inputs option to put all the inputs directory in the home dir of the instance")
logging.debug("Transform input_list_origin ....")
new_input_options = " -i '"
for item in input_list_origin:
index_equal = item.find('=')
access_path = item[:index_equal]
actual_path = item[(index_equal+1):]
new_input_options += '%s=%s,' % (access_path, os.path.basename(actual_path))
if new_input_options[-1] == ',':
new_input_options = new_input_options[:-1]
new_input_options += "'"
logging.debug("The new_input_options of Umbrella: %s", new_input_options) #--inputs option
#find cctools_python
cmd = 'which cctools_python'
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
cctools_python_path = stdout[:-1]
#cvmfs_http_proxy
cvmfs_http_proxy_option = ''
if cvmfs_http_proxy:
cvmfs_http_proxy_option = '--cvmfs_http_proxy %s' % cvmfs_http_proxy
#execute the command on the instance
print "Executing the user's task on the EC2 instance ..."
logging.debug("Execute the command on the instance ...")
ec2_output_option = ""
if output_option:
ec2_output_option = " -o '%s'" % output_option
cmd = 'ssh -t -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i %s %s@%s "sudo %s/bin/python ~%s/umbrella %s -s destructive --spec ~%s/%s %s --log ~%s/ec2_umbrella.log -l ec2_umbrella %s %s %s run \'%s\'"' % (ssh_key, user_name, public_dns, python_name, user_name, cvmfs_http_proxy_option, user_name, os.path.basename(spec_path), meta_option, user_name, ec2_output_option, new_input_options, env_option, user_cmd[0])
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
#postprocessing
print "Transferring the output of the user's task from the EC2 instance back to the local machine ..."
logging.debug("Create a tarball for the output dir on the instance.")
output = '%s %s' % (' '.join(output_f_dict.values()), ' '.join(output_d_dict.values()))
cmd = 'ssh -t -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i %s %s@%s \'sudo tar cvzf ~%s/output.tar.gz %s && sudo chown %s:%s ~%s/output.tar.gz ~%s/ec2_umbrella.log\'' % (ssh_key, user_name, public_dns, user_name, output, user_name, user_name, user_name, user_name)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
logging.debug("The instance returns the output.tar.gz to the local machine.")
cmd = 'scp -i %s %s@%s:output.tar.gz %s/' % (ssh_key, user_name, public_dns, sandbox_dir)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
logging.debug("The instance returns the remote umbrella log file to the local machine.")
cmd = 'scp -i %s %s@%s:ec2_umbrella.log %s' % (ssh_key, user_name, public_dns, ec2log_path)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
cmd = 'tar zxvf %s/output.tar.gz -C /' % (sandbox_dir)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
print "Terminating the EC2 instance ..."
terminate_instance(instance_id)
def obtain_package(spec_json):
"""Check whether this spec includes a package_manager section, which in turn includes a list attr.
Args:
spec_json: the json object including the specification.
Returns:
if a package list is specified in the spec_json, return the package manager name and a list of the required package name.
Otherwise, return None
"""
if spec_json.has_key("package_manager") and spec_json["package_manager"]:
if spec_json["package_manager"].has_key("name") and spec_json["package_manager"].has_key("list"):
pac_name = spec_json["package_manager"]["name"]
pac_str = spec_json["package_manager"]["list"]
pac_list = pac_str.split()
pac_list.sort()
if len(pac_list) > 0:
if len(pac_name) == 0:
logging.critical("The spec does not specify which package manager to use\n")
sys.exit("The spec does not specify which package manager to use\n")
else:
return (pac_name, pac_list)
return (None, None)
def cal_new_os_id(sec, old_os_id, pac_list):
"""Calculate the id of the new OS based on the old_os_id and the package_manager section
Args:
sec: the json object including the package_manager section.
old_os_id: the id of the original os image without any info about package manager.
pac_list: a list of the required package name.
Returns:
md5_value: the md5 value of the string constructed from binding old_os_id and information from the package_manager section.
install_cmd: the package install cmd, such as: yum -y install python
"""
pm_name = attr_check("os", sec, "name")
cmd = pm_name + " " + pac_manager[pm_name][0] + " " + ' '.join(pac_list)
install_cmd = []
install_cmd.append(cmd)
pac_str = ''.join(pac_list)
config_str = ''
if sec.has_key("config") and sec["config"]:
l = []
for item in sec["config"]:
id_attr = sec["config"][item]["id"]
l.append(id_attr)
l.sort()
config_str = ''.join(l)
data = old_os_id + pm_name + pac_str + config_str
md5 = hashlib.md5()
md5.update(data)
md5_value = md5.hexdigest()
return (md5_value, install_cmd)
def specification_process(spec_json, sandbox_dir, behavior, meta_json, sandbox_mode, output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, cwd_setting, cvmfs_http_proxy, osf_auth):
""" Create the execution environment specified in the specification file and run the task on it.
Args:
spec_json: the json object including the specification.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
behavior: the umbrella behavior, such as `run`.
meta_json: the json object including all the metadata of dependencies.
sandbox_mode: the execution engine.
output_f_dict: the mappings of output files (key is the file path used by the application; value is the file path the user specifies.)
output_d_dict: the mappings of output dirs (key is the dir path used by the application; value is the dir path the user specified.)
input_dict: the setting of input files specified by the --inputs option.
env_para_dict: the environment variables which need to be set for the execution of the user's command.
user_cmd: the user's command.
cwd_setting: the current working directory for the execution of the user's command.
cvmfs_http_proxy: HTTP_PROXY environment variable used to access CVMFS by Parrot
osf_auth: the osf authentication info including osf_username and osf_password.
Returns:
None.
"""
print "Checking the validity of the umbrella specification ..."
if spec_json.has_key("hardware") and spec_json["hardware"] and spec_json.has_key("kernel") and spec_json["kernel"] and spec_json.has_key("os") and spec_json["os"]:
logging.debug("Setting the environment parameters (hardware, kernel and os) according to the specification file ....")
(hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version, linux_distro, distro_name, distro_version, os_id) = env_parameter_init(spec_json["hardware"], spec_json["kernel"], spec_json["os"])
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("this specification is not complete! You must have a hardware section, a kernel section and a os section!")
sys.exit("this specification is not complete! You must have a hardware section, a kernel section and a os section!\n")
host_linux_distro = env_check(sandbox_dir, sandbox_mode, hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version)
#check os
need_separate_rootfs = 0
os_image_dir = ''
if os_id == "":
if sandbox_mode in ["docker"]:
cleanup(tempfile_list, tempdir_list)
logging.critical("the specification does not provide a concrete OS image, but docker execution engine needs a specific OS image!")
sys.exit("the specification does not provide a concrete OS image, but docker execution engine needs a specific OS image!\n")
if linux_distro != host_linux_distro:
cleanup(tempfile_list, tempdir_list)
logging.critical("the specification does not provide a concrete OS image, and the OS image of the local machine does not matching the requirement!")
sys.exit("the specification does not provide a concrete OS image, and the OS image of the local machine does not matching the requirement!\n")
else:
logging.debug("the specification does not provide a concrete OS image, but the OS image of the local machine matches the requirement!")
print "the specification does not provide a concrete OS image, but the OS image of the local machine matches the requirement!\n"
else:
need_separate_rootfs = 1
#check for dependencies which need to be installed by package managers
(pac_name, pac_list) = obtain_package(spec_json)
if pac_list:
logging.debug("The spec needs to use %s install packages.", pac_name)
print "The spec needs to use %s install packages." % pac_name
if sandbox_mode in ["parrot"]:
cleanup(tempfile_list, tempdir_list)
logging.critical("Installing packages through package managers requires the root authority! Please choose a different sandbox mode (docker or destructive)!")
sys.exit("Installing packages through package managers requires the root authority! Please choose a different sandbox mode(docker or destructive)!")
mount_dict = {}
cvmfs_cms_siteconf_mountpoint = ''
host_cctools_path = '' #the path of the cctools binary which is compatible with the host machine under the umbrella cache
if sandbox_mode in ["parrot"]:
logging.debug("To use parrot sandbox mode, cctools binary is needed")
host_cctools_path = cctools_download(sandbox_dir, hardware_platform, host_linux_distro, 'unpack')
logging.debug("Add mountpoint (%s:%s) into mount_dict", host_cctools_path, host_cctools_path)
mount_dict[host_cctools_path] = host_cctools_path
parrotize_user_cmd(user_cmd, sandbox_dir, cwd_setting, host_linux_distro, hardware_platform, meta_json, cvmfs_http_proxy)
item = '%s-%s-%s' % (distro_name, distro_version, hardware_platform) #example of item here: redhat-6.5-x86_64
if need_separate_rootfs and sandbox_mode not in ["destructive"]:
#download the os dependency into the local
os_image_dir = "%s/cache/%s/%s" % (os.path.dirname(sandbox_dir), os_id, item)
logging.debug("A separate OS (%s) is needed!", os_image_dir)
mountpoint = '/'
action = 'unpack'
r3 = dependency_process(item, os_id, action, meta_json, sandbox_dir, osf_auth)
logging.debug("Add mountpoint (%s:%s) into mount_dict for /.", mountpoint, r3)
mount_dict[mountpoint] = r3
#check for cvmfs dependency
is_cms_cvmfs_app = 0
cvmfs_path = ""
cvmfs_mountpoint = ""
result = needCVMFS(spec_json, meta_json)
if result:
(cvmfs_path, cvmfs_mountpoint) = result
if cvmfs_path:
logging.debug("cvmfs is needed! (%s)", cvmfs_path)
print "cvmfs is needed! (%s)" % cvmfs_path
cvmfs_ready = False
if need_separate_rootfs:
os_cvmfs_path = "%s%s" % (os_image_dir, cvmfs_mountpoint)
if os.path.exists(os_cvmfs_path) and os.path.isdir(os_cvmfs_path):
cvmfs_ready = True
logging.debug("The os image has /cvmfs/cms.cern.ch!")
print "The os image has /cvmfs/cms.cern.ch!"
if not cvmfs_ready:
local_cvmfs = ""
local_cvmfs = check_cvmfs_repo(cvmfs_path[7:])
if len(local_cvmfs) > 0:
mount_dict[cvmfs_mountpoint] = local_cvmfs
logging.debug("The cvmfs is installed on the local host, and its mountpoint is: %s", local_cvmfs)
print "The cvmfs is installed on the local host, and its mountpoint is: %s" % local_cvmfs
else:
logging.debug("The cvmfs is not installed on the local host.")
print "The cvmfs is not installed on the local host."
if cvmfs_path.find("cms.cern.ch") != -1:
is_cms_cvmfs_app = 1 #cvmfs is needed to deliver cms.cern.ch repo, and the local host has no cvmfs installed.
if not cvmfs_http_proxy or len(cvmfs_http_proxy) == 0:
cleanup(tempfile_list, tempdir_list)
logging.debug("Access CVMFS through Parrot requires the --cvmfs_http_proxy of umbrella to be set.")
sys.exit("Access CVMFS through Parrot requires the --cvmfs_http_proxy of umbrella to be set.")
#currently, if the logic reaches here, only parrot execution engine is allowed.
cvmfs_cms_siteconf_mountpoint = set_cvmfs_cms_siteconf(sandbox_dir)
#add cvmfs SITEINFO into mount_dict
if sandbox_mode == "docker":
list1 = cvmfs_cms_siteconf_mountpoint.split(' ')
logging.debug("Add mountpoint (%s:%s) into mount_dict for cvmfs SITEINFO", list1[0], list1[1])
mount_dict[list1[0]] = list1[1]
if sandbox_mode != "parrot":
logging.debug("To use parrot to access cvmfs, cctools binary is needed")
host_cctools_path = cctools_download(sandbox_dir, hardware_platform, linux_distro, 'unpack')
logging.debug("Add mountpoint (%s:%s) into mount_dict", host_cctools_path, host_cctools_path)
mount_dict[host_cctools_path] = host_cctools_path
parrotize_user_cmd(user_cmd, sandbox_dir, cwd_setting, linux_distro, hardware_platform, meta_json, cvmfs_http_proxy)
if need_separate_rootfs:
new_os_image_dir = ""
#if some packages from package managers are needed, create an intermediate os image with all the packages ready.
if pac_list:
new_sw_sec = spec_json["package_manager"]["config"]
(new_os_id, pm_cmd) = cal_new_os_id(spec_json["package_manager"], os_id, pac_list)
new_os_image_dir = "%s/cache/%s/%s" % (os.path.dirname(sandbox_dir), new_os_id, item)
logging.debug("Installing the package into the image (%s), and create a new image: %s ...", os_image_dir, new_os_image_dir)
if os.path.exists(new_os_image_dir) and os.path.isdir(new_os_image_dir):
logging.debug("the new os image already exists!")
#use the intermediate os image which has all the dependencies from the package manager ready as the os image
os_image_dir = new_os_image_dir
os_id = new_os_id
pass
else:
logging.debug("the new os image does not exist!")
new_env_para_dict = {}
#install dependency specified in the spec_json["package_manager"]["config"] section
logging.debug('Install dependency specified in the spec_json["package_manager"]["config"] section.')
if sandbox_mode == "destructive":
software_install(mount_dict, new_env_para_dict, new_sw_sec, meta_json, sandbox_dir, 1, osf_auth)
#install dependencies through package managers
rc, stdout, stderr = func_call(pm_cmd)
if rc != 0:
subprocess_error(pm_cmd, rc, stdout, stderr)
else:
software_install(mount_dict, new_env_para_dict, new_sw_sec, meta_json, sandbox_dir, 0, osf_auth)
#install dependencies through package managers
logging.debug("Create an intermediate OS image with all the dependencies from package managers ready!")
workflow_repeat(cwd_setting, sandbox_dir, sandbox_mode, output_f_dict, output_d_dict, input_dict, env_para_dict, pm_cmd, hardware_platform, host_linux_distro, distro_name, distro_version, need_separate_rootfs, os_image_dir, os_id, host_cctools_path, cvmfs_cms_siteconf_mountpoint, mount_dict, mount_dict, meta_json, new_os_image_dir)
logging.debug("Finishing creating the intermediate OS image!")
#use the intermediate os image which has all the dependencies from the package manager ready as the os image
os_image_dir = new_os_image_dir
os_id = new_os_id
if spec_json.has_key("software") and spec_json["software"]:
software_install(mount_dict, env_para_dict, spec_json["software"], meta_json, sandbox_dir, 0, osf_auth)
else:
logging.debug("this spec does not have software section!")
software_install(mount_dict, env_para_dict, "", meta_json, sandbox_dir, 0, osf_auth)
sw_mount_dict = dict(mount_dict) #sw_mount_dict will be used later to config the $PATH
if spec_json.has_key("data") and spec_json["data"]:
data_install(spec_json["data"], meta_json, sandbox_dir, mount_dict, env_para_dict, osf_auth)
else:
logging.debug("this spec does not have data section!")
workflow_repeat(cwd_setting, sandbox_dir, sandbox_mode, output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, hardware_platform, host_linux_distro, distro_name, distro_version, need_separate_rootfs, os_image_dir, os_id, host_cctools_path, cvmfs_cms_siteconf_mountpoint, mount_dict, sw_mount_dict, meta_json, "")
def dependency_check(item):
"""Check whether an executable exists or not.
Args:
item: the name of the executable to be found.
Returns:
If the executable can be found through $PATH, return 0;
Otherwise, return -1.
"""
print "dependency check -- ", item, " "
result = which_exec(item)
if result == None:
logging.debug("Failed to find the executable `%s` through $PATH.", item)
print "Failed to find the executable `%s` through $PATH." % item
return -1
else:
logging.debug("Find the executable `%s` through $PATH.", item)
print "Find the executable `%s` through $PATH." % item
return 0
def get_instance_id(image_id, instance_type, ec2_key_pair, ec2_security_group):
""" Start one VM instance through Amazon EC2 command line interface and return the instance id.
Args:
image_id: the Amazon Image Identifier.
instance_type: the Amazon EC2 instance type used for the task.
ec2_key_pair: the path of the key-pair to use when launching an instance.
ec2_security_group: the security group within which the EC2 instance should be run.
Returns:
If no error happens, returns the id of the started instance.
Otherwise, directly exit.
"""
sg_option = ''
if ec2_security_group:
sg_option = ' -g ' + ec2_security_group
cmd = 'ec2-run-instances %s -t %s -k %s %s --associate-public-ip-address true' % (image_id, instance_type, ec2_key_pair, sg_option)
logging.debug("Starting an instance: %s", cmd)
p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = p.communicate()
rc = p.returncode
logging.debug("returncode: %d\nstdout: %s\nstderr: %s", rc, stdout, stderr)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
str = "\nINSTANCE"
index = stdout.find(str)
if index == -1:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fail to get the instance id!")
else:
instance_id = stdout[(index+9):(index+20)]
return instance_id
def terminate_instance(instance_id):
"""Terminate an instance.
Args:
instance_id: the id of the VM instance.
Returns:
None.
"""
logging.debug("Terminate the ec2 instance: %s", instance_id)
cmd = 'ec2-terminate-instances %s' % instance_id
p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True)
def get_public_dns(instance_id):
"""Get the public dns of one VM instance from Amazon EC2.
`ec2-run-instances` can not directly return the public dns of the instance, so this function is needed to check the result of `ec2-describe-instances` to obtain the public dns of the instance.
Args:
instance_id: the id of the VM instance.
Returns:
If no error happens, returns the public dns of the instance.
Otherwise, directly exit.
"""
public_dns = ''
while public_dns == None or public_dns == '' or public_dns == 'l':
cmd = 'ec2-describe-instances ' + instance_id
p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = p.communicate()
rc = p.returncode
logging.debug("returncode: %d\nstdout: %s\nstderr: %s", rc, stdout, stderr)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
str = "\nPRIVATEIPADDRESS"
index = stdout.find(str)
if index >= 0:
index1 = stdout.find("ec2", index + 1)
if index1 == -1:
time.sleep(5)
continue
public_dns = stdout[index1:-1]
break
return public_dns
def add2spec(item, source_dict, target_dict):
"""Abstract the metadata information (source format checksum size) from source_dict (metadata database) and add these information into target_dict (umbrella spec).
For any piece of metadata information, if it already exists in target_dict, do nothing; otherwise, add it into the umbrella spec.
Args:
item: the name of a dependency
source_dict: fragment of an Umbrella metadata database
target_dict: fragment of an Umbrella specification
Returns:
None
"""
#item must exist inside target_dict.
ident = None
if source_dict.has_key("checksum"):
checksum = source_dict["checksum"]
ident = checksum
if not target_dict.has_key("id"):
target_dict["id"] = ident
if not target_dict.has_key("checksum"):
target_dict["checksum"] = source_dict["checksum"]
if source_dict.has_key("source"):
if len(source_dict["source"]) == 0:
cleanup(tempfile_list, tempdir_list)
logging.critical("the source attribute of %s can not be empty!" % item)
sys.exit("the source attribute of %s can not be empty!" % item)
else:
source = source_dict["source"][0]
#if checksum is not provided in source_dict, the first url in the source section will be set to the ident.
if not ident and not target_dict.has_key("id"):
target_dict["id"] = source
if not target_dict.has_key("source"):
target_dict["source"] = list(source_dict["source"])
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s does not have source attribute in the umbrella metadata database!", item)
sys.exit("%s does not have source attribute in the umbrella metadata database!" % item)
if source_dict.has_key("format") and not target_dict.has_key("format"):
target_dict["format"] = source_dict["format"]
if source_dict.has_key("size") and not target_dict.has_key("size"):
target_dict["size"] = source_dict["size"]
if source_dict.has_key("uncompressed_size") and not target_dict.has_key("uncompressed_size"):
target_dict["uncompressed_size"] = source_dict["uncompressed_size"]
def add2db(item, source_dict, target_dict):
"""Add the metadata information (source format checksum size) about item from source_dict (umbrella specification) to target_dict (metadata database).
The item can be identified through two mechanisms: checksum attribute or one source location, which is used when checksum is not applicable for this item.
If the item has been in the metadata database, do nothing; otherwise, add it, together with its metadata, into the metadata database.
Args:
item: the name of a dependency
source_dict: fragment of an Umbrella specification
target_dict: fragment of an Umbrella metadata database
Returns:
None
"""
if not item in target_dict:
target_dict[item] = {}
ident = None
if source_dict.has_key("checksum"):
checksum = source_dict["checksum"]
if target_dict[item].has_key(checksum):
logging.debug("%s has been inside the metadata database!", item)
return
ident = checksum
target_dict[item][ident] = {}
target_dict[item][ident]["checksum"] = source_dict["checksum"]
if source_dict.has_key("source"):
if len(source_dict["source"]) == 0:
cleanup(tempfile_list, tempdir_list)
logging.critical("the source attribute of %s can not be empty!" % item)
sys.exit("the source attribute of %s can not be empty!" % item)
else:
source = source_dict["source"][0]
if target_dict[item].has_key(source):
logging.debug("%s has been inside the metadata database!", item)
return
#if checksum is not provided in source_dict, the first url in the source section will be set to the ident.
if not ident:
ident = source
target_dict[item][ident] = {}
target_dict[item][ident]["source"] = list(source_dict["source"])
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s does not have source attribute in the umbrella specification!", item)
sys.exit("%s does not have source attribute in the umbrella specification!" % item)
if source_dict.has_key("format"):
target_dict[item][ident]["format"] = source_dict["format"]
if source_dict.has_key("size"):
target_dict[item][ident]["size"] = source_dict["size"]
if source_dict.has_key("uncompressed_size"):
target_dict[item][ident]["uncompressed_size"] = source_dict["uncompressed_size"]
def prune_attr(dict_item, attr_list):
"""Remove certain attributes from a dict.
If a specific attribute does not exist, pass.
Args:
dict_item: a dict
attr_list: a list of attributes which will be removed from the dict.
Returns:
None
"""
for item in attr_list:
if dict_item.has_key(item):
del dict_item[item]
def prune_spec(json_object):
"""Remove the metadata information from a json file (which represents an umbrella specification).
Note: the original json file will not be changed by this function.
Args:
json_object: a json file representing an umbrella specification
Returns:
temp_json: a new json file without metadata information
"""
logging.debug("Remove the metadata information from %s.\n", json_object)
temp_json = dict(json_object)
attr_list = ["source", "checksum", "format", "size", "uncompressed_size"]
if temp_json.has_key("os"):
os_sec = temp_json["os"]
if os_sec:
prune_attr(os_sec, attr_list)
if temp_json.has_key("package_manager") and temp_json["package_manager"] \
and temp_json["package_manager"].has_key("config") and temp_json["package_manager"]["config"]:
pm_config_sec = temp_json["package_manager"]["config"]
if pm_config_sec:
for item in pm_config_sec:
prune_attr(pm_config_sec[item], attr_list)
if temp_json.has_key("software"):
software_sec = temp_json["software"]
if software_sec:
for item in software_sec:
prune_attr(software_sec[item], attr_list)
if temp_json.has_key("data"):
data_sec = temp_json["data"]
if data_sec:
for item in data_sec:
prune_attr(data_sec[item], attr_list)
return temp_json
def abstract_metadata(spec_json, meta_path):
"""Abstract metadata information from a self-contained umbrella spec into a metadata database.
Args:
spec_json: a dict including the contents from a json file
meta_path: the path of the metadata database.
Returns:
If the umbrella spec is not complete, exit directly.
Otherwise, return None.
"""
hardware_sec = attr_check("hardware", spec_json, "hardware")
hardware_arch = attr_check("hardware", hardware_sec, "arch")
metadata = {}
os_sec = attr_check("os", spec_json, "os")
os_name = attr_check("os", os_sec, "name")
os_version = attr_check("os", os_sec, "version")
os_item = "%s-%s-%s" % (os_name, os_version, hardware_arch)
os_item = os_item.lower()
add2db(os_item, os_sec, metadata)
if spec_json.has_key("package_manager") and spec_json["package_manager"] \
and spec_json["package_manager"].has_key("config") and spec_json["package_manager"]["config"]:
pm_config_sec = spec_json["package_manager"]["config"]
if pm_config_sec:
for item in pm_config_sec:
add2db(item, pm_config_sec[item], metadata)
if spec_json.has_key("software"):
software_sec = spec_json["software"]
if software_sec:
for item in software_sec:
add2db(item, software_sec[item], metadata)
if spec_json.has_key("data"):
data_sec = spec_json["data"]
if data_sec:
for item in data_sec:
add2db(item, data_sec[item], metadata)
with open(meta_path, 'w') as f:
json.dump(metadata, f, indent=4)
logging.debug("dump the metadata information from the umbrella spec to %s" % meta_path)
print "dump the metadata information from the umbrella spec to %s" % meta_path
def needCVMFS(spec_json, meta_json):
"""For each dependency in the spec_json, check whether cvmfs is needed to deliver it.
Args:
spec_json: the json object including the specification.
meta_json: the json object including all the metadata of dependencies.
Returns:
if cvmfs is needed, return the cvmfs url. Otherwise, return None
"""
for sec_name in ["software", "data", "package_manager"]:
if spec_json.has_key(sec_name) and spec_json[sec_name]:
sec = spec_json[sec_name]
if sec_name == "package_manager":
if sec.has_key("config") and sec["config"]:
sec = sec["config"]
else:
logging.debug("%s does not have config attribute!", sec_name)
break
for item in sec:
item_id = ""
if sec[item].has_key("id") and len(sec[item]["id"]) > 0:
item_id = sec[item]["id"]
mountpoint = sec[item]["mountpoint"]
result = meta_search(meta_json, item, item_id)
if result.has_key("source") and len(result["source"]) > 0:
url = result["source"][0]
if url[:5] == "cvmfs":
return (url, mountpoint)
return None
def cleanup(filelist, dirlist):
"""Cleanup the temporary files and dirs created by umbrella
Args:
filelist: a list including file paths
dirlist: a list including dir paths
Returns:
None
"""
#cleanup the temporary files
for item in filelist:
if os.path.exists(item):
logging.debug("cleanup temporary file: %s", item)
print "cleanup temporary file: ", item
os.remove(item)
#cleanup the temporary dirs
for item in dirlist:
if os.path.exists(item):
logging.debug("cleanup temporary dir: %s", item)
print "cleanup temporary dir: ", item
shutil.rmtree(item)
def separatize_spec(spec_json, meta_json, target_type):
"""Given an umbrella specification and an umbrella metadata database, generate a self-contained umbrella specification or a metadata database only including the informationnecessary for the umbrella spec.
If the target_type is spec, then generate a self-contained umbrella specification.
If the target_type is meta, then generate a metadata database only including the information necessary for the umbrella spec.
Args:
spec_json: the json object including the specification.
meta_json: the json object including all the metadata of dependencies.
target_type: the type of the target json file, which can be an umbrella spec or an umbrella metadata db.
Returns:
metadata: a json object
"""
#pull the metadata information of the spec from the metadata db to the spec
if target_type == "spec":
metadata = dict(spec_json)
#pull the metadata information of the spec from the metadata db into a separate db
if target_type == "meta":
metadata = {}
hardware_sec = attr_check("hardware", spec_json, "hardware")
hardware_arch = attr_check("hardware", hardware_sec, "arch")
os_sec = attr_check("os", spec_json, "os")
os_name = attr_check("os", os_sec, "name")
os_version = attr_check("os", os_sec, "version")
os_item = "%s-%s-%s" % (os_name, os_version, hardware_arch)
os_item = os_item.lower()
ident = None
if os_sec.has_key("id"):
ident = os_sec["id"]
source = meta_search(meta_json, os_item, ident)
if target_type == "spec":
add2spec(os_item, source, metadata["os"])
if target_type == "meta":
add2db(os_item, source, metadata)
if spec_json.has_key("package_manager") and spec_json["package_manager"] \
and spec_json["package_manager"].has_key("config") and spec_json["package_manager"]["config"]:
pm_config_sec = spec_json["package_manager"]["config"]
if pm_config_sec:
for item in pm_config_sec:
ident = None
if pm_config_sec[item].has_key("id"):
ident = pm_config_sec[item]["id"]
source = meta_search(meta_json, item, ident)
if target_type == "spec":
add2spec(os_item, source, metadata["package_manager"]["config"][item])
if target_type == "meta":
add2db(item, source, metadata)
if spec_json.has_key("software"):
software_sec = spec_json["software"]
if software_sec:
for item in software_sec:
ident = None
if software_sec[item].has_key("id"):
ident = software_sec[item]["id"]
source = meta_search(meta_json, item, ident)
if target_type == "spec":
add2spec(os_item, source, metadata["software"][item])
if target_type == "meta":
add2db(item, source, metadata)
if spec_json.has_key("data"):
data_sec = spec_json["data"]
if data_sec:
for item in data_sec:
ident = None
if data_sec[item].has_key("id"):
ident = data_sec[item]["id"]
source = meta_search(meta_json, item, ident)
if target_type == "spec":
add2spec(os_item, source, metadata["data"][item])
if target_type == "meta":
add2db(item, source, metadata)
return metadata
def json2file(filepath, json_item):
"""Write a json object into a file
Args:
filepath: a file path
json_item: a dict representing a json object
Returns:
None
"""
with open(filepath, 'w') as f:
json.dump(json_item, f, indent=4)
logging.debug("dump a json object from the umbrella spec to %s" % filepath)
print "dump a json object from the umbrella spec to %s" % filepath
def path_exists(filepath):
"""Check the validity and existence of a file path.
Args:
filepath: a file path
Returns:
Exit directly if any error happens.
Otherwise, returns None.
"""
logging.debug("Checking file path: %s", filepath)
if os.path.exists(filepath):
cleanup(tempfile_list, tempdir_list)
logging.debug("The file (%s) already exists, please specify a new path!", filepath)
sys.exit("The file (%s) already exists, please specify a new path!" % filepath)
def dir_create(filepath):
"""Create the directory for it if necessary. If the file already exists, exit directly.
Args:
filepath: a file path
Returns:
Exit directly if any error happens.
Otherwise, returns None.
"""
dirpath = os.path.dirname(filepath)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
else:
if not os.path.isdir(dirpath):
cleanup(tempfile_list, tempdir_list)
logging.debug("The basename of the file (%s) is not a directory!\n", dirpath)
sys.exit("The basename of the file (%s) is not a directory!\n" % dirpath)
def validate_meta(meta_json):
"""Validate a metadata db.
The current standard for a valid metadata db is: for each item, the "source" attribute must exist and not be empty.
Args:
meta_json: a dict object representing a metadata db.
Returns:
If error happens, return directly with the error info.
Otherwise, None.
"""
logging.debug("Starting validating the metadata db ....\n")
print "Starting validating the metadata db ...."
for name in meta_json:
for ident in meta_json[name]:
logging.debug("check for %s with the id of %s ...", name, ident)
print "check for %s with the id of %s ..." % (name, ident)
attr_check(name, meta_json[name][ident], "source", 1)
logging.debug("Finish validating the metadata db ....\n")
print "Finish validating the metadata db successfully!"
def validate_spec(spec_json, meta_json = None):
"""Validate a spec_json.
Args:
spec_json: a dict object representing a specification.
meta_json: a dict object representing a metadata db.
Returns:
If error happens, return directly with the error info.
Otherwise, None.
"""
logging.debug("Starting validating the spec file ....\n")
print "Starting validating the spec file ...."
#validate the following three sections: hardware, kernel and os.
env_parameter_init(spec_json["hardware"], spec_json["kernel"], spec_json["os"])
for sec_name in ["software", "data", "package_manager"]:
if spec_json.has_key(sec_name) and spec_json[sec_name]:
sec = spec_json[sec_name]
if sec_name == "package_manager":
if sec.has_key("config") and sec["config"]:
sec = sec["config"]
else:
logging.debug("%s does not have config attribute!", sec_name)
break
for item in sec:
if (sec[item].has_key("mountpoint") and sec[item]["mountpoint"]) \
or (sec[item].has_key("mount_env") and sec[item]["mount_env"]):
pass
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s in the %s section should have either <mountpoint> or <mount_env>!\n", item, sec_name)
sys.exit("%s in the %s section should have either <mountpoint> or <mount_env>!\n" % (item, sec_name))
if sec[item].has_key("source") and len(sec[item]["source"]) > 0:
pass
else:
if meta_json:
ident = None
if sec[item].has_key("id"):
ident = sec[item]["id"]
result = meta_search(meta_json, item, ident)
if result.has_key("source") and len(result["source"]) > 0:
pass
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s in the metadata db should have <source> attr!\n", item)
sys.exit("%s in the metadata db should have <source> attr!\n", item)
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s in the %s section should have <source> attr!\n", item, sec_name)
sys.exit("%s in the %s section should have <source> attr!\n" % (item, sec_name))
logging.debug("Finish validating the spec file ....\n")
print "Finish validating the spec file successfully!"
def osf_create(username, password, user_id, proj_name, is_public):
"""Create an OSF project, and return the project id.
Args:
username: an OSF username
password: an OSF password
user_id: the id of an OSF user
proj_name: the name of the OSF project
is_public: set to 1 if the project is public; set to 0 if the project is private.
Returns:
the id of the OSF project
"""
#first check whether the user already has an existing OSF project having the same name
url="https://api.osf.io:443/v2/users/%s/nodes/" % user_id
nodes=set()
#the response results are split into pages, and each page has 10 items.
while url:
r=requests.get(url)
if r.status_code != 200:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to check the projects contributed by the user (%d): %s!" % (r.status_code, r.reason))
for data in r.json()['data']:
nodes.add(data['attributes']['title'])
url=r.json()['links']['next']
if proj_name in nodes:
cleanup(tempfile_list, tempdir_list)
sys.exit("The project name (%s) already exists!" % proj_name)
#create the new project
auth = (username, password)
payload = {
"type": "nodes",
"title": proj_name,
"category": "project",
"public": is_public
}
url="https://api.osf.io:443/v2/nodes/"
r=requests.post(url, auth=auth, data=payload)
if r.status_code != 201:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to create the new project (%d): %s!" % (r.status_code, r.reason))
proj_id = r.json()['data']['id']
return proj_id
def osf_upload(username, password, proj_id, source):
"""upload a file from source into the OSF project identified by proj_id.
Args:
username: an OSF username
password: an OSF password
proj_id: the id of the OSF project
source: a file path
Returns:
the OSF download url of the uploaded file
"""
print "Upload %s to OSF ..." % source
logging.debug("Upload %s to OSF ...",source)
url="https://files.osf.io/v1/resources/%s/providers/osfstorage/" % proj_id
payload = {"kind":"file", "name":os.path.basename(source)}
auth = (username, password)
f=open(source, 'rb')
r=requests.put(url, params=payload, auth = auth, data=f)
if r.status_code != 201 and r.status_code != 200:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to upload the file %s to OSF(%d): %s!" % (source, r.status_code, r.reason))
return r.json()['data']['links']['download']
def osf_download(username, password, osf_url, dest):
"""download a file pointed by an OSF url to dest.
Args:
username: an OSF username
password: an OSF password
osf_url: the OSF download url
dest: the destination of the OSF file
Returns:
If the osf_url is downloaded successfully, return None;
Otherwise, directly exit.
"""
if not found_requests:
cleanup(tempfile_list, tempdir_list)
logging.critical("\nDownloading private stuff from OSF requires a python package - requests. Please check the installation page of requests:\n\n\thttp://docs.python-requests.org/en/latest/user/install/\n")
sys.exit("\nDownloading private stuff from OSF requires a python package - requests. Please check the installation page of requests:\n\n\thttp://docs.python-requests.org/en/latest/user/install/\n")
print "Download %s from OSF to %s" % (osf_url, dest)
logging.debug("Download %s from OSF to %s", osf_url, dest)
word = 'resources'
proj_id = osf_url[(osf_url.index(word) + len(word) + 1):(osf_url.index(word) + len(word) + 6)]
url="https://api.osf.io:443/v2/nodes/%s/" % proj_id
r=requests.get(url)
r2 = None
if r.status_code == 401:
if username == None or password == None:
cleanup(tempfile_list, tempdir_list)
sys.exit("The OSF resource (%s) is private (%d): %s! To use the OSF resource, you need to provide a legal OSF username and password." % (url, r.status_code, r.reason))
auth = (username, password)
r1=requests.get(url, auth=auth)
if r1.status_code != 200:
cleanup(tempfile_list, tempdir_list)
sys.exit("The OSF resource (%s) is private (%d): %s! The username or password is incorrect!" % (url, r1.status_code, r1.reason))
else:
r2=requests.get(osf_url, auth=auth, stream=True)
else:
r2=requests.get(osf_url, stream=True)
if r2.status_code != 200:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to download the osf resource: %s (%d): %s!" % (r2.status_code, r2.reason))
chunk_size=10240
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
with open(dest, 'wb') as fd:
for chunk in r2.iter_content(chunk_size):
fd.write(chunk)
def s3_create(bucket_name, acl):
"""Create a s3 bucket
Args:
bucket_name: the bucket name
acl: the access control, which can be: private, public-read
Returns:
bucket: an S3.Bucket instance
"""
#create the connection with s3
s3 = boto3.resource('s3')
#list all the bucket names
buckets = set()
try:
for bucket in s3.buckets.all():
buckets.add(bucket.name)
except botocore.exceptions.ClientError as e:
cleanup(tempfile_list, tempdir_list)
sys.exit(e.message)
except Exception as e:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to list all the current buckets: %s!" % e)
#check whether the bucket name already exists
if bucket_name in buckets:
cleanup(tempfile_list, tempdir_list)
sys.exit("The bucket name (%s) already exists!" % bucket_name)
#create a new bucket
try:
s3.create_bucket(Bucket=bucket_name)
except Exception as e:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to create the new bucket (%s): %s!" % (bucket_name, e))
#obtain the created bucket
bucket = s3.Bucket(bucket_name)
#set access control
#ACL can be one of these options: 'private'|'public-read'|'public-read-write'|'authenticated-read'
#for now, when a user uses Umbrella to upload to s3, the acl can only be private or public-read.
try:
bucket.Acl().put(ACL=acl)
except botocore.exceptions.ClientError as e:
cleanup(tempfile_list, tempdir_list)
sys.exit(e.message)
except Exception as e:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to list all the current buckets: %s!" % e)
return bucket
def s3_upload(bucket, source, acl):
"""Upload a local file to s3
Args:
bucket: an S3.Bucket instance
source: the local file path
acl: the access control, which can be: private, public-read
Returns:
link: the link of a s3 object
"""
print "Upload %s to S3 ..." % source
logging.debug("Upload %s to S3 ...", source)
key = os.path.basename(source)
data = open(source, 'rb')
try:
#acl on the bucket does not automatically apply to all the objects in it. Acl must be set on each object.
bucket.put_object(ACL=acl, Key=key, Body=data) #https://s3.amazonaws.com/testhmeng/s3
except botocore.exceptions.ClientError as e:
cleanup(tempfile_list, tempdir_list)
sys.exit(e.message)
except Exception as e:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to upload the file (%s) to S3: %s!" % (source, e))
return "%s/%s/%s" % (s3_url, bucket.name, key)
def s3_download(link, dest):
"""Download a s3 file to dest
Args:
link: the link of a s3 object. e.g., https://s3.amazonaws.com/testhmeng/s3
dest: a local file path
Returns:
None
"""
if not found_boto3 or not found_botocore:
cleanup(tempfile_list, tempdir_list)
logging.critical("\nUploading umbrella spec dependencies to s3 requires a python package - boto3. Please check the installation page of boto3:\n\n\thttps://boto3.readthedocs.org/en/latest/guide/quickstart.html#installation\n")
sys.exit("\nUploading umbrella spec dependencies to s3 requires a python package - boto3. Please check the installation page of boto3:\n\n\thttps://boto3.readthedocs.org/en/latest/guide/quickstart.html#installation\n")
print "Download %s from S3 to %s" % (link, dest)
logging.debug("Download %s from S3 to %s", link, dest)
s3 = boto3.resource('s3')
if (len(s3_url)+1) >= len(link):
cleanup(tempfile_list, tempdir_list)
sys.exit("The s3 object link (%s) is invalid! The correct format shoulde be <%s>/<bucket_name>/<key>!" % (link, s3_url))
m = link[(len(s3_url)+1):] #m format: <bucket_name>/<key>
i = m.find('/')
if i == -1:
cleanup(tempfile_list, tempdir_list)
sys.exit("The s3 object link (%s) is invalid! The correct format shoulde be <%s>/<bucket_name>/<key>!" % (link, s3_url))
bucket_name = m[:i]
if (i+1) >= len(m):
cleanup(tempfile_list, tempdir_list)
sys.exit("The s3 object link (%s) is invalid! The correct format shoulde be <%s>/<bucket_name>/<key>!" % (link, s3_url))
key = m[(i+1):]
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
#the download url can be automatically combined through bucket name and key
try:
s3.Object(bucket_name, key).download_file(dest)
except botocore.exceptions.ClientError as e:
cleanup(tempfile_list, tempdir_list)
sys.exit(e.message)
except Exception as e:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to download the object (%s) from the bucket(%s):! Please ensure you have the right permission to download these s3 objects: %s!" % (key, bucket_name, e))
def has_source(sources, target):
"""Check whether the sources includes a url from the specific target.
Args:
sources: a list of url
target: the specific resource url. For example, s3, osf.
Returns:
If a url from the specific target exists, return True.
Otherwise, return False.
"""
if not sources or len(sources) == 0:
return False
n = len(target)
for source in sources:
if len(source) > n and source[:n] == target:
return True
return False
def spec_upload(spec_json, meta_json, target_info, sandbox_dir, osf_auth=None, s3_bucket=None):
"""Upload each dependency in an umbrella spec to the target (OSF or s3), and add the new target download url into the umbrella spec.
The source of the dependencies can be anywhere supported by umbrella: http
https git local s3 osf. Umbrella always first downloads each dependency into
its local cache, then upload the dep from its local cache to the target.
Args:
spec_json: the json object including the specification.
meta_json: the json object including all the metadata of dependencies.
target_info: the info necessary to communicate with the remote target (i.e., OSF, s3)
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
osf_auth: the osf authentication info including osf_username and osf_password.
s3_bucket: an S3.Bucket instance
Returns:
None
"""
mount_dict = {}
env_para_dict = {}
global upload_count
print "Upload the dependencies from the umbrella spec to %s ..." % target_info[0]
logging.debug("Upload the dependencies from the umbrella spec to %s ...", target_info[0])
if spec_json.has_key("os") and spec_json["os"] and spec_json["os"].has_key("id") and spec_json["os"]["id"]:
os_id = spec_json["os"]["id"]
if spec_json.has_key("hardware") and spec_json["hardware"] and spec_json.has_key("kernel") and spec_json["kernel"] and spec_json.has_key("os") and spec_json["os"]:
logging.debug("Setting the environment parameters (hardware, kernel and os) according to the specification file ....")
(hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version, linux_distro, distro_name, distro_version, os_id) = env_parameter_init(spec_json["hardware"], spec_json["kernel"], spec_json["os"])
item = '%s-%s-%s' % (distro_name, distro_version, hardware_platform) #example of item here: redhat-6.5-x86_64
os_image_dir = "%s/cache/%s/%s" % (os.path.dirname(sandbox_dir), os_id, item)
logging.debug("A separate OS (%s) is needed!", os_image_dir)
mountpoint = '/'
action = 'unpack'
if spec_json["os"].has_key("source") or attr_check(item, meta_search(meta_json, item, os_id), "source", 1):
if spec_json["os"].has_key("source"):
sources = spec_json["os"]["source"]
else:
sources = meta_search(meta_json, item, os_id)["source"]
if has_source(sources, target_info[0]):
logging.debug("The os section already has a url from %s!", target_info[0])
print "The os section already has a url from %s!" % target_info[0]
else:
upload_count += 1
r3 = dependency_process(item, os_id, action, meta_json, sandbox_dir, osf_auth)
logging.debug("Add mountpoint (%s:%s) into mount_dict for /.", mountpoint, r3)
mount_dict[mountpoint] = r3
if target_info[0] == "osf":
osf_url = osf_upload(target_info[1], target_info[2], target_info[3], os_image_dir + ".tar.gz")
spec_json["os"]["source"].append("osf+" + osf_url)
elif target_info[0] == "s3":
s3_url = s3_upload(s3_bucket, os_image_dir + ".tar.gz", target_info[1])
spec_json["os"]["source"].append("s3+" + s3_url)
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("the os section does not has source attr!")
sys.exit("the os section does not has source attr!")
for sec_name in ["data"]:
if spec_json.has_key(sec_name) and spec_json[sec_name]:
sec = spec_json[sec_name]
for item in sec:
if sec[item].has_key("source") or attr_check(item, meta_search(meta_json, item, id), "source", 1):
if sec[item].has_key("source"):
sources = sec[item]["source"]
else:
sources = meta_search(meta_json, item, id)["source"]
if has_source(sources, target_info[0]):
logging.debug("%s already has a url from %s!", item, target_info[0])
print "%s already has a url from %s!" % (item, target_info[0])
continue
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s does not has the source attr!", item)
sys.exit("%s does not has the source attr!" % item)
upload_count += 1
data_install(sec, meta_json, sandbox_dir, mount_dict, env_para_dict, osf_auth, item)
if sec[item]["format"] == "tgz":
source_url = mount_dict[sec[item]["mountpoint"]] + ".tar.gz"
else:
source_url = mount_dict[sec[item]["mountpoint"]]
if target_info[0] == "osf":
osf_url = osf_upload(target_info[1], target_info[2], target_info[3], source_url)
sec[item]["source"].append("osf+" + osf_url)
elif target_info[0] == "s3":
s3_url = s3_upload(s3_bucket, source_url, target_info[1])
sec[item]["source"].append("s3+" + s3_url)
for sec_name in ["software", "package_manager"]:
if spec_json.has_key(sec_name) and spec_json[sec_name]:
sec = spec_json[sec_name]
if sec_name == "package_manager":
if sec.has_key("config") and sec["config"]:
sec = sec["config"]
else:
logging.debug("%s does not have config attribute!", sec_name)
break
for item in sec:
if sec[item].has_key("source") or attr_check(item, meta_search(meta_json, item, id), "source", 1):
if sec[item].has_key("source"):
sources = sec[item]["source"]
else:
sources = meta_search(meta_json, item, id)["source"]
if has_source(sources, target_info[0]):
logging.debug("%s already has a url from %s!", item, target_info[0])
print "%s already has a url from %s!" % (item, target_info[0])
continue
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s does not has the source attr!", item)
sys.exit("%s does not has the source attr!" % item)
upload_count += 1
software_install(mount_dict, env_para_dict, sec, meta_json, sandbox_dir, 0, osf_auth, item)
#ignore upload resouces from cvmfs
if (not sec[item].has_key("mountpoint")) or (not mount_dict.has_key(sec[item]["mountpoint"])) or mount_dict[sec[item]["mountpoint"]] == "":
continue
if sec[item]["format"] == "tgz":
source_url = mount_dict[sec[item]["mountpoint"]] + ".tar.gz"
else:
source_url = mount_dict[sec[item]["mountpoint"]]
if target_info[0] == "osf":
osf_url = osf_upload(target_info[1], target_info[2], target_info[3], source_url)
sec[item]["source"].append("osf+" + osf_url)
elif target_info[0] == "s3":
s3_url = s3_upload(s3_bucket, source_url, target_info[1])
sec[item]["source"].append("s3+" + s3_url)
def dep_build(d, name):
"""Build the metadata info of a dependency.
Args:
d: a dependency object
name: the name of the dependency
Returns:
None
"""
#check the validity of the 'format' attr
formats = ['plain', 'tgz']
form = attr_check(name, d, "format")
if not form in formats:
cleanup(tempfile_list, tempdir_list)
sys.exit("The format attr can only be: %s!\n", ' or '.join(formats))
#check the validity of the 'source' attr
source = attr_check(name, d, "source", 1)
if source == '':
cleanup(tempfile_list, tempdir_list)
sys.exit("The source of %s is empty!" % name)
if source[0] != '/':
cleanup(tempfile_list, tempdir_list)
sys.exit("The source of %s should be a local path!" % name)
#set the file size
size = os.stat(source).st_size
d["size"] = str(size)
#set the uncompressed size of tgz file
if form == "tgz":
full_size = get_tgz_size(source)
d["uncompressed_size"] = str(full_size)
#set the 'checksum' and 'id' attrs
checksum = md5_cal(source)
d["id"] = checksum
d["checksum"] = checksum
def get_tgz_size(path):
"""Get the uncompressed size of a tgz file
Args:
path: a tgz file path
Returns:
size: the uncompressed size of a tgz file
"""
size = 0
f = gzip.open(path, 'rb')
try:
while True:
c = f.read(1024*1024)
if not c:
break
else:
size += len(c)
finally:
f.close()
return size
def spec_build(spec_json):
"""Build the metadata information of an umbrella spec
Args:
spec_json: the json object including the specification.
Returns:
None
"""
if spec_json.has_key("os") and spec_json["os"]:
dep_build(spec_json["os"], "os")
for sec_name in ["data", "software", "package_manager"]:
if spec_json.has_key(sec_name) and spec_json[sec_name]:
sec = spec_json[sec_name]
if sec_name == "package_manager":
if sec.has_key("config") and sec["config"]:
sec = sec["config"]
else:
logging.debug("%s does not have config attribute!", sec_name)
break
for item in sec:
dep_build(sec[item], item)
help_info = {
"build": '''Build up the metadata info of dependencies inside an umbrella spec, and write the built-up version into a new file.
A good use case of build is when you have some dependencies from the local filesystem. In this case, umbrella will calculate the metadata info
about these dependencies.
The source spec should specify the following info of each local dependency: source, action, mountpoint, format.
When the local dependency is a .tar.gz file, the following metadata info will be put into the target spec: id, checksum, size, uncompressed size.
When the local dependency is a plain file, the following metadata info will be put into the target spec: id, checksum, size.
When the local dependencies is a dir D, a corresponding D.tar.gz file will be created under the same directory with D, then the following metadata info will be put into the target spec: id, checksum, size, uncompressed size.
For more info about how to compose an umbrella spec, please check the following link:
http://ccl.cse.nd.edu/software/manuals/umbrella.html#create_spec
usage: umbrella [options] build source target
source the path of an existing umbrella spec file from your local filesystem whose metadata info is needed to be built up
target an non-existing file path on your local filesystem where the built-up version of the umbrella spec will be wrotten into
''',
"expand": '''Expand an umbrella spec file into a self-contained umbrella spec
The source umbrella spec should be specified through the --spec option; the metadata db should be specified through the --meta option.
For each dependency in the source umbrella spec, the following info will be extracted from the metadata db: source, size, format, checksum.
Finally, the expanded umbrella sepc will be wrotten into a new file.
usage: umbrella [options] expand target
target an non-existing file path on your local filesystem where the expanded version of the umbrella spec will be wrotten into
''',
"filter": '''Filter the metadata info for an umbrella spec file from a huge metadata db
The source umbrella spec should be specified through the --spec option; the metadata db should be specified through the --meta option.
The source umbrella spec should NOT be self-contained.
For each dependency specified in the source umbrella spec, its metadata info will be extracted from the huge metadata db, and written into the target path.
usage: umbrella [options] filter target
target an non-existing file path on your local filesystem where the metadata info of all the dependencies in the umbrella spec will be wrotten into
''',
"run": '''Run your application through umbrella
usage: umbrella [options] run [command]
command command to run, the command can also be set inside the umbrella spec. By default: /bin/sh
''',
"split": '''Split a self-contained umbrella spec file into an umbrella spec and a metadata db
The source umbrella spec should be specified through the --spec option; The --meta option will be ignored.
The source umbrella spec should be self-contained.
usage: umbrella [options] split newspec newdb
newspec an non-existing file path on your local filesystem where the new umbrella spec will be wrotten into
newdb an non-existing file path on your local filesystem where the metadata info corresponding to newspec will be wrotten into
''',
"upload": '''Upload the dependencies in an umbrella spec into remote archives (OSF, Amazon S3)
Umbrella will upload all the dependencies to the target archive, and add the new resource location into the source section of each dependency.
Finally, the new umbrella spec will be written into a new file.
When the source of a dependency has already include one url from the target archive, the dependency will be ignored.
Currently, the supported target includes: OSF, the Amazon S3.
Uploading to OSF requires the following umbrella options: --osf_user, --osf_pass, --osf_userid
usage of upload osf: umbrella [options] upload osf proj acl target
proj the osf project name
acl the access permission of the uploaded data. Options: public, private
target an non-existing file path on your local filesystem where the new umbrella spec will be wrotten into
usage of upload s3: umbrella [options] upload s3 bucket acl target
bucket the s3 bucket name
acl the access permission of the uploaded data. Options: public-read, private
target an non-existing file path on your local filesystem where the new umbrella spec will be wrotten into
''',
"validate": '''Validate an umbrella spec file
The source umbrella spec should be specified through the --spec option; the metadata db should be specified through the --meta option.
usage: umbrella [options] validate
'''
}
def main():
parser = OptionParser(description="Umbrella is a portable environment creator for reproducible computing on clusters, clouds, and grids.",
usage="""usage: %prog [options] run|expand|filter|split|validate|upload|build ...
Currently, umbrella supports the following behaviors:
build\t\tbuild up the metadata info of dependencies inside an umbrella spec
expand\t\texpand an umbrella spec file into a self-contained umbrella spec
filter\t\tfilter the metadata info for an umbrella spec file from a huge metadata db
run\t\trun your application through umbrella
split\t\tsplit a self-contained umbrella spec file into an umbrella spec and a metadata db
upload\t\tupload the dependencies in an umbrella spec into remote archives (OSF, Amazon S3)
validate\tvalidate an umbrella spec file
To check the help doc for a specific behavoir, use: %prog <behavior> help""",
version="%prog CCTOOLS_VERSION")
parser.add_option("--spec",
action="store",
help="The specification json file.",)
parser.add_option("--meta",
action="store",
help="The source of meta information, which can be a local file path (e.g., file:///tmp/meta.json) or url (e.g., http://...).\nIf this option is not provided, the specification will be treated a self-contained specification.",)
parser.add_option("-l", "--localdir",
action="store",
help="The path of directory used for all the cached data and all the sandboxes, the directory can be an existing dir.",)
parser.add_option("-o", "--output",
action="store",
help="The mappings of outputs in the format of <container_path>=<local_path>. Multiple mappings should be separated by comma.\ncontainer_path is a path inside the sandbox and should be exposed in the output section of an umbrella spec.\nlocal_path should be a non-existing path on your local filessytem where you want the output from container_path to be put into.",)
parser.add_option("-s", "--sandbox_mode",
action="store",
choices=['parrot', 'destructive', 'docker', 'ec2',],
help="sandbox mode, which can be parrot, destructive, docker, ec2.",)
parser.add_option("-i", "--inputs",
action="store",
help="The path of input files in the format of <container_path>=<local_path>. Multiple mappings should be separated by comma. Please refer to the --output option for the settings of local_path and container_path.",)
parser.add_option("-e", "--env",
action="store",
help="The environment variables in the format of <variable_name>=<variable_value>. Multiple settings should be separated by comma. I.e., -e 'PWD=/tmp'.")
parser.add_option("--log",
action="store",
default="./umbrella.log",
help="The path of umbrella log file. (By default: ./umbrella.log)",)
parser.add_option("--cvmfs_http_proxy",
action="store",
help="HTTP_PROXY to access cvmfs (Used by Parrot)",)
parser.add_option("--ec2",
action="store",
help="The source of ec2 information.",)
parser.add_option("--condor_log",
action="store",
help="The path of the condor umbrella log file. Required for condor execution engines.",)
parser.add_option("--ec2_log",
action="store",
help="The path of the ec2 umbrella log file. Required for ec2 execution engines.",)
parser.add_option("-g", "--ec2_group",
action="store",
help="the security group within which an Amazon EC2 instance should be run. (only for ec2)",)
parser.add_option("-k", "--ec2_key",
action="store",
help="the name of the key pair to use when launching an Amazon EC2 instance. (only for ec2)",)
parser.add_option("--ec2_sshkey",
action="store",
help="the name of the private key file to use when connecting to an Amazon EC2 instance. (only for ec2)",)
parser.add_option("--ec2_instance_type",
action="store",
help="the type of an Amazon EC2 instance. (only for ec2)",)
parser.add_option("--osf_user",
action="store",
help="the OSF username (required in two cases: uploading to osf; downloading private osf resources.)",)
parser.add_option("--osf_pass",
action="store",
help="the OSF password (required in two cases: uploading to osf; downloading private osf resources.)",)
parser.add_option("--osf_userid",
action="store",
help="the OSF user id (required in two cases: uploading to osf; downloading private osf resources.)",)
(options, args) = parser.parse_args()
logfilename = options.log
if os.path.exists(logfilename) and not os.path.isfile(logfilename):
sys.exit("The --log option <%s> is not a file!" % logfilename)
global tempfile_list
global tempdir_list
global upload_count
"""
disable_warnings function is used here to disable the SNIMissingWarning and InsecurePlatformWarning from /afs/crc.nd.edu/user/h/hmeng/.local/lib/python2.6/site-packages/requests-2.9.1-py2.6.egg/requests/packages/urllib3/util/ssl_.py.
"Requests 2.6 introduced this warning for users of Python prior to Python 2.7.9 with only stock SSL modules available."
"""
if found_requests:
requests.packages.urllib3.disable_warnings()
logging.basicConfig(filename=logfilename, level=logging.DEBUG,
format='%(asctime)s.%(msecs)d %(levelname)s %(module)s - %(funcName)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
logging.debug("*******Welcome to Umbrella*******")
logging.debug("Arguments: ")
logging.debug(sys.argv)
start = datetime.datetime.now()
logging.debug("Start time: %s", start)
logging.debug("Check the validity of the command ....")
if not args:
logging.critical("You must provide the behavior and the command!")
print "You must provide the behavior and the command!\n"
parser.print_help()
sys.exit(1)
user_cmd = []
behavior = args[0]
logging.debug("Check the validity of the behavior: %s", behavior)
behavior_list = ["run", "expand", "filter", "split", "validate", "upload", "build"]
if behavior not in behavior_list:
logging.critical("%s is not supported by umbrella!", behavior)
print behavior + " is not supported by umbrella!\n"
parser.print_help()
sys.exit(1)
if len(args) > 1 and args[1] in ['help']:
print help_info[behavior]
sys.exit(0)
if behavior in ["build"]:
if len(args) != 3:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella build is: umbrella ... build <source.umbrella> <dest.umbrella>\n")
sys.exit("The syntax for umbrella build is: umbrella ... build <source.umbrella> <dest.umbrella>\n")
args[1] = os.path.abspath(args[1])
if (not os.path.exists(args[1])) or (not os.path.isfile(args[1])):
cleanup(tempfile_list, tempdir_list)
logging.critical("<source.umbrella> (%s) should be an existing file!\n", args[1])
sys.exit("<source.umbrella> (%s) should be an existing file!\n" % args[1])
if os.path.exists(args[2]):
cleanup(tempfile_list, tempdir_list)
logging.critical("<dest.umbrella> (%s) should be a non-existing file!\n", args[2])
sys.exit("<dest.umbrella> (%s) should be a non-existing file!\n" % args[2])
args[2] = os.path.abspath(args[2])
if not os.path.exists(os.path.dirname(args[2])):
print os.path.dirname(args[2])
try:
os.makedirs(os.path.dirname(args[2]))
except Exception as e:
cleanup(tempfile_list, tempdir_list)
logging.critical("Fails to create the directory for the <dest.umbrella> (%s): %s!", args[2], e)
sys.exit("Fails to create the directory for the <dest.umbrella> (%s)!" % (args[2], e))
with open(args[1]) as f:
spec_json = json.load(f)
spec_build(spec_json)
json2file(args[2], spec_json)
sys.exit(0)
if behavior in ["run", "upload"]:
#get the absolute path of the localdir directory, which will cache all the data, and store all the sandboxes.
#to allow the reuse the local cache, the localdir can be a dir which already exists.
localdir = options.localdir
localdir = os.path.abspath(localdir)
logging.debug("Check the localdir option: %s", localdir)
if not os.path.exists(localdir):
logging.debug("create the localdir: %s", localdir)
os.makedirs(localdir)
sandbox_dir = tempfile.mkdtemp(dir=localdir)
logging.debug("Create the sandbox_dir: %s", sandbox_dir)
#add sandbox_dir into tempdir_list
tempdir_list.append(sandbox_dir)
osf_auth = []
#osf_auth info
osf_user = options.osf_user
osf_pass = options.osf_pass
if osf_user or osf_pass:
osf_auth.append(osf_user)
osf_auth.append(osf_pass)
if behavior in ["run"]:
sandbox_mode = options.sandbox_mode
logging.debug("Check the sandbox_mode option: %s", sandbox_mode)
if sandbox_mode in ["destructive"]:
if getpass.getuser() != 'root':
cleanup(tempfile_list, tempdir_list)
logging.critical("You must be root to use the %s sandbox mode.", sandbox_mode)
print 'You must be root to use the %s sandbox mode.\n' % (sandbox_mode)
parser.print_help()
sys.exit(1)
#transfer options.env into a dictionary, env_para_dict
env_para = options.env
env_para_dict = {}
if (not env_para) or env_para == '':
logging.debug("The env option is null")
env_para_list = ''
env_para_dict = {}
else:
logging.debug("Process the env option: %s", env_para)
env_para = re.sub('\s+', '', env_para).strip()
env_para_list = env_para.split(',')
for item in env_para_list:
index = item.find('=')
name = item[:index]
value = item[(index+1):]
env_para_dict[name] = value
logging.debug("the dictionary format of the env options (env_para_dict):")
logging.debug(env_para_dict)
#get the cvmfs HTTP_PROXY
cvmfs_http_proxy = options.cvmfs_http_proxy
if behavior in ["run", "expand", "filter", "split", "validate", "upload"]:
spec_path = options.spec
if behavior == "validate" and spec_path == None:
spec_json = None
else:
spec_path_basename = os.path.basename(spec_path)
logging.debug("Start to read the specification file: %s", spec_path)
if not os.path.isfile(spec_path):
cleanup(tempfile_list, tempdir_list)
logging.critical("The specification json file (%s) does not exist! Please refer the -c option.", spec_path)
print "The specification json file does not exist! Please refer the -c option.\n"
parser.print_help()
sys.exit(1)
with open(spec_path) as f: #python 2.4 does not support this syntax: with open () as
spec_json = json.load(f)
if behavior in ["run"]:
user_cmd = args[1:]
if len(user_cmd) == 0:
if spec_json.has_key("cmd") and len(spec_json["cmd"]) > 0:
user_cmd.append(spec_json["cmd"])
else:
user_cmd.append("/bin/sh") #set the user_cmd to be default: /bin/sh
logging.debug("The user's command is: %s", user_cmd)
#if the spec file has environ seciton, merge the variables defined in it into env_para_dict
if spec_json.has_key("environ") and spec_json["environ"]:
logging.debug("The specification file has environ section, update env_para_dict ....")
spec_env = spec_json["environ"]
for key in spec_env:
env_para_dict[key] = spec_env[key]
logging.debug("env_para_dict:")
logging.debug(env_para_dict)
if behavior in ["run"]:
if 'PWD' in env_para_dict:
cwd_setting = env_para_dict['PWD']
logging.debug("PWD environment variable is set explicitly: %s", cwd_setting)
else:
cwd_setting = sandbox_dir
env_para_dict['PWD'] = cwd_setting
logging.debug("PWD is not set explicitly, use sandbox_dir (%s) as PWD", cwd_setting)
#get the absolute path of each input file
input_files = options.inputs
input_list = []
input_dict = {}
if (not input_files) or input_files == '':
input_list_origin = ''
input_list = []
input_dict = {}
logging.debug("the inputs options is null")
else:
input_files = re.sub( '\s+', '', input_files).strip() #remove all the whitespaces within the inputs option
logging.debug("The inputs option: %s", input_files)
input_list_origin = input_files.split(',')
for item in input_list_origin:
index = item.find('=')
access_path = item[:index]
actual_path = item[(index+1):]
if access_path[0] != '/':
access_path = os.path.join(cwd_setting, access_path)
actual_path = os.path.abspath(actual_path)
input_dict[access_path] = actual_path
input_list.append(actual_path) #get the absolute path of each input file and add it into input_list
logging.debug("The list version of the inputs option: ")
logging.debug(input_list)
logging.debug("The dict version of the inputs option: ")
logging.debug(input_dict)
#get the absolute path of each output file
output_dir = options.output
output_dict = {}
output_f_dict = {}
output_d_dict = {}
if output_dir and len(output_dir) > 0:
output_dir = re.sub( '\s+', '', output_dir).strip() #remove all the whitespaces within the inputs option
if output_dir == "":
logging.debug("the output option is null!")
else:
logging.debug("the output option: %s", output_dir)
outputs = output_dir.split(',')
for item in outputs:
index = item.find('=')
access_path = item[:index]
actual_path = item[(index+1):]
if access_path[0] != '/':
cleanup(tempfile_list, tempdir_list)
logging.critical("the path of an output should be absolute!")
sys.exit("the path of an output should be absolute!")
actual_path = os.path.abspath(actual_path)
output_dict[access_path] = actual_path
if len(output_dict) > 0:
if spec_json.has_key("output"):
files = []
dirs = []
if spec_json["output"].has_key("files"):
files = spec_json["output"]["files"]
if spec_json["output"].has_key("dirs"):
dirs = spec_json["output"]["dirs"]
for key in output_dict.keys():
if key in files:
output_f_dict[key] = output_dict[key]
elif key in dirs:
output_d_dict[key] = output_dict[key]
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("the output file (%s) is not specified in the spec file!", key)
sys.exit("the output file (%s) is not specified in the spec file!" % key)
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("the specification does not have a output section!")
sys.exit("the specification does not have a output section!")
del output_dict
for f in output_f_dict.values():
if not os.path.exists(f):
logging.debug("create the output file: %s", f)
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
elif not os.path.isdir(d):
cleanup(tempfile_list, tempdir_list)
logging.critical("the parent path of the output file (%s) is not a directory!", f)
sys.exit("the parent path of the output file (%s) is not a directory!" % f)
else:
pass
new_file = open(f, 'a')
new_file.close()
elif len(f) != 0:
cleanup(tempfile_list, tempdir_list)
logging.critical("the output file (%s) already exists!", f)
sys.exit("the output file (%s) already exists!\n" % f)
else:
pass
for d in output_d_dict.values():
if not os.path.exists(d):
logging.debug("create the output dir: %s", d)
os.makedirs(d)
elif len(d) != 0:
cleanup(tempfile_list, tempdir_list)
logging.critical("the output dir (%s) already exists!", d)
sys.exit("the output dir(%s) already exists!" % d)
else:
pass
meta_json = None
if behavior in ["run", "expand", "filter", "validate"]:
"""
meta_path is optional. If set, it provides the metadata information for the dependencies.
If not set, the umbrella specification is treated as a self-contained specification.
meta_path can be in either file:///filepath format or a http/https url like http:/ccl.cse.nd.edu/.... Otherwise, it is treated as a local path.
"""
meta_path = options.meta
if meta_path:
if meta_path[:7] == "file://":
meta_path = meta_path[7:]
logging.debug("Check the metatdata database file: %s", meta_path)
if not os.path.exists(meta_path):
cleanup(tempfile_list, tempdir_list)
logging.critical("the metatdata database file (%s) does not exist!", meta_path)
sys.exit("the metatdata database file (%s) does not exist!" % meta_path)
elif meta_path[:7] == "http://" or meta_path[:8] == "https://":
url = meta_path
if behavior in ["run"]:
meta_path = '%s/meta.json' % (sandbox_dir)
if behavior in ["expand", "filter", "validate"]:
#create a tempfile under /tmp
(fd, meta_path) = tempfile.mkstemp()
tempfile_list.append(meta_path)
os.close(fd)
logging.debug("Creating a temporary file (%s) to hold the metadata file specified by the --meta options!", meta_path)
logging.debug("Download metadata database from %s into %s", url, meta_path)
print "Download metadata database from %s into %s" % (url, meta_path)
url_download(url, meta_path)
else:
logging.debug("Check the metatdata database file: %s", meta_path)
if not os.path.exists(meta_path):
cleanup(tempfile_list, tempdir_list)
logging.critical("the metatdata database file (%s) does not exist!", meta_path)
sys.exit("the metatdata database file (%s) does not exist!" % meta_path)
else:
if behavior in ["run"]:
#the provided specification should be self-contained.
# One solution is to change all the current implementation of Umbrella to check whether the metadata information is included in the specification.
# Another solution is to extract all the metadata information into a separate metadata database file. (This solution is currently used).
meta_path = '%s/meta.json' % (sandbox_dir)
abstract_metadata(spec_json, meta_path)
elif behavior in ["expand", "filter"]:
cleanup(tempfile_list, tempdir_list)
logging.critical("The --meta option should be provided for the umbrella %s behavior!\n", behavior)
sys.exit("The --meta option should be provided for the umbrella %s behavior!\n" % behavior)
if meta_path:
with open(meta_path) as f: #python 2.4 does not support this syntax: with open () as
meta_json = json.load(f)
if behavior in ["upload"]:
#the provided specification should be self-contained.
# One solution is to change all the current implementation of Umbrella to check whether the metadata information is included in the specification.
# Another solution is to extract all the metadata information into a separate metadata database file. (This solution is currently used).
meta_path = '%s/meta.json' % (sandbox_dir)
abstract_metadata(spec_json, meta_path)
with open(meta_path) as f: #python 2.4 does not support this syntax: with open () as
meta_json = json.load(f)
if behavior in ["run", "validate", "split", "filter", "expand", "upload"]:
#for validate, if only --spec is provided, then check whether this spec is self-contained.
#for validate, if only --meta is provided, then check whether each item in the metadata db is well archived (for now, well-archived means the source attr is not null).
#for validate, if both --spec and --meta are provided, then check whether the dependencies of the spec file is well archived.
if spec_json == None:
if meta_json == None:
pass
else:
validate_meta(meta_json)
else:
if meta_json == None:
validate_spec(spec_json)
else:
validate_spec(spec_json, meta_json)
if behavior in ["run"]:
# user_name = 'root' #username who can access the VM instances from Amazon EC2
# ssh_key = 'hmeng_key_1018.pem' #the pem key file used to access the VM instances from Amazon EC2
if sandbox_mode == "ec2":
ec2log_path = options.ec2_log
ec2log_path = os.path.abspath(ec2log_path)
if os.path.exists(ec2log_path):
cleanup(tempfile_list, tempdir_list)
sys.exit("The ec2_log option <%s> already exists!" % ec2log_path)
ssh_key = os.path.abspath(options.ec2_sshkey)
if not os.path.exists(ssh_key):
cleanup(tempfile_list, tempdir_list)
logging.critical("The ssh key file (%s) does not exists!", ssh_key)
sys.exit("The ssh key file (%s) does not exists!\n" % ssh_key)
ec2_security_group = options.ec2_group
ec2_key_pair = options.ec2_key
ec2_instance_type = options.ec2_instance_type
ec2_process(spec_path, spec_json, options.meta, meta_path, ssh_key, ec2_key_pair, ec2_security_group, ec2_instance_type, sandbox_dir, output_dir, output_f_dict, output_d_dict, sandbox_mode, input_list, input_list_origin, env_para, env_para_dict, user_cmd, cwd_setting, ec2log_path, cvmfs_http_proxy)
elif sandbox_mode == "condor":
condorlog_path = options.condor_log
condorlog_path = os.path.abspath(condorlog_path)
if os.path.exists(condorlog_path):
cleanup(tempfile_list, tempdir_list)
sys.exit("The condor_log option <%s> already exists!" % condorlog_path)
condor_process(spec_path, spec_json, spec_path_basename, meta_path, sandbox_dir, output_dir, input_list_origin, user_cmd, cwd_setting, condorlog_path, cvmfs_http_proxy)
elif sandbox_mode == "local":
#first check whether Docker exists, if yes, use docker execution engine; if not, use parrot execution engine.
if dependency_check('docker') == 0:
logging.debug('docker exists, use docker execution engine')
specification_process(spec_json, sandbox_dir, behavior, meta_json, 'docker', output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, cwd_setting, cvmfs_http_proxy, osf_auth)
else:
logging.debug('docker does not exist, use parrot execution engine')
specification_process(spec_json, sandbox_dir, behavior, meta_json, 'parrot', output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, cwd_setting, cvmfs_http_proxy, osf_auth)
else:
if sandbox_mode == 'docker' and dependency_check('docker') != 0:
cleanup(tempfile_list, tempdir_list)
logging.critical('Docker is not installed on the host machine, please try other execution engines!')
sys.exit('Docker is not installed on the host machine, please try other execution engines!')
specification_process(spec_json, sandbox_dir, behavior, meta_json, sandbox_mode, output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, cwd_setting, cvmfs_http_proxy, osf_auth)
if behavior in ["expand", "filter"]:
if len(args) != 2:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella %s is: umbrella ... %s <filepath>.\n", behavior, behavior)
sys.exit("The syntax for umbrella %s is: umbrella ... %s <filepath>.\n" % (behavior, behavior))
target_specpath = os.path.abspath(args[1])
path_exists(target_specpath)
dir_create(target_specpath)
if behavior == "expand":
new_json = separatize_spec(spec_json, meta_json, "spec")
else:
new_json = separatize_spec(spec_json, meta_json, "meta")
#write new_json into the file specified by the user.
json2file(target_specpath, new_json)
if behavior in ["split"]:
if len(args) != 3:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella split is: umbrella ... split <spec_filepath> <meta_filepath>.\n")
sys.exit("The syntax for umbrella split is: umbrella ... split <spec_filepath> <meata_filepath>.\n")
new_spec_path = os.path.abspath(args[1])
db_path = os.path.abspath(args[2])
path_exists(new_spec_path)
dir_create(new_spec_path)
path_exists(db_path)
dir_create(db_path)
abstract_metadata(spec_json, db_path)
new_json = prune_spec(spec_json)
json2file(new_spec_path, new_json)
if behavior in ["upload"]:
target = ["osf", "s3"]
if len(args) < 2 or args[1] not in target:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella upload is: umbrella ... upload <target> ... (target can be: %s)\n", " or ".join(target))
sys.exit("The syntax for umbrella upload is: umbrella ... upload <target> ... (target can be: %s)\n" % " or ".join(target))
if args[1] == "osf":
if not found_requests:
cleanup(tempfile_list, tempdir_list)
logging.critical("\nUploading umbrella spec dependencies to OSF requires a python package - requests. Please check the installation page of requests:\n\n\thttp://docs.python-requests.org/en/latest/user/install/\n")
sys.exit("\nUploading umbrella spec dependencies to OSF requires a python package - requests. Please check the installation page of requests:\n\n\thttp://docs.python-requests.org/en/latest/user/install/\n")
if len(args) != 5:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella upload osf is: umbrella ... upload osf <osf_project_name> <public_or_private> <target_specpath>\n")
sys.exit("The syntax for umbrella upload osf is: umbrella ... upload osf <osf_project_name> <public_or_private> <target_specpath>\n")
acl = ["private", "public"]
if args[3] not in acl:
cleanup(tempfile_list, tempdir_list)
sys.exit("The access control for s3 bucket and object can only be: %s" % " or ".join(acl))
target_specpath = os.path.abspath(args[4])
path_exists(target_specpath)
dir_create(target_specpath)
osf_info = []
osf_info.append("osf")
osf_info += [options.osf_user, options.osf_pass]
osf_proj_id = osf_create(options.osf_user, options.osf_pass, options.osf_userid, args[2], args[3] == "public")
osf_info.append(osf_proj_id)
spec_upload(spec_json, meta_json, osf_info, sandbox_dir, osf_auth)
if upload_count > 0:
json2file(target_specpath, spec_json)
osf_upload(options.osf_user, options.osf_pass, osf_proj_id, target_specpath)
else:
logging.debug("All the dependencies has been already inside OSF!")
print "All the dependencies has been already inside OSF!"
elif args[1] == "s3":
if not found_boto3 or not found_botocore:
cleanup(tempfile_list, tempdir_list)
logging.critical("\nUploading umbrella spec dependencies to s3 requires a python package - boto3. Please check the installation page of boto3:\n\n\thttps://boto3.readthedocs.org/en/latest/guide/quickstart.html#installation\n")
sys.exit("\nUploading umbrella spec dependencies to s3 requires a python package - boto3. Please check the installation page of boto3:\n\n\thttps://boto3.readthedocs.org/en/latest/guide/quickstart.html#installation\n")
if len(args) != 5:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella upload s3 is: umbrella ... upload s3 <bucket_name> <access_control> <target_specpath>\n")
sys.exit("The syntax for umbrella upload s3 is: umbrella ... upload s3 <bucket_name> <access_control> <target_specpath>\n")
acl = ["private", "public-read"]
if args[3] not in acl:
cleanup(tempfile_list, tempdir_list)
sys.exit("The access control for s3 bucket and object can only be: %s" % " or ".join(acl))
target_specpath = os.path.abspath(args[4])
path_exists(target_specpath)
dir_create(target_specpath)
s3_info = []
s3_info.append("s3")
s3_info.append(args[3])
bucket = s3_create(args[2], args[3])
spec_upload(spec_json, meta_json, s3_info, sandbox_dir, s3_bucket=bucket)
if upload_count > 0:
json2file(target_specpath, spec_json)
s3_upload(bucket, target_specpath, args[3])
else:
logging.debug("All the dependencies has been already inside S3!")
print "All the dependencies has been already inside S3!"
cleanup(tempfile_list, tempdir_list)
end = datetime.datetime.now()
diff = end - start
logging.debug("End time: %s", end)
logging.debug("execution time: %d seconds", diff.seconds)
if __name__ == "__main__":
main()
#set sts=4 sw=4 ts=4 expandtab ft=python
| gpl-2.0 | -3,054,751,530,107,959,000 | 42.014133 | 648 | 0.702808 | false | 3.167061 | false | false | false |
leshchevds/ganeti | lib/jqueue/__init__.py | 1 | 52112 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module implementing the job queue handling.
"""
import logging
import errno
import time
import weakref
import threading
import itertools
import operator
import os
try:
# pylint: disable=E0611
from pyinotify import pyinotify
except ImportError:
import pyinotify
from ganeti import asyncnotifier
from ganeti import constants
from ganeti import serializer
from ganeti import locking
from ganeti import luxi
from ganeti import opcodes
from ganeti import opcodes_base
from ganeti import errors
from ganeti import mcpu
from ganeti import utils
from ganeti import jstore
import ganeti.rpc.node as rpc
from ganeti import runtime
from ganeti import netutils
from ganeti import compat
from ganeti import ht
from ganeti import query
from ganeti import qlang
from ganeti import pathutils
from ganeti import vcluster
from ganeti.cmdlib import cluster
#: Retrieves "id" attribute
_GetIdAttr = operator.attrgetter("id")
class CancelJob(Exception):
"""Special exception to cancel a job.
"""
def TimeStampNow():
"""Returns the current timestamp.
@rtype: tuple
@return: the current time in the (seconds, microseconds) format
"""
return utils.SplitTime(time.time())
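# Illustrative note (comments only, not part of the original module): the
# (seconds, microseconds) tuple produced by TimeStampNow() is what the
# *_timestamp fields of jobs and opcodes below store, e.g.
#
#   received = TimeStampNow()   # -> something like (1414213562, 373095)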
def _CallJqUpdate(runner, names, file_name, content):
"""Updates job queue file after virtualizing filename.
"""
virt_file_name = vcluster.MakeVirtualPath(file_name)
return runner.call_jobqueue_update(names, virt_file_name, content)
class _QueuedOpCode(object):
"""Encapsulates an opcode object.
@ivar log: holds the execution log and consists of tuples
of the form C{(log_serial, timestamp, level, message)}
@ivar input: the OpCode we encapsulate
@ivar status: the current status
@ivar result: the result of the LU execution
@ivar start_timestamp: timestamp for the start of the execution
@ivar exec_timestamp: timestamp for the actual LU Exec() function invocation
@ivar stop_timestamp: timestamp for the end of the execution
"""
__slots__ = ["input", "status", "result", "log", "priority",
"start_timestamp", "exec_timestamp", "end_timestamp",
"__weakref__"]
def __init__(self, op):
"""Initializes instances of this class.
@type op: L{opcodes.OpCode}
@param op: the opcode we encapsulate
"""
self.input = op
self.status = constants.OP_STATUS_QUEUED
self.result = None
self.log = []
self.start_timestamp = None
self.exec_timestamp = None
self.end_timestamp = None
# Get initial priority (it might change during the lifetime of this opcode)
self.priority = getattr(op, "priority", constants.OP_PRIO_DEFAULT)
@classmethod
def Restore(cls, state):
"""Restore the _QueuedOpCode from the serialized form.
@type state: dict
@param state: the serialized state
@rtype: _QueuedOpCode
@return: a new _QueuedOpCode instance
"""
obj = _QueuedOpCode.__new__(cls)
obj.input = opcodes.OpCode.LoadOpCode(state["input"])
obj.status = state["status"]
obj.result = state["result"]
obj.log = state["log"]
obj.start_timestamp = state.get("start_timestamp", None)
obj.exec_timestamp = state.get("exec_timestamp", None)
obj.end_timestamp = state.get("end_timestamp", None)
obj.priority = state.get("priority", constants.OP_PRIO_DEFAULT)
return obj
def Serialize(self):
"""Serializes this _QueuedOpCode.
@rtype: dict
@return: the dictionary holding the serialized state
"""
return {
"input": self.input.__getstate__(),
"status": self.status,
"result": self.result,
"log": self.log,
"start_timestamp": self.start_timestamp,
"exec_timestamp": self.exec_timestamp,
"end_timestamp": self.end_timestamp,
"priority": self.priority,
}
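# Minimal round-trip sketch (comments only; assumes an opcode such as
# opcodes.OpTestDelay is available in this build):
#
#   qop = _QueuedOpCode(opcodes.OpTestDelay(duration=0))
#   state = qop.Serialize()              # a plain, JSON-serializable dict
#   restored = _QueuedOpCode.Restore(state)
#   assert restored.status == constants.OP_STATUS_QUEUED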
class _QueuedJob(object):
"""In-memory job representation.
This is what we use to track the user-submitted jobs. Locking must
be taken care of by users of this class.
@type queue: L{JobQueue}
@ivar queue: the parent queue
@ivar id: the job ID
@type ops: list
@ivar ops: the list of _QueuedOpCode that constitute the job
@type log_serial: int
@ivar log_serial: holds the index for the next log entry
@ivar received_timestamp: the timestamp for when the job was received
  @ivar start_timestamp: the timestamp for start of execution
@ivar end_timestamp: the timestamp for end of execution
@ivar writable: Whether the job is allowed to be modified
"""
# pylint: disable=W0212
__slots__ = ["queue", "id", "ops", "log_serial", "ops_iter", "cur_opctx",
"received_timestamp", "start_timestamp", "end_timestamp",
"writable", "archived",
"livelock", "process_id",
"__weakref__"]
def AddReasons(self, pickup=False):
"""Extend the reason trail
Add the reason for all the opcodes of this job to be executed.
"""
count = 0
for queued_op in self.ops:
op = queued_op.input
if pickup:
reason_src_prefix = constants.OPCODE_REASON_SRC_PICKUP
else:
reason_src_prefix = constants.OPCODE_REASON_SRC_OPCODE
reason_src = opcodes_base.NameToReasonSrc(op.__class__.__name__,
reason_src_prefix)
reason_text = "job=%d;index=%d" % (self.id, count)
reason = getattr(op, "reason", [])
reason.append((reason_src, reason_text, utils.EpochNano()))
op.reason = reason
count = count + 1
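# Illustrative sketch of the reason-trail entries appended above (comments
# only): for job 123, the second opcode would gain a tuple along the lines of
#
#   (<src derived via opcodes_base.NameToReasonSrc>, "job=123;index=1",
#    <nanosecond timestamp from utils.EpochNano()>)
#
# the exact source string depends on the opcode class name and on
# constants.OPCODE_REASON_SRC_OPCODE / _PICKUP.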
def __init__(self, queue, job_id, ops, writable):
"""Constructor for the _QueuedJob.
@type queue: L{JobQueue}
@param queue: our parent queue
@type job_id: job_id
@param job_id: our job id
@type ops: list
@param ops: the list of opcodes we hold, which will be encapsulated
in _QueuedOpCodes
@type writable: bool
@param writable: Whether job can be modified
"""
if not ops:
raise errors.GenericError("A job needs at least one opcode")
self.queue = queue
self.id = int(job_id)
self.ops = [_QueuedOpCode(op) for op in ops]
self.AddReasons()
self.log_serial = 0
self.received_timestamp = TimeStampNow()
self.start_timestamp = None
self.end_timestamp = None
self.archived = False
self.livelock = None
self.process_id = None
self.writable = None
self._InitInMemory(self, writable)
assert not self.archived, "New jobs can not be marked as archived"
@staticmethod
def _InitInMemory(obj, writable):
"""Initializes in-memory variables.
"""
obj.writable = writable
obj.ops_iter = None
obj.cur_opctx = None
def __repr__(self):
status = ["%s.%s" % (self.__class__.__module__, self.__class__.__name__),
"id=%s" % self.id,
"ops=%s" % ",".join([op.input.Summary() for op in self.ops])]
return "<%s at %#x>" % (" ".join(status), id(self))
@classmethod
def Restore(cls, queue, state, writable, archived):
"""Restore a _QueuedJob from serialized state:
@type queue: L{JobQueue}
@param queue: to which queue the restored job belongs
@type state: dict
@param state: the serialized state
@type writable: bool
@param writable: Whether job can be modified
@type archived: bool
@param archived: Whether job was already archived
    @rtype: _QueuedJob
    @return: the restored _QueuedJob instance
"""
obj = _QueuedJob.__new__(cls)
obj.queue = queue
obj.id = int(state["id"])
obj.received_timestamp = state.get("received_timestamp", None)
obj.start_timestamp = state.get("start_timestamp", None)
obj.end_timestamp = state.get("end_timestamp", None)
obj.archived = archived
obj.livelock = state.get("livelock", None)
obj.process_id = state.get("process_id", None)
if obj.process_id is not None:
obj.process_id = int(obj.process_id)
obj.ops = []
obj.log_serial = 0
for op_state in state["ops"]:
op = _QueuedOpCode.Restore(op_state)
for log_entry in op.log:
obj.log_serial = max(obj.log_serial, log_entry[0])
obj.ops.append(op)
cls._InitInMemory(obj, writable)
return obj
def Serialize(self):
"""Serialize the _JobQueue instance.
@rtype: dict
@return: the serialized state
"""
return {
"id": self.id,
"ops": [op.Serialize() for op in self.ops],
"start_timestamp": self.start_timestamp,
"end_timestamp": self.end_timestamp,
"received_timestamp": self.received_timestamp,
"livelock": self.livelock,
"process_id": self.process_id,
}
def CalcStatus(self):
"""Compute the status of this job.
This function iterates over all the _QueuedOpCodes in the job and
based on their status, computes the job status.
The algorithm is:
      - if we find a canceled opcode, or one that finished with an error,
        the job status will be the same
      - otherwise, the last opcode with a status of one of:
- waitlock
- canceling
- running
will determine the job status
- otherwise, it means either all opcodes are queued, or success,
and the job status will be the same
@return: the job status
"""
status = constants.JOB_STATUS_QUEUED
all_success = True
for op in self.ops:
if op.status == constants.OP_STATUS_SUCCESS:
continue
all_success = False
if op.status == constants.OP_STATUS_QUEUED:
pass
elif op.status == constants.OP_STATUS_WAITING:
status = constants.JOB_STATUS_WAITING
elif op.status == constants.OP_STATUS_RUNNING:
status = constants.JOB_STATUS_RUNNING
elif op.status == constants.OP_STATUS_CANCELING:
status = constants.JOB_STATUS_CANCELING
break
elif op.status == constants.OP_STATUS_ERROR:
status = constants.JOB_STATUS_ERROR
# The whole job fails if one opcode failed
break
elif op.status == constants.OP_STATUS_CANCELED:
status = constants.OP_STATUS_CANCELED
break
if all_success:
status = constants.JOB_STATUS_SUCCESS
return status
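# Comment-only examples of the mapping implemented by CalcStatus() above
# (using the symbolic opcode statuses rather than their string values):
#
#   [SUCCESS, SUCCESS, SUCCESS]   -> JOB_STATUS_SUCCESS
#   [SUCCESS, RUNNING, QUEUED]    -> JOB_STATUS_RUNNING
#   [SUCCESS, ERROR, QUEUED]      -> JOB_STATUS_ERROR
#   [CANCELING, QUEUED]           -> JOB_STATUS_CANCELING
#   [QUEUED, QUEUED]              -> JOB_STATUS_QUEUED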
def CalcPriority(self):
"""Gets the current priority for this job.
Only unfinished opcodes are considered. When all are done, the default
priority is used.
@rtype: int
"""
priorities = [op.priority for op in self.ops
if op.status not in constants.OPS_FINALIZED]
if not priorities:
# All opcodes are done, assume default priority
return constants.OP_PRIO_DEFAULT
return min(priorities)
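# Note (illustrative): a *smaller* number means a *higher* priority in
# Ganeti (see also CheckPriorityIncrease below, which decrements the value
# towards OP_PRIO_HIGHEST), so min() picks the most urgent pending opcode:
#
#   pending opcode priorities [0, -10, 40]  ->  job priority -10
#
# and a fully finalized job falls back to constants.OP_PRIO_DEFAULT.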
def GetLogEntries(self, newer_than):
"""Selectively returns the log entries.
@type newer_than: None or int
@param newer_than: if this is None, return all log entries,
otherwise return only the log entries with serial higher
than this value
@rtype: list
@return: the list of the log entries selected
"""
if newer_than is None:
serial = -1
else:
serial = newer_than
entries = []
for op in self.ops:
entries.extend(filter(lambda entry: entry[0] > serial, op.log))
return entries
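# Usage sketch (comments only): log entries are (serial, timestamp, type,
# message) tuples, so a polling client would typically pass the highest
# serial it has already seen:
#
#   for (serial, ts, log_type, msg) in job.GetLogEntries(last_seen_serial):
#       last_seen_serial = serial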
def MarkUnfinishedOps(self, status, result):
"""Mark unfinished opcodes with a given status and result.
    This is a utility function for marking all running or waiting-to-be-run
    opcodes with a given status. Opcodes which are already finalized are not
    changed.
@param status: a given opcode status
@param result: the opcode result
"""
not_marked = True
for op in self.ops:
if op.status in constants.OPS_FINALIZED:
assert not_marked, "Finalized opcodes found after non-finalized ones"
continue
op.status = status
op.result = result
not_marked = False
def Finalize(self):
"""Marks the job as finalized.
"""
self.end_timestamp = TimeStampNow()
def Cancel(self):
"""Marks job as canceled/-ing if possible.
@rtype: tuple; (bool, string)
@return: Boolean describing whether job was successfully canceled or marked
as canceling and a text message
"""
status = self.CalcStatus()
if status == constants.JOB_STATUS_QUEUED:
self.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
"Job canceled by request")
self.Finalize()
return (True, "Job %s canceled" % self.id)
elif status == constants.JOB_STATUS_WAITING:
# The worker will notice the new status and cancel the job
self.MarkUnfinishedOps(constants.OP_STATUS_CANCELING, None)
return (True, "Job %s will be canceled" % self.id)
else:
logging.debug("Job %s is no longer waiting in the queue", self.id)
return (False, "Job %s is no longer waiting in the queue" % self.id)
def ChangePriority(self, priority):
"""Changes the job priority.
@type priority: int
@param priority: New priority
@rtype: tuple; (bool, string)
@return: Boolean describing whether job's priority was successfully changed
and a text message
"""
status = self.CalcStatus()
if status in constants.JOBS_FINALIZED:
return (False, "Job %s is finished" % self.id)
elif status == constants.JOB_STATUS_CANCELING:
return (False, "Job %s is cancelling" % self.id)
else:
assert status in (constants.JOB_STATUS_QUEUED,
constants.JOB_STATUS_WAITING,
constants.JOB_STATUS_RUNNING)
changed = False
for op in self.ops:
if (op.status == constants.OP_STATUS_RUNNING or
op.status in constants.OPS_FINALIZED):
assert not changed, \
("Found opcode for which priority should not be changed after"
" priority has been changed for previous opcodes")
continue
assert op.status in (constants.OP_STATUS_QUEUED,
constants.OP_STATUS_WAITING)
changed = True
# Set new priority (doesn't modify opcode input)
op.priority = priority
if changed:
return (True, ("Priorities of pending opcodes for job %s have been"
" changed to %s" % (self.id, priority)))
else:
return (False, "Job %s had no pending opcodes" % self.id)
def SetPid(self, pid):
"""Sets the job's process ID
@type pid: int
@param pid: the process ID
"""
status = self.CalcStatus()
if status in (constants.JOB_STATUS_QUEUED,
constants.JOB_STATUS_WAITING):
if self.process_id is not None:
logging.warning("Replacing the process id %s of job %s with %s",
self.process_id, self.id, pid)
self.process_id = pid
else:
logging.warning("Can set pid only for queued/waiting jobs")
class _OpExecCallbacks(mcpu.OpExecCbBase):
def __init__(self, queue, job, op):
"""Initializes this class.
@type queue: L{JobQueue}
@param queue: Job queue
@type job: L{_QueuedJob}
@param job: Job object
@type op: L{_QueuedOpCode}
@param op: OpCode
"""
super(_OpExecCallbacks, self).__init__()
assert queue, "Queue is missing"
assert job, "Job is missing"
assert op, "Opcode is missing"
self._queue = queue
self._job = job
self._op = op
def _CheckCancel(self):
"""Raises an exception to cancel the job if asked to.
"""
# Cancel here if we were asked to
if self._op.status == constants.OP_STATUS_CANCELING:
logging.debug("Canceling opcode")
raise CancelJob()
def NotifyStart(self):
"""Mark the opcode as running, not lock-waiting.
This is called from the mcpu code as a notifier function, when the LU is
finally about to start the Exec() method. Of course, to have end-user
visible results, the opcode must be initially (before calling into
Processor.ExecOpCode) set to OP_STATUS_WAITING.
"""
assert self._op in self._job.ops
assert self._op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING)
# Cancel here if we were asked to
self._CheckCancel()
logging.debug("Opcode is now running")
self._op.status = constants.OP_STATUS_RUNNING
self._op.exec_timestamp = TimeStampNow()
# And finally replicate the job status
self._queue.UpdateJobUnlocked(self._job)
def NotifyRetry(self):
"""Mark opcode again as lock-waiting.
    This is called from the mcpu code just after calling PrepareRetry.
    The opcode will now acquire locks again (hopefully more of them this time).
"""
self._op.status = constants.OP_STATUS_WAITING
logging.debug("Opcode will be retried. Back to waiting.")
def _AppendFeedback(self, timestamp, log_type, log_msgs):
"""Internal feedback append function, with locks
@type timestamp: tuple (int, int)
@param timestamp: timestamp of the log message
@type log_type: string
@param log_type: log type (one of Types.ELogType)
@type log_msgs: any
@param log_msgs: log data to append
"""
# This should be removed once Feedback() has a clean interface.
# Feedback can be called with anything, we interpret ELogMessageList as
# messages that have to be individually added to the log list, but pushed
# in a single update. Other msgtypes are only transparently passed forward.
if log_type == constants.ELOG_MESSAGE_LIST:
log_type = constants.ELOG_MESSAGE
else:
log_msgs = [log_msgs]
for msg in log_msgs:
self._job.log_serial += 1
self._op.log.append((self._job.log_serial, timestamp, log_type, msg))
self._queue.UpdateJobUnlocked(self._job, replicate=False)
# TODO: Cleanup calling conventions, make them explicit
def Feedback(self, *args):
"""Append a log entry.
Calling conventions:
    arg[0]: (optional) string, message type (one of Types.ELogType); if
      omitted, the single argument is taken as the log message itself
    arg[1]: data to be interpreted as the log message
"""
assert len(args) < 3
# TODO: Use separate keyword arguments for a single string vs. a list.
if len(args) == 1:
log_type = constants.ELOG_MESSAGE
log_msg = args[0]
else:
(log_type, log_msg) = args
# The time is split to make serialization easier and not lose
# precision.
timestamp = utils.SplitTime(time.time())
self._AppendFeedback(timestamp, log_type, log_msg)
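# Calling-convention sketch for Feedback() (comments only; "cbs" stands for
# an _OpExecCallbacks instance):
#
#   cbs.Feedback("plain message")                          # ELOG_MESSAGE
#   cbs.Feedback(constants.ELOG_MESSAGE, "typed message")  # explicit type
#   cbs.Feedback(constants.ELOG_MESSAGE_LIST, ["m1", "m2"])
#
# the list form is fanned out into individual log entries by
# _AppendFeedback above, within a single queue update.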
def CurrentPriority(self):
"""Returns current priority for opcode.
"""
assert self._op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING)
# Cancel here if we were asked to
self._CheckCancel()
return self._op.priority
def SubmitManyJobs(self, jobs):
"""Submits jobs for processing.
See L{JobQueue.SubmitManyJobs}.
"""
# Locking is done in job queue
return self._queue.SubmitManyJobs(jobs)
def _EncodeOpError(err):
"""Encodes an error which occurred while processing an opcode.
"""
if isinstance(err, errors.GenericError):
to_encode = err
else:
to_encode = errors.OpExecError(str(err))
return errors.EncodeException(to_encode)
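# Example (comments only): non-Ganeti exceptions are wrapped before being
# encoded, so
#
#   _EncodeOpError(ValueError("boom"))
#
# behaves like errors.EncodeException(errors.OpExecError("boom")), while a
# GenericError subclass is encoded as-is.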
class _TimeoutStrategyWrapper:
def __init__(self, fn):
"""Initializes this class.
"""
self._fn = fn
self._next = None
def _Advance(self):
"""Gets the next timeout if necessary.
"""
if self._next is None:
self._next = self._fn()
def Peek(self):
"""Returns the next timeout.
"""
self._Advance()
return self._next
def Next(self):
"""Returns the current timeout and advances the internal state.
"""
self._Advance()
result = self._next
self._next = None
return result
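# Semantics sketch (illustrative; assumes a strategy whose NextAttempt()
# yields 1.0 and then None):
#
#   w = _TimeoutStrategyWrapper(strategy.NextAttempt)
#   w.Peek()   # -> 1.0, does not consume the value
#   w.Next()   # -> 1.0, consumes it
#   w.Peek()   # -> None once the strategy has run out of timed attempts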
class _OpExecContext:
def __init__(self, op, index, log_prefix, timeout_strategy_factory):
"""Initializes this class.
"""
self.op = op
self.index = index
self.log_prefix = log_prefix
self.summary = op.input.Summary()
# Create local copy to modify
if getattr(op.input, opcodes_base.DEPEND_ATTR, None):
self.jobdeps = op.input.depends[:]
else:
self.jobdeps = None
self._timeout_strategy_factory = timeout_strategy_factory
self._ResetTimeoutStrategy()
def _ResetTimeoutStrategy(self):
"""Creates a new timeout strategy.
"""
self._timeout_strategy = \
_TimeoutStrategyWrapper(self._timeout_strategy_factory().NextAttempt)
def CheckPriorityIncrease(self):
"""Checks whether priority can and should be increased.
Called when locks couldn't be acquired.
"""
op = self.op
# Exhausted all retries and next round should not use blocking acquire
# for locks?
if (self._timeout_strategy.Peek() is None and
op.priority > constants.OP_PRIO_HIGHEST):
logging.debug("Increasing priority")
op.priority -= 1
self._ResetTimeoutStrategy()
return True
return False
def GetNextLockTimeout(self):
"""Returns the next lock acquire timeout.
"""
return self._timeout_strategy.Next()
class _JobProcessor(object):
(DEFER,
WAITDEP,
FINISHED) = range(1, 4)
def __init__(self, queue, opexec_fn, job,
_timeout_strategy_factory=mcpu.LockAttemptTimeoutStrategy):
"""Initializes this class.
"""
self.queue = queue
self.opexec_fn = opexec_fn
self.job = job
self._timeout_strategy_factory = _timeout_strategy_factory
@staticmethod
def _FindNextOpcode(job, timeout_strategy_factory):
"""Locates the next opcode to run.
@type job: L{_QueuedJob}
@param job: Job object
@param timeout_strategy_factory: Callable to create new timeout strategy
"""
# Create some sort of a cache to speed up locating next opcode for future
# lookups
# TODO: Consider splitting _QueuedJob.ops into two separate lists, one for
# pending and one for processed ops.
if job.ops_iter is None:
job.ops_iter = enumerate(job.ops)
# Find next opcode to run
while True:
try:
(idx, op) = job.ops_iter.next()
except StopIteration:
raise errors.ProgrammerError("Called for a finished job")
if op.status == constants.OP_STATUS_RUNNING:
# Found an opcode already marked as running
raise errors.ProgrammerError("Called for job marked as running")
opctx = _OpExecContext(op, idx, "Op %s/%s" % (idx + 1, len(job.ops)),
timeout_strategy_factory)
if op.status not in constants.OPS_FINALIZED:
return opctx
# This is a job that was partially completed before master daemon
# shutdown, so it can be expected that some opcodes are already
# completed successfully (if any did error out, then the whole job
# should have been aborted and not resubmitted for processing).
logging.info("%s: opcode %s already processed, skipping",
opctx.log_prefix, opctx.summary)
@staticmethod
def _MarkWaitlock(job, op):
"""Marks an opcode as waiting for locks.
The job's start timestamp is also set if necessary.
@type job: L{_QueuedJob}
@param job: Job object
@type op: L{_QueuedOpCode}
@param op: Opcode object
"""
assert op in job.ops
assert op.status in (constants.OP_STATUS_QUEUED,
constants.OP_STATUS_WAITING)
update = False
op.result = None
if op.status == constants.OP_STATUS_QUEUED:
op.status = constants.OP_STATUS_WAITING
update = True
if op.start_timestamp is None:
op.start_timestamp = TimeStampNow()
update = True
if job.start_timestamp is None:
job.start_timestamp = op.start_timestamp
update = True
assert op.status == constants.OP_STATUS_WAITING
return update
@staticmethod
def _CheckDependencies(queue, job, opctx):
"""Checks if an opcode has dependencies and if so, processes them.
@type queue: L{JobQueue}
@param queue: Queue object
@type job: L{_QueuedJob}
@param job: Job object
@type opctx: L{_OpExecContext}
@param opctx: Opcode execution context
@rtype: bool
@return: Whether opcode will be re-scheduled by dependency tracker
"""
op = opctx.op
result = False
while opctx.jobdeps:
(dep_job_id, dep_status) = opctx.jobdeps[0]
(depresult, depmsg) = queue.depmgr.CheckAndRegister(job, dep_job_id,
dep_status)
assert ht.TNonEmptyString(depmsg), "No dependency message"
logging.info("%s: %s", opctx.log_prefix, depmsg)
if depresult == _JobDependencyManager.CONTINUE:
# Remove dependency and continue
opctx.jobdeps.pop(0)
elif depresult == _JobDependencyManager.WAIT:
# Need to wait for notification, dependency tracker will re-add job
# to workerpool
result = True
break
elif depresult == _JobDependencyManager.CANCEL:
# Job was cancelled, cancel this job as well
job.Cancel()
assert op.status == constants.OP_STATUS_CANCELING
break
elif depresult in (_JobDependencyManager.WRONGSTATUS,
_JobDependencyManager.ERROR):
# Job failed or there was an error, this job must fail
op.status = constants.OP_STATUS_ERROR
op.result = _EncodeOpError(errors.OpExecError(depmsg))
break
else:
raise errors.ProgrammerError("Unknown dependency result '%s'" %
depresult)
return result
def _ExecOpCodeUnlocked(self, opctx):
"""Processes one opcode and returns the result.
"""
op = opctx.op
assert op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING)
# The very last check if the job was cancelled before trying to execute
if op.status == constants.OP_STATUS_CANCELING:
return (constants.OP_STATUS_CANCELING, None)
timeout = opctx.GetNextLockTimeout()
try:
# Make sure not to hold queue lock while calling ExecOpCode
result = self.opexec_fn(op.input,
_OpExecCallbacks(self.queue, self.job, op),
timeout=timeout)
except mcpu.LockAcquireTimeout:
assert timeout is not None, "Received timeout for blocking acquire"
logging.debug("Couldn't acquire locks in %0.6fs", timeout)
assert op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING)
# Was job cancelled while we were waiting for the lock?
if op.status == constants.OP_STATUS_CANCELING:
return (constants.OP_STATUS_CANCELING, None)
# Stay in waitlock while trying to re-acquire lock
return (constants.OP_STATUS_WAITING, None)
except CancelJob:
logging.exception("%s: Canceling job", opctx.log_prefix)
assert op.status == constants.OP_STATUS_CANCELING
return (constants.OP_STATUS_CANCELING, None)
except Exception, err: # pylint: disable=W0703
logging.exception("%s: Caught exception in %s",
opctx.log_prefix, opctx.summary)
return (constants.OP_STATUS_ERROR, _EncodeOpError(err))
else:
logging.debug("%s: %s successful",
opctx.log_prefix, opctx.summary)
return (constants.OP_STATUS_SUCCESS, result)
def __call__(self, _nextop_fn=None):
"""Continues execution of a job.
@param _nextop_fn: Callback function for tests
@return: C{FINISHED} if job is fully processed, C{DEFER} if the job should
be deferred and C{WAITDEP} if the dependency manager
(L{_JobDependencyManager}) will re-schedule the job when appropriate
"""
queue = self.queue
job = self.job
logging.debug("Processing job %s", job.id)
try:
opcount = len(job.ops)
assert job.writable, "Expected writable job"
# Don't do anything for finalized jobs
if job.CalcStatus() in constants.JOBS_FINALIZED:
return self.FINISHED
# Is a previous opcode still pending?
if job.cur_opctx:
opctx = job.cur_opctx
job.cur_opctx = None
else:
if __debug__ and _nextop_fn:
_nextop_fn()
opctx = self._FindNextOpcode(job, self._timeout_strategy_factory)
op = opctx.op
# Consistency check
assert compat.all(i.status in (constants.OP_STATUS_QUEUED,
constants.OP_STATUS_CANCELING)
for i in job.ops[opctx.index + 1:])
assert op.status in (constants.OP_STATUS_QUEUED,
constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING)
assert (op.priority <= constants.OP_PRIO_LOWEST and
op.priority >= constants.OP_PRIO_HIGHEST)
waitjob = None
if op.status != constants.OP_STATUS_CANCELING:
assert op.status in (constants.OP_STATUS_QUEUED,
constants.OP_STATUS_WAITING)
# Prepare to start opcode
if self._MarkWaitlock(job, op):
# Write to disk
queue.UpdateJobUnlocked(job)
assert op.status == constants.OP_STATUS_WAITING
assert job.CalcStatus() == constants.JOB_STATUS_WAITING
assert job.start_timestamp and op.start_timestamp
assert waitjob is None
# Check if waiting for a job is necessary
waitjob = self._CheckDependencies(queue, job, opctx)
assert op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING,
constants.OP_STATUS_ERROR)
if not (waitjob or op.status in (constants.OP_STATUS_CANCELING,
constants.OP_STATUS_ERROR)):
logging.info("%s: opcode %s waiting for locks",
opctx.log_prefix, opctx.summary)
assert not opctx.jobdeps, "Not all dependencies were removed"
(op_status, op_result) = self._ExecOpCodeUnlocked(opctx)
op.status = op_status
op.result = op_result
assert not waitjob
if op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_QUEUED):
# waiting: Couldn't get locks in time
# queued: Queue is shutting down
assert not op.end_timestamp
else:
# Finalize opcode
op.end_timestamp = TimeStampNow()
if op.status == constants.OP_STATUS_CANCELING:
assert not compat.any(i.status != constants.OP_STATUS_CANCELING
for i in job.ops[opctx.index:])
else:
assert op.status in constants.OPS_FINALIZED
if op.status == constants.OP_STATUS_QUEUED:
# Queue is shutting down
assert not waitjob
finalize = False
# Reset context
job.cur_opctx = None
# In no case must the status be finalized here
assert job.CalcStatus() == constants.JOB_STATUS_QUEUED
elif op.status == constants.OP_STATUS_WAITING or waitjob:
finalize = False
if not waitjob and opctx.CheckPriorityIncrease():
# Priority was changed, need to update on-disk file
queue.UpdateJobUnlocked(job)
# Keep around for another round
job.cur_opctx = opctx
assert (op.priority <= constants.OP_PRIO_LOWEST and
op.priority >= constants.OP_PRIO_HIGHEST)
# In no case must the status be finalized here
assert job.CalcStatus() == constants.JOB_STATUS_WAITING
else:
# Ensure all opcodes so far have been successful
assert (opctx.index == 0 or
compat.all(i.status == constants.OP_STATUS_SUCCESS
for i in job.ops[:opctx.index]))
# Reset context
job.cur_opctx = None
if op.status == constants.OP_STATUS_SUCCESS:
finalize = False
elif op.status == constants.OP_STATUS_ERROR:
# If we get here, we cannot afford to check for any consistency
# any more, we just want to clean up.
# TODO: Actually, it wouldn't be a bad idea to start a timer
# here to kill the whole process.
to_encode = errors.OpExecError("Preceding opcode failed")
job.MarkUnfinishedOps(constants.OP_STATUS_ERROR,
_EncodeOpError(to_encode))
finalize = True
elif op.status == constants.OP_STATUS_CANCELING:
job.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
"Job canceled by request")
finalize = True
else:
raise errors.ProgrammerError("Unknown status '%s'" % op.status)
if opctx.index == (opcount - 1):
# Finalize on last opcode
finalize = True
if finalize:
# All opcodes have been run, finalize job
job.Finalize()
# Write to disk. If the job status is final, this is the final write
# allowed. Once the file has been written, it can be archived anytime.
queue.UpdateJobUnlocked(job)
assert not waitjob
if finalize:
logging.info("Finished job %s, status = %s", job.id, job.CalcStatus())
return self.FINISHED
assert not waitjob or queue.depmgr.JobWaiting(job)
if waitjob:
return self.WAITDEP
else:
return self.DEFER
finally:
assert job.writable, "Job became read-only while being processed"
class _JobDependencyManager:
"""Keeps track of job dependencies.
"""
(WAIT,
ERROR,
CANCEL,
CONTINUE,
WRONGSTATUS) = range(1, 6)
def __init__(self, getstatus_fn):
"""Initializes this class.
"""
self._getstatus_fn = getstatus_fn
self._waiters = {}
def JobWaiting(self, job):
"""Checks if a job is waiting.
"""
return compat.any(job in jobs
for jobs in self._waiters.values())
def CheckAndRegister(self, job, dep_job_id, dep_status):
"""Checks if a dependency job has the requested status.
If the other job is not yet in a finalized status, the calling job will be
notified (re-added to the workerpool) at a later point.
@type job: L{_QueuedJob}
@param job: Job object
@type dep_job_id: int
@param dep_job_id: ID of dependency job
@type dep_status: list
@param dep_status: Required status
"""
assert ht.TJobId(job.id)
assert ht.TJobId(dep_job_id)
assert ht.TListOf(ht.TElemOf(constants.JOBS_FINALIZED))(dep_status)
if job.id == dep_job_id:
return (self.ERROR, "Job can't depend on itself")
# Get status of dependency job
try:
status = self._getstatus_fn(dep_job_id)
except errors.JobLost, err:
return (self.ERROR, "Dependency error: %s" % err)
assert status in constants.JOB_STATUS_ALL
job_id_waiters = self._waiters.setdefault(dep_job_id, set())
if status not in constants.JOBS_FINALIZED:
# Register for notification and wait for job to finish
job_id_waiters.add(job)
return (self.WAIT,
"Need to wait for job %s, wanted status '%s'" %
(dep_job_id, dep_status))
# Remove from waiters list
if job in job_id_waiters:
job_id_waiters.remove(job)
if (status == constants.JOB_STATUS_CANCELED and
constants.JOB_STATUS_CANCELED not in dep_status):
return (self.CANCEL, "Dependency job %s was cancelled" % dep_job_id)
elif not dep_status or status in dep_status:
return (self.CONTINUE,
"Dependency job %s finished with status '%s'" %
(dep_job_id, status))
else:
return (self.WRONGSTATUS,
"Dependency job %s finished with status '%s',"
" not one of '%s' as required" %
(dep_job_id, status, utils.CommaJoin(dep_status)))
def _RemoveEmptyWaitersUnlocked(self):
"""Remove all jobs without actual waiters.
"""
for job_id in [job_id for (job_id, waiters) in self._waiters.items()
if not waiters]:
del self._waiters[job_id]
class JobQueue(object):
"""Queue used to manage the jobs.
"""
def __init__(self, context, cfg):
"""Constructor for JobQueue.
The constructor will initialize the job queue object and then
start loading the current jobs from disk, either for starting them
    (if they were queued) or for aborting them (if they were already
running).
@type context: GanetiContext
@param context: the context object for access to the configuration
data and other ganeti objects
"""
self.context = context
self._memcache = weakref.WeakValueDictionary()
self._my_hostname = netutils.Hostname.GetSysName()
# Get initial list of nodes
self._nodes = dict((n.name, n.primary_ip)
for n in cfg.GetAllNodesInfo().values()
if n.master_candidate)
# Remove master node
self._nodes.pop(self._my_hostname, None)
# Job dependencies
self.depmgr = _JobDependencyManager(self._GetJobStatusForDependencies)
def _GetRpc(self, address_list):
"""Gets RPC runner with context.
"""
return rpc.JobQueueRunner(self.context, address_list)
@staticmethod
def _CheckRpcResult(result, nodes, failmsg):
"""Verifies the status of an RPC call.
Since we aim to keep consistency should this node (the current
    master) fail, we will log errors if our rpc calls fail, and especially
    log the case when more than half of the nodes fail.
@param result: the data as returned from the rpc call
@type nodes: list
@param nodes: the list of nodes we made the call to
@type failmsg: str
@param failmsg: the identifier to be used for logging
"""
failed = []
success = []
for node in nodes:
msg = result[node].fail_msg
if msg:
failed.append(node)
logging.error("RPC call %s (%s) failed on node %s: %s",
result[node].call, failmsg, node, msg)
else:
success.append(node)
# +1 for the master node
if (len(success) + 1) < len(failed):
# TODO: Handle failing nodes
logging.error("More than half of the nodes failed")
def _GetNodeIp(self):
"""Helper for returning the node name/ip list.
@rtype: (list, list)
@return: a tuple of two lists, the first one with the node
names and the second one with the node addresses
"""
# TODO: Change to "tuple(map(list, zip(*self._nodes.items())))"?
name_list = self._nodes.keys()
addr_list = [self._nodes[name] for name in name_list]
return name_list, addr_list
def _UpdateJobQueueFile(self, file_name, data, replicate):
"""Writes a file locally and then replicates it to all nodes.
This function will replace the contents of a file on the local
node and then replicate it to all the other nodes we have.
@type file_name: str
@param file_name: the path of the file to be replicated
@type data: str
@param data: the new contents of the file
@type replicate: boolean
@param replicate: whether to spread the changes to the remote nodes
"""
getents = runtime.GetEnts()
utils.WriteFile(file_name, data=data, uid=getents.masterd_uid,
gid=getents.daemons_gid,
mode=constants.JOB_QUEUE_FILES_PERMS)
if replicate:
names, addrs = self._GetNodeIp()
result = _CallJqUpdate(self._GetRpc(addrs), names, file_name, data)
self._CheckRpcResult(result, self._nodes, "Updating %s" % file_name)
  def _RenameFilesUnlocked(self, rename):
    """Renames a file locally and then replicates the change.
This function will rename a file in the local queue directory
and then replicate this rename to all the other nodes we have.
@type rename: list of (old, new)
@param rename: List containing tuples mapping old to new names
"""
# Rename them locally
for old, new in rename:
utils.RenameFile(old, new, mkdir=True)
# ... and on all nodes
names, addrs = self._GetNodeIp()
result = self._GetRpc(addrs).call_jobqueue_rename(names, rename)
self._CheckRpcResult(result, self._nodes, "Renaming files (%r)" % rename)
@staticmethod
def _GetJobPath(job_id):
"""Returns the job file for a given job id.
@type job_id: str
@param job_id: the job identifier
@rtype: str
@return: the path to the job file
"""
return utils.PathJoin(pathutils.QUEUE_DIR, "job-%s" % job_id)
@staticmethod
  def _GetArchivedJobPath(job_id):
    """Returns the archived job file for a given job id.
@type job_id: str
@param job_id: the job identifier
@rtype: str
@return: the path to the archived job file
"""
return utils.PathJoin(pathutils.JOB_QUEUE_ARCHIVE_DIR,
jstore.GetArchiveDirectory(job_id),
"job-%s" % job_id)
@staticmethod
def _DetermineJobDirectories(archived):
"""Build list of directories containing job files.
@type archived: bool
@param archived: Whether to include directories for archived jobs
@rtype: list
"""
result = [pathutils.QUEUE_DIR]
if archived:
archive_path = pathutils.JOB_QUEUE_ARCHIVE_DIR
result.extend(map(compat.partial(utils.PathJoin, archive_path),
utils.ListVisibleFiles(archive_path)))
return result
@classmethod
def _GetJobIDsUnlocked(cls, sort=True, archived=False):
"""Return all known job IDs.
The method only looks at disk because it's a requirement that all
jobs are present on disk (so in the _memcache we don't have any
extra IDs).
@type sort: boolean
@param sort: perform sorting on the returned job ids
@rtype: list
@return: the list of job IDs
"""
jlist = []
for path in cls._DetermineJobDirectories(archived):
for filename in utils.ListVisibleFiles(path):
m = constants.JOB_FILE_RE.match(filename)
if m:
jlist.append(int(m.group(1)))
if sort:
jlist.sort()
return jlist
def _LoadJobUnlocked(self, job_id):
"""Loads a job from the disk or memory.
    Given a job id, this will return the cached job object if it
    exists, or try to load the job from disk. If loading from
disk, it will also add the job to the cache.
@type job_id: int
@param job_id: the job id
@rtype: L{_QueuedJob} or None
@return: either None or the job object
"""
assert isinstance(job_id, int), "Job queue: Supplied job id is not an int!"
job = self._memcache.get(job_id, None)
if job:
logging.debug("Found job %s in memcache", job_id)
assert job.writable, "Found read-only job in memcache"
return job
try:
job = JobQueue._LoadJobFromDisk(self, job_id, False)
if job is None:
return job
except errors.JobFileCorrupted:
old_path = self._GetJobPath(job_id)
new_path = self._GetArchivedJobPath(job_id)
if old_path == new_path:
# job already archived (future case)
logging.exception("Can't parse job %s", job_id)
else:
# non-archived case
logging.exception("Can't parse job %s, will archive.", job_id)
self._RenameFilesUnlocked([(old_path, new_path)])
return None
assert job.writable, "Job just loaded is not writable"
self._memcache[job_id] = job
logging.debug("Added job %s to the cache", job_id)
return job
@staticmethod
def _LoadJobFromDisk(queue, job_id, try_archived, writable=None):
"""Load the given job file from disk.
Given a job file, read, load and restore it in a _QueuedJob format.
@type job_id: int
@param job_id: job identifier
@type try_archived: bool
@param try_archived: Whether to try loading an archived job
@rtype: L{_QueuedJob} or None
@return: either None or the job object
"""
path_functions = [(JobQueue._GetJobPath, False)]
if try_archived:
path_functions.append((JobQueue._GetArchivedJobPath, True))
raw_data = None
archived = None
for (fn, archived) in path_functions:
filepath = fn(job_id)
logging.debug("Loading job from %s", filepath)
try:
raw_data = utils.ReadFile(filepath)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
break
if not raw_data:
logging.debug("No data available for job %s", job_id)
return None
if writable is None:
writable = not archived
try:
data = serializer.LoadJson(raw_data)
job = _QueuedJob.Restore(queue, data, writable, archived)
except Exception, err: # pylint: disable=W0703
raise errors.JobFileCorrupted(err)
return job
@staticmethod
def SafeLoadJobFromDisk(queue, job_id, try_archived, writable=None):
"""Load the given job file from disk.
Given a job file, read, load and restore it in a _QueuedJob format.
In case of error reading the job, it gets returned as None, and the
exception is logged.
@type job_id: int
@param job_id: job identifier
@type try_archived: bool
@param try_archived: Whether to try loading an archived job
@rtype: L{_QueuedJob} or None
@return: either None or the job object
"""
try:
return JobQueue._LoadJobFromDisk(queue, job_id, try_archived,
writable=writable)
except (errors.JobFileCorrupted, EnvironmentError):
logging.exception("Can't load/parse job %s", job_id)
return None
@classmethod
def SubmitManyJobs(cls, jobs):
"""Create and store multiple jobs.
"""
return luxi.Client(address=pathutils.QUERY_SOCKET).SubmitManyJobs(jobs)
@staticmethod
def _ResolveJobDependencies(resolve_fn, deps):
"""Resolves relative job IDs in dependencies.
@type resolve_fn: callable
@param resolve_fn: Function to resolve a relative job ID
@type deps: list
@param deps: Dependencies
@rtype: tuple; (boolean, string or list)
@return: If successful (first tuple item), the returned list contains
resolved job IDs along with the requested status; if not successful,
the second element is an error message
"""
result = []
for (dep_job_id, dep_status) in deps:
if ht.TRelativeJobId(dep_job_id):
assert ht.TInt(dep_job_id) and dep_job_id < 0
try:
job_id = resolve_fn(dep_job_id)
except IndexError:
# Abort
return (False, "Unable to resolve relative job ID %s" % dep_job_id)
else:
job_id = dep_job_id
result.append((job_id, dep_status))
return (True, result)
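  # Illustrative example (added, not part of the original code): assuming
  # resolve_fn maps the relative ID -1 to job 1234, the dependency list
  #
  #   [(-1, [constants.JOB_STATUS_SUCCESS]), (100, [])]
  #
  # resolves to (True, [(1234, [constants.JOB_STATUS_SUCCESS]), (100, [])]).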
def _GetJobStatusForDependencies(self, job_id):
"""Gets the status of a job for dependencies.
@type job_id: int
@param job_id: Job ID
@raise errors.JobLost: If job can't be found
"""
# Not using in-memory cache as doing so would require an exclusive lock
# Try to load from disk
job = JobQueue.SafeLoadJobFromDisk(self, job_id, True, writable=False)
if job:
assert not job.writable, "Got writable job" # pylint: disable=E1101
if job:
return job.CalcStatus()
raise errors.JobLost("Job %s not found" % job_id)
def UpdateJobUnlocked(self, job, replicate=True):
"""Update a job's on disk storage.
After a job has been modified, this function needs to be called in
order to write the changes to disk and replicate them to the other
nodes.
@type job: L{_QueuedJob}
@param job: the changed job
@type replicate: boolean
@param replicate: whether to replicate the change to remote nodes
"""
if __debug__:
finalized = job.CalcStatus() in constants.JOBS_FINALIZED
assert (finalized ^ (job.end_timestamp is None))
assert job.writable, "Can't update read-only job"
assert not job.archived, "Can't update archived job"
filename = self._GetJobPath(job.id)
data = serializer.DumpJson(job.Serialize())
logging.debug("Writing job %s to %s", job.id, filename)
self._UpdateJobQueueFile(filename, data, replicate)
def HasJobBeenFinalized(self, job_id):
"""Checks if a job has been finalized.
@type job_id: int
@param job_id: Job identifier
@rtype: boolean
@return: True if the job has been finalized,
False if the timeout has been reached,
None if the job doesn't exist
"""
job = JobQueue.SafeLoadJobFromDisk(self, job_id, True, writable=False)
if job is not None:
return job.CalcStatus() in constants.JOBS_FINALIZED
elif cluster.LUClusterDestroy.clusterHasBeenDestroyed:
# FIXME: The above variable is a temporary workaround until the Python job
# queue is completely removed. When removing the job queue, also remove
# the variable from LUClusterDestroy.
return True
else:
return None
def CancelJob(self, job_id):
"""Cancels a job.
This will only succeed if the job has not started yet.
@type job_id: int
@param job_id: job ID of job to be cancelled.
"""
logging.info("Cancelling job %s", job_id)
return self._ModifyJobUnlocked(job_id, lambda job: job.Cancel())
def ChangeJobPriority(self, job_id, priority):
"""Changes a job's priority.
@type job_id: int
@param job_id: ID of the job whose priority should be changed
@type priority: int
@param priority: New priority
"""
logging.info("Changing priority of job %s to %s", job_id, priority)
if priority not in constants.OP_PRIO_SUBMIT_VALID:
allowed = utils.CommaJoin(constants.OP_PRIO_SUBMIT_VALID)
raise errors.GenericError("Invalid priority %s, allowed are %s" %
(priority, allowed))
def fn(job):
(success, msg) = job.ChangePriority(priority)
return (success, msg)
return self._ModifyJobUnlocked(job_id, fn)
def _ModifyJobUnlocked(self, job_id, mod_fn):
"""Modifies a job.
@type job_id: int
@param job_id: Job ID
@type mod_fn: callable
@param mod_fn: Modifying function, receiving job object as parameter,
returning tuple of (status boolean, message string)
"""
job = self._LoadJobUnlocked(job_id)
if not job:
logging.debug("Job %s not found", job_id)
return (False, "Job %s not found" % job_id)
assert job.writable, "Can't modify read-only job"
assert not job.archived, "Can't modify archived job"
(success, msg) = mod_fn(job)
if success:
# If the job was finalized (e.g. cancelled), this is the final write
# allowed. The job can be archived anytime.
self.UpdateJobUnlocked(job)
return (success, msg)
| bsd-2-clause | -2,647,619,656,363,776,500 | 29.403734 | 80 | 0.644055 | false | 3.897098 | false | false | false |
tmoer/multimodal_varinf | networks/network_rl.py | 1 | 7832 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 2 14:48:24 2017
@author: thomas
"""
#from layers import Latent_Layer
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from tfutils.helpers import repeat_v2
from tfutils.distributions import logsumexp, discretized_logistic
from layers import Latent_Layer
class Network(object):
''' VAE & RL template '''
def __init__(self,hps,state_dim,binsize=6):
# binsize = the number of discrete categories per dimension of the state (x and y below).
# Input and output are normalized over this quantity.
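        # For example (illustrative, assuming the default binsize=6): a raw
        # coordinate of 3 enters the network as 3/6 = 0.5, and a sampled
        # output of 0.5 is mapped back to 3 via round(clip(y, 0, 1) * binsize).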
# placeholders
self.x = x = tf.placeholder("float32", shape=[None,state_dim])
self.y = y = tf.placeholder("float32", shape=[None,state_dim])
self.a = a = tf.placeholder("float32", shape=[None,1])
self.Qtarget = Qtarget = tf.placeholder("float32", shape=[None,1])
self.is_training = is_training = tf.placeholder("bool") # if True: sample from q, else sample from p
self.k = k = tf.placeholder('int32') # number of importance samples
self.temp = temp = tf.Variable(5.0,name='temperature',trainable=False) # Temperature for discrete latents
self.lamb = lamb = tf.Variable(1.0,name="lambda",trainable=False) # Lambda for KL annealing
xa = tf.concat([x/binsize,a],axis=1)
# Importance sampling: repeats along second dimension
xa_rep = repeat_v2(xa,k)
y_rep = repeat_v2(y/binsize,k)
# RL part of the graph
with tf.variable_scope('q_net'):
rl1 = slim.fully_connected(x,50,tf.nn.relu)
rl2 = slim.fully_connected(rl1,50,tf.nn.relu)
rl3 = slim.fully_connected(rl2,50,activation_fn=None)
self.Qsa = Qsa = slim.fully_connected(rl3,4,activation_fn=None)
if hps.use_target_net:
with tf.variable_scope('target_net'):
rl1_t = slim.fully_connected(x,50,tf.nn.relu)
rl2_t = slim.fully_connected(rl1_t,50,tf.nn.relu)
rl3_t = slim.fully_connected(rl2_t,50,activation_fn=None)
self.Qsa_t = slim.fully_connected(rl3_t,4,activation_fn=None)
copy_ops = []
q_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_net')
tar_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')
for tar,q in zip(q_var,tar_var):
copy_op = q.assign(tar)
copy_ops.append(copy_op)
self.copy_op = tf.group(*copy_ops, name='copy_op')
a_onehot = tf.one_hot(tf.to_int32(tf.squeeze(a,axis=1)),4,1.0,0.0)
Qs = tf.reduce_sum(a_onehot*Qsa,reduction_indices=1) ## identify Qsa based on a
self.rl_cost = rl_cost = tf.nn.l2_loss(Qs - Qtarget)
# Batch norm: skip for now
# Encoder x,y --> h
xy = tf.concat([xa_rep,y_rep],1) # concatenate along last dim
h_up = slim.fully_connected(xy,hps.h_size,tf.nn.relu)
# Initialize ladders
layers = []
for i in range(hps.depth):
layers.append(Latent_Layer(hps,hps.var_type[i],i))
# Ladder up
for i,layer in enumerate(layers):
h_up = layer.up(h_up)
# Ladder down
# Prior x --> p_z
h_down = slim.fully_connected(xa_rep,hps.h_size,tf.nn.relu)
kl_sum = 0.0
kl_sample = 0.0
for i,layer in reversed(list(enumerate(layers))):
h_down, kl_cur, kl_sam = layer.down(h_down,is_training,temp,lamb)
kl_sum += kl_cur
kl_sample += kl_sam
# Decoder: x,z --> y
xz = tf.concat([slim.flatten(h_down),xa_rep],1)
dec1 = slim.fully_connected(xz,250,tf.nn.relu)
dec2 = slim.fully_connected(dec1,250,tf.nn.relu)
dec3 = slim.fully_connected(dec2,250,activation_fn=None)
mu_y = slim.fully_connected(dec3,state_dim,activation_fn=None)
if hps.ignore_sigma_outcome:
log_dec_noise = tf.zeros(tf.shape(mu_y))
else:
log_dec_noise = slim.fully_connected(dec3,1,activation_fn=None)
# p(y|x,z)
if hps.out_lik == 'normal':
dec_noise = tf.exp(tf.clip_by_value(log_dec_noise,-10,10))
outdist = tf.contrib.distributions.Normal(mu_y,dec_noise)
self.log_py_x = log_py_x = tf.reduce_sum(outdist.log_prob(y_rep),axis=1)
self.nats = -1*tf.reduce_mean(logsumexp(tf.reshape(log_py_x - kl_sample,[-1,k])) - tf.log(tf.to_float(k)))
y_sample = outdist.sample() if not hps.ignore_sigma_outcome else mu_y
self.y_sample = tf.to_int32(tf.round(tf.clip_by_value(y_sample,0,1)*binsize))
elif hps.out_lik == 'discretized_logistic':
self.log_py_x = log_py_x = tf.reduce_sum(discretized_logistic(mu_y,log_dec_noise,binsize=1,sample=y_rep),axis=1)
outdist = tf.contrib.distributions.Logistic(loc=mu_y,scale = tf.exp(log_dec_noise))
self.nats = -1*tf.reduce_mean(logsumexp(tf.reshape(tf.reduce_sum(outdist.log_prob(y_rep),axis=1) - kl_sample,[-1,k]))- tf.log(tf.to_float(k)))
y_sample = outdist.sample() if not hps.ignore_sigma_outcome else mu_y
self.y_sample = tf.to_int32(tf.round(tf.clip_by_value(y_sample,0,1)*binsize))
elif hps.out_lik == 'discrete':
logits_y = slim.fully_connected(dec3,state_dim*(binsize+1),activation_fn=None)
logits_y = tf.reshape(logits_y,[-1,state_dim,binsize+1])
disc_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_y,labels=tf.to_int32(tf.round(y_rep*6)))
self.log_py_x = log_py_x = -tf.reduce_sum(disc_loss,[1])
self.nats = -1*tf.reduce_mean(logsumexp(tf.reshape(log_py_x - kl_sample,[-1,k])) - tf.log(tf.to_float(k)))
outdist = tf.contrib.distributions.Categorical(logits=logits_y)
self.y_sample = outdist.sample() if not hps.ignore_sigma_outcome else tf.argmax(logits_y,axis=2)
# To display
self.kl = tf.reduce_mean(kl_sum)
# ELBO
log_divergence = tf.reshape(log_py_x - kl_sum,[-1,k]) # shape [batch_size,k]
if np.abs(hps.alpha-1.0)>1e-3: # use Renyi alpha-divergence
log_divergence = log_divergence * (1-hps.alpha)
logF = logsumexp(log_divergence)
self.elbo = elbo = tf.reduce_mean(logF - tf.log(tf.to_float(k)))/ (1-hps.alpha)
else:
# use KL divergence
self.elbo = elbo = tf.reduce_mean(log_divergence)
self.loss = loss = -elbo
### Optimizer
self.lr = lr = tf.Variable(0.001,name="learning_rate",trainable=False)
global_step = tf.Variable(0,name='global_step',trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
if hps.max_grad != None:
grads_and_vars = optimizer.compute_gradients(loss)
for idx, (grad, var) in enumerate(grads_and_vars):
if grad is not None:
grads_and_vars[idx] = (tf.clip_by_norm(grad, hps.max_grad), var)
self.train_op = optimizer.apply_gradients(grads_and_vars)
self.grads_and_vars = grads_and_vars
else:
self.train_op = optimizer.minimize(loss,global_step=global_step)
self.grads_and_vars = tf.constant(0)
self.train_op_rl = optimizer.minimize(rl_cost)
self.init_op=tf.global_variables_initializer() | mit | 8,419,598,953,240,772,000 | 47.652174 | 154 | 0.588994 | false | 3.125299 | false | false | false |
KaiserAndres/kaiserBot | bot_executables.py | 1 | 4751 | import roller
import random
DEFAULT_CARD_AMOUNT = 1
MAX_CARDS = 15
CARD_SEPARATOR = "||"
def ping_exec(irc, message):
pong = 'PONG ' + message.text.split(" ")[1] + '\r\n'
irc.send(pong.encode("utf-8"))
def roll_exec(irc, message):
'''
    A !roll command has the following structure:
!roll diceAmount+d+diceSize+"+"+modifier
* Dice amount is an integer up to 20000
* Dice Size is an integer
* Modifier is an integer that is added onto the roll after
    The !roll command can also have this structure:
!!roll d+diceAmount+d+diceSize+"+"+modifier
    * Dice amount is determined by rolling a die of the given size; that many
    of the following dice are then rolled
* Dice Size is an integer
* Modifier is an integer that is added onto the roll after
'''
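    # Illustrative command strings for the two forms above (assumptions only;
    # the actual parsing is delegated to roller.getRolledNumbers):
    #   "!roll 2d6+3"  -> roll two six-sided dice and add 3 to the result
    #   "!roll d4d6+3" -> roll a d4 first, then roll that many d6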
diceNumbers = roller.getRolledNumbers(message.text)
messageToSend = ''
# -------------------------------------------------------------------
# Hard limits on the dice sizes
# -------------------------------------------------------------------
if diceNumbers[0] > 10:
diceNumbers[0] = 10
if diceNumbers[0] < 1:
diceNumbers[0] = 1
if diceNumbers[1] > 2000:
diceNumbers[1] = 2000
if diceNumbers[1] < 1:
diceNumbers[1] = 1
if diceNumbers[2] < 1:
diceNumbers[2] = 1
rolledArray = roller.roll(diceNumbers[0],
diceNumbers[1],
diceNumbers[2])
for rollNum in rolledArray:
# REMINDER: make a message maker function cause this is ugly!
if (diceNumbers[3] == 0):
messageToSend = (messageToSend +
"\x0312,15(" + str(diceNumbers[1]) +
"d" + str(diceNumbers[2]) + ") \x032,15[" +
str(rollNum) + "]\x031,15 : \x034,15{" +
str(rollNum + diceNumbers[3]) + "} ")
else:
messageToSend = (messageToSend + "\x0312,15(" +
str(diceNumbers[1]) + "d" +
str(diceNumbers[2]) + "+" +
str(diceNumbers[3]) + ") \x032,15[" +
str(rollNum) + "+" +
str(diceNumbers[3]) +
"]\x031,15 : \x034,15{" +
str(rollNum + diceNumbers[3]) + "} ")
irc.send(message.reply(messageToSend))
def join_exec(irc, message):
'''
A join command has the following structure:
!JOIN #CHANNEL
A message is sent to the irc server requesting to join #CHANNEL
'''
chann = ""
foundLink = False
for char in message.text:
if char == "#":
foundLink = True
if foundLink:
chann = chann + char
if chann != "":
join_message = "JOIN " + chann + "\n"
irc.send(join_message.encode("utf-8"))
else:
irc.send(message.reply("Error 02: bad channel."))
def tarot_exec(irc, message):
'''
Tarot command asks for the number of cards to be drawn and returns them.
A tarot command has the following structure:
!tarot <NUMBER OF CARDS>
'''
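    # e.g. "!tarot 3" draws three cards; with no number given the default
    # DEFAULT_CARD_AMOUNT (1) is used, and draws are capped at MAX_CARDS (15).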
card_amount = get_card_amount(message)
card_spread = spread_cards(card_amount)
output_message = "You got these cards: " + CARD_SEPARATOR.join(card_spread)
irc.send(message.reply(output_message))
def spread_cards(card_amount):
card_spread = []
local_deck = load_deck("deck")
for time in range(0, card_amount):
card_index = random.randint(0, len(local_deck) - 1)
is_reversed = random.randint(0, 1) == 1
card_text = local_deck[card_index]
if is_reversed:
card_text = card_text + "(reversed)"
card_spread.append(card_text)
local_deck.remove(local_deck[card_index])
return card_spread
def get_card_amount(message):
number_buffer = ""
number_end = 9
for characterIndex in range(0, len(message.text)):
try:
int(message.text[characterIndex])
if characterIndex < number_end:
number_buffer = number_buffer + message.text[characterIndex]
except ValueError:
continue
try:
card_amount = int(number_buffer)
except ValueError:
card_amount = DEFAULT_CARD_AMOUNT
if card_amount > MAX_CARDS:
card_amount = MAX_CARDS
return card_amount
def load_deck(deck_file_name):
deck_file = open(deck_file_name, "r")
deck_text = deck_file.readlines()
deck = []
deck_file.close()
for card in deck_text:
deck.append(card[:-1])
return deck
| mit | -1,279,902,818,268,667,600 | 29.455128 | 80 | 0.53273 | false | 3.708821 | false | false | false |
thermokarst/advent-of-code-2015 | day20.py | 1 | 2724 | # Matthew Ryan Dillon
# github.com/thermokarst
#
# --- Day 20: Infinite Elves and Infinite Houses ---
#
# To keep the Elves busy, Santa has them deliver some presents by hand,
# door-to-door. He sends them down a street with infinite houses numbered
# sequentially: 1, 2, 3, 4, 5, and so on.
#
# Each Elf is assigned a number, too, and delivers presents to houses based on
# that number:
#
# - The first Elf (number 1) delivers presents to every house: 1, 2, 3, 4, 5, ....
# - The second Elf (number 2) delivers presents to every second house: 2, 4, 6,
# 8, 10, ....
# - Elf number 3 delivers presents to every third house: 3, 6, 9, 12, 15, ....
#
# There are infinitely many Elves, numbered starting with 1. Each Elf delivers
# presents equal to ten times his or her number at each house.
#
# So, the first nine houses on the street end up like this:
#
# House 1 got 10 presents.
# House 2 got 30 presents.
# House 3 got 40 presents.
# House 4 got 70 presents.
# House 5 got 60 presents.
# House 6 got 120 presents.
# House 7 got 80 presents.
# House 8 got 150 presents.
# House 9 got 130 presents.
#
# The first house gets 10 presents: it is visited only by Elf 1, which delivers 1
# * 10 = 10 presents. The fourth house gets 70 presents, because it is visited by
# Elves 1, 2, and 4, for a total of 10 + 20 + 40 = 70 presents.
#
# What is the lowest house number of the house to get at least as many presents
# as the number in your puzzle input?
#
# --- Part Two ---
#
# The Elves decide they don't want to visit an infinite number of houses.
# Instead, each Elf will stop after delivering presents to 50 houses. To make up
# for it, they decide to deliver presents equal to eleven times their number at
# each house.
#
# With these changes, what is the new lowest house number of the house to get at
# least as many presents as the number in your puzzle input?
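# Worked check (added for clarity): house 9 is visited by Elves 1, 3 and 9,
# so it receives 10*(1 + 3 + 9) = 130 presents, matching the list above.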
INPUT = 34000000
def visit_homes(pph, max_visit=None):
homes = [0 for x in range(int(INPUT/pph))]
for elf in range(1, len(homes)+1):
house = elf
count = 0
while house < len(homes):
if max_visit and count >= max_visit:
break
homes[house] += elf*pph
house += elf
count += 1
return homes
def check_homes(homes):
for house, presents in enumerate(homes):
if presents >= INPUT:
return (house, presents)
homes = visit_homes(10)
house, presents = check_homes(homes)
print("pt 1: house {}, presents {}".format(house, presents))
homes = visit_homes(11, max_visit=50)
house, presents = check_homes(homes)
print("pt 2: house {}, presents {}".format(house, presents))
| mit | -2,547,104,543,869,876,000 | 34.842105 | 82 | 0.656021 | false | 3.182243 | false | false | false |
emmettk/pvrsex | tiff_file_folder_divider.py | 1 | 5268 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 28 16:14:22 2017
@author: ekrupczak on LaVision
Divide tiffs up into subfolders with maximum number of files per folder
This allows them to be imported into DaVis which seems unwilling to import more than about 10000 files at a time.
Or pull all tiffs from specified folder between start and end file numbers
"""
import os
import math
import re
def divide_tiffs(path, max_per_folder = 10000):
"""
Divides all tiffs in path into subfolders with up to max_per_folder files per subfolder
"""
tiffs = [file for file in os.listdir(path) if ".tif" in file]
num_sub_dir = math.ceil(len(tiffs)/max_per_folder)
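    # e.g. (illustrative numbers) 25000 tiffs with max_per_folder=10000 gives
    # num_sub_dir = 3: tiffs_pt01 and tiffs_pt02 receive 10000 files each and
    # tiffs_pt03 receives the remaining 5000.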
print("Dividing", len(tiffs), "tiffs into", num_sub_dir, "directories")
for i in range(1,num_sub_dir+1):
os.mkdir(path+r"\tiffs_pt"+str(i).zfill(2))
if i < num_sub_dir:
for file in tiffs[max_per_folder*(i-1):max_per_folder*i]:
                os.rename(path+r"\\"+file, path+r"\tiffs_pt"+str(i).zfill(2)+r"\\"+file)
        elif i == num_sub_dir:
            for file in tiffs[max_per_folder*(i-1):]:
                os.rename(path+r"\\"+file, path+r"\tiffs_pt"+str(i).zfill(2)+r"\\"+file)
        print("Directory", "tiffs_pt"+str(i).zfill(2), "populated")
def unpack_folders(path):
"""
Undoes divide_tiffs by unpacking all files in subfolders into the main folder
"""
for folder in [f for f in os.listdir(path) if "." not in f]:
print("unpacking", folder)
for file in os.listdir(path+"/"+folder):
os.rename(path+"/"+folder+"/"+file, path+"/"+file)
print("deleting empty folder", folder)
os.rmdir(path+"/"+folder)
def pull_nth_tiff(path, n):
"""
Pulls every nth tiff into a separate folder
Designed for reading into DaVis
    Run 'unpack_folders' afterwards to undo this action
"""
tiffs = [file for file in os.listdir(path) if '.tif' in file.lower()]
print(len(tiffs), "tiffs in ", path)
newdirname = r"\every_"+str(n)+"th_tiff"
os.mkdir(path+newdirname)
for tiff in tiffs[0::n]:
os.rename(path+r"\\"+tiff, path+newdirname+"\\"+tiff)
print("Every", str(n)+"th tiff put in ", path+newdirname)
print("Folder contains ", len(os.listdir(path+newdirname)), "files")
def pull_tiffs_in_range(path, start, stop):
"""
Pull all tiffs between file number start and file number stop.
Assumes tiff names are formatted as follows: tiff_name_00001.tif
"""
tiffs = [file for file in os.listdir(path) if '.tif' in file.lower()]
print(len(tiffs), "tiffs in ", path)
newdirname = r"\tiffs_in_range"+str(start)+"_"+str(stop)
os.mkdir(path+newdirname)
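    # Filenames the regex below is written to accept (illustrative examples):
    #   "run5_0123_grayscale.tif" -> file number 123
    #   "IMG_0456.TIF"            -> file number 456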
for tiff in tiffs:
filenum = int(re.findall("(?:_)([0-9]+)(?:_grayscale\.tif|\.TIF)", tiff)[0])
# print(filenum, filenum > start, filenum < stop)
if start<=filenum<=stop:
# print(filenum, tiff)
os.rename(path+r"\\"+tiff, path+newdirname+"\\"+tiff)
print("Files placed in",path+newdirname)
print("Folder contains", len(os.listdir(path+newdirname)))
if __name__ == "__main__":
##2.5 hz
# n = 2 ##tower EO
# n = 6 ##pier EO
## ~2hz
# n = 8 ##pier EO (1.875hz)
##1.5 hz
# n = 3 #tower EO (1.66 hz)
##1 hz
# n = 15 ##pier EO
# n = 30 ##tower IR / pier IR
# n = 5 ## tower EO
## 0.66 Hz
# n = 8 ##tower EO (0.625 Hz)
##0.5 Hz
# n = 30 #pier EO
##0.33 Hz
n = 15 #tower EO
##0.166 Hz
# n = 30 #tower EO
#
# camera = "tower_EO_12mm"
# camera = "pier_EO_08mm"
# camera = "tower_IR_16mm"
# camera = "pier_IR_09mm"
# run = r"20170926_1000_towerEO_pierEO/"
# run = r"20170926_1100_pierIR_pierEO/"
# run = r"20170926_1200_towerIR_pierIR/"
# run = r"20170926_1300_towerIR_towerEO/"
#
# path = r"D:/RSEX17_TIFF/0926/"+run+camera
# path = r'D:/RSEX17_TIFF/1005/201710051000/'+camera+"/tiffs_in_range4488_7488"
#path = r'D:\RSEX17_TIFF\1015\201710151610\201710151610_tower_color'
# path = r'D:\RSEX17_TIFF\1005\201710051000\tower_1015_1025'
# path = r'D:\RSEX17_TIFF\1005\201710051000\tower_EO_12mm_range_4488_7488_grayscale'
# path = r'D:\RSEX17_TIFF\1005\201710051000\pier_EO_08mm'
path = r'D:\RSEX17_TIFF\1005\201710051000\tower_EO_12mm'
# path = r'E:\RSEX17_TIFF\1005\201710051000\pier_EO_08mm\tiffs_in_range13464_22464'
# path = r'D:\RSEX17_TIFF\1015\201710151610\201710151610_tower_grayscale'
# path = r"D:/RSEX17_TIFF/1013/201710131200/"+camera
# divide_tiffs(path, max_per_folder = 20*10**3)
# print("Tiffs divided")
# path = r'D:\RSEX17_TIFF\1005\201710051000\tower_EO_12mm_range_4488_7488_grayscale'
unpack_folders(path)
# pull_nth_tiff(path, n)
# path = r'D:/RSEX17_TIFF/1005/201710051000/tower_EO_12mm'
# unpack_folders(path)
#
# path = r"D:\RSEX17_TIFF\1005\201710051000\tower_EO_12mm_every_2th_tiff_grayscale"
# pull_tiffs_in_range(path, 4488, 7488)
# pull_tiffs_in_range(path, 13464, 22464)
# path = path+"\every_"+str(n)+"th_tiff"
# unpack_folders(path)
# path = r'E:\RSEX17_TIFF\1005\201710051000\pier_EO_08mm\tiffs_in_range13464_22464'
# pull_nth_tiff(path, n) | mit | -5,883,737,634,653,318,000 | 33.664474 | 113 | 0.610478 | false | 2.679552 | false | false | false |
czcorpus/kontext | lib/bgcalc/csv_cache.py | 1 | 1387 | # Copyright (c) 2021 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2021 Tomas Machalek <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import csv
def load_cached_partial(path, offset, limit):
with open(path, 'r') as fr:
csv_reader = csv.reader(fr)
_, total_str = next(csv_reader)
for i in range(0, offset):
next(csv_reader)
ans = []
i = offset
for row in csv_reader:
if i == offset + limit:
break
ans.append((row[0], ) + tuple(int(x) for x in row[1:]))
i += 1
return int(total_str), ans
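# Cache file layout assumed by load_cached_partial above and load_cached_full
# below (illustrative; the first field of the header row is ignored and the
# second carries the total):
#   __total__,12345
#   lemma,10,3
#   word,7,1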
def load_cached_full(path):
ans = []
with open(path, 'r') as fr:
csv_reader = csv.reader(fr)
_, total_str = next(csv_reader)
for row in csv_reader:
ans.append((row[0], ) + tuple(int(x) for x in row[1:]))
return int(total_str), ans
| gpl-2.0 | 5,040,845,778,632,978,000 | 32.829268 | 67 | 0.618601 | false | 3.583979 | false | false | false |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/demos/smt2011.py | 1 | 5140 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: smt2011.py
# Purpose: Demonstrations for the SMT 2011 demo
#
# Authors: Christopher Ariza
# Michael Scott Cuthbert
#
# Copyright: Copyright © 2011 Michael Scott Cuthbert and the music21 Project
# License: BSD or LGPL, see license.txt
#-------------------------------------------------------------------------------
import copy
from music21 import environment, corpus
_MOD = 'demo/smt2011.py'
environLocal = environment.Environment(_MOD)
def ex01():
# beethoven
#s1 = corpus.parse('opus18no1/movement3.xml')
#s1.show()
# has lots of triplets toward end
# viola not coming in as alto clef
# s2 = corpus.parse('haydn/opus17no1/movement3.zip')
# s2.show()
s2 = corpus.parse('haydn/opus17no2/movement3.zip')
# works well; some triplets are missing but playback correctly
s2Chordified = s2.measures(1, 25).chordify()
s2Chordified.show()
#-------------------------------------------------------------------------------
def chordsToAnalysis(chordStream, manifest, scale):
'''
manifest is a list of tuples in the following form:
    (measureNumber, chordNumberOrNone, scaleDegree, octaveDisplay, durationTypeDisplay, textDisplay)
'''
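    # For instance, (1, None, 3, 5, 'whole', '3') -- taken from exShenker
    # below -- displays scale degree 3 of measure 1 as a stemless, unfilled
    # (whole-style) notehead in octave 5, labelled "3".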
from music21 import note, bar
chordMeasures = chordStream.getElementsByClass('Measure')
measureTemplate = copy.deepcopy(chordMeasures)
for i, m in enumerate(measureTemplate):
m.removeByClass(['GeneralNote'])
# assuming we have measure numbers
for (measureNumber, chordNumberOrNone, scaleDegree, octaveDisplay,
durationTypeDisplay, textDisplay) in manifest:
# assume measures are in order; replace with different method
m = chordMeasures[measureNumber-1]
mPost = measureTemplate[measureNumber-1]
if chordNumberOrNone is None:
c = m.notes[0]
else:
c = m.notes[chordNumberOrNone-1] # assume counting from 1
pTarget = scale.pitchFromDegree(scaleDegree)
match = False
p = None
for p in c.pitches:
if p.name == pTarget.name:
match = True
break
if not match:
print('no scale degree found in specified chord', p, pTarget)
pTarget.octave = octaveDisplay
n = note.Note(pTarget)
if durationTypeDisplay in ['whole']:
n.noteheadFill = False
else:
n.noteheadFill = True
n.stemDirection = 'noStem'
n.addLyric(textDisplay)
mPost.insert(c.getOffsetBySite(m), n)
# fill with rests
for m in measureTemplate:
m.rightBarline = bar.Barline('none')
# need to hide rests
if len(m.notes) == 0:
r = note.Rest(quarterLength=4)
r.hideObjectOnPrint = True
m.append(r)
return measureTemplate
def exShenker():
from music21 import stream, scale, bar
# wtc no 1
src = corpus.parse('bwv846')
#src.show()
melodicSrc = src.parts[0]
measureTemplate = copy.deepcopy(melodicSrc.getElementsByClass('Measure'))
for i, m in enumerate(measureTemplate):
m.removeByClass(['GeneralNote'])
m.number = i + 1
# this stream has triple bar lines, clefs, etc
unused_chords = src.flat.makeChords(minimumWindowSize=2)
analysis = stream.Score()
chordReduction = copy.deepcopy(measureTemplate)
for i, m in enumerate(chordReduction.getElementsByClass('Measure')):
mNotes = src.flat.getElementsByOffset(m.offset,
m.offset+m.barDuration.quarterLength, includeEndBoundary=False)
mNotes.makeChords(minimumWindowSize=4, inPlace=True)
c = mNotes.flat.notes[0]
c.duration.type = 'whole'
m.append(c)
m.rightBarline = bar.Barline('regular')
# add parts
scaleCMajor = scale.MajorScale('c')
#measureNumber, chordNumberOrNone, scaleDegree, octaveDisplay,
# durationTypeDisplay, textDisplay
manifest = [(1, None, 3, 5, 'whole', '3'),
(24, None, 2, 5, 'whole', '2'),
(35, None, 1, 5, 'whole', '1'),
]
analysis1 = chordsToAnalysis(chordReduction, manifest, scaleCMajor)
manifest = [(1, None, 1, 4, 'whole', 'I'),
(24, None, 5, 3, 'whole', 'V'),
(31, None, 4, 4, 'quarter', '--7'),
(35, None, 1, 4, 'whole', 'I'),
]
analysis2 = chordsToAnalysis(chordReduction, manifest, scaleCMajor)
analysis.insert(0, analysis1)
analysis.insert(0, analysis2)
analysis.insert(0, chordReduction)
analysis.show()
def demoMakeChords():
# wtc no 1
#src = corpus.parse('bwv65.2').measures(0, 5)
src = corpus.parse('opus18no1/movement3.xml').measures(0, 10)
src.flattenParts().makeChords(minimumWindowSize=3).show()
src = corpus.parse('opus18no1/movement3.xml').measures(0, 10)
src.chordify().show()
if __name__ == '__main__':
#ex01()
#exShenker()
demoMakeChords()
| mit | -2,055,209,700,920,989,400 | 30.527607 | 87 | 0.590971 | false | 3.616467 | false | false | false |
jaeddy/bripipetools | bripipetools/dbification/flowcellrun.py | 1 | 8314 | """
Class for importing data from a sequencing run into GenLIMS and the
Research DB as new objects.
"""
import logging
import os
import re
from .. import parsing
from .. import database
from .. import annotation
logger = logging.getLogger(__name__)
class FlowcellRunImporter(object):
"""
Collects FlowcellRun and SequencedLibrary objects from a sequencing run,
converts to documents, inserts into database.
"""
def __init__(self, path, db, run_opts):
logger.debug("creating `SequencingImporter` instance")
logger.debug("...with arguments (path: '{}', db: '{}')"
.format(path, db.name))
self.path = path
self.db = db
self.run_opts = run_opts
def _collect_flowcellrun(self):
"""
Collect FlowcellRun object for flowcell run.
"""
path_items = parsing.parse_flowcell_path(self.path)
logger.info("collecting info for flowcell run {}"
.format(path_items['run_id']))
return annotation.FlowcellRunAnnotator(
run_id=path_items['run_id'],
pipeline_root=path_items['pipeline_root'],
db=self.db
).get_flowcell_run()
def _collect_sequencedlibraries(self):
"""
Collect list of SequencedLibrary objects for flowcell run.
"""
path_items = parsing.parse_flowcell_path(self.path)
logger.info("Collecting sequenced libraries for flowcell run '{}'"
.format(path_items['run_id']))
return annotation.FlowcellRunAnnotator(
run_id=path_items['run_id'],
pipeline_root=path_items['pipeline_root'],
db=self.db
).get_sequenced_libraries()
def _collect_librarygenecounts(self):
"""
Collect list of library gene count objects for flowcell run.
"""
path_items = parsing.parse_flowcell_path(self.path)
# print("path: {}, items: {}".format(self.path, path_items))
logger.info("Collecting library gene counts for flowcell run '{}'"
.format(path_items['run_id']))
return annotation.FlowcellRunAnnotator(
run_id=path_items['run_id'],
pipeline_root=path_items['pipeline_root'],
db=self.db
).get_library_gene_counts()
def _collect_librarymetrics(self):
"""
Collect list of library metrics objects for flowcell run.
"""
path_items = parsing.parse_flowcell_path(self.path)
# print("path: {}, items: {}".format(self.path, path_items))
logger.info("Collecting library metrics for flowcell run '{}'"
.format(path_items['run_id']))
return annotation.FlowcellRunAnnotator(
run_id=path_items['run_id'],
pipeline_root=path_items['pipeline_root'],
db=self.db
).get_library_metrics()
def _insert_flowcellrun(self, collection='all'):
"""
Convert FlowcellRun object and insert into GenLIMS database.
"""
flowcellrun = self._collect_flowcellrun()
logger.debug("inserting flowcell run {} into {}"
.format(flowcellrun, self.db.name))
database.put_runs(self.db, flowcellrun.to_json())
def _insert_sequencedlibraries(self):
"""
Convert SequencedLibrary objects and insert into GenLIMS database.
"""
sequencedlibraries = self._collect_sequencedlibraries()
for sl in sequencedlibraries:
logger.debug("inserting sequenced library {}".format(sl))
database.put_samples(self.db, sl.to_json())
def _insert_genomicsSequencedlibraries(self):
"""
Convert SequencedLibrary objects and insert into Research database.
"""
sequencedlibraries = self._collect_sequencedlibraries()
for sl in sequencedlibraries:
logger.debug("inserting sequenced library {}".format(sl))
database.put_genomicsSamples(self.db, sl.to_json())
def _insert_librarygenecounts(self):
"""
Convert Library Results objects and insert into Research database.
"""
librarygenecounts = self._collect_librarygenecounts()
for lgc in librarygenecounts:
logger.debug("inserting library gene counts '{}'".format(lgc))
database.put_genomicsCounts(self.db, lgc.to_json())
def _insert_librarymetrics(self):
"""
Convert Library Results objects and insert into GenLIMS database.
"""
librarymetrics = self._collect_librarymetrics()
for lgc in librarymetrics:
logger.debug("inserting library metrics '{}'".format(lgc))
database.put_metrics(self.db, lgc.to_json())
def _insert_genomicsLibrarymetrics(self):
"""
Convert Library Results objects and insert into Research database.
"""
librarymetrics = self._collect_librarymetrics()
for lgc in librarymetrics:
logger.debug("inserting library metrics '{}'".format(lgc))
database.put_genomicsMetrics(self.db, lgc.to_json())
def _insert_genomicsWorkflowbatches(self):
"""
Collect WorkflowBatch objects and insert them into database.
"""
path_items = parsing.parse_flowcell_path(self.path)
batchfile_dir = os.path.join(self.path, "globus_batch_submission")
logger.info("collecting info for workflow batch files in '{}'"
.format(batchfile_dir))
batchfile_list = [batchfile for batchfile in os.listdir(batchfile_dir)
if not re.search('DS_Store', batchfile)]
for curr_batchfile in batchfile_list:
workflowbatch = annotation.WorkflowBatchAnnotator(
workflowbatch_file=os.path.join(batchfile_dir, curr_batchfile),
pipeline_root=path_items['pipeline_root'],
db=self.db,
run_opts = self.run_opts
).get_workflow_batch()
logger.debug("inserting workflow batch '{}'".format(workflowbatch))
database.put_genomicsWorkflowbatches(self.db, workflowbatch.to_json())
def insert(self, collection='genlims'):
"""
Insert documents into GenLIMS or ResearchDB databases.
        Note that ResearchDB collections are prefixed with 'genomics'
to indicate the data origin.
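        Illustrative use (assumed driver code, not part of this module):
            importer = FlowcellRunImporter(path, db, run_opts)
            importer.insert(collection='all')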
"""
# Sample information into ResDB/GenLIMS
if collection in ['all', 'researchdb', 'genomicsSamples']:
logger.info(("Inserting sequenced libraries for flowcell '{}' "
"into '{}'").format(self.path, self.db.name))
self._insert_genomicsSequencedlibraries()
if collection in ['all', 'genlims', 'samples']:
logger.info(("Inserting sequenced libraries for flowcell '{}' "
"into '{}'").format(self.path, self.db.name))
self._insert_sequencedlibraries()
# Gene counts - only into ResDB
if collection in ['all', 'researchdb', 'genomicsCounts']:
logger.info(("Inserting gene counts for libraries for flowcell '{}' "
"into '{}'").format(self.path, self.db.name))
self._insert_librarygenecounts()
# Metrics information - only into ResDB
if collection in ['all', 'researchdb', 'genomicsMetrics']:
logger.info(("Inserting metrics for libraries for flowcell '{}' "
"into '{}'").format(self.path, self.db.name))
self._insert_genomicsLibrarymetrics()
# Workflow Batch files - only into ResDB
if collection in ['all', 'researchdb', 'genomicsWorkflowbatches']:
logger.info(("Inserting workflow batches for flowcell '{}' "
"into '{}'").format(self.path, self.db.name))
self._insert_genomicsWorkflowbatches()
# Run information into GenLIMS
if collection in ['all', 'genlims', 'flowcell', 'runs']:
logger.info("Inserting flowcell run '{}' into '{}'"
.format(self.path, self.db.name))
self._insert_flowcellrun()
| mit | 3,064,618,017,734,528,000 | 39.955665 | 82 | 0.595141 | false | 4.294421 | false | false | false |
hidashun/django-typed-models | typedmodels/tests.py | 1 | 8182 | from django.utils import unittest
try:
import yaml # NOQA
PYYAML_AVAILABLE = True
except ImportError:
PYYAML_AVAILABLE = False
from django.core import serializers
from django.test import TestCase
from django.db.models.query_utils import DeferredAttribute
from .test_models import AngryBigCat, Animal, BigCat, Canine, Feline, Parrot, AbstractVegetable, Vegetable, Fruit
class SetupStuff(TestCase):
def setUp(self):
Feline.objects.create(name="kitteh")
Feline.objects.create(name="cheetah")
Canine.objects.create(name="fido")
BigCat.objects.create(name="simba")
AngryBigCat.objects.create(name="mufasa")
Parrot.objects.create(name="Kajtek")
class TestTypedModels(SetupStuff):
def test_cant_instantiate_base_model(self):
# direct instantiation shouldn't work
self.assertRaises(RuntimeError, Animal.objects.create, name="uhoh")
# ... unless a type is specified
Animal.objects.create(name="dingo", type="typedmodels.canine")
# ... unless that type is stupid
try:
Animal.objects.create(name="dingo", type="macaroni.buffaloes")
except ValueError:
pass
def test_get_types(self):
self.assertEqual(set(Animal.get_types()), set(['typedmodels.canine', 'typedmodels.bigcat', 'typedmodels.parrot', 'typedmodels.angrybigcat', 'typedmodels.feline']))
self.assertEqual(set(Canine.get_types()), set(['typedmodels.canine']))
self.assertEqual(set(Feline.get_types()), set(['typedmodels.bigcat', 'typedmodels.angrybigcat', 'typedmodels.feline']))
def test_get_type_classes(self):
self.assertEqual(set(Animal.get_type_classes()), set([Canine, BigCat, Parrot, AngryBigCat, Feline]))
self.assertEqual(set(Canine.get_type_classes()), set([Canine]))
self.assertEqual(set(Feline.get_type_classes()), set([BigCat, AngryBigCat, Feline]))
def test_base_model_queryset(self):
# all objects returned
qs = Animal.objects.all().order_by('type')
self.assertEqual(len(qs), 6)
self.assertEqual([obj.type for obj in qs], ['typedmodels.angrybigcat', 'typedmodels.bigcat', 'typedmodels.canine', 'typedmodels.feline', 'typedmodels.feline', 'typedmodels.parrot'])
self.assertEqual([type(obj) for obj in qs], [AngryBigCat, BigCat, Canine, Feline, Feline, Parrot])
def test_proxy_model_queryset(self):
qs = Canine.objects.all().order_by('type')
self.assertEqual(qs.count(), 1)
self.assertEqual(len(qs), 1)
self.assertEqual([obj.type for obj in qs], ['typedmodels.canine'])
self.assertEqual([type(obj) for obj in qs], [Canine])
qs = Feline.objects.all().order_by('type')
self.assertEqual(qs.count(), 4)
self.assertEqual(len(qs), 4)
self.assertEqual([obj.type for obj in qs], ['typedmodels.angrybigcat', 'typedmodels.bigcat', 'typedmodels.feline', 'typedmodels.feline'])
self.assertEqual([type(obj) for obj in qs], [AngryBigCat, BigCat, Feline, Feline])
def test_doubly_proxied_model_queryset(self):
qs = BigCat.objects.all().order_by('type')
self.assertEqual(qs.count(), 2)
self.assertEqual(len(qs), 2)
self.assertEqual([obj.type for obj in qs], ['typedmodels.angrybigcat', 'typedmodels.bigcat'])
self.assertEqual([type(obj) for obj in qs], [AngryBigCat, BigCat])
def test_triply_proxied_model_queryset(self):
qs = AngryBigCat.objects.all().order_by('type')
self.assertEqual(qs.count(), 1)
self.assertEqual(len(qs), 1)
self.assertEqual([obj.type for obj in qs], ['typedmodels.angrybigcat'])
self.assertEqual([type(obj) for obj in qs], [AngryBigCat])
def test_recast_auto(self):
cat = Feline.objects.get(name='kitteh')
cat.type = 'typedmodels.bigcat'
cat.recast()
self.assertEqual(cat.type, 'typedmodels.bigcat')
self.assertEqual(type(cat), BigCat)
def test_recast_string(self):
cat = Feline.objects.get(name='kitteh')
cat.recast('typedmodels.bigcat')
self.assertEqual(cat.type, 'typedmodels.bigcat')
self.assertEqual(type(cat), BigCat)
def test_recast_modelclass(self):
cat = Feline.objects.get(name='kitteh')
cat.recast(BigCat)
self.assertEqual(cat.type, 'typedmodels.bigcat')
self.assertEqual(type(cat), BigCat)
def test_recast_fail(self):
cat = Feline.objects.get(name='kitteh')
self.assertRaises(ValueError, cat.recast, AbstractVegetable)
self.assertRaises(ValueError, cat.recast, 'typedmodels.abstractvegetable')
self.assertRaises(ValueError, cat.recast, Vegetable)
self.assertRaises(ValueError, cat.recast, 'typedmodels.vegetable')
def test_fields_in_subclasses(self):
canine = Canine.objects.all()[0]
angry = AngryBigCat.objects.all()[0]
angry.mice_eaten = 5
angry.save()
self.assertEqual(AngryBigCat.objects.get(pk=angry.pk).mice_eaten, 5)
angry.canines_eaten.add(canine)
self.assertEqual(list(angry.canines_eaten.all()), [canine])
# Feline class was created before Parrot and has mice_eaten field which is non-m2m, so it may break accessing
# known_words field in Parrot instances (since Django 1.5).
parrot = Parrot.objects.all()[0]
parrot.known_words = 500
parrot.save()
self.assertEqual(Parrot.objects.get(pk=parrot.pk).known_words, 500)
def test_fields_cache(self):
mice_eaten = Feline._meta.get_field('mice_eaten')
known_words = Parrot._meta.get_field('known_words')
self.assertIn(mice_eaten, AngryBigCat._meta.fields)
self.assertIn(mice_eaten, Feline._meta.fields)
self.assertNotIn(mice_eaten, Parrot._meta.fields)
self.assertIn(known_words, Parrot._meta.fields)
self.assertNotIn(known_words, AngryBigCat._meta.fields)
self.assertNotIn(known_words, Feline._meta.fields)
def test_m2m_cache(self):
canines_eaten = AngryBigCat._meta.get_field_by_name('canines_eaten')[0]
self.assertIn(canines_eaten, AngryBigCat._meta.many_to_many)
self.assertNotIn(canines_eaten, Feline._meta.many_to_many)
self.assertNotIn(canines_eaten, Parrot._meta.many_to_many)
def test_related_names(self):
'''Ensure that accessor names for reverse relations are generated properly.'''
canine = Canine.objects.all()[0]
self.assertTrue(hasattr(canine, 'angrybigcat_set'))
def test_queryset_defer(self):
"""
Ensure that qs.defer() works correctly
"""
Vegetable.objects.create(name='cauliflower', color='white', yumness=1)
Vegetable.objects.create(name='spinach', color='green', yumness=5)
Vegetable.objects.create(name='sweetcorn', color='yellow', yumness=10)
Fruit.objects.create(name='Apple', color='red', yumness=7)
qs = AbstractVegetable.objects.defer('yumness')
objs = set(qs)
for o in objs:
print(o)
self.assertIsInstance(o, AbstractVegetable)
self.assertTrue(o._deferred)
self.assertIsInstance(o.__class__.__dict__['yumness'], DeferredAttribute)
# does a query, since this field was deferred
self.assertIsInstance(o.yumness, float)
def _check_serialization(self, serialization_format):
"""Helper function used to check serialization and deserialization for concrete format."""
animals = Animal.objects.order_by('pk')
serialized_animals = serializers.serialize(serialization_format, animals)
deserialized_animals = [wrapper.object for wrapper in serializers.deserialize(serialization_format, serialized_animals)]
self.assertEqual(set(deserialized_animals), set(animals))
def test_xml_serialization(self):
self._check_serialization('xml')
def test_json_serialization(self):
self._check_serialization('json')
@unittest.skipUnless(PYYAML_AVAILABLE, 'PyYAML is not available.')
def test_yaml_serialization(self):
self._check_serialization('yaml')
| bsd-3-clause | -235,403,817,046,583,680 | 43.467391 | 189 | 0.666585 | false | 3.466949 | true | false | false |
Zogg/Tiltai | tiltai/sdn/docker.py | 1 | 1700 | from tiltai.utils import tiltai_logs_format
import socket
from logbook import Logger, StderrHandler
err = StderrHandler(format_string=tiltai_logs_format)
log = Logger("sdn[docker]")
def dockersdn(queue_name, resolver, storage):
"""
  Get addresses and type of the socket from within a docker container. The
  hostname of the container is used as the identifier to receive the
  network links definition.
Parameters
----------
queue_name : string
Name of the queue, for which to get network settings
resolver : callable
A `name` -> `network address` mapper. More than likely one of resolvers
provided by `tiltai.sdn` modules
storage : callable
A data backend which provides network mapping: definition of links
between gates. More than likely one of the methods provided by
`tiltai.sdn` modules
Returns
-------
network : dict
A dict of shape `{'endpoints': [], 'type': value}`
"""
with err.applicationbound():
hostname = socket.gethostname()
log.debug('My hostname is: ' + hostname)
links = storage(hostname)
if links:
for link in links['links']:
if link['queue'] == queue_name:
if link.get('outgate', None):
protocolized_nodes = ['tcp://' + address for address in resolver(link['outgate'])]
endpoints = {'endpoints': protocolized_nodes}
else:
endpoints = {'endpoints': link.get('addresses', [])}
if link.get('type', None):
endpoints['type'] = link['type']
log.debug('Topology resolved to ip addresses: ' + str(endpoints))
return endpoints
return {'endpoints': []}
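# Minimal usage sketch -- `swarm_dns` and `etcd_links` are placeholder names
# for a resolver and a storage backend (assumptions, not real imports):
#
#   network = dockersdn('frames-queue', resolver=swarm_dns, storage=etcd_links)
#   endpoints = network['endpoints']    # e.g. ['tcp://10.0.0.5:5555']
#   socket_type = network.get('type')   # present only if the link defines it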
| gpl-3.0 | -5,203,554,288,579,424,000 | 29.357143 | 94 | 0.63 | false | 4.228856 | false | false | false |
AdamsLee/mongo-connector | mongo_connector/connector.py | 1 | 31437 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discovers the mongo cluster and starts the connector.
"""
import json
import logging
import logging.handlers
import optparse
import os
import pymongo
import re
import shutil
import sys
import threading
import time
import imp
from mongo_connector import constants, errors, util
from mongo_connector.locking_dict import LockingDict
from mongo_connector.oplog_manager import OplogThread
from mongo_connector.doc_managers import doc_manager_simulator as simulator
from pymongo import MongoClient
class Connector(threading.Thread):
"""Checks the cluster for shards to tail.
"""
def __init__(self, address, oplog_checkpoint, target_url, ns_set,
u_key, auth_key, doc_manager=None, auth_username=None,
collection_dump=True, batch_size=constants.DEFAULT_BATCH_SIZE,
fields=None, dest_mapping={},
auto_commit_interval=constants.DEFAULT_COMMIT_INTERVAL):
if target_url and not doc_manager:
raise errors.ConnectorError("Cannot create a Connector with a "
"target URL but no doc manager!")
def is_string(s):
try:
return isinstance(s, basestring)
except NameError:
return isinstance(s, str)
def load_doc_manager(path):
name, _ = os.path.splitext(os.path.basename(path))
try:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(name, path)
module = loader.load_module(name)
except ImportError:
module = imp.load_source(name, path)
return module
doc_manager_modules = None
if doc_manager is not None:
            # backwards compatibility: doc_manager may be a string
if is_string(doc_manager):
doc_manager_modules = [load_doc_manager(doc_manager)]
# doc_manager is a list
else:
doc_manager_modules = []
for dm in doc_manager:
doc_manager_modules.append(load_doc_manager(dm))
super(Connector, self).__init__()
#can_run is set to false when we join the thread
self.can_run = True
#The name of the file that stores the progress of the OplogThreads
self.oplog_checkpoint = oplog_checkpoint
#main address - either mongos for sharded setups or a primary otherwise
self.address = address
#The URLs of each target system, respectively
if is_string(target_url):
self.target_urls = [target_url]
elif target_url:
self.target_urls = list(target_url)
else:
self.target_urls = None
#The set of relevant namespaces to consider
self.ns_set = ns_set
#The dict of source namespace to destination namespace
self.dest_mapping = dest_mapping
#The key that is a unique document identifier for the target system.
#Not necessarily the mongo unique key.
self.u_key = u_key
#Password for authentication
self.auth_key = auth_key
#Username for authentication
self.auth_username = auth_username
#The set of OplogThreads created
self.shard_set = {}
#Boolean chooses whether to dump the entire collection if no timestamp
# is present in the config file
self.collection_dump = collection_dump
#Num entries to process before updating config file with current pos
self.batch_size = batch_size
#Dict of OplogThread/timestamp pairs to record progress
self.oplog_progress = LockingDict()
# List of fields to export
self.fields = fields
try:
docman_kwargs = {"unique_key": u_key,
"namespace_set": ns_set,
"auto_commit_interval": auto_commit_interval}
# No doc managers specified, using simulator
if doc_manager is None:
self.doc_managers = [simulator.DocManager(**docman_kwargs)]
else:
self.doc_managers = []
for i, d in enumerate(doc_manager_modules):
# self.target_urls may be shorter than
# self.doc_managers, or left as None
if self.target_urls and i < len(self.target_urls):
target_url = self.target_urls[i]
else:
target_url = None
if target_url:
self.doc_managers.append(
d.DocManager(self.target_urls[i],
**docman_kwargs))
else:
self.doc_managers.append(
d.DocManager(**docman_kwargs))
# If more target URLs were given than doc managers, may need
# to create additional doc managers
for url in self.target_urls[i + 1:]:
self.doc_managers.append(
doc_manager_modules[-1].DocManager(url,
**docman_kwargs))
except errors.ConnectionFailed:
err_msg = "MongoConnector: Could not connect to target system"
logging.critical(err_msg)
self.can_run = False
return
if self.oplog_checkpoint is not None:
if not os.path.exists(self.oplog_checkpoint):
info_str = ("MongoConnector: Can't find %s, "
"attempting to create an empty progress log" %
self.oplog_checkpoint)
logging.info(info_str)
try:
# Create oplog progress file
open(self.oplog_checkpoint, "w").close()
except IOError as e:
logging.critical("MongoConnector: Could not "
"create a progress log: %s" %
str(e))
sys.exit(2)
else:
if (not os.access(self.oplog_checkpoint, os.W_OK)
and not os.access(self.oplog_checkpoint, os.R_OK)):
logging.critical("Invalid permissions on %s! Exiting" %
(self.oplog_checkpoint))
sys.exit(2)
def join(self):
""" Joins thread, stops it from running
"""
self.can_run = False
for dm in self.doc_managers:
dm.stop()
threading.Thread.join(self)
def write_oplog_progress(self):
""" Writes oplog progress to file provided by user
"""
if self.oplog_checkpoint is None:
return None
# write to temp file
backup_file = self.oplog_checkpoint + '.backup'
os.rename(self.oplog_checkpoint, backup_file)
# for each of the threads write to file
with open(self.oplog_checkpoint, 'w') as dest:
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
for oplog, time_stamp in oplog_dict.items():
oplog_str = str(oplog)
timestamp = util.bson_ts_to_long(time_stamp)
json_str = json.dumps([oplog_str, timestamp])
try:
dest.write(json_str)
except IOError:
# Basically wipe the file, copy from backup
dest.truncate()
with open(backup_file, 'r') as backup:
shutil.copyfile(backup, dest)
break
os.remove(self.oplog_checkpoint + '.backup')
def read_oplog_progress(self):
"""Reads oplog progress from file provided by user.
        This method is only called once before any threads are spawned.
"""
if self.oplog_checkpoint is None:
return None
# Check for empty file
try:
if os.stat(self.oplog_checkpoint).st_size == 0:
logging.info("MongoConnector: Empty oplog progress file.")
return None
except OSError:
return None
source = open(self.oplog_checkpoint, 'r')
try:
data = json.load(source)
except ValueError: # empty file
reason = "It may be empty or corrupt."
logging.info("MongoConnector: Can't read oplog progress file. %s" %
(reason))
source.close()
return None
source.close()
count = 0
oplog_dict = self.oplog_progress.get_dict()
for count in range(0, len(data), 2):
oplog_str = data[count]
time_stamp = data[count + 1]
oplog_dict[oplog_str] = util.long_to_bson_ts(time_stamp)
#stored as bson_ts
def run(self):
"""Discovers the mongo cluster and creates a thread for each primary.
"""
main_conn = MongoClient(self.address)
if self.auth_key is not None:
main_conn['admin'].authenticate(self.auth_username, self.auth_key)
self.read_oplog_progress()
conn_type = None
try:
main_conn.admin.command("isdbgrid")
except pymongo.errors.OperationFailure:
conn_type = "REPLSET"
if conn_type == "REPLSET":
# Make sure we are connected to a replica set
is_master = main_conn.admin.command("isMaster")
if not "setName" in is_master:
logging.error(
'No replica set at "%s"! A replica set is required '
'to run mongo-connector. Shutting down...' % self.address
)
return
# Establish a connection to the replica set as a whole
main_conn.disconnect()
main_conn = MongoClient(self.address,
replicaSet=is_master['setName'])
if self.auth_key is not None:
main_conn.admin.authenticate(self.auth_username, self.auth_key)
#non sharded configuration
oplog_coll = main_conn['local']['oplog.rs']
oplog = OplogThread(
primary_conn=main_conn,
main_address=self.address,
oplog_coll=oplog_coll,
is_sharded=False,
doc_manager=self.doc_managers,
oplog_progress_dict=self.oplog_progress,
namespace_set=self.ns_set,
auth_key=self.auth_key,
auth_username=self.auth_username,
repl_set=is_master['setName'],
collection_dump=self.collection_dump,
batch_size=self.batch_size,
fields=self.fields,
dest_mapping=self.dest_mapping
)
self.shard_set[0] = oplog
logging.info('MongoConnector: Starting connection thread %s' %
main_conn)
oplog.start()
while self.can_run:
if not self.shard_set[0].running:
logging.error("MongoConnector: OplogThread"
" %s unexpectedly stopped! Shutting down" %
(str(self.shard_set[0])))
self.oplog_thread_join()
for dm in self.doc_managers:
dm.stop()
return
self.write_oplog_progress()
time.sleep(1)
else: # sharded cluster
while self.can_run is True:
for shard_doc in main_conn['config']['shards'].find():
shard_id = shard_doc['_id']
if shard_id in self.shard_set:
if not self.shard_set[shard_id].running:
logging.error("MongoConnector: OplogThread "
"%s unexpectedly stopped! Shutting "
"down" %
(str(self.shard_set[shard_id])))
self.oplog_thread_join()
for dm in self.doc_managers:
dm.stop()
return
self.write_oplog_progress()
time.sleep(1)
continue
try:
repl_set, hosts = shard_doc['host'].split('/')
except ValueError:
cause = "The system only uses replica sets!"
logging.error("MongoConnector: %s", cause)
self.oplog_thread_join()
for dm in self.doc_managers:
dm.stop()
return
shard_conn = MongoClient(hosts, replicaSet=repl_set)
oplog_coll = shard_conn['local']['oplog.rs']
oplog = OplogThread(
primary_conn=shard_conn,
main_address=self.address,
oplog_coll=oplog_coll,
is_sharded=True,
doc_manager=self.doc_managers,
oplog_progress_dict=self.oplog_progress,
namespace_set=self.ns_set,
auth_key=self.auth_key,
auth_username=self.auth_username,
collection_dump=self.collection_dump,
batch_size=self.batch_size,
fields=self.fields,
dest_mapping=self.dest_mapping
)
self.shard_set[shard_id] = oplog
msg = "Starting connection thread"
logging.info("MongoConnector: %s %s" % (msg, shard_conn))
oplog.start()
self.oplog_thread_join()
self.write_oplog_progress()
def oplog_thread_join(self):
"""Stops all the OplogThreads
"""
logging.info('MongoConnector: Stopping all OplogThreads')
for thread in self.shard_set.values():
thread.join()
def main():
""" Starts the mongo connector (assuming CLI)
"""
parser = optparse.OptionParser()
#-m is for the main address, which is a host:port pair, ideally of the
#mongos. For non sharded clusters, it can be the primary.
parser.add_option("-m", "--main", action="store", type="string",
dest="main_addr", default="localhost:27217",
help="""Specify the main address, which is a"""
""" host:port pair. For sharded clusters, this"""
""" should be the mongos address. For individual"""
""" replica sets, supply the address of the"""
""" primary. For example, `-m localhost:27217`"""
""" would be a valid argument to `-m`. Don't use"""
""" quotes around the address.""")
#-o is to specify the oplog-config file. This file is used by the system
#to store the last timestamp read on a specific oplog. This allows for
#quick recovery from failure.
parser.add_option("-o", "--oplog-ts", action="store", type="string",
dest="oplog_config", default="config.txt",
help="""Specify the name of the file that stores the """
"""oplog progress timestamps. """
"""This file is used by the system to store the last """
"""timestamp read on a specific oplog. This allows """
"""for quick recovery from failure. By default this """
"""is `config.txt`, which starts off empty. An empty """
"""file causes the system to go through all the mongo """
"""oplog and sync all the documents. Whenever the """
"""cluster is restarted, it is essential that the """
"""oplog-timestamp config file be emptied - otherwise """
"""the connector will miss some documents and behave """
"""incorrectly.""")
#--no-dump specifies whether we should read an entire collection from
#scratch if no timestamp is found in the oplog_config.
parser.add_option("--no-dump", action="store_true", default=False, help=
"If specified, this flag will ensure that "
"mongo_connector won't read the entire contents of a "
"namespace iff --oplog-ts points to an empty file.")
#--batch-size specifies num docs to read from oplog before updating the
#--oplog-ts config file with current oplog position
parser.add_option("--batch-size", action="store",
default=constants.DEFAULT_BATCH_SIZE, type="int",
help="Specify an int to update the --oplog-ts "
"config file with latest position of oplog every "
"N documents. By default, the oplog config isn't "
"updated until we've read through the entire oplog. "
"You may want more frequent updates if you are at risk "
"of falling behind the earliest timestamp in the oplog")
#-t is to specify the URL to the target system being used.
parser.add_option("-t", "--target-url", "--target-urls", action="store",
type="string", dest="urls", default=None, help=
"""Specify the URL to each target system being """
"""used. For example, if you were using Solr out of """
"""the box, you could use '-t """
"""http://localhost:8080/solr' with the """
"""SolrDocManager to establish a proper connection. """
"""URLs should be specified in the same order as """
"""their respective doc managers in the """
"""--doc-managers option. URLs are assigned to doc """
"""managers respectively. Additional doc managers """
"""are implied to have no target URL. Additional """
"""URLs are implied to have the same doc manager """
"""type as the last doc manager for which a URL was """
"""specified. """
"""Don't use quotes around addresses. """)
#-n is to specify the namespaces we want to consider. The default
#considers all the namespaces
parser.add_option("-n", "--namespace-set", action="store", type="string",
dest="ns_set", default=None, help=
"""Used to specify the namespaces we want to """
"""consider. For example, if we wished to store all """
"""documents from the test.test and alpha.foo """
"""namespaces, we could use `-n test.test,alpha.foo`. """
"""The default is to consider all the namespaces, """
"""excluding the system and config databases, and """
"""also ignoring the "system.indexes" collection in """
"""any database.""")
#-u is to specify the mongoDB field that will serve as the unique key
#for the target system,
parser.add_option("-u", "--unique-key", action="store", type="string",
dest="u_key", default="_id", help=
"""The name of the MongoDB field that will serve """
"""as the unique key for the target system. """
"""Note that this option does not apply """
"""when targeting another MongoDB cluster. """
"""Defaults to "_id".""")
#-f is to specify the authentication key file. This file is used by mongos
#to authenticate connections to the shards, and we'll use it in the oplog
#threads.
parser.add_option("-f", "--password-file", action="store", type="string",
dest="auth_file", default=None, help=
"""Used to store the password for authentication."""
""" Use this option if you wish to specify a"""
""" username and password but don't want to"""
""" type in the password. The contents of this"""
""" file should be the password for the admin user.""")
#-p is to specify the password used for authentication.
parser.add_option("-p", "--password", action="store", type="string",
dest="password", default=None, help=
"""Used to specify the password."""
""" This is used by mongos to authenticate"""
""" connections to the shards, and in the"""
""" oplog threads. If authentication is not used, then"""
""" this field can be left empty as the default """)
#-a is to specify the username for authentication.
parser.add_option("-a", "--admin-username", action="store", type="string",
dest="admin_name", default="__system", help=
"""Used to specify the username of an admin user to """
"""authenticate with. To use authentication, the user """
"""must specify both an admin username and a keyFile. """
"""The default username is '__system'""")
#-d is to specify the doc manager file.
parser.add_option("-d", "--docManager", "--doc-managers", action="store",
type="string", dest="doc_managers", default=None, help=
"""Used to specify the path to each doc manager """
"""file that will be used. DocManagers should be """
"""specified in the same order as their respective """
"""target addresses in the --target-urls option. """
"""URLs are assigned to doc managers """
"""respectively. Additional doc managers are """
"""implied to have no target URL. Additional URLs """
"""are implied to have the same doc manager type as """
"""the last doc manager for which a URL was """
"""specified. By default, Mongo Connector will use """
"""'doc_manager_simulator.py'. It is recommended """
"""that all doc manager files be kept in the """
"""doc_managers folder in mongo-connector. For """
"""more information about making your own doc """
"""manager, see 'Writing Your Own DocManager' """
"""section of the wiki""")
#-g is the destination namespace
parser.add_option("-g", "--dest-namespace-set", action="store",
type="string", dest="dest_ns_set", default=None, help=
"""Specify a destination namespace mapping. Each """
"""namespace provided in the --namespace-set option """
"""will be mapped respectively according to this """
"""comma-separated list. These lists must have """
"""equal length. The default is to use the identity """
"""mapping. This is currently only implemented """
"""for mongo-to-mongo connections.""")
#-s is to enable syslog logging.
parser.add_option("-s", "--enable-syslog", action="store_true",
dest="enable_syslog", default=False, help=
"""Used to enable logging to syslog."""
""" Use -l to specify syslog host.""")
#--syslog-host is to specify the syslog host.
parser.add_option("--syslog-host", action="store", type="string",
dest="syslog_host", default="localhost:514", help=
"""Used to specify the syslog host."""
""" The default is 'localhost:514'""")
#--syslog-facility is to specify the syslog facility.
parser.add_option("--syslog-facility", action="store", type="string",
dest="syslog_facility", default="user", help=
"""Used to specify the syslog facility."""
""" The default is 'user'""")
#-i to specify the list of fields to export
parser.add_option("-i", "--fields", action="store", type="string",
dest="fields", default=None, help=
"""Used to specify the list of fields to export. """
"""Specify a field or fields to include in the export. """
"""Use a comma separated list of fields to specify multiple """
"""fields. The '_id', 'ns' and '_ts' fields are always """
"""exported.""")
#--auto-commit-interval to specify auto commit time interval
parser.add_option("--auto-commit-interval", action="store",
dest="commit_interval", type="int",
default=constants.DEFAULT_COMMIT_INTERVAL,
help="""Seconds in-between calls for the Doc Manager"""
""" to commit changes to the target system. A value of"""
""" 0 means to commit after every write operation."""
""" When left unset, Mongo Connector will not make"""
""" explicit commits. Some systems have"""
""" their own mechanism for adjusting a commit"""
""" interval, which should be preferred to this"""
""" option.""")
    #-v enables verbose logging
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose", default=False,
help="Sets verbose logging to be on.")
#-w enable logging to a file
parser.add_option("-w", "--logfile", dest="logfile",
help=("Log all output to a file rather than stream to "
"stderr. Omit to stream to stderr."))
(options, args) = parser.parse_args()
logger = logging.getLogger()
loglevel = logging.INFO
if options.verbose:
loglevel = logging.DEBUG
logger.setLevel(loglevel)
if options.enable_syslog and options.logfile:
print ("You cannot specify syslog and a logfile simultaneously, please"
" choose the logging method you would prefer.")
sys.exit(1)
if options.enable_syslog:
syslog_info = options.syslog_host.split(":")
syslog_host = logging.handlers.SysLogHandler(
address=(syslog_info[0], int(syslog_info[1])),
facility=options.syslog_facility
)
syslog_host.setLevel(loglevel)
logger.addHandler(syslog_host)
elif options.logfile is not None:
log_out = logging.FileHandler(options.logfile)
log_out.setLevel(loglevel)
log_out.setFormatter(logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(log_out)
else:
log_out = logging.StreamHandler()
log_out.setLevel(loglevel)
log_out.setFormatter(logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(log_out)
logger.info('Beginning Mongo Connector')
# Get DocManagers and target URLs
# Each DocManager is assigned the respective (same-index) target URL
# Additional DocManagers may be specified that take no target URL
doc_managers = options.doc_managers
doc_managers = doc_managers.split(",") if doc_managers else doc_managers
target_urls = options.urls.split(",") if options.urls else None
if options.doc_managers is None:
logger.info('No doc managers specified, using simulator.')
if options.ns_set is None:
ns_set = []
else:
ns_set = options.ns_set.split(',')
if options.dest_ns_set is None:
dest_ns_set = ns_set
else:
dest_ns_set = options.dest_ns_set.split(',')
if len(dest_ns_set) != len(ns_set):
logger.error("Destination namespace must be the same length as the "
"origin namespace!")
sys.exit(1)
elif len(set(ns_set)) + len(set(dest_ns_set)) != 2 * len(ns_set):
logger.error("Namespace set and destination namespace set should not "
"contain any duplicates!")
sys.exit(1)
else:
## Create a mapping of source ns to dest ns as a dict
dest_mapping = dict(zip(ns_set, dest_ns_set))
fields = options.fields
if fields is not None:
fields = options.fields.split(',')
key = None
if options.auth_file is not None:
try:
key = open(options.auth_file).read()
re.sub(r'\s', '', key)
except IOError:
logger.error('Could not parse password authentication file!')
sys.exit(1)
if options.password is not None:
key = options.password
if key is None and options.admin_name != "__system":
logger.error("Admin username specified without password!")
sys.exit(1)
if options.commit_interval is not None and options.commit_interval < 0:
raise ValueError("--auto-commit-interval must be non-negative")
connector = Connector(
address=options.main_addr,
oplog_checkpoint=options.oplog_config,
target_url=target_urls,
ns_set=ns_set,
u_key=options.u_key,
auth_key=key,
doc_manager=doc_managers,
auth_username=options.admin_name,
collection_dump=(not options.no_dump),
batch_size=options.batch_size,
fields=fields,
dest_mapping=dest_mapping,
auto_commit_interval=options.commit_interval
)
connector.start()
while True:
try:
time.sleep(3)
if not connector.is_alive():
break
except KeyboardInterrupt:
logging.info("Caught keyboard interrupt, exiting!")
connector.join()
break
if __name__ == '__main__':
main()
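# Programmatic usage sketch (values are illustrative assumptions; the CLI in
# main() above is the usual entry point):
#
#   connector = Connector(
#       address='localhost:27017',
#       oplog_checkpoint='config.txt',
#       target_url=None,            # no target URL -> simulator DocManager
#       ns_set=['test.test'],
#       u_key='_id',
#       auth_key=None,
#   )
#   connector.start()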
| apache-2.0 | -3,153,926,013,907,341,300 | 42.967832 | 85 | 0.533193 | false | 4.748074 | true | false | false |
v4hn/ecto | test/scripts/test_tendrils.py | 1 | 2868 | #!/usr/bin/env python
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ecto
import ecto.ecto_test as ecto_test
def test_tendrils():
t = ecto.Tendrils()
t.declare("Hello","doc str",6)
assert t.Hello == 6
assert t["Hello"] == 6
t.declare("x","a number", "str")
assert len(t) == 2
assert t["x"] == "str"
assert t.x == "str"
#test the redeclare
try:
t.declare("Hello","new doc", "you")
util.fail()
except ecto.TendrilRedeclaration, e:
print str(e)
assert('TendrilRedeclaration' in str(e))
try:
#read error
t.nonexistant = 1
util.fail()
except ecto.NonExistant, e:
print str(e)
assert "tendril_key nonexistant" in str(e)
try:
#index error
print t["nonexistant"]
util.fail()
except ecto.NonExistant, e:
print str(e)
assert "tendril_key nonexistant" in str(e)
assert len(t.keys()) == 2
assert len(t.values()) == 2
print t
#by value
_x = t.x
_x = 10
assert t.x != 10
x = t.x
t.x = 11
assert x != 11
#by reference
x = t.at("x")
t.x = 13
assert x.val == 13
t.x = 17
assert t.x == 17
t.x = 199
t.x = 15
print t.x
assert t.x == 15
if __name__ == '__main__':
test_tendrils()
| bsd-3-clause | 3,189,837,938,713,107,000 | 31.590909 | 77 | 0.656206 | false | 3.681643 | false | false | false |
lazuxd/teste-admitere-snpap | slidingpanel.py | 1 | 3425 | # -*- coding: utf-8 -*-
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import OptionProperty, NumericProperty, StringProperty, \
BooleanProperty, ListProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.relativelayout import RelativeLayout
Builder.load_string("""
#: import Window kivy.core.window.Window
<SlidingPanel>
orientation: 'vertical'
size_hint_x: None
width: Window.width - dp(56) if Window.width - dp(56) < dp(320) else dp(320)
x: -1 * self.width if self.side == 'left' else Window.width
<PanelShadow>
canvas:
Color:
rgba: root.color
Rectangle:
size: root.size
""")
class PanelShadow(BoxLayout):
color = ListProperty([0, 0, 0, 0])
class SlidingPanel(BoxLayout):
anim_length_close = NumericProperty(0.3)
anim_length_open = NumericProperty(0.3)
animation_t_open = StringProperty('out_sine')
animation_t_close = StringProperty('out_sine')
side = OptionProperty('left', options=['left', 'right'])
_open = False
def __init__(self, **kwargs):
super(SlidingPanel, self).__init__(**kwargs)
self.width = Window.width - dp(56) if Window.width - dp(56) < dp(320) else dp(320)
self.shadow = PanelShadow()
Clock.schedule_once(lambda x: Window.add_widget(self.shadow,89), 0)
Clock.schedule_once(lambda x: Window.add_widget(self,90), 0)
def toggle(self):
Animation.stop_all(self, 'x')
Animation.stop_all(self.shadow, 'color')
if self._open:
if self.side == 'left':
target_x = -1 * self.width
else:
target_x = Window.width
sh_anim = Animation(duration=self.anim_length_open,
t=self.animation_t_open,
color=[0, 0, 0, 0])
sh_anim.start(self.shadow)
self._get_main_animation(duration=self.anim_length_close,
t=self.animation_t_close,
x=target_x,
is_closing=True).start(self)
self._open = False
else:
if self.side == 'left':
target_x = 0
else:
target_x = Window.width - self.width
Animation(duration=self.anim_length_open, t=self.animation_t_open,
color=[0, 0, 0, 0.5]).start(self.shadow)
self._get_main_animation(duration=self.anim_length_open,
t=self.animation_t_open,
x=target_x,
is_closing=False).start(self)
self._open = True
def _get_main_animation(self, duration, t, x, is_closing):
return Animation(duration=duration, t=t, x=x)
def on_touch_down(self, touch):
# Prevents touch events from propagating to anything below the widget.
super(SlidingPanel, self).on_touch_down(touch)
if self.collide_point(*touch.pos) or self._open:
return True
def on_touch_up(self, touch):
super(SlidingPanel, self).on_touch_up(touch)
if not self.collide_point(touch.x, touch.y) and self._open:
self.toggle()
return True
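# Usage sketch (illustrative): the panel adds itself to the Window in
# __init__, so it only needs to be instantiated and toggled.
#
#   panel = SlidingPanel(side='left')
#   panel.add_widget(Label(text='Menu'))
#   menu_button.bind(on_release=lambda *args: panel.toggle())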
| mit | -6,720,822,278,794,531,000 | 35.827957 | 90 | 0.574015 | false | 3.767877 | false | false | false |
sophilabs/py101 | py101/lists/__init__.py | 1 | 1763 | """"
Introduction Adventure
Author: Ignacio Avas ([email protected])
"""
import codecs
import io
import sys
import unittest
from story.adventures import AdventureVerificationError, BaseAdventure
from story.translation import gettext as _
class TestOutput(unittest.TestCase):
"""Variables Adventure test"""
def __init__(self, candidate_code, file_name='<inline>'):
"""Init the test"""
super(TestOutput, self).__init__()
self.candidate_code = candidate_code
self.file_name = file_name
def setUp(self):
self.__old_stdout = sys.stdout
sys.stdout = self.__mockstdout = io.StringIO()
def tearDown(self):
sys.stdout = self.__old_stdout
self.__mockstdout.close()
def runTest(self):
"""Makes a simple test of the output"""
code = compile(self.candidate_code, self.file_name, 'exec', optimize=0)
self.assertIn('languages',
code.co_names,
'Should have defined languages variable')
exec(code)
lines = self.__mockstdout.getvalue().split('\n')
self.assertEqual([str(["ADA", "Pascal", "Fortran", "Smalltalk"]), ''],
lines,
'Should have same output'
)
class Adventure(BaseAdventure):
"""Lists Adventure"""
title = _('Lists')
@classmethod
def test(cls, sourcefile):
"""Test against the provided file"""
suite = unittest.TestSuite()
raw_program = codecs.open(sourcefile).read()
suite.addTest(TestOutput(raw_program, sourcefile))
result = unittest.TextTestRunner().run(suite)
if not result.wasSuccessful():
raise AdventureVerificationError()
| mit | 1,598,433,818,799,941,000 | 29.396552 | 79 | 0.601815 | false | 4.177725 | true | false | false |
ZeitOnline/zeit.push | src/zeit/push/browser/mobile.py | 1 | 1348 | from zope.cachedescriptors.property import Lazy as cachedproperty
import logging
import sys
import zeit.push.interfaces
import zope.security.proxy
log = logging.getLogger(__name__)
class FindTitle(object):
def __call__(self):
name = self.request.form.get('q')
if not name:
return ''
source = zeit.push.interfaces.PAYLOAD_TEMPLATE_SOURCE.factory
template = source.find(name)
return source.getDefaultTitle(template)
class PreviewPayload(object):
@cachedproperty
def message(self):
# We need to talk to private API
push = zope.security.proxy.getObject(
zeit.push.interfaces.IPushMessages(self.context))
return push._create_message(
'mobile', self.context, push.get(type='mobile'))
@cachedproperty
def rendered(self):
return self.message._render()
def rendered_linenumbers(self):
result = []
for i, line in enumerate(self.rendered.split('\n')):
result.append(u'%03d %s' % (i, line))
return '\n'.join(result)
@cachedproperty
def error(self):
try:
self.message.validate_template(self.rendered)
except Exception, e:
e.traceback = zeit.cms.browser.error.getFormattedException(
sys.exc_info())
return e
| bsd-3-clause | 2,564,557,959,152,968,000 | 27.083333 | 71 | 0.626855 | false | 4.084848 | false | false | false |
grafgustav/accessmail | src/Service/ImapReceiver.py | 1 | 1024 | __author__ = 'phillip'
from .MailReceiver import MailReceiver
import poplib
class IMAPReceiver(MailReceiver):
    def __init__(self, config):
        # Connection is created lazily in connect(); keep one attribute name.
        self._server = None
    def connect(self, config):
        # Built on poplib (POP3) despite the class name, as in the original.
        # The config attributes used here (host, port, user, password) are
        # assumptions for illustration, not a confirmed Config interface.
        self._server = poplib.POP3_SSL(config.host, config.port)
        self._server.apop(config.user, config.password)
def delete_mail(self, n):
self._server.dele(n)
def list_folders(self):
pass
def create_folder(self, name):
pass
def get_number_of_mails(self):
count, size = self._server.stat()
return count
def change_folder(self, path):
pass
def get_header(self, n):
return self._server.top(n,0)
def can_create_folder(self):
return False
def delete_folder(self, name):
pass
def get_total_mails(self):
return self.get_number_of_mails()
def get_mail(self, n):
return self._server.retr(n)
def get_mailbox_size(self):
count, size = self._server.stat()
return size
def quit(self):
self._server.quit() | mit | -1,807,214,119,877,362,200 | 18.711538 | 41 | 0.584961 | false | 3.605634 | false | false | false |
kurennon/misc-tools | find_validator/find_validator.py | 1 | 1259 | #!/usr/bin/env python3
DIG_CHARS = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def find_validator(dig_string, old_base):
dig_sum = sum_digits(dig_string, old_base)
return dig_sum[-1:].upper()
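# Worked example: find_validator("FF", 16) sums the digits (15 + 15 = 30),
# re-encodes the sum in base 16 ("1E") and returns its last digit -> "E".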
def sum_digits(dig_string, old_base):
int_sum = 0
while dig_string:
int_sum += int(dig_string[:1], base=old_base)
dig_string = dig_string[1:]
dig_sum = unint(int_sum, old_base)
return dig_sum
def unint(int_val, new_base):
if int_val < new_base:
return DIG_CHARS[int_val]
else:
return unint(int_val//new_base, new_base) + DIG_CHARS[int_val%new_base]
if __name__ == "__main__":
print("Welcome to find_validator.py!\nPlease enter an invalid base to quit" +
"\nor q at the validator to choose a new base.")
work_base = 1
while 0 < work_base < 35:
dig_string = ""
work_base = int(input("\nEnter the base of the number(s) you would like to validate: "))
if work_base <= 0 or work_base > 35:
break
while dig_string.lower() != "q":
dig_string = input("Enter a number to validate: ")
if dig_string.lower() == "q":
break
print("The validator is:", find_validator(dig_string, work_base))
| gpl-3.0 | 3,645,768,671,179,305,500 | 36.029412 | 96 | 0.590151 | false | 3.375335 | false | false | false |
yggi49/wtforms-polyglot | wtf_polyglot/meta.py | 1 | 2876 | from __future__ import unicode_literals
try:
from html import escape
from html.parser import HTMLParser
except ImportError:
from cgi import escape
from HTMLParser import HTMLParser
from wtforms.meta import DefaultMeta
from wtforms.widgets.core import HTMLString
class PolyglotHTMLParser(HTMLParser):
"""This simplified ``HTMLParser`` converts its input to polyglot HTML.
It works by making sure that stand-alone tags like ``<input>`` have a
slash before the closing angle bracket, that attribute values are always
quoted, and that boolean attributes have their value set to the attribute
name (e.g., ``checked="checked"``).
Note: boolean attributes are simply identified as attributes with no value
at all. Specifically, an attribute with an empty string (e.g.,
``checked=""``) will *not* be identified as boolean attribute, i.e., there
is no semantic intelligence involved.
>>> parser = PolyglotHTMLParser()
>>> parser.feed('''<input type=checkbox name=foo value=y checked>''')
>>> print(parser.get_output())
<input type="checkbox" name="foo" value="y" checked="checked" />
"""
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.output = []
def html_params(self, attrs):
output = []
for key, value in attrs:
if value is None:
value = key
output.append(' {}="{}"'.format(key, escape(value, quote=True)))
return ''.join(output)
def handle_starttag(self, tag, attrs):
if tag == 'input':
return self.handle_startendtag(tag, attrs)
self.output.append('<{}{}>'.format(tag, self.html_params(attrs)))
def handle_endtag(self, tag):
self.output.append('</{}>'.format(tag))
def handle_startendtag(self, tag, attrs):
self.output.append('<{}{} />'.format(tag, self.html_params(attrs)))
def handle_data(self, data):
self.output.append(data)
def handle_entityref(self, name):
self.output.append('&{};'.format(name))
def handle_charref(self, name):
self.output.append('&#{};'.format(name))
def get_output(self):
return ''.join(self.output)
class PolyglotMeta(DefaultMeta):
"""
This meta class works exactly like ``DefaultMeta``, except that fields of
forms using this meta class will output polyglot markup.
"""
def render_field(self, field, render_kw):
"""
Render a widget, and convert its output to polyglot HTML.
"""
other_kw = getattr(field, 'render_kw', None)
if other_kw is not None:
render_kw = dict(other_kw, **render_kw)
html = field.widget(field, **render_kw)
parser = PolyglotHTMLParser()
parser.feed(html)
output = HTMLString(parser.get_output())
return output
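# Usage sketch (assumes WTForms 2.x; the form and field names below are
# illustrative):
#
#   class SignupForm(Form):
#       Meta = PolyglotMeta
#       agree = BooleanField('I agree')
#
#   SignupForm().agree()  # widget output is converted to polyglot markup,
#                         # e.g. '<input ... type="checkbox" />'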
| bsd-3-clause | -4,385,307,373,991,363,000 | 31.681818 | 78 | 0.631085 | false | 4.050704 | false | false | false |