repo_name (stringlengths 5-92) | path (stringlengths 4-221) | copies (stringclasses, 19 values) | size (stringlengths 4-6) | content (stringlengths 766-896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ool2016-seclab/quarantineSystem | api.py | 1 | 3438 | import json
import logging
import ryutest
from webob import Response
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.app.wsgi import ControllerBase, WSGIApplication, route
from ryu.lib import dpid as dpid_lib
simple_switch_instance_name = 'simple_switch_api_app'
url = '/simpleswitch/mactable/{dpid}'
class SimpleSwitchRest13(ryutest.SimpleSwitch13):
_CONTEXTS = { 'wsgi': WSGIApplication }
def __init__(self, *args, **kwargs):
super(SimpleSwitchRest13, self).__init__(*args, **kwargs)
self.switches = {}
wsgi = kwargs['wsgi']
wsgi.register(SimpleSwitchController, {simple_switch_instance_name : self})
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
super(SimpleSwitchRest13, self).switch_features_handler(ev)
datapath = ev.msg.datapath
self.switches[datapath.id] = datapath
self.mac_to_port.setdefault(datapath.id, {})
def set_mac_to_port(self, dpid, entry):
mac_table = self.mac_to_port.setdefault(dpid, {})
datapath = self.switches.get(dpid)
entry_port = entry['port']
entry_mac = entry['mac']
if datapath is not None:
parser = datapath.ofproto_parser
if entry_port not in mac_table.values():
for mac, port in mac_table.items():
# from known device to new device
actions = [parser.OFPActionOutput(entry_port)]
match = parser.OFPMatch(in_port=port, eth_dst=entry_mac)
self.add_flow(datapath, 1, match, actions)
# from new device to known device
actions = [parser.OFPActionOutput(port)]
match = parser.OFPMatch(in_port=entry_port, eth_dst=mac)
self.add_flow(datapath, 1, match, actions)
mac_table.update({entry_mac : entry_port})
return mac_table
class SimpleSwitchController(ControllerBase):
def __init__(self, req, link, data, **config):
super(SimpleSwitchController, self).__init__(req, link, data, **config)
self.simpl_switch_spp = data[simple_switch_instance_name]
@route('simpleswitch', url, methods=['GET'], requirements={'dpid': dpid_lib.DPID_PATTERN})
def list_mac_table(self, req, **kwargs):
simple_switch = self.simpl_switch_spp
dpid = dpid_lib.str_to_dpid(kwargs['dpid'])
if dpid not in simple_switch.mac_to_port:
return Response(status=404)
mac_table = simple_switch.mac_to_port.get(dpid, {})
body = json.dumps(mac_table)
return Response(content_type='application/json', body=body)
@route('simpleswitch', url, methods=['PUT'], requirements={'dpid': dpid_lib.DPID_PATTERN})
def put_mac_table(self, req, **kwargs):
simple_switch = self.simpl_switch_spp
dpid = dpid_lib.str_to_dpid(kwargs['dpid'])
        new_entry = json.loads(req.body)  # parse the JSON body; eval() on request data is unsafe
if dpid not in simple_switch.mac_to_port:
return Response(status=404)
try:
mac_table = simple_switch.set_mac_to_port(dpid, new_entry)
body = json.dumps(mac_table)
return Response(content_type='application/json', body=body)
except Exception as e:
return Response(status=500) | mit | 627,949,269,274,732,800 | 36.380435 | 94 | 0.630308 | false | 3.745098 | false | false | false |
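A minimal client sketch for the two REST routes defined in the file above. It is illustrative only: it assumes the Ryu controller is serving its WSGI application on 127.0.0.1:8080 (the usual default) and that a switch with datapath id 0000000000000001 is connected; both values are placeholders.

```python
import json
import urllib.request

# hypothetical endpoint: default Ryu WSGI port and an example 16-digit dpid
BASE = 'http://127.0.0.1:8080/simpleswitch/mactable/0000000000000001'

# GET: dump the current MAC table for that datapath
with urllib.request.urlopen(BASE) as resp:
    print(json.loads(resp.read()))

# PUT: install a mac/port entry; the handler above parses the JSON body
entry = json.dumps({'mac': '00:00:00:00:00:01', 'port': 1}).encode('utf-8')
req = urllib.request.Request(BASE, data=entry, method='PUT')
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read()))
```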
lerker/cupydle | cupydle/dnn/viejo/Neurons.py | 1 | 3537 | import numpy as np
__author__ = "Nelson Ponzoni"
__copyright__ = "Copyright 2015-2016, Proyecto Final de Carrera"
__credits__ = ["Nelson Ponzoni"]
__license__ = "GPL"
__version__ = "20160101"
__maintainer__ = "Nelson Ponzoni"
__email__ = "[email protected]"
__status__ = "Production"
"""
Neurons class: an abstraction over a pack of neurons that composes a neural layer.
"""
class Neurons(object):
def __init__(self, mat, shape):
if len(shape) == 1:
shape += (1,)
if isinstance(mat, list):
mat = np.array(mat)
        # the matrix must have the desired shape
self.matrix = mat.reshape(shape)
self.rows = shape[0]
self.cols = shape[1]
    # intrinsic properties of the neurons
@property
def shape(self):
return self.rows, self.cols
@property
def count(self):
rows, cols = self.shape
return rows * cols
def __str__(self):
return str(self.matrix)
def __mul__(self, other):
if isinstance(other, Neurons):
other = other.matrix
return Neurons(self.matrix * other, self.shape)
def __div__(self, other):
if isinstance(other, Neurons):
other = other.matrix
return Neurons(self.matrix / other, self.shape)
def __sub__(self, other):
if isinstance(other, Neurons):
other = other.matrix
return Neurons(self.matrix - other, self.shape)
def __add__(self, other):
if isinstance(other, Neurons):
other = other.matrix
return Neurons(self.matrix + other, self.shape)
def __pow__(self, power):
return Neurons(self.matrix ** power, self.shape)
    # basic operations
def mul_elemwise(self, array):
if isinstance(array, Neurons):
array = array.matrix
return Neurons(np.multiply(self.matrix, array), self.shape)
def mul_array(self, array):
if isinstance(array, Neurons):
array = array.matrix
arrshape = array.shape
if len(arrshape) == 1:
            arrshape += (1,)  # add the missing dimension
shape = self.rows, arrshape[1]
return Neurons(self.matrix.dot(array), shape)
def sum_array(self, array):
if isinstance(array, Neurons):
array = array.matrix
return Neurons(self.matrix + array, self.shape)
def dot(self, vec):
return self.matrix.dot(vec)
def outer(self, array):
if isinstance(array, Neurons):
array = array.matrix
res = np.outer(self.matrix, array)
shape = res.shape
return Neurons(res, shape)
def transpose(self):
return Neurons(self.matrix.transpose(), self.shape[::-1])
def loss(self, fun, y):
return fun(self.matrix, y)
def loss_d(self, fun, y):
return Neurons(fun(self.matrix, y), self.shape)
def activation(self, fun):
        # in Python 3, map returns an iterator instead of a list,
        # see: http://stackoverflow.com/questions/28524378/convert-map-object-to-numpy-array-in-python-3
return Neurons(list(map(lambda x: fun(x), self.matrix)), self.shape)
def softmax(self):
        # implementation tip from http://ufldl.stanford.edu/wiki/index.php/Exercise:Softmax_Regression
x = self.matrix
# instead: first shift the values of f so that the highest number is 0:
        x = x - np.max(x)  # np.max gives the global maximum of the matrix
softmat = np.exp(x) / (sum(np.exp(x)))
return Neurons(softmat, self.shape)
| apache-2.0 | -3,702,147,777,341,866,000 | 30.026316 | 109 | 0.599943 | false | 3.544088 | false | false | false |
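A small, hypothetical exercise of the Neurons wrapper above (assuming the class is importable as defined); it walks through the matrix product, an elementwise activation and the softmax helper.

```python
import numpy as np

w = Neurons(np.arange(6.0), (2, 3))      # 2x3 block of weights
x = np.array([[0.5], [1.0], [1.5]])      # 3x1 input column
z = w.mul_array(x)                       # 2x1 pre-activation
a = z.activation(np.tanh)                # elementwise activation
print(z.shape, a)

probs = Neurons([1.0, 2.0, 3.0], (3, 1)).softmax()
print(probs)                             # the single column sums to 1
```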
hesseltuinhof/mxnet | python/mxnet/gluon/model_zoo/vision/__init__.py | 1 | 3746 | # coding: utf-8
# pylint: disable=wildcard-import, arguments-differ
r"""Module for pre-defined neural network models.
This module contains definitions for the following model architectures:
- `AlexNet`_
- `DenseNet`_
- `Inception V3`_
- `ResNet V1`_
- `ResNet V2`_
- `SqueezeNet`_
- `VGG`_
You can construct a model with random weights by calling its constructor:
.. code:: python
    from mxnet.gluon.model_zoo import vision as models
resnet18 = models.resnet18_v1()
alexnet = models.alexnet()
squeezenet = models.squeezenet1_0()
    densenet = models.densenet161()
We provide pre-trained models for all the models except ResNet V2.
These can be constructed by passing
``pretrained=True``:
.. code:: python
    from mxnet.gluon.model_zoo import vision as models
resnet18 = models.resnet18_v1(pretrained=True)
alexnet = models.alexnet(pretrained=True)
Pretrained models are converted from torchvision.
All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB images of shape (N x 3 x H x W),
where N is the batch size, and H and W are expected to be at least 224.
The images have to be loaded in to a range of [0, 1] and then normalized
using ``mean = [0.485, 0.456, 0.406]`` and ``std = [0.229, 0.224, 0.225]``.
The transformation should preferably happen at preprocessing. You can use
``mx.image.color_normalize`` for such transformation::
image = image/255
normalized = mx.image.color_normalize(image,
mean=mx.nd.array([0.485, 0.456, 0.406]),
std=mx.nd.array([0.229, 0.224, 0.225]))
.. _AlexNet: https://arxiv.org/abs/1404.5997
.. _DenseNet: https://arxiv.org/abs/1608.06993
.. _Inception V3: http://arxiv.org/abs/1512.00567
.. _ResNet V1: https://arxiv.org/abs/1512.03385
.. _ResNet V2: https://arxiv.org/abs/1603.05027
.. _SqueezeNet: https://arxiv.org/abs/1602.07360
.. _VGG: https://arxiv.org/abs/1409.1556
"""
from .alexnet import *
from .densenet import *
from .inception import *
from .resnet import *
from .squeezenet import *
from .vgg import *
def get_model(name, **kwargs):
"""Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
Returns
-------
HybridBlock
The model.
"""
models = {'resnet18_v1': resnet18_v1,
'resnet34_v1': resnet34_v1,
'resnet50_v1': resnet50_v1,
'resnet101_v1': resnet101_v1,
'resnet152_v1': resnet152_v1,
'resnet18_v2': resnet18_v2,
'resnet34_v2': resnet34_v2,
'resnet50_v2': resnet50_v2,
'resnet101_v2': resnet101_v2,
'resnet152_v2': resnet152_v2,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'vgg11_bn': vgg11_bn,
'vgg13_bn': vgg13_bn,
'vgg16_bn': vgg16_bn,
'vgg19_bn': vgg19_bn,
'alexnet': alexnet,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'squeezenet1.0': squeezenet1_0,
'squeezenet1.1': squeezenet1_1,
'inceptionv3': inception_v3,
}
name = name.lower()
if name not in models:
raise ValueError(
'Model %s is not supported. Available options are\n\t%s'%(
name, '\n\t'.join(sorted(models.keys()))))
return models[name](**kwargs)
| apache-2.0 | -176,548,373,229,067,420 | 33.366972 | 82 | 0.60331 | false | 3.254561 | false | false | false |
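A short usage sketch for the ``get_model`` helper documented above. It assumes an MXNet build that ships ``gluon.model_zoo.vision`` and network access to download the pretrained ResNet-18 weights; the random tensor stands in for a real decoded image.

```python
import mxnet as mx
from mxnet.gluon.model_zoo import vision

net = vision.get_model('resnet18_v1', pretrained=True)

# stand-in for a decoded H x W x C image already scaled to [0, 1]
image = mx.nd.random.uniform(shape=(224, 224, 3))
normalized = mx.image.color_normalize(image,
                                      mean=mx.nd.array([0.485, 0.456, 0.406]),
                                      std=mx.nd.array([0.229, 0.224, 0.225]))
batch = mx.nd.transpose(normalized, axes=(2, 0, 1)).expand_dims(axis=0)  # N x 3 x H x W
print(net(batch).shape)  # (1, 1000) class scores
```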
MPIBGC-TEE/CompartmentalSystems | notebooks/ELM_dask.py | 1 | 1730 | #from dask.distributed import Client
import xarray as xr
import numpy as np
import pandas as pd
import importlib
import ELMlib
importlib.reload(ELMlib)
#client = Client(n_workers=2, threads_per_worker=2, memory_limit='1GB')
#client
#ds = xr.open_dataset('../Data/14C_spinup_holger_fire.2x2_small.nc')
from netCDF4 import Dataset
ds = Dataset('../Data/14C_spinup_holger_fire.2x2_small.nc')
#lat, lon = ds.coords['lat'], ds.coords['lon']
lat, lon = ds['lat'][:], ds['lon'][:]
lat_indices, lon_indices = np.meshgrid(
range(len(lat)),
range(len(lon)),
indexing='ij'
)
lats, lons = np.meshgrid(lat, lon, indexing='ij')
df_pd = pd.DataFrame(
{
'cell_nr': range(len(lat)*len(lon)),
'lat_index': lat_indices.flatten(),
'lon_index': lon_indices.flatten(),
'lat': lats.flatten(),
'lon': lons.flatten()
}
)
import dask.array as da
import dask.dataframe as dask_df
df_dask = dask_df.from_pandas(df_pd, npartitions=4)
df_dask
parameter_set = ELMlib.load_parameter_set(
ds_filename = '../Data/14C_spinup_holger_fire.2x2_small.nc',
time_shift = -198*365,
nstep = 10
)
def func(line):
location_dict = {
'cell_nr': int(line.cell_nr),
'lat_index': int(line.lat_index),
'lon_index': int(line.lon_index)
}
    cell_nr, log, xs_12C_data, us_12C_data, rs_12C_data = ELMlib.load_model_12C_data(parameter_set, location_dict)
return cell_nr, log, xs_12C_data, us_12C_data, rs_12C_data
df_dask_2 = df_dask.apply(func, axis=1, meta=('A', 'object'))
df_dask_2.compute()
type(df_dask_2)
df_dask_2
list(df_dask_2)
pd.DataFrame(list(df_dask_2), columns=('cell_nr', 'log', 'xs_12C_data', 'us_12C_data', 'rs_12C_data'))
| mit | 2,440,978,287,868,239,400 | 23.366197 | 113 | 0.636416 | false | 2.585949 | false | false | false |
gusgollings/scbdo | scbdo/tod.py | 1 | 16340 |
# SCBdo : DISC Track Racing Management Software
# Copyright (C) 2010 Nathan Fraser
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Time of Day (ToD) functions and abstract class.
This module defines the tod class and some utility functions.
ToD records are used to establish net times.
Time of Day quantities are stored as a positive decimal number of
seconds in the range [0, 86400). The overflow value '24:00:00'
(equivalent to 86400 seconds) is forbidden and its presence
flags a programming error or an error in the attached timing device.
All time of day and net time values must be less than 24hrs.
'rounding' is by truncation toward zero. If a negative value is
specified by manually setting the timeval attribute, the resulting
timestring may not be what is expected. Arithmetic will still be
exact, however a negative result may not display as expected.
A time of day object includes:
- timeval : decimal tod in seconds (eg 1.2345, 4506.9023, etc)
- index : 4 character identifier string (eg '1' to '9999')
- chan : 3 character channel string from source (eg 'C0', 'C2M', etc)
- refid : string reference id, used for RFID tag events (eg '75ae7f')
Supported ToD String Patterns:
[[HH:]MM:]SS[.dcmz] Canonical
[[HH-]MM-]SS[.dcmz] Keypad
[[HHh]MM:]SS[.dcmz] Result
Arithmetic operations on ToD types:
The only supported arithmetic operations on ToD objects are
subtraction and addition. Subtraction obtains a net time from
two time of day values, while addition obtains a time of day
from a time of day and a net time. These conventions are assumed
and have the following peculiarities:
Given two tod objects a and b, the statement:
c = a - b
Creates a "net time" c such that:
c.timeval == (a.timeval - b.timeval) if a.timeval >= b.timeval
OR
c.timeval == (86400 - b.timeval + a.timeval) if a.timeval < b.timeval
'c' is a new tod object, whose timeval is the exact number of
seconds between tod 'b' and tod 'a'. 'b' is always
assumed to have happened before 'a', and so if the value of
'a.timeval' is less than the value of 'b.timeval', overflow
is assumed.
Given a tod object a and a "net time" b, the statement:
c = a + b
Creates a new tod c such that:
c.timeval == (a.timeval + b.timeval) % 86400
'c' is a new tod object, whose timeval is exactly the number of
seconds in net time 'b' after tod 'a'.
In both cases, the index chan and refid are set on 'c' as follows:
index = ''
chan = 'NET'
refid = ''
Normalised tod strings are printed as on the Timy receipt:
'NNNN CCC HH:MM:SS.dcmz REFID'
Where 'NNNN' is the index, 'CCC' is the chan and the time is
printed, space padded, according to the requested precision.
"""
import decimal # ToD internal representation
import re # used to scan ToD string: HH:MM:SS.dcmz
import time
QUANT_5PLACES = decimal.Decimal('0.00001') # does not work with Timy printer
QUANT_4PLACES = decimal.Decimal('0.0001')
QUANT_3PLACES = decimal.Decimal('0.001')
QUANT_2PLACES = decimal.Decimal('0.01')
QUANT_1PLACE = decimal.Decimal('0.1')
QUANT_0PLACES = decimal.Decimal('1')
QUANT = [QUANT_0PLACES, QUANT_1PLACE, QUANT_2PLACES,
QUANT_3PLACES, QUANT_4PLACES, QUANT_5PLACES]
QUANT_FW = [2, 4, 5, 6, 7, 8]
QUANT_TWID = [8, 10, 11, 12, 13, 14]
QUANT_PAD = [' ', ' ', ' ', ' ', '', '']
TOD_RE=re.compile(r'^(?:(?:(\d{1,2})[h:-])?(\d{1,2})[:-])?(\d{1,2}(?:\.\d+)?)$')
def str2tod(timeval=''):
"""Return tod for given string without fail."""
ret = None
if timeval is not None and timeval != '':
try:
ret = tod(timeval)
except:
pass
return ret
def dec2str(dectod=None, places=4, zeros=False):
"""Return formatted string for given tod decimal value.
Convert the decimal number dectod to a time string with the
supplied number of decimal places.
Note: negative timevals match case one or three depending on
value of zeros flag, and are truncated toward zero.
Oversized timevals will grow in width
optional argument 'zeros' will use leading zero chars. eg:
'00h00:01.2345' zeros=True
'1.2345' zeros=False
"""
strtod = None
assert places >= 0 and places <= 5, 'places not in range [0, 5]'
if dectod is not None: # conditional here?
if zeros or dectod >= 3600: # NOTE: equal compares fine w/decimal
fmt = '{0}h{1:02}:{2:0{3}}' # 'HHhMM:SS.dcmz'
if zeros:
fmt = '{0:02}:{1:02}:{2:0{3}}' # '00h00:0S.dcmz'
strtod = fmt.format(int(dectod)//3600,
(int(dectod)%3600)//60,
dectod.quantize(QUANT[places],
rounding=decimal.ROUND_FLOOR)%60,
QUANT_FW[places])
elif dectod >= 60: # MM:SS.dcmz
strtod = '{0}:{1:0{2}}'.format(int(dectod)//60,
dectod.quantize(QUANT[places],
rounding=decimal.ROUND_FLOOR)%60,
QUANT_FW[places])
else: # SS.dcmz or -SSSSS.dcmz
strtod = '{0}'.format(dectod.quantize(QUANT[places],
rounding=decimal.ROUND_FLOOR))
return strtod
def str2dec(timestr=''):
"""Return decimal for given string.
Convert the time of day value represented by the string supplied
to a decimal number of seconds.
Attempts to match against the common patterns:
HHhMM:SS.dcmz Result style
HH:MM:SS.dcmz Canonical
HH-MM-SS.dcmz Keypad
In optional groups as follows:
[[HH:]MM:]SS[.dcmz]
NOTE: Now truncates all incoming times to 4 places to avoid
inconsistencies.
"""
dectod=None
timestr=timestr.strip()
if timestr == 'now':
ltoft = time.localtime().tm_isdst * 3600 # DST Hack
dectod = decimal.Decimal(str(
(time.time() - (time.timezone - ltoft)) % 86400))
# !!ERROR!! 2038, UTC etc -> check def Unix time
else:
m = TOD_RE.match(timestr)
if m is not None:
dectod = decimal.Decimal(m.group(3))
dectod += decimal.Decimal(m.group(2) or 0) * 60
dectod += decimal.Decimal(m.group(1) or 0) * 3600
else:
# last attempt - try and handle as other decimal constructor
dectod = decimal.Decimal(timestr)
return dectod.quantize(QUANT[4], rounding=decimal.ROUND_FLOOR)
class tod(object):
"""A class for representing time of day and RFID events."""
def __init__(self, timeval=0, index='', chan='', refid=''):
"""Construct tod object.
Keyword arguments:
timeval -- time value to be represented (string/int/decimal/tod)
index -- tod index identifier string
chan -- channel string
        refid -- a reference identifier string
"""
self.index = str(index)[0:4]
self.chan = str(chan)[0:3]
self.refid = refid
if type(timeval) is str:
self.timeval = str2dec(timeval)
elif type(timeval) is tod:
self.timeval = timeval.timeval
else:
self.timeval = decimal.Decimal(timeval)
assert self.timeval >= 0 and self.timeval < 86400, 'timeval not in range [0, 86400)'
def __str__(self):
"""Return a normalised tod string."""
return self.refstr()
def __repr__(self):
"""Return object representation string."""
return "tod('{0}', '{1}', '{2}', '{3}')".format(str(self.timeval),
str(self.index), str(self.chan), str(self.refid))
def refstr(self, places=4):
"""Return 'normalised' string form.
'NNNN CCC HHhMM:SS.dcmz REFID'
to the specified number of decimal places in the set
[0, 1, 2, 3, 4, 5]
"""
return '{0: >4} {1: <3} {2} {3}'.format(self.index, self.chan,
self.timestr(places), self.refid)
def truncate(self, places=4):
"""Return a new ToD object with a truncated time value."""
return tod(timeval=self.timeval.quantize(QUANT[places],
rounding=decimal.ROUND_FLOOR), index='', chan='ToD', refid='')
def as_hours(self, places=0):
"""Return the tod value in hours, truncated to the desired places."""
return (self.timeval / 3600).quantize(QUANT[places],
rounding=decimal.ROUND_FLOOR)
def as_seconds(self, places=0):
"""Return the tod value in seconds, truncated to the desired places."""
return self.timeval.quantize(QUANT[places],
rounding=decimal.ROUND_FLOOR)
def as_minutes(self, places=0):
"""Return the tod value in minutes, truncated to the desired places."""
return (self.timeval / 60).quantize(QUANT[places],
rounding=decimal.ROUND_FLOOR)
def timestr(self, places=4, zeros=False):
"""Return time string component of the tod, whitespace padded."""
return '{0: >{1}}{2}'.format(dec2str(self.timeval, places, zeros),
QUANT_TWID[places], QUANT_PAD[places])
def rawtime(self, places=4, zeros=False):
"""Return time string component of the tod, without padding."""
return dec2str(self.timeval, places, zeros)
def speedstr(self, dist=200):
"""Return an average speed estimate for the provided distance."""
if self.timeval == 0:
return '---.--- km/h'
return '{0:7.3f} km/h'.format(3.6 * float(dist) / float(self.timeval))
def copy(self):
"""Return a copy of the supplied tod."""
return tod(self.timeval, self.index, self.chan, self.refid)
def __lt__(self, other):
if type(other) is tod:
return self.timeval < other.timeval
else:
return self.timeval < other
def __le__(self, other):
if type(other) is tod:
return self.timeval <= other.timeval
else:
return self.timeval <= other
def __eq__(self, other):
if type(other) is tod:
return self.timeval == other.timeval
else:
return self.timeval == other
def __ne__(self, other):
if type(other) is tod:
return self.timeval != other.timeval
else:
return self.timeval != other
def __gt__(self, other):
if type(other) is tod:
return self.timeval > other.timeval
else:
return self.timeval > other
def __ge__(self, other):
if type(other) is tod:
return self.timeval >= other.timeval
else:
return self.timeval >= other
def __sub__(self, other):
"""Compute time of day subtraction and return a NET tod object.
NOTE: 'other' always happens _before_ self, so a smaller value
for self implies rollover of the clock. This mods all net
times by 24Hrs.
"""
if type(other) is tod:
oft = None
if self.timeval >= other.timeval:
oft = self.timeval - other.timeval
else:
oft = 86400 - other.timeval + self.timeval
return tod(timeval=oft, index='', chan='NET', refid='')
else:
raise TypeError('Cannot subtract {0} from tod.'.format(
str(type(other).__name__)))
def __add__(self, other):
"""Compute time of day addition and return a new tod object.
NOTE: 'other' is assumed to be a NET time interval. The returned
tod will have a timeval mod 86400.
"""
if type(other) is tod:
oft = (self.timeval + other.timeval) % 86400
return tod(timeval=oft, index='', chan='ToD', refid='')
else:
raise TypeError('Cannot add {0} to tod.'.format(
str(type(other).__name__)))
# ToD 'constants'
ZERO = tod()
MAX = tod('23h59:59.9999')
# Fake times for special cases
FAKETIMES = {
'catch':ZERO,
'max':MAX.copy(),
'caught':MAX.copy(),
'abort':MAX.copy(),
'dsq':MAX.copy(),
'dnf':MAX.copy(),
'dns':MAX.copy()}
extra = decimal.Decimal('0.00001')
cof = decimal.Decimal('0.00001')
for c in ['caught', 'abort', 'dsq', 'dnf', 'dns']:
FAKETIMES[c].timeval += cof
cof += extra
class todlist():
"""ToD list helper class for managing splits and ranks."""
def __init__(self, lbl=''):
self.__label = lbl
self.__store = []
def __iter__(self):
return self.__store.__iter__()
def __len__(self):
return len(self.__store)
def __getitem__(self, key):
return self.__store[key]
def rank(self, bib, series=''):
"""Return current 0-based rank for given bib."""
ret = None
i = 0
last = None
for lt in self.__store:
if last is not None:
if lt != last:
i += 1
if lt.refid == bib and lt.index == series:
ret = i
break
last = lt
return ret
def clear(self):
self.__store = []
def remove(self, bib, series=''):
i = 0
while i < len(self.__store):
if self.__store[i].refid == bib and self.__store[i].index == series:
del self.__store[i]
else:
i += 1
def insert(self, t, bib=None, series=''):
"""Insert t into ordered list."""
ret = None
if t in FAKETIMES: # re-assign a coded 'finish'
t = FAKETIMES[t]
if type(t) is tod:
if bib is None:
bib = t.index
rt = tod(timeval=t.timeval, chan=self.__label,
refid=bib, index=series)
last = None
i = 0
found = False
for lt in self.__store:
if rt < lt:
self.__store.insert(i, rt)
found = True
break
i += 1
if not found:
self.__store.append(rt)
if __name__ == "__main__":
srcs = ['1:23:45.6789', '1:23-45.6789', '1-23-45.6789',
'1:23:45', '1:23-45', '1-23-45',
'3:45.6789', '3-45.6789',
'3:45', '3-45',
'45.6789', '5.6',
'45',
1.4, float('1.4'), decimal.Decimal('1.4'), '1.4',
10123, float('10123'), decimal.Decimal('10123'), '10123',
10123.456, float('10123.456'),
decimal.Decimal('10123.456'), '10123.456',
'-10234', '87012', '0', '86400', '86399.9999',
'inf', 'nan', 'zero', 'now', '-inf',
tod(0, 'ZERO'), tod('now', 'NOW') ]
print ('1: Check Source Formats')
for src in srcs:
try:
print ('\t' + repr(src) + ' =>\t' + str(tod(src)) + '/' + str(str2tod(src)))
except Exception as e:
print ('\t' + repr(src) + ' =>\t' + str(e) + '/' + str(str2tod(src)))
print ('2: ToD Subtraction')
a = tod(0, '1', 'C0')
print ('\t a: '+ str(a))
b = tod('12.1234', '2', 'C1')
print ('\t b: '+ str(b))
print ('\t [b-a]: '+ str(b-a))
print ('\t [b+a]: '+ str(b+a))
print ('\t1/100s: '+ (b-a).refstr(2))
print ('\t1/100s: '+ (b+a).refstr(2))
print ('\t NET: '+ (b-a).timestr(2))
print ('\t ToD: '+ (b+a).timestr(2))
print ('\t [a-b]: '+ str(a-b))
print ('\t [a+b]: '+ str(a+b))
print ('\t1/100s: '+ (a-b).refstr(2))
print ('\t1/100s: '+ (a+b).refstr(2))
print ('3: Copy & Speedstr')
c = b.copy()
print ('\t c: '+ str(c))
print ('\t avg: '+ (b-a).speedstr())
| gpl-3.0 | 5,077,695,465,656,237,000 | 32.970894 | 92 | 0.567993 | false | 3.510204 | false | false | false |
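A brief sketch of the net-time conventions spelled out in the module docstring above, assuming the ``tod`` class is importable as defined.

```python
start = tod('10:00:00.0000', index='1', chan='C0')
finish = tod('10:00:12.3456', index='2', chan='C1')

net = finish - start               # net time; chan is set to 'NET'
print(net.rawtime(4))              # 12.3456
print((start + net).rawtime(2))    # 10h00:12.34
print(net.speedstr(200))           # average speed over a 200 m lap
```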
piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/coordinates/baseframe.py | 1 | 45786 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Framework and base classes for coordinate frames/"low-level" coordinate
classes.
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
# Standard library
import inspect
import warnings
from copy import deepcopy
from collections import namedtuple
# Dependencies
import numpy as np
# Project
from ..utils.compat.misc import override__dir__
from ..extern import six
from ..utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from .. import units as u
from ..utils import OrderedDict
from .transformations import TransformGraph
from .representation import (BaseRepresentation, CartesianRepresentation,
SphericalRepresentation,
UnitSphericalRepresentation,
REPRESENTATION_CLASSES)
__all__ = ['BaseCoordinateFrame', 'frame_transform_graph', 'GenericFrame',
'FrameAttribute', 'TimeFrameAttribute', 'QuantityFrameAttribute',
'EarthLocationAttribute', 'RepresentationMapping']
# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()
def _get_repr_cls(value):
"""
Return a valid representation class from ``value`` or raise exception.
"""
if value in REPRESENTATION_CLASSES:
value = REPRESENTATION_CLASSES[value]
try:
# value might not be a class, so use try
assert issubclass(value, BaseRepresentation)
except (TypeError, AssertionError):
raise ValueError(
'Representation is {0!r} but must be a BaseRepresentation class '
'or one of the string aliases {1}'.format(
value, list(REPRESENTATION_CLASSES)))
return value
class FrameMeta(type):
def __new__(mcls, name, bases, members):
if 'default_representation' in members:
default_repr = members.pop('default_representation')
found_default_repr = True
else:
default_repr = None
found_default_repr = False
if 'frame_specific_representation_info' in members:
repr_info = members.pop('frame_specific_representation_info')
found_repr_info = True
else:
repr_info = None
found_repr_info = False
# somewhat hacky, but this is the best way to get the MRO according to
# https://mail.python.org/pipermail/python-list/2002-December/167861.html
tmp_cls = super(FrameMeta, mcls).__new__(mcls, name, bases, members)
# now look through the whole MRO for the class attributes, raw for
# frame_attr_names, and leading underscore for others
for m in (c.__dict__ for c in tmp_cls.__mro__):
if not found_default_repr and '_default_representation' in m:
default_repr = m['_default_representation']
found_default_repr = True
if (not found_repr_info and
'_frame_specific_representation_info' in m):
repr_info = m['_frame_specific_representation_info']
found_repr_info = True
if found_default_repr and found_repr_info:
break
else:
raise ValueError(
'Could not find all expected BaseCoordinateFrame class '
'attributes. Are you mis-using FrameMeta?')
# Make read-only properties for the frame class attributes that should
# be read-only to make them immutable after creation.
# We copy attributes instead of linking to make sure there's no
# accidental cross-talk between classes
mcls.readonly_prop_factory(members, 'default_representation',
default_repr)
mcls.readonly_prop_factory(members,
'frame_specific_representation_info',
deepcopy(repr_info))
# now set the frame name as lower-case class name, if it isn't explicit
if 'name' not in members:
members['name'] = name.lower()
return super(FrameMeta, mcls).__new__(mcls, name, bases, members)
@staticmethod
def readonly_prop_factory(members, attr, value):
private_attr = '_' + attr
def getter(self):
return getattr(self, private_attr)
members[private_attr] = value
members[attr] = property(getter)
class FrameAttribute(object):
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeFrameAttribute(default=_EQUINOX_B1950)
obstime = TimeFrameAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
    and use the ``TimeFrameAttribute`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
_nextid = 1
"""
Used to ascribe some ordering to FrameAttribute instances so that the
order they were assigned in a class body can be determined.
"""
def __init__(self, default=None, secondary_attribute=''):
self.default = default
self.secondary_attribute = secondary_attribute
# Use FrameAttribute._nextid explicitly so that subclasses of
# FrameAttribute use the same counter
self._order = FrameAttribute._nextid
FrameAttribute._nextid += 1
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if not hasattr(self, 'name'):
# Find attribute name of self by finding this object in the frame
# class which is requesting this attribute or any of its
# superclasses.
for mro_cls in frame_cls.__mro__:
for name, val in mro_cls.__dict__.items():
if val is self:
self.name = name
break
if hasattr(self, 'name'): # Can't nicely break out of two loops
break
else:
# Cannot think of a way to actually raise this exception. This
# instance containing this code must be in the class dict in
                # order to get executed by attribute access. But leave this
# here just in case...
raise AttributeError(
'Unexpected inability to locate descriptor')
out = None
if instance is not None:
out = getattr(instance, '_' + self.name, None)
if out is None and self.default is None:
out = getattr(instance, self.secondary_attribute, None)
if out is None:
out = self.default
out, converted = self.convert_input(out)
if instance is not None and converted:
setattr(instance, '_' + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError('Cannot set frame attribute')
class TimeFrameAttribute(FrameAttribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.FrameAttribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Convert input value to a Time object and validate by running through
the Time constructor. Also check that the input was a scalar.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from ..time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(
'Invalid time input {0}={1!r}\n{2}'.format(self.name,
value, err))
converted = True
return out, converted
class QuantityFrameAttribute(FrameAttribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None
If given, specifies the shape the attribute must be
"""
def __init__(self, default=None, secondary_attribute='', unit=None, shape=None):
super(QuantityFrameAttribute, self).__init__(default, secondary_attribute)
self.unit = unit
self.shape = shape
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
        if np.all(value == 0) and self.unit is not None:
return u.Quantity(np.zeros(self.shape), self.unit), True
else:
converted = True
            if not hasattr(value, 'unit'):
raise TypeError('Tried to set a QuantityFrameAttribute with '
'something that does not have a unit.')
oldvalue = value
value = u.Quantity(oldvalue, copy=False).to(self.unit)
if self.shape is not None and value.shape != self.shape:
raise ValueError('The provided value has shape "{0}", but '
'should have shape "{1}"'.format(value.shape,
self.shape))
if (oldvalue.unit == value.unit and hasattr(oldvalue, 'value') and
np.all(oldvalue.value == value.value)):
converted = False
return value, converted
class EarthLocationAttribute(FrameAttribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
        if value is None:
            return None, False

        # local import to avoid a circular dependency at module load time
        from .earth import EarthLocation

        if isinstance(value, EarthLocation):
return value, False
else:
#we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, 'transform_to'):
raise ValueError('"{0}" was passed into an '
'EarthLocationAttribute, but it does not have '
'"transform_to" method'.format(value))
itrsobj = value.transform_to(ITRS)
return itrsobj.earth_location, True
_RepresentationMappingBase = \
namedtuple('RepresentationMapping',
('reprname', 'framename', 'defaultunit'))
class RepresentationMapping(_RepresentationMappingBase):
"""
This `~collections.namedtuple` is used with the
``frame_specific_representation_info`` attribute to tell frames what
attribute names (and default units) to use for a particular representation.
``reprname`` and ``framename`` should be strings, while ``defaultunit`` can
be either an astropy unit, the string ``'recommended'`` (to use whatever
the representation's ``recommended_units`` is), or None (to indicate that
no unit mapping should be done).
"""
def __new__(cls, reprname, framename, defaultunit='recommended'):
# this trick just provides some defaults
return super(RepresentationMapping, cls).__new__(cls, reprname,
framename,
defaultunit)
@six.add_metaclass(FrameMeta)
class BaseCoordinateFrame(object):
"""
The base class for coordinate frames.
This class is intended to be subclassed to create instances of specific
systems. Subclasses can implement the following attributes:
* `default_representation`
A subclass of `~astropy.coordinates.BaseRepresentation` that will be
treated as the default representation of this frame. This is the
representation assumed by default when the frame is created.
* `~astropy.coordinates.FrameAttribute` class attributes
Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined
using a descriptor class. See the narrative documentation or
built-in classes code for details.
* `frame_specific_representation_info`
A dictionary mapping the name or class of a representation to a list of
`~astropy.coordinates.RepresentationMapping` objects that tell what
names and default units should be used on this frame for the components
of that representation.
"""
default_representation = None
# specifies special names/units for representation attributes
frame_specific_representation_info = {}
# This __new__ provides for backward-compatibility with pre-0.4 API.
# TODO: remove in 1.0
def __new__(cls, *args, **kwargs):
# Only do backward-compatibility if frame is previously defined one
frame_name = cls.__name__.lower()
if frame_name not in ['altaz', 'fk4', 'fk4noeterms', 'fk5',
'galactic', 'icrs']:
return super(BaseCoordinateFrame, cls).__new__(cls)
use_skycoord = False
if (len(args) > 1 or (len(args) == 1 and
not isinstance(args[0], BaseRepresentation))):
for arg in args:
if (not isinstance(arg, u.Quantity)
and not isinstance(arg, BaseRepresentation)):
msg = ('Initializing frame classes like "{0}" using string '
'or other non-Quantity arguments is deprecated, and '
'will be removed in the next version of Astropy. '
'Instead, you probably want to use the SkyCoord '
'class with the "frame={1}" keyword, or if you '
'really want to use the low-level frame classes, '
'create it with an Angle or Quantity.')
warnings.warn(msg.format(cls.__name__,
cls.__name__.lower()),
AstropyDeprecationWarning)
use_skycoord = True
break
if 'unit' in kwargs and not use_skycoord:
warnings.warn(
"Initializing frames using the ``unit`` argument is "
"now deprecated. Use SkyCoord or pass Quantity "
"instances to frames instead.", AstropyDeprecationWarning)
use_skycoord = True
if not use_skycoord:
representation = kwargs.get('representation',
cls._default_representation)
representation = _get_repr_cls(representation)
repr_info = cls._get_representation_info()
for key in repr_info[representation]['names']:
if key in kwargs:
if not isinstance(kwargs[key], u.Quantity):
warnings.warn(
"Initializing frames using non-Quantity arguments "
"is now deprecated. Use SkyCoord or pass Quantity "
"instances instead.", AstropyDeprecationWarning)
use_skycoord = True
break
if use_skycoord:
kwargs['frame'] = frame_name
from .sky_coordinate import SkyCoord
return SkyCoord(*args, **kwargs)
else:
return super(BaseCoordinateFrame, cls).__new__(cls)
def __init__(self, *args, **kwargs):
self._attr_names_with_defaults = []
if 'representation' in kwargs:
self.representation = kwargs.pop('representation')
# if not set below, this is a frame with no data
representation_data = None
for fnm, fdefault in self.get_frame_attr_names().items():
            # Read-only frame attributes are defined as FrameAttribute
# descriptors which are not settable, so set 'real' attributes as
# the name prefaced with an underscore.
if fnm in kwargs:
value = kwargs.pop(fnm)
setattr(self, '_' + fnm, value)
else:
setattr(self, '_' + fnm, fdefault)
self._attr_names_with_defaults.append(fnm)
# Validate input by getting the attribute here.
getattr(self, fnm)
pref_rep = self.representation
args = list(args) # need to be able to pop them
if (len(args) > 0) and (isinstance(args[0], BaseRepresentation) or
args[0] is None):
representation_data = args.pop(0)
if len(args) > 0:
raise TypeError(
'Cannot create a frame with both a representation and '
'other positional arguments')
elif self.representation:
repr_kwargs = {}
for nmkw, nmrep in self.representation_component_names.items():
if len(args) > 0:
#first gather up positional args
repr_kwargs[nmrep] = args.pop(0)
elif nmkw in kwargs:
repr_kwargs[nmrep] = kwargs.pop(nmkw)
#special-case the Spherical->UnitSpherical if no `distance`
#TODO: possibly generalize this somehow?
if repr_kwargs:
if repr_kwargs.get('distance', True) is None:
del repr_kwargs['distance']
if (issubclass(self.representation, SphericalRepresentation) and
'distance' not in repr_kwargs):
representation_data = UnitSphericalRepresentation(**repr_kwargs)
else:
representation_data = self.representation(**repr_kwargs)
if len(args) > 0:
raise TypeError(
'{0}.__init__ had {1} remaining unhandled arguments'.format(
self.__class__.__name__, len(args)))
if kwargs:
raise TypeError(
'Coordinate frame got unexpected keywords: {0}'.format(
list(kwargs)))
self._data = representation_data
# We do ``is not None`` because self._data might evaluate to false for
# empty arrays or data == 0
if self._data is not None:
self._rep_cache = dict()
self._rep_cache[self._data.__class__.__name__, False] = self._data
@property
def data(self):
"""
The coordinate data for this object. If this frame has no data, an
`~.exceptions.ValueError` will be raised. Use `has_data` to
check if data is present on this frame object.
"""
if self._data is None:
raise ValueError('The frame object "{0}" does not have associated '
'data'.format(repr(self)))
return self._data
@property
def has_data(self):
"""
True if this frame has `data`, False otherwise.
"""
return self._data is not None
def __len__(self):
return len(self.data)
def __nonzero__(self): # Py 2.x
return self.isscalar or len(self) != 0
def __bool__(self): # Py 3.x
return self.isscalar or len(self) != 0
@property
def shape(self):
return self.data.shape
@property
def isscalar(self):
return self.data.isscalar
@classmethod
def get_frame_attr_names(cls):
seen = set()
attributes = []
for mro_cls in cls.__mro__:
for name, val in mro_cls.__dict__.items():
if isinstance(val, FrameAttribute) and name not in seen:
seen.add(name)
# Add the sort order, name, and actual value of the frame
# attribute in question
attributes.append((val._order, name,
getattr(mro_cls, name)))
# Sort by the frame attribute order
attributes.sort(key=lambda a: a[0])
return OrderedDict((a[1], a[2]) for a in attributes)
@property
def representation(self):
"""
The representation of the data in this frame, as a class that is
subclassed from `~astropy.coordinates.BaseRepresentation`. Can
also be *set* using the string name of the representation.
"""
if not hasattr(self, '_representation'):
self._representation = self.default_representation
return self._representation
@representation.setter
def representation(self, value):
self._representation = _get_repr_cls(value)
@classmethod
def _get_representation_info(cls):
# This exists as a class method only to support handling frame inputs
# without units, which are deprecated and will be removed. This can be
# moved into the representation_info property at that time.
repr_attrs = {}
for repr_cls in REPRESENTATION_CLASSES.values():
repr_attrs[repr_cls] = {'names': [], 'units': []}
for c in repr_cls.attr_classes.keys():
repr_attrs[repr_cls]['names'].append(c)
rec_unit = repr_cls.recommended_units.get(c, None)
repr_attrs[repr_cls]['units'].append(rec_unit)
for repr_cls, mappings in cls._frame_specific_representation_info.items():
# keys may be a class object or a name
repr_cls = _get_repr_cls(repr_cls)
# take the 'names' and 'units' tuples from repr_attrs,
# and then use the RepresentationMapping objects
# to update as needed for this frame.
nms = repr_attrs[repr_cls]['names']
uns = repr_attrs[repr_cls]['units']
comptomap = dict([(m.reprname, m) for m in mappings])
for i, c in enumerate(repr_cls.attr_classes.keys()):
if c in comptomap:
mapp = comptomap[c]
nms[i] = mapp.framename
# need the isinstance because otherwise if it's a unit it
# will try to compare to the unit string representation
if not (isinstance(mapp.defaultunit, six.string_types) and
mapp.defaultunit == 'recommended'):
uns[i] = mapp.defaultunit
# else we just leave it as recommended_units says above
# Convert to tuples so that this can't mess with frame internals
repr_attrs[repr_cls]['names'] = tuple(nms)
repr_attrs[repr_cls]['units'] = tuple(uns)
return repr_attrs
@property
def representation_info(self):
"""
A dictionary with the information of what attribute names for this frame
apply to particular representations.
"""
return self._get_representation_info()
@property
def representation_component_names(self):
out = OrderedDict()
if self.representation is None:
return out
data_names = self.representation.attr_classes.keys()
repr_names = self.representation_info[self.representation]['names']
for repr_name, data_name in zip(repr_names, data_names):
out[repr_name] = data_name
return out
@property
def representation_component_units(self):
out = OrderedDict()
if self.representation is None:
return out
repr_attrs = self.representation_info[self.representation]
repr_names = repr_attrs['names']
repr_units = repr_attrs['units']
for repr_name, repr_unit in zip(repr_names, repr_units):
if repr_unit:
out[repr_name] = repr_unit
return out
def realize_frame(self, representation):
"""
Generates a new frame *with new data* from another frame (which may or
may not have data).
Parameters
----------
representation : BaseRepresentation
The representation to use as the data for the new frame.
Returns
-------
frameobj : same as this frame
A new object with the same frame attributes as this one, but
with the ``representation`` as the data.
"""
frattrs = dict([(attr, getattr(self, attr))
for attr in self.get_frame_attr_names()
if attr not in self._attr_names_with_defaults])
return self.__class__(representation, **frattrs)
def represent_as(self, new_representation, in_frame_units=False):
"""
Generate and return a new representation of this frame's `data`
as a Representation object.
Note: In order to make an in-place change of the representation
of a Frame or SkyCoord object, set the ``representation``
attribute of that object to the desired new representation.
Parameters
----------
new_representation : subclass of BaseRepresentation or string
The type of representation to generate. May be a *class*
(not an instance), or the string name of the representation
class.
in_frame_units : bool
Force the representation units to match the specified units
particular to this frame
Returns
-------
newrep : BaseRepresentation-derived object
A new representation object of this frame's `data`.
Raises
------
AttributeError
If this object had no `data`
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> coord = SkyCoord(0*u.deg, 0*u.deg)
>>> coord.represent_as(CartesianRepresentation)
<CartesianRepresentation (x, y, z) [dimensionless]
(1.0, 0.0, 0.0)>
>>> coord.representation = CartesianRepresentation
>>> coord
<SkyCoord (ICRS): (x, y, z) [dimensionless]
(1.0, 0.0, 0.0)>
"""
new_representation = _get_repr_cls(new_representation)
cached_repr = self._rep_cache.get((new_representation.__name__,
in_frame_units))
if not cached_repr:
data = self.data.represent_as(new_representation)
# If the new representation is known to this frame and has a defined
# set of names and units, then use that.
new_attrs = self.representation_info.get(new_representation)
if new_attrs and in_frame_units:
datakwargs = dict((comp, getattr(data, comp))
for comp in data.components)
for comp, new_attr_unit in zip(data.components, new_attrs['units']):
if new_attr_unit:
datakwargs[comp] = datakwargs[comp].to(new_attr_unit)
data = data.__class__(**datakwargs)
self._rep_cache[new_representation.__name__, in_frame_units] = data
return self._rep_cache[new_representation.__name__, in_frame_units]
def transform_to(self, new_frame):
"""
Transform this object's coordinate data to a new frame.
Parameters
----------
new_frame : class or frame object or SkyCoord object
The frame to transform this coordinate frame into.
Returns
-------
transframe
A new object with the coordinate data represented in the
``newframe`` system.
Raises
------
ValueError
If there is no possible transformation route.
"""
from .errors import ConvertError
if self._data is None:
raise ValueError('Cannot transform a frame with no data')
if inspect.isclass(new_frame):
#means use the defaults for this class
new_frame = new_frame()
if hasattr(new_frame, '_sky_coord_frame'):
# Input new_frame is not a frame instance or class and is most
# likely a SkyCoord object.
new_frame = new_frame._sky_coord_frame
trans = frame_transform_graph.get_transform(self.__class__,
new_frame.__class__)
if trans is None:
if new_frame is self.__class__:
# no special transform needed, but should update frame info
return new_frame.realize_frame(self.data)
msg = 'Cannot transform from {0} to {1}'
raise ConvertError(msg.format(self.__class__, new_frame.__class__))
return trans(self, new_frame)
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : class or frame object
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
new_frame_cls = new_frame if inspect.isclass(new_frame) else new_frame.__class__
trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls)
if trans is None:
if new_frame_cls is self.__class__:
return 'same'
else:
return False
else:
return True
def is_frame_attr_default(self, attrnm):
"""
Determine whether or not a frame attribute has its value because it's
the default value, or because this frame was created with that value
explicitly requested.
Parameters
----------
attrnm : str
The name of the attribute to check.
Returns
-------
isdefault : bool
True if the attribute ``attrnm`` has its value by default, False if
it was specified at creation of this frame.
"""
return attrnm in self._attr_names_with_defaults
def is_equivalent_frame(self, other):
"""
Checks if this object is the same frame as the ``other`` object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. Note that it does *not* matter what, if any,
data either object has.
Parameters
----------
other : BaseCoordinateFrame
the other frame to check
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `BaseCoordinateFrame` or subclass.
"""
if self.__class__ == other.__class__:
for frame_attr_name in self.get_frame_attr_names():
if getattr(self, frame_attr_name) != getattr(other, frame_attr_name):
return False
return True
elif not isinstance(other, BaseCoordinateFrame):
raise TypeError("Tried to do is_equivalent_frame on something that "
"isn't a frame")
else:
return False
def __repr__(self):
frameattrs = self._frame_attrs_repr()
data_repr = self._data_repr()
if frameattrs:
frameattrs = ' ({0})'.format(frameattrs)
if data_repr:
return '<{0} Coordinate{1}: {2}>'.format(self.__class__.__name__,
frameattrs, data_repr)
else:
return '<{0} Frame{1}>'.format(self.__class__.__name__,
frameattrs)
def _data_repr(self):
"""Returns a string representation of the coordinate data."""
if not self.has_data:
return ''
if self.representation:
if (issubclass(self.representation, SphericalRepresentation) and
isinstance(self.data, UnitSphericalRepresentation)):
data = self.represent_as(self.data.__class__,
in_frame_units=True)
else:
data = self.represent_as(self.representation,
in_frame_units=True)
data_repr = repr(data)
for nmpref, nmrepr in self.representation_component_names.items():
data_repr = data_repr.replace(nmrepr, nmpref)
else:
data = self.data
data_repr = repr(self.data)
if data_repr.startswith('<' + data.__class__.__name__):
# remove both the leading "<" and the space after the name, as well
# as the trailing ">"
data_repr = data_repr[(len(data.__class__.__name__) + 2):-1]
else:
data_repr = 'Data:\n' + data_repr
return data_repr
def _frame_attrs_repr(self):
"""
Returns a string representation of the frame's attributes, if any.
"""
return ', '.join([attrnm + '=' + str(getattr(self, attrnm))
for attrnm in self.get_frame_attr_names()])
def __getitem__(self, view):
if self.has_data:
out = self.realize_frame(self.data[view])
out.representation = self.representation
return out
else:
raise ValueError('Cannot index a frame with no data')
@override__dir__
def __dir__(self):
"""
Override the builtin `dir` behavior to include representation
names.
TODO: dynamic representation transforms (i.e. include cylindrical et al.).
"""
dir_values = set(self.representation_component_names)
return dir_values
def __getattr__(self, attr):
"""
Allow access to attributes defined in
``self.representation_component_names``.
TODO: dynamic representation transforms (i.e. include cylindrical et
al.).
"""
# attr == '_representation' is likely from the hasattr() test in the
# representation property which is used for
# self.representation_component_names.
#
# Prevent infinite recursion here.
if (attr == '_representation' or
attr not in self.representation_component_names):
raise AttributeError("'{0}' object has no attribute '{1}'"
.format(self.__class__.__name__, attr))
rep = self.represent_as(self.representation, in_frame_units=True)
val = getattr(rep, self.representation_component_names[attr])
return val
def __setattr__(self, attr, value):
repr_attr_names = []
if hasattr(self, 'representation_info'):
for representation_attr in self.representation_info.values():
repr_attr_names.extend(representation_attr['names'])
if attr in repr_attr_names:
raise AttributeError(
'Cannot set any frame attribute {0}'.format(attr))
else:
super(BaseCoordinateFrame, self).__setattr__(attr, value)
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] http://en.wikipedia.org/wiki/Great-circle_distance
"""
from .angle_utilities import angular_separation
from .angles import Angle
self_unit_sph = self.represent_as(UnitSphericalRepresentation)
other_transformed = other.transform_to(self)
other_unit_sph = other_transformed.represent_as(UnitSphericalRepresentation)
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(self_unit_sph.lon, self_unit_sph.lat,
other_unit_sph.lon, other_unit_sph.lat)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate system to get the distance to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
from .distances import Distance
if self.data.__class__ == UnitSphericalRepresentation:
raise ValueError('This object does not have a distance; cannot '
'compute 3d separation.')
# do this first just in case the conversion somehow creates a distance
other_in_self_system = other.transform_to(self)
if other_in_self_system.__class__ == UnitSphericalRepresentation:
raise ValueError('The other object does not have a distance; '
'cannot compute 3d separation.')
dx = self.cartesian.x - other_in_self_system.cartesian.x
dy = self.cartesian.y - other_in_self_system.cartesian.y
dz = self.cartesian.z - other_in_self_system.cartesian.z
distval = (dx.value ** 2 + dy.value ** 2 + dz.value ** 2) ** 0.5
return Distance(distval, dx.unit)
@property
def cartesian(self):
"""
Shorthand for a cartesian representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as(CartesianRepresentation, in_frame_units=True)
@property
def spherical(self):
"""
Shorthand for a spherical representation of the coordinates in this object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as(SphericalRepresentation, in_frame_units=True)
class GenericFrame(BaseCoordinateFrame):
"""
A frame object that can't store data but can hold any arbitrary frame
attributes. Mostly useful as a utility for the high-level class to store
intermediate frame attributes.
Parameters
----------
frame_attrs : dict
A dictionary of attributes to be used as the frame attributes for this
frame.
"""
name = None # it's not a "real" frame so it doesn't have a name
def __init__(self, frame_attrs):
super(GenericFrame, self).__setattr__('_frame_attr_names', frame_attrs)
super(GenericFrame, self).__init__(None)
for attrnm, attrval in frame_attrs.items():
setattr(self, '_' + attrnm, attrval)
def get_frame_attr_names(self):
return self._frame_attr_names
def __getattr__(self, name):
if '_' + name in self.__dict__:
return getattr(self, '_' + name)
else:
raise AttributeError('no {0}'.format(name))
def __setattr__(self, name, value):
if name in self._frame_attr_names:
raise AttributeError("can't set frame attribute '{0}'".format(name))
else:
super(GenericFrame, self).__setattr__(name, value)
# doing this import at the bottom prevents a circular import issue that is
# otherwise present due to EarthLocation needing to import ITRS
from .earth import EarthLocation
| mit | -3,056,814,761,154,917,000 | 36.591133 | 88 | 0.58151 | false | 4.778833 | false | false | false |
atilag/qiskit-sdk-py | qiskit/_jobprocessor.py | 1 | 4559 | # -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Processor for running Quantum Jobs in the different backends."""
import logging
import pprint
from concurrent import futures
from threading import Lock
from ._qiskiterror import QISKitError
from ._compiler import compile_circuit
from ._result import Result
logger = logging.getLogger(__name__)
def run_backend(q_job):
"""Run a program of compiled quantum circuits on a backend.
Args:
q_job (QuantumJob): job object
Returns:
Result: Result object.
Raises:
QISKitError: if the backend is malformed
"""
backend = q_job.backend
qobj = q_job.qobj
backend_name = qobj['config']['backend_name']
if not backend:
raise QISKitError("No backend instance to run on.")
if backend_name != backend.configuration['name']:
raise QISKitError('non-matching backends specified in Qobj '
'object and json')
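    # Local backends may receive circuits that have not been compiled yet, so
    # compile them to the JSON format lazily before running the job.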
if backend.configuration.get('local'): # remove condition when api gets qobj
for circuit in qobj['circuits']:
if circuit['compiled_circuit'] is None:
compiled_circuit = compile_circuit(circuit['circuit'], format='json')
circuit['compiled_circuit'] = compiled_circuit
return backend.run(q_job)
class JobProcessor:
"""
Process a series of jobs and collect the results
"""
def __init__(self, q_jobs, callback, max_workers=1):
"""
Args:
q_jobs (list(QuantumJob)): List of QuantumJob objects.
callback (fn(results)): The function that will be called when all
jobs finish. The signature of the function must be:
fn(results)
results: A list of Result objects.
max_workers (int): The maximum number of workers to use.
Raises:
QISKitError: if any of the job backends could not be found.
"""
self.q_jobs = q_jobs
self.max_workers = max_workers
# check whether any jobs are remote
self.online = any(not q_job.backend.configuration.get('local')
for q_job in q_jobs)
self.futures = {}
self.lock = Lock()
# Set a default dummy callback just in case the user doesn't want
# to pass any callback.
self.callback = (lambda rs: ()) if callback is None else callback
self.num_jobs = len(self.q_jobs)
self.jobs_results = []
if self.online:
            # I/O intensive -> use ThreadPoolExecutor
self.executor_class = futures.ThreadPoolExecutor
else:
# CPU intensive -> use ProcessPoolExecutor
self.executor_class = futures.ProcessPoolExecutor
def _job_done_callback(self, future):
try:
result = future.result()
except Exception as ex: # pylint: disable=broad-except
result = Result({'job_id': '0', 'status': 'ERROR',
'result': ex},
future.qobj)
with self.lock:
logger.debug("Have a Result: %s", pprint.pformat(result))
self.jobs_results.append(result)
if self.num_jobs != 0:
self.num_jobs -= 1
logger.debug("Jobs left count decreased: %d", self.num_jobs)
# Call the callback when all jobs have finished
if self.num_jobs == 0:
logger.debug("No more jobs in queue, returning results")
self.callback(self.jobs_results)
def submit(self):
"""Process/submit jobs"""
executor = self.executor_class(max_workers=self.max_workers)
for q_job in self.q_jobs:
future = executor.submit(run_backend, q_job)
future.qobj = q_job.qobj
self.futures[future] = q_job.qobj
future.add_done_callback(self._job_done_callback)
| apache-2.0 | 5,967,503,450,596,751,000 | 36.368852 | 85 | 0.606054 | false | 4.225209 | false | false | false |
morta-code/YAX | setup.py | 1 | 1245 | from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='YAX',
version='1.2.0',
packages=['yax'],
url='https://github.com/morta-code/YAX',
license='LGPLv3',
author='Móréh Tamás, MTA-PPKE-NLPG',
author_email='[email protected]',
description='Yet Another XML parser with the power of event-based memory-safe mechanism.',
long_description=long_description,
keywords="xml lxml parser event-based record-oriented",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup :: XML"
]
)
| gpl-3.0 | 6,089,661,871,399,462,000 | 35.529412 | 94 | 0.662641 | false | 3.857143 | false | true | false |
lutianming/leetcode | reorder_list.py | 1 | 1288 | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param head, a ListNode
# @return nothing
def reorderList(self, head):
if not head or not head.next:
return head
fast = head
slow = head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
lasthalf = slow.next
lasthalf = self.reverse(lasthalf)
slow.next = None
firsthalf = head
while lasthalf:
a = firsthalf.next
b = lasthalf.next
firsthalf.next = lasthalf
lasthalf.next = a
firsthalf = a
lasthalf = b
return head
def reverse(self, head):
if not head:
return head
next = head.next
head.next = None
while next:
tmp = next.next
next.next = head
head = next
next = tmp
return head
head = ListNode(1)
head.next = ListNode(2)
# node = head.next
# node.next = ListNode(3)
# node = node.next
# node.next = ListNode(4)
solution = Solution()
head = solution.reorderList(head)
while head:
print(head.val)
head = head.next
| mit | 3,573,646,055,956,980,700 | 21.596491 | 41 | 0.534938 | false | 3.95092 | false | false | false |
magyarm/periphondemand-code | src/bin/code/intercon.py | 1 | 5000 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Name: Intercon.py
# Purpose:
# Author: Fabien Marteau <[email protected]>
# Created: 13/05/2008
#-----------------------------------------------------------------------------
# Copyright (2008) Armadeus Systems
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#-----------------------------------------------------------------------------
# Revision list :
#
# Date By Changes
#
#-----------------------------------------------------------------------------
__doc__ = ""
__version__ = "1.0.0"
__versionTime__ = "13/05/2008"
__author__ = "Fabien Marteau <[email protected]>"
import periphondemand.bin.define
from periphondemand.bin.define import *
from periphondemand.bin.utils.settings import Settings
from periphondemand.bin.utils.error import Error
from periphondemand.bin.utils import wrappersystem as sy
from periphondemand.bin.utils.display import Display
from periphondemand.bin.core.component import Component
from periphondemand.bin.core.port import Port
from periphondemand.bin.core.interface import Interface
from periphondemand.bin.core.hdl_file import Hdl_file
settings = Settings()
display = Display()
class Intercon(Component):
""" Generate Intercon component
"""
def __init__(self,masterinterface,project):
""" Init fonction
"""
masterinstancename = masterinterface.getParent().getInstanceName()
masterinterfacename = masterinterface.getName()
Component.__init__(self,project)
self.interfaceslist = []
self.addNode(nodename="component")
masterinstance = self.parent.getInstance(masterinstancename)
masterinterface = masterinstance.getInterface(masterinterfacename)
# Write xml description
self.generateXML(masterinterface)
# Write Code for component
masterinterface.getBus().generateIntercon(self)
display.msg("Intercon with name : "+self.getInstanceName()+" Done")
def generateXML(self,masterinterface):
""" Generate intercon code
"""
masterinstance = masterinterface.getParent()
# set name and description
self.setName(str(masterinstance.getInstanceName()) \
+ "_" \
+ str(masterinterface.getName()))
self.setInstanceName(str(masterinstance.getInstanceName())\
+ "_"\
+str(masterinterface.getName())\
+ "_intercon")
self.setDescription("Connect slaves to "\
+ masterinterface.getName()\
+ " from "\
+ masterinstance.getInstanceName())
# Save to make directories
self.saveInstance()
#####
# Create interface for each component connected on intercon
# for slaves and master:
slaveslist = masterinterface.getSlavesList()
interfaceslist = [slave.getInterface() for slave in slaveslist]
interfaceslist.append(masterinterface)
# For each slave and master interface, create interface in intercon
for interface in interfaceslist:
instance = interface.getParent()
#######
# bus (wishbone,...)
bus = Interface(self,
name=instance.getInstanceName()\
+"_"+interface.getName())
bus.setClass("intercon")
# Adding bus interface on intercon
self.addInterface(bus)
            # Create ports with inverted direction values
for port in interface.getPortsList():
newport = Port(bus,
name=instance.getInstanceName()\
+"_"+port.getName())
newport.setDir(self.invertDir(port.getDir()))
newport.setSize(port.getSize())
# adding port on bus interface
bus.addPort(newport)
                # connect all pins of the original port to the new port
port.connectAllPin(newport)
bus.setClass("intercon")
self.setNum("0")
| lgpl-2.1 | 8,862,320,480,658,115,000 | 35.764706 | 78 | 0.5792 | false | 4.608295 | false | false | false |
zdrjson/DDKit | python/iMacFirstPythonPragrammer/FindSameNameImage.py | 1 | 1755 | import os, sys, re, shutil
if __name__ == '__main__':
used_map = {}
resPath = "./MagnetPF/Res/"
depDir = "Deprecated"
skipDir = ["message"]
for root, dirs, files in os.walk("./"):
for file in files:
if file.endswith(".m"):
filepath = os.path.join(root, file)
f = open(filepath, "r")
for line in f:
match = re.findall(".*?@\"(Res.*?.png)\".*?", line)
if match:
for image in match:
used_map[image] = 1
skipDir.append(depDir)
for root, dirs, files in os.walk(resPath):
for file in files:
orginfile = os.path.join(root, file)
match = re.findall(".*?(Res.*?.png).*?", orginfile)
if match:
matchfile = match[0].replace("@2x","").replace("@3x","")
print matchfile
if not used_map.has_key(matchfile):
filename = orginfile.split(os.path.sep)[-1]
relPath = orginfile.replace(resPath,"")
originDir = relPath.split(os.path.sep)[0]
tofile = resPath + depDir + "/" + relPath
topath = tofile.replace(filename,"")
if not originDir in skipDir:
if not os.path.exists(topath):
os.mkdir(topath)
print "from: " + orginfile
print " to:" + tofile
print ""
shutil.move(orginfile, tofile) | mit | -5,205,550,570,592,033,000 | 30.927273 | 76 | 0.406268 | false | 4.488491 | false | false | false |
owlabs/incubator-airflow | airflow/contrib/operators/snowflake_operator.py | 1 | 3034 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.snowflake_hook import SnowflakeHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SnowflakeOperator(BaseOperator):
"""
Executes sql code in a Snowflake database
:param snowflake_conn_id: reference to specific snowflake connection id
:type snowflake_conn_id: str
:param sql: the sql code to be executed. (templated)
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
Template reference are recognized by str ending in '.sql'
:param warehouse: name of warehouse (will overwrite any warehouse
defined in the connection's extra JSON)
:type warehouse: str
:param database: name of database (will overwrite database defined
in connection)
:type database: str
:param schema: name of schema (will overwrite schema defined in
connection)
:type schema: str
:param role: name of role (will overwrite any role defined in
connection's extra JSON)
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#ededed'
@apply_defaults
def __init__(
self, sql, snowflake_conn_id='snowflake_default', parameters=None,
autocommit=True, warehouse=None, database=None, role=None,
schema=None, *args, **kwargs):
super(SnowflakeOperator, self).__init__(*args, **kwargs)
self.snowflake_conn_id = snowflake_conn_id
self.sql = sql
self.autocommit = autocommit
self.parameters = parameters
self.warehouse = warehouse
self.database = database
self.role = role
self.schema = schema
def get_hook(self):
return SnowflakeHook(snowflake_conn_id=self.snowflake_conn_id,
warehouse=self.warehouse, database=self.database,
role=self.role, schema=self.schema)
def execute(self, context):
self.log.info('Executing: %s', self.sql)
hook = self.get_hook()
hook.run(
self.sql,
autocommit=self.autocommit,
parameters=self.parameters)
| apache-2.0 | -7,577,476,445,885,619,000 | 38.402597 | 78 | 0.677983 | false | 4.156164 | false | false | false |
thefinn93/orgsms | orgsms/api.py | 1 | 3675 | from flask import Blueprint, abort, jsonify, request, current_app, Response
import datetime
from sqlalchemy import desc
from .provider import providers
from .socketio import socketio
from . import models, exceptions
app = Blueprint('api', __name__)
@app.route('/inbound/<provider>', methods=["POST"])
def inbound(provider):
if provider in providers:
message = providers[provider].receive()
models.db.session.add(message)
models.db.session.commit()
message.push()
return Response()
else:
return abort(404)
def send(local_number, remote_number, text, provider=None):
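    # When no provider is given, fall back to the provider configured for the
    # sending (local) number; an unknown provider raises before the message is
    # added to the session.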
if provider is None:
local = models.PhoneNumber.query.get(local_number)
if local is not None:
provider = local.provider
if provider is None:
raise exceptions.CantDetermineProviderException()
if provider not in providers:
raise exceptions.UnknownProviderException("Provider {} unknown".format(provider))
message = models.Message(local_number=local_number, remote_number=remote_number,
inbound=False, mms=False, text=text)
models.db.session.add(message)
current_app.logger.debug("Sending %s to %s from %s", text, remote_number, local_number)
providers[provider].send(message)
models.db.session.commit()
broadcast_msg = message.json()
broadcast_msg['source_session'] = request.sid
socketio.emit('newmessage', broadcast_msg)
return message
@app.route('/outbound', methods=["POST"])
def outbound():
try:
message = send(request.form.get("from"), request.form.get("to"), request.form.get("text"))
return jsonify({"id": message.id})
except (exceptions.CantDetermineProviderException, exceptions.UnknownProviderException):
return abort(400)
@socketio.on('send')
def outbound_socket(json):
current_app.logger.debug("Received message from client %s: %s", request.sid, json)
try:
message = send(json.get("from"), json.get("to"), json.get("text"))
return {"success": True, "message": message.json()}
except (exceptions.CantDetermineProviderException, exceptions.UnknownProviderException):
current_app.logger.exception("Failed to send %s", str(json))
return {"success": False}
@app.route('/messages/<number>')
def get_messages(number):
results = []
query = models.Message.query.filter_by(remote_number=number)
if request.args.get('after') is not None:
after = datetime.datetime.fromtimestamp(float(request.args.get('after')))
query = query.filter(models.Message.timestamp > after)
if request.args.get('before') is not None:
before = datetime.datetime.fromtimestamp(float(request.args.get('before')))
query = query.filter(models.Message.timestamp < before)
query = query.order_by(desc(models.Message.timestamp)).limit(50)
for message in query.all():
results.append({
"mms": message.mms,
"inbound": message.inbound,
"text": message.text,
"attachment": message.attachment,
"timestamp": message.timestamp.timestamp()
})
return jsonify(results)
@app.route("/messages")
def get_conversations():
query = models.db.session.query(models.Message.remote_number.distinct().label("number"))
conversations = []
for conversation in query.all():
contact = models.Contact.query.filter_by(number=conversation.number).first()
conversations.append({
"number": conversation.number,
"name": contact.name if contact is not None else None
})
return jsonify(conversations)
| gpl-3.0 | 5,098,620,182,597,977,000 | 36.5 | 98 | 0.667483 | false | 4.007634 | false | false | false |
bolkedebruin/airflow | airflow/executors/executor_loader.py | 1 | 3573 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""All executors."""
import importlib
from typing import Optional
from airflow.executors.base_executor import BaseExecutor
class ExecutorLoader:
"""
Keeps constants for all the currently available executors.
"""
LOCAL_EXECUTOR = "LocalExecutor"
SEQUENTIAL_EXECUTOR = "SequentialExecutor"
CELERY_EXECUTOR = "CeleryExecutor"
DASK_EXECUTOR = "DaskExecutor"
KUBERNETES_EXECUTOR = "KubernetesExecutor"
DEBUG_EXECUTOR = "DebugExecutor"
_default_executor: Optional[BaseExecutor] = None
executors = {
LOCAL_EXECUTOR: 'airflow.executors.local_executor',
SEQUENTIAL_EXECUTOR: 'airflow.executors.sequential_executor',
CELERY_EXECUTOR: 'airflow.executors.celery_executor',
DASK_EXECUTOR: 'airflow.executors.dask_executor',
KUBERNETES_EXECUTOR: 'airflow.executors.kubernetes_executor',
DEBUG_EXECUTOR: 'airflow.executors.debug_executor'
}
@classmethod
def get_default_executor(cls) -> BaseExecutor:
"""Creates a new instance of the configured executor if none exists and returns it"""
if cls._default_executor is not None:
return cls._default_executor
from airflow.configuration import conf
executor_name = conf.get('core', 'EXECUTOR')
cls._default_executor = ExecutorLoader._get_executor(executor_name)
from airflow import LoggingMixin
log = LoggingMixin().log
log.info("Using executor %s", executor_name)
return cls._default_executor
@classmethod
def _get_executor(cls, executor_name: str) -> BaseExecutor:
"""
Creates a new instance of the named executor.
In case the executor name is unknown in airflow,
look for it in the plugins
"""
if executor_name in cls.executors:
executor_module = importlib.import_module(cls.executors[executor_name])
executor = getattr(executor_module, executor_name)
return executor()
else:
            # Load executor plugins here because they might not have been initialized yet at this point
# TODO: verify the above and remove two lines below in case plugins are always initialized first
from airflow import plugins_manager
plugins_manager.integrate_executor_plugins()
executor_path = executor_name.split('.')
if len(executor_path) != 2:
raise ValueError(f"Executor {executor_name} not supported: "
f"please specify in format plugin_module.executor")
if executor_path[0] not in globals():
raise ValueError(f"Executor {executor_name} not supported")
return globals()[executor_path[0]].__dict__[executor_path[1]]()
| apache-2.0 | -3,511,072,061,836,489,000 | 41.035294 | 109 | 0.683459 | false | 4.378676 | false | false | false |
pdelsante/thug | thug/DOM/History.py | 1 | 3513 | #!/usr/bin/env python
#
# History.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import logging
from .JSClass import JSClass
from .Alexa import Alexa
log = logging.getLogger("Thug")
class History(JSClass):
def __init__(self, window):
self._window = window
self.urls = Alexa
self.pos = len(self.urls) - 1
self.__init_personality()
def __init_personality(self):
self._navigationMode = "automatic"
if log.ThugOpts.Personality.isIE():
self.__init_personality_IE()
return
if log.ThugOpts.Personality.isFirefox():
self.__init_personality_Firefox()
return
if log.ThugOpts.Personality.isChrome():
self.__init_personality_Chrome()
return
if log.ThugOpts.Personality.isSafari():
self.__init_personality_Safari()
return
def __init_personality_IE(self):
pass
def __init_personality_Firefox(self):
self.current = self._current
self.next = self._next
self.previous = self._previous
def __init_personality_Chrome(self):
pass
def __init_personality_Safari(self):
pass
@property
def window(self):
return self._window
@property
def length(self):
return len(self.urls)
@property
def _current(self):
return self.urls[self.pos] if self.length > self.pos and self.pos > 0 else None
@property
def _next(self):
return self.urls[self.pos + 1] if self.length > self.pos + 1 and self.pos > 0 else None
@property
def _previous(self):
return self.urls[self.pos - 1] if self.length > self.pos - 1 and self.pos > 0 else None
def _get_navigationMode(self):
return self._navigationMode
def _set_navigationMode(self, value):
if value in ("automatic", "compatible", "fast", ):
self._navigationMode = value
navigationMode = property(_get_navigationMode, _set_navigationMode)
def pushState(self, state, title, URL):
# self._window.url = URL
pass
def back(self):
"""Loads the previous URL in the history list"""
return self.go(-1)
def forward(self):
"""Loads the next URL in the history list"""
return self.go(1)
def go(self, num_or_url):
"""Loads a specific URL from the history list"""
try:
off = int(num_or_url)
self.pos += off
self.pos = min(max(0, self.pos), len(self.urls) - 1)
self._window.open(self.urls[self.pos])
except ValueError:
self._window.open(num_or_url)
def update(self, url, replace = False):
if replace:
self.urls[self.pos] = url
return
if self.urls[self.pos] != url:
self.urls.insert(self.pos, url)
self.pos += 1
| gpl-2.0 | 2,978,125,687,368,465,400 | 26.661417 | 95 | 0.609735 | false | 3.877483 | false | false | false |
DLR-SC/DataFinder | src/datafinder/gui/user/common/widget/property/editors/list_editor.py | 1 | 14838 | #
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Dialog for editing list property values.
"""
from PyQt4 import QtGui, QtCore
from PyQt4.Qt import Qt
from datafinder.core.configuration.properties import constants
from datafinder.core.configuration.properties import property_type
from datafinder.gui.gen.user.list_property_dialog_ui import Ui_listPropertyDialog
from datafinder.gui.user.common.util import extractPyObject, determineDisplayRepresentation
__version__ = "$Revision-Id:$"
class ListEditor(QtGui.QLineEdit):
"""
    This widget is a specialized line editor which allows
    the manipulation of list data.
"""
_SUPPORTED_PROPERTY_TYPES = [
constants.STRING_TYPE, constants.DATETIME_TYPE, constants.NUMBER_TYPE, constants.BOOLEAN_TYPE]
def __init__(self, restrictions, editorFactory, initData=list(), parent=None):
"""
@param restrictions: List-specific restrictions.
see: L{<property_type.ListType>datafinder.core.configuration.properties.property_type.ListType}
@type restrictions: C{dict}
@param editorFactory: Factory for creation of value editors.
@type editorFactory: C{EditorFactory}
@param initData: Initial list data.
@type initData: C{list} of C{object}
@param parent: Parent widget of the dialog.
@type parent: L{QWidget<PyQt4.QtGui.QWidget}
"""
QtGui.QLineEdit.__init__(self, parent)
self._editorFactory = editorFactory
self.value = initData
self._allowedPropertyTypes = restrictions.get(constants.ALLOWED_SUB_TYPES, self._SUPPORTED_PROPERTY_TYPES)
self._removeUnsupportedPropertyTypes()
self._editButton = QtGui.QPushButton("...", self)
self._editButton.setMaximumSize(QtCore.QSize(20, 20))
self.setReadOnly(True)
self.setStyleSheet("QLineEdit { padding-right: 0px; } ")
self.setText(determineDisplayRepresentation(initData))
self._showEditorSlot()
self.connect(self._editButton, QtCore.SIGNAL("clicked()"), self._showEditorSlot)
def _removeUnsupportedPropertyTypes(self):
removes = list()
for propertyTypeName in self._allowedPropertyTypes:
if not propertyTypeName in self._SUPPORTED_PROPERTY_TYPES:
removes.append(propertyTypeName)
for propertyTypeName in removes:
self._allowedPropertyTypes.remove(propertyTypeName)
def resizeEvent(self, _):
""" Ensures that the edit button is in the right corner of the line editor. """
size = self._editButton.maximumSize()
self._editButton.move(self.rect().right() - size.width(),
(self.rect().bottom() + 1 - size.height()) / 2)
def _showEditorSlot(self):
""" Slot which shows the list editor. """
listPropertyEditor = _ListPropertyDialog(self._allowedPropertyTypes, self.value, self._editorFactory, self)
listPropertyEditor.exec_()
self.setText(determineDisplayRepresentation(self.value))
self.setFocus(Qt.OtherFocusReason)
def text(self):
""" Overwrites the text behavior. """
return self.value
class _ListPropertyDialog(QtGui.QDialog, Ui_listPropertyDialog):
"""
    This dialog shows the content of a list property and supports editing the property.
"""
def __init__(self, allowedPropertyTypes, propertyValues, editorFactory, parent=None):
"""
Constructor.
@param allowedPropertyTypes: Names of available property types.
@type allowedPropertyTypes: C{list} of C{unicode}
@param propertyValues: Initial list data.
@type propertyValues: C{list} of C{object}
@param editorFactory: Factory for creation of value editors.
@type editorFactory: L{EditorFactory<datafinder.gui.user.common.widget.property.editors.factory.Editorfactory>}
@param parent: Parent widget of the dialog.
@type parent: L{QWidget<PyQt4.QtGui.QWidget}
"""
QtGui.QDialog.__init__(self, parent)
Ui_listPropertyDialog.__init__(self)
self.setupUi(self)
self._initState = propertyValues
self._allowedPropertyTypes = allowedPropertyTypes
self._editorFactory = editorFactory
self._initializeSignalConnections()
self._initializeEditButtonsEnabledState()
self._initializeTable(propertyValues)
def _initializeSignalConnections(self):
self.connect(self.tableWidget, QtCore.SIGNAL("itemSelectionChanged()"), self.itemSelectionChangedSlot)
self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self.addSlot)
self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self.editSlot)
self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self.deleteSlot)
self.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), self.accepted)
self.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), self.rejected)
def _initializeEditButtonsEnabledState(self):
if not self._allowedPropertyTypes:
self.addButton.setEnabled(False)
self._setEnableStateOfItemEditingButtons(False)
def _setEnableStateOfItemEditingButtons(self, isEnabled):
self.deleteButton.setEnabled(isEnabled)
self.editButton.setEnabled(isEnabled)
def _initializeTable(self, propertyValues):
"""
Adds the property values into the table model.
@param propertyValues: Property values which should be displayed in the editor.
@type propertyValues: C{list} of C{object}
"""
self.tableWidget.setItemDelegate(
_ListPropertyItemDelegate(self._allowedPropertyTypes, self._editorFactory, self))
self.tableWidget.setColumnWidth(1, 150)
for row, value in enumerate(propertyValues):
propertyType = property_type.determinePropertyTypeConstant(value)
isEditingSupported = self._isEditingSupported(propertyType)
self._addPropertyItem(row, value, propertyType, isEditingSupported)
self.emit(QtCore.SIGNAL("layoutChanged()"))
def _isEditingSupported(self, propertyType):
if propertyType in self._allowedPropertyTypes:
return True
else:
return False
def _addPropertyItem(self, row, value, propertyType, isEditingSupported):
self.tableWidget.insertRow(row)
self.tableWidget.setRowHeight(row, 20)
self.tableWidget.setItem(row, 0 , QtGui.QTableWidgetItem(propertyType))
self.tableWidget.setItem(row, 1, _TableWidgetItem(value, isEditingSupported))
def addSlot(self):
""" This slot is called when a new item should be inserted. """
self.tableWidget.insertRow(self.tableWidget.model().rowCount())
self.tableWidget.setRowHeight(self.tableWidget.model().rowCount() - 1, 20)
self.tableWidget.setItem(self.tableWidget.rowCount() - 1, 0, QtGui.QTableWidgetItem(""))
self.tableWidget.setItem(self.tableWidget.rowCount() - 1, 1, _TableWidgetItem())
self.tableWidget.setFocus()
self.tableWidget.editItem(self.tableWidget.item(self.tableWidget.rowCount() - 1, 0))
def editSlot(self):
""" This slot is called when the edit button is pressed. """
item = self.tableWidget.currentItem()
self.tableWidget.editItem(item)
def deleteSlot(self):
""" Slot is called when the delete button is pressed. """
index = self.tableWidget.selectionModel().currentIndex()
self.tableWidget.model().removeRow(index.row())
if self.tableWidget.rowCount() == 0:
self._setEnableStateOfItemEditingButtons(False)
def itemSelectionChangedSlot(self):
""" De-activates buttons for properties which cannot be properly edited. """
if self.tableWidget.selectedItems():
item = self.tableWidget.selectedItems()[0]
if item.column() == 0: # Only items of the value column contain the editing information
item = self.tableWidget.item(item.row(), 1)
if item.isEditingSupported:
self._setEnableStateOfItemEditingButtons(True)
else:
self._setEnableStateOfItemEditingButtons(False)
def accepted(self):
""" This slot is called when the user clicks OK. It returns the edited list. """
properties = list()
for i in range(self.tableWidget.model().rowCount()):
item = self.tableWidget.item(i, 1)
if not item.value is None:
properties.append(item.value)
self.parent().value = properties
QtGui.QDialog.accept(self)
def rejected(self):
"""
This slot is called when the user cancels the dialog. It returns the
        list that was passed to the dialog as initData.
"""
self.parent().value = self._initState
QtGui.QDialog.reject(self)
class _ListPropertyItemDelegate(QtGui.QItemDelegate):
"""
Delegate for the property modification.
"""
def __init__(self, propertyTypeNames, editorFactory, parent=None):
"""
Constructor.
@param propertyTypeNames: Names of available property types.
@type propertyTypeNames: C{list} of C{unicode}
@param editorFactory: Factory for creation of value editors.
@type editorFactory: L{EditorFactory<datafinder.gui.user.common.widget.property.editors.factory.Editorfactory>}
@param parent: Parent widget of the dialog.
@type parent: L{QWidget<PyQt4.QtGui.QWidget}
"""
QtGui.QItemDelegate.__init__(self, parent)
self._factory = editorFactory
self._propertyTypes = [QtCore.QString(unicode(propType)) for propType in propertyTypeNames]
def createEditor(self, parent, _, index):
"""
@see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor>}
"""
typeIndex = index.model().index(index.row(), 0)
valueType = index.model().data(typeIndex, QtCore.Qt.DisplayRole).toString()
if index.column() == 0:
editor = QtGui.QComboBox(parent)
editor.addItems(self._propertyTypes)
if valueType in self._propertyTypes:
editor.setCurrentIndex(self._propertyTypes.index(valueType))
elif index.column() == 1:
editor = self._factory.createEditor(parent, valueType)
if not editor.isEnabled():
return None
return editor
def setModelData(self, editor, model, index):
"""
@see: QtGui.QItemDelegate#setModelData
"""
returnValue = self._factory.getValueFromEditor(editor)
model.setData(index, QtCore.QVariant(returnValue))
def setEditorData(self, editor, index):
"""
@see: L{setEditorData<PyQt4.QtGui.QItemDelegate.setEditorData>}
"""
if index.column() == 1:
value = self.parent().tableWidget.item(index.row(), 1).value
self._factory.setEditorValue(editor, value)
else:
QtGui.QItemDelegate.setEditorData(self, editor, index)
class _TableWidgetItem(QtGui.QTableWidgetItem):
""" Specific implementation of C{QTableWidgetItem}. """
def __init__(self, value=None, isEditingSupported=True):
"""
@param value: Value which is represented by this item.
@type value: C{object}
@param isEditingSupported: Flag which indicates whether the value can be edited or not.
@type isEditingSupported: C{bool}
"""
QtGui.QTableWidgetItem.__init__(self)
self._value = value
self._isEditingSupported = isEditingSupported
@property
def isEditingSupported(self):
""" Read-only access to the isEditingSupported flag."""
return self._isEditingSupported
@property
def value(self):
""" Read-only access to the value."""
return self._value
def data(self, role):
""" Ensures that the values are correctly rendered. """
if role == Qt.DisplayRole:
return QtCore.QVariant(determineDisplayRepresentation(self._value))
else:
            return QtGui.QTableWidgetItem.data(self, role)
def setData(self, _, value):
""" Converts value given as QVariant to a Python object. """
value = extractPyObject(value)
self._value = value
| bsd-3-clause | 1,791,342,349,966,951,400 | 38.540984 | 119 | 0.638765 | false | 4.466586 | false | false | false |
chripell/pyasicam | view.py | 1 | 8948 | #!/usr/bin/python3
import datetime
import os
import pyasicam as pc
import sys
import numpy as np
import cairo
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GdkPixbuf, GLib, Gdk, Gio, GObject
def gamma_stretch(im, gamma):
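    # Gamma correction for an 8-bit image: normalise to [0, 1], raise to the
    # power `gamma`, then scale back to the [0, 255] range (returned as float).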
if im.dtype != np.float:
im = im.astype(np.float)
im /= 255.0
im = im ** gamma
return im * 255.0
class Camera(pc.Camera):
def __init__(self, i):
super().__init__(i)
self.mean = 1
self.im_num = 0
self.im_mean = None
self.done = 0
self.OpenCamera()
self.InitCamera()
self.set_exposure_ms(1000)
caps = self.GetCameraProperty()
self.SetROIFormat(caps.MaxWidth, caps.MaxHeight, 1, pc.IMG_Y8)
# Trick to make ASI120MC work for short exposures.
# if self.GetCameraProperty().IsUSB3Camera == 0:
# self.SetControlValue(pc.BANDWIDTHOVERLOAD, 20, False)
def capture(self):
self.StartExposure(False)
def get_image(self):
st = self.GetExpStatus()
if st == pc.EXP_FAILED:
# raise RuntimeError("Exposure failed")
print("Exposure failed")
self.capture()
return None
if st == pc.EXP_IDLE:
raise RuntimeError("Exposure not started")
if self.GetExpStatus() == pc.EXP_WORKING:
return None
img = self.GetDataAfterExp()
self.capture()
if self.mean <= 1:
return img
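        # Frame averaging: accumulate `mean` consecutive exposures and return
        # their per-pixel average only once enough frames have been summed;
        # until then None is returned and capturing continues.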
self.im_num += 1
if self.im_num == 1:
self.im_mean = img.astype(np.float)
else:
self.im_mean += img
if self.im_num >= self.mean:
self.im_num = 0
return self.im_mean / self.mean
def set_exposure_ms(self, ms):
self.SetControlValue(pc.EXPOSURE, int(ms*1000), False)
def get_exposure_ms(self):
return self.GetControlValue(pc.EXPOSURE)[0] / 1000.0
def set_gain(self, gain):
self.SetControlValue(pc.GAIN, gain, False)
def get_gain(self):
return self.GetControlValue(pc.GAIN)[0]
class Histo:
def __init__(self):
self.data = None
self.stretch = 0
self.stretch_from = 0
self.stretch_to = 127
self.bins = [2*i - 0.5 for i in range(129)]
def get(self):
self.histo = Gtk.DrawingArea()
self.histo.connect("draw", self.draw)
self.histo.set_property("height-request", 100)
return self.histo
def draw(self, w, cr):
if self.data is None:
return
width = w.get_allocated_width()
height = w.get_allocated_height()
cr.set_source_rgb(0.7, 0.1, 0.1)
cr.move_to(0, 0)
cr.line_to(width, 0)
cr.line_to(width, height)
cr.line_to(0, height)
cr.line_to(0, 0)
cr.stroke()
xscale = width / 127.0
yscale = float(height) / np.max(self.data)
if self.stretch_from >= 0:
cr.set_source_rgb(0.9, 0.6, 0.6)
cr.rectangle(self.stretch_from * xscale, 0,
(self.stretch_to - self.stretch_from) * xscale,
height)
cr.fill()
cr.set_source_rgb(0.1, 0.1, 0.1)
cr.new_path()
cr.move_to(0, height - 0)
cr.line_to(0, height - self.data[0] * yscale)
for i in range(1, 128):
cr.line_to(i * xscale, height - self.data[i] * yscale)
cr.line_to(width, height - 0)
cr.close_path()
cr.fill()
def apply(self, im):
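        # Subsample the frame (every n-th pixel in both axes) so that its
        # larger dimension is at most ~256 before building the 128-bin histogram.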
size = im.shape[0]
if im.shape[1] > size:
size = im.shape[1]
n = 1
while size > 256:
size /= 2
n *= 2
self.data = np.histogram(im[::n, ::n], bins=self.bins)[0]
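        # Percentile stretch: drop the lowest and highest `stretch` percent of
        # pixel values (estimated from the cumulative histogram) and rescale
        # the remaining range to 0-255.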
if self.stretch > 0 and self.stretch < 100:
cs = np.cumsum(self.data)/np.sum(self.data) * 100
self.stretch_from = len(cs[cs <= self.stretch])
self.stretch_to = len(cs[cs <= 100 - self.stretch])
s_to = self.stretch_to / 127.0 * 255.0
s_from = self.stretch_from / 127.0 * 255.0
scale = 255.0 / (s_to - s_from)
im = np.clip((im - s_from) * scale, 0, 255)
else:
self.stretch_from = 0
self.stretch_to = 127
self.histo.queue_draw()
return im
class Mainwindow(Gtk.Window):
def __init__(self, cam, *args, **kwargs):
Gtk.Window.__init__(
self, default_width=800, default_height=600,
title="PYASICAM", *args, **kwargs)
self.cam = cam
self.surface = None
self.gamma = 1.0
cam.capture()
scrolledImage = Gtk.ScrolledWindow()
self.image = Gtk.DrawingArea()
self.image.connect("draw", self.draw)
self.image.connect("configure-event", self.configure)
scrolledImage.add(self.image)
mainHBox = Gtk.HBox()
mainHBox.pack_start(scrolledImage, True, True, 0)
controlsBox = Gtk.VBox()
mainHBox.pack_start(controlsBox, False, False, 0)
self.add_controls(controlsBox)
self.add(mainHBox)
self.connect("delete-event", Gtk.main_quit)
self.periodic = GLib.timeout_add(100, self.get_image)
self.show_all()
def process_image(self, im):
im = self.histo.apply(im)
if self.gamma != 1.0:
im = gamma_stretch(im, self.gamma)
self.publish_image(im)
def get_image(self):
im = self.cam.get_image()
if im is not None:
self.im = im
self.process_image(im)
self.periodic = GLib.timeout_add(100, self.get_image)
def publish_image(self, im):
if im.dtype != np.uint8:
im = im.astype(np.uint8)
im32 = np.dstack((im, im, im, im))
self.surface = cairo.ImageSurface.create_for_data(
im32, cairo.FORMAT_RGB24, im.shape[1], im.shape[0])
self.image.set_size_request(im.shape[1], im.shape[0])
self.image.queue_draw()
def draw(self, w, cr):
if not self.surface:
return
cr.set_source_surface(self.surface, 0, 0)
cr.paint()
def configure(self, w, ev):
if not self.surface:
return
self.image.queue_draw()
def create_text_control(self, text, ini, cb):
box = Gtk.HBox()
label = Gtk.Label()
label.set_markup(text)
label.set_justify(Gtk.Justification.RIGHT)
box.pack_start(label, False, False, 0)
entry = Gtk.Entry()
entry.set_text(ini)
entry.connect("activate", cb)
box.pack_start(entry, True, False, 0)
return box
def add_controls(self, box):
self.histo = Histo()
box.pack_start(self.histo.get(), False, False, 0)
exp_ms = self.create_text_control(
"Exposure (ms):",
"%.2f" % self.cam.get_exposure_ms(),
self.set_exposure_ms)
box.pack_start(exp_ms, False, False, 0)
gain = self.create_text_control(
"Gain:",
"%d" % self.cam.get_gain(),
self.set_gain)
box.pack_start(gain, False, False, 0)
mean = self.create_text_control(
"Mean:",
"%d" % self.cam.mean,
self.set_mean)
box.pack_start(mean, False, False, 0)
stretch = self.create_text_control(
"Stretch:",
"%d" % self.histo.stretch,
self.set_stretch)
box.pack_start(stretch, False, False, 0)
gamma = self.create_text_control(
"Gamma:",
"%d" % self.gamma,
self.set_gamma)
box.pack_start(gamma, False, False, 0)
def set_exposure_ms(self, e):
try:
self.cam.set_exposure_ms(float(e.get_text()))
        except Exception:
pass
e.set_text("%.2f" % self.cam.get_exposure_ms())
def set_gain(self, e):
try:
self.cam.set_gain(int(e.get_text()))
        except Exception:
pass
e.set_text("%d" % self.cam.get_gain())
def set_mean(self, e):
try:
self.cam.mean = int(e.get_text())
        except Exception:
pass
e.set_text("%d" % self.cam.mean)
def set_stretch(self, e):
try:
self.histo.stretch = int(e.get_text())
        except Exception:
pass
e.set_text("%d" % self.histo.stretch)
def set_gamma(self, e):
try:
self.gamma = float(e.get_text())
        except Exception:
pass
e.set_text("%f" % self.gamma)
if len(sys.argv) < 2:
print("Usage: %s [list/camera no.]" % sys.argv[0])
sys.exit(1)
n = pc.GetNumOfConnectedCameras()
if sys.argv[1] == "list":
for i in range(n):
c = pc.Camera(i)
prop = c.GetCameraProperty()
print("%d: %s" % (i, prop.Name.decode("utf-8")))
sys.exit(0)
cam = Camera(int(sys.argv[1]))
window = Mainwindow(cam)
Gtk.main()
| gpl-3.0 | 199,139,717,233,901,900 | 29.026846 | 72 | 0.533862 | false | 3.307948 | false | false | false |
livioferrante/my-final-project | .mywaflib/waflib/extras/boost.py | 1 | 13891 | #!/usr/bin/env python
# encoding: utf-8
#
# partially based on boost.py written by Gernot Vormayr
# written by Ruediger Sonderfeld <[email protected]>, 2008
# modified by Bjoern Michaelsen, 2008
# modified by Luca Fossati, 2008
# rewritten for waf 1.5.1, Thomas Nagy, 2008
# rewritten for waf 1.6.2, Sylvain Rouquette, 2011
'''
This is an extra tool, not bundled with the default waf binary.
To add the boost tool to the waf file:
$ ./waf-light --tools=compat15,boost
or, if you have waf >= 1.6.2
$ ./waf update --files=boost
When using this tool, the wscript will look like:
def options(opt):
opt.load('compiler_cxx boost')
def configure(conf):
conf.load('compiler_cxx boost')
conf.check_boost(lib='system filesystem')
def build(bld):
bld(source='main.cpp', target='app', use='BOOST')
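A variant sketch (the keyword arguments mirror the --boost-* options; adjust the
library names and paths to your project), selecting static multi-threaded
libraries and an explicit Boost location:
    def configure(conf):
        conf.load('compiler_cxx boost')
        conf.check_boost(stlib='system filesystem', mt=True,
                         includes='/path/to/boost_1_47_0',
                         libs='/path/to/boost_1_47_0/stage/lib')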
Options are generated, in order to specify the location of boost includes/libraries.
The `check_boost` configuration function lets you specify which boost libraries are used.
It can also provide default values for the --boost-mt and related command-line arguments.
Everything will be packaged together in a BOOST component that you can use.
When using MSVC, a lot of compilation flags need to match your BOOST build configuration:
- you may have to add /EHsc to your CXXFLAGS or define boost::throw_exception if BOOST_NO_EXCEPTIONS is defined.
Errors: C4530
- boost libraries will try to be smart and use the (pretty but often not useful) auto-linking feature of MSVC
So before calling `conf.check_boost` you might want to disable it by adding
conf.env.DEFINES_BOOST += ['BOOST_ALL_NO_LIB']
Errors:
- boost might also be compiled with /MT, which links the runtime statically.
If you have problems with redefined symbols,
self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
self.env['CXXFLAGS_%s' % var] += ['/MD', '/EHsc']
Passing `--boost-linkage_autodetect` might help ensure a correct linkage in some basic cases.
'''
import sys
import re
from waflib import Utils, Logs, Errors
from waflib.Configure import conf
from waflib.TaskGen import feature, after_method
BOOST_LIBS = ['/usr/lib/x86_64-linux-gnu', '/usr/lib/i386-linux-gnu',
'/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib']
BOOST_INCLUDES = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include']
BOOST_VERSION_FILE = 'boost/version.hpp'
BOOST_VERSION_CODE = '''
#include <iostream>
#include <boost/version.hpp>
int main() { std::cout << BOOST_LIB_VERSION << std::endl; }
'''
BOOST_ERROR_CODE = '''
#include <boost/system/error_code.hpp>
int main() { boost::system::error_code c; }
'''
BOOST_THREAD_CODE = '''
#include <boost/thread.hpp>
int main() { boost::thread t; }
'''
# toolsets from {boost_dir}/tools/build/v2/tools/common.jam
PLATFORM = Utils.unversioned_sys_platform()
detect_intel = lambda env: (PLATFORM == 'win32') and 'iw' or 'il'
detect_clang = lambda env: (PLATFORM == 'darwin') and 'clang-darwin' or 'clang'
detect_mingw = lambda env: (re.search('MinGW', env.CXX[0])) and 'mgw' or 'gcc'
BOOST_TOOLSETS = {
'borland': 'bcb',
'clang': detect_clang,
'como': 'como',
'cw': 'cw',
'darwin': 'xgcc',
'edg': 'edg',
'g++': detect_mingw,
'gcc': detect_mingw,
'icpc': detect_intel,
'intel': detect_intel,
'kcc': 'kcc',
'kylix': 'bck',
'mipspro': 'mp',
'mingw': 'mgw',
'msvc': 'vc',
'qcc': 'qcc',
'sun': 'sw',
'sunc++': 'sw',
'tru64cxx': 'tru',
'vacpp': 'xlc'
}
def options(opt):
opt.add_option('--boost-includes', type='string',
default='', dest='boost_includes',
help='''path to the boost includes root (~boost root)
e.g. /path/to/boost_1_47_0''')
opt.add_option('--boost-libs', type='string',
default='', dest='boost_libs',
help='''path to the directory where the boost libs are
e.g. /path/to/boost_1_47_0/stage/lib''')
opt.add_option('--boost-mt', action='store_true',
default=False, dest='boost_mt',
help='select multi-threaded libraries')
opt.add_option('--boost-abi', type='string', default='', dest='boost_abi',
help='''select libraries with tags (gd for debug, static is automatically added),
see doc Boost, Getting Started, chapter 6.1''')
opt.add_option('--boost-linkage_autodetect', action="store_true", dest='boost_linkage_autodetect',
help="auto-detect boost linkage options (don't get used to it / might break other stuff)")
opt.add_option('--boost-toolset', type='string',
default='', dest='boost_toolset',
help='force a toolset e.g. msvc, vc90, \
gcc, mingw, mgw45 (default: auto)')
py_version = '%d%d' % (sys.version_info[0], sys.version_info[1])
opt.add_option('--boost-python', type='string',
default=py_version, dest='boost_python',
help='select the lib python with this version \
(default: %s)' % py_version)
@conf
def __boost_get_version_file(self, d):
if not d:
return None
dnode = self.root.find_dir(d)
if dnode:
return dnode.find_node(BOOST_VERSION_FILE)
return None
@conf
def boost_get_version(self, d):
"""silently retrieve the boost version number"""
node = self.__boost_get_version_file(d)
if node:
try:
txt = node.read()
except (OSError, IOError):
Logs.error("Could not read the file %r" % node.abspath())
else:
re_but = re.compile('^#define\\s+BOOST_LIB_VERSION\\s+"(.*)"', re.M)
m = re_but.search(txt)
if m:
return m.group(1)
return self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[d], execute=True, define_ret=True)
@conf
def boost_get_includes(self, *k, **kw):
includes = k and k[0] or kw.get('includes', None)
if includes and self.__boost_get_version_file(includes):
return includes
for d in self.environ.get('INCLUDE', '').split(';') + BOOST_INCLUDES:
if self.__boost_get_version_file(d):
return d
if includes:
self.end_msg('headers not found in %s' % includes)
self.fatal('The configuration failed')
else:
self.end_msg('headers not found, please provide a --boost-includes argument (see help)')
self.fatal('The configuration failed')
@conf
def boost_get_toolset(self, cc):
toolset = cc
if not cc:
build_platform = Utils.unversioned_sys_platform()
if build_platform in BOOST_TOOLSETS:
cc = build_platform
else:
cc = self.env.CXX_NAME
if cc in BOOST_TOOLSETS:
toolset = BOOST_TOOLSETS[cc]
return isinstance(toolset, str) and toolset or toolset(self.env)
@conf
def __boost_get_libs_path(self, *k, **kw):
''' return the lib path and all the files in it '''
if 'files' in kw:
return self.root.find_dir('.'), Utils.to_list(kw['files'])
libs = k and k[0] or kw.get('libs', None)
if libs:
path = self.root.find_dir(libs)
files = path.ant_glob('*boost_*')
if not libs or not files:
for d in self.environ.get('LIB', '').split(';') + BOOST_LIBS:
if not d:
continue
path = self.root.find_dir(d)
if path:
files = path.ant_glob('*boost_*')
if files:
break
path = self.root.find_dir(d + '64')
if path:
files = path.ant_glob('*boost_*')
if files:
break
if not path:
if libs:
self.end_msg('libs not found in %s' % libs)
self.fatal('The configuration failed')
else:
self.end_msg('libs not found, please provide a --boost-libs argument (see help)')
self.fatal('The configuration failed')
self.to_log('Found the boost path in %r with the libraries:' % path)
for x in files:
self.to_log(' %r' % x)
return path, files
@conf
def boost_get_libs(self, *k, **kw):
'''
return the lib path and the required libs
according to the parameters
'''
path, files = self.__boost_get_libs_path(**kw)
files = sorted(files, key=lambda f: (len(f.name), f.name), reverse=True)
toolset = self.boost_get_toolset(kw.get('toolset', ''))
toolset_pat = '(-%s[0-9]{0,3})' % toolset
version = '-%s' % self.env.BOOST_VERSION
def find_lib(re_lib, files):
for file in files:
if re_lib.search(file.name):
self.to_log('Found boost lib %s' % file)
return file
return None
def format_lib_name(name):
if name.startswith('lib') and self.env.CC_NAME != 'msvc':
name = name[3:]
return name[:name.rfind('.')]
def match_libs(lib_names, is_static):
libs = []
lib_names = Utils.to_list(lib_names)
if not lib_names:
return libs
t = []
if kw.get('mt', False):
t.append('-mt')
if kw.get('abi', None):
t.append('%s%s' % (is_static and '-s' or '-', kw['abi']))
elif is_static:
t.append('-s')
tags_pat = t and ''.join(t) or ''
ext = is_static and self.env.cxxstlib_PATTERN or self.env.cxxshlib_PATTERN
ext = ext.partition('%s')[2] # remove '%s' or 'lib%s' from PATTERN
for lib in lib_names:
if lib == 'python':
# for instance, with python='27',
# accepts '-py27', '-py2', '27' and '2'
# but will reject '-py3', '-py26', '26' and '3'
tags = '({0})?((-py{2})|(-py{1}(?=[^0-9]))|({2})|({1}(?=[^0-9]))|(?=[^0-9])(?!-py))'.format(tags_pat, kw['python'][0], kw['python'])
else:
tags = tags_pat
# Trying libraries, from most strict match to least one
for pattern in ['boost_%s%s%s%s%s$' % (lib, toolset_pat, tags, version, ext),
'boost_%s%s%s%s$' % (lib, tags, version, ext),
# Give up trying to find the right version
'boost_%s%s%s%s$' % (lib, toolset_pat, tags, ext),
'boost_%s%s%s$' % (lib, tags, ext),
'boost_%s%s$' % (lib, ext),
'boost_%s' % lib]:
self.to_log('Trying pattern %s' % pattern)
file = find_lib(re.compile(pattern), files)
if file:
libs.append(format_lib_name(file.name))
break
else:
self.end_msg('lib %s not found in %s' % (lib, path.abspath()))
self.fatal('The configuration failed')
return libs
return path.abspath(), match_libs(kw.get('lib', None), False), match_libs(kw.get('stlib', None), True)
@conf
def check_boost(self, *k, **kw):
"""
Initialize boost libraries to be used.
Keywords: you can pass the same parameters as with the command line (without "--boost-").
	Note that the command line parameters take priority, and should preferably be used.
"""
if not self.env['CXX']:
self.fatal('load a c++ compiler first, conf.load("compiler_cxx")')
params = {
'lib': k and k[0] or kw.get('lib', None),
'stlib': kw.get('stlib', None)
}
for key, value in self.options.__dict__.items():
if not key.startswith('boost_'):
continue
key = key[len('boost_'):]
params[key] = value and value or kw.get(key, '')
var = kw.get('uselib_store', 'BOOST')
self.start_msg('Checking boost includes')
self.env['INCLUDES_%s' % var] = inc = self.boost_get_includes(**params)
self.env.BOOST_VERSION = self.boost_get_version(inc)
self.end_msg(self.env.BOOST_VERSION)
if Logs.verbose:
Logs.pprint('CYAN', ' path : %s' % self.env['INCLUDES_%s' % var])
if not params['lib'] and not params['stlib']:
return
if 'static' in kw or 'static' in params:
Logs.warn('boost: static parameter is deprecated, use stlib instead.')
self.start_msg('Checking boost libs')
path, libs, stlibs = self.boost_get_libs(**params)
self.env['LIBPATH_%s' % var] = [path]
self.env['STLIBPATH_%s' % var] = [path]
self.env['LIB_%s' % var] = libs
self.env['STLIB_%s' % var] = stlibs
self.end_msg('ok')
if Logs.verbose:
Logs.pprint('CYAN', ' path : %s' % path)
Logs.pprint('CYAN', ' shared libs : %s' % libs)
Logs.pprint('CYAN', ' static libs : %s' % stlibs)
def try_link():
if (params['lib'] and 'system' in params['lib']) or \
params['stlib'] and 'system' in params['stlib']:
self.check_cxx(fragment=BOOST_ERROR_CODE, use=var, execute=False)
if (params['lib'] and 'thread' in params['lib']) or \
params['stlib'] and 'thread' in params['stlib']:
self.check_cxx(fragment=BOOST_THREAD_CODE, use=var, execute=False)
if params.get('linkage_autodetect', False):
self.start_msg("Attempting to detect boost linkage flags")
toolset = self.boost_get_toolset(kw.get('toolset', ''))
if toolset in ('vc',):
				# disable the auto-linking feature, which causes error LNK1181
				# when the libraries the code asks to be linked against cannot be found
self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
# if no dlls are present, we guess the .lib files are not stubs
has_dlls = False
for x in Utils.listdir(path):
if x.endswith(self.env.cxxshlib_PATTERN % ''):
has_dlls = True
break
if not has_dlls:
self.env['STLIBPATH_%s' % var] = [path]
self.env['STLIB_%s' % var] = libs
del self.env['LIB_%s' % var]
del self.env['LIBPATH_%s' % var]
# we attempt to play with some known-to-work CXXFLAGS combinations
for cxxflags in (['/MD', '/EHsc'], []):
self.env.stash()
self.env["CXXFLAGS_%s" % var] += cxxflags
try:
try_link()
self.end_msg("ok: winning cxxflags combination: %s" % (self.env["CXXFLAGS_%s" % var]))
exc = None
break
except Errors.ConfigurationError as e:
self.env.revert()
exc = e
if exc is not None:
self.end_msg("Could not auto-detect boost linking flags combination, you may report it to boost.py author", ex=exc)
self.fatal('The configuration failed')
else:
self.end_msg("Boost linkage flags auto-detection not implemented (needed ?) for this toolchain")
self.fatal('The configuration failed')
else:
self.start_msg('Checking for boost linkage')
try:
try_link()
except Errors.ConfigurationError as e:
self.end_msg("Could not link against boost libraries using supplied options")
self.fatal('The configuration failed')
self.end_msg('ok')
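# A minimal wscript sketch of how check_boost is typically driven (illustrative only,
# not part of this tool; the library names and flags below are assumptions):
#
#   def options(opt):
#       opt.load('compiler_cxx boost')
#
#   def configure(conf):
#       conf.load('compiler_cxx boost')
#       conf.check_boost(lib='system filesystem', mt=True)
#
#   def build(bld):
#       bld.program(source='main.cpp', target='app', use='BOOST')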
@feature('cxx')
@after_method('apply_link')
def install_boost(self):
if install_boost.done or not Utils.is_win32 or not self.bld.cmd.startswith('install'):
return
install_boost.done = True
inst_to = getattr(self, 'install_path', '${BINDIR}')
for lib in self.env.LIB_BOOST:
try:
file = self.bld.find_file(self.env.cxxshlib_PATTERN % lib, self.env.LIBPATH_BOOST)
self.bld.install_files(inst_to, self.bld.root.find_node(file))
except:
continue
install_boost.done = False
| bsd-3-clause | -5,572,335,086,933,527,000 | 32.798054 | 136 | 0.653373 | false | 2.867671 | true | false | false |
Arkapravo/morse-0.6 | src/morse/actuators/destination.py | 1 | 3226 | import logging; logger = logging.getLogger("morse." + __name__)
import morse.core.actuator
from morse.helpers.components import add_data, add_property
class DestinationActuatorClass(morse.core.actuator.MorseActuatorClass):
""" Destination motion controller
    This controller receives a destination point and makes the robot
    translate in a straight line to that location, without turning.
"""
_name = "Destination"
_short_desc = "Instruct the robot to move towards a given target"
add_data('x', 'current X pos')
add_data('y', 'current Y pos')
add_data('z', 'current Z pos')
add_property('_tolerance', 0.5, 'Tolerance')
add_property('_speed', 5.0, 'Speed')
def __init__(self, obj, parent=None):
logger.info('%s initialization' % obj.name)
# Call the constructor of the parent class
super(self.__class__,self).__init__(obj, parent)
self.destination = self.blender_obj.position
#self.local_data['speed'] = 0.0
self.local_data['x'] = self.destination[0]
self.local_data['y'] = self.destination[1]
self.local_data['z'] = self.destination[2]
logger.info('Component initialized')
def default_action(self):
""" Move the object towards the destination. """
parent = self.robot_parent
self.destination = [ self.local_data['x'], self.local_data['y'], self.local_data['z'] ]
logger.debug("STRAIGHT GOT DESTINATION: {0}".format(self.destination))
logger.debug("Robot {0} move status: '{1}'".format(parent.blender_obj.name, parent.move_status))
# Vectors returned are already normalised
distance, global_vector, local_vector = self.blender_obj.getVectTo(self.destination)
logger.debug("My position: {0}".format(self.blender_obj.position))
logger.debug("GOT DISTANCE: {0}".format(distance))
logger.debug("Global vector: {0}".format(global_vector))
logger.debug("Local vector: {0}".format(local_vector))
if distance > self._tolerance:
# Set the robot status
parent.move_status = "Transit"
# Scale the speeds to the time used by Blender
try:
vx = global_vector[0] * self._speed / self.frequency
vy = global_vector[1] * self._speed / self.frequency
vz = global_vector[2] * self._speed / self.frequency
# For the moment ignoring the division by zero
# It happens apparently when the simulation starts
            except ZeroDivisionError:
                # keep vx/vy/vz defined so the applyMovement call below still works
                vx, vy, vz = 0.0, 0.0, 0.0
# If the target has been reached, change the status
else:
# Reset movement variables
vx, vy, vz = 0.0, 0.0, 0.0
#rx, ry, rz = 0.0, 0.0, 0.0
parent.move_status = "Stop"
logger.debug("TARGET REACHED")
logger.debug("Robot {0} move status: '{1}'".format(parent.blender_obj.name, parent.move_status))
# Give the movement instructions directly to the parent
# The second parameter specifies a "local" movement
parent.blender_obj.applyMovement([vx, vy, vz], False)
#parent.blender_obj.applyRotation([rx, ry, rz], False)
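# Minimal sketch of driving this actuator (the middleware plumbing is omitted and the
# variable name is illustrative, not from the MORSE API):
#   destination.local_data['x'], destination.local_data['y'], destination.local_data['z'] = 2.0, -1.0, 0.0
# default_action() then translates the robot toward (2.0, -1.0, 0.0) at self._speed m/s
# and stops once it is within self._tolerance metres of the goal.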
| bsd-3-clause | 6,813,234,703,560,069,000 | 37.86747 | 108 | 0.619653 | false | 3.845054 | false | false | false |
cpcloud/ibis | ibis/pandas/execution/tests/test_structs.py | 1 | 2175 | from collections import OrderedDict
import pandas as pd
import pandas.util.testing as tm
import pytest
import ibis
import ibis.expr.datatypes as dt
@pytest.fixture(scope="module")
def value():
return OrderedDict([("fruit", "pear"), ("weight", 0)])
@pytest.fixture(scope="module")
def struct_client(value):
df = pd.DataFrame(
{
"s": [
OrderedDict([("fruit", "apple"), ("weight", None)]),
value,
OrderedDict([("fruit", "pear"), ("weight", 1)]),
],
"key": list("aab"),
"value": [1, 2, 3],
}
)
return ibis.pandas.connect({"t": df})
@pytest.fixture
def struct_table(struct_client):
return struct_client.table(
"t",
schema={
"s": dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
},
)
def test_struct_field_literal(value):
struct = ibis.literal(value)
assert struct.type() == dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
expr = struct.fruit
result = ibis.pandas.execute(expr)
assert result == "pear"
expr = struct.weight
result = ibis.pandas.execute(expr)
assert result == 0
def test_struct_field_series(struct_table):
t = struct_table
expr = t.s.fruit
result = expr.execute()
expected = pd.Series(["apple", "pear", "pear"], name="fruit")
tm.assert_series_equal(result, expected)
def test_struct_field_series_group_by_key(struct_table):
t = struct_table
expr = t.groupby(t.s.fruit).aggregate(total=t.value.sum())
result = expr.execute()
expected = pd.DataFrame(
[("apple", 1), ("pear", 5)], columns=["fruit", "total"]
)
tm.assert_frame_equal(result, expected)
def test_struct_field_series_group_by_value(struct_table):
t = struct_table
expr = t.groupby(t.key).aggregate(total=t.s.weight.sum())
result = expr.execute()
# these are floats because we have a NULL value in the input data
expected = pd.DataFrame([("a", 0.0), ("b", 1.0)], columns=["key", "total"])
tm.assert_frame_equal(result, expected)
| apache-2.0 | -4,332,289,009,526,960,600 | 25.204819 | 79 | 0.585287 | false | 3.48 | true | false | false |
radinformatics/whatisit | whatisit/apps/wordfish/storage.py | 1 | 2207 | from django.core.files.storage import FileSystemStorage
from django.core.files.move import file_move_safe
from django.contrib.auth.models import User
from django.apps import apps
from fnmatch import fnmatch
from whatisit.settings import (
MEDIA_ROOT,
MEDIA_URL
)
import errno
import itertools
import os
import tempfile
############################################################################
# Storage Models
############################################################################
class WhatisitStorage(FileSystemStorage):
def __init__(self, location=None, base_url=None):
if location is None:
location = MEDIA_ROOT
if base_url is None:
base_url = MEDIA_URL
super(WhatisitStorage, self).__init__(location, base_url)
def url(self, name):
        collection_id = None
spath, file_name = os.path.split(name)
urlsects = [v for v in spath.split('/') if v]
for i in range(len(urlsects)):
sect = urlsects.pop(0)
if sect.isdigit():
collection_id = sect
break
report_path = '/'.join(urlsects)
coll_model = apps.get_model('whatisit', 'ReportCollection')
        collection = coll_model.objects.get(id=collection_id)
#if collection.private:
# cid = collection.private_token
#else:
cid = collection.id
        return os.path.join(self.base_url, str(cid), report_path, file_name)
class ImageStorage(WhatisitStorage):
def get_available_name(self, name):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a number (before
# the file extension, if one exists) to the filename until the generated
# filename doesn't exist.
count = itertools.count(1)
while self.exists(name):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, next(count), file_ext))
return name
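    # For example (illustrative path): if "avatars/face.png" already exists, later
    # saves become "avatars/face_1.png", "avatars/face_2.png", ... -- the counter is
    # appended to the file stem until a free name is found.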
| mit | -5,028,986,313,260,818,000 | 33.484375 | 89 | 0.580879 | false | 4.117537 | false | false | false |
chriha/GistTerminal | helpers.py | 1 | 2293 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import json
import os
import re
import sys
import tempfile
# see http://en.wikipedia.org/wiki/ANSI_escape_code for more ANSI escape codes
class textColors( object ):
grey = '37m'
white = '97m'
cyan = '36m'
lightcyan = '96m'
pink = '35m'
lightpink = '95m'
blue = '34m'
lightblue = '94m'
yellow = '33m'
lightyellow = '93m'
green = '32m'
lightgreen = '92m'
red = '31m'
lightred = '91m'
black = '30m'
darkgrey = '90m'
def get( self, sColor, isBold = False ):
if ( isBold ):
return '\033[1;' + vars( textColors )[sColor]
else:
return '\033[0;' + vars( textColors )[sColor]
def end( self ):
return '\033[0m'
@contextmanager
def namedTempfile():
tmpFile = tempfile.NamedTemporaryFile( delete = False )
try:
yield tmpFile
finally:
tmpFile.close()
os.unlink( tmpFile.name )
class showText( object ):
def help( self ):
print 'Usage: gist [-b] [-c] [-h] [-l] [-o] [-s <search string>] [-t <GitHub API token>]'
print 'Options:'
print ' -h Show this help'
print ' -b Open a selected Gist in the Webbrowser'
print ' -c Copy a selected Gist into your clipboard'
print ' -l List all your Gists'
print ' -s <search string> Search for a string in all Gist descriptions'
print ' -t <GitHub API token> Set your GitHub API token to access your Gists'
print '\r'
print 'Legend: ' + textColors().get( 'yellow' ) + 'private Gist' + textColors().end() + ', ' + textColors().get( 'green' ) + 'public Gist' + textColors().end() + ', ' + textColors().get( 'red' ) + 'error' + textColors().end()
class SimpleHTTPError( Exception ):
def __init__( self, code, response ):
response = json.loads( response.decode( 'utf8', 'ignore' ) )
        print textColors().get( 'red' ) + response['message'] + ' (' + str( code ) + ')' + textColors().end()
sys.exit()
| mit | 7,451,939,604,386,966,000 | 31.757143 | 233 | 0.523768 | false | 3.571651 | false | false | false |
lucasa/landell_gst-gengui | sltv/gstmanager/sbinmanager.py | 2 | 1441 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger('sbinmanager')
class SBinManager(object):
def __init__(self):
self.pipeline_desc = ""
self.check_for_compat = True
def add_sbin(self, element):
if self.check_for_compat and element.type.find("source")!= -1:
if element.sbin.find("tee name=%s_tee" %element.tags[0])!=-1:
logger.info("Adding %s source %s to pipeline" %(element.type, element.description))
self._add_sbin(element.sbin)
else:
oks = 0
for tag in element.tags:
if self.pipeline_desc.find("name=%s_tee" %tag)!=-1:
oks += 1
if not len(element.tags) == oks:
logger.error("Compatible %s source branch not found to fit %s" %(element.type, element.description))
else:
logger.info("Adding branch %s %s to pipeline" %(element.type, element.description))
self._add_sbin(element.sbin)
else:
self._add_sbin(element.sbin)
def add_many(self, *args):
for element in args:
if element is not None:
self.add_sbin(element)
def _add_sbin(self, sbin):
self.pipeline_desc += "%s " %sbin
def get_pipeline(self):
logger.info("Pipeline is:\n%s" %self.pipeline_desc)
| gpl-2.0 | 537,184,325,939,206,140 | 35.025 | 120 | 0.539903 | false | 3.832447 | false | false | false |
andrewyoung1991/abjad | abjad/tools/abctools/AbjadObject.py | 1 | 3350 | # -*- encoding: utf-8 -*-
import abc
AbstractBase = abc.ABCMeta(
'AbstractBase',
(),
{
'__metaclass__': abc.ABCMeta,
'__module__': __name__,
'__slots__': (),
},
)
class AbjadObject(AbstractBase):
'''Abstract base class from which many custom classes inherit.
'''
### CLASS VARIABLES ###
__slots__ = ()
### SPECIAL METHODS ###
def __eq__(self, expr):
r'''Is true when ID of `expr` equals ID of Abjad object.
Otherwise false.
Returns boolean.
'''
return id(self) == id(expr)
def __format__(self, format_specification=''):
r'''Formats Abjad object.
Set `format_specification` to `''` or `'storage'`.
Interprets `''` equal to `'storage'`.
Returns string.
'''
from abjad.tools import systemtools
if format_specification in ('', 'storage'):
return systemtools.StorageFormatManager.get_storage_format(self)
return str(self)
def __getstate__(self):
r'''Gets state of Abjad object.
Returns dictionary.
'''
if hasattr(self, '__dict__'):
return vars(self)
state = {}
for class_ in type(self).__mro__:
for slot in getattr(class_, '__slots__', ()):
state[slot] = getattr(self, slot, None)
return state
def __hash__(self):
r'''Hashes Abjad object.
Required to be explicitly re-defined on Python 3 if __eq__ changes.
Returns integer.
'''
return super(AbjadObject, self).__hash__()
def __ne__(self, expr):
r'''Is true when Abjad object does not equal `expr`.
Otherwise false.
Returns boolean.
'''
return not self == expr
def __repr__(self):
r'''Gets interpreter representation of Abjad object.
Returns string.
'''
from abjad.tools import systemtools
return systemtools.StorageFormatManager.get_repr_format(self)
def __setstate__(self, state):
r'''Sets state of Abjad object.
Returns none.
'''
for key, value in state.items():
setattr(self, key, value)
### PRIVATE PROPERTIES ###
@property
def _one_line_menu_summary(self):
return str(self)
@property
def _repr_specification(self):
from abjad.tools.topleveltools import new
return new(
self._storage_format_specification,
is_indented=False,
)
@property
def _storage_format_specification(self):
from abjad.tools import systemtools
return systemtools.StorageFormatSpecification(self)
### PRIVATE METHODS ###
def _debug(self, value, annotation=None, blank=False):
if annotation is None:
print('debug: {!r}'.format(value))
else:
print('debug ({}): {!r}'.format(annotation, value))
if blank:
print('')
def _debug_values(self, values, annotation=None, blank=True):
if values:
for value in values:
self._debug(value, annotation=annotation)
if blank:
print('')
else:
self._debug(repr(values), annotation=annotation)
if blank:
print('') | gpl-3.0 | 3,573,645,687,400,127,000 | 24.580153 | 76 | 0.541791 | false | 4.361979 | false | false | false |
ISN-LYSTCHA17/glowing-invention | personnalize.py | 1 | 2940 | # import drawing lib
import pygame
# pygame constants as events' constants
from pygame.locals import *
# game constants
from constants import *
from buttonwimage import ButtonWImage
import glob
import os
import textentry
from button import Button
import shutil
class Personnalize:
def __init__(self, win):
self.running = False
self.win = win
self.dbox = textentry.TextBox(self.win, font=pygame.font.SysFont("arial", 18), sy=22, x=((WIDTH - 120) // 2), y=HEIGHT - 32)
self.btns = []
x, y = 200, 20
i = 0
for folder in sorted(glob.glob("gfx/personnalize/*")):
self.btns.append(ButtonWImage(x, y, 64, 64, folder + "/front.png", (128, 48, 120)))
i += 1
if i == 5:
y = 20
x = WIDTH - 264
else:
y += 74
self.valid_btn = Button((WIDTH - 80) // 2, (HEIGHT - 32), 50, 22, "Valider", (12, 200, 35), pygame.font.SysFont("arial", 18), (0, 0, 0))
self.has_valid = False
self.selected = -1
def load(self):
self.running = True
def update(self):
pass
def render(self):
pygame.draw.rect(self.win, (20, 175, 170), (0, 0) + self.win.get_size())
for btn in self.btns:
btn.render(self.win)
if not self.has_valid:
self.valid_btn.render(self.win)
else:
self.dbox.mainloop()
def create_game(self):
with open("saves/game", "w") as file:
file.write(self.dbox.input)
folder = sorted(glob.glob("gfx/personnalize/*"))[self.selected]
for f in glob.glob(folder + "/*.png"):
if os.path.exists("gfx/player/" + os.path.basename(f)):
os.remove("gfx/player/" + os.path.basename(f))
shutil.copyfile(f, "gfx/player/" + os.path.basename(f))
def run(self):
while self.running:
for ev in pygame.event.get():
if ev.type == QUIT:
self.running = False
elif ev.type == MOUSEBUTTONDOWN:
x, y = pygame.mouse.get_pos()
for i, btn in enumerate(self.btns):
if btn.collide(x, y):
if self.selected != -1:
self.btns[self.selected].color = (128, 48, 120)
self.selected = i
btn.color = (50, 120, 50)
break
if not self.has_valid:
if self.valid_btn.collide(x, y):
self.has_valid = True
if not self.dbox.is_running():
self.create_game()
self.running = False
self.update()
self.render()
pygame.display.flip()
| gpl-3.0 | -7,840,768,914,036,989,000 | 32.186047 | 144 | 0.483333 | false | 3.79845 | false | false | false |
cysuncn/python | spark/crm/PROC_M_MID_PER_ASSETS.py | 1 | 3335 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_M_MID_PER_ASSETS').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#prepare the dates used below
etl_date = sys.argv[1]
#ETL date
V_DT = etl_date
#previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#first day of the month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
#---------------------------------------------- business logic starts here ----------------------------------------------------------
#source tables
TMP_PER_ASSETS_LEND = sqlContext.read.parquet(hdfs+'/TMP_PER_ASSETS_LEND/*')
TMP_PER_ASSETS_LEND.registerTempTable("TMP_PER_ASSETS_LEND")
TMP_PER_ASSETS_INSU = sqlContext.read.parquet(hdfs+'/TMP_PER_ASSETS_INSU/*')
TMP_PER_ASSETS_INSU.registerTempTable("TMP_PER_ASSETS_INSU")
TMP_PER_ASSETS_ACCS = sqlContext.read.parquet(hdfs+'/TMP_PER_ASSETS_ACCS/*')
TMP_PER_ASSETS_ACCS.registerTempTable("TMP_PER_ASSETS_ACCS")
TMP_PER_ASSETS_SAVE = sqlContext.read.parquet(hdfs+'/TMP_PER_ASSETS_SAVE/*')
TMP_PER_ASSETS_SAVE.registerTempTable("TMP_PER_ASSETS_SAVE")
#target table
#TMP_PER_ASSETS_SUM full-refresh (snapshot) table
#task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT
CUST_ID
,'0' AS PRD_TYP
,SUM(MONTH_BAL) AS MONTH_BAL
,SUM(MONTH_AVG_BAL) AS MONTH_AVG_BAL
,SUM(THREE_MONTH_AVG_BAL) AS THREE_MONTH_AVG_BAL
,SUM(LAST_MONTH_BAL) AS LAST_MONTH_BAL
,SUM(LAST_MONTH_AVG_BAL) AS LAST_MONTH_AVG_BAL
,SUM(LTHREE_MONTH_AVG_BAL) AS LTHREE_MONTH_AVG_BAL
,SUM(YEAR_BAL) AS YEAR_BAL
,SUM(YEAR_AVG_BAL) AS YEAR_AVG_BAL
,SUM(YEAR_THREE_AVG_BAL) AS YEAR_THREE_AVG_BAL
,FR_ID
FROM
(SELECT * FROM TMP_PER_ASSETS_LEND
UNION ALL
SELECT * FROM TMP_PER_ASSETS_INSU
UNION ALL
SELECT * FROM TMP_PER_ASSETS_ACCS
UNION ALL
SELECT * FROM TMP_PER_ASSETS_SAVE
)A
GROUP BY CUST_ID,FR_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_PER_ASSETS_SUM = sqlContext.sql(sql)
TMP_PER_ASSETS_SUM.registerTempTable("TMP_PER_ASSETS_SUM")
dfn="TMP_PER_ASSETS_SUM/"+V_DT+".parquet"
TMP_PER_ASSETS_SUM.cache()
nrows = TMP_PER_ASSETS_SUM.count()
TMP_PER_ASSETS_SUM.write.save(path=hdfs + '/' + dfn, mode='overwrite')
TMP_PER_ASSETS_SUM.unpersist()
#full-refresh table: after saving, the previous day's data must be removed
ret = os.system("hdfs dfs -rm -r /"+dbname+"/TMP_PER_ASSETS_SUM/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert TMP_PER_ASSETS_SUM lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| gpl-3.0 | -6,297,165,228,514,295,000 | 35.123596 | 170 | 0.621462 | false | 2.346715 | false | true | false |
evanthebouncy/nnhmm | uai_sushi/script_test_sort.py | 1 | 1245 | from model import *
from draw import *
from naive_baseline import *
from quicksort import *
# ------------- helpers --------------
def get_id_map(start_sort, truth):
ret = dict(zip(start_sort, truth))
return ret
def pred_acc(preds, qry):
num_cor = 0
for i in range(L):
for j in range(L):
if np.argmax(preds[i][j]) == np.argmax(qry((i,j))):
num_cor += 1
  return float(num_cor) / (L * L)
def ord_2_pred(ordr):
ret = np.zeros([L,L,2])
for i in range(L):
for j in range(L):
if ordr.index(i) < ordr.index(j):
ret[i][j] = [1.0, 0.0]
else:
ret[i][j] = [0.0, 1.0]
return ret
num_sort = np.array([0.0 for _ in range(L*L)])
for _ in range(2500):
img, _x = get_img_class(test=True, idx=_)
qry = mk_query(_x)
start_sort = np.random.permutation(L)
id_mapping = get_id_map(range(L), _x)
trace = sorta(start_sort)
print "truth"
print _x
for idx, blah in enumerate(trace):
trace[idx] = map(lambda x: id_mapping[x], blah)
for i in range(L*L):
tr = trace[i] if i < len(trace) else trace[-1]
preds = ord_2_pred(tr)
num_sort[i] += pred_acc(preds, qry)
print num_sort / (_ + 1)
| mit | 8,447,393,334,369,140,000 | 22.490566 | 87 | 0.533333 | false | 2.79148 | false | false | false |
cGVuaXM/botcnt | PYodbcutils.py | 1 | 2151 | #https://github.com/mkleehammer/pyodbc/wiki/
import os
import re
import logging
import pypyodbc
from openpyxl import Workbook
from openpyxl.styles import Font
class SQLserver:
def __init__(self, params, autocommit=False, searchescape=None, timeout=None):
params_str = r'DRIVER={ODBC Driver 11 for SQL Server};'
params_str += r"SERVER=%s;" % (params["server"])
params_str += r"DATABASE=%s;" % (params["database"])
params_str += r"UID=%s;" % (params["uid"])
params_str += r"PWD=%s;" % (params["pwd"])
params_str += r"APP=%s;" % (params.get("app", None) or "TelegramBot")
        self._conn = pypyodbc.connect(params_str, autocommit=autocommit, searchescape=searchescape, timeout=timeout) #keep the connection to the db alive
def commit(self):
        if not self._conn.autocommit: #ignore if autocommit is enabled
self._conn.commit()
return True
else:
return False
def execSQL(self, SQLcommand):
        cursor = self._conn.cursor()
        try:
            cursor.execute(SQLcommand)
        except pypyodbc.ProgrammingError:
return 0
return cursor
def _toExcel(self, rows, file_path, sheet2=None):
"""
sheet2 = {
"title": "nome folgio 2",
"A1": "contenuto cella A1"
}
"""
wb = Workbook()
bold_font = Font(color='00FF0000', bold=True)
        ws = wb.active #use the currently active sheet
        ws.title = "Risultati" #rename the sheet (title is a property, not a method)
        ws.sheet_properties.tabColor = "5BF77D" #sheet tab background colour (green)
for row in rows:
ws.append(row)
for cell in ws["1:1"]:
cell.font = bold_font
if sheet2:
            ws = wb.create_sheet(sheet2['title']) #sheet holding the query
            ws.sheet_properties.tabColor = "7BC1ED" #sheet tab background colour (blue)
            ws['A1'] = sheet2['A1'] #write the command string into the first cell of the sheet
wb.save(file_path)
return file_path
def to_excel(self, SQLcommand, file_path):
        cursor = self.execSQL(SQLcommand)
if cursor == 0:
return cursor
columns = [column[0] for column in cursor.description]
rows = []
rows.append(columns)
for row in cursor.fetchall():
rows.append(row)
return self._toExcel(rows, file_path, {"title": "query", "A1": SQLcommand})
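# Minimal usage sketch (connection values and query are placeholders, not from the
# original source):
#   db = SQLserver({"server": "localhost", "database": "mydb", "uid": "user", "pwd": "secret"})
#   db.to_excel("SELECT TOP 10 * FROM orders", "C:\\temp\\orders.xlsx")
# to_excel() writes the result rows to the "Risultati" sheet and stores the query text
# in cell A1 of a second "query" sheet.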
| mit | 3,703,125,957,708,237,000 | 25.231707 | 146 | 0.67457 | false | 2.868 | false | false | false |
edwatt/REU2014 | usrp_info_and_test.py | 1 | 2853 | #!/usr/bin/env python
"""
Retrieve the operating parameters of the connected USRP and loop through its operating spectrum, transmitting a constant wave signal
"""
from gnuradio import gr
from gnuradio import analog
from gnuradio import uhd
from time import sleep
MAX_RATE = 1000e6
class build_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
args = "" #only supporting USB USRPs for now
#find uhd devices
d = uhd.find_devices(uhd.device_addr(args))
if d:
uhd_type = d[0].get('type')
print "\nFound '%s'" % uhd_type
else:
print "\nNo device found"
self.u_tx = None
return
#check version of USRP and set num_channels
if uhd_type == "usrp":
tx_nchan = 2
rx_nchan = 2
else:
tx_nchan = 1
rx_nchan = 1
#setup transmit chain (usrp sink, signal source)
#usrp sink
stream_args = uhd.stream_args('fc32', channels = range(tx_nchan))
self.u_tx = uhd.usrp_sink(device_addr=args, stream_args=stream_args)
self.u_tx.set_samp_rate(MAX_RATE)
#analog signal source - sig_source_c(sampling_freq,waveform, wave_freq, ampl, offset=0)
self.tx_src0 = analog.sig_source_c(self.u_tx.get_samp_rate(), analog.GR_CONST_WAVE, 0, 1.0, 0)
#check and output freq range, gain range, num_channels
#gain range and max
tx_gain_range = self.u_tx.get_gain_range()
tx_gain_min = tx_gain_range.start()
tx_gain_max = tx_gain_range.stop()
#freq range
tx_freq_range = self.u_tx.get_freq_range()
tx_freq_low = tx_freq_range.start()
tx_freq_high = tx_freq_range.stop()
tx_freq_mid = (tx_freq_low + tx_freq_high) / 2.0
#output info
print "\nDevice Info"
print "\n\tType: %s" % uhd_type
print "\n\tMin Freq: %d MHz" % (tx_freq_low/1e6)
print "\tMax Freq: %d MHz" % (tx_freq_high/1e6)
print "\tMid Freq: %d MHz" % (tx_freq_mid/1e6)
print "\n\tMin Gain: %d dB" % tx_gain_min
print "\tMax Gain: %d dB" % tx_gain_max
#set initial parameters
for i in xrange(tx_nchan):
self.u_tx.set_center_freq(tx_freq_mid + i*1e6, i)
self.u_tx.set_gain(tx_gain_max, i)
#connect blocks
self.connect(self.tx_src0, self.u_tx)
def main():
try:
tb = build_block()
tb.start()
if tb.u_tx is not None:
print "Transmission test will cycle once through the operating frequencies hopping 10 MHz at a time"
raw_input("Press Enter to begin transmission test & Ctrl-C to exit\n")
start = tb.u_tx.get_freq_range().start()
stop = tb.u_tx.get_freq_range().stop()
freq_hops = int((stop - start) / 10e6) + 1
print "\nTransmit Frequencies:"
channel = 0 #default to first channel
for i in xrange(freq_hops):
trans_freq = start + i * 10e6
tb.u_tx.set_center_freq(trans_freq,channel)
print "\n%d MHz" % (trans_freq/1e6)
sleep(.3)
print "\nTest Over"
tb.stop()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| gpl-3.0 | 8,491,993,155,991,852,000 | 23.177966 | 122 | 0.654399 | false | 2.610247 | false | false | false |
paxos1977/QuickFixLogFixup | QuickFixLogViewer.py | 1 | 52083 | import sublime, sublime_plugin
import re
def multiple_replace(dict, text):
pattern = re.compile("^(%s)\=" % "|".join(map(re.escape, dict.keys())))
lines = text.split("\x01")
newLines = []
for line in lines:
new_line = pattern.sub(lambda match: dict[match.string[match.start():match.end()-1]] + "=", line)
newLines.append(new_line)
return "\n".join(newLines)
class QuickFixLogFixupCommand(sublime_plugin.TextCommand):
""" When run on a QuickFix log, it will split on SOH fields,
then replace field numbers with names.
This makes things much more readable for a human."""
def run(self, edit):
""" Run the plugin"""
#self.view.insert(edit, 0, "Hello, World!")
documentRange = sublime.Region(0, self.view.size())
text = self.view.substr(documentRange)
text = multiple_replace(self.field_map, text)
self.view.replace(edit, documentRange, text)
""" The map of FIX field IDs to string descriptions."""
field_map = {
"1" : "Account"
, "10" : "CheckSum"
, "100" : "ExDestination"
, "1000": "UnderlyingTimeUnit"
, "1001": "LegTimeUnit"
, "1002": "AllocMethod"
, "1003": "TradeID"
, "1005": "SideTradeReportID"
, "1006": "SideFillStationCd"
, "1007": "SideReasonCd"
, "1008": "SideTrdSubTyp"
, "1009": "SideLastQty"
, "1009": "SideQty"
, "1011": "MessageEventSource"
, "1012": "SideTrdRegTimestamp"
, "1013": "SideTrdRegTimestampType"
, "1014": "SideTrdRegTimestampSrc"
, "1015": "AsOfIndicator"
, "1016": "NoSideTrdRegTS"
, "1017": "LegOptionRatio"
, "1018": "NoInstrumentParties"
, "1019": "InstrumentPartyID"
, "102" : "CxlRejReason"
, "1020": "TradeVolume"
, "1021": "MDBookType"
, "1022": "MDFeedType"
, "1023": "MDPriceLevel"
, "1024": "MDOriginType"
, "1025": "FirstPx"
, "1026": "MDEntrySpotRate"
, "1027": "MDEntryForwardPoints"
, "1028": "ManualOrderIndicator"
, "1029": "CustDirectedOrder"
, "103" : "OrdRejReason"
, "1030": "ReceivedDeptID"
, "1031": "CustOrderHandlingInst"
, "1032": "OrderHandlingInstSource"
, "1033": "DeskType"
, "1034": "DeskTypeSource"
, "1035": "DeskOrderHandlingInst"
, "1036": "ExecAckStatus"
, "1037": "UnderlyingDeliveryAmount"
, "1038": "UnderlyingCapValue"
, "1039": "UnderlyingSettlMethod"
, "104" : "IOIQualifier"
, "1040": "SecondaryTradeID"
, "1041": "FirmTradeID"
, "1042": "SecondaryFirmTradeID"
, "1043": "CollApplType"
, "1044": "UnderlyingAdjustedQuantity"
, "1045": "UnderlyingFXRate"
, "1046": "UnderlyingFXRateCalc"
, "1047": "AllocPositionEffect"
, "1048": "DealingCapacity"
, "1049": "InstrmtAssignmentMethod"
, "105" : "WaveNo"
, "1050": "InstrumentPartyIDSource"
, "1051": "InstrumentPartyRole"
, "1052": "NoInstrumentPartySubIDs"
, "1053": "InstrumentPartySubID"
, "1054": "InstrumentPartySubIDType"
, "1055": "PositionCurrency"
, "1056": "CalculatedCcyLastQty"
, "1057": "AggressorIndicator"
, "1058": "NoUndlyInstrumentParties"
, "1059": "UnderlyingInstrumentPartyID"
, "1059": "UndlyInstrumentPartyID"
, "106" : "Issuer"
, "1060": "UnderlyingInstrumentPartyIDSource"
, "1060": "UndlyInstrumentPartyIDSource"
, "1061": "UnderlyingInstrumentPartyRole"
, "1061": "UndlyInstrumentPartyRole"
, "1062": "NoUndlyInstrumentPartySubIDs"
, "1063": "UnderlyingInstrumentPartySubID"
, "1063": "UndlyInstrumentPartySubID"
, "1064": "UnderlyingInstrumentPartySubIDType"
, "1064": "UndlyInstrumentPartySubIDType"
, "1065": "BidSwapPoints"
, "1066": "OfferSwapPoints"
, "1067": "LegBidForwardPoints"
, "1068": "LegOfferForwardPoints"
, "1069": "SwapPoints"
, "107" : "SecurityDesc"
, "1070": "MDQuoteType"
, "1071": "LastSwapPoints"
, "1072": "SideGrossTradeAmt"
, "1073": "LegLastForwardPoints"
, "1074": "LegCalculatedCcyLastQty"
, "1075": "LegGrossTradeAmt"
, "1079": "MaturityTime"
, "108" : "HeartBtInt"
, "1080": "RefOrderID"
, "1081": "RefOrderIDSource"
, "1082": "SecondaryDisplayQty"
, "1083": "DisplayWhen"
, "1084": "DisplayMethod"
, "1085": "DisplayLowQty"
, "1086": "DisplayHighQty"
, "1087": "DisplayMinIncr"
, "1088": "RefreshQty"
, "1089": "MatchIncrement"
, "109" : "ClientID"
, "1090": "MaxPriceLevels"
, "1091": "PreTradeAnonymity"
, "1092": "PriceProtectionScope"
, "1093": "LotType"
, "1094": "PegPriceType"
, "1095": "PeggedRefPrice"
, "1096": "PegSecurityIDSource"
, "1097": "PegSecurityID"
, "1098": "PegSymbol"
, "1099": "PegSecurityDesc"
, "11" : "ClOrdID"
, "110" : "MinQty"
, "1100": "TriggerType"
, "1101": "TriggerAction"
, "1102": "TriggerPrice"
, "1103": "TriggerSymbol"
, "1104": "TriggerSecurityID"
, "1105": "TriggerSecurityIDSource"
, "1106": "TriggerSecurityDesc"
, "1107": "TriggerPriceType"
, "1108": "TriggerPriceTypeScope"
, "1109": "TriggerPriceDirection"
, "111" : "MaxFloor"
, "1110": "TriggerNewPrice"
, "1111": "TriggerOrderType"
, "1112": "TriggerNewQty"
, "1113": "TriggerTradingSessionID"
, "1114": "TriggerTradingSessionSubID"
, "1115": "OrderCategory"
, "1116": "NoRootPartyIDs"
, "1117": "RootPartyID"
, "1118": "RootPartyIDSource"
, "1119": "RootPartyRole"
, "112" : "TestReqID"
, "1120": "NoRootPartySubIDs"
, "1121": "RootPartySubID"
, "1122": "RootPartySubIDType"
, "1123": "TradeHandlingInstr"
, "1124": "OrigTradeHandlingInstr"
, "1125": "OrigTradeDate"
, "1126": "OrigTradeID"
, "1127": "OrigSecondaryTradeID"
, "1128": "ApplVerID"
, "1129": "CstmApplVerID"
, "113" : "ReportToExch"
, "1130": "RefApplVerID"
, "1131": "RefCstmApplVerID"
, "1132": "TZTransactTime"
, "1133": "ExDestinationIDSource"
, "1134": "ReportedPxDiff"
, "1135": "RptSys"
, "1136": "AllocClearingFeeIndicator"
, "1137": "DefaultApplVerID"
, "1138": "DisplayQty"
, "1139": "ExchangeSpecialInstructions"
, "114" : "LocateReqd"
, "1140": "MaxTradeVol"
, "1141": "NoMDFeedTypes"
, "1142": "MatchAlgorithm"
, "1143": "MaxPriceVariation"
, "1144": "ImpliedMarketIndicator"
, "1145": "EventTime"
, "1146": "MinPriceIncrementAmount"
, "1147": "UnitOfMeasureQty"
, "1148": "LowLimitPrice"
, "1149": "HighLimitPrice"
, "115" : "OnBehalfOfCompID"
, "1150": "TradingReferencePrice"
, "1151": "SecurityGroup"
, "1152": "LegNumber"
, "1153": "SettlementCycleNo"
, "1154": "SideCurrency"
, "1155": "SideSettlCurrency"
, "1156": "ApplExtID"
, "1157": "CcyAmt"
, "1158": "NoSettlDetails"
, "1159": "SettlObligMode"
, "116" : "OnBehalfOfSubID"
, "1160": "SettlObligMsgID"
, "1161": "SettlObligID"
, "1162": "SettlObligTransType"
, "1163": "SettlObligRefID"
, "1164": "SettlObligSource"
, "1165": "NoSettlOblig"
, "1166": "QuoteMsgID"
, "1167": "QuoteEntryStatus"
, "1168": "TotNoCxldQuotes"
, "1169": "TotNoAccQuotes"
, "117" : "QuoteID"
, "1170": "TotNoRejQuotes"
, "1171": "PrivateQuote"
, "1172": "RespondentType"
, "1173": "MDSubBookType"
, "1174": "SecurityTradingEvent"
, "1175": "NoStatsIndicators"
, "1176": "StatsType"
, "1177": "NoOfSecSizes"
, "1178": "MDSecSizeType"
, "1179": "MDSecSize"
, "118" : "NetMoney"
, "1180": "ApplID"
, "1181": "ApplSeqNum"
, "1182": "ApplBegSeqNum"
, "1183": "ApplEndSeqNum"
, "1184": "SecurityXMLLen"
, "1185": "SecurityXML"
, "1186": "SecurityXMLSchema"
, "1187": "RefreshIndicator"
, "1188": "Volatility"
, "1189": "TimeToExpiration"
, "119" : "SettlCurrAmt"
, "1190": "RiskFreeRate"
, "1191": "PriceUnitOfMeasure"
, "1192": "PriceUnitOfMeasureQty"
, "1193": "SettlMethod"
, "1194": "ExerciseStyle"
, "1195": "OptPayAmount"
, "1195": "OptPayoutAmount"
, "1196": "PriceQuoteMethod"
, "1197": "FuturesValuationMethod"
, "1197": "ValuationMethod"
, "1198": "ListMethod"
, "1199": "CapPrice"
, "12" : "Commission"
, "120" : "SettlCurrency"
, "1200": "FloorPrice"
, "1201": "NoStrikeRules"
, "1202": "StartStrikePxRange"
, "1203": "EndStrikePxRange"
, "1204": "StrikeIncrement"
, "1205": "NoTickRules"
, "1206": "StartTickPriceRange"
, "1207": "EndTickPriceRange"
, "1208": "TickIncrement"
, "1209": "TickRuleType"
, "121" : "ForexReq"
, "1210": "NestedInstrAttribType"
, "1211": "NestedInstrAttribValue"
, "1212": "LegMaturityTime"
, "1213": "UnderlyingMaturityTime"
, "1214": "DerivativeSymbol"
, "1215": "DerivativeSymbolSfx"
, "1216": "DerivativeSecurityID"
, "1217": "DerivativeSecurityIDSource"
, "1218": "NoDerivativeSecurityAltID"
, "1219": "DerivativeSecurityAltID"
, "122" : "OrigSendingTime"
, "1220": "DerivativeSecurityAltIDSource"
, "1221": "SecondaryLowLimitPrice"
, "1222": "MaturityRuleID"
, "1223": "StrikeRuleID"
, "1224": "LegUnitOfMeasureQty"
, "1225": "DerivativeOptPayAmount"
, "1226": "EndMaturityMonthYear"
, "1227": "ProductComplex"
, "1228": "DerivativeProductComplex"
, "1229": "MaturityMonthYearIncrement"
, "123" : "GapFillFlag"
, "1230": "SecondaryHighLimitPrice"
, "1231": "MinLotSize"
, "1232": "NoExecInstRules"
, "1234": "NoLotTypeRules"
, "1235": "NoMatchRules"
, "1236": "NoMaturityRules"
, "1237": "NoOrdTypeRules"
, "1239": "NoTimeInForceRules"
, "124" : "NoExecs"
, "1240": "SecondaryTradingReferencePrice"
, "1241": "StartMaturityMonthYear"
, "1242": "FlexProductEligibilityIndicator"
, "1243": "DerivFlexProductEligibilityIndicator"
, "1244": "FlexibleIndicator"
, "1245": "TradingCurrency"
, "1246": "DerivativeProduct"
, "1247": "DerivativeSecurityGroup"
, "1248": "DerivativeCFICode"
, "1249": "DerivativeSecurityType"
, "125" : "CxlType"
, "1250": "DerivativeSecuritySubType"
, "1251": "DerivativeMaturityMonthYear"
, "1252": "DerivativeMaturityDate"
, "1253": "DerivativeMaturityTime"
, "1254": "DerivativeSettleOnOpenFlag"
, "1255": "DerivativeInstrmtAssignmentMethod"
, "1256": "DerivativeSecurityStatus"
, "1257": "DerivativeInstrRegistry"
, "1258": "DerivativeCountryOfIssue"
, "1259": "DerivativeStateOrProvinceOfIssue"
, "126" : "ExpireTime"
, "1260": "DerivativeLocaleOfIssue"
, "1261": "DerivativeStrikePrice"
, "1262": "DerivativeStrikeCurrency"
, "1263": "DerivativeStrikeMultiplier"
, "1264": "DerivativeStrikeValue"
, "1265": "DerivativeOptAttribute"
, "1266": "DerivativeContractMultiplier"
, "1267": "DerivativeMinPriceIncrement"
, "1268": "DerivativeMinPriceIncrementAmount"
, "1269": "DerivativeUnitOfMeasure"
, "127" : "DKReason"
, "1270": "DerivativeUnitOfMeasureQty"
, "1271": "DerivativeTimeUnit"
, "1272": "DerivativeSecurityExchange"
, "1273": "DerivativePositionLimit"
, "1274": "DerivativeNTPositionLimit"
, "1275": "DerivativeIssuer"
, "1276": "DerivativeIssueDate"
, "1277": "DerivativeEncodedIssuerLen"
, "1278": "DerivativeEncodedIssuer"
, "1279": "DerivativeSecurityDesc"
, "128" : "DeliverToCompID"
, "1280": "DerivativeEncodedSecurityDescLen"
, "1281": "DerivativeEncodedSecurityDesc"
, "1282": "DerivativeSecurityXMLLen"
, "1283": "DerivativeSecurityXML"
, "1284": "DerivativeSecurityXMLSchema"
, "1285": "DerivativeContractSettlMonth"
, "1286": "NoDerivativeEvents"
, "1287": "DerivativeEventType"
, "1288": "DerivativeEventDate"
, "1289": "DerivativeEventTime"
, "129" : "DeliverToSubID"
, "1290": "DerivativeEventPx"
, "1291": "DerivativeEventText"
, "1292": "NoDerivativeInstrumentParties"
, "1293": "DerivativeInstrumentPartyID"
, "1294": "DerivativeInstrumentPartyIDSource"
, "1295": "DerivativeInstrumentPartyRole"
, "1296": "NoDerivativeInstrumentPartySubIDs"
, "1297": "DerivativeInstrumentPartySubID"
, "1298": "DerivativeInstrumentPartySubIDType"
, "1299": "DerivativeExerciseStyle"
, "13" : "CommType"
, "130" : "IOINaturalFlag"
, "1300": "MarketSegmentID"
, "1301": "MarketID"
, "1302": "MaturityMonthYearIncrementUnits"
, "1303": "MaturityMonthYearFormat"
, "1304": "StrikeExerciseStyle"
, "1305": "SecondaryPriceLimitType"
, "1306": "PriceLimitType"
, "1307": "DerivativeSecurityListRequestType"
, "1308": "ExecInstValue"
, "1309": "NoTradingSessionRules"
, "131" : "QuoteReqID"
, "1310": "NoMarketSegments"
, "1311": "NoDerivativeInstrAttrib"
, "1312": "NoNestedInstrAttrib"
, "1313": "DerivativeInstrAttribType"
, "1314": "DerivativeInstrAttribValue"
, "1315": "DerivativePriceUnitOfMeasure"
, "1316": "DerivativePriceUnitOfMeasureQty"
, "1317": "DerivativeSettlMethod"
, "1318": "DerivativePriceQuoteMethod"
, "1319": "DerivativeFuturesValuationMethod"
, "1319": "DerivativeValuationMethod"
, "132" : "BidPx"
, "1320": "DerivativeListMethod"
, "1321": "DerivativeCapPrice"
, "1322": "DerivativeFloorPrice"
, "1323": "DerivativePutOrCall"
, "1324": "ListUpdateAction"
, "1325": "ParentMktSegmID"
, "1326": "TradingSessionDesc"
, "1327": "TradSesUpdateAction"
, "1328": "RejectText"
, "1329": "FeeMultiplier"
, "133" : "OfferPx"
, "1330": "UnderlyingLegSymbol"
, "1331": "UnderlyingLegSymbolSfx"
, "1332": "UnderlyingLegSecurityID"
, "1333": "UnderlyingLegSecurityIDSource"
, "1334": "NoUnderlyingLegSecurityAltID"
, "1335": "UnderlyingLegSecurityAltID"
, "1336": "UnderlyingLegSecurityAltIDSource"
, "1337": "UnderlyingLegSecurityType"
, "1338": "UnderlyingLegSecuritySubType"
, "1339": "UnderlyingLegMaturityMonthYear"
, "134" : "BidSize"
, "1340": "UnderlyingLegStrikePrice"
, "1341": "UnderlyingLegSecurityExchange"
, "1342": "NoOfLegUnderlyings"
, "1343": "UnderlyingLegPutOrCall"
, "1344": "UnderlyingLegCFICode"
, "1345": "UnderlyingLegMaturityDate"
, "1346": "ApplReqID"
, "1347": "ApplReqType"
, "1348": "ApplResponseType"
, "1349": "ApplTotalMessageCount"
, "135" : "OfferSize"
, "1350": "ApplLastSeqNum"
, "1351": "NoApplIDs"
, "1352": "ApplResendFlag"
, "1353": "ApplResponseID"
, "1354": "ApplResponseError"
, "1355": "RefApplID"
, "1356": "ApplReportID"
, "1357": "RefApplLastSeqNum"
, "1358": "LegPutOrCall"
, "1359": "EncodedSymbolLen"
, "136" : "NoMiscFees"
, "1360": "EncodedSymbol"
, "1361": "TotNoFills"
, "1362": "NoFills"
, "1363": "FillExecID"
, "1364": "FillPx"
, "1365": "FillQty"
, "1366": "LegAllocID"
, "1367": "LegAllocSettlCurrency"
, "1368": "TradSesEvent"
, "1369": "MassActionReportID"
, "137" : "MiscFeeAmt"
, "1370": "NoNotAffectedOrders"
, "1371": "NotAffectedOrderID"
, "1372": "NotAffOrigClOrdID"
, "1373": "MassActionType"
, "1374": "MassActionScope"
, "1375": "MassActionResponse"
, "1376": "MassActionRejectReason"
, "1377": "MultilegModel"
, "1378": "MultilegPriceMethod"
, "1379": "LegVolatility"
, "138" : "MiscFeeCurr"
, "1380": "DividendYield"
, "1381": "LegDividendYield"
, "1382": "CurrencyRatio"
, "1383": "LegCurrencyRatio"
, "1384": "LegExecInst"
, "1385": "ContingencyType"
, "1386": "ListRejectReason"
, "1387": "NoTrdRepIndicators"
, "1388": "TrdRepPartyRole"
, "1389": "TrdRepIndicator"
, "139" : "MiscFeeType"
, "1390": "TradePublishIndicator"
, "1391": "UnderlyingLegOptAttribute"
, "1392": "UnderlyingLegSecurityDesc"
, "1393": "MarketReqID"
, "1394": "MarketReportID"
, "1395": "MarketUpdateAction"
, "1396": "MarketSegmentDesc"
, "1397": "EncodedMktSegmDescLen"
, "1398": "EncodedMktSegmDesc"
, "1399": "ApplNewSeqNum"
, "14" : "CumQty"
, "140" : "PrevClosePx"
, "1400": "EncryptedPasswordMethod"
, "1401": "EncryptedPasswordLen"
, "1402": "EncryptedPassword"
, "1403": "EncryptedNewPasswordLen"
, "1404": "EncryptedNewPassword"
, "1405": "UnderlyingLegMaturityTime"
, "1406": "RefApplExtID"
, "1407": "DefaultApplExtID"
, "1408": "DefaultCstmApplVerID"
, "1409": "SessionStatus"
, "141" : "ResetSeqNumFlag"
, "1410": "DefaultVerIndicator"
, "1411": "Nested4PartySubIDType"
, "1412": "Nested4PartySubID"
, "1413": "NoNested4PartySubIDs"
, "1414": "NoNested4PartyIDs"
, "1415": "Nested4PartyID"
, "1416": "Nested4PartyIDSource"
, "1417": "Nested4PartyRole"
, "1418": "LegLastQty"
, "1419": "UnderlyingExerciseStyle"
, "142" : "SenderLocationID"
, "1420": "LegExerciseStyle"
, "1421": "LegPriceUnitOfMeasure"
, "1422": "LegPriceUnitOfMeasureQty"
, "1423": "UnderlyingUnitOfMeasureQty"
, "1424": "UnderlyingPriceUnitOfMeasure"
, "1425": "UnderlyingPriceUnitOfMeasureQty"
, "1426": "ApplReportType"
, "1427": "SideExecID"
, "1428": "OrderDelay"
, "1429": "OrderDelayUnit"
, "143" : "TargetLocationID"
, "1430": "VenueType"
, "1431": "RefOrdIDReason"
, "1432": "OrigCustOrderCapacity"
, "1433": "RefApplReqID"
, "1434": "ModelType"
, "1435": "ContractMultiplierUnit"
, "1436": "LegContractMultiplierUnit"
, "1437": "UnderlyingContractMultiplierUnit"
, "1438": "DerivativeContractMultiplierUnit"
, "1439": "FlowScheduleType"
, "144" : "OnBehalfOfLocationID"
, "1440": "LegFlowScheduleType"
, "1441": "UnderlyingFlowScheduleType"
, "1442": "DerivativeFlowScheduleType"
, "1443": "FillLiquidityInd"
, "1444": "SideLiquidityInd"
, "1445": "NoRateSources"
, "1446": "RateSource"
, "1447": "RateSourceType"
, "1448": "ReferencePage"
, "1449": "RestructuringType"
, "145" : "DeliverToLocationID"
, "1450": "Seniority"
, "1451": "NotionalPercentageOutstanding"
, "1452": "OriginalNotionalPercentageOutstanding"
, "1453": "UnderlyingRestructuringType"
, "1454": "UnderlyingSeniority"
, "1455": "UnderlyingNotionalPercentageOutstanding"
, "1456": "UnderlyingOriginalNotionalPercentageOutstanding"
, "1457": "AttachmentPoint"
, "1458": "DetachmentPoint"
, "1459": "UnderlyingAttachmentPoint"
, "146" : "NoRelatedSym"
, "1460": "UnderlyingDetachmentPoint"
, "1461": "NoTargetPartyIDs"
, "1462": "TargetPartyID"
, "1463": "TargetPartyIDSource"
, "1464": "TargetPartyRole"
, "1465": "SecurityListID"
, "1466": "SecurityListRefID"
, "1467": "SecurityListDesc"
, "1468": "EncodedSecurityListDescLen"
, "1469": "EncodedSecurityListDesc"
, "147" : "Subject"
, "1470": "SecurityListType"
, "1471": "SecurityListTypeSource"
, "1472": "NewsID"
, "1473": "NewsCategory"
, "1474": "LanguageCode"
, "1475": "NoNewsRefIDs"
, "1476": "NewsRefID"
, "1477": "NewsRefType"
, "1478": "StrikePriceDeterminationMethod"
, "1479": "StrikePriceBoundaryMethod"
, "148" : "Headline"
, "1480": "StrikePriceBoundaryPrecision"
, "1481": "UnderlyingPriceDeterminationMethod"
, "1482": "OptPayoutType"
, "1483": "NoComplexEvents"
, "1484": "ComplexEventType"
, "1485": "ComplexOptPayoutAmount"
, "1486": "ComplexEventPrice"
, "1487": "ComplexEventPriceBoundaryMethod"
, "1488": "ComplexEventPriceBoundaryPrecision"
, "1489": "ComplexEventPriceTimeType"
, "149" : "URLLink"
, "1490": "ComplexEventCondition"
, "1491": "NoComplexEventDates"
, "1492": "ComplexEventStartDate"
, "1493": "ComplexEventEndDate"
, "1494": "NoComplexEventTimes"
, "1495": "ComplexEventStartTime"
, "1496": "ComplexEventEndTime"
, "1497": "StreamAsgnReqID"
, "1498": "StreamAsgnReqType"
, "1499": "NoAsgnReqs"
, "15" : "Currency"
, "150" : "ExecType"
, "1500": "MDStreamID"
, "1501": "StreamAsgnRptID"
, "1502": "StreamAsgnRejReason"
, "1503": "StreamAsgnAckType"
, "1504": "RelSymTransactTime"
, "1505": "PartyDetailsListRequestID"
, "1506": "NoPartyListResponseTypes"
, "1507": "PartyListResponseType"
, "1508": "NoRequestedPartyRoles"
, "1509": "RequestedPartyRole"
, "151" : "LeavesQty"
, "1510": "PartyDetailsListReportID"
, "1511": "PartyDetailsRequestResult"
, "1512": "TotNoPartyList"
, "1513": "NoPartyList"
, "1514": "NoPartyRelationships"
, "1515": "PartyRelationship"
, "1516": "NoPartyAltIDs"
, "1517": "PartyAltID"
, "1518": "PartyAltIDSource"
, "1519": "NoPartyAltSubIDs"
, "152" : "CashOrderQty"
, "1520": "PartyAltSubID"
, "1521": "PartyAltSubIDType"
, "1522": "NoContextPartyIDs"
, "1523": "ContextPartyID"
, "1524": "ContextPartyIDSource"
, "1525": "ContextPartyRole"
, "1526": "NoContextPartySubIDs"
, "1527": "ContextPartySubID"
, "1528": "ContextPartySubIDType"
, "1529": "NoRiskLimits"
, "153" : "AllocAvgPx"
, "1530": "RiskLimitType"
, "1531": "RiskLimitAmount"
, "1532": "RiskLimitCurrency"
, "1533": "RiskLimitPlatform"
, "1534": "NoRiskInstruments"
, "1535": "RiskInstrumentOperator"
, "1536": "RiskSymbol"
, "1537": "RiskSymbolSfx"
, "1538": "RiskSecurityID"
, "1539": "RiskSecurityIDSource"
, "154" : "AllocNetMoney"
, "1540": "NoRiskSecurityAltID"
, "1541": "RiskSecurityAltID"
, "1542": "RiskSecurityAltIDSource"
, "1543": "RiskProduct"
, "1544": "RiskProductComplex"
, "1545": "RiskSecurityGroup"
, "1546": "RiskCFICode"
, "1547": "RiskSecurityType"
, "1548": "RiskSecuritySubType"
, "1549": "RiskMaturityMonthYear"
, "155" : "SettlCurrFxRate"
, "1550": "RiskMaturityTime"
, "1551": "RiskRestructuringType"
, "1552": "RiskSeniority"
, "1553": "RiskPutOrCall"
, "1554": "RiskFlexibleIndicator"
, "1555": "RiskCouponRate"
, "1556": "RiskSecurityDesc"
, "1557": "RiskInstrumentSettlType"
, "1558": "RiskInstrumentMultiplier"
, "1559": "NoRiskWarningLevels"
, "156" : "SettlCurrFxRateCalc"
, "1560": "RiskWarningLevelPercent"
, "1561": "RiskWarningLevelName"
, "1562": "NoRelatedPartyIDs"
, "1563": "RelatedPartyID"
, "1564": "RelatedPartyIDSource"
, "1565": "RelatedPartyRole"
, "1566": "NoRelatedPartySubIDs"
, "1567": "RelatedPartySubID"
, "1568": "RelatedPartySubIDType"
, "1569": "NoRelatedPartyAltIDs"
, "157" : "NumDaysInterest"
, "1570": "RelatedPartyAltID"
, "1571": "RelatedPartyAltIDSource"
, "1572": "NoRelatedPartyAltSubIDs"
, "1573": "RelatedPartyAltSubID"
, "1574": "RelatedPartyAltSubIDType"
, "1575": "NoRelatedContextPartyIDs"
, "1576": "RelatedContextPartyID"
, "1577": "RelatedContextPartyIDSource"
, "1578": "RelatedContextPartyRole"
, "1579": "NoRelatedContextPartySubIDs"
, "158" : "AccruedInterestRate"
, "1580": "RelatedContextPartySubID"
, "1581": "RelatedContextPartySubIDType"
, "1582": "NoRelationshipRiskLimits"
, "1583": "RelationshipRiskLimitType"
, "1584": "RelationshipRiskLimitAmount"
, "1585": "RelationshipRiskLimitCurrency"
, "1586": "RelationshipRiskLimitPlatform"
, "1587": "NoRelationshipRiskInstruments"
, "1588": "RelationshipRiskInstrumentOperator"
, "1589": "RelationshipRiskSymbol"
, "159" : "AccruedInterestAmt"
, "1590": "RelationshipRiskSymbolSfx"
, "1591": "RelationshipRiskSecurityID"
, "1592": "RelationshipRiskSecurityIDSource"
, "1593": "NoRelationshipRiskSecurityAltID"
, "1594": "RelationshipRiskSecurityAltID"
, "1595": "RelationshipRiskSecurityAltIDSource"
, "1596": "RelationshipRiskProduct"
, "1597": "RelationshipRiskProductComplex"
, "1598": "RelationshipRiskSecurityGroup"
, "1599": "RelationshipRiskCFICode"
, "16" : "EndSeqNo"
, "160" : "SettlInstMode"
, "1600": "RelationshipRiskSecurityType"
, "1601": "RelationshipRiskSecuritySubType"
, "1602": "RelationshipRiskMaturityMonthYear"
, "1603": "RelationshipRiskMaturityTime"
, "1604": "RelationshipRiskRestructuringType"
, "1605": "RelationshipRiskSeniority"
, "1606": "RelationshipRiskPutOrCall"
, "1607": "RelationshipRiskFlexibleIndicator"
, "1608": "RelationshipRiskCouponRate"
, "1609": "RelationshipRiskSecurityExchange"
, "161" : "AllocText"
, "1610": "RelationshipRiskSecurityDesc"
, "1611": "RelationshipRiskInstrumentSettlType"
, "1612": "RelationshipRiskInstrumentMultiplier"
, "1613": "NoRelationshipRiskWarningLevels"
, "1614": "RelationshipRiskWarningLevelPercent"
, "1615": "RelationshipRiskWarningLevelName"
, "1616": "RiskSecurityExchange"
, "1617": "StreamAsgnType"
, "1618": "RelationshipRiskEncodedSecurityDescLen"
, "1619": "RelationshipRiskEncodedSecurityDesc"
, "162" : "SettlInstID"
, "1620": "RiskEncodedSecurityDescLen"
, "1621": "RiskEncodedSecurityDesc"
, "163" : "SettlInstTransType"
, "164" : "EmailThreadID"
, "165" : "SettlInstSource"
, "166" : "SettlLocation"
, "167" : "SecurityType"
, "168" : "EffectiveTime"
, "169" : "StandInstDbType"
, "17" : "ExecID"
, "170" : "StandInstDbName"
, "171" : "StandInstDbID"
, "172" : "SettlDeliveryType"
, "173" : "SettlDepositoryCode"
, "174" : "SettlBrkrCode"
, "175" : "SettlInstCode"
, "176" : "SecuritySettlAgentName"
, "177" : "SecuritySettlAgentCode"
, "178" : "SecuritySettlAgentAcctNum"
, "179" : "SecuritySettlAgentAcctName"
, "18" : "ExecInst"
, "180" : "SecuritySettlAgentContactName"
, "181" : "SecuritySettlAgentContactPhone"
, "182" : "CashSettlAgentName"
, "183" : "CashSettlAgentCode"
, "184" : "CashSettlAgentAcctNum"
, "185" : "CashSettlAgentAcctName"
, "186" : "CashSettlAgentContactName"
, "187" : "CashSettlAgentContactPhone"
, "188" : "BidSpotRate"
, "189" : "BidForwardPoints"
, "19" : "ExecRefID"
, "190" : "OfferSpotRate"
, "191" : "OfferForwardPoints"
, "192" : "OrderQty2"
, "193" : "FutSettDate2"
, "193" : "SettlDate2"
, "194" : "LastSpotRate"
, "195" : "LastForwardPoints"
, "196" : "AllocLinkID"
, "197" : "AllocLinkType"
, "198" : "SecondaryOrderID"
, "199" : "NoIOIQualifiers"
, "2" : "AdvId"
, "20" : "ExecTransType"
, "200" : "MaturityMonthYear"
, "201" : "PutOrCall"
, "202" : "StrikePrice"
, "203" : "CoveredOrUncovered"
, "204" : "CustomerOrFirm"
, "205" : "MaturityDay"
, "206" : "OptAttribute"
, "207" : "SecurityExchange"
, "208" : "NotifyBrokerOfCredit"
, "209" : "AllocHandlInst"
, "21" : "HandlInst"
, "210" : "MaxShow"
, "211" : "PegDifference"
, "211" : "PegOffsetValue"
, "212" : "XmlDataLen"
, "213" : "XmlData"
, "214" : "SettlInstRefID"
, "215" : "NoRoutingIDs"
, "216" : "RoutingType"
, "217" : "RoutingID"
, "218" : "Spread"
, "218" : "SpreadToBenchmark"
, "219" : "Benchmark"
, "22" : "IDSource"
, "22" : "SecurityIDSource"
, "220" : "BenchmarkCurveCurrency"
, "221" : "BenchmarkCurveName"
, "222" : "BenchmarkCurvePoint"
, "223" : "CouponRate"
, "224" : "CouponPaymentDate"
, "225" : "IssueDate"
, "226" : "RepurchaseTerm"
, "227" : "RepurchaseRate"
, "228" : "Factor"
, "229" : "TradeOriginationDate"
, "23" : "IOIid"
, "23" : "IOIID"
, "230" : "ExDate"
, "231" : "ContractMultiplier"
, "232" : "NoStipulations"
, "233" : "StipulationType"
, "234" : "StipulationValue"
, "235" : "YieldType"
, "236" : "Yield"
, "237" : "TotalTakedown"
, "238" : "Concession"
, "239" : "RepoCollateralSecurityType"
, "24" : "IOIOthSvc"
, "240" : "RedemptionDate"
, "241" : "UnderlyingCouponPaymentDate"
, "242" : "UnderlyingIssueDate"
, "243" : "UnderlyingRepoCollateralSecurityType"
, "244" : "UnderlyingRepurchaseTerm"
, "245" : "UnderlyingRepurchaseRate"
, "246" : "UnderlyingFactor"
, "247" : "UnderlyingRedemptionDate"
, "248" : "LegCouponPaymentDate"
, "249" : "LegIssueDate"
, "25" : "IOIQltyInd"
, "250" : "LegRepoCollateralSecurityType"
, "251" : "LegRepurchaseTerm"
, "252" : "LegRepurchaseRate"
, "253" : "LegFactor"
, "254" : "LegRedemptionDate"
, "255" : "CreditRating"
, "256" : "UnderlyingCreditRating"
, "257" : "LegCreditRating"
, "258" : "TradedFlatSwitch"
, "259" : "BasisFeatureDate"
, "26" : "IOIRefID"
, "260" : "BasisFeaturePrice"
, "262" : "MDReqID"
, "263" : "SubscriptionRequestType"
, "264" : "MarketDepth"
, "265" : "MDUpdateType"
, "266" : "AggregatedBook"
, "267" : "NoMDEntryTypes"
, "268" : "NoMDEntries"
, "269" : "MDEntryType"
, "27" : "IOIQty"
, "27" : "IOIShares"
, "270" : "MDEntryPx"
, "271" : "MDEntrySize"
, "272" : "MDEntryDate"
, "273" : "MDEntryTime"
, "274" : "TickDirection"
, "275" : "MDMkt"
, "276" : "QuoteCondition"
, "277" : "TradeCondition"
, "278" : "MDEntryID"
, "279" : "MDUpdateAction"
, "28" : "IOITransType"
, "280" : "MDEntryRefID"
, "281" : "MDReqRejReason"
, "282" : "MDEntryOriginator"
, "283" : "LocationID"
, "284" : "DeskID"
, "285" : "DeleteReason"
, "286" : "OpenCloseSettleFlag"
, "286" : "OpenCloseSettlFlag"
, "287" : "SellerDays"
, "288" : "MDEntryBuyer"
, "289" : "MDEntrySeller"
, "29" : "LastCapacity"
, "290" : "MDEntryPositionNo"
, "291" : "FinancialStatus"
, "292" : "CorporateAction"
, "293" : "DefBidSize"
, "294" : "DefOfferSize"
, "295" : "NoQuoteEntries"
, "296" : "NoQuoteSets"
, "297" : "QuoteAckStatus"
, "297" : "QuoteStatus"
, "298" : "QuoteCancelType"
, "299" : "QuoteEntryID"
, "3" : "AdvRefID"
, "30" : "LastMkt"
, "300" : "QuoteRejectReason"
, "301" : "QuoteResponseLevel"
, "302" : "QuoteSetID"
, "303" : "QuoteRequestType"
, "304" : "TotNoQuoteEntries"
, "304" : "TotQuoteEntries"
, "305" : "UnderlyingIDSource"
, "305" : "UnderlyingSecurityIDSource"
, "306" : "UnderlyingIssuer"
, "307" : "UnderlyingSecurityDesc"
, "308" : "UnderlyingSecurityExchange"
, "309" : "UnderlyingSecurityID"
, "31" : "LastPx"
, "310" : "UnderlyingSecurityType"
, "311" : "UnderlyingSymbol"
, "312" : "UnderlyingSymbolSfx"
, "313" : "UnderlyingMaturityMonthYear"
, "314" : "UnderlyingMaturityDay"
, "315" : "UnderlyingPutOrCall"
, "316" : "UnderlyingStrikePrice"
, "317" : "UnderlyingOptAttribute"
, "318" : "UnderlyingCurrency"
, "319" : "RatioQty"
, "32" : "LastQty"
, "32" : "LastShares"
, "320" : "SecurityReqID"
, "321" : "SecurityRequestType"
, "322" : "SecurityResponseID"
, "323" : "SecurityResponseType"
, "324" : "SecurityStatusReqID"
, "325" : "UnsolicitedIndicator"
, "326" : "SecurityTradingStatus"
, "327" : "HaltReasonChar"
, "327" : "HaltReasonInt"
, "328" : "InViewOfCommon"
, "329" : "DueToRelated"
, "33" : "LinesOfText"
, "33" : "NoLinesOfText"
, "330" : "BuyVolume"
, "331" : "SellVolume"
, "332" : "HighPx"
, "333" : "LowPx"
, "334" : "Adjustment"
, "335" : "TradSesReqID"
, "336" : "TradingSessionID"
, "337" : "ContraTrader"
, "338" : "TradSesMethod"
, "339" : "TradSesMode"
, "34" : "MsgSeqNum"
, "340" : "TradSesStatus"
, "341" : "TradSesStartTime"
, "342" : "TradSesOpenTime"
, "343" : "TradSesPreCloseTime"
, "344" : "TradSesCloseTime"
, "345" : "TradSesEndTime"
, "346" : "NumberOfOrders"
, "347" : "MessageEncoding"
, "348" : "EncodedIssuerLen"
, "349" : "EncodedIssuer"
, "35" : "MsgType"
, "350" : "EncodedSecurityDescLen"
, "351" : "EncodedSecurityDesc"
, "352" : "EncodedListExecInstLen"
, "353" : "EncodedListExecInst"
, "354" : "EncodedTextLen"
, "355" : "EncodedText"
, "356" : "EncodedSubjectLen"
, "357" : "EncodedSubject"
, "358" : "EncodedHeadlineLen"
, "359" : "EncodedHeadline"
, "36" : "NewSeqNo"
, "360" : "EncodedAllocTextLen"
, "361" : "EncodedAllocText"
, "362" : "EncodedUnderlyingIssuerLen"
, "363" : "EncodedUnderlyingIssuer"
, "364" : "EncodedUnderlyingSecurityDescLen"
, "365" : "EncodedUnderlyingSecurityDesc"
, "366" : "AllocPrice"
, "367" : "QuoteSetValidUntilTime"
, "368" : "QuoteEntryRejectReason"
, "369" : "LastMsgSeqNumProcessed"
, "37" : "OrderID"
, "370" : "OnBehalfOfSendingTime"
, "371" : "RefTagID"
, "372" : "RefMsgType"
, "373" : "SessionRejectReason"
, "374" : "BidRequestTransType"
, "375" : "ContraBroker"
, "376" : "ComplianceID"
, "377" : "SolicitedFlag"
, "378" : "ExecRestatementReason"
, "379" : "BusinessRejectRefID"
, "38" : "OrderQty"
, "380" : "BusinessRejectReason"
, "381" : "GrossTradeAmt"
, "382" : "NoContraBrokers"
, "383" : "MaxMessageSize"
, "384" : "NoMsgTypes"
, "385" : "MsgDirection"
, "386" : "NoTradingSessions"
, "387" : "TotalVolumeTraded"
, "388" : "DiscretionInst"
, "389" : "DiscretionOffset"
, "389" : "DiscretionOffsetValue"
, "39" : "OrdStatus"
, "390" : "BidID"
, "391" : "ClientBidID"
, "392" : "ListName"
, "393" : "TotalNumSecurities"
, "393" : "TotNoRelatedSym"
, "394" : "BidType"
, "395" : "NumTickets"
, "396" : "SideValue1"
, "397" : "SideValue2"
, "398" : "NoBidDescriptors"
, "399" : "BidDescriptorType"
, "4" : "AdvSide"
, "40" : "OrdType"
, "400" : "BidDescriptor"
, "401" : "SideValueInd"
, "402" : "LiquidityPctLow"
, "403" : "LiquidityPctHigh"
, "404" : "LiquidityValue"
, "405" : "EFPTrackingError"
, "406" : "FairValue"
, "407" : "OutsideIndexPct"
, "408" : "ValueOfFutures"
, "409" : "LiquidityIndType"
, "41" : "OrigClOrdID"
, "410" : "WtAverageLiquidity"
, "411" : "ExchangeForPhysical"
, "412" : "OutMainCntryUIndex"
, "413" : "CrossPercent"
, "414" : "ProgRptReqs"
, "415" : "ProgPeriodInterval"
, "416" : "IncTaxInd"
, "417" : "NumBidders"
, "418" : "BidTradeType"
, "418" : "TradeType"
, "419" : "BasisPxType"
, "42" : "OrigTime"
, "420" : "NoBidComponents"
, "421" : "Country"
, "422" : "TotNoStrikes"
, "423" : "PriceType"
, "424" : "DayOrderQty"
, "425" : "DayCumQty"
, "426" : "DayAvgPx"
, "427" : "GTBookingInst"
, "428" : "NoStrikes"
, "429" : "ListStatusType"
, "43" : "PossDupFlag"
, "430" : "NetGrossInd"
, "431" : "ListOrderStatus"
, "432" : "ExpireDate"
, "433" : "ListExecInstType"
, "434" : "CxlRejResponseTo"
, "435" : "UnderlyingCouponRate"
, "436" : "UnderlyingContractMultiplier"
, "437" : "ContraTradeQty"
, "438" : "ContraTradeTime"
, "439" : "ClearingFirm"
, "44" : "Price"
, "440" : "ClearingAccount"
, "441" : "LiquidityNumSecurities"
, "442" : "MultiLegReportingType"
, "443" : "StrikeTime"
, "444" : "ListStatusText"
, "445" : "EncodedListStatusTextLen"
, "446" : "EncodedListStatusText"
, "447" : "PartyIDSource"
, "448" : "PartyID"
, "449" : "TotalVolumeTradedDate"
, "45" : "RefSeqNum"
, "450" : "TotalVolumeTradedTime"
, "451" : "NetChgPrevDay"
, "452" : "PartyRole"
, "453" : "NoPartyIDs"
, "454" : "NoSecurityAltID"
, "455" : "SecurityAltID"
, "456" : "SecurityAltIDSource"
, "457" : "NoUnderlyingSecurityAltID"
, "458" : "UnderlyingSecurityAltID"
, "459" : "UnderlyingSecurityAltIDSource"
, "46" : "RelatdSym"
, "460" : "Product"
, "461" : "CFICode"
, "462" : "UnderlyingProduct"
, "463" : "UnderlyingCFICode"
, "464" : "TestMessageIndicator"
, "465" : "QuantityType"
, "466" : "BookingRefID"
, "467" : "IndividualAllocID"
, "468" : "RoundingDirection"
, "469" : "RoundingModulus"
, "47" : "Rule80A"
, "470" : "CountryOfIssue"
, "471" : "StateOrProvinceOfIssue"
, "472" : "LocaleOfIssue"
, "473" : "NoRegistDtls"
, "474" : "MailingDtls"
, "475" : "InvestorCountryOfResidence"
, "476" : "PaymentRef"
, "477" : "DistribPaymentMethod"
, "478" : "CashDistribCurr"
, "479" : "CommCurrency"
, "48" : "SecurityID"
, "480" : "CancellationRights"
, "481" : "MoneyLaunderingStatus"
, "482" : "MailingInst"
, "483" : "TransBkdTime"
, "484" : "ExecPriceType"
, "485" : "ExecPriceAdjustment"
, "486" : "DateOfBirth"
, "487" : "TradeReportTransType"
, "488" : "CardHolderName"
, "489" : "CardNumber"
, "49" : "SenderCompID"
, "490" : "CardExpDate"
, "491" : "CardIssNo"
, "491" : "CardIssNum"
, "492" : "PaymentMethod"
, "493" : "RegistAcctType"
, "494" : "Designation"
, "495" : "TaxAdvantageType"
, "496" : "RegistRejReasonText"
, "497" : "FundRenewWaiv"
, "498" : "CashDistribAgentName"
, "499" : "CashDistribAgentCode"
, "5" : "AdvTransType"
, "50" : "SenderSubID"
, "500" : "CashDistribAgentAcctNumber"
, "501" : "CashDistribPayRef"
, "502" : "CashDistribAgentAcctName"
, "503" : "CardStartDate"
, "504" : "PaymentDate"
, "505" : "PaymentRemitterID"
, "506" : "RegistStatus"
, "507" : "RegistRejReasonCode"
, "508" : "RegistRefID"
, "509" : "RegistDetls"
, "509" : "RegistDtls"
, "51" : "SendingDate"
, "510" : "NoDistribInsts"
, "511" : "RegistEmail"
, "512" : "DistribPercentage"
, "513" : "RegistID"
, "514" : "RegistTransType"
, "515" : "ExecValuationPoint"
, "516" : "OrderPercent"
, "517" : "OwnershipType"
, "518" : "NoContAmts"
, "519" : "ContAmtType"
, "52" : "SendingTime"
, "520" : "ContAmtValue"
, "521" : "ContAmtCurr"
, "522" : "OwnerType"
, "523" : "PartySubID"
, "524" : "NestedPartyID"
, "525" : "NestedPartyIDSource"
, "526" : "SecondaryClOrdID"
, "527" : "SecondaryExecID"
, "528" : "OrderCapacity"
, "529" : "OrderRestrictions"
, "53" : "Quantity"
, "53" : "Shares"
, "530" : "MassCancelRequestType"
, "531" : "MassCancelResponse"
, "532" : "MassCancelRejectReason"
, "533" : "TotalAffectedOrders"
, "534" : "NoAffectedOrders"
, "535" : "AffectedOrderID"
, "536" : "AffectedSecondaryOrderID"
, "537" : "QuoteType"
, "538" : "NestedPartyRole"
, "539" : "NoNestedPartyIDs"
, "54" : "Side"
, "540" : "TotalAccruedInterestAmt"
, "541" : "MaturityDate"
, "542" : "UnderlyingMaturityDate"
, "543" : "InstrRegistry"
, "544" : "CashMargin"
, "545" : "NestedPartySubID"
, "546" : "Scope"
, "547" : "MDImplicitDelete"
, "548" : "CrossID"
, "549" : "CrossType"
, "55" : "Symbol"
, "550" : "CrossPrioritization"
, "551" : "OrigCrossID"
, "552" : "NoSides"
, "553" : "Username"
, "554" : "Password"
, "555" : "NoLegs"
, "556" : "LegCurrency"
, "557" : "TotalNumSecurityTypes"
, "557" : "TotNoSecurityTypes"
, "558" : "NoSecurityTypes"
, "559" : "SecurityListRequestType"
, "56" : "TargetCompID"
, "560" : "SecurityRequestResult"
, "561" : "RoundLot"
, "562" : "MinTradeVol"
, "563" : "MultiLegRptTypeReq"
, "564" : "LegPositionEffect"
, "565" : "LegCoveredOrUncovered"
, "566" : "LegPrice"
, "567" : "TradSesStatusRejReason"
, "568" : "TradeRequestID"
, "569" : "TradeRequestType"
, "57" : "TargetSubID"
, "570" : "PreviouslyReported"
, "571" : "TradeReportID"
, "572" : "TradeReportRefID"
, "573" : "MatchStatus"
, "574" : "MatchType"
, "575" : "OddLot"
, "576" : "NoClearingInstructions"
, "577" : "ClearingInstruction"
, "578" : "TradeInputSource"
, "579" : "TradeInputDevice"
, "58" : "Text"
, "580" : "NoDates"
, "581" : "AccountType"
, "582" : "CustOrderCapacity"
, "583" : "ClOrdLinkID"
, "584" : "MassStatusReqID"
, "585" : "MassStatusReqType"
, "586" : "OrigOrdModTime"
, "587" : "LegSettlmntTyp"
, "587" : "LegSettlType"
, "588" : "LegFutSettDate"
, "588" : "LegSettlDate"
, "589" : "DayBookingInst"
, "59" : "TimeInForce"
, "590" : "BookingUnit"
, "591" : "PreallocMethod"
, "592" : "UnderlyingCountryOfIssue"
, "593" : "UnderlyingStateOrProvinceOfIssue"
, "594" : "UnderlyingLocaleOfIssue"
, "595" : "UnderlyingInstrRegistry"
, "596" : "LegCountryOfIssue"
, "597" : "LegStateOrProvinceOfIssue"
, "598" : "LegLocaleOfIssue"
, "599" : "LegInstrRegistry"
, "6" : "AvgPx"
, "60" : "TransactTime"
, "600" : "LegSymbol"
, "601" : "LegSymbolSfx"
, "602" : "LegSecurityID"
, "603" : "LegSecurityIDSource"
, "604" : "NoLegSecurityAltID"
, "605" : "LegSecurityAltID"
, "606" : "LegSecurityAltIDSource"
, "607" : "LegProduct"
, "608" : "LegCFICode"
, "609" : "LegSecurityType"
, "61" : "Urgency"
, "610" : "LegMaturityMonthYear"
, "611" : "LegMaturityDate"
, "612" : "LegStrikePrice"
, "613" : "LegOptAttribute"
, "614" : "LegContractMultiplier"
, "615" : "LegCouponRate"
, "616" : "LegSecurityExchange"
, "617" : "LegIssuer"
, "618" : "EncodedLegIssuerLen"
, "619" : "EncodedLegIssuer"
, "62" : "ValidUntilTime"
, "620" : "LegSecurityDesc"
, "621" : "EncodedLegSecurityDescLen"
, "622" : "EncodedLegSecurityDesc"
, "623" : "LegRatioQty"
, "624" : "LegSide"
, "625" : "TradingSessionSubID"
, "626" : "AllocType"
, "627" : "NoHops"
, "628" : "HopCompID"
, "629" : "HopSendingTime"
, "63" : "SettlmntTyp"
, "63" : "SettlType"
, "630" : "HopRefID"
, "631" : "MidPx"
, "632" : "BidYield"
, "633" : "MidYield"
, "634" : "OfferYield"
, "635" : "ClearingFeeIndicator"
, "636" : "WorkingIndicator"
, "637" : "LegLastPx"
, "638" : "PriorityIndicator"
, "639" : "PriceImprovement"
, "64" : "FutSettDate"
, "64" : "SettlDate"
, "640" : "Price2"
, "641" : "LastForwardPoints2"
, "642" : "BidForwardPoints2"
, "643" : "OfferForwardPoints2"
, "644" : "RFQReqID"
, "645" : "MktBidPx"
, "646" : "MktOfferPx"
, "647" : "MinBidSize"
, "648" : "MinOfferSize"
, "649" : "QuoteStatusReqID"
, "65" : "SymbolSfx"
, "650" : "LegalConfirm"
, "651" : "UnderlyingLastPx"
, "652" : "UnderlyingLastQty"
, "653" : "SecDefStatus"
, "654" : "LegRefID"
, "655" : "ContraLegRefID"
, "656" : "SettlCurrBidFxRate"
, "657" : "SettlCurrOfferFxRate"
, "658" : "QuoteRequestRejectReason"
, "659" : "SideComplianceID"
, "66" : "ListID"
, "660" : "AcctIDSource"
, "661" : "AllocAcctIDSource"
, "662" : "BenchmarkPrice"
, "663" : "BenchmarkPriceType"
, "664" : "ConfirmID"
, "665" : "ConfirmStatus"
, "666" : "ConfirmTransType"
, "667" : "ContractSettlMonth"
, "668" : "DeliveryForm"
, "669" : "LastParPx"
, "67" : "ListSeqNo"
, "670" : "NoLegAllocs"
, "671" : "LegAllocAccount"
, "672" : "LegIndividualAllocID"
, "673" : "LegAllocQty"
, "674" : "LegAllocAcctIDSource"
, "675" : "LegSettlCurrency"
, "676" : "LegBenchmarkCurveCurrency"
, "677" : "LegBenchmarkCurveName"
, "678" : "LegBenchmarkCurvePoint"
, "679" : "LegBenchmarkPrice"
, "68" : "ListNoOrds"
, "68" : "TotNoOrders"
, "680" : "LegBenchmarkPriceType"
, "681" : "LegBidPx"
, "682" : "LegIOIQty"
, "683" : "NoLegStipulations"
, "684" : "LegOfferPx"
, "685" : "LegOrderQty"
, "686" : "LegPriceType"
, "687" : "LegQty"
, "688" : "LegStipulationType"
, "689" : "LegStipulationValue"
, "69" : "ListExecInst"
, "690" : "LegSwapType"
, "691" : "Pool"
, "692" : "QuotePriceType"
, "693" : "QuoteRespID"
, "694" : "QuoteRespType"
, "695" : "QuoteQualifier"
, "696" : "YieldRedemptionDate"
, "697" : "YieldRedemptionPrice"
, "698" : "YieldRedemptionPriceType"
, "699" : "BenchmarkSecurityID"
, "7" : "BeginSeqNo"
, "70" : "AllocID"
, "700" : "ReversalIndicator"
, "701" : "YieldCalcDate"
, "702" : "NoPositions"
, "703" : "PosType"
, "704" : "LongQty"
, "705" : "ShortQty"
, "706" : "PosQtyStatus"
, "707" : "PosAmtType"
, "708" : "PosAmt"
, "709" : "PosTransType"
, "71" : "AllocTransType"
, "710" : "PosReqID"
, "711" : "NoUnderlyings"
, "712" : "PosMaintAction"
, "713" : "OrigPosReqRefID"
, "714" : "PosMaintRptRefID"
, "715" : "ClearingBusinessDate"
, "716" : "SettlSessID"
, "717" : "SettlSessSubID"
, "718" : "AdjustmentType"
, "719" : "ContraryInstructionIndicator"
, "72" : "RefAllocID"
, "720" : "PriorSpreadIndicator"
, "721" : "PosMaintRptID"
, "722" : "PosMaintStatus"
, "723" : "PosMaintResult"
, "724" : "PosReqType"
, "725" : "ResponseTransportType"
, "726" : "ResponseDestination"
, "727" : "TotalNumPosReports"
, "728" : "PosReqResult"
, "729" : "PosReqStatus"
, "73" : "NoOrders"
, "730" : "SettlPrice"
, "731" : "SettlPriceType"
, "732" : "UnderlyingSettlPrice"
, "733" : "UnderlyingSettlPriceType"
, "734" : "PriorSettlPrice"
, "735" : "NoQuoteQualifiers"
, "736" : "AllocSettlCurrency"
, "737" : "AllocSettlCurrAmt"
, "738" : "InterestAtMaturity"
, "739" : "LegDatedDate"
, "74" : "AvgPrxPrecision"
, "74" : "AvgPxPrecision"
, "740" : "LegPool"
, "741" : "AllocInterestAtMaturity"
, "742" : "AllocAccruedInterestAmt"
, "743" : "DeliveryDate"
, "744" : "AssignmentMethod"
, "745" : "AssignmentUnit"
, "746" : "OpenInterest"
, "747" : "ExerciseMethod"
, "748" : "TotNumTradeReports"
, "749" : "TradeRequestResult"
, "75" : "TradeDate"
, "750" : "TradeRequestStatus"
, "751" : "TradeReportRejectReason"
, "752" : "SideMultiLegReportingType"
, "753" : "NoPosAmt"
, "754" : "AutoAcceptIndicator"
, "755" : "AllocReportID"
, "756" : "NoNested2PartyIDs"
, "757" : "Nested2PartyID"
, "758" : "Nested2PartyIDSource"
, "759" : "Nested2PartyRole"
, "76" : "ExecBroker"
, "760" : "Nested2PartySubID"
, "761" : "BenchmarkSecurityIDSource"
, "762" : "SecuritySubType"
, "763" : "UnderlyingSecuritySubType"
, "764" : "LegSecuritySubType"
, "765" : "AllowableOneSidednessPct"
, "766" : "AllowableOneSidednessValue"
, "767" : "AllowableOneSidednessCurr"
, "768" : "NoTrdRegTimestamps"
, "769" : "TrdRegTimestamp"
, "77" : "OpenClose"
, "77" : "PositionEffect"
, "770" : "TrdRegTimestampType"
, "771" : "TrdRegTimestampOrigin"
, "772" : "ConfirmRefID"
, "773" : "ConfirmType"
, "774" : "ConfirmRejReason"
, "775" : "BookingType"
, "776" : "IndividualAllocRejCode"
, "777" : "SettlInstMsgID"
, "778" : "NoSettlInst"
, "779" : "LastUpdateTime"
, "78" : "NoAllocs"
, "780" : "AllocSettlInstType"
, "781" : "NoSettlPartyIDs"
, "782" : "SettlPartyID"
, "783" : "SettlPartyIDSource"
, "784" : "SettlPartyRole"
, "785" : "SettlPartySubID"
, "786" : "SettlPartySubIDType"
, "787" : "DlvyInstType"
, "788" : "TerminationType"
, "789" : "NextExpectedMsgSeqNum"
, "79" : "AllocAccount"
, "790" : "OrdStatusReqID"
, "791" : "SettlInstReqID"
, "792" : "SettlInstReqRejCode"
, "793" : "SecondaryAllocID"
, "794" : "AllocReportType"
, "795" : "AllocReportRefID"
, "796" : "AllocCancReplaceReason"
, "797" : "CopyMsgIndicator"
, "798" : "AllocAccountType"
, "799" : "OrderAvgPx"
, "8" : "BeginString"
, "80" : "AllocQty"
, "80" : "AllocShares"
, "800" : "OrderBookingQty"
, "801" : "NoSettlPartySubIDs"
, "802" : "NoPartySubIDs"
, "803" : "PartySubIDType"
, "804" : "NoNestedPartySubIDs"
, "805" : "NestedPartySubIDType"
, "806" : "NoNested2PartySubIDs"
, "807" : "Nested2PartySubIDType"
, "808" : "AllocIntermedReqType"
, "81" : "ProcessCode"
, "810" : "UnderlyingPx"
, "811" : "PriceDelta"
, "812" : "ApplQueueMax"
, "813" : "ApplQueueDepth"
, "814" : "ApplQueueResolution"
, "815" : "ApplQueueAction"
, "816" : "NoAltMDSource"
, "817" : "AltMDSourceID"
, "818" : "SecondaryTradeReportID"
, "819" : "AvgPxIndicator"
, "82" : "NoRpts"
, "820" : "TradeLinkID"
, "821" : "OrderInputDevice"
, "822" : "UnderlyingTradingSessionID"
, "823" : "UnderlyingTradingSessionSubID"
, "824" : "TradeLegRefID"
, "825" : "ExchangeRule"
, "826" : "TradeAllocIndicator"
, "827" : "ExpirationCycle"
, "828" : "TrdType"
, "829" : "TrdSubType"
, "83" : "RptSeq"
, "830" : "TransferReason"
, "831" : "AsgnReqID"
, "832" : "TotNumAssignmentReports"
, "833" : "AsgnRptID"
, "834" : "ThresholdAmount"
, "835" : "PegMoveType"
, "836" : "PegOffsetType"
, "837" : "PegLimitType"
, "838" : "PegRoundDirection"
, "839" : "PeggedPrice"
, "84" : "CxlQty"
, "840" : "PegScope"
, "841" : "DiscretionMoveType"
, "842" : "DiscretionOffsetType"
, "843" : "DiscretionLimitType"
, "844" : "DiscretionRoundDirection"
, "845" : "DiscretionPrice"
, "846" : "DiscretionScope"
, "847" : "TargetStrategy"
, "848" : "TargetStrategyParameters"
, "849" : "ParticipationRate"
, "85" : "NoDlvyInst"
, "850" : "TargetStrategyPerformance"
, "851" : "LastLiquidityInd"
, "852" : "PublishTrdIndicator"
, "853" : "ShortSaleReason"
, "854" : "QtyType"
, "855" : "SecondaryTrdType"
, "856" : "TradeReportType"
, "857" : "AllocNoOrdersType"
, "858" : "SharedCommission"
, "859" : "ConfirmReqID"
, "86" : "DlvyInst"
, "860" : "AvgParPx"
, "861" : "ReportedPx"
, "862" : "NoCapacities"
, "863" : "OrderCapacityQty"
, "864" : "NoEvents"
, "865" : "EventType"
, "866" : "EventDate"
, "867" : "EventPx"
, "868" : "EventText"
, "869" : "PctAtRisk"
, "87" : "AllocStatus"
, "870" : "NoInstrAttrib"
, "871" : "InstrAttribType"
, "872" : "InstrAttribValue"
, "873" : "DatedDate"
, "874" : "InterestAccrualDate"
, "875" : "CPProgram"
, "876" : "CPRegType"
, "877" : "UnderlyingCPProgram"
, "878" : "UnderlyingCPRegType"
, "879" : "UnderlyingQty"
, "88" : "AllocRejCode"
, "880" : "TrdMatchID"
, "881" : "SecondaryTradeReportRefID"
, "882" : "UnderlyingDirtyPrice"
, "883" : "UnderlyingEndPrice"
, "884" : "UnderlyingStartValue"
, "885" : "UnderlyingCurrentValue"
, "886" : "UnderlyingEndValue"
, "887" : "NoUnderlyingStips"
, "888" : "UnderlyingStipType"
, "889" : "UnderlyingStipValue"
, "89" : "Signature"
, "890" : "MaturityNetMoney"
, "891" : "MiscFeeBasis"
, "892" : "TotNoAllocs"
, "893" : "LastFragment"
, "894" : "CollReqID"
, "895" : "CollAsgnReason"
, "896" : "CollInquiryQualifier"
, "897" : "NoTrades"
, "898" : "MarginRatio"
, "899" : "MarginExcess"
, "9" : "BodyLength"
, "90" : "SecureDataLen"
, "900" : "TotalNetValue"
, "901" : "CashOutstanding"
, "902" : "CollAsgnID"
, "903" : "CollAsgnTransType"
, "904" : "CollRespID"
, "905" : "CollAsgnRespType"
, "906" : "CollAsgnRejectReason"
, "907" : "CollAsgnRefID"
, "908" : "CollRptID"
, "909" : "CollInquiryID"
, "91" : "SecureData"
, "910" : "CollStatus"
, "911" : "TotNumReports"
, "912" : "LastRptRequested"
, "913" : "AgreementDesc"
, "914" : "AgreementID"
, "915" : "AgreementDate"
, "916" : "StartDate"
, "917" : "EndDate"
, "918" : "AgreementCurrency"
, "919" : "DeliveryType"
, "92" : "BrokerOfCredit"
, "920" : "EndAccruedInterestAmt"
, "921" : "StartCash"
, "922" : "EndCash"
, "923" : "UserRequestID"
, "924" : "UserRequestType"
, "925" : "NewPassword"
, "926" : "UserStatus"
, "927" : "UserStatusText"
, "928" : "StatusValue"
, "929" : "StatusText"
, "93" : "SignatureLength"
, "930" : "RefCompID"
, "931" : "RefSubID"
, "932" : "NetworkResponseID"
, "933" : "NetworkRequestID"
, "934" : "LastNetworkResponseID"
, "935" : "NetworkRequestType"
, "936" : "NoCompIDs"
, "937" : "NetworkStatusResponseType"
, "938" : "NoCollInquiryQualifier"
, "939" : "TrdRptStatus"
, "94" : "EmailType"
, "940" : "AffirmStatus"
, "941" : "UnderlyingStrikeCurrency"
, "942" : "LegStrikeCurrency"
, "943" : "TimeBracket"
, "944" : "CollAction"
, "945" : "CollInquiryStatus"
, "946" : "CollInquiryResult"
, "947" : "StrikeCurrency"
, "948" : "NoNested3PartyIDs"
, "949" : "Nested3PartyID"
, "95" : "RawDataLength"
, "950" : "Nested3PartyIDSource"
, "951" : "Nested3PartyRole"
, "952" : "NoNested3PartySubIDs"
, "953" : "Nested3PartySubID"
, "954" : "Nested3PartySubIDType"
, "955" : "LegContractSettlMonth"
, "956" : "LegInterestAccrualDate"
, "957" : "NoStrategyParameters"
, "958" : "StrategyParameterName"
, "959" : "StrategyParameterType"
, "96" : "RawData"
, "960" : "StrategyParameterValue"
, "961" : "HostCrossID"
, "962" : "SideTimeInForce"
, "963" : "MDReportID"
, "964" : "SecurityReportID"
, "965" : "SecurityStatus"
, "966" : "SettleOnOpenFlag"
, "967" : "StrikeMultiplier"
, "968" : "StrikeValue"
, "969" : "MinPriceIncrement"
, "97" : "PossResend"
, "970" : "PositionLimit"
, "971" : "NTPositionLimit"
, "972" : "UnderlyingAllocationPercent"
, "973" : "UnderlyingCashAmount"
, "974" : "UnderlyingCashType"
, "975" : "UnderlyingSettlementType"
, "976" : "QuantityDate"
, "977" : "ContIntRptID"
, "978" : "LateIndicator"
, "979" : "InputSource"
, "98" : "EncryptMethod"
, "980" : "SecurityUpdateAction"
, "981" : "NoExpiration"
, "982" : "ExpirationQtyType"
, "982" : "ExpType"
, "983" : "ExpQty"
, "984" : "NoUnderlyingAmounts"
, "985" : "UnderlyingPayAmount"
, "986" : "UnderlyingCollectAmount"
, "987" : "UnderlyingSettlementDate"
, "988" : "UnderlyingSettlementStatus"
, "989" : "SecondaryIndividualAllocID"
, "99" : "StopPx"
, "990" : "LegReportID"
, "991" : "RndPx"
, "992" : "IndividualAllocType"
, "993" : "AllocCustomerCapacity"
, "994" : "TierCode"
, "996" : "UnitOfMeasure"
, "997" : "TimeUnit"
, "998" : "UnderlyingUnitOfMeasure"
, "999" : "LegUnitOfMeasure"
}
| bsd-3-clause | 1,577,408,565,323,987,200 | 29.983343 | 99 | 0.624138 | false | 2.380284 | false | false | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.0-py2.5.egg/sqlalchemy/databases/access.py | 1 | 15034 | # access.py
# Copyright (C) 2007 Paul Johnston, [email protected]
# Portions derived from jet2sql.py by Matt Keranen, [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import random
from sqlalchemy import sql, schema, types, exceptions, pool
from sqlalchemy.sql import compiler, expression
from sqlalchemy.engine import default, base
class AcNumeric(types.Numeric):
def result_processor(self, dialect):
return None
def bind_processor(self, dialect):
def process(value):
if value is None:
# Not sure that this exception is needed
return value
else:
return str(value)
return process
def get_col_spec(self):
return "NUMERIC"
class AcFloat(types.Float):
def get_col_spec(self):
return "FLOAT"
def bind_processor(self, dialect):
"""By converting to string, we can use Decimal types round-trip."""
def process(value):
if not value is None:
return str(value)
return None
return process
class AcInteger(types.Integer):
def get_col_spec(self):
return "INTEGER"
class AcTinyInteger(types.Integer):
def get_col_spec(self):
return "TINYINT"
class AcSmallInteger(types.Smallinteger):
def get_col_spec(self):
return "SMALLINT"
class AcDateTime(types.DateTime):
def __init__(self, *a, **kw):
super(AcDateTime, self).__init__(False)
def get_col_spec(self):
return "DATETIME"
class AcDate(types.Date):
def __init__(self, *a, **kw):
super(AcDate, self).__init__(False)
def get_col_spec(self):
return "DATETIME"
class AcText(types.TEXT):
def get_col_spec(self):
return "MEMO"
class AcString(types.String):
def get_col_spec(self):
return "TEXT" + (self.length and ("(%d)" % self.length) or "")
class AcUnicode(types.Unicode):
def get_col_spec(self):
return "TEXT" + (self.length and ("(%d)" % self.length) or "")
def bind_processor(self, dialect):
return None
def result_processor(self, dialect):
return None
class AcChar(types.CHAR):
def get_col_spec(self):
return "TEXT" + (self.length and ("(%d)" % self.length) or "")
class AcBinary(types.Binary):
def get_col_spec(self):
return "BINARY"
class AcBoolean(types.Boolean):
def get_col_spec(self):
return "YESNO"
def result_processor(self, dialect):
def process(value):
if value is None:
return None
return value and True or False
return process
def bind_processor(self, dialect):
def process(value):
if value is True:
return 1
elif value is False:
return 0
elif value is None:
return None
else:
return value and True or False
return process
class AcTimeStamp(types.TIMESTAMP):
def get_col_spec(self):
return "TIMESTAMP"
def descriptor():
return {'name':'access',
'description':'Microsoft Access',
'arguments':[
('user',"Database user name",None),
('password',"Database password",None),
('db',"Path to database file",None),
]}
class AccessExecutionContext(default.DefaultExecutionContext):
def _has_implicit_sequence(self, column):
if column.primary_key and column.autoincrement:
if isinstance(column.type, types.Integer) and not column.foreign_key:
if column.default is None or (isinstance(column.default, schema.Sequence) and \
column.default.optional):
return True
return False
def post_exec(self):
"""If we inserted into a row with a COUNTER column, fetch the ID"""
if self.compiled.isinsert:
tbl = self.compiled.statement.table
if not hasattr(tbl, 'has_sequence'):
tbl.has_sequence = None
for column in tbl.c:
if getattr(column, 'sequence', False) or self._has_implicit_sequence(column):
tbl.has_sequence = column
break
if bool(tbl.has_sequence):
# TBD: for some reason _last_inserted_ids doesn't exist here
# (but it does at corresponding point in mssql???)
#if not len(self._last_inserted_ids) or self._last_inserted_ids[0] is None:
self.cursor.execute("SELECT @@identity AS lastrowid")
row = self.cursor.fetchone()
self._last_inserted_ids = [int(row[0])] #+ self._last_inserted_ids[1:]
# print "LAST ROW ID", self._last_inserted_ids
super(AccessExecutionContext, self).post_exec()
const, daoEngine = None, None
class AccessDialect(default.DefaultDialect):
colspecs = {
types.Unicode : AcUnicode,
types.Integer : AcInteger,
types.Smallinteger: AcSmallInteger,
types.Numeric : AcNumeric,
types.Float : AcFloat,
types.DateTime : AcDateTime,
types.Date : AcDate,
types.String : AcString,
types.Binary : AcBinary,
types.Boolean : AcBoolean,
types.TEXT : AcText,
types.CHAR: AcChar,
types.TIMESTAMP: AcTimeStamp,
}
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
def type_descriptor(self, typeobj):
newobj = types.adapt_type(typeobj, self.colspecs)
return newobj
def __init__(self, **params):
super(AccessDialect, self).__init__(**params)
self.text_as_varchar = False
self._dtbs = None
def dbapi(cls):
import win32com.client
win32com.client.gencache.EnsureModule('{00025E01-0000-0000-C000-000000000046}', 0, 5, 0)
global const, daoEngine
if const is None:
const = win32com.client.constants
daoEngine = win32com.client.Dispatch('DAO.DBEngine.36')
import pyodbc as module
return module
dbapi = classmethod(dbapi)
def create_connect_args(self, url):
opts = url.translate_connect_args()
connectors = ["Driver={Microsoft Access Driver (*.mdb)}"]
connectors.append("Dbq=%s" % opts["database"])
user = opts.get("username", None)
if user:
connectors.append("UID=%s" % user)
connectors.append("PWD=%s" % opts.get("password", ""))
return [[";".join(connectors)], {}]
def create_execution_context(self, *args, **kwargs):
return AccessExecutionContext(self, *args, **kwargs)
def last_inserted_ids(self):
return self.context.last_inserted_ids
def do_execute(self, cursor, statement, params, **kwargs):
if params == {}:
params = ()
super(AccessDialect, self).do_execute(cursor, statement, params, **kwargs)
def _execute(self, c, statement, parameters):
try:
if parameters == {}:
parameters = ()
c.execute(statement, parameters)
self.context.rowcount = c.rowcount
except Exception, e:
raise exceptions.DBAPIError.instance(statement, parameters, e)
def has_table(self, connection, tablename, schema=None):
        # This approach seems to be more reliable than using DAO
try:
connection.execute('select top 1 * from [%s]' % tablename)
return True
except Exception, e:
return False
def reflecttable(self, connection, table, include_columns):
        # This is defined in the function, as it relies on win32com constants
        # that aren't imported until the dbapi method is called
if not hasattr(self, 'ischema_names'):
self.ischema_names = {
const.dbByte: AcBinary,
const.dbInteger: AcInteger,
const.dbLong: AcInteger,
const.dbSingle: AcFloat,
const.dbDouble: AcFloat,
const.dbDate: AcDateTime,
const.dbLongBinary: AcBinary,
const.dbMemo: AcText,
const.dbBoolean: AcBoolean,
const.dbText: AcUnicode, # All Access strings are unicode
}
# A fresh DAO connection is opened for each reflection
# This is necessary, so we get the latest updates
dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
try:
for tbl in dtbs.TableDefs:
if tbl.Name.lower() == table.name.lower():
break
else:
raise exceptions.NoSuchTableError(table.name)
for col in tbl.Fields:
coltype = self.ischema_names[col.Type]
if col.Type == const.dbText:
coltype = coltype(col.Size)
colargs = \
{
'nullable': not(col.Required or col.Attributes & const.dbAutoIncrField),
}
default = col.DefaultValue
if col.Attributes & const.dbAutoIncrField:
colargs['default'] = schema.Sequence(col.Name + '_seq')
elif default:
if col.Type == const.dbBoolean:
default = default == 'Yes' and '1' or '0'
colargs['default'] = schema.PassiveDefault(sql.text(default))
table.append_column(schema.Column(col.Name, coltype, **colargs))
# TBD: check constraints
# Find primary key columns first
for idx in tbl.Indexes:
if idx.Primary:
for col in idx.Fields:
thecol = table.c[col.Name]
table.primary_key.add(thecol)
if isinstance(thecol.type, AcInteger) and \
not (thecol.default and isinstance(thecol.default.arg, schema.Sequence)):
thecol.autoincrement = False
# Then add other indexes
for idx in tbl.Indexes:
if not idx.Primary:
if len(idx.Fields) == 1:
col = table.c[idx.Fields[0].Name]
if not col.primary_key:
col.index = True
col.unique = idx.Unique
else:
pass # TBD: multi-column indexes
for fk in dtbs.Relations:
if fk.ForeignTable != table.name:
continue
scols = [c.ForeignName for c in fk.Fields]
rcols = ['%s.%s' % (fk.Table, c.Name) for c in fk.Fields]
table.append_constraint(schema.ForeignKeyConstraint(scols, rcols))
finally:
dtbs.Close()
def table_names(self, connection, schema):
# A fresh DAO connection is opened for each reflection
# This is necessary, so we get the latest updates
dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
        names = [t.Name for t in dtbs.TableDefs if t.Name[:4] != "MSys" and t.Name[:4] != "~TMP"]
dtbs.Close()
return names
class AccessCompiler(compiler.DefaultCompiler):
def visit_select_precolumns(self, select):
"""Access puts TOP, it's version of LIMIT here """
s = select.distinct and "DISTINCT " or ""
if select.limit:
s += "TOP %s " % (select.limit)
if select.offset:
raise exceptions.InvalidRequestError('Access does not support LIMIT with an offset')
return s
def limit_clause(self, select):
"""Limit in access is after the select keyword"""
return ""
def binary_operator_string(self, binary):
"""Access uses "mod" instead of "%" """
return binary.operator == '%' and 'mod' or binary.operator
def label_select_column(self, select, column):
if isinstance(column, expression._Function):
return column.label(column.name + "_" + hex(random.randint(0, 65535))[2:])
else:
return super(AccessCompiler, self).label_select_column(select, column)
function_rewrites = {'current_date': 'now',
'current_timestamp': 'now',
'length': 'len',
}
def visit_function(self, func):
"""Access function names differ from the ANSI SQL names; rewrite common ones"""
func.name = self.function_rewrites.get(func.name, func.name)
super(AccessCompiler, self).visit_function(func)
def for_update_clause(self, select):
"""FOR UPDATE is not supported by Access; silently ignore"""
return ''
class AccessSchemaGenerator(compiler.SchemaGenerator):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + column.type.dialect_impl(self.dialect).get_col_spec()
# install a sequence if we have an implicit IDENTITY column
if (not getattr(column.table, 'has_sequence', False)) and column.primary_key and \
column.autoincrement and isinstance(column.type, types.Integer) and not column.foreign_key:
if column.default is None or (isinstance(column.default, schema.Sequence) and column.default.optional):
column.sequence = schema.Sequence(column.name + '_seq')
if not column.nullable:
colspec += " NOT NULL"
if hasattr(column, 'sequence'):
column.table.has_sequence = column
colspec = self.preparer.format_column(column) + " counter"
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
return colspec
class AccessSchemaDropper(compiler.SchemaDropper):
def visit_index(self, index):
self.append("\nDROP INDEX [%s].[%s]" % (index.table.name, index.name))
self.execute()
class AccessDefaultRunner(base.DefaultRunner):
pass
class AccessIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = compiler.RESERVED_WORDS.copy()
reserved_words.update(['value', 'text'])
def __init__(self, dialect):
super(AccessIdentifierPreparer, self).__init__(dialect, initial_quote='[', final_quote=']')
dialect = AccessDialect
dialect.poolclass = pool.SingletonThreadPool
dialect.statement_compiler = AccessCompiler
dialect.schemagenerator = AccessSchemaGenerator
dialect.schemadropper = AccessSchemaDropper
dialect.preparer = AccessIdentifierPreparer
dialect.defaultrunner = AccessDefaultRunner
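# Usage sketch (assumption, not part of this module): SQLAlchemy 0.4 resolves a
# dialect by its module name, so an engine for an .mdb file would be created
# roughly as create_engine('access:///C:/data/sales.mdb'); the database path is
# what ends up in the Dbq= connector built by create_connect_args above.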
| bsd-3-clause | 4,976,746,506,832,466,000 | 35.052758 | 115 | 0.581016 | false | 4.258924 | false | false | false |
a-rank/cassandra-tools | cassandra_tools/ui.py | 1 | 3032 | # Copyright 2016 Allan Rank
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
def prompt(choices, text, close=False):
items = ["{} {}".format(idx, c) for idx, c in enumerate(choices)]
if close:
items.append("c <close>")
click.echo()
click.secho("\n".join(items), bold=True)
try:
return int(click.prompt(text))
except ValueError:
return len(choices)
def format_columns(columns):
if columns:
return ", ".join([c.name for c in columns])
else:
return "no columns"
def print_header(text, bold=False):
click.echo()
line = "".join(["-" for _ in xrange(len(text) + 2)])
click.secho(line, bold=bold)
click.secho(" {}".format(text), bold=bold)
click.secho(line, bold=bold)
def print_dict(map, name):
if map:
print_header("{}: {}".format(name, len(map)))
items = "\n".join(map.keys())
click.echo(items)
def print_host(host):
click.echo("{}\tv{}\t{}\t{}".format(
host.broadcast_address,
host.release_version,
host.rack,
host.datacenter))
def print_keyspace(keyspace_meta):
print_header("{} {}".format("keyspace:", keyspace_meta.name), True)
replication_strategy = keyspace_meta.replication_strategy
if replication_strategy:
click.echo("replication:\t{}".format(replication_strategy.export_for_schema()))
click.echo("durable writes:\t{}".format(keyspace_meta.durable_writes))
print_dict(keyspace_meta.tables, "tables")
print_dict(keyspace_meta.views, "views")
print_dict(keyspace_meta.indexes, "indexes")
print_dict(keyspace_meta.user_types, "user types")
print_dict(keyspace_meta.functions, "functions")
print_dict(keyspace_meta.aggregates, "aggregates")
def print_table(table_meta):
def max_column(column):
return len(column.name)
print_header("table: {}.{}".format(table_meta.keyspace_name, table_meta.name), True)
click.echo("primary key:\t(({}), {})".format(format_columns(table_meta.partition_key),
format_columns(table_meta.clustering_key)))
columns = table_meta.columns.values()
columns_text = "\n".join(["{}\t{}".format(c.name, c.cql_type) for c in columns])
max_len_column = max(columns, key=max_column)
print_header("{}: {}".format("columns", len(columns)))
click.echo(columns_text.expandtabs(len(max_len_column.name) + 2))
print_dict(table_meta.views, "views")
print_dict(table_meta.indexes, "indexes")
| apache-2.0 | -2,171,579,964,705,164,800 | 32.318681 | 92 | 0.652045 | false | 3.618138 | false | false | false |
Grumpy-Mike/Mikes-Pi-Bakery | CurveBall/curvedBall.py | 1 | 3415 | # Curved Ball - a game for the Pi Glow board
# By Mike Cook - March 2015
import time, random, sys
from smbus import SMBus
import wiringpi2 as io
# command register addresses for the SN3218 IC used in PiGlow
CMD_ENABLE_OUTPUT = 0x00
CMD_ENABLE_LEDS = 0x13
CMD_SET_PWM_VALUES = 0x01
CMD_UPDATE = 0x16
SN3218 = 0x54 # i2c address of SN3218 IC
bus = None
try :
io.wiringPiSetupGpio()
except :
print"start IDLE with 'gksudo idle' from command line"
sys.exit()
pinList= [7,8,25] # GPIO pins for switches
lights = [0x00 for i in range(0,18)] # the LED brightness list
red = [0,6,17] # red LEDs
orange = [1,7,16] # orange LEDs
yellow = [2,8,15] # yellow LEDs
green = [3,5,13] # green LEDs
blue = [14,4,11] # blue LEDs
white = [12,9,10] # white LEDs
triangleIn = [red,orange,yellow,green,blue,white]
triangleOut = [white,blue,green,yellow,orange,red]
speed = 0.03 # delay is twice this
returnSpeed = 0.1 # for hit back
score = 0
def main():
initGPIO()
busInit()
while True: # repeat forever
wipe()
updateLEDs(lights)
while scanSwitches() != -1: #make sure fingers off
pass
pitch()
def pitch(): # throw the ball
global score
time.sleep(1.0) # delay before the throw - try making this random
arm = random.randint(0,2) # direction of curved ball
bat = False
push = -1
for triangle in range(0,5):
wipe() # clear all LEDs in the list
if bat:
lights[white[push]] = 0x20 # turn on bat LED
lights[triangleIn[triangle][arm]] = 0x80
updateLEDs(lights)
time.sleep(speed)
if not bat: # no switch pressed so far so look for one
push = scanSwitches() # switched pressed?
if push != -1:
bat = True # no more looking at switches
score = 6 - triangle # sooner you see it the higher the score
else:
lights[white[push]] = 0x20
updateLEDs(lights)
time.sleep(speed)
if arm == push:
print "hit - score ",score
for triangle in range(0,6): # hit it back
wipe()
lights[triangleOut[triangle][arm]] = 0x80
updateLEDs(lights)
time.sleep(returnSpeed)
time.sleep(0.7)
def initGPIO(): # set up the GPIO pins
for pin in range (0,3):
io.pinMode(pinList[pin],0) # make pin into an input
io.pullUpDnControl(pinList[pin],2) # enable pull up
def scanSwitches(): # look at each pin in turn
down = -1 # default return value means no switch pressed
for pin in range (0,3):
if io.digitalRead(pinList[pin]) == 0:
down = pin
return down
def busInit(): # start up the I2C bus and enable the outputs on the SN3218
global bus
bus = SMBus(1)
bus.write_byte_data(SN3218,CMD_ENABLE_OUTPUT, 0x01)
bus.write_i2c_block_data(SN3218, CMD_ENABLE_LEDS, [0xFF, 0xFF, 0xFF])
def updateLEDs(lights): # update the LEDs to reflect the lights list
bus.write_i2c_block_data(SN3218, CMD_SET_PWM_VALUES, lights)
bus.write_byte_data(SN3218,CMD_UPDATE, 0xFF)
def wipe(): # clear the lights list
global lights
for i in range(0,18):
lights[i] = 0
# Main program logic:
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
# set all the LEDs to "off" when Ctrl+C is pressed before exiting
wipe()
updateLEDs(lights)
| gpl-2.0 | 4,215,678,575,726,886,400 | 30.330275 | 74 | 0.622255 | false | 3.096102 | false | false | false |
gamajr/EZNCoder | engine/generator.py | 1 | 3020 | # -*- coding: utf-8 -*-
import string
from infoparser import MInfo
class MEGenerator():
"""Classe que gera linhas de comando para o MEncoder."""
def __init__(self):
self._cut_cmd = string.Template("")
self.info = MInfo()
self._supported_ops = ['sub','wmv2avi','avixvid']
def gen_convert_line(self, media_file, operation):
        #TODO: Write a docstring
if operation == 'sub':
resp = self._subtitle(media_file)
elif operation == 'wmv2avi':
resp = self._wmv2avi(media_file)
elif operation == 'avixvid':
resp = self._avixvid(media_file)
else:
resp = None
return resp
def gen_cut_line(self, media_file, cut_point=None):
"""Gera uma lista com as linhas de comando para cortar um video
atraves do MEncoder. Se os dois argumentos forem None, os video e
dividido em dois."""
pass
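        # --- Hedged sketch (assumption, not the original author's code) ---
        # MEncoder's real -ss / -endpos options can cut a clip, so one plausible
        # implementation, taking cut_point as an "hh:mm:ss" end position, is:
        # cmd = string.Template("mencoder -ovc copy -oac copy -endpos $cut "
        #                       "-o $conv_file $orig_file")
        # return [' '.join(cmd.substitute({'cut': cut_point,
        #                                  'conv_file': media_file[:-4] + '_cut.avi',
        #                                  'orig_file': media_file}).split())]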
def _subtitle(self, media_file):
cmd = string.Template("""mencoder -oac $audio_opts -ovc xvid -xvidencopts
bitrate=$br -sub $srt_file -subpos 90 -subfont-text-scale 3
-subfont-outline 2 -subcp ISO-8859-1 -sub-bg-alpha 200 -o $conv_file $orig_file""")
base_name=media_file[:-4]
self.info.parse_data(base_name+'.avi')
kbps = int(self.info.get_vdata('Bit rate').split()[0])
if kbps % 50 != 0:
br = str(kbps + (50 - kbps % 50))
else:
br = str(kbps)
audio_opts=''
if self.info.get_adata('Codec ID/Hint')=='MP3':
audio_opts = 'copy'
else:
audio_opts = 'mp3lame -lameopts cbr:mode=2:br=192'
return ' '.join(cmd.substitute({'audio_opts':audio_opts, 'br':br,
'srt_file': base_name+'.srt', 'conv_file':base_name+'_sub.avi',
'orig_file':base_name+'.avi'}).split())
def _wmv2avi(self, media_file):
cmd = string.Template("""mencoder -oac mp3lame -lameopts cbr:mode=2:br=64
-ovc lavc -ofps 23.976 -o $conv_file $orig_file""")
base_name=media_file[:-4]
return ' '.join(cmd.substitute({'conv_file':base_name+'_conv.avi', 'orig_file':base_name+'.wmv'}).split())
def _avixvid(self, media_file):
cmd = string.Template("""mencoder -oac $audio_opts -ovc xvid -xvidencopts
bitrate=850 -o $conv_file $orig_file""")
base_name=media_file[:-4]
self.info.parse_data(base_name+'.avi')
audio_opts=''
if self.info.get_adata('Codec ID/Hint')=='MP3':
audio_opts = 'copy'
else:
audio_opts = 'mp3lame -lameopts cbr:mode=2:br=192'
return ' '.join(cmd.substitute({'audio_opts':audio_opts,
'conv_file':base_name+'_conv.avi', 'orig_file':base_name+'.avi'}).split())
def get_supported_operations(self):
return self._supported_ops
#TODO: Implement gen_cut_line!!!!
#mencoder infile.wmv -ofps 23.976 -ovc lavc -oac copy -o outfile.avi | gpl-3.0 | 5,803,673,885,986,638,000 | 38.75 | 114 | 0.568543 | false | 3.25431 | false | false | false |
indico/indico | indico/modules/events/sessions/operations.py | 1 | 6547 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import session
from indico.core import signals
from indico.core.db import db
from indico.modules.events.logs.models.entries import EventLogKind, EventLogRealm
from indico.modules.events.logs.util import make_diff_log
from indico.modules.events.models.events import EventType
from indico.modules.events.sessions import COORDINATOR_PRIV_SETTINGS, COORDINATOR_PRIV_TITLES, logger, session_settings
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.sessions import Session
from indico.util.i18n import orig_string
def create_session(event, data):
"""
Create a new session with the information passed in the `data` argument.
"""
event_session = Session(event=event)
event_session.populate_from_dict(data)
db.session.flush()
event.log(EventLogRealm.management, EventLogKind.positive, 'Sessions',
f'Session "{event_session.title}" has been created', session.user,
meta={'session_id': event_session.id})
logger.info('Session %s created by %s', event_session, session.user)
return event_session
def create_session_block(session_, data):
block = SessionBlock(session=session_)
block.populate_from_dict(data)
db.session.flush()
session_.event.log(EventLogRealm.management, EventLogKind.positive, 'Sessions',
'Session block "{}" for session "{}" has been created'
.format(block.title, session_.title), session.user,
meta={'session_block_id': block.id})
logger.info('Session block %s created by %s', block, session.user)
return block
def update_session(event_session, data):
"""Update a session based on the information in the `data`."""
event_session.populate_from_dict(data)
db.session.flush()
signals.event.session_updated.send(event_session)
event_session.event.log(EventLogRealm.management, EventLogKind.change, 'Sessions',
f'Session "{event_session.title}" has been updated', session.user,
meta={'session_id': event_session.id})
logger.info('Session %s modified by %s', event_session, session.user)
def _delete_session_timetable_entries(event_session):
for block in event_session.blocks:
for contribution in block.contributions:
if contribution.timetable_entry:
db.session.delete(contribution.timetable_entry)
if not block.timetable_entry:
continue
for child_block in block.timetable_entry.children:
db.session.delete(child_block)
db.session.delete(block.timetable_entry)
def delete_session(event_session):
"""Delete session from the event."""
event_session.is_deleted = True
for contribution in event_session.contributions[:]:
contribution.session = None
_delete_session_timetable_entries(event_session)
signals.event.session_deleted.send(event_session)
event_session.event.log(EventLogRealm.management, EventLogKind.negative, 'Sessions',
f'Session "{event_session.title}" has been deleted', session.user,
meta={'session_id': event_session.id})
logger.info('Session %s deleted by %s', event_session, session.user)
def update_session_block(session_block, data):
"""Update a session block with data passed in the `data` argument."""
from indico.modules.events.timetable.operations import update_timetable_entry
start_dt = data.pop('start_dt', None)
if start_dt is not None:
session_block.timetable_entry.move(start_dt)
update_timetable_entry(session_block.timetable_entry, {'start_dt': start_dt})
session_block.populate_from_dict(data)
db.session.flush()
signals.event.session_block_updated.send(session_block)
session_block.event.log(EventLogRealm.management, EventLogKind.change, 'Sessions',
f'Session block "{session_block.title}" has been updated', session.user,
meta={'session_block_id': session_block.id})
logger.info('Session block %s modified by %s', session_block, session.user)
def delete_session_block(session_block):
from indico.modules.events.contributions.operations import delete_contribution
from indico.modules.events.timetable.operations import delete_timetable_entry
session_ = session_block.session
event = session_.event
unschedule_contribs = session_.event.type_ == EventType.conference
for contribution in session_block.contributions[:]:
contribution.session_block = None
if unschedule_contribs:
delete_timetable_entry(contribution.timetable_entry, log=False)
else:
delete_contribution(contribution)
for entry in session_block.timetable_entry.children[:]:
delete_timetable_entry(entry, log=False)
delete_timetable_entry(session_block.timetable_entry, log=False)
signals.event.session_block_deleted.send(session_block)
if session_block in session_.blocks:
session_.blocks.remove(session_block)
if not session_.blocks and session_.event.type != 'conference':
delete_session(session_)
db.session.flush()
event.log(EventLogRealm.management, EventLogKind.negative, 'Sessions',
f'Session block "{session_block.title}" has been deleted', session.user,
meta={'session_block_id': session_block.id})
logger.info('Session block %s deleted by %s', session_block, session.user)
def update_session_coordinator_privs(event, data):
changes = {}
for priv, enabled in data.items():
setting = COORDINATOR_PRIV_SETTINGS[priv]
if session_settings.get(event, setting) == enabled:
continue
session_settings.set(event, setting, enabled)
changes[priv] = (not enabled, enabled)
db.session.flush()
logger.info('Session coordinator privs of event %r updated with %r by %r', event, data, session.user)
if changes:
log_fields = {priv: orig_string(title) for priv, title in COORDINATOR_PRIV_TITLES.items()}
event.log(EventLogRealm.management, EventLogKind.change, 'Sessions', 'Coordinator privileges updated',
session.user, data={'Changes': make_diff_log(changes, log_fields)})
| mit | 4,079,880,723,672,840,700 | 45.764286 | 119 | 0.691462 | false | 3.88546 | false | false | false |
umlfri/umlfri2 | umlfri2/qtgui/base/contextmenu.py | 1 | 1680 | from functools import partial
from PyQt5.QtGui import QIcon, QKeySequence
from PyQt5.QtWidgets import QMenu, QAction
from umlfri2.application import Application
from umlfri2.qtgui.base import image_loader
class ContextMenu(QMenu):
def _add_menu_item(self, icon, label, shortcut, action=None, sub_menu=None):
ret = QAction(label, sub_menu or self)
if shortcut is not None:
ret.setShortcut(QKeySequence(shortcut))
if isinstance(icon, str):
ret.setIcon(QIcon.fromTheme(icon))
elif isinstance(icon, QIcon):
ret.setIcon(icon)
if action is None:
ret.setEnabled(False)
else:
ret.triggered.connect(action)
(sub_menu or self).addAction(ret)
return ret
def _add_type_menu_item(self, type, action=None, sub_menu=None, format="{0}"):
translation = type.metamodel.get_translation(Application().language.current_language)
ret = QAction(format.format(translation.translate(type)), sub_menu or self)
ret.setIcon(image_loader.load_icon(type.icon))
if action is None:
ret.setEnabled(False)
else:
ret.triggered.connect(partial(action, type))
(sub_menu or self).addAction(ret)
return ret
def _add_sub_menu_item(self, label, enabled=True, sub_menu=None):
ret = QAction(label, sub_menu or self)
menu = QMenu()
ret.setMenu(menu)
ret.setEnabled(enabled)
(sub_menu or self).addAction(ret)
return menu
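    # Usage sketch (assumption, not from this module): a concrete context menu
    # would typically compose these helpers in its __init__, for example:
    #   self._add_menu_item(None, _("Delete"), QKeySequence.Delete, self.__delete)
    #   sub = self._add_sub_menu_item(_("Add element"))
    #   self._add_type_menu_item(element_type, self.__add_element, sub_menu=sub)
    # where __delete, __add_element and element_type are hypothetical names.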
| gpl-3.0 | 3,216,306,957,850,884,000 | 28.473684 | 93 | 0.591667 | false | 4.07767 | false | false | false |
JJMinton/conferenceTimer | file_change_handler.py | 1 | 2937 | import path
import asyncio
from datetime import datetime, timedelta
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from read_schedule import read_schedule
import config
from config import logging
class FileChangeHandler(PatternMatchingEventHandler):
def __init__(self, watch_file, controller_function, args=[], loop=None):
PatternMatchingEventHandler.__init__(self, patterns=[watch_file])
self.controller_function = controller_function
self.args = args
self.loop = asyncio.SelectorEventLoop() if loop is None else loop
self.async_task = None
self.watch_file = watch_file
def process(self, schedule_file_name=None):
if schedule_file_name is None:
schedule_file_name = self.watch_file
        logging.debug('FileChangeHandler.process: Processing {}'.format(schedule_file_name))
schedule = read_schedule(schedule_file_name)
#Stop current run_schedule
if self.async_task is not None:
logging.debug('Stopping previous async_task')
self.async_task.cancel()
asyncio.wait_for(self.async_task, 100, loop=self.loop)
del self.async_task
self.async_task = None
#Start new run_schedule
logging.debug('FileChangeHandler.process: Starting new async_task')
self.async_task = asyncio.ensure_future(self.controller_function(schedule, self.loop, *self.args), loop=self.loop)
logging.debug('FileChangeHandler.process: Return from processing')
return
#ensure immediate return
def on_created(self, event):
logging.info('FileChangeHandler.on_created: File creation detected')
self.process(event.src_path)
def on_modified(self, event):
logging.info('FileChangeHandler.on_modified: File change detected')
self.process(event.src_path)
if __name__=="__main__":
if config.LIGHT_DEBUG:
from light_controls import debug
debug()
from schedule_handler import Schedule_Runner
schedule_runner = Schedule_Runner()
loop = schedule_runner.controller.loop
file_change_handler = FileChangeHandler(config.SCHEDULE_FILE, schedule_runner.run_schedule, loop=loop)
obs = Observer();
obs.schedule(file_change_handler, path.Path(config.SCHEDULE_FILE).abspath().dirname()) #Define what file to watch and how
obs.start() #start watching file
file_change_handler.process() #start schedule running
try:
while True:
#This does nothing except step through the loops (why is this necessary?)
file_change_handler.loop.run_until_complete(asyncio.ensure_future(asyncio.sleep(0.1, loop=file_change_handler.loop), loop=file_change_handler.loop)) #arbitrary sleep time here I think. Could it be forever?
except KeyboardInterrupt:
obs.stop();
#finally:
# obs.join();
| gpl-3.0 | -4,415,477,771,282,434,000 | 39.232877 | 217 | 0.688798 | false | 4.113445 | false | false | false |
HPI-SWA-Lab/RSqueak | rsqueakvm/test/test_socket_primitives.py | 1 | 6424 | import py
import time
from rsqueakvm import constants
from rsqueakvm.model.compiled_methods import W_PreSpurCompiledMethod
from rsqueakvm.model.variable import W_BytesObject
from rsqueakvm.primitives import prim_table
from rsqueakvm.primitives.constants import EXTERNAL_CALL
from rsqueakvm.error import PrimitiveFailedError
from rsqueakvm.plugins import socket_plugin as socket
from .util import create_space, copy_to_module, cleanup_module
from .test_primitives import mock
def setup_module():
space = create_space(bootstrap = True)
space.set_system_attribute(constants.SYSTEM_ATTRIBUTE_IMAGE_NAME_INDEX, "IMAGENAME")
wrap = space.w
bootstrap_class = space.bootstrap_class
new_frame = space.make_frame
copy_to_module(locals(), __name__)
def teardown_module():
cleanup_module(__name__)
IMAGENAME = "anImage.image"
def _prim(space, name, module, stack, context = None):
interp, w_frame, argument_count = mock(space, stack, context)
orig_stack = list(w_frame.as_context_get_shadow(space).stack())
prim_meth = W_PreSpurCompiledMethod(space, 0, header=17045052)
prim_meth._primitive = EXTERNAL_CALL
prim_meth.argsize = argument_count - 1
descr = space.wrap_list([space.wrap_string(module), space.wrap_string(name)])
prim_meth.literalatput0(space, 1, descr)
def call():
prim_table[EXTERNAL_CALL](interp, w_frame.as_context_get_shadow(space), argument_count-1, prim_meth)
return w_frame, orig_stack, call
def prim(name, module=None, stack = None, context = None):
if module is None: module = "SocketPlugin"
if stack is None: stack = [space.w_nil]
w_frame, orig_stack, call = _prim(space, name, module, stack, context)
call()
res = w_frame.as_context_get_shadow(space).pop()
s_frame = w_frame.as_context_get_shadow(space)
assert not s_frame.stackdepth() - s_frame.tempsize() # check args are consumed
return res
def prim_fails(name, module, stack):
    w_frame, orig_stack, call = _prim(space, name, module, stack)
with py.test.raises(PrimitiveFailedError):
call()
assert w_frame.as_context_get_shadow(space).stack() == orig_stack
def test_vmdebugging():
assert prim("isRSqueak", "VMDebugging") is space.w_true
def test_resolver_start_lookup():
assert prim("primitiveResolverStartNameLookup", "SocketPlugin",
[space.w_nil, space.wrap_string("google.com")]) == space.w_nil
def test_resolver_lookup_result():
assert prim("primitiveResolverStartNameLookup", "SocketPlugin",
[space.w_nil, space.wrap_string("google.com")]) == space.w_nil
w_res = prim("primitiveResolverNameLookupResult", "SocketPlugin")
assert isinstance(w_res, W_BytesObject)
def test_socket_create():
assert isinstance(prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15]), socket.W_SocketHandle)
assert isinstance(prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 0, 0, 8000, 8000, 13, 14, 15]), socket.W_SocketHandle)
def test_socket_status():
handle = prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15])
assert prim("primitiveSocketConnectionStatus", "SocketPlugin",
[space.w_nil, handle]).value == 0
assert prim("primitiveSocketConnectionStatus", "SocketPlugin",
[space.w_nil, 3200]).value == -1
def test_socket_connect():
handle = prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15])
prim("primitiveResolverStartNameLookup", "SocketPlugin",
[space.w_nil, space.wrap_string("google.com")])
w_host = prim("primitiveResolverNameLookupResult", "SocketPlugin")
assert prim("primitiveSocketConnectToPort", "SocketPlugin",
[space.w_nil, handle, w_host, space.wrap_int(80)])
assert prim("primitiveSocketConnectionStatus", "SocketPlugin",
[space.w_nil, handle]).value == 2
def test_socket_ready():
handle = prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15])
prim("primitiveResolverStartNameLookup", "SocketPlugin",
[space.w_nil, space.wrap_string("google.com")])
w_host = prim("primitiveResolverNameLookupResult", "SocketPlugin")
assert prim("primitiveSocketConnectToPort", "SocketPlugin",
[space.w_nil, handle, w_host, space.wrap_int(80)])
assert prim("primitiveSocketConnectionStatus", "SocketPlugin",
[space.w_nil, handle]).value == 2
time.sleep(0.5)
assert prim("primitiveSocketReceiveDataAvailable", "SocketPlugin",
[space.w_nil, handle]) == space.w_false
_http_get = """
GET / HTTP/1.1
User-Agent: curl/7.37.1
Host: www.google.de
Accept: */*
"""
def test_socket_send_and_read_into():
handle = prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15])
prim("primitiveResolverStartNameLookup", "SocketPlugin",
[space.w_nil, space.wrap_string("google.com")])
w_host = prim("primitiveResolverNameLookupResult", "SocketPlugin")
assert prim("primitiveSocketConnectToPort", "SocketPlugin",
[space.w_nil, handle, w_host, space.wrap_int(80)])
assert prim("primitiveSocketConnectionStatus", "SocketPlugin",
[space.w_nil, handle]).value == 2
assert prim("primitiveSocketSendDataBufCount", "SocketPlugin",
[space.w_nil, handle, space.wrap_string(_http_get),
space.wrap_int(1), space.wrap_int(len(_http_get))]).value == len(_http_get)
time.sleep(0.5)
assert prim("primitiveSocketReceiveDataAvailable", "SocketPlugin",
[space.w_nil, handle]) == space.w_true
w_str = space.wrap_string("_hello")
assert prim("primitiveSocketReceiveDataBufCount", "SocketPlugin",
[space.w_nil, handle, w_str, space.wrap_int(2), space.wrap_int(5)]).value == 5
assert w_str.unwrap_string(None) == "_HTTP/"
def test_socket_destroy():
handle = prim("primitiveSocketCreate3Semaphores", "SocketPlugin",
[space.w_nil, 2, 0, 8000, 8000, 13, 14, 15])
assert prim("primitiveSocketDestroy", "SocketPlugin",
[space.w_nil, handle]).value == -1
| bsd-3-clause | -5,544,249,213,781,126,000 | 43.611111 | 108 | 0.669988 | false | 3.444504 | true | false | false |
mlabru/ptracks | view/piloto/dlg_aproximacao_ui.py | 1 | 2854 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './dlg_aproximacao.ui'
#
# Created: Tue Dec 6 11:23:22 2016
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CDlgAproximacao(object):
def setupUi(self, CDlgAproximacao):
CDlgAproximacao.setObjectName(_fromUtf8("CDlgAproximacao"))
CDlgAproximacao.resize(259, 151)
self.verticalLayout_2 = QtGui.QVBoxLayout(CDlgAproximacao)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.gbx_aproximacao = QtGui.QGroupBox(CDlgAproximacao)
self.gbx_aproximacao.setObjectName(_fromUtf8("gbx_aproximacao"))
self.verticalLayout = QtGui.QVBoxLayout(self.gbx_aproximacao)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.cbx_apx = QtGui.QComboBox(self.gbx_aproximacao)
self.cbx_apx.setObjectName(_fromUtf8("cbx_apx"))
self.verticalLayout.addWidget(self.cbx_apx)
self.verticalLayout_2.addWidget(self.gbx_aproximacao)
self.lbl_comando = QtGui.QLabel(CDlgAproximacao)
self.lbl_comando.setStyleSheet(_fromUtf8("background-color:rgb(0, 0, 0);\n"
"color:rgb(0, 190, 0)"))
self.lbl_comando.setObjectName(_fromUtf8("lbl_comando"))
self.verticalLayout_2.addWidget(self.lbl_comando)
self.bbx_aproximacao = QtGui.QDialogButtonBox(CDlgAproximacao)
self.bbx_aproximacao.setOrientation(QtCore.Qt.Horizontal)
self.bbx_aproximacao.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.bbx_aproximacao.setObjectName(_fromUtf8("bbx_aproximacao"))
self.verticalLayout_2.addWidget(self.bbx_aproximacao)
self.retranslateUi(CDlgAproximacao)
QtCore.QObject.connect(self.bbx_aproximacao, QtCore.SIGNAL(_fromUtf8("accepted()")), CDlgAproximacao.accept)
QtCore.QObject.connect(self.bbx_aproximacao, QtCore.SIGNAL(_fromUtf8("rejected()")), CDlgAproximacao.reject)
QtCore.QMetaObject.connectSlotsByName(CDlgAproximacao)
def retranslateUi(self, CDlgAproximacao):
CDlgAproximacao.setWindowTitle(_translate("CDlgAproximacao", "Aproximação", None))
self.gbx_aproximacao.setTitle(_translate("CDlgAproximacao", "Aproximações", None))
self.lbl_comando.setText(_translate("CDlgAproximacao", "APX 1001", None))
| gpl-3.0 | -5,954,425,699,437,925,000 | 46.5 | 116 | 0.72386 | false | 3.097826 | false | false | false |
sharad/calibre | src/calibre/gui2/dialogs/confirm_delete_location.py | 1 | 1511 | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal [email protected]' \
'2010, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
from functools import partial
from calibre.gui2.dialogs.confirm_delete_location_ui import Ui_Dialog
from PyQt5.Qt import QDialog, Qt, QPixmap, QIcon
class Dialog(QDialog, Ui_Dialog):
def __init__(self, msg, name, parent):
QDialog.__init__(self, parent)
self.setupUi(self)
self.loc = None
self.msg.setText(msg)
self.name = name
self.buttonBox.setFocus(Qt.OtherFocusReason)
self.button_lib.clicked.connect(partial(self.set_loc, 'lib'))
self.button_device.clicked.connect(partial(self.set_loc, 'dev'))
self.button_both.clicked.connect(partial(self.set_loc, 'both'))
def set_loc(self, loc):
self.loc = loc
self.accept()
def choice(self):
return self.loc
def break_cycles(self):
for x in ('lib', 'device', 'both'):
b = getattr(self, 'button_'+x)
try:
b.clicked.disconnect()
except:
pass
def confirm_location(msg, name, parent=None, pixmap='dialog_warning.png'):
d = Dialog(msg, name, parent)
d.label.setPixmap(QPixmap(I(pixmap)))
d.setWindowIcon(QIcon(I(pixmap)))
d.resize(d.sizeHint())
ret = d.exec_()
d.break_cycles()
if ret == d.Accepted:
return d.choice()
return None
| gpl-3.0 | -5,930,206,761,042,180,000 | 28.627451 | 74 | 0.606883 | false | 3.387892 | false | false | false |
neiljdo/readysaster-icannhas-web | readysaster-icannhas-web/users/views.py | 1 | 2241 | # -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from .forms import UserForm
# Import the customized User model
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
form_class = UserForm
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class FetchFloodMapView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
def get_context_data(self, **kwargs):
context_data = super(FetchFloodMapView, self).get_context_data(**kwargs)
# fetch flood maps using NOAH API
municipality = self.object.lgu.municipality
floodmaps = municipality.get_floodmaps()
# add newly fetched floodmaps to context
context_data.update({
'floodmaps': floodmaps
})
return context_data
| bsd-3-clause | -6,875,853,870,313,985,000 | 28.103896 | 80 | 0.707274 | false | 4.119485 | false | false | false |
glamp/coffe2py | main.py | 1 | 1282 | import sys
from IPython.core.interactiveshell import InteractiveShell
import pandasjson as json
import StringIO
if __name__=="__main__":
mode = "ipython"
line = sys.stdin.readline()
shell = InteractiveShell()
while line:
# explicitly write to stdout
sys.stdout.write(line)
sys.stdout.flush()
# handle incoming data, parse it, and redirect
# stdout so it doesn't interfere
line = sys.stdin.readline()
data = json.loads(line)
codeOut = StringIO.StringIO()
sys.stdout = codeOut
try:
code = data["code"]
if data.get("autocomplete")==True:
_, completions = shell.complete(code)
print json.dumps(completions)
elif code.startswith("print"):
#exec(code)
shell.ex(code)
else:
try:
#print repr(eval(code))
print repr(shell.ev(code))
except:
#exec(code)
shell.ex(code)
except Exception, e:
pass
sys.stdout = sys.__stdout__
data["result"] = codeOut.getvalue()
sys.stdout.write(json.dumps(data) + "\n")
sys.stdout.flush() | bsd-2-clause | 5,789,231,768,680,157,000 | 30.292683 | 58 | 0.522621 | false | 4.390411 | false | false | false |
capntransit/carfree-council | cfcensus2010.py | 1 | 1828 | import sys, os, json, time
import pandas as pd
BOROCODE = {'61' : '1', '05' : '2', '47': '3', '81' : '4', '85': '5'}
if (len(sys.argv) < 2):
print ("Usage: cfcensus.py census.csv districts.json")
exit()
censusfile = sys.argv[1]
councilfile = sys.argv[2]
TRACTCOL = 'BoroCT' # rename this for 2000 census
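# Convert a full census GEOID (e.g. 36061000100) into BoroCT form: characters [3:5]
# hold the county FIPS digits that map to the borough code, the remainder is the 6-digit tract.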
def boroCT (id2):
boro = BOROCODE[str(id2)[3:5]]
tract = str(id2)[5:]
return boro + tract
for (f) in ([censusfile, councilfile]):
if (not os.path.isfile(f)):
print ("File " + f + " is not readable")
exit()
try:
vehDf = pd.read_csv(
censusfile,
skiprows=[1]
)
except Exception as e:
print ("Unable to read census file " + censusfile + ": {0}".format(e))
exit()
try:
with open(councilfile) as councilfo:
councilData = json.load(councilfo)
except Exception as e:
print ("Unable to read council file " + councilfile+": {0}".format(e))
exit()
vehDf['pctNoVeh'] = vehDf['HD01_VD03'].astype('int') / vehDf['HD01_VD01'].astype('int')
vehDf[TRACTCOL] = vehDf['GEO.id2'].apply(boroCT)
vehDf2 = pd.DataFrame(vehDf[[TRACTCOL, 'HD01_VD01', 'HD01_VD03', 'pctNoVeh']]).set_index(TRACTCOL)
f = 0
total = {}
noVeh = {}
councilDistricts = set()
for (t, c) in councilData.items():
for (d) in c:
councilDistricts.add(d)
try:
total[d] = total.get(d, 0) + c[d] * vehDf2.loc[str(t)]['HD01_VD01']
noVeh[d] = noVeh.get(d, 0) + c[d] * vehDf2.loc[str(t)]['HD01_VD03']
except KeyError as e:
print("No entry for census tract " + str(t))
for (d) in sorted(councilDistricts, key=int):
print (','.join([
d,
str(int(total[d])),
str(int(noVeh[d])),
str(round((noVeh[d] / total[d]), 3))
]))
| gpl-3.0 | 7,753,463,791,986,384,000 | 26.283582 | 98 | 0.555252 | false | 2.563815 | false | false | false |
eharney/cinder | cinder/scheduler/filters/capacity_filter.py | 1 | 8982 | # Copyright (c) 2012 Intel
# Copyright (c) 2012 OpenStack Foundation
# Copyright (c) 2015 EMC Corporation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from oslo_log import log as logging
from cinder.scheduler import filters
LOG = logging.getLogger(__name__)
class CapacityFilter(filters.BaseBackendFilter):
"""Capacity filters based on volume backend's capacity utilization."""
def backend_passes(self, backend_state, filter_properties):
"""Return True if host has sufficient capacity."""
volid = None
# If the volume already exists on this host, don't fail it for
# insufficient capacity (e.g., if we are retyping)
if backend_state.backend_id == filter_properties.get('vol_exists_on'):
return True
spec = filter_properties.get('request_spec')
if spec:
volid = spec.get('volume_id')
grouping = 'cluster' if backend_state.cluster_name else 'host'
if filter_properties.get('new_size'):
# If new_size is passed, we are allocating space to extend a volume
requested_size = (int(filter_properties.get('new_size')) -
int(filter_properties.get('size')))
LOG.debug('Checking if %(grouping)s %(grouping_name)s can extend '
'the volume %(id)s in %(size)s GB',
{'grouping': grouping,
'grouping_name': backend_state.backend_id, 'id': volid,
'size': requested_size})
else:
requested_size = filter_properties.get('size')
LOG.debug('Checking if %(grouping)s %(grouping_name)s can create '
'a %(size)s GB volume (%(id)s)',
{'grouping': grouping,
'grouping_name': backend_state.backend_id, 'id': volid,
'size': requested_size})
# requested_size is 0 means that it's a manage request.
if requested_size == 0:
return True
if backend_state.free_capacity_gb is None:
# Fail Safe
LOG.error("Free capacity not set: "
"volume node info collection broken.")
return False
free_space = backend_state.free_capacity_gb
total_space = backend_state.total_capacity_gb
reserved = float(backend_state.reserved_percentage) / 100
if free_space in ['infinite', 'unknown']:
# NOTE(zhiteng) for those back-ends cannot report actual
# available capacity, we assume it is able to serve the
# request. Even if it was not, the retry mechanism is
# able to handle the failure by rescheduling
return True
elif total_space in ['infinite', 'unknown']:
# If total_space is 'infinite' or 'unknown' and reserved
# is 0, we assume the back-ends can serve the request.
# If total_space is 'infinite' or 'unknown' and reserved
# is not 0, we cannot calculate the reserved space.
# float(total_space) will throw an exception. total*reserved
# also won't work. So the back-ends cannot serve the request.
if reserved == 0:
return True
LOG.debug("Cannot calculate GB of reserved space (%s%%) with "
"backend's reported total capacity '%s'",
backend_state.reserved_percentage, total_space)
return False
total = float(total_space)
if total <= 0:
LOG.warning("Insufficient free space for volume creation. "
"Total capacity is %(total).2f on %(grouping)s "
"%(grouping_name)s.",
{"total": total,
"grouping": grouping,
"grouping_name": backend_state.backend_id})
return False
# Calculate how much free space is left after taking into account
# the reserved space.
free = free_space - math.floor(total * reserved)
# NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs,
# we will not use max_over_subscription_ratio and
# provisioned_capacity_gb to determine whether a volume can be
# provisioned. Instead free capacity will be used to evaluate.
thin = True
vol_type = filter_properties.get('volume_type', {}) or {}
provision_type = vol_type.get('extra_specs', {}).get(
'provisioning:type')
if provision_type == 'thick':
thin = False
# Only evaluate using max_over_subscription_ratio if
# thin_provisioning_support is True. Check if the ratio of
# provisioned capacity over total capacity has exceeded over
# subscription ratio.
if (thin and backend_state.thin_provisioning_support and
backend_state.max_over_subscription_ratio >= 1):
provisioned_ratio = ((backend_state.provisioned_capacity_gb +
requested_size) / total)
if provisioned_ratio > backend_state.max_over_subscription_ratio:
msg_args = {
"provisioned_ratio": provisioned_ratio,
"oversub_ratio": backend_state.max_over_subscription_ratio,
"grouping": grouping,
"grouping_name": backend_state.backend_id,
}
LOG.warning(
"Insufficient free space for thin provisioning. "
"The ratio of provisioned capacity over total capacity "
"%(provisioned_ratio).2f has exceeded the maximum over "
"subscription ratio %(oversub_ratio).2f on %(grouping)s "
"%(grouping_name)s.", msg_args)
return False
else:
# Thin provisioning is enabled and projected over-subscription
# ratio does not exceed max_over_subscription_ratio. The host
# passes if "adjusted" free virtual capacity is enough to
# accommodate the volume. Adjusted free virtual capacity is
# the currently available free capacity (taking into account
# of reserved space) which we can over-subscribe.
adjusted_free_virtual = (
free * backend_state.max_over_subscription_ratio)
res = adjusted_free_virtual >= requested_size
if not res:
msg_args = {"available": adjusted_free_virtual,
"size": requested_size,
"grouping": grouping,
"grouping_name": backend_state.backend_id}
LOG.warning("Insufficient free virtual space "
"(%(available)sGB) to accommodate thin "
"provisioned %(size)sGB volume on %(grouping)s"
" %(grouping_name)s.", msg_args)
return res
elif thin and backend_state.thin_provisioning_support:
LOG.warning("Filtering out %(grouping)s %(grouping_name)s "
"with an invalid maximum over subscription ratio "
"of %(oversub_ratio).2f. The ratio should be a "
"minimum of 1.0.",
{"oversub_ratio":
backend_state.max_over_subscription_ratio,
"grouping": grouping,
"grouping_name": backend_state.backend_id})
return False
msg_args = {"grouping_name": backend_state.backend_id,
"grouping": grouping,
"requested": requested_size,
"available": free}
if free < requested_size:
LOG.warning("Insufficient free space for volume creation "
"on %(grouping)s %(grouping_name)s (requested / "
"avail): %(requested)s/%(available)s",
msg_args)
return False
LOG.debug("Space information for volume creation "
"on %(grouping)s %(grouping_name)s (requested / avail): "
"%(requested)s/%(available)s", msg_args)
return True
| apache-2.0 | 4,721,007,963,419,278,000 | 46.273684 | 79 | 0.560009 | false | 4.742344 | false | false | false |
mganeva/mantid | Framework/PythonInterface/test/python/plugins/algorithms/GetNegMuMuonicXRDTest.py | 1 | 7298 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.kernel import *
from mantid.api import *
from mantid.simpleapi import *
class GetNegMuMuonicXRDTest(unittest.TestCase):
au_muonic_xr = [8135.2,8090.6,8105.4,8069.4,5764.89,5594.97,3360.2,
3206.8,2474.22,2341.21,2304.44,1436.05,1391.58,1104.9,
899.14,869.98,405.654,400.143]
as_muonic_xr = [1866.9,1855.8,436.6,427.5]
#TESTING FOR ONE WORKSPACE IN GROUP WORKSPACE
def test_muonic_xrd_single_ws_in_group(self):
self.au_muonic_xr.sort()
self.as_muonic_xr.sort()
#Setting up the work space manually
au_peak_values = self.au_muonic_xr
y_position = -0.001 #same as default used by GetNegMuMuonic
y_pos_ws = [y_position]*len(au_peak_values)
au_muon_xr_ws = CreateWorkspace(au_peak_values[:], y_pos_ws[:])
#Check that au_muon_xr_ws is not null
self.assertFalse(au_muon_xr_ws==None)
au_muon_group = GroupWorkspaces(au_muon_xr_ws)
#Check that au_muon_group is not null
self.assertFalse(au_muon_group==None)
#Get the algorithm to produce the same workspace
neg_mu_xr_group = GetNegMuMuonicXRD("Au") #testing default y-Axis position value
#Check that neg_mu_xr_ws is not null
self.assertFalse(neg_mu_xr_group==None)
#Test number of workspaces in group
self.assertEqual(au_muon_group.getNumberOfEntries(),
neg_mu_xr_group.getNumberOfEntries())
self.assertTrue(au_muon_group.size() == 1)
self.assertTrue(neg_mu_xr_group.size() == 1)
#now testing the one workspace in the workspace group
neg_mu_xr_ws = neg_mu_xr_group[0]
au_muon_ws = au_muon_group[0]
#check number of histograms are equal
self.assertEqual(neg_mu_xr_ws.getNumberHistograms(), au_muon_ws.getNumberHistograms())
#check number of bins is equal
self.assertEqual(au_muon_ws.blocksize(), neg_mu_xr_ws.blocksize())
#check length of XValues is the same
self.assertEqual(len(au_muon_ws.readX(0)), len(neg_mu_xr_ws.readX(0)))
#check all the XValues are the same
#For RHEL6 (running an older version of python) this assert is not yet implemented:
#self.assertItemsEqual(au_muon_ws.readX(0),neg_mu_xr_ws.readX(0))
#INSTEAD we will use a simple for loop
for x_value in range(len(au_muon_ws.readX(0))):
self.assertEqual(au_muon_ws.readX(0)[x_value], neg_mu_xr_ws.readX(0)[x_value])
#check length of YValues is the same
self.assertEqual(len(au_muon_ws.readY(0)), len(neg_mu_xr_ws.readY(0)))
#check all the YValues are the same
#For RHEL6 (running an older version of python) this assert is not yet implemented:
#self.assertItemsEqual(au_muon_ws.readY(0),neg_mu_xr_ws.readY(0))
#INSTEAD we will use a simple for loop
for y_value in range(len(au_muon_ws.readY(0))):
self.assertEqual(au_muon_ws.readY(0)[y_value], neg_mu_xr_ws.readY(0)[y_value])
#TESTING FOR MORE THAN ONE WORKSPACE IN GROUP WORKSPACE
def test_muonic_xrd_more_than_one_ws_in_group(self):
self.au_muonic_xr.sort()
self.as_muonic_xr.sort()
y_position = 0.2
#Setting up au_muonic workspace
au_peak_values = self.au_muonic_xr
#check to see if workspace has been set to non-None value
self.assertFalse(au_peak_values == None)
au_y_pos_ws = [y_position]*len(au_peak_values)
#setting up as_muonic workspace
as_peak_values = self.as_muonic_xr
#check to see if workspace has been set to non-None value
self.assertFalse(as_peak_values == None)
as_y_pos_ws = [y_position]*len(as_peak_values)
au_muon_xr_ws = CreateWorkspace(au_peak_values,au_y_pos_ws[:])
#check to see if workspace creation was successful
self.assertFalse(au_muon_xr_ws == None)
as_muon_xr_ws = CreateWorkspace(as_peak_values, as_y_pos_ws[:])
#check to see if workspace creation was successful
self.assertFalse(as_muon_xr_ws == None)
ws_list = [au_muon_xr_ws,as_muon_xr_ws]
grouped_muon_ws = GroupWorkspaces(ws_list)
#check to see whether grouping workspaces was successful
self.assertFalse(grouped_muon_ws == None)
#Run algorithm that creates muonic_xr group workspace
group_muonic_xr_ws = GetNegMuMuonicXRD("Au,As", 0.2)
#check that this has assigned value correctly
self.assertFalse(group_muonic_xr_ws == None)
#Compare histograms for each of the workspaces in GroupWorkspaces created
self.assertEqual(grouped_muon_ws[0].getNumberHistograms(), group_muonic_xr_ws[0].getNumberHistograms())
self.assertEqual(grouped_muon_ws[1].getNumberHistograms(), group_muonic_xr_ws[1].getNumberHistograms())
#Compare length of X values read from each workspace in grouped workspace
self.assertEqual(len(grouped_muon_ws[0].readX(0)), len(group_muonic_xr_ws[0].readX(0)))
self.assertEqual(len(grouped_muon_ws[1].readX(0)), len(group_muonic_xr_ws[1].readX(0)))
#Compare X values read from each workspace in grouped workspace
#For RHEL6 (running an older version of python) this assert is not yet implemented:
#self.assertItemsEqual(grouped_muon_ws[0].readX(0), group_muonic_xr_ws[0].readX(0))
#self.assertItemsEqual(grouped_muon_ws[1].readX(0), group_muonic_xr_ws[1].readX(0))
#INSTEAD we will use a simple for loop
for x_value in range(len(grouped_muon_ws[0].readX(0))):
self.assertEqual(grouped_muon_ws[0].readX(0)[x_value], group_muonic_xr_ws[0].readX(0)[x_value])
for x_value in range(len(grouped_muon_ws[1].readX(0))):
self.assertEqual(grouped_muon_ws[1].readX(0)[x_value], group_muonic_xr_ws[1].readX(0)[x_value])
#Compare length of Y values read from each workspace in grouped workspace
self.assertEqual(len(grouped_muon_ws[0].readY(0)), len(group_muonic_xr_ws[0].readY(0)))
self.assertEqual(len(grouped_muon_ws[1].readY(0)), len(group_muonic_xr_ws[1].readY(0)))
#Compare Y values read from each workspace in grouped workspace
#For RHEL6 (running an older version of python) this assert is not yet implemented:
#self.assertItemsEqual(grouped_muon_ws[0].readY(0), group_muonic_xr_ws[0].readY(0))
#self.assertItemsEqual(grouped_muon_ws[1].readY(0), group_muonic_xr_ws[1].readY(0))
#INSTEAD we will use a simple for loop
for y_value in range(len(grouped_muon_ws[0].readY(0))):
self.assertEqual(grouped_muon_ws[0].readY(0)[y_value], group_muonic_xr_ws[0].readY(0)[y_value])
for y_value in range(len(grouped_muon_ws[1].readY(0))):
self.assertEqual(grouped_muon_ws[1].readY(0)[y_value], group_muonic_xr_ws[1].readY(0)[y_value])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 6,650,701,605,140,535,000 | 50.394366 | 111 | 0.65744 | false | 3.093684 | true | false | false |
vorushin/FamilyFeed | sources/facebook.py | 1 | 1748 | from datetime import datetime
import json
from urllib2 import urlopen, HTTPError
from django.db.models import Max
from sources.models import FacebookPost
def time(s):
return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S+0000')
def post_text(item):
return item.get('message', u'') + item.get('description', u'')
def list_posts(access_token):
latest_created_time = FacebookPost.objects\
.filter(access_token=access_token)\
.aggregate(Max('created_time'))['created_time__max']
'''for post in new_posts(access_token, latest_created_time):
if not FacebookPost.objects.filter(
access_token=access_token,
created_time=time(post['created_time'])).exists():
FacebookPost.objects.create(
access_token=access_token,
created_time=time(post['created_time']),
data=post)'''
return [p.data for p in FacebookPost.objects \
.filter(access_token=access_token) \
.order_by('-created_time')]
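# Page through the user's Graph API feed, yielding posts that have a message and are
# newer than `older_than` (when given), following the 'paging.next' links.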
def new_posts(access_token, older_than=None):
graph_url = 'https://graph.facebook.com/me/feed?access_token=%s' % \
access_token
graph_url += '&limit=1000'
if older_than:
graph_url += '&since=' + older_than.isoformat()
resp = json.loads(urlopen(graph_url).read())
while resp['data']:
for item in resp['data']:
if older_than:
if time(item['created_time']) <= older_than:
return
if item.get('message'):
yield item
try:
resp = json.loads(urlopen(resp['paging']['next']).read())
except HTTPError:
break
| mit | -6,442,804,977,084,851,000 | 33.96 | 76 | 0.57151 | false | 3.981777 | false | false | false |
bytedance/fedlearner | web_console_v2/api/test/fedlearner_webconsole/utils/file_manager_test.py | 1 | 9062 | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import shutil
import tempfile
import unittest
from collections import namedtuple
from pathlib import Path
from tensorflow.io import gfile
from fedlearner_webconsole.utils.file_manager import GFileFileManager, FileManager, File
FakeFileStatistics = namedtuple('FakeFileStatistics', ['length', 'mtime_nsec'])
class GFileFileManagerTest(unittest.TestCase):
_F1_SIZE = 3
_F2_SIZE = 4
_S1_SIZE = 55
_F1_MTIME = 1613982390
_F2_MTIME = 1613982391
_S1_MTIME = 1613982392
def _get_file_stat(self, orig_os_stat, path):
gfile_stat = FakeFileStatistics(2, 1613982390 * 1e9)
if path == self._get_temp_path('f1.txt') or \
path == self._get_temp_path('subdir/f1.txt'):
gfile_stat = FakeFileStatistics(self._F1_SIZE,
self._F1_MTIME * 1e9)
return gfile_stat
elif path == self._get_temp_path('f2.txt') or \
path == self._get_temp_path('f3.txt'):
gfile_stat = FakeFileStatistics(self._F2_SIZE,
self._F2_MTIME * 1e9)
return gfile_stat
elif path == self._get_temp_path('subdir/s1.txt'):
gfile_stat = FakeFileStatistics(self._S1_SIZE,
self._S1_MTIME * 1e9)
return gfile_stat
else:
return orig_os_stat(path)
def setUp(self):
# Create a temporary directory
self._test_dir = tempfile.mkdtemp()
subdir = Path(self._test_dir).joinpath('subdir')
subdir.mkdir(exist_ok=True)
Path(self._test_dir).joinpath('f1.txt').write_text('xxx')
Path(self._test_dir).joinpath('f2.txt').write_text('xxx')
subdir.joinpath('s1.txt').write_text('xxx')
# Mocks os.stat
self._orig_os_stat = os.stat
def fake_stat(path, *arg, **kwargs):
return self._get_file_stat(self._orig_os_stat, path)
gfile.stat = fake_stat
self._fm = GFileFileManager()
def tearDown(self):
os.stat = self._orig_os_stat
# Remove the directory after the test
shutil.rmtree(self._test_dir)
def _get_temp_path(self, file_path: str = None) -> str:
return str(Path(self._test_dir, file_path or '').absolute())
def test_can_handle(self):
self.assertTrue(self._fm.can_handle('/data/abc'))
self.assertFalse(self._fm.can_handle('data'))
def test_ls(self):
# List file
self.assertEqual(self._fm.ls(self._get_temp_path('f1.txt')), [
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME)
])
# List folder
self.assertEqual(
sorted(self._fm.ls(self._get_temp_path()),
key=lambda file: file.path),
sorted([
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME),
File(path=self._get_temp_path('f2.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME)
],
key=lambda file: file.path))
# List folder recursively
self.assertEqual(
sorted(self._fm.ls(self._get_temp_path(), recursive=True),
key=lambda file: file.path),
sorted([
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME),
File(path=self._get_temp_path('f2.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME),
File(path=self._get_temp_path('subdir/s1.txt'),
size=self._S1_SIZE,
mtime=self._S1_MTIME),
],
key=lambda file: file.path))
def test_move(self):
# Moves to another folder
self._fm.move(self._get_temp_path('f1.txt'),
self._get_temp_path('subdir/'))
self.assertEqual(
sorted(self._fm.ls(self._get_temp_path('subdir')),
key=lambda file: file.path),
sorted([
File(path=self._get_temp_path('subdir/s1.txt'),
size=self._S1_SIZE,
mtime=self._S1_MTIME),
File(path=self._get_temp_path('subdir/f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME),
],
key=lambda file: file.path))
# Renames
self._fm.move(self._get_temp_path('f2.txt'),
self._get_temp_path('f3.txt'))
with self.assertRaises(ValueError):
self._fm.ls(self._get_temp_path('f2.txt'))
self.assertEqual(self._fm.ls(self._get_temp_path('f3.txt')), [
File(path=self._get_temp_path('f3.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME)
])
def test_remove(self):
self._fm.remove(self._get_temp_path('f1.txt'))
self._fm.remove(self._get_temp_path('subdir'))
self.assertEqual(self._fm.ls(self._get_temp_path(), recursive=True), [
File(path=self._get_temp_path('f2.txt'),
size=self._F2_SIZE,
mtime=self._F2_MTIME)
])
def test_copy(self):
self._fm.copy(self._get_temp_path('f1.txt'),
self._get_temp_path('subdir'))
self.assertEqual(self._fm.ls(self._get_temp_path('f1.txt')), [
File(path=self._get_temp_path('f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME)
])
self.assertEqual(self._fm.ls(self._get_temp_path('subdir/f1.txt')), [
File(path=self._get_temp_path('subdir/f1.txt'),
size=self._F1_SIZE,
mtime=self._F1_MTIME)
])
def test_mkdir(self):
self._fm.mkdir(os.path.join(self._get_temp_path(), 'subdir2'))
self.assertTrue(os.path.isdir(self._get_temp_path('subdir2')))
def test_read(self):
content = self._fm.read(self._get_temp_path('f1.txt'))
self.assertEqual('xxx', content)
class FileManagerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
fake_fm = 'testing.fake_file_manager:FakeFileManager'
os.environ['CUSTOMIZED_FILE_MANAGER'] = fake_fm
@classmethod
def tearDownClass(cls):
del os.environ['CUSTOMIZED_FILE_MANAGER']
def setUp(self):
self._fm = FileManager()
def test_can_handle(self):
self.assertTrue(self._fm.can_handle('fake://123'))
# Falls back to default manager
self.assertTrue(self._fm.can_handle('/data/123'))
self.assertFalse(self._fm.can_handle('unsupported:///123'))
def test_ls(self):
self.assertEqual(self._fm.ls('fake://data'), [{
'path': 'fake://data/f1.txt',
'size': 0
}])
def test_move(self):
self.assertTrue(self._fm.move('fake://move/123', 'fake://move/234'))
self.assertFalse(
self._fm.move('fake://do_not_move/123', 'fake://move/234'))
# No file manager can handle this
self.assertRaises(RuntimeError,
lambda: self._fm.move('hdfs://123', 'fake://abc'))
def test_remove(self):
self.assertTrue(self._fm.remove('fake://remove/123'))
self.assertFalse(self._fm.remove('fake://do_not_remove/123'))
# No file manager can handle this
self.assertRaises(RuntimeError,
lambda: self._fm.remove('unsupported://123'))
def test_copy(self):
self.assertTrue(self._fm.copy('fake://copy/123', 'fake://copy/234'))
self.assertFalse(
self._fm.copy('fake://do_not_copy/123', 'fake://copy/234'))
# No file manager can handle this
self.assertRaises(RuntimeError,
lambda: self._fm.copy('hdfs://123', 'fake://abc'))
def test_mkdir(self):
self.assertTrue(self._fm.mkdir('fake://mkdir/123'))
self.assertFalse(self._fm.mkdir('fake://do_not_mkdir/123'))
# No file manager can handle this
self.assertRaises(RuntimeError,
lambda: self._fm.mkdir('unsupported:///123'))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -8,069,600,054,312,134,000 | 36.446281 | 88 | 0.550541 | false | 3.655506 | true | false | false |
Phixyn/ZoeyBot | modules/utils.py | 1 | 1119 | """
utils.py - Utilities module
ZoeyBot - Python IRC Bot
Copyright 2012-2014 (c) Phixyn
This file is part of ZoeyBot.
ZoeyBot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ZoeyBot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ZoeyBot. If not, see <http://www.gnu.org/licenses/>.
"""
import os, subprocess
from datetime import datetime as dt
def timestamp():
""" Documentation pending """
return dt.strftime(dt.now(), "(%H:%M:%S)")
def clear_screen():
""" Documentation pending """
# TODO try...except block here maybe?
if (os.name == 'nt'):
subprocess.call('cls', shell=True)
elif (os.name == 'posix'):
subprocess.call('clear')
else:
print(chr(27) + "[2J")
| gpl-3.0 | 1,913,154,595,719,887,000 | 25.023256 | 68 | 0.726542 | false | 3.57508 | false | false | false |
doraemonext/DEOnlineJudge | lib/tools/validator.py | 1 | 1501 | # -*- coding: utf-8 -*-
import re
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
class MinValue(object):
"""
    Minimum length validator.
"""
def __init__(self, name, length):
self.name = name
self.length = length
def __call__(self, value, *args, **kwargs):
if len(value) < self.length:
raise ValidationError(u'%s最小长度为%d个字符' % (self.name, self.length))
class MaxValue(object):
"""
    Maximum length validator.
"""
def __init__(self, name, length):
self.name = name
self.length = length
def __call__(self, value, *args, **kwargs):
if len(value) > self.length:
raise ValidationError(u'%s最大长度为%d个字符' % (self.name, self.length))
class SafeValue(object):
"""
    Safe character validator.
    Only Chinese characters, digits, Latin letters, underscores and hyphens are allowed.
"""
def __init__(self, name):
self.name = name
def __call__(self, value, *args, **kwargs):
if not re.search(u'^[_a-zA-Z0-9\u4e00-\u9fa5\-]+$', value):
raise ValidationError(u'%s包含非法字符' % self.name)
class EmailValue(object):
"""
    Email address validator.
"""
def __init__(self, name):
self.name = name
def __call__(self, value, *args, **kwargs):
try:
validate_email(value)
except ValidationError:
raise ValidationError(u'%s不合法' % self.name)
| mit | 8,311,261,738,640,830,000 | 21.683333 | 77 | 0.563556 | false | 2.84728 | false | false | false |
vhaupert/mitmproxy | mitmproxy/proxy/config.py | 1 | 3244 | import os
import re
import typing
from OpenSSL import crypto
from mitmproxy import certs
from mitmproxy import exceptions
from mitmproxy import options as moptions
from mitmproxy.net import server_spec
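# Matches "host:port" strings against the ignore/allow/tcp host patterns configured via options.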
class HostMatcher:
def __init__(self, handle, patterns=tuple()):
self.handle = handle
self.patterns = list(patterns)
self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
def __call__(self, address):
if not address:
return False
host = "%s:%s" % address
if self.handle in ["ignore", "tcp"]:
return any(rex.search(host) for rex in self.regexes)
else: # self.handle == "allow"
return not any(rex.search(host) for rex in self.regexes)
def __bool__(self):
return bool(self.patterns)
class ProxyConfig:
def __init__(self, options: moptions.Options) -> None:
self.options = options
self.certstore: certs.CertStore
self.check_filter: typing.Optional[HostMatcher] = None
self.check_tcp: typing.Optional[HostMatcher] = None
self.upstream_server: typing.Optional[server_spec.ServerSpec] = None
self.configure(options, set(options.keys()))
options.changed.connect(self.configure)
def configure(self, options: moptions.Options, updated: typing.Any) -> None:
if options.allow_hosts and options.ignore_hosts:
raise exceptions.OptionsError("--ignore-hosts and --allow-hosts are mutually "
"exclusive; please choose one.")
if options.ignore_hosts:
self.check_filter = HostMatcher("ignore", options.ignore_hosts)
elif options.allow_hosts:
self.check_filter = HostMatcher("allow", options.allow_hosts)
else:
self.check_filter = HostMatcher(False)
if "tcp_hosts" in updated:
self.check_tcp = HostMatcher("tcp", options.tcp_hosts)
certstore_path = os.path.expanduser(options.confdir)
if not os.path.exists(os.path.dirname(certstore_path)):
raise exceptions.OptionsError(
"Certificate Authority parent directory does not exist: %s" %
os.path.dirname(certstore_path)
)
key_size = options.key_size
self.certstore = certs.CertStore.from_store(
certstore_path,
moptions.CONF_BASENAME,
key_size
)
for c in options.certs:
parts = c.split("=", 1)
if len(parts) == 1:
parts = ["*", parts[0]]
cert = os.path.expanduser(parts[1])
if not os.path.exists(cert):
raise exceptions.OptionsError(
"Certificate file does not exist: %s" % cert
)
try:
self.certstore.add_cert_file(parts[0], cert)
except crypto.Error:
raise exceptions.OptionsError(
"Invalid certificate format: %s" % cert
)
m = options.mode
if m.startswith("upstream:") or m.startswith("reverse:"):
_, spec = server_spec.parse_with_mode(options.mode)
self.upstream_server = spec
| mit | 2,039,066,290,307,979,000 | 35.044444 | 90 | 0.589704 | false | 4.14304 | false | false | false |
jjo31/ATHAM-Fluidity | python/fluidity/microphysics/FortranMicrophysicsWrapper.py | 1 | 4818 | import os
path=os.path.dirname(__file__)
def MakeWrapperFiles(field_dict,call_str,pointwise):
write_to_file(field_dict,call_str,pointwise)
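# Each *_str helper below returns a chunk of generated Fortran source; write_to_file
# stitches them together into the FW_auto module.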
def allocate_str(field_dict):
s=""" subroutine allocate_storage(number_of_tracers,n)
integer :: n
!f2py integer, intent(hide), depend(number_of_tracers) :: n=shape(number_of_tracers,0)
integer :: number_of_tracers(n)
"""
for n,k in enumerate(field_dict):
s+=" if (allocated(%s)) deallocate(%s)\n"%(k,k)
s+=" allocate(%s(number_of_tracers(%d)))\n"%(k,n+1)
s+=" end subroutine allocate_storage\n\n"
return s
def finalize_str(field_dict):
s=" subroutine finalize\n"
for k in field_dict:
s+="deallocate(%s)\n"%k
s+=" end subroutine finalize\n\n"
return s
def set_field_str(fname):
s=""" subroutine set_%s(i,new_val,n,old_val,source,m)
integer :: m,n
!f2py integer, intent(hide), depend(new_val) :: n=shape(new_val,0)
!f2py integer, intent(hide), depend(new_val) :: m=shape(source,0)
real, intent(in), dimension(n), target :: new_val, old_val
real, intent(in), dimension(n), target, optional ::source
!f2py real, intent(inplace), dimension(n) :: new_val, old_val
!f2py real, intent(inplace), dimension(n), optional :: source
integer :: i
%s(i)%%new=>new_val
%s(i)%%old=>old_val
print*, present(source), m
if (present(source) .and. m==n)&
%s(i)%%source=>source
end subroutine set_%s
"""%(fname,fname,fname,fname,fname)
return s
def run_str(field_dict,call_string):
s="""subroutine run_microphysics(current_time,dt)
real, intent(in) :: current_time, dt
interface
subroutine %s(time,timestep"""%call_string
for n,k in enumerate(field_dict):
s+=',&\n t%d'%n
s+=')\n'
s+=' use FW_data_type\n'
s+=' real, intent(in) :: time, timestep\n'
for n,k in enumerate(field_dict):
s+=' type(basic_scalar), intent(inout), dimension(:) :: t%d\n'%n
s+=' end subroutine %s\n'%call_string
s+=""" end interface
call %s(current_time,dt"""%call_string
for k in field_dict:
s+=',&\n %s'%k
s+=')\n\n'
s+=' end subroutine run_microphysics\n\n'
return s
def run_str_pointwise(field_dict,call_string):
s="""subroutine run_microphysics(current_time,dt)
real, intent(in) :: current_time, dt
integer :: i,j\n
"""
for n,k in enumerate(field_dict):
s+=' real, dimension(size(%s),3) :: tracer%d\n'%(k,n)
s+=""" interface\n
subroutine %s(time,timestep"""%call_string
for n,k in enumerate(field_dict):
s+=',&\n t%d'%n
s+=')\n'
s+=' use FW_data_type\n'
s+=' real, intent(in) :: time, timestep\n'
for n,k in enumerate(field_dict):
s+=' real, intent(inout), dimension(:,:) :: t%d\n'%n
s+=' end subroutine %s\n'%call_string
s+=" end interface\n"
s+=" do i=1, size(%s(0)%%new)\n"%(field_dict.keys()[0])
for n,k in enumerate(field_dict):
s+=' do j=1,size(%s)\n'%k
s+=' tracer%d(j,1)=%s(j)%%new(i)\n'%(n,k)
s+=' tracer%d(j,2)=%s(j)%%old(i)\n'%(n,k)
s+=' if (associated(%s(j)%%source))&\n tracer%d(j,3)=%s(j)%%source(i)\n'%(k,n,k)
s+=' end do\n\n'
s+=" call %s(current_time,dt"%call_string
for k in range(len(field_dict)):
s+=',&\n tracer%d'%n
s+=')\n\n'
for n,k in enumerate(field_dict):
s+=' do j=1,size(%s)\n'%k
s+=' %s(j)%%new(i)=tracer%d(j,1)\n'%(k,n)
s+=' %s(j)%%old(i)=tracer%d(j,2)\n'%(k,n)
s+=' if (associated(%s(j)%%source))&\n %s(j)%%source(i)=tracer%d(j,3)\n'%(k,k,n)
s+=' end do\n\n'
s+=' end do\n\n'
s+=' end subroutine run_microphysics\n\n'
return s
def write_to_file(field_dict={},
call_string='',
pointwise=False,
dirname=path+'/src',
src_name='FW_auto',
data_name='FW_data'):
f=open(dirname+'/'+src_name+'.F90','w')
s="""module FW_auto
use FW_data
implicit none
contains
"""
f.write(s)
f.write(allocate_str(field_dict))
f.write(finalize_str(field_dict))
for k in field_dict:
f.write(set_field_str(k))
if pointwise:
f.write(run_str_pointwise(field_dict,call_string))
else:
f.write(run_str(field_dict,call_string))
f.write("end module FW_Auto\n")
f.close()
f=open(dirname+'/'+data_name+'.F90','w')
f.write("""module %s
use FW_data_type
"""%data_name)
for k in field_dict:
f.write(' type(basic_scalar), dimension(:), allocatable :: %s\n'%k)
f.write('end module %s\n'%data_name)
f.close()
| lgpl-2.1 | 37,133,814,416,774,880 | 32.458333 | 100 | 0.545247 | false | 2.793043 | false | false | false |
CartoDB/cartoframes | cartoframes/io/managers/context_manager.py | 1 | 22518 | import time
import pandas as pd
from warnings import warn
from carto.auth import APIKeyAuthClient
from carto.datasets import DatasetManager
from carto.exceptions import CartoException, CartoRateLimitException
from carto.sql import SQLClient, BatchSQLClient, CopySQLClient
from pyrestcli.exceptions import NotFoundException
from ..dataset_info import DatasetInfo
from ... import __version__
from ...auth.defaults import get_default_credentials
from ...utils.logger import log
from ...utils.geom_utils import encode_geometry_ewkb
from ...utils.utils import (is_sql_query, check_credentials, encode_row, map_geom_type, PG_NULL, double_quote,
create_tmp_name)
from ...utils.columns import (get_dataframe_columns_info, get_query_columns_info, obtain_converters, date_columns_names,
normalize_name)
DEFAULT_RETRY_TIMES = 3
BATCH_API_PAYLOAD_THRESHOLD = 12000
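# Decorator that retries a COPY call, waiting the advertised interval, when the SQL API
# responds with a rate-limit error.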
def retry_copy(func):
def wrapper(*args, **kwargs):
m_retry_times = kwargs.get('retry_times', DEFAULT_RETRY_TIMES)
while m_retry_times >= 1:
try:
return func(*args, **kwargs)
except CartoRateLimitException as err:
m_retry_times -= 1
if m_retry_times <= 0:
warn(('Read call was rate-limited. '
'This usually happens when there are multiple queries being read at the same time.'))
raise err
warn('Read call rate limited. Waiting {s} seconds'.format(s=err.retry_after))
time.sleep(err.retry_after)
warn('Retrying...')
return func(*args, **kwargs)
return wrapper
def not_found(func):
def decorator_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except CartoException as e:
if hasattr(e, 'args') and isinstance(e.args, (list, tuple)) and type(e.args[0]) == NotFoundException:
raise Exception('Resource not found') from None
else:
raise e
return decorator_func
class ContextManager:
def __init__(self, credentials):
self.credentials = credentials or get_default_credentials()
check_credentials(self.credentials)
self.auth_client = _create_auth_client(self.credentials)
self.sql_client = SQLClient(self.auth_client)
self.copy_client = CopySQLClient(self.auth_client)
self.batch_sql_client = BatchSQLClient(self.auth_client)
@not_found
def execute_query(self, query, parse_json=True, do_post=True, format=None, **request_args):
return self.sql_client.send(query.strip(), parse_json, do_post, format, **request_args)
@not_found
def execute_long_running_query(self, query):
return self.batch_sql_client.create_and_wait_for_completion(query.strip())
def copy_to(self, source, schema=None, limit=None, retry_times=DEFAULT_RETRY_TIMES):
query = self.compute_query(source, schema)
columns = self._get_query_columns_info(query)
copy_query = self._get_copy_query(query, columns, limit)
return self._copy_to(copy_query, columns, retry_times)
def copy_from(self, gdf, table_name, if_exists='fail', cartodbfy=True,
retry_times=DEFAULT_RETRY_TIMES):
schema = self.get_schema()
table_name = self.normalize_table_name(table_name)
df_columns = get_dataframe_columns_info(gdf)
if self.has_table(table_name, schema):
if if_exists == 'replace':
table_query = self._compute_query_from_table(table_name, schema)
table_columns = self._get_query_columns_info(table_query)
if self._compare_columns(df_columns, table_columns):
# Equal columns: truncate table
self._truncate_table(table_name, schema)
else:
# Diff columns: truncate table and drop + add columns
self._truncate_and_drop_add_columns(
table_name, schema, df_columns, table_columns)
elif if_exists == 'fail':
raise Exception('Table "{schema}.{table_name}" already exists in your CARTO account. '
'Please choose a different `table_name` or use '
'if_exists="replace" to overwrite it.'.format(
table_name=table_name, schema=schema))
else: # 'append'
cartodbfy = False
else:
self._create_table_from_columns(table_name, schema, df_columns)
self._copy_from(gdf, table_name, df_columns, retry_times)
if cartodbfy is True:
cartodbfy_query = _cartodbfy_query(table_name, schema)
self.execute_long_running_query(cartodbfy_query)
return table_name
def create_table_from_query(self, query, table_name, if_exists):
schema = self.get_schema()
table_name = self.normalize_table_name(table_name)
if self.has_table(table_name, schema):
if if_exists == 'replace':
# TODO: review logic copy_from
self._drop_create_table_from_query(table_name, schema, query)
elif if_exists == 'fail':
raise Exception('Table "{schema}.{table_name}" already exists in your CARTO account. '
'Please choose a different `table_name` or use '
'if_exists="replace" to overwrite it.'.format(
table_name=table_name, schema=schema))
else: # 'append'
pass
else:
self._drop_create_table_from_query(table_name, schema, query)
return table_name
def list_tables(self, schema=None):
datasets = DatasetManager(self.auth_client).filter(
show_table_size_and_row_count='false',
show_table='false',
show_stats='false',
show_likes='false',
show_liked='false',
show_permission='false',
show_uses_builder_features='false',
show_synchronization='false',
load_totals='false'
)
datasets.sort(key=lambda x: x.updated_at, reverse=True)
return pd.DataFrame([dataset.name for dataset in datasets], columns=['tables'])
def has_table(self, table_name, schema=None):
query = self.compute_query(table_name, schema)
return self._check_exists(query)
def delete_table(self, table_name):
query = _drop_table_query(table_name)
output = self.execute_query(query)
return not('notices' in output and 'does not exist' in output['notices'][0])
def _delete_function(self, function_name):
query = _drop_function_query(function_name)
self.execute_query(query)
return function_name
def _create_function(self, schema, statement,
function_name=None, columns_types=None, return_value='VOID', language='plpgsql'):
function_name = function_name or create_tmp_name(base='tmp_func')
safe_schema = double_quote(schema)
query, qualified_func_name = _create_function_query(
schema=safe_schema,
function_name=function_name,
statement=statement,
columns_types=columns_types or '',
return_value=return_value,
language=language)
self.execute_query(query)
return qualified_func_name
def rename_table(self, table_name, new_table_name, if_exists='fail'):
new_table_name = self.normalize_table_name(new_table_name)
if table_name == new_table_name:
raise ValueError('Table names are equal. Please choose a different table name.')
if not self.has_table(table_name):
raise Exception('Table "{table_name}" does not exist in your CARTO account.'.format(
table_name=table_name))
if self.has_table(new_table_name):
if if_exists == 'replace':
log.debug('Removing table "{}"'.format(new_table_name))
self.delete_table(new_table_name)
elif if_exists == 'fail':
raise Exception('Table "{new_table_name}" already exists in your CARTO account. '
'Please choose a different `new_table_name` or use '
'if_exists="replace" to overwrite it.'.format(
new_table_name=new_table_name))
self._rename_table(table_name, new_table_name)
return new_table_name
def update_privacy_table(self, table_name, privacy=None):
DatasetInfo(self.auth_client, table_name).update_privacy(privacy)
def get_privacy(self, table_name):
return DatasetInfo(self.auth_client, table_name).privacy
def get_schema(self):
"""Get user schema from current credentials"""
query = 'SELECT current_schema()'
result = self.execute_query(query, do_post=False)
schema = result['rows'][0]['current_schema']
log.debug('schema: {}'.format(schema))
return schema
def get_geom_type(self, query):
"""Fetch geom type of a remote table or query"""
distict_query = '''
SELECT distinct ST_GeometryType(the_geom) AS geom_type
FROM ({}) q
LIMIT 5
'''.format(query)
response = self.execute_query(distict_query, do_post=False)
if response and response.get('rows') and len(response.get('rows')) > 0:
st_geom_type = response.get('rows')[0].get('geom_type')
if st_geom_type:
return map_geom_type(st_geom_type[3:])
return None
def get_num_rows(self, query):
"""Get the number of rows in the query"""
result = self.execute_query('SELECT COUNT(*) FROM ({query}) _query'.format(query=query))
return result.get('rows')[0].get('count')
def get_bounds(self, query):
extent_query = '''
SELECT ARRAY[
ARRAY[st_xmin(geom_env), st_ymin(geom_env)],
ARRAY[st_xmax(geom_env), st_ymax(geom_env)]
] bounds FROM (
SELECT ST_Extent(the_geom) geom_env
FROM ({}) q
) q;
'''.format(query)
response = self.execute_query(extent_query, do_post=False)
if response and response.get('rows') and len(response.get('rows')) > 0:
return response.get('rows')[0].get('bounds')
return None
def get_column_names(self, source, schema=None, exclude=None):
query = self.compute_query(source, schema)
columns = [c.name for c in self._get_query_columns_info(query)]
if exclude and isinstance(exclude, list):
columns = list(set(columns) - set(exclude))
return columns
def is_public(self, query):
# Used to detect public tables in queries in the publication,
# because privacy only works for tables.
public_auth_client = _create_auth_client(self.credentials, public=True)
public_sql_client = SQLClient(public_auth_client)
exists_query = 'EXPLAIN {}'.format(query)
try:
public_sql_client.send(exists_query, do_post=False)
return True
except CartoException:
return False
def get_table_names(self, query):
# Used to detect tables in queries in the publication.
query = 'SELECT CDB_QueryTablesText($q${}$q$) as tables'.format(query)
result = self.execute_query(query)
tables = []
if result['total_rows'] > 0 and result['rows'][0]['tables']:
# Dataset_info only works with tables without schema
tables = [table.split('.')[1] if '.' in table else table for table in result['rows'][0]['tables']]
return tables
def _compare_columns(self, a, b):
a_copy = [i for i in a if _not_reserved(i.name)]
b_copy = [i for i in b if _not_reserved(i.name)]
a_copy.sort()
b_copy.sort()
return a_copy == b_copy
def _drop_create_table_from_query(self, table_name, schema, query):
log.debug('DROP + CREATE table "{}"'.format(table_name))
query = 'BEGIN; {drop}; {create}; COMMIT;'.format(
drop=_drop_table_query(table_name),
create=_create_table_from_query_query(table_name, query))
self.execute_long_running_query(query)
def _create_table_from_columns(self, table_name, schema, columns):
log.debug('CREATE table "{}"'.format(table_name))
query = 'BEGIN; {create}; COMMIT;'.format(
create=_create_table_from_columns_query(table_name, columns))
self.execute_query(query)
def _truncate_table(self, table_name, schema):
log.debug('TRUNCATE table "{}"'.format(table_name))
query = 'BEGIN; {truncate}; COMMIT;'.format(
truncate=_truncate_table_query(table_name))
self.execute_query(query)
def _truncate_and_drop_add_columns(self, table_name, schema, df_columns, table_columns):
log.debug('TRUNCATE AND DROP + ADD columns table "{}"'.format(table_name))
drop_columns = _drop_columns_query(table_name, table_columns)
add_columns = _add_columns_query(table_name, df_columns)
drop_add_columns = 'ALTER TABLE {table_name} {drop_columns},{add_columns};'.format(
table_name=table_name, drop_columns=drop_columns, add_columns=add_columns)
query = '{regenerate}; BEGIN; {truncate}; {drop_add_columns}; COMMIT;'.format(
regenerate=_regenerate_table_query(table_name, schema) if self._check_regenerate_table_exists() else '',
truncate=_truncate_table_query(table_name),
drop_add_columns=drop_add_columns)
query_length_over_threshold = len(query) > BATCH_API_PAYLOAD_THRESHOLD
if query_length_over_threshold:
qualified_func_name = self._create_function(
schema=schema, statement=drop_add_columns)
drop_add_func_sql = 'SELECT {}'.format(qualified_func_name)
query = '''
{regenerate};
BEGIN;
{truncate};
{drop_add_func_sql};
COMMIT;'''.format(
regenerate=_regenerate_table_query(
table_name, schema) if self._check_regenerate_table_exists() else '',
truncate=_truncate_table_query(table_name),
drop_add_func_sql=drop_add_func_sql)
try:
self.execute_long_running_query(query)
finally:
if query_length_over_threshold:
self._delete_function(qualified_func_name)
def compute_query(self, source, schema=None):
if is_sql_query(source):
return source
schema = schema or self.get_schema()
return self._compute_query_from_table(source, schema)
def _compute_query_from_table(self, table_name, schema):
return 'SELECT * FROM "{schema}"."{table_name}"'.format(
schema=schema or 'public',
table_name=table_name
)
def _check_exists(self, query):
exists_query = 'EXPLAIN {}'.format(query)
try:
self.execute_query(exists_query, do_post=False)
return True
except CartoException:
return False
def _check_regenerate_table_exists(self):
query = '''
SELECT 1
FROM pg_catalog.pg_proc p
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
WHERE p.proname = 'cdb_regeneratetable' AND n.nspname = 'cartodb';
'''
result = self.execute_query(query)
return len(result['rows']) > 0
def _get_query_columns_info(self, query):
query = 'SELECT * FROM ({}) _q LIMIT 0'.format(query)
table_info = self.execute_query(query)
return get_query_columns_info(table_info['fields'])
def _get_copy_query(self, query, columns, limit):
query_columns = [
double_quote(column.name) for column in columns
if (column.name != 'the_geom_webmercator')
]
query = 'SELECT {columns} FROM ({query}) _q'.format(
query=query,
columns=','.join(query_columns))
if limit is not None:
if isinstance(limit, int) and (limit >= 0):
query += ' LIMIT {limit}'.format(limit=limit)
else:
raise ValueError("`limit` parameter must an integer >= 0")
return query
@retry_copy
def _copy_to(self, query, columns, retry_times=DEFAULT_RETRY_TIMES):
log.debug('COPY TO')
copy_query = "COPY ({0}) TO stdout WITH (FORMAT csv, HEADER true, NULL '{1}')".format(query, PG_NULL)
raw_result = self.copy_client.copyto_stream(copy_query)
converters = obtain_converters(columns)
parse_dates = date_columns_names(columns)
df = pd.read_csv(
raw_result,
converters=converters,
parse_dates=parse_dates)
return df
@retry_copy
def _copy_from(self, dataframe, table_name, columns, retry_times=DEFAULT_RETRY_TIMES):
log.debug('COPY FROM')
query = """
COPY {table_name}({columns}) FROM stdin WITH (FORMAT csv, DELIMITER '|', NULL '{null}');
""".format(
table_name=table_name, null=PG_NULL,
columns=','.join(double_quote(column.dbname) for column in columns)).strip()
data = _compute_copy_data(dataframe, columns)
self.copy_client.copyfrom(query, data)
def _rename_table(self, table_name, new_table_name):
query = _rename_table_query(table_name, new_table_name)
self.execute_query(query)
def normalize_table_name(self, table_name):
norm_table_name = normalize_name(table_name)
if norm_table_name != table_name:
log.debug('Table name normalized: "{}"'.format(norm_table_name))
return norm_table_name
def _drop_table_query(table_name, if_exists=True):
return 'DROP TABLE {if_exists} {table_name}'.format(
table_name=table_name,
if_exists='IF EXISTS' if if_exists else '')
def _drop_function_query(function_name, columns_types=None, if_exists=True):
if columns_types and not isinstance(columns_types, dict):
raise ValueError('The columns_types parameter should be a dictionary of column names and types.')
columns_types = columns_types or {}
columns = ['{0} {1}'.format(cname, ctype) for cname, ctype in columns_types.items()]
columns_str = ','.join(columns)
return 'DROP FUNCTION {if_exists} {function_name}{columns_str_call}'.format(
function_name=function_name,
if_exists='IF EXISTS' if if_exists else '',
columns_str_call='({columns_str})'.format(columns_str=columns_str) if columns else '')
def _truncate_table_query(table_name):
return 'TRUNCATE TABLE {table_name}'.format(
table_name=table_name)
def _create_function_query(schema, function_name, statement, columns_types, return_value, language):
if columns_types and not isinstance(columns_types, dict):
raise ValueError('The columns_types parameter should be a dictionary of column names and types.')
columns_types = columns_types or {}
columns = ['{0} {1}'.format(cname, ctype) for cname, ctype in columns_types.items()]
columns_str = ','.join(columns) if columns else ''
function_query = '''
CREATE FUNCTION {schema}.{function_name}({columns_str})
RETURNS {return_value} AS $$
BEGIN
{statement}
END;
$$ LANGUAGE {language}
'''.format(schema=schema,
function_name=function_name,
statement=statement,
columns_str=columns_str,
return_value=return_value,
language=language)
qualified_func_name = '{schema}.{function_name}({columns_str})'.format(
schema=schema, function_name=function_name, columns_str=columns_str)
return function_query, qualified_func_name
def _drop_columns_query(table_name, columns):
columns = ['DROP COLUMN {name}'.format(name=double_quote(c.dbname))
for c in columns if _not_reserved(c.dbname)]
return ','.join(columns)
def _add_columns_query(table_name, columns):
columns = ['ADD COLUMN {name} {type}'.format(name=double_quote(c.dbname), type=c.dbtype)
for c in columns if _not_reserved(c.dbname)]
return ','.join(columns)
def _not_reserved(column):
RESERVED_COLUMNS = ['cartodb_id', 'the_geom', 'the_geom_webmercator']
return column not in RESERVED_COLUMNS
def _create_table_from_columns_query(table_name, columns):
columns = ['{name} {type}'.format(name=double_quote(c.dbname), type=c.dbtype) for c in columns]
return 'CREATE TABLE {table_name} ({columns})'.format(
table_name=table_name,
columns=','.join(columns))
def _create_table_from_query_query(table_name, query):
return 'CREATE TABLE {table_name} AS ({query})'.format(table_name=table_name, query=query)
def _cartodbfy_query(table_name, schema):
return "SELECT CDB_CartodbfyTable('{schema}', '{table_name}')".format(
schema=schema, table_name=table_name)
def _regenerate_table_query(table_name, schema):
return "SELECT CDB_RegenerateTable('{schema}.{table_name}'::regclass)".format(
schema=schema, table_name=table_name)
def _rename_table_query(table_name, new_table_name):
return 'ALTER TABLE {table_name} RENAME TO {new_table_name};'.format(
table_name=table_name, new_table_name=new_table_name)
def _create_auth_client(credentials, public=False):
return APIKeyAuthClient(
base_url=credentials.base_url,
api_key='default_public' if public else credentials.api_key,
session=credentials.session,
client_id='cartoframes_{}'.format(__version__),
user_agent='cartoframes_{}'.format(__version__))
def _compute_copy_data(df, columns):
for index in df.index:
row_data = []
for column in columns:
val = df.at[index, column.name]
if column.is_geom:
val = encode_geometry_ewkb(val)
row_data.append(encode_row(val))
csv_row = b'|'.join(row_data)
csv_row += b'\n'
yield csv_row
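# Illustrative note (added, not part of the original module): for a dataframe row
# whose 'name' column is "a" and whose geometry column holds a point, this
# generator yields pipe-delimited byte rows such as
#   b'a|0101000020E6100000...\n'
# (one row per dataframe row, geometry hex-encoded as EWKB), matching the
# DELIMITER '|' and NULL marker used by the COPY FROM statement in _copy_from.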
| bsd-3-clause | -1,297,629,793,458,669,800 | 39.282648 | 120 | 0.60445 | false | 3.875731 | false | false | false |
hwjworld/xiaodun-platform | lms/djangoapps/wechat/views.py | 1 | 47459 | import logging
import urllib
from collections import defaultdict
from lxml import html
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from edxmako.shortcuts import render_to_response, render_to_string
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
import django.utils
from courseware import grades
from courseware.access import has_access
from courseware.courses import (get_courses, get_course_with_access, sort_by_announcement, get_course_info_section,
get_course_by_id, get_course, course_image_url, get_course_about_section, get_courses_by_search)
import courseware.tabs as tabs
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor,mobi_toc_for_course
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from student.models import UserTestGroup, CourseEnrollment
from student.views import course_from_id, single_course_reverification_info
from util.cache import cache, cache_if_anonymous
from util.json_request import JsonResponse
from xblock.fragment import Fragment
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore, loc_mapper
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.course_module import CourseDescriptor
from xmodule.contentstore.content import StaticContent
import shoppingcart
from microsite_configuration import microsite
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
#@ensure_csrf_cookie
#@cache_if_anonymous
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
q = request.GET.get('query', '')
courses_aa = get_courses_by_search(request.META.get('HTTP_HOST'))
courses_list = []
if q != "":
for course in courses_aa:
if q in course.org or q in course.id or q in course.display_name_with_default:
courses_list.append(course)
else:
continue
else:
courses_list = courses_aa
courses = sort_by_announcement(courses_list)
return render_to_response("courseware/courses.html", {'courses': courses})
def return_fixed_courses(request, courses, user=AnonymousUser(), action=None):
default_length = 8
course_id = request.GET.get("course_id")
if course_id:
course_id = course_id.replace(".", '/')
try:
index_course = get_course_by_id(course_id)
course_index = (courses.index(index_course) + 1)
except:
course_index = 0
current_list = courses[course_index:]
if len(current_list) > default_length:
        current_list = current_list[:default_length]
course_list = []
for course in current_list:
try:
course_json = mobi_course_info(request, course, action)
course_json["registered"] = registered_for_course(course, user)
course_list.append(course_json)
except:
continue
return JsonResponse({"count": len(courses), "course-list": course_list})
def courses_list_handler(request, action):
"""
Return courses based on request params
"""
try:
user = request.user
except:
user = AnonymousUser()
if action not in ["homefalls", "all", "hot", "latest", "my", "search", "rolling"]:
return JsonResponse({"success": False, "errmsg": "not support other actions except homefalls all hot latest rolling and my"})
def get_courses_depend_action():
"""
        Return courses depending on the requested action
        action: [homefalls, all, hot, latest, rolling, my, search]
        homefalls: all courses
        hot: courses with the most attendees (threshold still undecided)
        latest: the newest courses
        rolling: a short list of courses for the rolling banner
        my: courses the current user is registered for
        search: courses matching the 'keyword' request parameter
        all: same as 'homefalls'
"""
courses = get_courses(user, request.META.get('HTTP_HOST'))
courses = sort_by_announcement(courses)
courses_list = []
if action == "latest":
default_count = 20
if len(courses) < default_count:
default_count = len(courses)
courses_list = courses[0:default_count]
elif action == "my":
# filter my registered courses
for course in courses:
if registered_for_course(course, user):
courses_list.append(course)
elif action == "rolling":
default_count = 5
courses_list = courses[0:default_count]
elif action == 'search':
keyword = request.GET.get("keyword")
if keyword:
for c in courses:
print (keyword in c.org or keyword in c.id or keyword in c.display_name_with_default)
if keyword in c.org or keyword in c.id or keyword in c.display_name_with_default:
courses_list.append(c)
else:
courses_list = courses
return courses_list
courses = get_courses_depend_action()
# get_courses_depend_action()
return return_fixed_courses(request, courses, user, action)
def _course_json(course, course_id):
locator = loc_mapper().translate_location(course_id, course.location, published=False, add_entry_if_missing=True)
is_container = course.has_children
result = {
'display_name': course.display_name,
'id': unicode(locator),
'category': course.category,
'is_draft': getattr(course, 'is_draft', False),
'is_container': is_container
}
if is_container:
result['children'] = [_course_json(child, course_id) for child in course.get_children()]
category = result['category']
if result['category'] == 'video':
result[category + '-url'] = "http://www.diandiyun.com/Clip_480_5sec_6mbps_h264.mp4"
elif result['category'] == 'problem':
result[category + '-url'] = "http://music.163.com/"
return result
def mobi_course_info(request, course, action=None):
course_logo = course_image_url(course)
imgurl = course_logo
if action in ["homefalls", "all", "hot", "latest", "my", "search"]:
try:
course_mini_info = course.id.split('/')
asset_location = StaticContent.compute_location(course_mini_info[0], course_mini_info[1], 'mobi-logo-img.jpg')
imgurl = StaticContent.get_url_path_from_location(asset_location)
except:
print "=========================fail load mobi image==============================="
print "We will load this info to log"
return {
"id": course.id.replace('/', '.'),
"name": course.display_name_with_default,
"logo": request.get_host() + course_image_url(course),
"org": course.display_org_with_default,
"course_number": course.display_number_with_default,
"start_date": course.start.strftime("%Y-%m-%d"),
"about": get_course_about_section(course, 'short_description'),
"category": course.category,
"imgurl": request.get_host() + imgurl
}
def _course_info_content(html_parsed):
"""
Constructs the HTML for the course info update, not including the header.
"""
if len(html_parsed) == 1:
# could enforce that update[0].tag == 'h2'
content = html_parsed[0].tail
else:
content = html_parsed[0].tail if html_parsed[0].tail is not None else ""
content += "\n".join([html.tostring(ele) for ele in html_parsed[1:]])
return content
def parse_updates_html_str(html_str):
try:
course_html_parsed = html.fromstring(html_str)
except:
        escaped = django.utils.html.escape(html_str)
course_html_parsed = html.fromstring(escaped)
course_upd_collection = []
if course_html_parsed.tag == 'section':
for index, update in enumerate(course_html_parsed):
if len(update) > 0:
content = _course_info_content(update)
                computed_id = len(course_html_parsed) - index
                payload = {
                    "id": computed_id,
"date": update.findtext("h2"),
"content": content
}
course_upd_collection.append(payload)
return {"updates": course_upd_collection}
def mobi_course_action(request, course_id, action):
try:
course_id_bak = course_id.replace('.', '/')
if action in ["updates", "handouts", "structure"]:
course = get_course_with_access(request.user, course_id_bak, 'see_exists')
user = request.user
if not user:
user = AnonymousUser()
registered = registered_for_course(course, user)
if action == "updates" and registered:
course_updates = get_course_info_section(request, course, action)
return JsonResponse(parse_updates_html_str(course_updates))
elif action == "handouts" and registered:
course_handouts = get_course_info_section(request, course, action)
return JsonResponse({"handouts": course_handouts})
elif action == "structure":
return JsonResponse(_course_json(course, course.location.course_id))
else:
raise Exception
else:
course = get_course_with_access(request.user, course_id_bak, 'see_exists')
return JsonResponse(mobi_course_info(request, course))
except:
return JsonResponse({"success": False, "errmsg": "access denied!"})
def render_accordion(request, course, chapter, section, field_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
toc = toc_for_course(user, request, course, chapter, section, field_data_cache)
context = dict([('toc', toc),
('course_id', course.id),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule):
"""
Get the xmodule.position's display item of an xmodule that has a position and
children. If xmodule has no position or is out of bounds, return the first child.
Returns None only if there are no children at all.
"""
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
pos = 0
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# Something is wrong. Default to first child
child = children[0]
else:
child = None
return child
def redirect_to_course_position(course_module):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
    If this isn't the user's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.id}
chapter = get_current_child(course_module)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.url_name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
# TODO: clearly this needs to be something other than the username
# should also be something that's not necessarily tied to a
# particular course
'password': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
}
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def index(request, course_id, chapter=None, section=None,
position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right chapter.
If neither chapter or section are specified, redirects to user's most recent
chapter, or the first chapter if this is the user's first visit.
Arguments:
- request : HTTP request
- course_id : course id (str: ORG/course/URL_NAME)
- chapter : chapter url_name (str)
- section : section url_name (str)
- position : position in module, eg of <sequential> module (str)
Returns:
- HTTPresponse
"""
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
course = get_course_with_access(user, course_id, 'load', depth=2)
staff_access = has_access(user, course, 'staff')
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
return redirect(reverse('about_course', args=[course.id]))
masq = setup_masquerade(request, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course.id]))
if chapter is None:
return redirect_to_course_position(course_module)
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'masquerade': masq,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:[email protected]/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_id),
}
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
if masq=='student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no chapter %s' % chapter)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
if masq=='student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no section %s' % section)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id, user, section_descriptor, depth=None)
section_module = get_module_for_descriptor(request.user,
request,
section_descriptor,
section_field_data_cache,
course_id,
position
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter
save_child_position(chapter_module, section)
context['fragment'] = section_module.render('student_view')
context['section_title'] = section_descriptor.display_name_with_default
else:
# section is none, so display a message
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user
raise Http404
prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('courseware/courseware.html', context)
except Exception as e:
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception("Error in index view: user={user}, course={course},"
" chapter={chapter} section={section}"
"position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html',
{'staff_access': staff_access,
'course': course})
except:
# Let the exception propagate, relying on global config to at
# at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def mobi_index(request, course_id, chapter=None, section=None,
position=None):
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
course = get_course_with_access(user, course_id, 'load', depth=2)
staff_access = has_access(user, course, 'staff')
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
return redirect(reverse('about_course', args=[course.id]))
masq = setup_masquerade(request, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course.id]))
if chapter is None:
return redirect_to_course_position(course_module)
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'masquerade': masq,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:[email protected]/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_id),
}
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
if masq=='student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no chapter %s' % chapter)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
if masq=='student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no section %s' % section)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id, user, section_descriptor, depth=None)
section_module = get_module_for_descriptor(request.user,
request,
section_descriptor,
section_field_data_cache,
course_id,
position
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter
save_child_position(chapter_module, section)
context['fragment'] = section_module.render('mobi_student_view')
context['section_title'] = section_descriptor.display_name_with_default
else:
# section is none, so display a message
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user
raise Http404
prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('wechat/mobi_courseware.html', context)
except Exception as e:
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception("Error in index view: user={user}, course={course},"
" chapter={chapter} section={section}"
"position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html',
{'staff_access': staff_access,
'course': course})
except:
# Let the exception propagate, relying on global config to at
# at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
def mobi_directory(request, course_id):
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
course = get_course_with_access(user, course_id, 'load', depth=2)
staff_access = has_access(user, course, 'staff')
registered = registered_for_course(course, user)
motoc = mobi_toc_for_course(user, request, course)
show_list = list()
for toc in motoc:
videolist = toc['show_url'][0]
show_list.append(videolist)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
return redirect(reverse('about_course', args=[course.id]))
masq = setup_masquerade(request, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course.id]))
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': mobi_render_accordion(request, course),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'masquerade': masq,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:[email protected]/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_id),
'show_url': show_list[0],
}
result = render_to_response('wechat/mobi_directory.html', context)
except Exception as e:
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception("Error in index view: user={user}, course={course},".format(
user=user,
course=course,))
try:
result = render_to_response('courseware/courseware-error.html',
{'staff_access': staff_access,
'course': course})
except:
# Let the exception propagate, relying on global config to at
# at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
def mobi_render_accordion(request, course):
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
toc = mobi_toc_for_course(user, request, course)
context = dict([('toc', toc),
('course_id', course.id),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)] + template_imports.items())
return render_to_string('wechat/mobi_accordion.html', context)
@ensure_csrf_cookie
def jump_to_id(request, course_id, module_id):
"""
This entry point allows for a shorter version of a jump to where just the id of the element is
passed in. This assumes that id is unique within the course_id namespace
"""
course_location = CourseDescriptor.id_to_location(course_id)
items = modulestore().get_items(
Location('i4x', course_location.org, course_location.course, None, module_id),
course_id=course_id
)
if len(items) == 0:
raise Http404("Could not find id = {0} in course_id = {1}. Referer = {2}".
format(module_id, course_id, request.META.get("HTTP_REFERER", "")))
if len(items) > 1:
log.warning("Multiple items found with id = {0} in course_id = {1}. Referer = {2}. Using first found {3}...".
format(module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.url()))
return jump_to(request, course_id, items[0].location.url())
@ensure_csrf_cookie
def jump_to(request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
# Complain if the location isn't valid
try:
location = Location(location)
except InvalidLocationError:
raise Http404("Invalid location")
# Complain if there's not data for this location
try:
(course_id, chapter, section, position) = path_to_location(modulestore(), course_id, location)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(location))
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(location))
# choose the appropriate view (and provide the necessary args) based on the
# args provided by the redirect.
# Rely on index to do all error handling and access control.
if chapter is None:
return redirect('courseware', course_id=course_id)
elif section is None:
return redirect('courseware_chapter', course_id=course_id, chapter=chapter)
elif position is None:
return redirect('courseware_section', course_id=course_id, chapter=chapter, section=section)
else:
return redirect('courseware_position', course_id=course_id, chapter=chapter, section=section, position=position)
@ensure_csrf_cookie
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
masq = setup_masquerade(request, staff_access) # allow staff to toggle masquerade on info page
reverifications = fetch_reverify_banner_info(request, course_id)
context = {
'request': request,
'course_id': course_id,
'cache': None,
'course': course,
'staff_access': staff_access,
'masquerade': masq,
'reverifications': reverifications,
}
return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
def static_tab(request, course_id, tab_slug):
"""
Display the courses tab with the given name.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
tab = tabs.get_static_tab_by_slug(course, tab_slug)
if tab is None:
raise Http404
contents = tabs.get_static_tab_contents(
request,
course,
tab
)
if contents is None:
raise Http404
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/static_tab.html',
{'course': course,
'tab': tab,
'tab_contents': contents,
'staff_access': staff_access, })
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/syllabus.html', {'course': course,
'staff_access': staff_access, })
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated():
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
@ensure_csrf_cookie
@cache_if_anonymous
def course_about(request, course_id):
if microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
):
raise Http404
course = get_course_with_access(request.user, course_id, 'see_exists')
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
show_courseware_link = (has_access(request.user, course, 'load') or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
# Note: this is a flow for payment for course registration, not the Verified Certificate flow.
registration_price = 0
in_cart = False
reg_then_add_to_cart_link = ""
if (settings.FEATURES.get('ENABLE_SHOPPING_CART') and
settings.FEATURES.get('ENABLE_PAID_COURSE_REGISTRATION')):
registration_price = CourseMode.min_course_price_for_currency(course_id,
settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
if request.user.is_authenticated():
cart = shoppingcart.models.Order.get_cart_for_user(request.user)
in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_id)
reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
reg_url=reverse('register_user'), course_id=course.id)
# see if we have already filled up all allowed enrollments
is_course_full = CourseEnrollment.is_course_full(course)
return render_to_response('courseware/course_about.html',
{'course': course,
'registered': registered,
'course_target': course_target,
'registration_price': registration_price,
'in_cart': in_cart,
'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
'show_courseware_link': show_courseware_link,
'is_course_full': is_course_full})
@ensure_csrf_cookie
@cache_if_anonymous
def mktg_course_about(request, course_id):
"""
This is the button that gets put into an iframe on the Drupal site
"""
try:
course = get_course_with_access(request.user, course_id, 'see_exists')
except (ValueError, Http404) as e:
# if a course does not exist yet, display a coming
# soon button
return render_to_response(
'courseware/mktg_coming_soon.html', {'course_id': course_id}
)
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
allow_registration = has_access(request.user, course, 'enroll')
show_courseware_link = (has_access(request.user, course, 'load') or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
course_modes = CourseMode.modes_for_course(course.id)
return render_to_response(
'courseware/mktg_course_about.html',
{
'course': course,
'registered': registered,
'allow_registration': allow_registration,
'course_target': course_target,
'show_courseware_link': show_courseware_link,
'course_modes': course_modes,
}
)
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
def progress(request, course_id, student_id=None):
"""
Wraps "_progress" with the manual_transaction context manager just in case
there are unanticipated errors.
"""
with grades.manual_transaction():
return _progress(request, course_id, student_id)
def _progress(request, course_id, student_id):
"""
Unwrapped version of "progress".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, course_id, 'load', depth=None)
staff_access = has_access(request.user, course, 'staff')
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
student = User.objects.get(id=int(student_id))
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
courseware_summary = grades.progress_summary(student, request, course)
grade_summary = grades.grade(student, request, course)
if courseware_summary is None:
#This means the student didn't have access to the course (which the instructor requested)
raise Http404
context = {
'course': course,
'courseware_summary': courseware_summary,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
'reverifications': fetch_reverify_banner_info(request, course_id)
}
with grades.manual_transaction():
response = render_to_response('courseware/progress.html', context)
return response
def fetch_reverify_banner_info(request, course_id):
"""
Fetches needed context variable to display reverification banner in courseware
"""
reverifications = defaultdict(list)
user = request.user
if not user.id:
return reverifications
enrollment = CourseEnrollment.get_or_create_enrollment(request.user, course_id)
course = course_from_id(course_id)
info = single_course_reverification_info(user, course, enrollment)
if info:
reverifications[info.status].append(info)
return reverifications
@login_required
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
try:
student = User.objects.get(username=student_username)
student_module = StudentModule.objects.get(course_id=course_id,
module_state_key=location,
student_id=student.id)
except User.DoesNotExist:
return HttpResponse(escape("User {0} does not exist.".format(student_username)))
except StudentModule.DoesNotExist:
return HttpResponse(escape("{0} has never accessed problem {1}".format(student_username, location)))
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
# If no history records exist, let's force a save to get history started.
if not history_entries:
student_module.save()
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
context = {
'history_entries': history_entries,
'username': student.username,
'location': location,
'course_id': course_id
}
return render_to_response('courseware/submission_history.html', context)
def show_video(request):
showurl = request.GET.get("showurl","")
course_id = request.GET.get("course_id")
return render_to_response('wechat/mobi_video.html',{"showurl":showurl, "course_id": course_id}) | agpl-3.0 | -126,619,783,822,553,040 | 38.616027 | 133 | 0.61276 | false | 4.192491 | false | false | false |
sillvan/hyperspy | hyperspy/drawing/_markers/horizontal_line_segment.py | 1 | 3320 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The Hyperspy developers
#
# This file is part of Hyperspy.
#
# Hyperspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hyperspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hyperspy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class HorizontalLineSegment(MarkerBase):
"""Horizontal line segment marker that can be added to the signal figure
Parameters
    ----------
x1: array or float
The position of the start of the line segment in x.
If float, the marker is fixed.
If array, the marker will be updated when navigating. The array should
        have the same dimensions in the navigation axes.
x2: array or float
The position of the end of the line segment in x.
see x1 arguments
y: array or float
        The position of the line segment in y.
see x1 arguments
kwargs:
        Keyword arguments for valid axvline properties (i.e. recognized by
        mpl.plot).
Example
-------
>>> import numpy as np
>>> im = signals.Image(np.zeros((100, 100)))
>>> m = utils.plot.markers.horizontal_line_segment(
>>> x1=20, x2=70, y=70, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m)
"""
def __init__(self, x1, x2, y, **kwargs):
MarkerBase.__init__(self)
lp = {}
lp['color'] = 'black'
lp['linewidth'] = 1
self.marker_properties = lp
self.set_data(x1=x1, x2=x2, y1=y)
self.set_marker_properties(**kwargs)
def update(self):
if self.auto_update is False:
return
self._update_segment()
def plot(self):
if self.ax is None:
raise AttributeError(
"To use this method the marker needs to be first add to a " +
"figure using `s._plot.signal_plot.add_marker(m)` or " +
"`s._plot.navigator_plot.add_marker(m)`")
self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
self._update_segment()
self.marker.set_animated(True)
try:
self.ax.hspy_fig._draw_animated()
except:
pass
def _update_segment(self):
segments = self.marker.get_segments()
segments[0][0, 1] = self.get_data_position('y1')
segments[0][1, 1] = segments[0][0, 1]
if self.get_data_position('x1') is None:
segments[0][0, 0] = plt.getp(self.marker.axes, 'xlim')[0]
else:
segments[0][0, 0] = self.get_data_position('x1')
if self.get_data_position('x2') is None:
segments[0][1, 0] = plt.getp(self.marker.axes, 'xlim')[1]
else:
segments[0][1, 0] = self.get_data_position('x2')
self.marker.set_segments(segments)
| gpl-3.0 | -754,029,751,437,374,500 | 33.947368 | 78 | 0.614157 | false | 3.656388 | false | false | false |
giacomov/lclike | lclike/duration_computation.py | 1 | 12141 | __author__ = 'giacomov'
# !/usr/bin/env python
# move the line above (as '#!/usr/bin/env python', with no space) to the top of the file to run the script without invoking 'python' explicitly
# importing modules
import numpy as np
# can't use 'show' inside the farm (no display), hence the non-interactive 'Agg' backend below
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib import gridspec
import os
import argparse
import decayLikelihood
import warnings
####################################################################
mycmd = argparse.ArgumentParser()  # argument parser instance
mycmd.add_argument('triggername', help="The name of the GRB in YYMMDDXXX format (ex. bn080916009)")
mycmd.add_argument('redshift', help="Redshift for object.")
mycmd.add_argument('function', help="Function to model. (ex. crystalball2, band)")
mycmd.add_argument('directory', help="Directory containing the file produced by gtburst")
if __name__ == "__main__":
args = mycmd.parse_args()
os.chdir(args.directory)
##############################################################################
textfile = os.path.join(args.directory, '%s_res.txt' % (args.triggername))
tbin = np.recfromtxt(textfile, names=True)
textfile = os.path.join(args.directory, '%s_MCsamples_%s.txt' % (args.triggername, args.function))
samples = np.recfromtxt(textfile, names=True)
# function for returning 1 and 2 sigma errors from sample median
def getErr(sampleArr):
# compute sample percentiles for 1 and 2 sigma
m, c, p = np.percentile(sampleArr, [16, 50, 84])
# print("%.3f -%.3f +%.3f" %(c,m-c,p-c)) median, minus, plus
m2, c2, p2 = np.percentile(sampleArr, [3, 50, 97])
return m, c, p, m2, c2, p2
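    # Example usage of getErr (added for illustration; `samples_1d` stands for any
    # 1-D array of MC samples, e.g. the characteristic times computed below):
    #   m, c, p, m2, c2, p2 = getErr(samples_1d)
    #   print("%.3f -%.3f +%.3f" % (c, m - c, p - c))
    # c (== c2) is the sample median, (m, p) bracket the 16th-84th percentiles
    # (~1 sigma) and (m2, p2) the 3rd-97th percentiles (~2 sigma).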
# prepare for plotting and LOOP
t = np.logspace(0, 4, 100)
t = np.append(t, np.linspace(0, 1, 10))
t.sort()
t = np.unique(t)
print('NUMBER OF times to iterate: %s' % (len(t)))
x = decayLikelihood.DecayLikelihood()
if args.function == 'crystalball2':
        crystal = decayLikelihood.CrystalBall2()  # decay-shape instance (CrystalBall2) used by DecayLikelihood
x.setDecayFunction(crystal)
# CrystalBall DiffFlux####################################################
Peak = np.zeros(samples.shape[0])
ePeak = np.zeros(samples.shape[0])
tPeak = np.zeros(samples.shape[0])
tePeak = np.zeros(samples.shape[0])
print('ENTERING samples LOOP')
# mu,sigma,decayIndex, and N
for i, parameters in enumerate(samples):
x.decayFunction.setParameters(*parameters)
# NORMALIZATION IS THE FLUX AT THE PEAK
pB = parameters[3] # decay time is independent of scale # (y*.001) # scale =0.001, for all xml files
fBe = pB / np.e
# t = (fBe/N)**(-1/a) defined to be 1
mu = parameters[0]
tP = mu
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
teP = mu + (fBe / parameters[3]) ** (
-1 / parameters[2]) # sometimes 'RuntimeWarning: overflow encountered in double_scalars'
except Warning:
print('RuntimeWarning Raised! mu,sigma,decayIndex,and N:', parameters)
teP = parameters[0] + (fBe / parameters[3]) ** (-1 / parameters[2])
Peak[i] = pB
ePeak[i] = fBe
# redshift correcting t/(1+z)
tPeak[i] = tP / (1 + float(args.redshift)) ################################
tePeak[i] = teP / (1 + float(args.redshift)) ################################
elif args.function == 'band':
        band = decayLikelihood.DecayBand()  # decay-shape instance (Band) used by DecayLikelihood
x.setDecayFunction(band)
Peak = np.zeros(samples.shape[0])
ePeak = np.zeros(samples.shape[0]) # fractional brightness used in calcuating char-time, but not needed otherwise
tPeak = np.zeros(samples.shape[0])
tePeak = np.zeros(samples.shape[0]) # characteristic time
T05 = np.zeros(samples.shape[0])
T90 = np.zeros(samples.shape[0])
T95 = np.zeros(samples.shape[0])
T25 = np.zeros(samples.shape[0])
T50 = np.zeros(samples.shape[0])
T75 = np.zeros(samples.shape[0])
print('ENTERING samples LOOP')
# mu,sigma,decayIndex, and N
for i, parameters in enumerate(samples):
x.decayFunction.setParameters(*parameters)
tc = band.getCharacteristicTime() # get the characteristic time.
# T50/T90 TAKING TOO LONG (1/4)
# t90, t05, t95 = band.getTsomething( 90 ) # if the argument is 90, returns the T90 as well as the T05 and the T95. If the argument is 50, returns the T50 as well as the T25 and T75, and so on.
# t50, t25, t75 = band.getTsomething( 50 )
tp, fp = band.getPeakTimeAndFlux() # returns the time of the peak, as well as the peak flux
tePeak[i] = tc / (1 + float(args.redshift)) ################################
tPeak[i] = tp / (1 + float(args.redshift))
Peak[i] = fp
# T50/T90 TAKING TOO LONG (2/4)
# T05[i] = t05/(1+float(args.redshift))
# T90[i] = t90/(1+float(args.redshift))
# T95[i] = t95/(1+float(args.redshift))
# T50/T90 TAKING TOO LONG (3/4)
# T25[i] = t25/(1+float(args.redshift))
# T50[i] = t50/(1+float(args.redshift))
# T75[i] = t75/(1+float(args.redshift))
# Defining sigma bands
print('ENTERING Percentile LOOP')
upper = np.zeros(t.shape[0])
lower = np.zeros(t.shape[0])
upper2 = np.zeros(t.shape[0])
lower2 = np.zeros(t.shape[0])
meas = np.zeros(t.shape[0])
fluxMatrix = np.zeros([samples.shape[0], t.shape[0]])
for i, s in enumerate(samples):
x.decayFunction.setParameters(*s)
fluxes = map(x.decayFunction.getDifferentialFlux, t)
fluxMatrix[i, :] = np.array(fluxes)
for i, tt in enumerate(t):
allFluxes = fluxMatrix[:, i]
m, p = np.percentile(allFluxes, [16, 84])
lower[i] = m
upper[i] = p
m2, p2 = np.percentile(allFluxes, [2.5, 97.5])
lower2[i] = m2
upper2[i] = p2
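    # Note (added): lower/upper hold the pointwise 16th-84th percentile (~1 sigma)
    # flux band over the time grid t, while lower2/upper2 hold the 2.5th-97.5th
    # percentile (~2 sigma) band; both bands are drawn below with fill_between.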
wdir = '%s' % (args.directory)
# save TXT files instead of .npy
placeFile = os.path.join(wdir, "%s_tBrightness_%s" % (args.triggername, args.function))
with open(placeFile, 'w+') as f:
f.write("Peak tPeak ePeak tePeak\n")
for i, s in enumerate(Peak):
f.write("%s %s %s %s\n" % (Peak[i], tPeak[i], ePeak[i], tePeak[i]))
# CALCULATING T50/T90 TAKES TOO LONG
# T50/T90 TAKING TOO LONG (4/4)
# if args.function == 'band':
# #compute percentiles for 1 sigma
# m90,c90,p90 = np.percentile(T90,[16,50,84])
# m50,c50,p50 = np.percentile(T50,[16,50,84])
# #compute percentiles for 1 and 2 sigma
# #90m,90c,90p,90m2,90c2,90p2 = getErr(T90)
# #50m,50c,50p,50m2,50c2,50p2 = getErr(T50)
# #print("%.3f -%.3f +%.3f" %(c,m-c,p-c)) median, minus, plus
#
# placeFile=os.path.join(wdir,"%s_t90_t50_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t90 90minus 90plus t50 50minus 50plus\n")
# for i,s in enumerate(T90):
# f.write("%s %s %s %s %s %s\n" % (m90,m90-c90,p90-c90,c50,m50-c50,p50-c50)) #c,m-c,p-c
#
# placeFile=os.path.join(wdir,"%s_samplesT90_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t90 t05 t95\n")
# for i,s in enumerate(T90):
# f.write("%s %s %s\n" % (T90[i],T05[i],T95[i]))
# placeFile=os.path.join(wdir,"%s_samplesT50_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t50 t25 t25\n")
# for i,s in enumerate(T50):
# f.write("%s %s %s\n" % (T50[i],T25[i],T75[i]))
# compute char-time percentiles for 1 and 2 sigma
m, c, p, m2, c2, p2 = getErr(tePeak)
# saves txt file
wkdir = '%s' % (args.directory)
fileDir = os.path.join(wkdir, '%s_timeRes_%s' % (args.triggername, args.function))
with open(fileDir, 'w+') as f:
f.write('%s %s %s\n' % ('median', 'minus', 'plus'))
f.write('%s %s %s\n' % (c, m - c, p - c))
# PLOTTING BINS AND SIGMA BAND
print("PLOTTING...")
fig = plt.figure()
# median is your "x"
# Y is your "y"
# DY is the array containing the errors
# DY==0 filters only the zero error
data = tbin
# redshift correction /(1+args.redshif)
median = (data["tstart"] + data["tstop"]) / 2 / (1 + float(args.redshift))
start = data['tstart'] / (1 + float(args.redshift)) ##
stop = data['tstop'] / (1 + float(args.redshift)) ##
y = data["photonFlux"]
Dy = data["photonFluxError"]
try:
y = np.core.defchararray.replace(y, "<", "", count=None) # runs through array and removes strings
except:
print('No Upper-Limits Found in %s.' % (args.triggername))
try:
Dy = np.core.defchararray.replace(Dy, "n.a.", "0",
count=None) ## 0 error is nonphysical, and will be checked for in plotting
except:
print('No 0-Error Found in %s.' % (args.triggername))
bar = 0.5
color = "blue"
Y = np.empty(0, dtype=float) # makes empty 1-D array for float values
for i in y:
Y = np.append(Y, float(i))
DY = np.empty(0, dtype=float)
for i in Dy:
DY = np.append(DY, float(i))
plt.clf()
if (DY > 0).sum() > 0: # if sum() gives a non-zero value then there are error values
plt.errorbar(median[DY > 0], Y[DY > 0],
xerr=[median[DY > 0] - start[DY > 0], stop[DY > 0] - median[DY > 0]],
yerr=DY[DY > 0], ls='None', marker='o', mfc=color, mec=color, ecolor=color, lw=2, label=None)
if (DY == 0).sum() > 0:
plt.errorbar(median[DY == 0], Y[DY == 0],
xerr=[median[DY == 0] - start[DY == 0], stop[DY == 0] - median[DY == 0]],
yerr=[bar * Y[DY == 0], 0.0 * Y[DY == 0]], lolims=True, ls='None', marker='', mfc=color, mec=color,
ecolor=color, lw=2, label=None)
plt.suptitle('%s photonFlux per Time' % (args.triggername))
plt.xlabel('Rest Frame Time(s)')
plt.ylabel('Photon Flux')
plt.xscale('symlog')
plt.yscale('log')
plt.grid(True)
if args.function == 'crystalball2':
SCALE = 0.001
elif args.function == 'band':
SCALE = 1.0 # 0.1 # shouldn't need a scale anymore for Band function
ylo = 1e-7 # min(lower2*SCALE)*1e-1 # CANT GET THIS TO WORK YET DYNAMICALLY
yup = max(upper2 * SCALE) * 10
plt.ylim([ylo, yup])
# correcting for redshift t/(1+args.redshift)
plt.fill_between(t / (1 + float(args.redshift)), lower * SCALE, upper * SCALE, alpha=0.5, color='blue')
plt.fill_between(t / (1 + float(args.redshift)), lower2 * SCALE, upper2 * SCALE, alpha=0.3, color='green')
# y = map(x.decayFunction.getDifferentialFlux, t) # maps infinitesimal values of flux at time t to y
# raw_input("Press ENTER")
# PowerLaw
# plt.plot(t,,'o')
# saves plots
wdir = '%s' % (args.directory)
imsave = os.path.join(wdir, '%s_objFit_%s' % (args.triggername, args.function))
plt.savefig(imsave + '.png')
# histograms of 1/e and save
print("Making histograms")
fig = plt.figure(figsize=(10, 6))
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
bins = np.linspace(min(tePeak), np.max(tePeak), 100)
ax0 = plt.subplot(gs[0])
ax0.hist(tePeak, bins, normed=True)
plt.title('1/e (min to medx2)')
plt.xlabel('1/e time (s)')
plt.xlim([min(tePeak), np.median(tePeak) * 2])
ax1 = plt.subplot(gs[1])
ax1.hist(tePeak, bins, normed=True)
plt.title('1/e (min to max)')
plt.xlabel('time (s)')
plt.tight_layout()
imsave = os.path.join(wdir, '%s_hist_%s' % (args.triggername, args.function))
plt.savefig(imsave + '.png')
print("Finished Potting/Saving!")
| bsd-3-clause | -413,746,837,952,500,500 | 35.459459 | 205 | 0.567581 | false | 3.026171 | false | false | false |
alingse/jsoncsv | jsoncsv/dumptool.py | 1 | 3539 | # coding=utf-8
# author@alingse
# 2015.10.09
import json
import unicodecsv as csv
import xlwt
class Dump(object):
def __init__(self, fin, fout, **kwargs):
self.fin = fin
self.fout = fout
self.initialize(**kwargs)
def initialize(self, **kwargs):
pass
def prepare(self):
pass
    def dump_file(self):
raise NotImplementedError
def on_finish(self):
pass
def dump(self):
self.prepare()
self.dump_file()
self.on_finish()
class ReadHeadersMixin(object):
@staticmethod
def load_headers(fin, read_row=None, sort_type=None):
headers = set()
datas = []
# read
if not read_row or read_row < 1:
read_row = -1
for line in fin:
obj = json.loads(line)
headers.update(obj.keys())
datas.append(obj)
read_row -= 1
if not read_row:
break
# TODO: add some sort_type here
headers = sorted(list(headers))
return (list(headers), datas)
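    # Note (added): only the first `read_row` lines (or all lines when `read_row`
    # is falsy) are parsed here to collect the header set; the parsed objects are
    # returned as `datas` so they are not re-read, and any remaining input lines
    # are streamed later in DumpExcel.dump_file.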
class DumpExcel(Dump, ReadHeadersMixin):
def initialize(self, **kwargs):
super(DumpExcel, self).initialize(**kwargs)
self._read_row = kwargs.get('read_row')
self._sort_type = kwargs.get('sort_type')
def prepare(self):
headers, datas = self.load_headers(self.fin, self._read_row,
self._sort_type)
self._headers = headers
self._datas = datas
def write_headers(self):
raise NotImplementedError
def write_obj(self):
raise NotImplementedError
def dump_file(self):
self.write_headers()
for obj in self._datas:
self.write_obj(obj)
for line in self.fin:
obj = json.loads(line)
self.write_obj(obj)
class DumpCSV(DumpExcel):
def initialize(self, **kwargs):
super(DumpCSV, self).initialize(**kwargs)
self.csv_writer = None
def write_headers(self):
self.csv_writer = csv.DictWriter(self.fout, self._headers)
self.csv_writer.writeheader()
def write_obj(self, obj):
patched_obj = {
key: self.patch_value(value)
for key, value in obj.items()
}
self.csv_writer.writerow(patched_obj)
def patch_value(self, value):
if value in (None, {}, []):
return ""
return value
class DumpXLS(DumpExcel):
def initialize(self, **kwargs):
super(DumpXLS, self).initialize(**kwargs)
self.sheet = kwargs.get('sheet', 'Sheet1')
self.wb = xlwt.Workbook(encoding='utf-8')
self.ws = self.wb.add_sheet(self.sheet)
self.row = 0
        self.column = 0
    def write_headers(self):
        for head in self._headers:
            self.ws.write(self.row, self.column, head)
            self.column += 1
        self.row += 1
    def write_obj(self, obj):
        self.column = 0
        for head in self._headers:
            value = obj.get(head)
            # patch: xlwt cannot serialize a dict, so store empty dicts as the literal string "{}"
            if value in ({},):
                value = "{}"
            self.ws.write(self.row, self.column, value)
            self.column += 1
self.row += 1
def on_finish(self):
self.wb.save(self.fout)
def dump_excel(fin, fout, klass, **kwargs):
if not isinstance(klass, type) or not issubclass(klass, DumpExcel):
raise ValueError("unknow dumpexcel type")
dump = klass(fin, fout, **kwargs)
dump.dump()
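# Minimal usage sketch (added; the file names and read_row value are assumptions):
#   with open('input.jsonl', 'rb') as fin, open('out.csv', 'wb') as fout:
#       dump_excel(fin, fout, DumpCSV, read_row=100)
#   with open('input.jsonl', 'rb') as fin, open('out.xls', 'wb') as fout:
#       dump_excel(fin, fout, DumpXLS, read_row=100, sheet='Sheet1')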
| apache-2.0 | 5,747,975,556,471,629,000 | 22.912162 | 71 | 0.552133 | false | 3.817691 | false | false | false |
DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/examples/openmdao.examples.bar3simulation/openmdao/examples/bar3simulation/bar3_optimization.py | 1 | 4444 | """
bar3_optimization.py - Top level assembly for the example problem.
"""
# Optimize the bar3 design using the CONMIN optimizer.
# pylint: disable-msg=E0611,F0401
from openmdao.lib.drivers.api import CONMINdriver
from openmdao.main.api import Assembly
from openmdao.main.datatypes.api import Float
# from openmdao.examples.bar3simulation.bar3 import Bar3Truss
from openmdao.examples.bar3simulation.bar3_wrap_f import Bar3Truss
class Bar3Optimization(Assembly):
""" Optimization of a three bar truss. """
# set up interface to the framework
# pylint: disable-msg=E1101
# Constraint allowables
bar1_stress_allowable = Float(20., iotype='in',
units='lb/(inch*inch)',
desc='Stress allowable in bar 1')
bar2_stress_allowable = Float(20., iotype='in',
units='lb/(inch*inch)',
desc='Stress allowable in bar 2')
bar3_stress_allowable = Float(20., iotype='in',
units='lb/(inch*inch)',
desc='Stress allowable in bar 3')
displacement_x_dir_allowable = Float(0.20, iotype='in', units='inch',
desc='Displacement limitation in x-direction')
displacement_y_dir_allowable = Float(0.05, iotype='in', units='inch',
desc='Displacement limitation in y-direction')
frequency_allowable = Float(14.1421, iotype='in', units='Hz',
desc='Frequency limitation in Hertz')
def configure(self):
# Create CONMIN Optimizer instance
self.add('driver', CONMINdriver())
# Create Bar3_Truss component instances
self.add('bar3_truss', Bar3Truss())
self.driver.workflow.add('bar3_truss')
# CONMIN Flags
self.driver.iprint = 0
self.driver.itmax = 30
self.driver.fdch = .00001
self.driver.fdchm = .00001
self.driver.ct = -.001
# CONMIN Objective
self.driver.add_objective('bar3_truss.weight')
# CONMIN Design Variables
for param, low, high in zip(['bar3_truss.bar1_area',
'bar3_truss.bar2_area',
'bar3_truss.bar3_area'],
[0.001, 0.001, 0.001],
[10000.0, 10000.0, 10000.0]):
self.driver.add_parameter(param, low=low, high=high)
# CONMIN Constraints
constraints = [
'abs(bar3_truss.bar1_stress/bar1_stress_allowable) <= 1.0',
'abs(bar3_truss.bar2_stress/bar2_stress_allowable) <= 1.0',
'abs(bar3_truss.bar3_stress/bar3_stress_allowable) <= 1.0',
'abs(bar3_truss.displacement_x_dir/displacement_x_dir_allowable) <= 1.0',
'abs(bar3_truss.displacement_y_dir/displacement_y_dir_allowable) <= 1.0',
'frequency_allowable**2 <= bar3_truss.frequency**2']
map(self.driver.add_constraint, constraints)
if __name__ == "__main__": # pragma: no cover
import time
# pylint: disable-msg=E1101
opt_bar3 = Bar3Optimization()
def prz(title):
""" Print before and after"""
print '---------------------------------'
print title
print '---------------------------------'
print 'Bar3: Weight = ', opt_bar3.bar3_truss.weight
print 'DV1: Bar1_area = ', opt_bar3.bar3_truss.bar1_area
print 'DV2: Bar2_area = ', opt_bar3.bar3_truss.bar2_area
print 'Dv3: Bar3_area = ', opt_bar3.bar3_truss.bar3_area
print '---------------------------------'
print 'Con1: Bar1_stress = ', opt_bar3.bar3_truss.bar1_stress
print 'Con2: Bar2_stress = ', opt_bar3.bar3_truss.bar2_stress
print 'Con3: Bar3_stress = ', opt_bar3.bar3_truss.bar3_stress
print 'Con4: Displ_u = ', opt_bar3.bar3_truss.displacement_x_dir
print 'Con5: Displ_v = ', opt_bar3.bar3_truss.displacement_y_dir
print 'Con6: Frequency = ', opt_bar3.bar3_truss.frequency
print '\n'
opt_bar3.bar3_truss.run()
prz('Old Design')
time1 = time.time()
opt_bar3.run()
prz('New Design')
print "CONMIN Iterations: ", opt_bar3.driver.iter_count
print ""
print "Elapsed time: ", time.time() - time1
# end bar3_optimization.py
| mit | 6,631,293,098,315,025,000 | 36.982906 | 87 | 0.560981 | false | 3.524187 | false | false | false |
hall1467/wikidata_usage_tracking | python_analysis_scripts/edit_analyses/session_stats.py | 1 | 2861 | """
Selects number of distinct revisions.
Usage:
session_stats (-h|--help)
session_stats <input> <output>
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<input> Path to input file to process.
<output> Where output will be written
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import logging
import operator
import sys
import mysqltsv
from collections import defaultdict
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
input_file = mysqltsv.Reader(
open(args['<input>'],'rt'), headers=True,
types=[str, str, str, str, str, int, str, str, str, str, str, str,
str, str])
output_file = open(args['<output>'], "w")
verbose = args['--verbose']
run(input_file, output_file, verbose)
def run(input_file, output_file, verbose):
sessions = defaultdict(lambda: defaultdict(int))
bot_sessions = defaultdict(lambda: defaultdict(int))
human_sessions = defaultdict(lambda: defaultdict(int))
revision_namespaces = defaultdict(int)
bot_revisions_sum = 0
human_revisions_sum = 0
for i, line in enumerate(input_file):
sessions[line["user"]][line["session_start"]] = 1
revision_namespaces[line["namespace"]] += 1
if line["edit_type"] == 'bot':
bot_revisions_sum += 1
bot_sessions[line["user"]][line["session_start"]] = 1
else:
human_revisions_sum += 1
human_sessions[line["user"]][line["session_start"]] = 1
if verbose and i % 10000 == 0 and i != 0:
sys.stderr.write("Revisions analyzed: {0}\n".format(i))
sys.stderr.flush()
session_sum = 0
for user in sessions:
for session_start in sessions[user]:
session_sum += 1
bot_session_sum = 0
for user in bot_sessions:
for session_start in bot_sessions[user]:
bot_session_sum += 1
human_session_sum = 0
for user in human_sessions:
for session_start in human_sessions[user]:
human_session_sum += 1
output_file.write("Sessions: {0}\n".format(session_sum))
output_file.write("Bot sessions: {0}\n".format(bot_session_sum))
output_file.write("Bot revisions: {0}\n".format(bot_revisions_sum))
output_file.write("Human sessions: {0}\n".format(human_session_sum))
output_file.write("Human revisions: {0}\n".format(human_revisions_sum))
output_file.write("Revision namespaces: {0}\n".format(revision_namespaces))
main()
| mit | 18,738,158,364,081,736 | 26.509615 | 79 | 0.606082 | false | 3.774406 | false | false | false |
StevenCHowell/code_sas_modeling | sas_modeling/calc_i0.py | 1 | 13051 | #!/usr/bin/env python
# coding:utf-8
'''
Author: Steven C. Howell --<[email protected]>
Purpose: calculating the Guinier fit
Created: 12/21/2016
00000000011111111112222222222333333333344444444445555555555666666666677777777778
12345678901234567890123456789012345678901234567890123456789012345678901234567890
'''
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from scipy import optimize
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
def fit_line_v0(x, y, dy):
'''
Fit data for y = mx + b
return m and b
http://scipy-cookbook.readthedocs.io/items/FittingData.html#id2
error estimate seems reasonable compared to input data
'''
w = 1 / dy
# define our (line) fitting function
fitfunc = lambda p, x: p[0] * x + p[1]
errfunc = lambda p, x, y, w: (y - fitfunc(p, x)) * w
# use the last two points to guess the initial values
m_guess = (y[-2] - y[-1]) / (x[-2] - x[-1]) # use 2 points to guess slope
b_guess = y[-1] - m_guess * x[-1] # gues the y-intercept from 2 points
p_guess = [m_guess, b_guess]
out = optimize.leastsq(errfunc, p_guess, args=(x, y, w), full_output=1)
p_final = out[0]
m = p_final[0]
b = p_final[1]
# from the docs page:
# cov_x : ndarray
# Uses the fjac and ipvt optional outputs to construct an estimate
# of the jacobian around the solution. None if a singular matrix
# encountered (indicates very flat curvature in some direction).
# This matrix must be multiplied by the residual variance to get the
# covariance of the parameter estimates – see curve_fit.
#
# curve_fit documentation says:
# The diagonals provide the variance of the parameter estimate.
# To compute one standard deviation errors on the parameters use
# perr = np.sqrt(np.diag(pcov)).
#
# How the sigma parameter affects the estimated covariance depends
# on absolute_sigma argument, as described above.
#
# If the Jacobian matrix at the solution doesn’t have a full rank,
# then ‘lm’ method returns a matrix filled with np.inf, on the other
# hand ‘trf’ and ‘dogbox’ methods use Moore-Penrose pseudoinverse to
# compute the covariance matrix.
cov = out[1]
m_err = np.sqrt(cov[0, 0])
b_err = np.sqrt(cov[1, 1])
return m, b, m_err, b_err
def fit_line_v1(x, y, dy):
'''
Fit data for y = mx + b
return m and b
no error estimates
'''
w = 1 / dy ** 2
A = np.vstack([x * w, 1.0 * w]).T
p, residuals, _, _ = np.linalg.lstsq(A, y * w)
m = p[0]
b = p[1]
# from the docs page:
# residuals : {(), (1,), (K,)} ndarray
# Sums of residuals; squared Euclidean 2-norm for each column in b - a*x.
# If the rank of a is < N or M <= N, this is an empty array. If b is
# 1-dimensional, this is a (1,) shape array. Otherwise the shape is (K,).
# rank : int
# Rank of matrix a.
# s : (min(M, N),) ndarray
# Singular values of a.
m_err = 0.0
b_err = 0.0
return m, b, m_err, b_err
def fit_line_v2(x, y, dy):
'''
Fit data for y = mx + b
return m and b
essentially the same results as fit_line_v0
no error estimates
'''
w = 1 / dy ** 2
out = np.polynomial.polynomial.polyfit(x, y, 1, w=w, full=True)
# does not provide the covariance matrix, not sure how to extract error
p_final = out[0]
m = p_final[1]
b = p_final[0]
# from the docs page:
# [residuals, rank, singular_values, rcond] : list
# These values are only returned if full = True
# resid – sum of squared residuals of the least squares fit
# rank – the numerical rank of the scaled Vandermonde matrix
# sv – singular values of the scaled Vandermonde matrix
# rcond – value of rcond.
# For more details, see linalg.lstsq.
b_err = 0.0
m_err = 0.0
return m, b, m_err, b_err
def fit_line_v3(x, y, dy):
'''
Fit data for y = mx + b
return m and b
method taken from SasView:
github.com/SasView/sasview/blob/master/src/sas/sascalc/invariant/invariant.py
error estimate seems reasonable
'''
A = np.vstack([x / dy, 1.0 / dy]).T
p, residuals, _, _ = np.linalg.lstsq(A, y / dy)
m = p[0]
b = p[1]
# Get the covariance matrix, defined as inv_cov = a_transposed * a
inv_cov = np.dot(A.transpose(), A)
cov = np.linalg.pinv(inv_cov)
err_matrix = np.abs(residuals) * cov
m_err, b_err = np.sqrt(np.diag(err_matrix))
return m, b, m_err, b_err
def fit_line_v4(x, y, dy):
'''
Fit data for y = mx + b
return m and b
error estimate seems much too small
'''
w = 1 / dy ** 2
p, cov = np.polyfit(x, y, 1, w=w, cov=True)
m, b = p
# From docs page:
# The diagonal of this matrix (cov) are the
# variance estimates for each coefficient.
m_err, b_err = np.sqrt(np.diag(cov)) # standard devaitions
# m_err, b_err = np.diag(cov)
return m, b, m_err, b_err
def fit_line_v5(x, y, dy):
'''
Fit data for y = mx + b
return m and b
method taken from wikipedia:
https://en.wikipedia.org/wiki/Linear_least_squares_(mathematics)#Python
error estimate seems reasonable comared to input data
This result is identical to v0 and v7
'''
w = 1 / dy ** 2
n = len(x)
X = np.array([x, np.ones(n)]).T
Y = np.array(y).reshape(-1, 1)
W = np.eye(n) * w # weight using the inverse of the variance
# calculate the parameters
xtwx_inv = np.linalg.inv(X.T.dot(W).dot(X))
m, b = xtwx_inv.dot(X.T).dot(W).dot(Y).reshape(2)
# calculate the error of the parameters:
# (X.T * W * X)^-1 * X.T * W * M * W.T * X * (X.T * W.T * X)^-1
# cov_xy = covariance(x, y)
# var_x = covariance(x, x)
# var_y = covariance(y, y)
# M = np.eye(m) * dy ** 2
# xtwtx_inv = np.linalg.inv(X.T.dot(W.T).dot(X))
# M_beta = xtwx_inv.dot(X.T).dot(W).dot(M).dot(W.T).dot(X).dot(xtwtx_inv)
# M_beta = xtwx_inv # because M = W^-1
cov = xtwx_inv
m_err, b_err = np.sqrt(np.diag(cov))
return m, b, m_err, b_err
def fit_line_v6(x, y, dy):
'''
Fit data for y = mx + b
return m and b
method taken from Baird's "Experimentation": pg 138-140
The dy's in the derivation are not the same as the error of the y values
This method does not propagate the error
'''
var = dy ** 2 # variance, when dy is the standard deviation
wx = x / var
wy = y / var
sum_xy = np.sum(wx * wy)
sum_x = np.sum(wx)
sum_y = np.sum(wy)
sum_x_dy_inv = np.sum(wx / var)
sum_dy_inv = np.sum(1 / var)
sum_x2 = np.sum(wx ** 2)
den = sum_dy_inv * sum_x2 - sum_x * sum_x_dy_inv
m_num = sum_dy_inv * sum_xy - sum_x_dy_inv * sum_y
m = m_num / den
b_num = sum_x2 * sum_y - sum_x * sum_xy
b = b_num / den
n = len(x)
y_fit = m * x + b
delta_y = y - y_fit
y_err = np.sqrt(np.sum(delta_y ** 2) / (n - 2))
m_err = y_err * np.sqrt(n / den)
b_err = y_err * np.sqrt(sum_x2 / den)
return m, b, m_err, b_err
def fit_line_v7(x, y, dy):
'''
Fit data for y = mx + b
return m and b
from Huges & Hase "Measurements and their Uncertainties", pg 69-70
and Press et al. "Numerical Recipes 3rd Edition", pg 781-783
'''
w = 1 / dy ** 2 # weight is the inverse square of the uncertainty
s = np.sum(w)
sx = np.sum(w * x)
sy = np.sum(w * y)
sxx = np.sum(w * x ** 2)
sxy = np.sum(w * x * y)
den = s * sxx - sx ** 2
m_num = s * sxy - sx * sy
m = m_num / den
b_num = sxx * sy - sx * sxy
b = b_num / den
m_err = np.sqrt(s / den)
b_err = np.sqrt(sxx / den)
return m, b, m_err, b_err
def fit_line_v8(x, y, dy):
'''
Fit data for y = mx + b
return m and b
from Press et al. "Numerical Recipes 3rd Edition", pg 781-783
using numerically robust formalism
'''
w = 1 / dy ** 2 # weight is the inverse square of the uncertainty
s = np.sum(w)
sx = np.sum(w * x)
sy = np.sum(w * y)
t = 1 / dy * (x - sx / s)
stt = np.sum(t ** 2)
m = np.sum(t * y / dy) / stt
b = (sy - sx * m) / s
m_err = np.sqrt(1 / stt)
b_err = np.sqrt((1 + sx ** 2 / (s * stt)) / s)
return m, b, m_err, b_err
def guinier_fit(q, iq, diq, dq=None, q_min=0.0, q_max=0.1, view_fit=False,
fit_method=fit_line_v5, save_fname='guiner_fit.html',
refine=False):
'''
perform Guinier fit
return I(0) and Rg
'''
# Identify the range for the fit
id_x = (q >= q_min) & (q <= q_max)
q2 = q[id_x] ** 2
log_iq = np.log(iq[id_x])
dlog_iq = diq[id_x] / iq[id_x]
if dq is not None:
dq2 = 2 * q[id_x] * dq[id_x]
m, b, m_err, b_err = fit_method(q2, log_iq, dlog_iq)
rg = np.sqrt(-3 * m)
rg_err = 3 / (2 * rg) * m_err
rg, rg_err = round_error(rg, rg_err)
i0 = np.exp(b)
i0_err = i0 * b_err
i0, i0_err = round_error(i0, i0_err)
rg_q_max = 1.3 / rg
if rg_q_max < q[id_x][-1]:
logging.warning('initial q-max too high, 1.3/Rg={} < {}'.format(
rg_q_max, q[id_x][-1]))
if refine:
logging.warning('repeating fit with q-max={}'.format(rg_q_max))
return guinier_fit(q, iq, diq, dq=dq, q_min=q_min, q_max=rg_q_max,
view_fit=view_fit, fit_method=fit_method,
save_fname=save_fname)
if view_fit:
from sas_modeling import make_figures
q2 = np.insert(q2, 0, 0.0)
log_iq = np.insert(log_iq, 0, b)
dlog_iq = np.insert(dlog_iq, 0, b_err)
fit_line = m * q2 + b
q_range = q[id_x][[0, -1]]
fig = make_figures.plot_guinier_fit(q2, log_iq, fit_line, i0, i0_err,
rg, rg_err, dlog_iq, q_range,
save_fname=save_fname)
return i0, rg, i0_err, rg_err, fig
return i0, rg, i0_err, rg_err
def round_error(val, val_err, sig_figs=2):
'''
Round a value and its error estimate to a certain number
of significant figures (on the error estimate). By default 2
significant figures are used.
'''
# round number to a certain number of significant figures
n = int(np.log10(val_err)) # displacement from ones place
if val_err >= 1:
n += 1
scale = 10 ** (sig_figs - n)
val = round(val * scale) / scale
val_err = round(val_err * scale) / scale
return val, val_err
def compare_guinier_fit(q, iq, diq, **args):
'''
perform Guinier fit
return I(0) and Rg
'''
fit_methods = [
fit_line_v0,
fit_line_v1,
fit_line_v2,
fit_line_v3,
fit_line_v4,
fit_line_v5,
fit_line_v6,
fit_line_v7,
fit_line_v8,
]
for fit_method in fit_methods:
save_fname = 'fit_{}_comparison.html'.format(fit_method.__name__[-2:])
i0, rg, i0_err, rg_err = guinier_fit(q, iq, diq, fit_method=fit_method,
save_fname=save_fname,
view_fit=True, **args)
def covariance(x, y):
assert len(x) == len(y)
cov = ((x - x.mean()) * (y - y.mean())).sum() / (len(x) - 1)
return cov
def bayesian():
NotImplemented
if __name__ == '__main__':
import os
import make_figures
# data_fname = 'data/1mgml_LysoSANS.sub'; skiprows = 1
skiprows = 0
data_fname = 'data/1mgml_lys_sans.dat'; q_max = 0.091 # lys
# data_fname = 'data/5mgml_nist_mab_sans.dat'; q_max = 0.0296 # mab
assert os.path.exists(data_fname)
data = np.asfortranarray(np.loadtxt(data_fname, skiprows=skiprows))
# data[:, 1:3] *= 1 / data[0, 1]
# column 4 is the effective q-values, accounting for the beam spread
if True:
plot_fname = 'I(q)_and_guinier-no_scale.html'
make_figures.plot_iq_and_guinier(data[:, 0], data[:, 1], data[:, 2],
save_fname=plot_fname)
# scale the data
# data[:, 1:3] *= 1 / data[0, 1] # set the first measured point to 1
# data[:, 1:3] *= 10 / data[0, 1] # set the first measured point to 10
# data[:, 1:3] *= 100 / data[0, 1] # set the first measured point to 100
# data[:, 1:3] *= 1000 / data[0, 1] # set the first measured point to 1000
# compare_guinier_fit(data[:, 0], data[:, 1], data[:, 2], q_max=q_max,
# refine=True)
save_fname = data_fname.replace('.dat', '.html')
i0, rg, i0_err, rg_err = guinier_fit(data[:, 0], data[:, 1], data[:, 2],
dq=data[:, 3], q_max=q_max,
view_fit=True, fit_method=fit_line_v8,
refine=True, save_fname=save_fname)
logging.debug('\m/ >.< \m/')
| gpl-3.0 | 3,665,040,384,251,789,300 | 27.567982 | 81 | 0.555231 | false | 2.894889 | false | false | false |
SeedScientific/polio | source_data/migrations/0053_auto__chg_field_sourcedatapoint_error_msg.py | 1 | 71250 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
        # Changing field 'SourceDataPoint.error_msg' from a 255-character
        # CharField to an unbounded TextField so long error messages are no
        # longer truncated when stored.
        db.alter_column(u'source_data_sourcedatapoint', 'error_msg', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
        # Changing field 'SourceDataPoint.error_msg' back to a 255-character
        # CharField; existing values longer than 255 characters may be
        # truncated or rejected by the database when this migration is reversed.
        db.alter_column(u'source_data_sourcedatapoint', 'error_msg', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
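
    # A minimal usage sketch (assuming the project's standard South setup;
    # the previous migration number, 0052, is an assumption based on
    # sequential numbering):
    #
    #     python manage.py migrate source_data 0053   # apply: widen error_msg
    #     python manage.py migrate source_data 0052   # reverse the change
    #
    # The 'models' dict below is the ORM state frozen by South when this
    # migration was generated; it describes the models as they existed at that
    # time, not necessarily the current models.py.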
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'datapoints.campaign': {
'Meta': {'object_name': 'Campaign', 'db_table': "'campaign'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'get_full_name'"}),
'start_date': ('django.db.models.fields.DateField', [], {'unique': 'True'})
},
u'datapoints.indicator': {
'Meta': {'object_name': 'Indicator', 'db_table': "'indicator'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '55', 'populate_from': "'name'", 'unique_with': '()'})
},
u'datapoints.office': {
'Meta': {'object_name': 'Office', 'db_table': "'office'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
u'datapoints.region': {
'Meta': {'unique_together': "(('source', 'source_guid'),)", 'object_name': 'Region', 'db_table': "'region'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '10', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '13', 'decimal_places': '10', 'blank': 'True'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'settlement_code': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'shape_file_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '55', 'populate_from': "'full_name'"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'datapoints.source': {
'Meta': {'object_name': 'Source', 'db_table': "'source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'source_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
},
'source_data.activityreport': {
'Meta': {'object_name': 'ActivityReport'},
'activity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_hh_pending_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_local_leadership_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_num_hh_affected': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_pro_opv_cd': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_caregiver_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_husband_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_positive': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_vcm_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_vcm_sett': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipds_community_leader_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_issue_reported': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_issue_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_num_children': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_num_hh': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_other_issue': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_team': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.campaignmap': {
'Meta': {'object_name': 'CampaignMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
'source_campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceCampaign']", 'unique': 'True'})
},
'source_data.clustersupervisor': {
'Meta': {'object_name': 'ClusterSupervisor'},
'coord_rfp_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'coord_smwg_meetings': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'coord_vcm_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'end_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fund_transparency': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_activities_conducted': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_activities_planned': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_endorsed': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_implementation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_socialdata': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_special_pop': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_workplan_aligned': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_lgac': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ri_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisee_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisor_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisor_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_birthtracking': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.document': {
'Meta': {'object_name': 'Document'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'docfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'source_data.etljob': {
'Meta': {'object_name': 'EtlJob'},
'date_attempted': ('django.db.models.fields.DateTimeField', [], {}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'error_msg': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'success_msg': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
'source_data.healthcamp': {
'Meta': {'object_name': 'HealthCamp'},
'agencyname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'formhub_uuid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_photo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'megaphone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.indicatormap': {
'Meta': {'object_name': 'IndicatorMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
'source_indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceIndicator']", 'unique': 'True'})
},
'source_data.knowthepeople': {
'Meta': {'object_name': 'KnowThePeople'},
'brothers': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'citiesvisited': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofpax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'prefferedcity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sisters': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state_country': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.paxlistreporttraining': {
'Meta': {'object_name': 'PaxListReportTraining'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'emailaddr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofparticipant': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.phoneinventory': {
'Meta': {'object_name': 'PhoneInventory'},
'asset_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'colour_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'telephone_no': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmbirthrecord': {
'Meta': {'object_name': 'PracticeVCMBirthRecord'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datereport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'householdnumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofchild': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm0dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmnamecattended': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmrilink': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmsettcoordinates': {
'Meta': {'object_name': 'PracticeVCMSettCoordinates'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmphone': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmsummary': {
'Meta': {'object_name': 'PracticeVCMSummary'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msd_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.processstatus': {
'Meta': {'object_name': 'ProcessStatus'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status_text': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'source_data.regionmap': {
'Meta': {'object_name': 'RegionMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
'source_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceRegion']", 'unique': 'True'})
},
u'source_data.sourcecampaign': {
'Meta': {'unique_together': "(('source', 'campaign_string'),)", 'object_name': 'SourceCampaign'},
'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.sourcedatapoint': {
'Meta': {'unique_together': "(('source', 'source_guid', 'indicator_string'),)", 'object_name': 'SourceDataPoint'},
'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cell_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
'error_msg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'row_number': ('django.db.models.fields.IntegerField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"})
},
u'source_data.sourceindicator': {
'Meta': {'unique_together': "(('source', 'indicator_string'),)", 'object_name': 'SourceIndicator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.sourceregion': {
'Meta': {'object_name': 'SourceRegion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'lon': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlement_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmbirthrecord': {
'Meta': {'object_name': 'VCMBirthRecord'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datereport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'householdnumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofchild': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm0dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmnamecattended': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmrilink': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsettlement': {
'Meta': {'object_name': 'VCMSettlement'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmphone': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsummary': {
'Meta': {'object_name': 'VCMSummary'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msd_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsummarynew': {
'Meta': {'object_name': 'VCMSummaryNew'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_msd1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_msd2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax4': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax6': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax7': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax8': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax9': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_display_msd3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_tot_missed_check': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_12_59months': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_2_11months': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_census': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_missed': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_newborns': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax12_59mo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax2_11mo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vaxnewborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vwsregister': {
'Meta': {'object_name': 'VWSRegister'},
'acceptphoneresponsibility': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 26, 0, 0)'}),
'datephonecollected': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fname_vws': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lname_vws': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'personal_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'wardcode': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['source_data'] | agpl-3.0 | 1,803,691,983,139,624,700 | 91.055556 | 195 | 0.568393 | false | 3.419891 | false | false | false |
RandyMoore/mySiteDjango | my_site_django/weblog/models.py | 1 | 3693 | from django.db import models
from django.db.models.fields import CharField
from django.utils.safestring import mark_safe
from markdown import markdown
from pygments import highlight
from pygments.formatters import get_formatter_by_name
from pygments.lexers import get_lexer_by_name
from wagtail.core import blocks
from wagtail.core.blocks import BlockQuoteBlock, RawHTMLBlock
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.embeds.blocks import EmbedBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.search import index
# Custom blocks for StreamField. From https://gist.github.com/frankwiles/74a882f16704db9caa27
# See also http://docs.wagtail.io/en/v1.9/releases/1.6.html#render-and-render-basic-methods-on-streamfield-blocks-now-accept-a-context-keyword-argument
class CodeBlock(blocks.StructBlock):
"""
Code Highlighting Block
"""
LANGUAGE_CHOICES = (
('python', 'Python'),
('bash', 'Bash/Shell'),
('html', 'HTML'),
('css', 'CSS'),
('scss', 'SCSS'),
)
language = blocks.ChoiceBlock(choices=LANGUAGE_CHOICES)
code = blocks.TextBlock()
class Meta:
icon = 'code'
def render(self, value, context=None):
src = value['code'].strip('\n')
lang = value['language']
lexer = get_lexer_by_name(lang)
formatter = get_formatter_by_name(
'html',
linenos=None,
cssclass='codehilite',
style='default',
noclasses=False,
)
return mark_safe(highlight(src, lexer, formatter))
class MarkDownBlock(blocks.TextBlock):
""" MarkDown Block """
class Meta:
icon = 'code'
def render_basic(self, value, context=None):
md = markdown(
value,
[
'markdown.extensions.fenced_code',
'codehilite',
],
)
return mark_safe(md)
# Page Models
class BlogIndexPage(Page):
subheading = CharField(max_length=255)
content_panels = Page.content_panels + [
FieldPanel('subheading', classname="full"),
]
@property
def blogs(self):
blogs = WeblogPage.objects.live().descendant_of(self)
blogs = blogs.order_by('-date')
return blogs
def get_context(self, request):
blogs = self.blogs
context = super(BlogIndexPage, self).get_context(request)
context['blogs'] = blogs
context['title'] = self.title
context['subheading'] = self.subheading
return context
class WeblogPage(Page):
body = StreamField([
('heading', blocks.CharBlock(classname="full title")),
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
('html', RawHTMLBlock()),
('block_quote', BlockQuoteBlock()),
('embed', EmbedBlock()),
('code', CodeBlock()),
('markdown', MarkDownBlock()),
])
subheading = CharField(max_length=255)
date = models.DateField("Post date")
search_fields = Page.search_fields + [
index.SearchField('body'),
index.FilterField('date'),
]
content_panels = Page.content_panels + [
FieldPanel('subheading', classname="full"),
FieldPanel('date'),
StreamFieldPanel('body', classname="full"),
]
def get_context(self, request):
context = super(WeblogPage, self).get_context(request)
context['title'] = self.title
context['subheading'] = self.subheading
context['body'] = self.body
return context
| gpl-3.0 | -1,130,170,314,572,524,900 | 27.19084 | 151 | 0.629299 | false | 3.988121 | false | false | false |
citrix-openstack-build/neutron-vpnaas | neutron_vpnaas/db/vpn/vpn_db.py | 1 | 31697 | # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.utils import excutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.common import constants as n_constants
from neutron.db import common_db_mixin as base_db
from neutron.db import l3_agentschedulers_db as l3_agent_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import vpnaas
from neutron.i18n import _LW
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.plugins.common import utils
from neutron_vpnaas.db.vpn import vpn_validator
LOG = logging.getLogger(__name__)
class IPsecPeerCidr(model_base.BASEV2):
"""Internal representation of a IPsec Peer Cidrs."""
cidr = sa.Column(sa.String(32), nullable=False, primary_key=True)
ipsec_site_connection_id = sa.Column(
sa.String(36),
sa.ForeignKey('ipsec_site_connections.id',
ondelete="CASCADE"),
primary_key=True)
class IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IPsecPolicy Object."""
__tablename__ = 'ipsecpolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp",
name="ipsec_transform_protocols"),
nullable=False)
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport",
name="ipsec_encapsulations"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IKEPolicy Object."""
__tablename__ = 'ikepolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
phase1_negotiation_mode = sa.Column(sa.Enum("main",
name="ike_phase1_mode"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"),
nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IPsecSiteConnection(model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a IPsecSiteConnection Object."""
__tablename__ = 'ipsec_site_connections'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
peer_address = sa.Column(sa.String(255), nullable=False)
peer_id = sa.Column(sa.String(255), nullable=False)
route_mode = sa.Column(sa.String(8), nullable=False)
mtu = sa.Column(sa.Integer, nullable=False)
initiator = sa.Column(sa.Enum("bi-directional", "response-only",
name="vpn_initiators"), nullable=False)
auth_mode = sa.Column(sa.String(16), nullable=False)
psk = sa.Column(sa.String(255), nullable=False)
dpd_action = sa.Column(sa.Enum("hold", "clear",
"restart", "disabled",
"restart-by-peer", name="vpn_dpd_actions"),
nullable=False)
dpd_interval = sa.Column(sa.Integer, nullable=False)
dpd_timeout = sa.Column(sa.Integer, nullable=False)
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vpnservice_id = sa.Column(sa.String(36),
sa.ForeignKey('vpnservices.id'),
nullable=False)
ipsecpolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ipsecpolicies.id'),
nullable=False)
ikepolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ikepolicies.id'),
nullable=False)
ipsecpolicy = orm.relationship(
IPsecPolicy, backref='ipsec_site_connection')
ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection')
peer_cidrs = orm.relationship(IPsecPeerCidr,
backref='ipsec_site_connection',
lazy='joined',
cascade='all, delete, delete-orphan')
class VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 VPNService Object."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
nullable=False)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
nullable=False)
subnet = orm.relationship(models_v2.Subnet)
router = orm.relationship(l3_db.Router)
ipsec_site_connections = orm.relationship(
IPsecSiteConnection,
backref='vpnservice',
cascade="all, delete-orphan")
class VPNPluginDb(vpnaas.VPNPluginBase, base_db.CommonDbMixin):
"""VPN plugin database class using SQLAlchemy models."""
def _get_validator(self):
"""Obtain validator to use for attribute validation.
        Subclasses may override this with a different validator, as needed.
Note: some UTs will directly create a VPNPluginDb object and then
call its methods, instead of creating a VPNDriverPlugin, which
will have a service driver associated that will provide a
validator object. As a result, we use the reference validator here.
"""
return vpn_validator.VpnReferenceValidator()
def update_status(self, context, model, v_id, status):
with context.session.begin(subtransactions=True):
v_db = self._get_resource(context, model, v_id)
v_db.update({'status': status})
def _get_resource(self, context, model, v_id):
try:
r = self._get_by_id(context, model, v_id)
except exc.NoResultFound:
with excutils.save_and_reraise_exception(reraise=False) as ctx:
if issubclass(model, IPsecSiteConnection):
raise vpnaas.IPsecSiteConnectionNotFound(
ipsec_site_conn_id=v_id
)
elif issubclass(model, IKEPolicy):
raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id)
elif issubclass(model, IPsecPolicy):
raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id)
elif issubclass(model, VPNService):
raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id)
ctx.reraise = True
return r
def assert_update_allowed(self, obj):
status = getattr(obj, 'status', None)
_id = getattr(obj, 'id', None)
if utils.in_pending_status(status):
raise vpnaas.VPNStateInvalidToUpdate(id=_id, state=status)
def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None):
res = {'id': ipsec_site_conn['id'],
'tenant_id': ipsec_site_conn['tenant_id'],
'name': ipsec_site_conn['name'],
'description': ipsec_site_conn['description'],
'peer_address': ipsec_site_conn['peer_address'],
'peer_id': ipsec_site_conn['peer_id'],
'route_mode': ipsec_site_conn['route_mode'],
'mtu': ipsec_site_conn['mtu'],
'auth_mode': ipsec_site_conn['auth_mode'],
'psk': ipsec_site_conn['psk'],
'initiator': ipsec_site_conn['initiator'],
'dpd': {
'action': ipsec_site_conn['dpd_action'],
'interval': ipsec_site_conn['dpd_interval'],
'timeout': ipsec_site_conn['dpd_timeout']
},
'admin_state_up': ipsec_site_conn['admin_state_up'],
'status': ipsec_site_conn['status'],
'vpnservice_id': ipsec_site_conn['vpnservice_id'],
'ikepolicy_id': ipsec_site_conn['ikepolicy_id'],
'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'],
'peer_cidrs': [pcidr['cidr']
for pcidr in ipsec_site_conn['peer_cidrs']]
}
return self._fields(res, fields)
def _get_subnet_ip_version(self, context, vpnservice_id):
vpn_service_db = self._get_vpnservice(context, vpnservice_id)
subnet = vpn_service_db.subnet['cidr']
ip_version = netaddr.IPNetwork(subnet).version
return ip_version
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
validator = self._get_validator()
validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
tenant_id = self._get_tenant_id_for_create(context, ipsec_sitecon)
with context.session.begin(subtransactions=True):
            # Check permissions
self._get_resource(context,
VPNService,
ipsec_sitecon['vpnservice_id'])
self._get_resource(context,
IKEPolicy,
ipsec_sitecon['ikepolicy_id'])
self._get_resource(context,
IPsecPolicy,
ipsec_sitecon['ipsecpolicy_id'])
vpnservice_id = ipsec_sitecon['vpnservice_id']
ip_version = self._get_subnet_ip_version(context, vpnservice_id)
validator.validate_ipsec_site_connection(context,
ipsec_sitecon,
ip_version)
ipsec_site_conn_db = IPsecSiteConnection(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsec_sitecon['name'],
description=ipsec_sitecon['description'],
peer_address=ipsec_sitecon['peer_address'],
peer_id=ipsec_sitecon['peer_id'],
route_mode='static',
mtu=ipsec_sitecon['mtu'],
auth_mode='psk',
psk=ipsec_sitecon['psk'],
initiator=ipsec_sitecon['initiator'],
dpd_action=ipsec_sitecon['dpd_action'],
dpd_interval=ipsec_sitecon['dpd_interval'],
dpd_timeout=ipsec_sitecon['dpd_timeout'],
admin_state_up=ipsec_sitecon['admin_state_up'],
status=constants.PENDING_CREATE,
vpnservice_id=vpnservice_id,
ikepolicy_id=ipsec_sitecon['ikepolicy_id'],
ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id']
)
context.session.add(ipsec_site_conn_db)
for cidr in ipsec_sitecon['peer_cidrs']:
peer_cidr_db = IPsecPeerCidr(
cidr=cidr,
ipsec_site_connection_id=ipsec_site_conn_db['id']
)
context.session.add(peer_cidr_db)
return self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
def update_ipsec_site_connection(
self, context,
ipsec_site_conn_id, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
changed_peer_cidrs = False
validator = self._get_validator()
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context,
IPsecSiteConnection,
ipsec_site_conn_id)
vpnservice_id = ipsec_site_conn_db['vpnservice_id']
ip_version = self._get_subnet_ip_version(context, vpnservice_id)
validator.assign_sensible_ipsec_sitecon_defaults(
ipsec_sitecon, ipsec_site_conn_db)
validator.validate_ipsec_site_connection(
context,
ipsec_sitecon,
ip_version)
self.assert_update_allowed(ipsec_site_conn_db)
if "peer_cidrs" in ipsec_sitecon:
changed_peer_cidrs = True
old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs']
old_peer_cidr_dict = dict(
(peer_cidr['cidr'], peer_cidr)
for peer_cidr in old_peer_cidr_list)
new_peer_cidr_set = set(ipsec_sitecon["peer_cidrs"])
old_peer_cidr_set = set(old_peer_cidr_dict)
new_peer_cidrs = list(new_peer_cidr_set)
for peer_cidr in old_peer_cidr_set - new_peer_cidr_set:
context.session.delete(old_peer_cidr_dict[peer_cidr])
for peer_cidr in new_peer_cidr_set - old_peer_cidr_set:
pcidr = IPsecPeerCidr(
cidr=peer_cidr,
ipsec_site_connection_id=ipsec_site_conn_id)
context.session.add(pcidr)
del ipsec_sitecon["peer_cidrs"]
if ipsec_sitecon:
ipsec_site_conn_db.update(ipsec_sitecon)
result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
if changed_peer_cidrs:
result['peer_cidrs'] = new_peer_cidrs
return result
def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id
)
context.session.delete(ipsec_site_conn_db)
def _get_ipsec_site_connection(
self, context, ipsec_site_conn_id):
return self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id)
def get_ipsec_site_connection(self, context,
ipsec_site_conn_id, fields=None):
ipsec_site_conn_db = self._get_ipsec_site_connection(
context, ipsec_site_conn_id)
return self._make_ipsec_site_connection_dict(
ipsec_site_conn_db, fields)
def get_ipsec_site_connections(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecSiteConnection,
self._make_ipsec_site_connection_dict,
filters=filters, fields=fields)
def update_ipsec_site_conn_status(self, context, conn_id, new_status):
with context.session.begin():
self._update_connection_status(context, conn_id, new_status, True)
def _update_connection_status(self, context, conn_id, new_status,
updated_pending):
"""Update the connection status, if changed.
If the connection is not in a pending state, unconditionally update
        the status. If it is in a pending state, only update it when the
        caller indicates that the pending status has been processed.
"""
try:
conn_db = self._get_ipsec_site_connection(context, conn_id)
except vpnaas.IPsecSiteConnectionNotFound:
return
if not utils.in_pending_status(conn_db.status) or updated_pending:
conn_db.status = new_status
def _make_ikepolicy_dict(self, ikepolicy, fields=None):
res = {'id': ikepolicy['id'],
'tenant_id': ikepolicy['tenant_id'],
'name': ikepolicy['name'],
'description': ikepolicy['description'],
'auth_algorithm': ikepolicy['auth_algorithm'],
'encryption_algorithm': ikepolicy['encryption_algorithm'],
'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'],
'lifetime': {
'units': ikepolicy['lifetime_units'],
'value': ikepolicy['lifetime_value'],
},
'ike_version': ikepolicy['ike_version'],
'pfs': ikepolicy['pfs']
}
return self._fields(res, fields)
def create_ikepolicy(self, context, ikepolicy):
ike = ikepolicy['ikepolicy']
tenant_id = self._get_tenant_id_for_create(context, ike)
lifetime_info = ike.get('lifetime', [])
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ike_db = IKEPolicy(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ike['name'],
description=ike['description'],
auth_algorithm=ike['auth_algorithm'],
encryption_algorithm=ike['encryption_algorithm'],
phase1_negotiation_mode=ike['phase1_negotiation_mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
ike_version=ike['ike_version'],
pfs=ike['pfs']
)
context.session.add(ike_db)
return self._make_ikepolicy_dict(ike_db)
def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
ike = ikepolicy['ikepolicy']
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
if ike:
lifetime_info = ike.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ike['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ike['lifetime_value'] = lifetime_info['value']
ike_db.update(ike)
return self._make_ikepolicy_dict(ike_db)
def delete_ikepolicy(self, context, ikepolicy_id):
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
context.session.delete(ike_db)
def get_ikepolicy(self, context, ikepolicy_id, fields=None):
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
return self._make_ikepolicy_dict(ike_db, fields)
def get_ikepolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IKEPolicy,
self._make_ikepolicy_dict,
filters=filters, fields=fields)
def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None):
res = {'id': ipsecpolicy['id'],
'tenant_id': ipsecpolicy['tenant_id'],
'name': ipsecpolicy['name'],
'description': ipsecpolicy['description'],
'transform_protocol': ipsecpolicy['transform_protocol'],
'auth_algorithm': ipsecpolicy['auth_algorithm'],
'encryption_algorithm': ipsecpolicy['encryption_algorithm'],
'encapsulation_mode': ipsecpolicy['encapsulation_mode'],
'lifetime': {
'units': ipsecpolicy['lifetime_units'],
'value': ipsecpolicy['lifetime_value'],
},
'pfs': ipsecpolicy['pfs']
}
return self._fields(res, fields)
def create_ipsecpolicy(self, context, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
tenant_id = self._get_tenant_id_for_create(context, ipsecp)
lifetime_info = ipsecp['lifetime']
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ipsecp_db = IPsecPolicy(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsecp['name'],
description=ipsecp['description'],
transform_protocol=ipsecp['transform_'
'protocol'],
auth_algorithm=ipsecp['auth_algorithm'],
encryption_algorithm=ipsecp['encryption_'
'algorithm'],
encapsulation_mode=ipsecp['encapsulation_'
'mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
pfs=ipsecp['pfs'])
context.session.add(ipsecp_db)
return self._make_ipsecpolicy_dict(ipsecp_db)
def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsecp_db = self._get_resource(context,
IPsecPolicy,
ipsecpolicy_id)
if ipsecp:
lifetime_info = ipsecp.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ipsecp['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ipsecp['lifetime_value'] = lifetime_info['value']
ipsecp_db.update(ipsecp)
return self._make_ipsecpolicy_dict(ipsecp_db)
def delete_ipsecpolicy(self, context, ipsecpolicy_id):
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
context.session.delete(ipsec_db)
def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
return self._make_ipsecpolicy_dict(ipsec_db, fields)
def get_ipsecpolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecPolicy,
self._make_ipsecpolicy_dict,
filters=filters, fields=fields)
def _make_vpnservice_dict(self, vpnservice, fields=None):
res = {'id': vpnservice['id'],
'name': vpnservice['name'],
'description': vpnservice['description'],
'tenant_id': vpnservice['tenant_id'],
'subnet_id': vpnservice['subnet_id'],
'router_id': vpnservice['router_id'],
'admin_state_up': vpnservice['admin_state_up'],
'status': vpnservice['status']}
return self._fields(res, fields)
def create_vpnservice(self, context, vpnservice):
vpns = vpnservice['vpnservice']
tenant_id = self._get_tenant_id_for_create(context, vpns)
validator = self._get_validator()
with context.session.begin(subtransactions=True):
validator.validate_vpnservice(context, vpns)
vpnservice_db = VPNService(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=vpns['name'],
description=vpns['description'],
subnet_id=vpns['subnet_id'],
router_id=vpns['router_id'],
admin_state_up=vpns['admin_state_up'],
status=constants.PENDING_CREATE)
context.session.add(vpnservice_db)
return self._make_vpnservice_dict(vpnservice_db)
def update_vpnservice(self, context, vpnservice_id, vpnservice):
vpns = vpnservice['vpnservice']
with context.session.begin(subtransactions=True):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
self.assert_update_allowed(vpns_db)
if vpns:
vpns_db.update(vpns)
return self._make_vpnservice_dict(vpns_db)
def delete_vpnservice(self, context, vpnservice_id):
with context.session.begin(subtransactions=True):
if context.session.query(IPsecSiteConnection).filter_by(
vpnservice_id=vpnservice_id
).first():
raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
context.session.delete(vpns_db)
def _get_vpnservice(self, context, vpnservice_id):
return self._get_resource(context, VPNService, vpnservice_id)
def get_vpnservice(self, context, vpnservice_id, fields=None):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
return self._make_vpnservice_dict(vpns_db, fields)
def get_vpnservices(self, context, filters=None, fields=None):
return self._get_collection(context, VPNService,
self._make_vpnservice_dict,
filters=filters, fields=fields)
def check_router_in_use(self, context, router_id):
vpnservices = self.get_vpnservices(
context, filters={'router_id': [router_id]})
if vpnservices:
raise vpnaas.RouterInUseByVPNService(
router_id=router_id,
vpnservice_id=vpnservices[0]['id'])
def check_subnet_in_use(self, context, subnet_id):
with context.session.begin(subtransactions=True):
vpnservices = context.session.query(VPNService).filter_by(
subnet_id=subnet_id
).first()
if vpnservices:
raise vpnaas.SubnetInUseByVPNService(
subnet_id=subnet_id,
vpnservice_id=vpnservices['id'])
class VPNPluginRpcDbMixin():
def _get_agent_hosting_vpn_services(self, context, host):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_L3, host)
if not agent.admin_state_up:
return []
query = context.session.query(VPNService)
query = query.join(IPsecSiteConnection)
query = query.join(IKEPolicy)
query = query.join(IPsecPolicy)
query = query.join(IPsecPeerCidr)
query = query.join(l3_agent_db.RouterL3AgentBinding,
l3_agent_db.RouterL3AgentBinding.router_id ==
VPNService.router_id)
query = query.filter(
l3_agent_db.RouterL3AgentBinding.l3_agent_id == agent.id)
return query
def update_status_by_agent(self, context, service_status_info_list):
"""Updating vpnservice and vpnconnection status.
:param context: context variable
:param service_status_info_list: list of status
The structure is
[{id: vpnservice_id,
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
ipsec_site_connections: {
ipsec_site_connection_id: {
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
}
}]
The agent will set updated_pending_status as True,
        when it updates any pending status.
"""
with context.session.begin(subtransactions=True):
for vpnservice in service_status_info_list:
try:
vpnservice_db = self._get_vpnservice(
context, vpnservice['id'])
except vpnaas.VPNServiceNotFound:
LOG.warn(_LW('vpnservice %s in db is already deleted'),
vpnservice['id'])
continue
if (not utils.in_pending_status(vpnservice_db.status)
or vpnservice['updated_pending_status']):
vpnservice_db.status = vpnservice['status']
for conn_id, conn in vpnservice[
'ipsec_site_connections'].items():
self._update_connection_status(
context, conn_id, conn['status'],
conn['updated_pending_status'])
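
# Illustrative shape of the per-agent status report consumed by
# update_status_by_agent() above; the identifiers below are made-up examples,
# not values produced by this module.
#
# [{'id': '<vpnservice uuid>',
#   'status': 'ACTIVE',
#   'updated_pending_status': True,
#   'ipsec_site_connections': {
#       '<ipsec site connection uuid>': {'status': 'DOWN',
#                                        'updated_pending_status': True}}}]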
| apache-2.0 | 1,094,159,555,559,583,400 | 46.23845 | 79 | 0.562482 | false | 4.16682 | false | false | false |
Hiestaa/RLViz | src/problems/base.py | 1 | 6050 | # -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
import gym
from parametizable import Parametizable
from consts import ParamsTypes, Spaces
class ProblemException(Exception):
pass
class BaseProblem(Parametizable):
"""
    Mostly a wrapper around gym's environment, but also provides additional
parameters and statistics to play with.
    The class is set up for a default behaviour on any gym environment. When
subclassing, part of the job should already be done by setting up the
right parameters. Additional specific behavior can be obtained by overriding
the functions but care should be taken to call the parent's corresponding
method using `super(<Class>, self)`
"""
# These will be or-ed at each step to know whether the environment
# considers the episode terminated
EPISODE_TERMINATION_CRITERIA = [
lambda self, **kwargs: self._done,
        # a negative maxSteps disables the step limit (see PARAMS_DESCRIPTION)
        lambda self, stepI, **kwargs: 0 <= self.maxSteps <= stepI
]
PARAMS = {
'maxSteps': ParamsTypes.Number
}
PARAMS_DOMAIN = {
'maxSteps': {
'range': (-1, float('inf')),
'values': [100, 500, 1000]
},
}
PARAMS_DEFAULT = {
'maxSteps': 500
}
PARAMS_DESCRIPTION = {
'maxSteps': "Maximum number of steps per episode. Set to -1 to disable."
}
# Override to specify a Gym environment that should be loaded.
GYM_ENVIRONMENT_NAME = None
# Override to specify compatible algorithm
DOMAIN = {
'action': Spaces.Discrete,
'state': Spaces.Discrete
}
# optional: override to give a specific name to each action
# action space is assumed to be discrete and 1 dimensional.
# first action should be in first position, second action in second,
# and so on.
ACTION_NAMES = []
# optional: override to give a specific name to each dimension of
# the state space. List should be in the same order of the dimensions
# of the state space (dimension 1 in first position, etc...)
STATE_DIMENSION_NAMES = []
def __init__(self, **kwargs):
super(BaseProblem, self).__init__(**kwargs)
self._done = False
self._env = None
self.observationSpace = None
self.actionSpace = None
@property
def env(self):
return self._env
def terminate(self):
self._done = True
def episodeDone(self, stepI):
return any(
crit(self, stepI=stepI)
for crit in self.EPISODE_TERMINATION_CRITERIA)
def setup(self):
"""
        Set up the environment - this shouldn't be done in the constructor to
enable override.
        This assumes the problem uses a gym environment. Override otherwise.
"""
logger.info("[%s] Problem setup" % self.__class__.__name__)
if self.GYM_ENVIRONMENT_NAME is None:
raise NotImplementedError()
self._env = gym.make(self.GYM_ENVIRONMENT_NAME)
self.observationSpace = self._env.observation_space
self.actionSpace = self._env.action_space
###
    # Some helper functions to retrieve information about the environment.
    # These are pre-implemented for any gym environment, and should
    # be overridden otherwise.
###
def getStatesList(self):
"""
Returns the list of possible states.
Override this function if you're not defining a gym environment.
This function should only be called if the problem bears a discrete
state space.
"""
if self.env is None:
raise NotImplementedError()
if self.DOMAIN['state'] == Spaces.Discrete:
            return range(self.env.observation_space.n)
raise ProblemException("Continuous state space")
def getStatesDim(self):
"""
        Return the number of dimensions of the state space.
"""
if self.env is None:
raise NotImplementedError()
return len(self.env.observation_space.low)
def getStatesBounds(self):
"""
Returns the max and min values each dimension can take.
These are returned as two tuples, `low` and `high`, where both
        are lists with one element per dimension of the state space.
"""
if self.env is None:
raise NotImplementedError()
return (
self.env.observation_space.low,
self.env.observation_space.high)
def getActionsList(self):
"""
Returns the list of possible actions.
Override this function if you're not defining a gym environment.
This function should only be called if the problem bears a discrete
        action space.
"""
if self.env is None:
raise NotImplementedError()
if self.DOMAIN['action'] == Spaces.Discrete:
return range(self.env.action_space.n)
raise NotImplementedError()
# Problem execution methods
def step(self, action):
"""
        The agent takes the given action and receives back the new state,
the reward, whether the episode is terminated and optionally
some additional debug information.
Override this function if you're not defining a gym environment.
"""
newObservation, reward, self._done, info = self._env.step(action)
return newObservation, reward, self._done, info
def reset(self):
"""
Reset the state of the environment for a new episode.
Override this function if you're not defining a gym environment.
"""
self._done = False
return self._env.reset()
def render(self, close=False):
"""
Render the environment (server-side)
Override this function if you're not defining a gym environment.
"""
return self._env.render(close=close)
def release(self):
"""
Release handles and memory if manual intervention is required.
"""
pass
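

# ---------------------------------------------------------------------------
# Minimal illustration of the subclassing pattern described in the BaseProblem
# docstring above. The gym id, domains and action names below are assumptions
# chosen purely for illustration; this is a sketch, not a problem shipped with
# the module.
# ---------------------------------------------------------------------------
class ExampleFrozenLakeProblem(BaseProblem):
    """Hypothetical discrete-state, discrete-action problem definition."""
    GYM_ENVIRONMENT_NAME = 'FrozenLake-v0'

    DOMAIN = {
        'action': Spaces.Discrete,
        'state': Spaces.Discrete
    }

    # One name per action of the 1-dimensional discrete action space
    # (assumed order for illustration only).
    ACTION_NAMES = ['left', 'down', 'right', 'up']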
| mit | -6,629,210,410,368,838,000 | 31.180851 | 80 | 0.629421 | false | 4.481481 | false | false | false |
blabla1337/skf-flask | skf/rabbit_mq_workers/deletion-worker.py | 1 | 3009 | #!/usr/bin/env python
import pika, time, random, yaml
from os import path
from skf import settings
from kubernetes import client, config
creds = pika.PlainCredentials('admin', 'admin-skf-secret')
connection = pika.BlockingConnection(pika.ConnectionParameters(host=settings.RABBIT_MQ_CONN_STRING, credentials=creds))
channel = connection.channel()
channel.queue_declare(queue='deletion_qeue')
def delete_container(rpc_body):
user_id = string_split_user_id(rpc_body)
deployment = string_split_deployment(rpc_body)
delete_deployment(deployment, user_id)
delete_service(deployment, user_id)
time.sleep(3)
return {'message': 'If present, the container image was deleted from the cluster!'}
def delete_deployment(instance_name, user_id):
try:
config.load_kube_config()
api_instance = client.AppsV1Api()
api_response = api_instance.delete_namespaced_deployment(
name=instance_name,
namespace=user_id,
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
print("Deployment deleted. status='%s'" % str(api_response.status))
return {'message': 'Deployment deleted.'}
except:
        return {'message': 'Kubernetes configuration is either missing or done incorrectly, error deleting deployment!'}
def delete_service(instance_name, user_id):
try:
config.load_kube_config()
api_instance = client.CoreV1Api()
api_response = api_instance.delete_namespaced_service(
name=instance_name,
namespace=user_id,
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
print("Deployment deleted. status='%s'" % str(api_response.status))
return {'message': 'Deployment deleted.'}
except:
        return {'message': 'Kubernetes configuration is either missing or done incorrectly, error deleting service!'}
def string_split_user_id(body):
try:
user_id = body.split(':')
return user_id[1]
except:
        return {'message': 'Failed to delete, error no user_id found!'}
def string_split_deployment(body):
try:
deployment = body.split(':')
return deployment[0]
except:
return {'message': 'Failed to delete, error no deployment found!'}
def on_request(ch, method, props, body):
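    # RPC callback (summary inferred from the code below): the request body is
    # expected to look like "<deployment>:<user_id>"; the reply (a stringified
    # dict) is published to the queue named in props.reply_to with the
    # caller's correlation_id, then the request is acknowledged.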
response = delete_container(str(body, 'utf-8'))
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id,
expiration='30000'),
body=str(response))
ch.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='deletion_qeue', on_message_callback=on_request)
print(" [x] Awaiting RPC requests")
channel.start_consuming() | agpl-3.0 | 8,975,749,133,928,293,000 | 34.833333 | 119 | 0.648721 | false | 3.954008 | true | false | false |
google-research/social_cascades | news/graph_processing.py | 1 | 1943 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph processing script."""
import os
from absl import app
from absl import flags
from absl import logging
import networkx as nx
import pandas as pd
from utils import graph_filter_with_degree
from utils import load_graph_from_edgelist_csv
FLAGS = flags.FLAGS
flags.DEFINE_string(
'g_file',
'../proj_Data/cat_data/test3/sr_timespan_post_graph-00000-of-00001.csv',
'raw graph edgelist csv file')
flags.DEFINE_integer('low', 40, 'low degree threshold')
flags.DEFINE_integer('high', 80, 'high degree threshold')
flags.DEFINE_string('data_file', '', 'raw data path')
flags.DEFINE_string('filename', '', 'graph filename')
flags.DEFINE_string('save_path', '', 'graph save path')
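# Example invocation (paths and values are illustrative only):
#   python graph_processing.py --g_file=post_graph.csv --data_file=posts.csv \
#     --low=40 --high=80 --filename=sr_graph --save_path=/tmp/graphs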
def main(_):
df = pd.read_csv(FLAGS.data_file)
author_set = set(df['author'].unique())
graph = load_graph_from_edgelist_csv(FLAGS.g_file)
logging.info('Original Graph size: %d nodes, %d edges',
graph.number_of_nodes(), graph.number_of_edges())
graph = graph_filter_with_degree(graph, FLAGS.low, FLAGS.high, author_set)
logging.info('Filtered Graph size: %d nodes, %d edges',
graph.number_of_nodes(), graph.number_of_edges())
nx.write_gpickle(graph, os.path.join(
FLAGS.save_path, FLAGS.filename + '%s_%s.gpickle' %
(FLAGS.low, FLAGS.high)))
logging.info('Saved graph.')
if __name__ == '__main__':
app.run(main)
| apache-2.0 | -6,876,329,124,565,391,000 | 33.087719 | 76 | 0.705095 | false | 3.47585 | false | false | false |
euroscipy/www.euroscipy.org | papercall_grabbing.py | 1 | 4306 | """
Functions to grab info from papercall.io
"""
import os
import time
import requests
token = 'your_papercall_token'  # <-- fill this in
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
des_template = """
Title: {title}
URL: 2017/descriptions/{id}.html
save_as: 2017/descriptions/{id}.html
{description}
""".lstrip()
def get_submission_ids():
# Query all submission ids
all_ids = []
for state in ('submitted', 'accepted', 'rejected', 'waitlist'):
url = 'https://www.papercall.io/api/v1/submissions?_token=%s&per_page=999&state=%s'
all = requests.get(url % (token, state)).json()
all_ids.extend([x['id'] for x in all])
return all_ids
def get_reviewer_list():
""" Print out the names of all people who did reviews.
"""
# Collect submission ids
all_ids = get_submission_ids()
# Collect all reviewers
reviewers = set()
for id in all_ids:
url = 'https://www.papercall.io/api/v1/submissions/%s/ratings?_token=%s'
ratings = requests.get(url % (id, token)).json()
for rating in ratings:
reviewers.add(rating['user']['name'])
# Print a list
for reviewer in sorted(reviewers):
print(reviewer)
def get_talk_descriptions():
""" Get talk descriptions and store each in a markdown file.
"""
# Collect submission ids
all_ids = get_submission_ids()
# Collect descriptions
index = {}
for id in all_ids:
url = 'https://www.papercall.io/api/v1/submissions/%s?_token=%s'
submission = requests.get(url % (id, token)).json()
id = str(submission['id'])
title = submission['talk']['title']
page = des_template.format(description=submission['talk']['description'],
title=title, id=id)
fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'descriptions', id + '.md')
with open(fname, 'wb') as f:
f.write(page.encode())
index[id] = title
time.sleep(0.1)
fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'descriptions', 'index.md')
with open(fname, 'wb') as f:
for id in sorted(index):
line = id + ' - ' + index[id] + '\n'
f.write(line.encode())
def make_links_in_program():
""" Make the talk titles in the program link to description pages,
as far as we can, anyway. The rest should be done by hand by making use of
the descriptions.index.md.
Beware, this is ugly, and makes all kinds of assumptions about how the program
table is formatted, and it needs manual corrections, and it does not work after
it has applied the changes. We should probably just throw it away.
"""
# Build reverse index
rindex = {}
fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'descriptions', 'index.md')
with open(fname, 'rb') as f:
for line in f.read().decode().splitlines():
if line.strip():
id, _, title = line.partition('-')
rindex[title.strip().lower()] = 'descriptions/' + id.strip() + '.html'
default_link = 'descriptions/oops.html'
# Add links
fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'program.md')
text = open(fname, 'rb').read().decode()
lines = text.splitlines()
for i in range(len(lines)-1):
line = lines[i]
if line.lstrip().startswith("<td>") and not line.rstrip().endswith(">"):
if ' ' not in lines[i+1]:
title = line.lstrip()[4:]
id = rindex.get(title.strip().lower(), default_link)
lines[i] = " <td><a href='%s'>%s</a>" % (id, title)
if line.lstrip().startswith("<td>") and line.rstrip().endswith("</td>"):
if '<br>' in line and ' ' not in line:
title, _, rest = line.lstrip()[4:].partition('<br>')
id = rindex.get(title.strip().lower(), default_link)
lines[i] = " <td><a href='%s'>%s</a><br>%s" % (id, title, rest)
with open(fname, 'wb') as f:
text = '\n'.join(lines)
f.write(text.encode())
if __name__ == '__main__':
pass
# get_reviewer_list()
# get_talk_descriptions()
# make_links_in_program()
| mit | 2,127,778,262,498,097,200 | 33.448 | 94 | 0.571064 | false | 3.582363 | false | false | false |
roopeshsivam/certify | certificates/CreateCertView.py | 1 | 5826 | from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import logout as django_logout
from django.shortcuts import redirect, render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.views import generic
from django.utils.decorators import method_decorator
from django.views.generic.edit import FormView
from django.forms import BaseModelFormSet
from django.views.generic.edit import CreateView
from django.views.decorators.http import condition
from django.views.generic.edit import FormMixin
from django.views.generic.edit import UpdateView
from .ContextData import *
@method_decorator(login_required(login_url="/in/login/"), name='dispatch')
class CreateCertificateView(CreateView):
def get_form_class(self, **kwargs):
"""
        Returns the form class to be used in this view, looked up from
        ContextData using the cert_id URL kwarg.
"""
return ContextData[self.kwargs['cert_id']]['FormName']
def get_template_names(self, **kwargs):
ShipID = self.request.GET.get('shipid')
ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
# if str(ModelObject.objects.filter(CertState='c', ShipMainData__pk=ShipID)) == '<QuerySet []>':
# return 'pages/create-'+ContextData[self.kwargs['cert_id']]['TemplateName']
# else:
# return 'pages/active-certificate-error.html'
return 'pages/certificate-base-form.html'
def get_form(self, form_class=None):
form = super(CreateCertificateView, self).get_form()
return form
def form_valid(self, form, **kwargs):
ShipID = self.request.GET.get('shipid')
form.instance.DocAuthor = self.request.user
form.instance.ShipMainData = ShipMainData.objects.get(id=ShipID)
form.instance.CertState = 'd'
return super(CreateCertificateView, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(CreateCertificateView, self).get_context_data(**kwargs)
ShipID = self.request.GET.get('shipid')
context['CertName'] = ContextData[self.kwargs['cert_id']]['CertName']
context['TemplateName'] = 'forms/update/update-'+ContextData[self.kwargs['cert_id']]['TemplateName']
context['State'] = "Create New Certificate"
context['ButtonState'] = "Add"
context['ShipName'] = ShipMainData.objects.get(id=ShipID)
return context
@method_decorator(login_required(login_url="/in/login/"), name='dispatch')
class UpdateCertificateView(UpdateView):
queryset = None
def get_form_class(self, **kwargs):
"""
Returns the form class to use in this view
"""
return ContextData[self.kwargs['cert_id']]['FormName']
def get_queryset(self, **kwargs):
"""
Return the `QuerySet` that will be used to look up the object.
Note that this method is called by the default implementation of
`get_object` and may not be called if `get_object` is overridden.
"""
ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
return ModelObject.objects.all()
def get_template_names(self, **kwargs):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
ModelObject = ModelObject.objects.get(pk=self.kwargs['pk'])
if ModelObject.CertState=='d':
return 'pages/certificate-base-form.html'
# return 'forms/update/update-'+ContextData[self.kwargs['cert_id']]['TemplateName']
else:
return 'pages/form-error-update.html'
def form_valid(self, form):
form = self.get_form()
form.save()
return super(UpdateCertificateView, self).form_valid(form)
def get_success_url(self):
return "../"
def post(self, request, *args, **kwargs):
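        # Certificate state codes, as implied by the logic in this view:
        # 'd' = draft (editable), 'c' = confirmed, 'x' = deactivated.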
request.POST = (request.POST.copy())
ModelObject = ContextData[self.kwargs['cert_id']]['ModelName']
Certificate = ModelObject.objects.get(pk=self.kwargs['pk'])
CertFilter = ModelObject.objects.filter(ShipMainData_id=Certificate.ShipMainData.id)
State = 'c'
        for Certificates in CertFilter:  # fall back to draft if another certificate for this ship is already confirmed
if Certificates.CertState == "c":
State = 'd'
        if 'save' in request.POST:  # block saving edits to an already-confirmed certificate
form = self.get_form()
if Certificate.CertState != "c":
return super(UpdateCertificateView, self).post(request, *args, **kwargs)
else:
return HttpResponseRedirect('../') # change to redirect
if 'confirm' in request.POST:
ModelObject.objects.filter(pk=self.kwargs['pk']).update(CertState=State)
return HttpResponseRedirect('../') # change to redirect
if 'deactivate' in request.POST:
ModelObject.objects.filter(pk=self.kwargs['pk']).update(CertState='x')
return HttpResponseRedirect('../') #change to redirect
def get_context_data(self, **kwargs):
context = super(UpdateCertificateView, self).get_context_data(**kwargs)
CertData = ContextData[self.kwargs['cert_id']]['ModelName']
Certificate= CertData.objects.get(pk=self.kwargs['pk'])
context['CertName'] = ContextData[self.kwargs['cert_id']]['CertName']
context['TemplateName'] = 'forms/update/update-'+ContextData[self.kwargs['cert_id']]['TemplateName']
context['State'] = "Edit Certificate"
context['ButtonState'] = "Update"
context['ShipName'] = Certificate.ShipMainData
return context | gpl-3.0 | -462,516,706,902,927,000 | 44.170543 | 108 | 0.659286 | false | 4.001374 | false | false | false |
diN0bot/ProcrasDonate | lib/html_emailer.py | 1 | 1392 | import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import settings
def send_email(sender, recipient, subject, text, html):
if settings.DJANGO_SERVER:
print "="*60
print "FROM:", sender
print "TO:", recipient
print "SUBJECT:", subject
print "========= TEXT MESSAGE =========\n", text
print "\n\n========= HTML MESSAGE ==========\n", html
else:
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = recipient
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
msg.attach(part2)
# Send the message via local SMTP server.
s = smtplib.SMTP('localhost')
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
s.sendmail(sender, recipient, msg.as_string())
s.quit()
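# Example call (values are illustrative only):
#   send_email('[email protected]', '[email protected]', 'Hello',
#              'plain-text body', '<p>HTML body</p>')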
| agpl-3.0 | 2,829,949,119,464,530,000 | 35.631579 | 84 | 0.600575 | false | 4.269939 | false | false | false |
JING-TIME/ustc-course | tests/resize_avatar.py | 1 | 1146 | #!/usr/bin/env python3
import sys
sys.path.append('..') # fix import directory
from app import app
from app.models import User
from PIL import Image
from app.utils import rand_str
ctx = app.test_request_context()
ctx.push()
users = User.query.all()
for u in users:
if u._avatar:
with Image.open('../uploads/images/' + u._avatar) as img:
image_width, image_height = img.size
thumbnail_width = 192
thumbnail_height = 192
if image_width <= thumbnail_width and image_height <= thumbnail_height:
continue
# generate thumbnail if the avatar is too large
new_filename = rand_str() + '.png'
try:
img.thumbnail((thumbnail_width, thumbnail_height), Image.ANTIALIAS)
img.save('../uploads/images/' + new_filename, "PNG")
except IOError:
print("Failed to create thumbnail from '" + u._avatar + "' to '" + new_filename + "'")
u._avatar = new_filename
u.save()
print('User ID ' + str(u.id) + ' original ' + u._avatar + ' thumbnail ' + new_filename)
| agpl-3.0 | -4,566,116,130,143,257,000 | 35.967742 | 102 | 0.576789 | false | 4.006993 | false | false | false |
prechelt/pyth | pyth/__init__.py | 1 | 1207 | """
Pyth -- Python text markup and conversion
"""
from __future__ import absolute_import
import os.path
__version__ = '0.5.6'
writerMap = {
'.rtf': 'pyth.plugins.rtf15.writer.Rtf15Writer',
'.html': 'pyth.plugins.xhtml.writer.XHTMLWriter',
'.xhtml': 'pyth.plugins.xhtml.writer.XHTMLWriter',
'.txt': 'pyth.plugins.plaintext.writer.PlaintextWriter',
'.pdf': 'pyth.plugins.pdf.writer.PDFWriter',
}
mimeMap = {
'.rtf': 'application/rtf',
'.html': 'text/html',
'.xhtml': 'application/xhtml+xml',
'.txt': 'text/plain',
}
def write(doc, filename):
ext = os.path.splitext(filename)[1]
writer = namedObject(writerMap[ext])
buff = writer.write(doc)
buff.seek(0)
return (buff, mimeMap[ext])
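# Example use (illustrative): buff, mime = write(doc, 'report.rtf') returns the
# rendered buffer and its MIME type, here 'application/rtf'.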
# Stolen from twisted.python.reflect
def namedModule(name):
"""Return a module given its name."""
topLevel = __import__(name)
packages = name.split(".")[1:]
m = topLevel
for p in packages:
m = getattr(m, p)
return m
def namedObject(name):
"""Get a fully named module-global object.
"""
classSplit = name.split('.')
module = namedModule('.'.join(classSplit[:-1]))
return getattr(module, classSplit[-1])
| mit | -6,107,775,287,688,249,000 | 21.351852 | 60 | 0.624689 | false | 3.235925 | false | false | false |
jonnyhtw/cylc | lib/cylc/batch_sys_manager.py | 1 | 33030 | #!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2018 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Manage submission, poll and kill of a job to the batch systems.
Export the BatchSysManager class.
Batch system handler (a.k.a. job submission method) modules should be placed
under the "cylc.batch_sys_handlers" package. Each module should export the
symbol "BATCH_SYS_HANDLER" for the singleton instance that implements the job
system handler logic.
Each batch system handler class should instantiate with no argument, and may
have the following constants and methods:
batch_sys.filter_poll_output(out, job_id) => boolean
* If this method is available, it will be called after the batch system's
poll command is called and returns zero. The method should read the
output to see if job_id is still alive in the batch system, and return
True if so.
batch_sys.filter_poll_many_output(out) => job_ids
* Called after the batch system's poll many command. The method should read
the output and return a list of job IDs that are still in the batch
system.
batch_sys.filter_submit_output(out, err) => new_out, new_err
* Filter the standard output and standard error of the job submission
command. This is useful if the job submission command returns information
that should just be ignored. See also "batch_sys.SUBMIT_CMD_TMPL".
batch_sys.format_directives(job_conf) => lines
* If relevant, this method formats the job directives for a job file, if
job file directives are relevant for the batch system. The argument
"job_conf" is a dict containing the job configuration.
batch_sys.get_fail_signals(job_conf) => list of strings
* Return a list of names of signals to trap for reporting errors. Default
is ["EXIT", "ERR", "TERM", "XCPU"]. ERR and EXIT are always recommended.
EXIT is used to report premature stopping of the job script, and its trap
is unset at the end of the script.
batch_sys.get_poll_many_cmd(job-id-list) => list
* Return a list containing the shell command to poll the jobs in the
argument list.
batch_sys.get_vacation_signal(job_conf) => str
* If relevant, return a string containing the name of the signal that
indicates the job has been vacated by the batch system.
batch_sys.submit(job_file_path) => ret_code, out, err
* Submit a job and return an instance of the Popen object for the
submission. This method is useful if the job submission requires logic
beyond just running a system or shell command. See also
"batch_sys.SUBMIT_CMD".
batch_sys.SHOULD_KILL_PROC_GROUP
* A boolean to indicate whether it is necessary to kill a job by sending
a signal to its Unix process group.
batch_sys.SHOULD_POLL_PROC_GROUP
* A boolean to indicate whether it is necessary to poll a job by its PID
as well as the job ID.
batch_sys.KILL_CMD_TMPL
* A Python string template for getting the batch system command to remove
and terminate a job ID. The command is formed using the logic:
batch_sys.KILL_CMD_TMPL % {"job_id": job_id}
batch_sys.REC_ID_FROM_SUBMIT_ERR
batch_sys.REC_ID_FROM_SUBMIT_OUT
* A regular expression (compiled) to extract the job "id" from the standard
output or standard error of the job submission command.
batch_sys.SUBMIT_CMD_ENV
* A Python dict (or an iterable that can be used to update a dict)
containing extra environment variables for getting the batch system
command to submit a job file.
batch_sys.SUBMIT_CMD_TMPL
* A Python string template for getting the batch system command to submit a
job file. The command is formed using the logic:
batch_sys.SUBMIT_CMD_TMPL % {"job": job_file_path}
See also "batch_sys._job_submit_impl".
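As an illustration only (not a handler shipped with cylc), a minimal handler
module placed under "cylc.batch_sys_handlers" might look like this:
    import re
    class MYSYSHandler(object):
        POLL_CMD = "mysys-stat"
        KILL_CMD_TMPL = "mysys-del %(job_id)s"
        SUBMIT_CMD_TMPL = "mysys-sub %(job)s"
        REC_ID_FROM_SUBMIT_OUT = re.compile(r"^Job ID: (?P<id>\d+)$")
    BATCH_SYS_HANDLER = MYSYSHandler()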
"""
import os
import shlex
from shutil import rmtree
from signal import SIGKILL
import stat
from subprocess import Popen, PIPE
import sys
import traceback
from cylc.mkdir_p import mkdir_p
from cylc.task_message import (
CYLC_JOB_PID, CYLC_JOB_INIT_TIME, CYLC_JOB_EXIT_TIME, CYLC_JOB_EXIT,
CYLC_MESSAGE)
from cylc.task_outputs import TASK_OUTPUT_SUCCEEDED
from cylc.task_job_logs import (
JOB_LOG_JOB, JOB_LOG_OUT, JOB_LOG_ERR, JOB_LOG_STATUS)
from cylc.wallclock import get_current_time_string
class JobPollContext(object):
"""Context object for a job poll.
0 ctx.job_log_dir -- cycle/task/submit_num
1 ctx.batch_sys_name -- batch system name
2 ctx.batch_sys_job_id -- job ID in batch system
3 ctx.batch_sys_exit_polled -- 0 for false, 1 for true
4 ctx.run_status -- 0 for success, 1 for failure
5 ctx.run_signal -- signal received on run failure
6 ctx.time_submit_exit -- submit (exit) time
7 ctx.time_run -- run start time
8 ctx.time_run_exit -- run exit time
"""
def __init__(self, job_log_dir):
self.job_log_dir = job_log_dir
self.batch_sys_name = None
self.batch_sys_job_id = None
self.batch_sys_exit_polled = None
self.pid = None
self.run_status = None
self.run_signal = None
self.time_submit_exit = None
self.time_run = None
self.time_run_exit = None
self.messages = []
def get_summary_str(self):
"""Return the poll context as a summary string delimited by "|"."""
items = []
for item in [
self.job_log_dir,
self.batch_sys_name,
self.batch_sys_job_id,
self.batch_sys_exit_polled,
self.run_status,
self.run_signal,
self.time_submit_exit,
self.time_run,
self.time_run_exit]:
if item is None:
items.append("")
else:
items.append(str(item))
return "|".join(items)
class BatchSysManager(object):
"""Job submission, poll and kill.
Manage the importing of job submission method modules.
"""
CYLC_BATCH_SYS_NAME = "CYLC_BATCH_SYS_NAME"
CYLC_BATCH_SYS_JOB_ID = "CYLC_BATCH_SYS_JOB_ID"
CYLC_BATCH_SYS_JOB_SUBMIT_TIME = "CYLC_BATCH_SYS_JOB_SUBMIT_TIME"
CYLC_BATCH_SYS_EXIT_POLLED = "CYLC_BATCH_SYS_EXIT_POLLED"
LINE_PREFIX_CYLC_DIR = "export CYLC_DIR="
LINE_PREFIX_BATCH_SYS_NAME = "# Job submit method: "
LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL = "# Job submit command template: "
LINE_PREFIX_EXECUTION_TIME_LIMIT = "# Execution time limit: "
LINE_PREFIX_EOF = "#EOF: "
LINE_PREFIX_JOB_LOG_DIR = "# Job log directory: "
LINE_UPDATE_CYLC_DIR = (
"# N.B. CYLC_DIR has been updated on the remote host\n")
OUT_PREFIX_COMMAND = "[TASK JOB COMMAND]"
OUT_PREFIX_MESSAGE = "[TASK JOB MESSAGE]"
OUT_PREFIX_SUMMARY = "[TASK JOB SUMMARY]"
OUT_PREFIX_CMD_ERR = "[TASK JOB ERROR]"
_INSTANCES = {}
@classmethod
def configure_suite_run_dir(cls, suite_run_dir):
"""Add local python module paths if not already done."""
for sub_dir in ["python", os.path.join("lib", "python")]:
# TODO - eventually drop the deprecated "python" sub-dir.
suite_py = os.path.join(suite_run_dir, sub_dir)
if os.path.isdir(suite_py) and suite_py not in sys.path:
sys.path.append(suite_py)
def _get_sys(self, batch_sys_name):
"""Return an instance of the class for "batch_sys_name"."""
if batch_sys_name in self._INSTANCES:
return self._INSTANCES[batch_sys_name]
for key in [
"cylc.batch_sys_handlers." + batch_sys_name,
batch_sys_name]:
try:
mod_of_name = __import__(key, fromlist=[key])
self._INSTANCES[batch_sys_name] = getattr(
mod_of_name, "BATCH_SYS_HANDLER")
return self._INSTANCES[batch_sys_name]
except ImportError:
if key == batch_sys_name:
raise
def format_directives(self, job_conf):
"""Format the job directives for a job file, if relevant."""
batch_sys = self._get_sys(job_conf['batch_system_name'])
if hasattr(batch_sys, "format_directives"):
return batch_sys.format_directives(job_conf)
def get_fail_signals(self, job_conf):
"""Return a list of failure signal names to trap in the job file."""
batch_sys = self._get_sys(job_conf['batch_system_name'])
if hasattr(batch_sys, "get_fail_signals"):
return batch_sys.get_fail_signals(job_conf)
return ["EXIT", "ERR", "TERM", "XCPU"]
def get_vacation_signal(self, job_conf):
"""Return the vacation signal name for a job file."""
batch_sys = self._get_sys(job_conf['batch_system_name'])
if hasattr(batch_sys, "get_vacation_signal"):
return batch_sys.get_vacation_signal(job_conf)
def jobs_kill(self, job_log_root, job_log_dirs):
"""Kill multiple jobs.
job_log_root -- The log/job/ sub-directory of the suite.
job_log_dirs -- A list containing point/name/submit_num for task jobs.
"""
# Note: The more efficient way to do this is to group the jobs by their
# batch systems, and call the kill command for each batch system once.
# However, this will make it more difficult to determine if the kill
# command for a particular job is successful or not.
if "$" in job_log_root:
job_log_root = os.path.expandvars(job_log_root)
self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])
now = get_current_time_string()
for job_log_dir in job_log_dirs:
ret_code, err = self.job_kill(
os.path.join(job_log_root, job_log_dir, JOB_LOG_STATUS))
sys.stdout.write("%s%s|%s|%d\n" % (
self.OUT_PREFIX_SUMMARY, now, job_log_dir, ret_code))
            # Note: Printing STDERR to STDOUT may look a bit strange, but it
# requires less logic for the suite to parse the output.
if err.strip():
for line in err.splitlines(True):
if not line.endswith("\n"):
line += "\n"
sys.stdout.write("%s%s|%s|%s" % (
self.OUT_PREFIX_CMD_ERR, now, job_log_dir, line))
def jobs_poll(self, job_log_root, job_log_dirs):
"""Poll multiple jobs.
job_log_root -- The log/job/ sub-directory of the suite.
job_log_dirs -- A list containing point/name/submit_num for task jobs.
"""
if "$" in job_log_root:
job_log_root = os.path.expandvars(job_log_root)
self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])
ctx_list = [] # Contexts for all relevant jobs
ctx_list_by_batch_sys = {} # {batch_sys_name1: [ctx1, ...], ...}
for job_log_dir in job_log_dirs:
ctx = self._jobs_poll_status_files(job_log_root, job_log_dir)
if ctx is None:
continue
ctx_list.append(ctx)
if not ctx.batch_sys_name or not ctx.batch_sys_job_id:
# Lost batch system information for some reason.
# Mark the job as if it is no longer in the batch system.
ctx.batch_sys_exit_polled = 1
sys.stderr.write(
"%s/%s: incomplete batch system info\n" % (
ctx.job_log_dir, JOB_LOG_STATUS))
# We can trust:
# * Jobs previously polled to have exited the batch system.
# * Jobs succeeded or failed with ERR/EXIT.
if (ctx.batch_sys_exit_polled or ctx.run_status == 0 or
ctx.run_signal in ["ERR", "EXIT"]):
continue
if ctx.batch_sys_name not in ctx_list_by_batch_sys:
ctx_list_by_batch_sys[ctx.batch_sys_name] = []
ctx_list_by_batch_sys[ctx.batch_sys_name].append(ctx)
for batch_sys_name, my_ctx_list in ctx_list_by_batch_sys.items():
self._jobs_poll_batch_sys(
job_log_root, batch_sys_name, my_ctx_list)
cur_time_str = get_current_time_string()
for ctx in ctx_list:
for message in ctx.messages:
sys.stdout.write("%s%s|%s|%s\n" % (
self.OUT_PREFIX_MESSAGE,
cur_time_str,
ctx.job_log_dir,
message))
sys.stdout.write("%s%s|%s\n" % (
self.OUT_PREFIX_SUMMARY,
cur_time_str,
ctx.get_summary_str()))
def jobs_submit(self, job_log_root, job_log_dirs, remote_mode=False,
utc_mode=False):
"""Submit multiple jobs.
job_log_root -- The log/job/ sub-directory of the suite.
job_log_dirs -- A list containing point/name/submit_num for task jobs.
remote_mode -- am I running on the remote job host?
utc_mode -- is the suite running in UTC mode?
"""
if "$" in job_log_root:
job_log_root = os.path.expandvars(job_log_root)
self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])
if remote_mode:
items = self._jobs_submit_prep_by_stdin(job_log_root, job_log_dirs)
else:
items = self._jobs_submit_prep_by_args(job_log_root, job_log_dirs)
now = get_current_time_string(override_use_utc=utc_mode)
for job_log_dir, batch_sys_name, submit_opts in items:
job_file_path = os.path.join(
job_log_root, job_log_dir, JOB_LOG_JOB)
if not batch_sys_name:
sys.stdout.write("%s%s|%s|1|\n" % (
self.OUT_PREFIX_SUMMARY, now, job_log_dir))
continue
ret_code, out, err, job_id = self._job_submit_impl(
job_file_path, batch_sys_name, submit_opts)
sys.stdout.write("%s%s|%s|%d|%s\n" % (
self.OUT_PREFIX_SUMMARY, now, job_log_dir, ret_code, job_id))
for key, value in [("STDERR", err), ("STDOUT", out)]:
if value is None or not value.strip():
continue
for line in value.splitlines(True):
if not value.endswith("\n"):
value += "\n"
sys.stdout.write("%s%s|%s|[%s] %s" % (
self.OUT_PREFIX_COMMAND, now, job_log_dir, key, line))
def job_kill(self, st_file_path):
"""Ask batch system to terminate the job specified in "st_file_path".
Return 0 on success, non-zero integer on failure.
"""
# SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job.status
self.configure_suite_run_dir(st_file_path.rsplit(os.sep, 6)[0])
try:
st_file = open(st_file_path)
for line in st_file:
if line.startswith(self.CYLC_BATCH_SYS_NAME + "="):
batch_sys = self._get_sys(line.strip().split("=", 1)[1])
break
else:
return (1,
"Cannot determine batch system from %s file" % (
JOB_LOG_STATUS))
st_file.seek(0, 0) # rewind
if getattr(batch_sys, "SHOULD_KILL_PROC_GROUP", False):
for line in st_file:
if line.startswith(CYLC_JOB_PID + "="):
pid = line.strip().split("=", 1)[1]
try:
os.killpg(os.getpgid(int(pid)), SIGKILL)
except (OSError, ValueError) as exc:
traceback.print_exc()
return (1, str(exc))
else:
return (0, "")
st_file.seek(0, 0) # rewind
if hasattr(batch_sys, "KILL_CMD_TMPL"):
for line in st_file:
if not line.startswith(self.CYLC_BATCH_SYS_JOB_ID + "="):
continue
job_id = line.strip().split("=", 1)[1]
command = shlex.split(
batch_sys.KILL_CMD_TMPL % {"job_id": job_id})
try:
proc = Popen(
command, stdin=open(os.devnull), stderr=PIPE)
except OSError as exc:
# subprocess.Popen has a bad habit of not setting the
# filename of the executable when it raises an OSError.
if not exc.filename:
exc.filename = command[0]
traceback.print_exc()
return (1, str(exc))
else:
return (proc.wait(), proc.communicate()[1])
return (1, "Cannot determine batch job ID from %s file" % (
JOB_LOG_STATUS))
except IOError as exc:
return (1, str(exc))
@classmethod
def _create_nn(cls, job_file_path):
"""Create NN symbolic link, if necessary.
If NN => 01, remove numbered directories with submit numbers greater
than 01.
Helper for "self._job_submit_impl".
"""
job_file_dir = os.path.dirname(job_file_path)
source = os.path.basename(job_file_dir)
task_log_dir = os.path.dirname(job_file_dir)
nn_path = os.path.join(task_log_dir, "NN")
try:
old_source = os.readlink(nn_path)
except OSError:
old_source = None
if old_source is not None and old_source != source:
os.unlink(nn_path)
old_source = None
if old_source is None:
os.symlink(source, nn_path)
# On submit 1, remove any left over digit directories from prev runs
if source == "01":
for name in os.listdir(task_log_dir):
if name != source and name.isdigit():
# Ignore errors, not disastrous if rmtree fails
rmtree(
os.path.join(task_log_dir, name), ignore_errors=True)
def _filter_submit_output(self, st_file_path, batch_sys, out, err):
"""Filter submit command output, if relevant."""
        job_id = None
        # Guard against handlers that define neither REC_ID_FROM_SUBMIT_ERR
        # nor REC_ID_FROM_SUBMIT_OUT, which would otherwise leave text/rec_id
        # unbound below.
        text, rec_id = None, None
if hasattr(batch_sys, "REC_ID_FROM_SUBMIT_ERR"):
text = err
rec_id = batch_sys.REC_ID_FROM_SUBMIT_ERR
elif hasattr(batch_sys, "REC_ID_FROM_SUBMIT_OUT"):
text = out
rec_id = batch_sys.REC_ID_FROM_SUBMIT_OUT
if rec_id:
for line in str(text).splitlines():
match = rec_id.match(line)
if match:
job_id = match.group("id")
job_status_file = open(st_file_path, "a")
job_status_file.write("%s=%s\n" % (
self.CYLC_BATCH_SYS_JOB_ID, job_id))
job_status_file.write("%s=%s\n" % (
self.CYLC_BATCH_SYS_JOB_SUBMIT_TIME,
get_current_time_string()))
job_status_file.close()
break
if hasattr(batch_sys, "filter_submit_output"):
out, err = batch_sys.filter_submit_output(out, err)
return out, err, job_id
def _jobs_poll_status_files(self, job_log_root, job_log_dir):
"""Helper 1 for self.jobs_poll(job_log_root, job_log_dirs)."""
ctx = JobPollContext(job_log_dir)
try:
handle = open(os.path.join(
job_log_root, ctx.job_log_dir, JOB_LOG_STATUS))
except IOError as exc:
sys.stderr.write(str(exc) + "\n")
return
for line in handle:
if "=" not in line:
continue
key, value = line.strip().split("=", 1)
if key == self.CYLC_BATCH_SYS_NAME:
ctx.batch_sys_name = value
elif key == self.CYLC_BATCH_SYS_JOB_ID:
ctx.batch_sys_job_id = value
elif key == self.CYLC_BATCH_SYS_EXIT_POLLED:
ctx.batch_sys_exit_polled = 1
elif key == CYLC_JOB_PID:
ctx.pid = value
elif key == self.CYLC_BATCH_SYS_JOB_SUBMIT_TIME:
ctx.time_submit_exit = value
elif key == CYLC_JOB_INIT_TIME:
ctx.time_run = value
elif key == CYLC_JOB_EXIT_TIME:
ctx.time_run_exit = value
elif key == CYLC_JOB_EXIT:
if value == TASK_OUTPUT_SUCCEEDED.upper():
ctx.run_status = 0
else:
ctx.run_status = 1
ctx.run_signal = value
elif key == CYLC_MESSAGE:
ctx.messages.append(value)
handle.close()
return ctx
def _jobs_poll_batch_sys(self, job_log_root, batch_sys_name, my_ctx_list):
"""Helper 2 for self.jobs_poll(job_log_root, job_log_dirs)."""
exp_job_ids = [ctx.batch_sys_job_id for ctx in my_ctx_list]
bad_job_ids = list(exp_job_ids)
exp_pids = []
bad_pids = []
items = [[self._get_sys(batch_sys_name), exp_job_ids, bad_job_ids]]
if getattr(items[0][0], "SHOULD_POLL_PROC_GROUP", False):
exp_pids = [ctx.pid for ctx in my_ctx_list if ctx.pid is not None]
bad_pids.extend(exp_pids)
items.append([self._get_sys("background"), exp_pids, bad_pids])
for batch_sys, exp_ids, bad_ids in items:
if hasattr(batch_sys, "get_poll_many_cmd"):
# Some poll commands may not be as simple
cmd = batch_sys.get_poll_many_cmd(exp_ids)
else: # if hasattr(batch_sys, "POLL_CMD"):
# Simple poll command that takes a list of job IDs
cmd = [batch_sys.POLL_CMD] + exp_ids
try:
proc = Popen(
cmd, stdin=open(os.devnull), stderr=PIPE, stdout=PIPE)
except OSError as exc:
# subprocess.Popen has a bad habit of not setting the
# filename of the executable when it raises an OSError.
if not exc.filename:
exc.filename = cmd[0]
sys.stderr.write(str(exc) + "\n")
return
proc.wait()
out, err = proc.communicate()
sys.stderr.write(err)
if hasattr(batch_sys, "filter_poll_many_output"):
# Allow custom filter
for id_ in batch_sys.filter_poll_many_output(out):
try:
bad_ids.remove(id_)
except ValueError:
pass
else:
# Just about all poll commands return a table, with column 1
# being the job ID. The logic here should be sufficient to
# ensure that any table header is ignored.
for line in out.splitlines():
try:
head = line.split(None, 1)[0]
except IndexError:
continue
if head in exp_ids:
try:
bad_ids.remove(head)
except ValueError:
pass
for ctx in my_ctx_list:
ctx.batch_sys_exit_polled = int(
ctx.batch_sys_job_id in bad_job_ids)
# Exited batch system, but process still running
            # This can happen to jobs in some "at" implementations
if (ctx.batch_sys_exit_polled and
ctx.pid in exp_pids and ctx.pid not in bad_pids):
ctx.batch_sys_exit_polled = 0
# Add information to "job.status"
if ctx.batch_sys_exit_polled:
try:
handle = open(os.path.join(
job_log_root, ctx.job_log_dir, JOB_LOG_STATUS), "a")
handle.write("%s=%s\n" % (
self.CYLC_BATCH_SYS_EXIT_POLLED,
get_current_time_string()))
handle.close()
except IOError as exc:
sys.stderr.write(str(exc) + "\n")
def _job_submit_impl(
self, job_file_path, batch_sys_name, submit_opts):
"""Helper for self.jobs_submit() and self.job_submit()."""
# Create NN symbolic link, if necessary
self._create_nn(job_file_path)
for name in JOB_LOG_ERR, JOB_LOG_OUT:
try:
os.unlink(os.path.join(job_file_path, name))
except OSError:
pass
# Start new status file
job_status_file = open(job_file_path + ".status", "w")
job_status_file.write(
"%s=%s\n" % (self.CYLC_BATCH_SYS_NAME, batch_sys_name))
job_status_file.close()
# Submit job
batch_sys = self._get_sys(batch_sys_name)
proc_stdin_arg = None
proc_stdin_value = open(os.devnull)
if hasattr(batch_sys, "get_submit_stdin"):
proc_stdin_arg, proc_stdin_value = batch_sys.get_submit_stdin(
job_file_path, submit_opts)
if hasattr(batch_sys, "submit"):
# batch_sys.submit should handle OSError, if relevant.
ret_code, out, err = batch_sys.submit(job_file_path, submit_opts)
else:
env = None
if hasattr(batch_sys, "SUBMIT_CMD_ENV"):
env = dict(os.environ)
env.update(batch_sys.SUBMIT_CMD_ENV)
batch_submit_cmd_tmpl = submit_opts.get("batch_submit_cmd_tmpl")
if batch_submit_cmd_tmpl:
# No need to catch OSError when using shell. It is unlikely
# that we do not have a shell, and still manage to get as far
# as here.
batch_sys_cmd = batch_submit_cmd_tmpl % {"job": job_file_path}
proc = Popen(
batch_sys_cmd,
stdin=proc_stdin_arg, stdout=PIPE, stderr=PIPE,
shell=True, env=env)
else:
command = shlex.split(
batch_sys.SUBMIT_CMD_TMPL % {"job": job_file_path})
try:
proc = Popen(
command,
stdin=proc_stdin_arg, stdout=PIPE, stderr=PIPE,
env=env)
except OSError as exc:
# subprocess.Popen has a bad habit of not setting the
# filename of the executable when it raises an OSError.
if not exc.filename:
exc.filename = command[0]
return 1, "", str(exc), ""
out, err = proc.communicate(proc_stdin_value)
ret_code = proc.wait()
# Filter submit command output, if relevant
# Get job ID, if possible
job_id = None
if out or err:
try:
out, err, job_id = self._filter_submit_output(
job_file_path + ".status", batch_sys, out, err)
except OSError:
ret_code = 1
self.job_kill(job_file_path + ".status")
return ret_code, out, err, job_id
def _jobs_submit_prep_by_args(self, job_log_root, job_log_dirs):
"""Prepare job files for submit by reading files in arguments.
Job files are specified in the arguments in local mode. Extract job
submission methods and job submission command templates from each job
file.
Return a list, where each element contains something like:
(job_log_dir, batch_sys_name, submit_opts)
"""
items = []
for job_log_dir in job_log_dirs:
job_file_path = os.path.join(job_log_root, job_log_dir, "job")
batch_sys_name = None
submit_opts = {}
for line in open(job_file_path):
if line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
batch_sys_name = line.replace(
self.LINE_PREFIX_BATCH_SYS_NAME, "").strip()
elif line.startswith(self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL):
submit_opts["batch_submit_cmd_tmpl"] = line.replace(
self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
elif line.startswith(self.LINE_PREFIX_EXECUTION_TIME_LIMIT):
submit_opts["execution_time_limit"] = float(line.replace(
self.LINE_PREFIX_EXECUTION_TIME_LIMIT, "").strip())
items.append((job_log_dir, batch_sys_name, submit_opts))
return items
def _jobs_submit_prep_by_stdin(self, job_log_root, job_log_dirs):
"""Prepare job files for submit by reading from STDIN.
Job files are uploaded via STDIN in remote mode. Modify job
files' CYLC_DIR for this host. Extract job submission methods
and job submission command templates from each job file.
Return a list, where each element contains something like:
(job_log_dir, batch_sys_name, submit_opts)
"""
items = [[job_log_dir, None, {}] for job_log_dir in job_log_dirs]
items_map = {}
for item in items:
items_map[item[0]] = item
handle = None
batch_sys_name = None
submit_opts = {}
job_log_dir = None
lines = []
# Get job files from STDIN.
# Modify CYLC_DIR in job file, if necessary.
# Get batch system name and batch submit command template from each job
# file.
# Write job file in correct location.
while True: # Note: "for cur_line in sys.stdin:" may hang
cur_line = sys.stdin.readline()
if not cur_line:
if handle is not None:
handle.close()
break
if cur_line.startswith(self.LINE_PREFIX_CYLC_DIR):
old_line = cur_line
cur_line = "%s'%s'\n" % (
self.LINE_PREFIX_CYLC_DIR, os.environ["CYLC_DIR"])
if old_line != cur_line:
lines.append(self.LINE_UPDATE_CYLC_DIR)
elif cur_line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
batch_sys_name = cur_line.replace(
self.LINE_PREFIX_BATCH_SYS_NAME, "").strip()
elif cur_line.startswith(self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL):
submit_opts["batch_submit_cmd_tmpl"] = cur_line.replace(
self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
elif cur_line.startswith(self.LINE_PREFIX_EXECUTION_TIME_LIMIT):
submit_opts["execution_time_limit"] = float(cur_line.replace(
self.LINE_PREFIX_EXECUTION_TIME_LIMIT, "").strip())
elif cur_line.startswith(self.LINE_PREFIX_JOB_LOG_DIR):
job_log_dir = cur_line.replace(
self.LINE_PREFIX_JOB_LOG_DIR, "").strip()
mkdir_p(os.path.join(job_log_root, job_log_dir))
handle = open(
os.path.join(job_log_root, job_log_dir, "job.tmp"), "wb")
if handle is None:
lines.append(cur_line)
else:
for line in lines + [cur_line]:
handle.write(line)
lines = []
if cur_line.startswith(self.LINE_PREFIX_EOF + job_log_dir):
handle.close()
# Make it executable
os.chmod(handle.name, (
os.stat(handle.name).st_mode |
stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
# Rename from "*/job.tmp" to "*/job"
os.rename(handle.name, handle.name[:-4])
try:
items_map[job_log_dir][1] = batch_sys_name
items_map[job_log_dir][2] = submit_opts
except KeyError:
pass
handle = None
job_log_dir = None
batch_sys_name = None
submit_opts = {}
return items
| gpl-3.0 | 4,543,488,674,901,808,600 | 42.232984 | 79 | 0.550893 | false | 3.855492 | false | false | false |
polysquare/polysquare-ci-scripts | setup.py | 1 | 1485 | # /setup.py
#
# Installation and setup script for polysquare-ci-scripts
#
# See /LICENCE.md for Copyright information
"""Installation and setup script for polysquare-ci-scripts."""
from setuptools import (find_packages, setup)
setup(name="polysquare-ci-scripts",
version="0.0.1",
description="Polysquare Continuous Integration Scripts",
long_description_markdown_filename="README.md",
author="Sam Spilsbury",
author_email="[email protected]",
url="http://github.com/polysquare/polysquare-ci-scripts",
classifiers=["Development Status :: 3 - Alpha",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License"],
license="MIT",
keywords="development linters",
packages=find_packages(exclude=["test"]),
requires=[
"setuptools"
],
extras_require={
"upload": ["setuptools-markdown>=0.1"]
},
zip_safe=True,
include_package_data=True)
| mit | -5,243,586,838,271,640,000 | 38.078947 | 66 | 0.577778 | false | 4.486405 | false | false | false |
aonotas/chainer | chainer/links/model/classifier.py | 1 | 4579 | from chainer.functions.evaluation import accuracy
from chainer.functions.loss import softmax_cross_entropy
from chainer import link
from chainer import reporter
class Classifier(link.Chain):
"""A simple classifier model.
    This is an example of a chain that wraps another chain. It computes the
loss and accuracy based on a given input/label pair.
Args:
predictor (~chainer.Link): Predictor network.
lossfun (function): Loss function.
accfun (function): Function that computes accuracy.
label_key (int or str): Key to specify label variable from arguments.
When it is ``int``, a variable in positional arguments is used.
And when it is ``str``, a variable in keyword arguments is used.
Attributes:
predictor (~chainer.Link): Predictor network.
lossfun (function): Loss function.
accfun (function): Function that computes accuracy.
y (~chainer.Variable): Prediction for the last minibatch.
loss (~chainer.Variable): Loss value for the last minibatch.
accuracy (~chainer.Variable): Accuracy for the last minibatch.
compute_accuracy (bool): If ``True``, compute accuracy on the forward
computation. The default value is ``True``.
.. note::
This link uses :func:`chainer.softmax_cross_entropy` with
default arguments as a loss function (specified by ``lossfun``),
if users do not explicitly change it. In particular, the loss function
does not support double backpropagation.
If you need second or higher order differentiation, you need to turn
it on with ``enable_double_backprop=True``:
>>> import chainer.functions as F
>>> import chainer.links as L
>>>
>>> def lossfun(x, t):
... return F.softmax_cross_entropy(
... x, t, enable_double_backprop=True)
>>>
>>> predictor = L.Linear(10)
>>> model = L.Classifier(predictor, lossfun=lossfun)
"""
compute_accuracy = True
def __init__(self, predictor,
lossfun=softmax_cross_entropy.softmax_cross_entropy,
accfun=accuracy.accuracy,
label_key=-1):
if not (isinstance(label_key, (int, str))):
raise TypeError('label_key must be int or str, but is %s' %
type(label_key))
super(Classifier, self).__init__()
self.lossfun = lossfun
self.accfun = accfun
self.y = None
self.loss = None
self.accuracy = None
self.label_key = label_key
with self.init_scope():
self.predictor = predictor
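    # Typical use (illustrative): model = Classifier(L.Linear(10)); calling
    # model(x, t) returns the loss and reports 'loss'/'accuracy' through
    # chainer.reporter.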
def __call__(self, *args, **kwargs):
"""Computes the loss value for an input and label pair.
It also computes accuracy and stores it to the attribute.
Args:
args (list of ~chainer.Variable): Input minibatch.
kwargs (dict of ~chainer.Variable): Input minibatch.
        When ``label_key`` is ``int``, the corresponding element in ``args``
        is treated as ground truth labels. And when it is ``str``, the
        element in ``kwargs`` is used.
        All elements of ``args`` and ``kwargs`` except the ground truth
        labels are features.
        It feeds features to the predictor and compares the result
        with ground truth labels.
Returns:
~chainer.Variable: Loss value.
"""
if isinstance(self.label_key, int):
if not (-len(args) <= self.label_key < len(args)):
msg = 'Label key %d is out of bounds' % self.label_key
raise ValueError(msg)
t = args[self.label_key]
if self.label_key == -1:
args = args[:-1]
else:
args = args[:self.label_key] + args[self.label_key + 1:]
elif isinstance(self.label_key, str):
if self.label_key not in kwargs:
msg = 'Label key "%s" is not found' % self.label_key
raise ValueError(msg)
t = kwargs[self.label_key]
del kwargs[self.label_key]
self.y = None
self.loss = None
self.accuracy = None
self.y = self.predictor(*args, **kwargs)
self.loss = self.lossfun(self.y, t)
reporter.report({'loss': self.loss}, self)
if self.compute_accuracy:
self.accuracy = self.accfun(self.y, t)
reporter.report({'accuracy': self.accuracy}, self)
return self.loss
| mit | 4,334,894,875,054,640,600 | 37.158333 | 78 | 0.591832 | false | 4.344402 | false | false | false |
mahmoud/wapiti | wapiti/operations/utils.py | 1 | 12249 | # -*- coding: utf-8 -*-
import sys
from heapq import heappush, heappop
import itertools
from functools import total_ordering
def is_scalar(obj):
    return not hasattr(obj, '__iter__') or isinstance(obj, basestring)
def is_iterable(obj):
    # Inverse of is_scalar(); added so that bucketize() below does not
    # reference an undefined name.
    return not is_scalar(obj)
def prefixed(arg, prefix=None):
if prefix and not arg.startswith(prefix):
arg = prefix + arg
return arg
@total_ordering
class MaxInt(long):
"""
A quite-large integer type that tries to be like float('inf')
(Infinity), but can be used for slicing and other integer
operations. float('inf') is generally more correct, except that
mixing a float and integer in arithmetic operations will result in
a float, which will raise an error on slicing.
"""
def __new__(cls, *a, **kw):
return super(MaxInt, cls).__new__(cls, sys.maxint + 1)
def __init__(self, name='MAX'):
self._name = str(name)
def __repr__(self):
return self._name
def __str__(self):
return repr(self)
# TODO: better math
for func in ('__add__', '__sub__', '__mul__', '__floordiv__', '__div__',
'__mod__', '__divmod__', '__pow__', '__lshift__',
'__rshift__'):
locals()[func] = lambda self, other: self
def __gt__(self, other):
return not self == other
def __eq__(self, other):
return isinstance(other, MaxInt)
def __int__(self):
return self
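# Illustrative behaviour: slicing with MaxInt() works where float('inf') would
# raise a TypeError, e.g. 'abcdef'[:MaxInt()] == 'abcdef'.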
class OperationExample(object):
"""
Sort of like a partial, but specialer.
# other types of tests?
"""
def __init__(self,
param=None,
limit=None,
op_type=None,
**kw):
self.op_type = op_type
self.param = param
self.limit = limit
self.doc = kw.pop('doc', '')
self.test = kw.pop('test', None)
# test defaults to limit_equal_or_depleted in test_ops.py
if kw:
raise TypeError('got unexpected keyword arguments: %r' % kw)
@property
def op_name(self):
if self.op_type is None:
return None
return self.op_type.__name__
@property
def disp_name(self):
if not self.op_type:
return '(unbound OperationExample)'
tmpl = '%(type)s(%(param)r, limit=%(limit)s)'
if self.op_type.input_field is None:
tmpl = '%(type)s(limit=%(limit)s)'
return tmpl % {'type': self.op_type.__name__,
'param': self.param,
'limit': self.limit}
def bind_op_type(self, op_type):
if self.op_type is None:
self.op_type = op_type
if self.limit is None:
try:
pql = op_type.per_query_limit
except AttributeError:
pql = op_type.subop_chain[0].per_query_limit
self.limit = pql.get_limit()
return
def make_op(self, mag=None):
if not self.op_type:
raise TypeError('no Operation type assigned')
mag = int(mag or 1)
limit = self.limit * mag
if self.op_type.input_field is None:
return self.op_type(limit=limit)
return self.op_type(self.param, limit=limit)
def __repr__(self):
cn = self.__class__.__name__
kwargs = ['param', 'limit', 'test', 'doc']
kw_parts = ['op_type=%s' % self.op_name]
vals = [getattr(self, a) for a in kwargs if getattr(self, a)]
kw_parts.extend(['%s=%r' % (a, v) for a, v in zip(kwargs, vals)])
kwarg_str = ', '.join(kw_parts)
return '%s(%s)' % (cn, kwarg_str)
__str__ = __repr__
"""
TypeWrapper and MetaTypeWrapper are a pair of what are technically
metaclasses, but really just a very overwrought way of enabling
customized versions of types floating around in some
locations. Because Wapiti is a DSL, but also just a bunch of Python,
we have to deal with the fact that if you modify a type/class, it will
be modified everywhere that references it.
TL;DR: This overblown thing lets Operations use something like
Prioritized(GetCategory, key='total_count'), which sets a priority for
better queueing, without modifying the GetCategory Operation
itself. (Different operations will want to prioritize different
things.)
(There is almost certainly a better way, but this was a bit of
fun. Ever made an object that is an instance and a subclass of
itself?)
"""
def make_type_wrapper(name, init_args=None):
init_args = init_args or []
args, defaults = [], {}
for ia in init_args:
try:
arg, _default = ia
defaults[arg] = _default
except ValueError:
arg = ia
if not isinstance(arg, basestring):
raise TypeError('expected string arg name, not %r' % arg)
args.append(arg)
attrs = {'_args': args, '_defaults': defaults}
return WrapperType(str(name), (Wrapper,), attrs)
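# Illustrative use (names are hypothetical): the "Prioritized" wrapper
# mentioned above could be built as
#   Prioritized = make_type_wrapper('Prioritized', [('key', None)])
#   prioritized_get = Prioritized(GetCategory, key='total_count')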
class WrapperType(type):
@property
def _repr_args(self):
ret = []
for a in self._args:
try:
ret.append((a, self._defaults[a]))
except KeyError:
ret.append(a)
return ret
def __repr__(cls):
name, cname = cls.__name__, cls.__class__.__name__
if cls._repr_args:
return '%s(%r, %r)' % (cname, name, cls._repr_args)
else:
return '%s(%r)' % (cname, name)
class Wrapper(object):
__metaclass__ = WrapperType
_args, _defaults = [], {}
def __init__(self, to_wrap, *args, **kwargs):
wrapped_dict = {}
if isinstance(to_wrap, Wrapper):
wrapped_dict = dict(to_wrap._wrapped_dict)
to_wrap = to_wrap._wrapped
self.__dict__['_wrapped'] = to_wrap
self.__dict__['_wrapped_dict'] = wrapped_dict
cn = self.__name__
for arg_i, arg_name in enumerate(self._args):
try:
val = args[arg_i]
if arg_name in kwargs:
raise TypeError('%s got multiple values for arg %r'
% (cn, arg_name))
except IndexError:
try:
val = kwargs.pop(arg_name)
except KeyError:
try:
val = self._defaults[arg_name]
except KeyError:
raise TypeError('%s expected required arg %r'
% (cn, arg_name))
setattr(self, arg_name, val)
return
def __repr__(self):
kv = ', '.join(['%s=%r' % (k, v) for k, v
in self._wrapped_dict.items()])
tmpl = "<wrapped %r (%s)>"
return tmpl % (self._wrapped, kv)
def __getattr__(self, name):
return getattr(self._wrapped, name)
def __setattr__(self, name, val):
super(Wrapper, self).__setattr__(name, val)
self._wrapped_dict[name] = val
def __delattr__(self, name, val):
super(Wrapper, self).__delattr__(name, val)
self._wrapped_dict.pop(name, None)
def __call__(self, *a, **kw):
return self._wrapped(*a, **kw)
REMOVED = '<removed-task>'
class PriorityQueue(object):
"""
Real quick type based on the heapq docs.
"""
def __init__(self):
self._pq = []
self._entry_map = {}
self.counter = itertools.count()
def add(self, task, priority=None):
# larger numbers = higher priority
priority = -int(priority or 0)
if task in self._entry_map:
self.remove_task(task)
count = next(self.counter)
entry = [priority, count, task]
self._entry_map[task] = entry
heappush(self._pq, entry)
def remove(self, task):
entry = self._entry_map.pop(task)
entry[-1] = REMOVED
def _cull(self):
while self._pq:
priority, count, task = self._pq[0]
if task is REMOVED:
heappop(self._pq)
continue
return
raise IndexError('empty priority queue')
def peek(self, default=REMOVED):
try:
self._cull()
_, _, task = self._pq[0]
except IndexError:
if default is not REMOVED:
return default
raise IndexError('peek on empty queue')
return task
def pop(self, default=REMOVED):
try:
self._cull()
_, _, task = heappop(self._pq)
del self._entry_map[task]
except IndexError:
if default is not REMOVED:
return default
raise IndexError('pop on empty queue')
return task
def __len__(self):
return len(self._entry_map)
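# Illustrative use: tasks with larger priority values pop first.
#   pq = PriorityQueue()
#   pq.add('low'); pq.add('high', priority=5)
#   pq.pop()  # -> 'high'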
def chunked_iter(src, size, **kw):
"""
Generates 'size'-sized chunks from 'src' iterable. Unless
the optional 'fill' keyword argument is provided, iterables
    not evenly divisible by 'size' will have a final chunk that is
smaller than 'size'.
Note that fill=None will in fact use None as the fill value.
>>> list(chunked_iter(range(10), 3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(chunked_iter(range(10), 3, fill=None))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
"""
size = int(size)
if size <= 0:
raise ValueError('expected a positive integer chunk size')
do_fill = True
try:
fill_val = kw.pop('fill')
except KeyError:
do_fill = False
fill_val = None
if kw:
raise ValueError('got unexpected keyword arguments: %r' % kw.keys())
if not src:
return
cur_chunk = []
i = 0
for item in src:
cur_chunk.append(item)
i += 1
if i % size == 0:
yield cur_chunk
cur_chunk = []
if cur_chunk:
if do_fill:
lc = len(cur_chunk)
cur_chunk[lc:] = [fill_val] * (size - lc)
yield cur_chunk
return
# From http://en.wikipedia.org/wiki/Wikipedia:Namespace
NAMESPACES = {
'Main': 0,
'Talk': 1,
'User': 2,
'User talk': 3,
'Wikipedia': 4,
'Wikipedia talk': 5,
'File': 6,
'File talk': 7,
'MediaWiki': 8,
'MediaWiki talk': 9,
'Template': 10,
'Template talk': 11,
'Help': 12,
'Help talk': 13,
'Category': 14,
'Category talk': 15,
'Portal': 100,
'Portal talk': 101,
'Book': 108,
'Book talk': 109,
'Special': -1,
'Media': -2}
def bucketize(src, keyfunc=None):
"""
Group values in 'src' iterable by value returned by 'keyfunc'.
keyfunc defaults to bool, which will group the values by
truthiness; at most there will be two keys, True and False, and
each key will have a list with at least one item.
>>> bucketize(range(5))
{False: [0], True: [1, 2, 3, 4]}
>>> is_odd = lambda x: x % 2 == 1
>>> bucketize(range(5), is_odd)
{False: [0, 2, 4], True: [1, 3]}
Value lists are not deduplicated:
>>> bucketize([None, None, None, 'hello'])
{False: [None, None, None], True: ['hello']}
"""
if not is_iterable(src):
raise TypeError('expected an iterable')
if keyfunc is None:
keyfunc = bool
if not callable(keyfunc):
raise TypeError('expected callable key function')
ret = {}
for val in src:
key = keyfunc(val)
ret.setdefault(key, []).append(val)
return ret
def bucketize_bool(src, keyfunc=None):
"""
Like bucketize, but for added convenience returns a tuple of
(truthy_values, falsy_values).
>>> nonempty, empty = bucketize_bool(['', '', 'hi', '', 'bye'])
>>> nonempty
['hi', 'bye']
keyfunc defaults to bool, but can be carefully overridden to
use any function that returns either True or False.
>>> import string
>>> is_digit = lambda x: x in string.digits
>>> decimal_digits, hexletters = bucketize_bool(string.hexdigits, is_digit)
>>> ''.join(decimal_digits), ''.join(hexletters)
('0123456789', 'abcdefABCDEF')
"""
bucketized = bucketize(src, keyfunc)
return bucketized.get(True, []), bucketized.get(False, [])
def coerce_namespace(ns_arg):
ns_str = str(ns_arg).capitalize()
return NAMESPACES.get(ns_str, ns_str)
| bsd-3-clause | 6,357,164,023,130,465,000 | 28.236277 | 79 | 0.5478 | false | 3.758515 | true | false | false |
monuszko/django-anothercrm | anothercrm/models.py | 1 | 5607 | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Person(models.Model):
SEX_CHOICES = (
('M', 'Male'),
('F', 'Female'),
)
#TODO: validators for name, mobile...
firstname = models.CharField(max_length=30)
lastname = models.CharField(max_length=30)
sex = models.CharField(max_length=1, choices=SEX_CHOICES)
email = models.EmailField(
max_length=200, verbose_name=_('Email address'), blank=True)
mobile = models.CharField(
max_length=20, verbose_name=_('Mobile Phone Number'), blank=True)
address = models.CharField(max_length=100, verbose_name=_('Address'),
help_text=_('24 Badger Rd., etc.'), blank=True)
zipcode = models.CharField(max_length=10, verbose_name=_('Postal code'),
help_text=_("For example, '80-209' in Poland"), blank=True)
city = models.CharField(max_length=100, verbose_name=_('City'), blank=True)
state = models.CharField(
max_length=100, verbose_name=_('State'), blank=True)
country = models.CharField(
max_length=2, verbose_name=_('Country'), blank=True)
creation_date = models.DateTimeField(
verbose_name=_('Creation Date'), auto_now_add=True)
modification_date = models.DateTimeField(
verbose_name=_('Modification Date'), auto_now=True)
def get_absolute_url(self):
from django.core.urlresolvers import reverse
from django.utils.text import slugify
fname = slugify(self.firstname)
lname = slugify(self.lastname)
kwargs = {
'firstname': fname,
'lastname': lname,
'pk': self.id,
}
return reverse('anothercrm:person', kwargs=kwargs)
def __unicode__(self):
return u'{0} {1}'.format(self.firstname, self.lastname)
def employee_count(self):
'''
Returns the number of relationships where the person
is employed at a company.
'''
return self.relationship_set.filter(relatype__category='E').count()
def client_count(self):
'''
Returns the number of relationships where the person
        is a client of a company.
'''
return self.relationship_set.filter(relatype__category='C').count()
def company_names(self):
'''
Returns the names of companies the person is involved with.
'''
return ', '.join(self.relationship_set.all().values_list(
'company__name', flat=True))
def employee_relationships(self):
'''
        Returns the relationships where the person
is employed at a company.
'''
return self.relationship_set.filter(relatype__category='E')
def client_relationships(self):
'''
        Returns the relationships where the person
        is a client of a company.
'''
return self.relationship_set.filter(relatype__category='C')
class Trade(models.Model):
name = models.CharField(max_length=100, unique=True,
help_text="the industry the company is in.")
def __unicode__(self):
return self.name
class Company(models.Model):
name = models.CharField(max_length=100)
mission = models.TextField(blank=True, default="To make money.")
trades = models.ManyToManyField(Trade, blank=True)
def __unicode__(self):
return self.name
def get_absolute_url(self):
#TODO: ask on IRC about these imports
from django.core.urlresolvers import reverse
from django.utils.text import slugify
slug = slugify(self.name)
return reverse(
'anothercrm:company', kwargs={'name': slug, 'pk': self.id})
def get_trades(self):
return ', '.join(tr.name for tr in self.trades.all())
get_trades.short_description='Trade(s)'
get_trades.admin_order_field='trades'
def employees_by_position(self):
'''
Returns Relations with employees - not Persons.
'''
return self.relationship_set.filter(
relatype__category='E').order_by('relatype__name')
def clients_by_type(self):
'''
Returns Relations with clients, agents etc - not Persons.
'''
return self.relationship_set.filter(
relatype__category='C').order_by('relatype__name')
class Meta:
verbose_name_plural = _('companies')
class RelationshipType(models.Model):
CATEGORY_CHOICES = (
('E', 'Employee'),
('C', 'Client'),
)
category = models.CharField(max_length=1, choices=CATEGORY_CHOICES)
name = models.CharField(max_length=50, unique=True,
help_text=("For employees, this is position. For customers, it can"
" be 'regular customer', etc."))
notes = models.TextField(blank=True)
def __unicode__(self):
return u'{0} ({1})'.format(self.name, self.get_category_display())
class Relationship(models.Model):
relatype = models.ForeignKey(RelationshipType,
verbose_name=_('relationship type'))
company = models.ForeignKey(Company)
person = models.ForeignKey(Person)
def __unicode__(self):
return u'{0} {1} {2} {3}'.format(self.person.firstname,
self.person.lastname, self.relatype, self.company)
| agpl-3.0 | 7,618,149,162,945,209,000 | 34.713376 | 79 | 0.588193 | false | 4.215789 | false | false | false |
jaeilepp/eggie | mne/viz/_3d.py | 1 | 24122 | """Functions to make 3D plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
from ..externals.six import string_types, advance_iterator
from distutils.version import LooseVersion
import os
import inspect
import warnings
from itertools import cycle
import numpy as np
from scipy import linalg
from ..io.pick import pick_types
from ..surface import get_head_surf, get_meg_helmet_surf, read_surface
from ..transforms import read_trans, _find_trans, apply_trans
from ..utils import get_subjects_dir, logger, _check_subject
from .utils import mne_analyze_colormap, _prepare_trellis, COLORS
def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1):
"""Plot MEG/EEG fields on head surface and helmet in 3D
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
surf_maps : list
The surface mapping information obtained with make_field_map.
time : float | None
The time point at which the field map shall be displayed. If None,
the average peak latency (across sensor types) is used.
time_label : str
How to print info about the time instant visualized.
n_jobs : int
        Number of jobs to run in parallel.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
time_idx = None
if time is None:
time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
if not evoked.times[0] <= time <= evoked.times[-1]:
raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
time_idx = np.argmin(np.abs(evoked.times - time))
types = [sm['kind'] for sm in surf_maps]
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
colormap = mne_analyze_colormap(format='mayavi')
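    # 256-entry RGBA lookup table for the contour lines: blue rows for negative
    # field values, two black rows around zero, red rows for positive values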
colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
np.tile([0., 0., 0., 255.], (2, 1)),
np.tile([255., 0., 0., 255.], (127, 1))])
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, this_map in enumerate(surf_maps):
surf = this_map['surf']
map_data = this_map['data']
map_type = this_map['kind']
map_ch_names = this_map['ch_names']
if map_type == 'eeg':
pick = pick_types(evoked.info, meg=False, eeg=True)
else:
pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
ch_names = [evoked.ch_names[k] for k in pick]
set_ch_names = set(ch_names)
set_map_ch_names = set(map_ch_names)
if set_ch_names != set_map_ch_names:
message = ['Channels in map and data do not match.']
diff = set_map_ch_names - set_ch_names
if len(diff):
message += ['%s not in data file. ' % list(diff)]
diff = set_ch_names - set_map_ch_names
if len(diff):
message += ['%s not in map file.' % list(diff)]
raise RuntimeError(' '.join(message))
data = np.dot(map_data, evoked.data[pick, time_idx])
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
# Make a solid surface
vlim = np.max(np.abs(data))
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
# Now show our field pattern
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim)
fsurf.module_manager.scalar_lut_manager.lut.table = colormap
# And the field lines on top
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
cont = mlab.pipeline.contour_surface(mesh, contours=21,
line_width=1.0,
vmin=-vlim, vmax=vlim,
opacity=alpha)
cont.module_manager.scalar_lut_manager.lut.table = colormap_lines
if '%' in time_label:
time_label %= (1e3 * evoked.times[time_idx])
mlab.text(0.01, 0.01, time_label, width=0.4)
mlab.view(10, 60)
return fig
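# Minimal usage sketch for plot_evoked_field (assumes an Evoked instance and a
# -trans.fif file are available; mne.make_field_map builds `surf_maps`, though
# its exact keyword names vary between MNE versions):
#
#   from mne import make_field_map
#   maps = make_field_map(evoked, trans_fname, subject='sample',
#                         subjects_dir=subjects_dir)
#   fig = plot_evoked_field(evoked, maps, time=0.1)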
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
orientation : str
'coronal' or 'transverse' or 'sagittal'
slices : list of int
Slice indices.
show : bool
Call pyplot.show() at the end.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
# XXX : next line is a hack don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = dict()
surf['rr'], surf['tris'] = read_surface(surf_fname)
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
fig, axs = _prepare_trellis(len(slices), 4)
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
if orientation == 'coronal':
ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 2],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'axial':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
surf['tris'], surf['rr'][:, 1],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'sagittal':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 0],
levels=[sl], colors='yellow', linewidths=2.0)
if show:
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt.show()
return fig
def plot_trans(info, trans_fname='auto', subject=None, subjects_dir=None,
ch_type=None, source='bem'):
"""Plot MEG/EEG head surface and helmet in 3D.
Parameters
----------
info : dict
The measurement info.
trans_fname : str | 'auto'
The full path to the `*-trans.fif` file produced during
coregistration.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
ch_type : None | 'eeg' | 'meg'
If None, both the MEG helmet and EEG electrodes will be shown.
If 'meg', only the MEG helmet will be shown. If 'eeg', only the
EEG electrodes will be shown.
source : str
Type to load. Common choices would be `'bem'` or `'head'`. We first
try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
to 'bem'. Note. For single layer bems it is recommended to use 'head'.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
if ch_type not in [None, 'eeg', 'meg']:
raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
% ch_type)
if trans_fname == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
trans_fname = _find_trans(subject, subjects_dir)
trans = read_trans(trans_fname)
surfs = [get_head_surf(subject, source=source, subjects_dir=subjects_dir)]
if ch_type is None or ch_type == 'meg':
surfs.append(get_meg_helmet_surf(info, trans))
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, surf in enumerate(surfs):
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
# Make a solid surface
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
if ch_type is None or ch_type == 'eeg':
eeg_locs = [l['eeg_loc'][:, 0] for l in info['chs']
if l['eeg_loc'] is not None]
if len(eeg_locs) > 0:
eeg_loc = np.array(eeg_locs)
# Transform EEG electrodes to MRI coordinates
eeg_loc = apply_trans(trans['trans'], eeg_loc)
with warnings.catch_warnings(record=True): # traits
mlab.points3d(eeg_loc[:, 0], eeg_loc[:, 1], eeg_loc[:, 2],
color=(1.0, 0.0, 0.0), scale_factor=0.005)
else:
warnings.warn('EEG electrode locations not found. '
'Cannot plot EEG electrodes.')
mlab.view(90, 90)
return fig
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
colormap='hot', time_label='time=%0.2f ms',
smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
transparent=True, alpha=1.0, time_viewer=False,
config_opts={}, subjects_dir=None, figure=None,
views='lat', colorbar=True):
"""Plot SourceEstimates with PySurfer
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
stc : SourceEstimates
The source estimates to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display. Using 'both' or 'split' requires
PySurfer version 0.4 or above.
colormap : str
The type of colormap to use.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing
fmin : float
The minimum value to display.
fmid : float
The middle value on the colormap.
fmax : float
The maximum value for the colormap.
transparent : bool
If True, use a linear transparency between fmin and fmid.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | list | int | None
If None, a new figure will be created. If multiple views or a
split view is requested, this must be a list of the appropriate
length. If int is provided it will be used to identify the Mayavi
        figure by its id or create a new figure with the given id.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
Returns
-------
brain : Brain
A instance of surfer.viz.Brain from PySurfer.
"""
import surfer
from surfer import Brain, TimeViewer
if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
raise NotImplementedError('hemi type "%s" not supported with your '
'version of pysurfer. Please upgrade to '
'version 0.4 or higher.' % hemi)
try:
import mayavi
from mayavi import mlab
except ImportError:
from enthought import mayavi
from enthought.mayavi import mlab
# import here to avoid circular import problem
from ..source_estimate import SourceEstimate
if not isinstance(stc, SourceEstimate):
raise ValueError('stc has to be a surface source estimate')
if hemi not in ['lh', 'rh', 'split', 'both']:
raise ValueError('hemi has to be either "lh", "rh", "split", '
'or "both"')
n_split = 2 if hemi == 'split' else 1
n_views = 1 if isinstance(views, string_types) else len(views)
if figure is not None:
# use figure with specified id or create new figure
if isinstance(figure, int):
figure = mlab.figure(figure, size=(600, 600))
# make sure it is of the correct type
if not isinstance(figure, list):
figure = [figure]
if not all([isinstance(f, mayavi.core.scene.Scene) for f in figure]):
raise TypeError('figure must be a mayavi scene or list of scenes')
# make sure we have the right number of figures
n_fig = len(figure)
if not n_fig == n_split * n_views:
raise RuntimeError('`figure` must be a list with the same '
'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
subject = _check_subject(stc.subject, subject, False)
if subject is None:
if 'SUBJECT' in os.environ:
subject = os.environ['SUBJECT']
else:
raise ValueError('SUBJECT environment variable not set')
if hemi in ['both', 'split']:
hemis = ['lh', 'rh']
else:
hemis = [hemi]
title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
args = inspect.getargspec(Brain.__init__)[0]
kwargs = dict(title=title, figure=figure, config_opts=config_opts,
subjects_dir=subjects_dir)
if 'views' in args:
kwargs['views'] = views
else:
logger.info('PySurfer does not support "views" argument, please '
'consider updating to a newer version (0.4 or later)')
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(subject, hemi, surface, **kwargs)
for hemi in hemis:
hemi_idx = 0 if hemi == 'lh' else 1
if hemi_idx == 0:
data = stc.data[:len(stc.vertno[0])]
else:
data = stc.data[len(stc.vertno[0]):]
vertices = stc.vertno[hemi_idx]
time = 1e3 * stc.times
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=smoothing_steps, time=time,
time_label=time_label, alpha=alpha, hemi=hemi,
colorbar=colorbar)
# scale colormap and set time (index) to display
brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
transparent=transparent)
if time_viewer:
TimeViewer(brain)
return brain
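# Minimal usage sketch for plot_source_estimates (assumes a FreeSurfer 'sample'
# subject under SUBJECTS_DIR; all keywords are the ones defined above):
#
#   brain = plot_source_estimates(stc, subject='sample', hemi='split',
#                                 views=['lat', 'med'], time_viewer=True)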
def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
fontsize=18, bgcolor=(.05, 0, .1),
opacity=0.2, brain_color=(0.7,) * 3,
show=True, high_resolution=False,
fig_name=None, fig_number=None, labels=None,
modes=['cone', 'sphere'],
scale_factors=[1, 0.6],
verbose=None, **kwargs):
"""Plot source estimates obtained with sparse solver
Active dipoles are represented in a "Glass" brain.
If the same source is active in multiple source estimates it is
displayed with a sphere otherwise with a cone in 3D.
Parameters
----------
src : dict
The source space.
stcs : instance of SourceEstimate or list of instances of SourceEstimate
The source estimates (up to 3).
colors : list
List of colors
linewidth : int
Line width in 2D plot.
fontsize : int
Font size.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
show : bool
Show figures if True.
fig_name :
Mayavi figure name.
fig_number :
Matplotlib figure number.
labels : ndarray or list of ndarrays
Labels to show sources in clusters. Sources with the same
label and the waveforms within each cluster are presented in
the same color. labels should be a list of ndarrays when
stcs is a list ie. one label for each stc.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
kwargs : kwargs
Keyword arguments to pass to mlab.triangular_mesh.
"""
if not isinstance(stcs, list):
stcs = [stcs]
if labels is not None and not isinstance(labels, list):
labels = [labels]
if colors is None:
colors = COLORS
linestyles = ['-', '--', ':']
# Show 3D
lh_points = src[0]['rr']
rh_points = src[1]['rr']
points = np.r_[lh_points, rh_points]
lh_normals = src[0]['nn']
rh_normals = src[1]['nn']
normals = np.r_[lh_normals, rh_normals]
if high_resolution:
use_lh_faces = src[0]['tris']
use_rh_faces = src[1]['tris']
else:
use_lh_faces = src[0]['use_tris']
use_rh_faces = src[1]['use_tris']
use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
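    # hard-coded scale factor, presumably so the glass-brain mesh renders at a
    # convenient size in the Mayavi scene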
points *= 170
vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
for stc in stcs]
unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
try:
from mayavi import mlab
except ImportError:
from enthought.mayavi import mlab
from matplotlib.colors import ColorConverter
color_converter = ColorConverter()
f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
mlab.clf()
if mlab.options.backend != 'test':
f.scene.disable_render = True
with warnings.catch_warnings(record=True): # traits warnings
surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
points[:, 2], use_faces,
color=brain_color,
opacity=opacity, **kwargs)
import matplotlib.pyplot as plt
# Show time courses
plt.figure(fig_number)
plt.clf()
colors = cycle(colors)
logger.info("Total number of active sources: %d" % len(unique_vertnos))
if labels is not None:
colors = [advance_iterator(colors) for _ in
range(np.unique(np.concatenate(labels).ravel()).size)]
for idx, v in enumerate(unique_vertnos):
# get indices of stcs it belongs to
ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
is_common = len(ind) > 1
if labels is None:
c = advance_iterator(colors)
else:
# if vertex is in different stcs than take label from first one
c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
mode = modes[1] if is_common else modes[0]
scale_factor = scale_factors[1] if is_common else scale_factors[0]
if (isinstance(scale_factor, (np.ndarray, list, tuple))
and len(unique_vertnos) == len(scale_factor)):
scale_factor = scale_factor[idx]
x, y, z = points[v]
nx, ny, nz = normals[v]
with warnings.catch_warnings(record=True): # traits
mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
mode=mode, scale_factor=scale_factor)
for k in ind:
vertno = vertnos[k]
mask = (vertno == v)
assert np.sum(mask) == 1
linestyle = linestyles[k]
            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(), c=c,
linewidth=linewidth, linestyle=linestyle)
plt.xlabel('Time (ms)', fontsize=18)
plt.ylabel('Source amplitude (nAm)', fontsize=18)
if fig_name is not None:
plt.title(fig_name)
if show:
plt.show()
surface.actor.property.backface_culling = True
surface.actor.property.shading = True
return surface
| bsd-2-clause | 3,978,885,979,952,764,000 | 36.053763 | 79 | 0.576652 | false | 3.727708 | false | false | false |
jamespcole/home-assistant | homeassistant/components/arlo/alarm_control_panel.py | 1 | 4381 | """Support for Arlo Alarm Control Panels."""
import logging
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
PLATFORM_SCHEMA, AlarmControlPanel)
from homeassistant.const import (
ATTR_ATTRIBUTION, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT, STATE_ALARM_DISARMED)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ATTRIBUTION, DATA_ARLO, SIGNAL_UPDATE_ARLO
_LOGGER = logging.getLogger(__name__)
ARMED = 'armed'
CONF_HOME_MODE_NAME = 'home_mode_name'
CONF_AWAY_MODE_NAME = 'away_mode_name'
CONF_NIGHT_MODE_NAME = 'night_mode_name'
DEPENDENCIES = ['arlo']
DISARMED = 'disarmed'
ICON = 'mdi:security'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOME_MODE_NAME, default=ARMED): cv.string,
vol.Optional(CONF_AWAY_MODE_NAME, default=ARMED): cv.string,
vol.Optional(CONF_NIGHT_MODE_NAME, default=ARMED): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Arlo Alarm Control Panels."""
arlo = hass.data[DATA_ARLO]
if not arlo.base_stations:
return
home_mode_name = config.get(CONF_HOME_MODE_NAME)
away_mode_name = config.get(CONF_AWAY_MODE_NAME)
night_mode_name = config.get(CONF_NIGHT_MODE_NAME)
base_stations = []
for base_station in arlo.base_stations:
base_stations.append(ArloBaseStation(base_station, home_mode_name,
away_mode_name, night_mode_name))
add_entities(base_stations, True)
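# Minimal configuration.yaml sketch for this platform (the three mode-name
# options are the optional overrides declared in PLATFORM_SCHEMA above):
#
#   alarm_control_panel:
#     - platform: arlo
#       home_mode_name: home
#       away_mode_name: armed
#       night_mode_name: night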
class ArloBaseStation(AlarmControlPanel):
"""Representation of an Arlo Alarm Control Panel."""
def __init__(self, data, home_mode_name, away_mode_name, night_mode_name):
"""Initialize the alarm control panel."""
self._base_station = data
self._home_mode_name = home_mode_name
self._away_mode_name = away_mode_name
self._night_mode_name = night_mode_name
self._state = None
@property
def icon(self):
"""Return icon."""
return ICON
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_ARLO, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the device."""
return self._state
def update(self):
"""Update the state of the device."""
_LOGGER.debug("Updating Arlo Alarm Control Panel %s", self.name)
mode = self._base_station.mode
if mode:
self._state = self._get_state_from_mode(mode)
else:
self._state = None
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
self._base_station.mode = DISARMED
async def async_alarm_arm_away(self, code=None):
"""Send arm away command. Uses custom mode."""
self._base_station.mode = self._away_mode_name
async def async_alarm_arm_home(self, code=None):
"""Send arm home command. Uses custom mode."""
self._base_station.mode = self._home_mode_name
async def async_alarm_arm_night(self, code=None):
"""Send arm night command. Uses custom mode."""
self._base_station.mode = self._night_mode_name
@property
def name(self):
"""Return the name of the base station."""
return self._base_station.name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
'device_id': self._base_station.device_id
}
def _get_state_from_mode(self, mode):
"""Convert Arlo mode to Home Assistant state."""
if mode == ARMED:
return STATE_ALARM_ARMED_AWAY
if mode == DISARMED:
return STATE_ALARM_DISARMED
if mode == self._home_mode_name:
return STATE_ALARM_ARMED_HOME
if mode == self._away_mode_name:
return STATE_ALARM_ARMED_AWAY
if mode == self._night_mode_name:
return STATE_ALARM_ARMED_NIGHT
return mode
| apache-2.0 | 8,865,541,382,343,930,000 | 31.213235 | 78 | 0.640037 | false | 3.593929 | false | false | false |
google/physical-web | web-service/handlers.py | 1 | 3651 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from urllib import unquote_plus
import helpers
import json
import logging
import models
import webapp2
################################################################################
class Index(webapp2.RequestHandler):
def get(self):
self.response.out.write('')
def head(self):
pass
################################################################################
class GoUrl(webapp2.RequestHandler):
def get(self):
return self._redirect()
def head(self):
return self._redirect()
def _redirect(self):
url = self.request.get('url')
url = url.encode('ascii', 'ignore')
self.redirect(url)
################################################################################
class RefreshUrl(webapp2.RequestHandler):
def post(self):
url = self.request.get('url')
helpers.RefreshUrl(url)
################################################################################
class FaviconUrl(webapp2.RequestHandler):
def get(self):
url = unquote_plus(self.request.get('url'))
response = helpers.FaviconUrl(url)
if response:
self.response.headers['Content-Type'] = response.headers['Content-Type']
self.response.write(response.content)
else:
self.error('404')
################################################################################
class ResolveScan(webapp2.RequestHandler):
def post(self):
input_data = self.request.body
try:
input_object = json.loads(input_data) # TODO: Data is not sanitised.
objects = input_object.get('objects', [])
secure_only = bool(input_object.get('secureOnly', helpers.DEFAULT_SECURE_ONLY))
except:
objects = []
secure_only = helpers.DEFAULT_SECURE_ONLY
output = helpers.BuildResponse(objects, secure_only)
self.response.headers['Content-Type'] = 'application/json'
        json_data = json.dumps(output)
self.response.write(json_data)
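    # Example request body handled above ("secureOnly" is optional and falls
    # back to helpers.DEFAULT_SECURE_ONLY):
    #
    #   {"objects": [{"url": "https://example.com"}], "secureOnly": true}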
################################################################################
class DemoMetadata(webapp2.RequestHandler):
def get(self):
objects = [
{'url': 'http://www.caltrain.com/schedules/realtime/stations/mountainviewstation-mobile.html'},
{'url': 'http://benfry.com/distellamap/'},
{'url': 'http://en.wikipedia.org/wiki/Le_D%C3%A9jeuner_sur_l%E2%80%99herbe'},
{'url': 'http://sfmoma.org'}
]
output = helpers.BuildResponse(objects)
self.response.headers['Content-Type'] = 'application/json'
        json_data = json.dumps(output)
self.response.write(json_data)
def head(self):
pass
################################################################################
app = webapp2.WSGIApplication([
('/', Index),
('/resolve-scan', ResolveScan),
('/refresh-url', RefreshUrl),
('/favicon', FaviconUrl),
('/go', GoUrl),
('/demo', DemoMetadata)
], debug=True)
| apache-2.0 | 3,967,501,291,462,436,400 | 31.026316 | 107 | 0.539852 | false | 4.382953 | false | false | false |
sacharya/nova | nova/virt/baremetal/base.py | 1 | 2612 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.baremetal import baremetal_states
class NodeDriver(object):
def __init__(self, virtapi):
self.virtapi = virtapi
def cache_images(self, context, node, instance, **kwargs):
raise NotImplementedError()
def destroy_images(self, context, node, instance):
raise NotImplementedError()
def activate_bootloader(self, context, node, instance, **kwargs):
raise NotImplementedError()
def deactivate_bootloader(self, context, node, instance):
raise NotImplementedError()
def activate_node(self, context, node, instance):
"""For operations after power on."""
raise NotImplementedError()
def deactivate_node(self, context, node, instance):
"""For operations before power off."""
raise NotImplementedError()
def get_console_output(self, node, instance):
raise NotImplementedError()
def dhcp_options_for_instance(self, instance):
"""Optional override to return the DHCP options to use for instance.
If no DHCP options are needed, this should not be overridden or None
should be returned.
"""
return None
class PowerManager(object):
def __init__(self, **kwargs):
self.state = baremetal_states.DELETED
def activate_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def reboot_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def deactivate_node(self):
self.state = baremetal_states.DELETED
return self.state
def is_power_on(self):
"""Returns True or False according as the node's power state."""
return True
# TODO(NTTdocomo): split out console methods to its own class
def start_console(self):
pass
def stop_console(self):
pass
| apache-2.0 | -3,919,610,494,057,842,000 | 29.372093 | 78 | 0.672282 | false | 4.317355 | false | false | false |
Xonshiz/comic-dl | comic_dl/sites/readcomicOnlineli.py | 1 | 7993 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import globalFunctions
import re
import os
import logging
class ReadComicOnlineLi(object):
def __init__(self, manga_url, download_directory, chapter_range, **kwargs):
current_directory = kwargs.get("current_directory")
conversion = kwargs.get("conversion")
keep_files = kwargs.get("keep_files")
self.logging = kwargs.get("log_flag")
self.sorting = kwargs.get("sorting_order")
self.image_quality = kwargs.get("image_quality")
self.comic_name = self.name_cleaner(manga_url)
self.print_index = kwargs.get("print_index")
url_split = str(manga_url).split("/")
if len(url_split) in [5]: # Sometimes, this value came out to be 6, instead of 5. Hmmmmmmmm weird.
# Removing "6" from here, because it caused #47
self.full_series(comic_url=manga_url.replace("&readType=1", ""), comic_name=self.comic_name,
sorting=self.sorting, download_directory=download_directory, chapter_range=chapter_range,
conversion=conversion, keep_files=keep_files)
else:
if "&readType=0" in manga_url:
manga_url = str(manga_url).replace("&readType=0", "&readType=1") # All Images in one page!
# disabled to fix #132 and #145
# elif "&readType=1" not in manga_url:
# manga_url = str(manga_url) + "&readType=1" # All Images in one page!
self.single_chapter(manga_url, self.comic_name, download_directory, conversion=conversion,
keep_files=keep_files)
def single_chapter(self, comic_url, comic_name, download_directory, conversion, keep_files):
# print("Received Comic Url : {0}".format(comic_url))
print("Fooling CloudFlare...Please Wait...")
chapter_number = str(comic_url).split("/")[5].split("?")[0].replace("-", " - ")
source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url, scrapper_delay=10)
img_list = re.findall(r"lstImages.push\(\"(.*?)\"\);", str(source))
file_directory = globalFunctions.GlobalFunctions().create_file_directory(chapter_number, comic_name)
# directory_path = os.path.realpath(file_directory)
directory_path = os.path.realpath(str(download_directory) + "/" + str(file_directory))
if not os.path.exists(directory_path):
os.makedirs(directory_path)
# image_len = len(image_list)
if str(self.image_quality).lower().strip() in ["low", "worst", "bad", "cancer", "mobile"]:
print("Downloading In Low Quality...")
links = []
file_names = []
for current_chapter, image_link in enumerate(img_list):
image_link = image_link.replace("\\", "")
logging.debug("Image Link : %s" % image_link)
image_link = image_link.replace("=s1600", "=s0").replace("/s1600", "/s0") # Change low quality to best.
if str(self.image_quality).lower().strip() in ["low", "worst", "bad", "cancer", "mobile"]:
image_link = image_link.replace("=s0", "=s1600").replace("/s0", "/s1600")
current_chapter += 1
file_name = str(globalFunctions.GlobalFunctions().prepend_zeroes(current_chapter, len(img_list))) + ".jpg"
file_names.append(file_name)
links.append(image_link)
globalFunctions.GlobalFunctions().multithread_download(chapter_number, comic_name, comic_url, directory_path,
file_names, links, self.logging)
globalFunctions.GlobalFunctions().conversion(directory_path, conversion, keep_files, comic_name,
chapter_number)
return 0
def name_cleaner(self, url):
initial_name = str(url).split("/")[4].strip()
safe_name = re.sub(r"[0-9][a-z][A-Z]\ ", "", str(initial_name))
manga_name = str(safe_name.title()).replace("-", " ")
return manga_name
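    # Illustrative behaviour for a hypothetical URL:
    #   "http://readcomiconline.li/Comic/Some-Comic-Title/Issue-1?id=1"
    #   -> split("/")[4] == "Some-Comic-Title" -> "Some Comic Title"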
def full_series(self, comic_url, comic_name, sorting, download_directory, chapter_range, conversion, keep_files):
print("Fooling CloudFlare...Please Wait...")
source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url, scrapper_delay=10)
all_links = []
listing_table = source.find_all("table", {"class": "listing"})
# print(listing_table)
for elements in listing_table:
x = elements.findAll('a')
for a in x:
all_links.append(str(a['href']).strip())
"""Readcomiconline.li shows the chapters in the Descending order. The 1st chapter is at the bottom, hence, at
the end of the list. So, we'll reverse the list, to perform the ranging functionality properly.
This is a fix for issues like #74.
"""
all_links.reverse()
# print("All Links : {0}".format(all_links))
logging.debug("All Links : %s" % all_links)
        # Uh, so the logic is to remove all the unnecessary chapters beforehand
# and then pass the list for further operations.
if chapter_range != "All":
# -1 to shift the episode number accordingly to the INDEX of it. List starts from 0 xD!
starting = int(str(chapter_range).split("-")[0]) - 1
if str(chapter_range).split("-")[1].isdigit():
ending = int(str(chapter_range).split("-")[1])
else:
ending = len(all_links)
indexes = [x for x in range(starting, ending)]
all_links = [all_links[x] for x in indexes][::-1]
else:
all_links = all_links
if self.print_index:
idx = 0
for chap_link in all_links:
idx = idx + 1
print(str(idx) + ": " + chap_link)
return
if str(sorting).lower() in ['new', 'desc', 'descending', 'latest']:
for chap_link in all_links:
chap_link = "http://readcomiconline.li" + chap_link
try:
self.single_chapter(comic_url=chap_link, comic_name=comic_name, download_directory=download_directory,
conversion=conversion, keep_files=keep_files)
except Exception as ex:
logging.error("Error downloading : %s" % chap_link)
break # break to continue processing other mangas
# if chapter range contains "__EnD__" write new value to config.json
# @Chr1st-oo - modified condition due to some changes on automatic download and config.
if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3):
globalFunctions.GlobalFunctions().addOne(comic_url)
elif str(sorting).lower() in ['old', 'asc', 'ascending', 'oldest', 'a']:
for chap_link in all_links[::-1]:
                chap_link = "http://readcomiconline.li" + chap_link
try:
self.single_chapter(comic_url=chap_link, comic_name=comic_name, download_directory=download_directory,
conversion=conversion, keep_files=keep_files)
except Exception as ex:
logging.error("Error downloading : %s" % chap_link)
break # break to continue processing other mangas
# if chapter range contains "__EnD__" write new value to config.json
# @Chr1st-oo - modified condition due to some changes on automatic download and config.
if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3):
globalFunctions.GlobalFunctions().addOne(comic_url)
return 0
| mit | 6,136,911,422,190,837,000 | 47.150602 | 127 | 0.575378 | false | 3.865087 | false | false | false |
zhiwehu/django-countries | countries/models.py | 1 | 2033 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Country(models.Model):
"""
International Organization for Standardization (ISO) 3166-1 Country list
* ``iso`` = ISO 3166-1 alpha-2
* ``name`` = Official country names used by the ISO 3166/MA in capital letters
* ``printable_name`` = Printable country names for in-text use
* ``iso3`` = ISO 3166-1 alpha-3
* ``numcode`` = ISO 3166-1 numeric
Note::
This model is fixed to the database table 'country' to be more general.
Change ``db_table`` if this cause conflicts with your database layout.
Or comment out the line for default django behaviour.
"""
iso = models.CharField(_('ISO alpha-2'), max_length=2, primary_key=True)
name = models.CharField(_('Official name (CAPS)'), max_length=128)
printable_name = models.CharField(_('Country name'), max_length=128)
iso3 = models.CharField(_('ISO alpha-3'), max_length=3, null=True)
numcode = models.PositiveSmallIntegerField(_('ISO numeric'), null=True)
class Meta:
db_table = 'country'
verbose_name = _('Country')
verbose_name_plural = _('Countries')
ordering = ('name',)
class Admin:
list_display = ('printable_name', 'iso',)
def __unicode__(self):
return self.printable_name
class UsState(models.Model):
"""
United States Postal Service (USPS) State Abbreviations
Note::
This model is fixed to the database table 'usstate' to be more general.
Change ``db_table`` if this cause conflicts with your database layout.
Or comment out the line for default django behaviour.
"""
id = models.AutoField(primary_key=True)
name = models.CharField(_('State name'), max_length=50, null=False)
abbrev = models.CharField(_('Abbreviation'), max_length=2, null=False)
class Meta:
db_table = 'usstate'
verbose_name = _('US State')
verbose_name_plural = _('US States')
ordering = ('name',)
class Admin:
list_display = ('name', 'abbrev',)
def __unicode__(self):
return self.name
| bsd-3-clause | 316,511,480,541,855,040 | 30.276923 | 80 | 0.689129 | false | 3.377076 | false | false | false |
nils-wisiol/pypuf | pypuf/property_test/example.py | 1 | 3649 | """This module is used to store some examples for the documentation"""
from numpy import array, reshape
from pypuf.simulation.arbiter_based.ltfarray import NoisyLTFArray
from pypuf.property_test.base import PropertyTest
from pypuf.tools import sample_inputs
def main():
"""This method is used to execute all example functions."""
example_reliability()
example_reliability_statistic()
def example_reliability():
"""This method shows how to use the PropertyTest.reliability function."""
n = 8
k = 8
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instance = NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, 0.5)
)
challenge = array([-1, 1, 1, 1, -1, 1, 1, 1])
reliability = PropertyTest.reliability(instance, reshape(challenge, (1, n)))
print('The reliability is {}.'.format(reliability))
def example_reliability_statistic():
    """This method shows how to use the PropertyTest.reliability_statistic."""
n = 8
k = 1
N = 2 ** n
instance_count = 3
measurements = 100
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instances = [
NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, 0.5)
) for _ in range(instance_count)
]
challenges = array(list(sample_inputs(n, N)))
property_test = PropertyTest(instances)
reliability_statistic = property_test.reliability_statistic(challenges, measurements=measurements)
print('The reliability statistic is {}.'.format(reliability_statistic))
def example_uniqueness():
"""
This method shows the function which can be used to calculate the uniqueness of a set of simulation instances.
"""
n = 8
k = 1
instance_count = 3
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instances = [
NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, weights)
) for _ in range(instance_count)
]
challenge = array([-1, 1, 1, 1, -1, 1, 1, 1])
uniqueness = PropertyTest.uniqueness(instances, reshape(challenge, (1, n)))
print('The uniqueness is {}.'.format(uniqueness))
def example_uniqueness_statistic():
"""This method shows the uniqueness statistic function."""
n = 8
k = 1
N = 2 ** n
instance_count = 11
measurements = 1
transformation = NoisyLTFArray.transform_id
combiner = NoisyLTFArray.combiner_xor
weights = NoisyLTFArray.normal_weights(n=n, k=k)
instances = [
NoisyLTFArray(
weight_array=weights,
transform=transformation,
combiner=combiner,
sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, weights)
) for _ in range(instance_count)
]
challenges = array(list(sample_inputs(n, N)))
property_test = PropertyTest(instances)
uniqueness_statistic = property_test.uniqueness_statistic(challenges, measurements=measurements)
print('The uniqueness statistic is {}.'.format(uniqueness_statistic))
if __name__ == '__main__':
main()
| gpl-3.0 | -5,675,355,466,002,031,000 | 33.752381 | 114 | 0.671691 | false | 3.712106 | true | false | false |
denverfoundation/storybase | apps/storybase_user/migrations/0006_auto__add_contact.py | 1 | 14929 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Contact'
db.create_table('storybase_user_contact', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('storybase.fields.ShortTextField')(blank=True)),
('info', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('storybase_user', ['Contact'])
def backwards(self, orm):
# Deleting model 'Contact'
db.delete_table('storybase_user_contact')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
'storybase_story.story': {
'Meta': {'object_name': 'Story'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
'byline': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'story_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'storybase_user.contact': {
'Meta': {'object_name': 'Contact'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'})
},
'storybase_user.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organization_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.organizationstory': {
'Meta': {'object_name': 'OrganizationStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.organizationtranslation': {
'Meta': {'unique_together': "(('organization', 'language'),)", 'object_name': 'OrganizationTranslation'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_user.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'project_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.projectstory': {
'Meta': {'object_name': 'ProjectStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.projecttranslation': {
'Meta': {'unique_together': "(('project', 'language'),)", 'object_name': 'ProjectTranslation'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['storybase_user']
| mit | -9,083,987,629,193,060,000 | 82.402235 | 268 | 0.557907 | false | 3.681628 | false | false | false |
omaciel/mangonel | mangonel/system.py | 1 | 2191 | from common import *
import datetime
import json
import sys
import time
try:
from katello.client.api.system import SystemAPI
except ImportError, e:
print "Please install Katello CLI package."
sys.exit(-1)
class System(SystemAPI):
def __init__(self):
super(System, self).__init__()
def create(self, org, env, name=None, ak=None, type='system',
release=None, sla=None, facts=None, view_id=None, installed_products=None):
if name is None:
name = "%s.example.com" % generate_name(8)
if facts is None:
facts = generate_facts(name)
sys1 = super(System, self).register(name, org['label'], env['id'], ak, type, release, sla, facts, view_id, installed_products)
logger.debug("Created system '%s'" % sys1['name'])
return sys1
def get_or_create_system(self, org, env, name=None, ak=None, type='system',
release=None, sla=None, facts=None, view_id=None, installed_products=None):
sys = None
query = {}
if name is not None:
query['name'] = name
if query != {}:
systems = super(System, self).systems_by_env(env['id'], query)
if systems != []:
sys = systems[0]
else:
sys = self.create(org, env, name, ak, type,
release, sla, facts, view_id, installed_products)
return sys
def delete_system(self, system):
return super(System, self).unregister(system['uuid'])
def checkin(self, system):
return super(System, self).checkin(system['uuid'])
def update_packages(self, system, packages=None):
if packages is None:
packages = packages_list()
return super(System, self).update_packages(system['uuid'], packages)
def available_pools(self, sId, match_system=False, match_installed=False, no_overlap=False):
return super(System, self).available_pools(sId, match_system, match_installed, no_overlap)['pools']
def subscribe(self, sId, pool=None, qty=1):
return super(System, self).subscribe(sId, pool, qty)
| gpl-2.0 | 7,369,417,449,111,537,000 | 29.430556 | 134 | 0.591967 | false | 3.823735 | false | false | false |
OmnesRes/pan_cancer | paper/figures/figure_1/bar_graphs/CESC.py | 1 | 1883 | ##script for creating a histogram
## Load necessary modules
import pylab as plt
import numpy as np
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'cox_regression','CESC','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
pvalues=map(float,pvalues)
##decide how many bins; 100 is the maximum possible since the p-values only have two sig figs
number=100.0
counts={}
##use a dictionary to populate the bins
for i in range(int(number)):
for j in pvalues:
if i/number<j<=(i+1)/number:
counts[i]=counts.get(i,0)+1
##convert the dictionary to a list
mylist=zip(counts.keys(),counts.values())
##sort the list so that the bins are in order
mylist.sort()
##plot the data with pylab
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(bottom=.2)
ax.bar([i[0]/number for i in mylist],[i[1] for i in mylist],color='b',width=1/number,linewidth=2.0)
ax.set_xlim((0,1))
for item in ax.get_yticklabels():
item.set_fontsize(30)
for item in ax.get_xticklabels():
item.set_fontsize(30)
ax.tick_params(axis='x',length=15,width=3,direction='out',labelsize=30)
ax.tick_params(axis='y',length=15,width=3,direction='out',labelsize=30)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(3)
ax.spines['bottom'].set_linewidth(3)
ax.spines['bottom'].set_position(['outward',10])
ax.spines['left'].set_position(['outward',10])
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xticks([i/10.0 for i in range(0,11)])
ax.set_xticklabels(['0']+[str(i/10.0) for i in range(1,11)])
ax.set_ylabel('Frequency',fontsize=60,labelpad=20)
ax.set_xlabel('Raw Cox P-value',fontsize=60,labelpad=20)
plt.show()
| mit | -5,518,581,456,476,088,000 | 29.868852 | 104 | 0.707913 | false | 2.806259 | false | false | false |
jbzdarkid/HearthstonePro | Cards.py | 1 | 13333 | '''
Special:
"Anub'ar Ambusher"
"Blood Warriors"
"Burgly Bully"
"Captain's Parrot"
"Chromaggus"
"Echo of Mediv"
"Ethereal Peddler"
"Flame Leviathan"
"Getaway Kodo"
"Gnomish Experimenter"
"Headcrack"
"Holy Wrath"
"Ivory Knight"
"Kazakus"
"King's Elekk"
"Krul the Unshackled"
"Lock and Load"
"Lorewalker Cho"
"Sea Reaver"
"Shadowfiend"
"Small-Time Recruits"
"Thistle Tea"
"Tinkertown Technician"
"Trade Prince Gallywix"
"Vanish"
"Wilfred Fizzlebang"
"Wrathion"
'''
# Deathrattle: "Voidcaller", "The Skeleton Knight"
# Discard: "Succubus", "Darkshire Librarian", "Astral Communion", "Dark Bargain", "Deathwing"
# Buff: "Smuggler's Crate", "Hidden Cache", "Trogg Beastrager", "Grimscale Chum", "Grimestreet Outfitter", "Grimestreet Enforcer", "Grimestreet Gadgeteer", "Stolen Goods", "Grimestreet Pawnbroker", "Brass Knuckles", "Hobart Grapplehammer", "Grimestreet Smuggler", "Don Han'Cho"
# Within this file, I've separated out names of cards in "double quotes", so that I can search for them via splitter.py.
# It also means there won't be any \'s in card names.
import logging
import Hand, Utilities, Legendaries
# When a card hits the board, and we can see what its name is
def play2(entity):
if entity['player'] == Utilities.them:
if entity['name'] in ['Armor Up!', 'Ballista Shot', 'Dagger Mastery', 'DIE, INSECT!', 'Dire Shapeshift', 'INFERNO!', 'Life Tap', 'Poisoned Daggers', 'Reinforce', 'Shapeshift', 'Soul Tap', 'Steady Shot', 'Tank Up!', 'The Silver Hand', 'The Tidal Hand', 'Totemic Call', 'Totemic Slam']:
logging.info('Opponent uses their hero power')
else:
logging.info('Opponent plays %s' % entity['name'])
if entity['name'] in ["Crackle", "Dunemaul Shaman", "Finders Keepers", "Fireguard Destroyer", "Jinyu Waterspeaker", "Lightning Bolt", "Siltfin Spiritwalker", "Stormforged Axe", "Stormcrack", "Totem Golem"]:
Utilities.overload += 1
elif entity['name'] in ["Ancestral Knowledge", "Doomhammer", "Dust Devil", "Feral Spirit", "Flamewreathed Faceless", "Forked Lightning", "Lava Burst", "Lightning Storm"]:
Utilities.overload += 2
elif entity['name'] in ["Earth Elemental", "Neptulon"]:
Utilities.overload += 3
elif entity['name'] in ["Elemental Destruction"]:
Utilities.overload += 5
elif entity['name'] in ["Eternal Sentinel", "Lava Shock"]:
Utilities.overload = 0
elif entity['name'] in ["Astral Communion", "Dark Bargain", "Darkshire Librarian", "Deathwing", "Doomguard", "Soulfire", "Succubus"]:
global showentity
showentity = discard
elif entity['name'] == "Varian Wrynn":
Legendaries.varianWrynn = True
elif entity['name'] == "A Light in the Darkness":
Hand.draw(source='random', kind='minion', buff=+1)
elif entity['name'] == "Arch-Thief Rafaam":
Hand.draw(note='A powerful artifact', kind='spell')
elif entity['name'] == "Babbling Book":
Hand.draw(source='random', hero='mage', kind='spell')
elif entity['name'] == "Burgle":
Hand.draw(source='random', hero=Utilities.our_hero)
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Cabalist's Tomb":
Hand.draw(source='random', hero='mage', kind='spell')
Hand.draw(source='random', hero='mage', kind='spell')
Hand.draw(source='random', hero='mage', kind='spell')
elif entity['name'] == "Dark Peddler":
Hand.draw(source='discovered', note='A 1-cost card')
elif entity['name'] == "Ethereal Conjurer":
Hand.draw(source='discovered', hero='mage', kind='spell')
elif entity['name'] == "Finders Keepers":
Hand.draw(source='discovered', hero='shaman', note='A card with overload')
elif entity['name'] == "Gorillabot A-3":
Hand.draw(source='discovered', kind='mech minion')
elif entity['name'] == "Grand Crusader":
Hand.draw(source='random', hero='paladin')
elif entity['name'] == "Grimestreet Informant":
Hand.draw(source='discovered', hero='hunter, paladin, or warrior')
elif entity['name'] == "I Know a Guy":
Hand.draw(source='discovered', kind='taunt minion')
elif entity['name'] == "Jeweled Scarab":
Hand.draw(source='discovered', note='A 3-cost card')
elif entity['name'] == "Journey Below":
Hand.draw(source='discovered', note='A deathrattle card')
elif entity['name'] == "Kabal Chemist":
Hand.draw(source='random', kind='potion spell')
elif entity['name'] == "Kabal Courier":
Hand.draw(source='discovered', hero='mage, priest, or warlock')
elif entity['name'] == "Lotus Agents":
Hand.draw(source='discovered', hero='druid, rogue, or shaman')
elif entity['name'] == "Mind Vision":
Hand.draw(note='A card from your hand')
elif entity['name'] == "Mukla, Tyrant of the Vale":
Hand.draw(note='Banana', kind='spell')
Hand.draw(note='Banana', kind='spell')
elif entity['name'] == "Museum Curator":
# I'm ignoring "Tentacles For Arms" because it's bad
Hand.draw(source='discovered', note='A deathrattle card', kind='minion')
elif entity['name'] == "Nefarian":
Hand.draw(source='random', hero=Utilities.our_hero)
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Neptulon":
Hand.draw(source='random', kind='murloc minion')
Hand.draw(source='random', kind='murloc minion')
Hand.draw(source='random', kind='murloc minion')
Hand.draw(source='random', kind='murloc minion')
elif entity['name'] == "Raven Idol":
Hand.draw(source='discovered', kind='minion or spell')
elif entity['name'] == "Sense Demons":
Hand.draw(kind='demon minion')
Hand.draw(kind='demon minion')
elif entity['name'] == "Swashburglar":
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Thoughtsteal":
Hand.draw(note='A random card from your deck')
Hand.draw(note='A random card from your deck')
elif entity['name'] == "Tomb Spider":
Hand.draw(source='discovered', kind='beast minion')
elif entity['name'] == "Toshley":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Unstable Portal":
Hand.draw(source='random', kind='minion', cost=-3)
elif entity['name'] == "Wild Growth":
if Utilities.resources == '10':
Hand.draw(note='Excess Mana', hero='druid', kind='spell')
elif entity['name'] == "Xaril, Poisoned Mind":
Hand.draw(source='random', kind='toxin spell')
elif entity['name'] == "Call Pet":
Hand.notes.append('If it\'s a beast, cost -4')
elif entity['name'] == "Far Sight":
Hand.notes.append('Costs (3) less')
elif entity['player'] == Utilities.us:
if entity['name'] == "King Mukla":
Hand.draw(kind='Banana')
Hand.draw(kind='Banana')
elif entity['name'] == "Mulch":
Hand.draw(source='random', kind='minion')
# if entity['player'] in [Utilities.us, Utilities.them]:
if entity['name'] == "Elite Tauren Chieftain":
Hand.draw(kind='Power Chord spell')
elif entity['name'] == "Lord Jaraxxus":
Utilities.set_hero(entity)
elif entity['name'] == "Spellslinger":
Hand.draw(source='random', kind='spell')
# When a card hits the board and we can see what its name and its target's name is.
def play3(entity, target):
if entity['player'] == Utilities.them:
if entity['name'] in ['Fireblast', 'Fireblast Rank 2', 'Lesser Heal', 'Lightning Jolt', 'Mind Shatter', 'Mind Spike', 'Heal']:
            logging.info('Opponent uses their hero power, targeting %s' % target['name'])
else:
            logging.info('Opponent plays %s targeting %s' % (entity['name'], target['name']))
if entity['name'] == "Soulfire":
global showentity
showentity = discard
if entity['name'] in ["Ancient Brewmaster", "Convert", "Gadgetzan Ferryman", "Time Rewinder", "Youthful Brewmaster"]:
Hand.draw(note=target['name'], kind='minion')
elif entity['name'] in ["Bloodthistle Toxin", "Shadowstep"]:
Hand.draw(note=target['name'], kind='minion', cost=-2)
elif entity['name'] == "Convert":
Hand.draw(note=target['name'], kind='minion')
elif entity['name'] == "Shadowcaster":
Hand.draw(note='A 1/1 copy of %s which costs (1)' % target['name'], kind='minion')
elif entity['player'] == Utilities.us:
if entity['name'] == "Freezing Trap":
Hand.draw(note=target['name'], kind='minion', cost=+2)
elif entity['name'] == "Sap":
Hand.draw(note=target['name'], kind='minion')
if target['player'] == Utilities.them:
        if entity['name'] in ["Dream", "Kidnapper"]:
Hand.draw(note=target['name'], kind='minion')
def die(entity):
if entity['player'] == Utilities.them:
logging.info('Opponent\'s %s dies' % entity['name'])
if entity['name'] == "Anub'arak":
Hand.draw(note='Anub\'arak')
elif entity['name'] == "Clockwork Gnome":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Deadly Fork":
Hand.draw(note='Sharp Fork', kind='weapon')
elif entity['name'] == "Rhonin":
            Hand.draw(note='Arcane Missiles', hero='mage', kind='spell')
            Hand.draw(note='Arcane Missiles', hero='mage', kind='spell')
            Hand.draw(note='Arcane Missiles', hero='mage', kind='spell')
elif entity['name'] == "Shifting Shade":
Hand.draw(note='A card from your deck')
elif entity['name'] == "Tentacles for Arms":
Hand.draw(note='Tentacles for Arms')
elif entity['name'] == "Tomb Pillager":
Hand.draw(note='The Coin', kind='spell')
elif entity['name'] == "Toshley":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Undercity Huckster":
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Xaril, Poisoned Mind":
Hand.draw(source='random', kind='toxin spell')
elif entity['name'] == "Webspinner":
Hand.draw(source='random', kind='beast minion')
# if entity['player'] in [Utilities.us, Utilities.them]:
if entity['name'] == "Mechanical Yeti":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Majordomo Executus":
Utilities.set_hero(entity)
def die2(entity):
if entity['player'] == Utilities.them:
if entity['name'] == "Explorer's Hat":
Hand.draw(note='Explorer\'s Hat', hero='Hunter', kind='spell')
elif entity['name'] == "Nerubian Spores": # "Infest"
Hand.draw(source='random', kind='beast minion')
# Be careful of Blessing of Wisdom (others?) which can 'trigger' an effect on a card that already has a triggered effect.
def trigger(entity):
if entity['player'] == Utilities.them:
logging.info('Opponent\'s %s triggers' % entity['name'])
if entity['name'] == "Alarm-o-Bot":
Hand.draw(note='Alarm-o-Bot', kind='minion')
elif entity['name'] == "Archmage Antonidas":
Hand.draw(note='Fireball', hero='mage', kind='spell')
elif entity['name'] == "Colliseum Manager":
Hand.draw(note='Colliseum Manager', kind='minion')
elif entity['name'] == "Cutpurse":
Hand.draw(note='The Coin', kind='spell')
elif entity['name'] == "Emperor Thaurissan":
for card in Hand.hand:
card.cost -= 1
elif entity['name'] == "Gazlowe":
Hand.draw(source='random', kind='mech minion')
elif entity['name'] == "Kabal Trafficker":
Hand.draw(source='random', kind='demon minion')
elif entity['name'] == "Mech-Bear-Cat":
Hand.draw(note='Spare Part', kind='spell')
elif entity['name'] == "Nexus-Champion Saraad":
Hand.draw(source='random', kind='spell')
elif entity['name'] == "Recruiter":
Hand.draw(note='Squire', kind='minion')
elif entity['name'] == "Shaku, the Collector":
Hand.draw(source='random', hero=Utilities.our_hero)
elif entity['name'] == "Ysera":
Hand.draw(note='A Dream card', kind='spell')
# Show Entity blocks are used for a number of things. Here, this is used for
# getting the hand position of discarded cards, and determining cards drawn for
# King's Elekk Joust victories.
def blockEnd():
    # Reset the module-level showentity handler back to a no-op once the Show
    # Entity block has been consumed.
    global showentity
    def showentity(data):
        pass
blockEnd()
def discard(data):
logging.info('Opponent discards %s' % data['CardID'])
Hand.hand.pop(int(data['Entity']['zonePos'])-1)
def turnover():
if Utilities.overload != 0:
logging.info('Overload next turn: %d' % Utilities.overload)
Utilities.overload = 0
| apache-2.0 | -6,323,426,377,481,312,000 | 48.199262 | 292 | 0.59769 | false | 3.316667 | false | false | false |
WoLpH/dropbox | dropbox/util.py | 1 | 1940 | import os
class AnalyzeFileObjBug(Exception):
msg = ("\n"
"Expected file object to have %d bytes, instead we read %d bytes.\n"
"File size detection may have failed (see dropbox.util.AnalyzeFileObj)\n")
def __init__(self, expected, actual):
self.expected = expected
self.actual = actual
def __str__(self):
return self.msg % (self.expected, self.actual)
def analyze_file_obj(obj):
''' Get the size and contents of a file-like object.
Returns: (size, raw_data)
size: The amount of data waiting to be read
raw_data: If not None, the entire contents of the stream (as a string).
None if the stream should be read() in chunks.
'''
pos = 0
if hasattr(obj, 'tell'):
pos = obj.tell()
# Handle cStringIO and StringIO
if hasattr(obj, 'getvalue'):
# Why using getvalue() makes sense:
# For StringIO, this string is pre-computed anyway by read().
# For cStringIO, getvalue() is the only way
# to determine the length without read()'ing the whole thing.
raw_data = obj.getvalue()
if pos == 0:
return (len(raw_data), raw_data)
else:
# We could return raw_data[pos:], but that could drastically
# increase memory usage. Better to read it block at a time.
size = max(0, len(raw_data) - pos)
return (size, None)
# Handle real files
if hasattr(obj, 'fileno'):
size = max(0, os.fstat(obj.fileno()).st_size - pos)
return (size, None)
# User-defined object with len()
if hasattr(obj, '__len__'):
size = max(0, len(obj) - pos)
return (size, None)
# We don't know what kind of stream this is.
# To determine the size, we must read the whole thing.
raw_data = obj.read()
return (len(raw_data), raw_data)
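# Illustrative usage sketch (added for clarity, not part of the original SDK):
# exercises analyze_file_obj() with an in-memory buffer and with a real file.
# The StringIO import mirrors the Python 2 idiom used by this code base.
if __name__ == '__main__':
    from StringIO import StringIO
    import tempfile

    size, raw_data = analyze_file_obj(StringIO('hello world'))
    print('in-memory stream: size=%d raw_data=%r' % (size, raw_data))  # contents returned whole

    with tempfile.TemporaryFile(mode='w+') as tmp:
        tmp.write('x' * 1024)
        tmp.seek(0)
        size, raw_data = analyze_file_obj(tmp)
        print('real file: size=%d raw_data=%r' % (size, raw_data))  # size only; read it in chunks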
| mit | 5,704,691,118,987,827,000 | 33.642857 | 89 | 0.57732 | false | 3.919192 | false | false | false |
Rhoana/rh_aligner | old/filter_tiles.py | 1 | 2329 | # Takes a json file that contains many tiles with their bounding boxes (Tile-Spec format)
# and a bounding box, and outputs a single json file containing only the tiles that overlap the bounding box
import sys
import os
import argparse
import json
from bounding_box import BoundingBox
# common functions
def load_tiles(tiles_spec_fname, bbox):
relevant_tiles = []
with open(tiles_spec_fname, 'r') as data_file:
data = json.load(data_file)
for tile in data:
tile_bbox = BoundingBox.fromList(tile['bbox'])
if bbox.overlap(tile_bbox):
relevant_tiles.append(tile)
return relevant_tiles
def filter_tiles(tiles_fname, out_fname, bbox):
# parse the bounding box arguments
bbox = BoundingBox.fromStr(bbox)
# load all tiles from the tile-spec json file that are relevant to our bounding box
relevant_tiles = load_tiles(tiles_fname, bbox)
# Create a tile-spec file that includes all relevant tiles
with open(out_fname, 'w') as outfile:
json.dump(relevant_tiles, outfile, sort_keys=True, indent=4)
def main():
# Command line parser
parser = argparse.ArgumentParser(description='Takes a json file that contains many tiles with their bounding boxes (Tile-Spec format)\
        and a bounding box, and outputs a json file containing only the tiles that overlap the bounding box')
parser.add_argument('tiles_fname', metavar='tiles_json', type=str,
help='a tile_spec file that contains all the images to be aligned in json format')
parser.add_argument('-o', '--output_file', type=str,
help='an output tile_spec file, that will include only the relevant tiles (default: ./filtered.json)',
default='./filtered.json')
# the default bounding box is as big as the image can be
parser.add_argument('-b', '--bounding_box', type=str,
help='the bounding box of the part of image that needs to be aligned format: "from_x to_x from_y to_y" (default: all tiles)',
default='{0} {1} {2} {3}'.format((-sys.maxint - 1), sys.maxint, (-sys.maxint - 1), sys.maxint))
args = parser.parse_args()
#print args
filter_tiles(args.tiles_fname, args.output_file, args.bounding_box)
if __name__ == '__main__':
main()
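# --- Illustrative example (not part of the original script) -----------------
# A hedged sketch of driving filter_tiles() programmatically; only the 'bbox'
# key of each tile is inspected by load_tiles(), and the bbox list order is
# assumed to follow the "from_x to_x from_y to_y" convention of --bounding_box.
def example_run():
    import tempfile
    tiles = [{'bbox': [0, 100, 0, 100]},      # overlaps the query box below
             {'bbox': [200, 300, 200, 300]}]  # completely outside it
    src = tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False)
    json.dump(tiles, src)
    src.close()
    out_fname = src.name + '.filtered.json'
    filter_tiles(src.name, out_fname, '0 150 0 150')
    print('kept tiles: %s' % json.load(open(out_fname)))

# Typical command line usage of the original script:
#   python filter_tiles.py tilespec.json -o filtered.json -b "0 2048 0 2048"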
| mit | 8,947,238,689,689,303,000 | 38.474576 | 149 | 0.660799 | false | 3.947458 | false | false | false |
dunkhong/grr | grr/server/setup.py | 1 | 7769 | #!/usr/bin/env python
"""This is the setup.py file for the GRR client.
This is just a meta-package which pulls in the minimal requirements to create a
full grr server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
import os
import shutil
import subprocess
import sys
from setuptools import find_packages
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.sdist import sdist
GRR_NO_MAKE_UI_FILES_VAR = "GRR_NO_MAKE_UI_FILES"
# TODO: Fix this import once support for Python 2 is dropped.
# pylint: disable=g-import-not-at-top
if sys.version_info.major == 2:
import ConfigParser as configparser
else:
import configparser
# pylint: enable=g-import-not-at-top
def find_data_files(source, ignore_dirs=None):
ignore_dirs = ignore_dirs or []
result = []
for directory, dirnames, files in os.walk(source):
dirnames[:] = [d for d in dirnames if d not in ignore_dirs]
files = [os.path.join(directory, x) for x in files]
result.append((directory, files))
return result
def make_ui_files():
"""Builds necessary assets from sources."""
# Install node_modules, but keep package(-lock).json frozen.
# Using shell=True, otherwise npm is not found in a nodeenv-built
# virtualenv on Windows.
subprocess.check_call(
"npm ci", shell=True, cwd="grr_response_server/gui/static")
subprocess.check_call(
"npm run gulp compile", shell=True, cwd="grr_response_server/gui/static")
def get_config():
"""Get INI parser with version.ini data."""
ini_path = os.path.join(THIS_DIRECTORY, "version.ini")
if not os.path.exists(ini_path):
ini_path = os.path.join(THIS_DIRECTORY, "../../version.ini")
if not os.path.exists(ini_path):
raise RuntimeError("Couldn't find version.ini")
config = configparser.SafeConfigParser()
config.read(ini_path)
return config
IGNORE_GUI_DIRS = ["node_modules", "tmp"]
THIS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
# If you run setup.py from the root GRR dir you get very different results since
# setuptools uses the MANIFEST.in from the root dir. Make sure we are in the
# package dir.
os.chdir(THIS_DIRECTORY)
VERSION = get_config()
class Develop(develop):
"""Build developer version (pip install -e)."""
user_options = develop.user_options + [
# TODO: This has to be `bytes` on Python 2. Remove this `str`
# call once support for Python 2 is dropped.
(str("no-make-ui-files"), None, "Don't build UI JS/CSS bundles."),
]
def initialize_options(self):
self.no_make_ui_files = None
develop.initialize_options(self)
def run(self):
# pip install -e . --install-option="--no-make-ui-files" passes the
# --no-make-ui-files flag to all GRR dependencies, which doesn't make
# much sense. Checking an environment variable to have an easy way
# to set the flag for grr-response-server package only.
if (not self.no_make_ui_files and
not os.environ.get(GRR_NO_MAKE_UI_FILES_VAR)):
make_ui_files()
develop.run(self)
class Sdist(sdist):
"""Build sdist."""
user_options = sdist.user_options + [
# TODO: This has to be `bytes` on Python 2. Remove this `str`
# call once support for Python 2 is dropped.
(str("no-make-ui-files"), None, "Don't build UI JS/CSS bundles."),
]
def initialize_options(self):
self.no_make_ui_files = None
sdist.initialize_options(self)
def run(self):
    # For consistency, respecting GRR_NO_MAKE_UI_FILES variable just like
# Develop command does.
if (not self.no_make_ui_files and
not os.environ.get(GRR_NO_MAKE_UI_FILES_VAR)):
make_ui_files()
sdist.run(self)
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
sdist_version_ini = os.path.join(base_dir, "version.ini")
if os.path.exists(sdist_version_ini):
os.unlink(sdist_version_ini)
shutil.copy(
os.path.join(THIS_DIRECTORY, "../../version.ini"), sdist_version_ini)
data_files = list(
itertools.chain(
find_data_files("grr_response_server/checks"),
find_data_files("grr_response_server/databases/mysql_migrations"),
find_data_files("grr_response_server/gui/templates"),
find_data_files(
"grr_response_server/gui/static", ignore_dirs=IGNORE_GUI_DIRS),
find_data_files(
"grr_response_server/gui/local/static",
ignore_dirs=IGNORE_GUI_DIRS),
# TODO: This has to be `bytes` on Python 2. Remove this
# `str` call once support for Python 2 is dropped.
[str("version.ini")],
))
setup_args = dict(
name="grr-response-server",
version=VERSION.get("Version", "packageversion"),
description="The GRR Rapid Response Server.",
license="Apache License, Version 2.0",
maintainer="GRR Development Team",
maintainer_email="[email protected]",
url="https://github.com/google/grr",
cmdclass={
"sdist": Sdist,
"develop": Develop
},
packages=find_packages(),
entry_points={
"console_scripts": [
"grr_console = "
"grr_response_server.distro_entry:Console",
"grr_api_shell_raw_access = "
"grr_response_server.distro_entry:ApiShellRawAccess",
"grr_config_updater = "
"grr_response_server.distro_entry:ConfigUpdater",
"grr_frontend = "
"grr_response_server.distro_entry:GrrFrontend",
"grr_server = "
"grr_response_server.distro_entry:GrrServer",
"grr_worker = "
"grr_response_server.distro_entry:Worker",
"grr_admin_ui = "
"grr_response_server.distro_entry:AdminUI",
]
},
install_requires=[
"google-api-python-client==1.7.11",
"google-auth==1.6.3",
"google-cloud-bigquery==1.20.0",
"grr-api-client==%s" % VERSION.get("Version", "packagedepends"),
"grr-response-client-builder==%s" %
VERSION.get("Version", "packagedepends"),
"grr-response-core==%s" % VERSION.get("Version", "packagedepends"),
"Jinja2==2.10.3",
"pexpect==4.7.0",
"portpicker==1.3.1",
"prometheus_client==0.7.1",
"pyjwt==1.7.1",
"pyopenssl==19.0.0", # https://github.com/google/grr/issues/704
"python-crontab==2.3.9",
"python-debian==0.1.36",
"Werkzeug==0.16.0",
],
extras_require={
# This is an optional component. Install to get MySQL data
# store support: pip install grr-response[mysqldatastore]
# When installing from .deb, the python-mysqldb package is used as
# dependency instead of this pip dependency. This is because we run into
# incompatibilities between the system mysqlclient/mariadbclient and the
# Python library otherwise. Thus, this version has to be equal to the
# python-mysqldb version of the system we support. This is currently
# Ubuntu Xenial, see https://packages.ubuntu.com/xenial/python-mysqldb
#
# NOTE: the Xenial-provided 1.3.7 version is not properly Python 3
# compatible. Versions 1.3.13 or later are API-compatible with 1.3.7
# when running on Python 2 and work correctly on Python 3. However,
# they don't have Python 2 wheels released, which makes GRR packaging
# for Python 2 much harder if one of these versions is used.
#
# TODO(user): Find a way to use the latest mysqlclient version
# in GRR server DEB.
"mysqldatastore": ["mysqlclient==1.3.10"],
},
data_files=data_files)
setup(**setup_args)
| apache-2.0 | 2,326,844,858,767,235,600 | 33.528889 | 80 | 0.65195 | false | 3.497974 | true | false | false |
danijar/sets | sets/process/glove.py | 1 | 1218 | from zipfile import ZipFile
import numpy as np
from sets.core import Embedding
class Glove(Embedding):
"""
    The pretrained word embeddings from the Stanford NLP group computed by the
Glove model. From: http://nlp.stanford.edu/projects/glove/
"""
URL = 'http://nlp.stanford.edu/data/glove.6B.zip'
def __init__(self, size=100, depth=1):
assert size in (50, 100, 300)
words, embeddings = self.disk_cache('data', self._load, size)
super().__init__(words, embeddings, depth)
assert self.shape == (size,)
@classmethod
def _load(cls, size):
filepath = cls.download(cls.URL)
with ZipFile(filepath, 'r') as archive:
filename = 'glove.6B.{}d.txt'.format(size)
with archive.open(filename) as file_:
return cls._parse(file_)
@staticmethod
def _parse(file_):
words = []
embeddings = []
for line in file_:
chunks = line.split()
word = chunks[0].decode('utf-8')
embedding = np.array(chunks[1:]).astype(np.float32)
words.append(word)
embeddings.append(embedding)
return np.array(words), np.array(embeddings)
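# Hedged usage sketch (not part of the module): constructing the embedding
# triggers a large download of the GloVe 6B archive on first use, which the
# disk_cache helper from sets.core is expected to reuse on later runs.
if __name__ == '__main__':
    glove = Glove(size=50)   # 50-dimensional vectors; 100 and 300 are also allowed
    print(glove.shape)       # (50,), as asserted in __init__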
| mit | 2,540,729,255,029,553,700 | 31.052632 | 79 | 0.591954 | false | 3.702128 | false | false | false |
sorenh/cc | nova/tests/network_unittest.py | 1 | 5207 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 Anso Labs, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import unittest
from nova import vendor
import IPy
from nova import flags
from nova import test
from nova.compute import network
from nova.auth import users
from nova import utils
class NetworkTestCase(test.TrialTestCase):
def setUp(self):
super(NetworkTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_storage=True,
fake_network=True,
network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
self.manager = users.UserManager.instance()
try:
self.manager.create_user('netuser', 'netuser', 'netuser')
except: pass
for i in range(0, 6):
name = 'project%s' % i
if not self.manager.get_project(name):
self.manager.create_project(name, 'netuser', name)
self.network = network.PublicNetworkController()
def tearDown(self):
super(NetworkTestCase, self).tearDown()
for i in range(0, 6):
name = 'project%s' % i
self.manager.delete_project(name)
self.manager.delete_user('netuser')
def test_public_network_allocation(self):
pubnet = IPy.IP(flags.FLAGS.public_range)
address = self.network.allocate_ip("netuser", "project0", "public")
self.assertTrue(IPy.IP(address) in pubnet)
self.assertTrue(IPy.IP(address) in self.network.network)
def test_allocate_deallocate_ip(self):
address = network.allocate_ip(
"netuser", "project0", utils.generate_mac())
logging.debug("Was allocated %s" % (address))
self.assertEqual(True, address in self._get_project_addresses("project0"))
rv = network.deallocate_ip(address)
self.assertEqual(False, address in self._get_project_addresses("project0"))
def test_range_allocation(self):
address = network.allocate_ip(
"netuser", "project0", utils.generate_mac())
secondaddress = network.allocate_ip(
"netuser", "project1", utils.generate_mac())
self.assertEqual(True,
address in self._get_project_addresses("project0"))
self.assertEqual(True,
secondaddress in self._get_project_addresses("project1"))
self.assertEqual(False, address in self._get_project_addresses("project1"))
rv = network.deallocate_ip(address)
self.assertEqual(False, address in self._get_project_addresses("project0"))
rv = network.deallocate_ip(secondaddress)
self.assertEqual(False,
secondaddress in self._get_project_addresses("project1"))
def test_subnet_edge(self):
secondaddress = network.allocate_ip("netuser", "project0",
utils.generate_mac())
for project in range(1,5):
project_id = "project%s" % (project)
address = network.allocate_ip(
"netuser", project_id, utils.generate_mac())
address2 = network.allocate_ip(
"netuser", project_id, utils.generate_mac())
address3 = network.allocate_ip(
"netuser", project_id, utils.generate_mac())
self.assertEqual(False,
address in self._get_project_addresses("project0"))
self.assertEqual(False,
address2 in self._get_project_addresses("project0"))
self.assertEqual(False,
address3 in self._get_project_addresses("project0"))
rv = network.deallocate_ip(address)
rv = network.deallocate_ip(address2)
rv = network.deallocate_ip(address3)
rv = network.deallocate_ip(secondaddress)
def test_too_many_projects(self):
for i in range(0, 30):
name = 'toomany-project%s' % i
self.manager.create_project(name, 'netuser', name)
address = network.allocate_ip(
"netuser", name, utils.generate_mac())
rv = network.deallocate_ip(address)
self.manager.delete_project(name)
def _get_project_addresses(self, project_id):
project_addresses = []
for addr in network.get_project_network(project_id).list_addresses():
project_addresses.append(addr)
return project_addresses
| apache-2.0 | -6,371,485,918,494,928,000 | 40.991935 | 83 | 0.620895 | false | 4.142403 | true | false | false |
elffersj/cnfgen | cnfformula/families/graphisomorphism.py | 1 | 4562 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Graph isomorphimsm/automorphism formulas
"""
from cnfformula.cnf import CNF
from cnfformula.cmdline import SimpleGraphHelper
from cnfformula.cmdline import register_cnfgen_subcommand
from cnfformula.families import register_cnf_generator
from cnfformula.graphs import enumerate_vertices
from itertools import combinations,product
def _graph_isomorphism_var(u, v):
"""Standard variable name"""
return "x_{{{0},{1}}}".format(u, v)
@register_cnf_generator
def GraphIsomorphism(G1, G2):
"""Graph Isomorphism formula
The formula is the CNF encoding of the statement that two simple
graphs G1 and G2 are isomorphic.
Parameters
----------
G1 : networkx.Graph
an undirected graph object
G2 : networkx.Graph
an undirected graph object
Returns
-------
    A CNF formula which is satisfiable if and only if graphs G1 and G2
are isomorphic.
"""
F = CNF()
F.header = "Graph Isomorphism problem between graphs " +\
G1.name + " and " + G2.name + "\n" + F.header
U=enumerate_vertices(G1)
V=enumerate_vertices(G2)
var = _graph_isomorphism_var
for (u, v) in product(U,V):
F.add_variable(var(u, v))
# Defined on both side
for u in U:
F.add_clause([(True, var(u, v)) for v in V], strict=True)
for v in V:
F.add_clause([(True, var(u, v)) for u in U], strict=True)
# Injective on both sides
for u in U:
for v1, v2 in combinations(V, 2):
F.add_clause([(False, var(u, v1)),
(False, var(u, v2))], strict=True)
for v in V:
for u1, u2 in combinations(U, 2):
F.add_clause([(False, var(u1, v)),
(False, var(u2, v))], strict=True)
# Edge consistency
for u1, u2 in combinations(U, 2):
for v1, v2 in combinations(V, 2):
if G1.has_edge(u1, u2) != G2.has_edge(v1, v2):
F.add_clause([(False, var(u1, v1)),
(False, var(u2, v2))], strict=True)
F.add_clause([(False, var(u1, v2)),
(False, var(u2, v1))], strict=True)
return F
@register_cnf_generator
def GraphAutomorphism(G):
"""Graph Automorphism formula
The formula is the CNF encoding of the statement that a graph G
has a nontrivial automorphism, i.e. an automorphism different from
    the identical one.
Parameter
---------
G : a simple graph
Returns
-------
    A CNF formula which is satisfiable if and only if graph G has a
nontrivial automorphism.
"""
tmp = CNF()
header = "Graph automorphism formula for graph "+ G.name +"\n"+ tmp.header
F = GraphIsomorphism(G, G)
F.header = header
var = _graph_isomorphism_var
F.add_clause([(False, var(u, u)) for u in enumerate_vertices(G)], strict=True)
return F
@register_cnfgen_subcommand
class GAutoCmdHelper(object):
"""Command line helper for Graph Automorphism formula
"""
name='gauto'
description='graph automorphism formula'
@staticmethod
def setup_command_line(parser):
"""Setup the command line options for graph automorphism formula
Arguments:
- `parser`: parser to load with options.
"""
SimpleGraphHelper.setup_command_line(parser)
@staticmethod
def build_cnf(args):
"""Build a graph automorphism formula according to the arguments
Arguments:
- `args`: command line options
"""
G = SimpleGraphHelper.obtain_graph(args)
return GraphAutomorphism(G)
@register_cnfgen_subcommand
class GIsoCmdHelper(object):
"""Command line helper for Graph Isomorphism formula
"""
name='giso'
description='graph isomorphism formula'
@staticmethod
def setup_command_line(parser):
"""Setup the command line options for graph isomorphism formula
Arguments:
- `parser`: parser to load with options.
"""
SimpleGraphHelper.setup_command_line(parser,suffix="1",required=True)
SimpleGraphHelper.setup_command_line(parser,suffix="2",required=True)
@staticmethod
def build_cnf(args):
"""Build a graph automorphism formula according to the arguments
Arguments:
- `args`: command line options
"""
G1 = SimpleGraphHelper.obtain_graph(args,suffix="1")
G2 = SimpleGraphHelper.obtain_graph(args,suffix="2")
return GraphIsomorphism(G1,G2)
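# Small end-to-end sketch (illustrative, not part of the library): a path on
# three vertices and a two-leaf star are the same graph up to relabeling, so
# the isomorphism formula below is satisfiable; the triangle has nontrivial
# automorphisms, so the automorphism formula is satisfiable as well. The
# dimacs() serialization method is assumed from the cnfformula.cnf.CNF API.
if __name__ == '__main__':
    import networkx as nx
    iso = GraphIsomorphism(nx.path_graph(3), nx.star_graph(2))
    auto = GraphAutomorphism(nx.cycle_graph(3))
    print(iso.dimacs())
    print(auto.dimacs())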
| gpl-3.0 | -2,192,167,595,266,328,300 | 25.994083 | 82 | 0.61815 | false | 3.679032 | false | false | false |
lmb/Supermega | supermega/tests/test_session.py | 1 | 2433 | import unittest
import hashlib
import os
import random
from StringIO import StringIO
from .. import Session, User, File, Directory
from .. import errors
USERNAME = os.environ.get('MEGA_USERNAME', None)
PASSWORD = os.environ.get('MEGA_PASSWORD', None)
def random_string(length):
return (('%0'+str(length)+'x') % random.randrange(256**(length/2)))[:length]
def calculate_hash(string):
hash = hashlib.sha256()
hash.update(string)
return hash.hexdigest()
def verify_hash(file, chunks, obj, sha256):
hash = hashlib.sha256()
for chunk in chunks:
hash.update(chunk)
obj.assertEqual(hash.hexdigest(), sha256)
requires_account = unittest.skipUnless(USERNAME and PASSWORD,
"MEGA_USERNAME or MEGA_PASSWORD missing")
class TestSession(unittest.TestCase):
def setUp(self):
self.sess = Session()
def test_public_file_download(self):
url = 'https://mega.co.nz/#!2ctGgQAI!AkJMowjRiXVcSrRLn3d-e1vl47ZxZEK0CbrHGIKFY-E'
sha256 = '9431103cb989f2913cbc503767015ca22c0ae40942932186c59ffe6d6a69830d'
self.sess.download(verify_hash, url, self, sha256)
def test_ephemeral_account(self):
sess = Session.ephemeral()
sess.root # This triggers lazy-loading the datastore
def test_key_derivation(self):
self.assertEqual(User.derive_key("password"), 'd\x039r^n\xbd\x13\xa2_\x00R\x12\x9f|\xb1')
@requires_account
def test_create_from_env(self):
s = Session.from_env()
@requires_account
def test_print_tree(self):
sess = Session(USERNAME, PASSWORD)
sess.root.print_tree()
class TestFile(unittest.TestCase):
def setUp(self):
self.sess = Session(USERNAME, PASSWORD)
self.random_filename = random_string(5)
def tearDown(self):
try:
f = self.sess.root[self.random_filename]
f.delete()
        except (KeyError, errors.ObjectNotFound):
pass
@requires_account
def test_file_upload_download(self):
length = random.randint(120, 400) * 0x400
        contents = chr(random.randint(0, 255)) * length  # 255, not 256: chr() accepts only 0-255
sha256 = calculate_hash(contents)
fileobj = StringIO(contents)
uploaded_file = File.upload(self.sess.root, fileobj,
name=self.random_filename, size=length)
uploaded_file.download(verify_hash, self, sha256)
class TestDirectory(unittest.TestCase):
def setUp(self):
self.sess = Session(USERNAME, PASSWORD)
@requires_account
def test_create(self):
root = self.sess.root
d = None
try:
random_dir = random_string(5)
d = Directory.create(random_dir, root)
finally:
if d:
d.delete()
| bsd-3-clause | -1,089,188,042,587,898,900 | 24.882979 | 91 | 0.731196 | false | 2.90681 | true | false | false |
veltzer/demos-python | src/examples/short/multi_processing/single_process.py | 1 | 1245 | #!/usr/bin/env python
import fcntl
import os
import os.path
import sys
import time
'''
This is an example of how to make sure only a single python process is
running of a specific kind...
References:
- http://stackoverflow.com/questions/220525/ensure-a-single-instance-of-an-application-in-linux
'''
do_fork = False
def single_runner():
program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
pid_file = '/tmp/{}.pid'.format(program_name)
try:
fp = os.open(pid_file, os.O_WRONLY | os.O_CREAT)
fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
# another instance is running
print('this program is already running...', file=sys.stderr)
sys.exit(1)
# this does not work
def single_runner_simple():
program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
pid_file = '/tmp/{}.pid'.format(program_name)
# if os.path.isfile(pid_file):
# os.unlink(pid_file)
try:
os.open(pid_file, os.O_CREAT | os.O_EXCL)
except IOError as e:
print(e)
# another instance is running
print('this program is already running...', file=sys.stderr)
sys.exit(1)
single_runner()
while True:
time.sleep(3600)
| gpl-3.0 | 3,317,245,707,534,463,500 | 24.408163 | 95 | 0.648996 | false | 3.217054 | false | false | false |
tonysyu/deli | deli/layout/grid_layout.py | 1 | 4491 | """ Tick generator classes and helper functions for calculating axis
tick-related values (i.e., bounds and intervals).
"""
import numpy as np
from traits.api import (Array, HasStrictTraits, Instance, Property,
cached_property)
from .bounding_box import BoundingBox
class BaseGridLayout(HasStrictTraits):
#: The bounding box containing data added to plot.
data_bbox = Instance(BoundingBox)
    #: The data limits in the grid direction.
axial_limits = Property(Array, depends_on='data_bbox.updated')
#: The grid positions in data space.
axial_offsets = Property(Array, depends_on='axial_limits')
@cached_property
def _get_axial_offsets(self):
a_min, a_max = self.axial_limits
return np.array(auto_ticks(a_min, a_max), np.float64)
class XGridLayout(BaseGridLayout):
@cached_property
def _get_axial_limits(self):
return self.data_bbox.x_limits
class YGridLayout(BaseGridLayout):
@cached_property
def _get_axial_limits(self):
return self.data_bbox.y_limits
def auto_ticks(x_min, x_max):
""" Finds locations for axis tick marks.
    Calculates the locations for tick marks on an axis. The tick interval is
    derived from the *x_min* and *x_max* values by ``auto_interval``.
    Parameters
    ----------
    x_min, x_max : number
        The lower and upper bounds of the axis. Returned tick locations are
        clipped to this closed interval.
Returns
-------
An array of tick mark locations. The first and last tick entries are the
axis end points.
"""
lower = float(x_min)
upper = float(x_max)
tick_interval = auto_interval(lower, upper)
# Compute the range of ticks values:
start = np.floor(lower / tick_interval) * tick_interval
end = np.floor(upper / tick_interval) * tick_interval
if upper > end:
end += tick_interval
ticks = np.arange(start, end + (tick_interval / 2.0), tick_interval)
return [tick for tick in ticks if tick >= x_min and tick <= x_max]
def auto_interval(data_low, data_high):
""" Calculates the tick interval for a range.
The function chooses the number of tick marks, which can be between
3 and 9 marks (including end points), and chooses tick intervals at
1, 2, 2.5, 5, 10, 20, ...
Returns
-------
interval : float
tick mark interval for axis
"""
x_range = float(data_high) - float(data_low)
# Choose from between 2 and 8 tick marks. Preference given to more ticks.
# Note: reverse order and see kludge below...
    divisions = np.arange(8.0, 2.0, -1.0) # (8, 7, ..., 3)
# Calculate the intervals for the divisions:
candidate_intervals = x_range / divisions
# Get magnitudes and mantissas for each candidate:
magnitudes = 10.0 ** np.floor(np.log10(candidate_intervals))
mantissas = candidate_intervals / magnitudes
# List of "pleasing" intervals between ticks on graph.
# Only the first magnitude are listed, higher mags others are inferred:
magic_intervals = np.array((1.0, 2.0, 2.5, 5.0, 10.0))
# Calculate the absolute differences between the candidates
# (with magnitude removed) and the magic intervals:
differences = abs(magic_intervals[:, np.newaxis] - mantissas)
# Find the division and magic interval combo that produce the
# smallest differences:
# KLUDGE: 'np.argsort' doesn't preserve the order of equal values,
# so we subtract a small, index dependent amount from each difference
# to force correct ordering.
sh = np.shape(differences)
small = 2.2e-16 * np.arange(sh[1]) * np.arange(sh[0])[:, np.newaxis]
small = small[::-1, ::-1] # reverse the order
differences = differences - small
best_mantissa = np.minimum.reduce(differences, axis=0)
best_magic = np.minimum.reduce(differences, axis=-1)
magic_index = np.argsort(best_magic)[0]
mantissa_index = np.argsort(best_mantissa)[0]
# The best interval is the magic_interval multiplied by the magnitude
# of the best mantissa:
interval = magic_intervals[magic_index]
magnitude = magnitudes[mantissa_index]
result = interval * magnitude
return result
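# Quick illustration of the two helpers above (not part of the module). For the
# range 0..10 the "magic" intervals give a spacing of 2, and auto_ticks() clips
# the resulting grid to the requested limits; the expected output noted in the
# comments is my own calculation, not taken from the original project.
if __name__ == '__main__':
    print(auto_interval(0.0, 10.0))   # expected: 2.0
    print(auto_ticks(0.0, 10.0))      # expected: [0.0, 2.0, 4.0, 6.0, 8.0, 10.0]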
| bsd-3-clause | 4,950,409,187,474,593,000 | 32.266667 | 77 | 0.669784 | false | 3.733167 | false | false | false |
dcneeme/droidcontroller | droidcontroller/uniscada.py | 1 | 36234 | # This Python file uses the following encoding: utf-8
# send and receive monitoring and control messages to/from the UniSCADA monitoring system
# should UDP listening run in a separate thread?
# neeme
import time, datetime
import sqlite3
import traceback
from socket import *
import sys
import os
import gzip
import tarfile
import requests
import logging
log = logging.getLogger(__name__)
class UDPchannel():
''' Sends away the messages, combining different key:value pairs and adding host id and time. Listens for incoming commands and setup data.
Several UDPchannel instances can be used in parallel, to talk with different servers.
Used by sqlgeneral.py
'''
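    # Hedged usage sketch (not from the original code; the address and service
    # values below are made up):
    #   udp = UDPchannel(id='0000000012AB', ip='192.168.1.100', port=44445)
    #   udp.send(('STS', 1, 'TTV', '234 245 230'))  # queue one status/value pair
    #   udp.buff2server()                           # push queued rows out as one datagram
    #   udp.udpread()                               # poll for the server ack / setup data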
def __init__(self, id = '000000000000', ip = '127.0.0.1', port = 44445, receive_timeout = 0.1, retrysend_delay = 5, loghost = '0.0.0.0', logport=514): # delays in seconds
#from droidcontroller.connstate import ConnState
from droidcontroller.statekeeper import StateKeeper
self.sk = StateKeeper(off_tout=300, on_tout=0) # conn state with up/down times.
# do hard reboot via 0xFEED when changed to down.
# what to do if never up? keep hard rebooting?
try:
from droidcontroller.gpio_led import GPIOLED
self.led = GPIOLED() # led alarm and conn
except:
log.warning('GPIOLED not imported')
self.host_id = id
self.ip = ip
self.port = port
self.loghost = loghost
self.logport = logport
self.logaddr = (self.loghost,self.logport) # tuple
self.traffic = [0,0] # UDP bytes in, out
self.UDPSock = socket(AF_INET,SOCK_DGRAM)
self.UDPSock.settimeout(receive_timeout)
self.retrysend_delay = retrysend_delay
self.inum = 0 # sent message counter
self.UDPlogSock = socket(AF_INET,SOCK_DGRAM)
self.UDPlogSock.settimeout(None) # for syslog
self.UDPlogSock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1) # broadcast allowed
print('init: created uniscada and syslog connections to '+ip+':'+str(port)+' and '+loghost+':'+str(logport))
self.table = 'buff2server' # can be anything, not accessible to other objects WHY? would be useful to know the queue length...
self.Initialize()
def Initialize(self):
        ''' Initialize time-related variables and create the buffer database with one table in memory '''
self.ts = round(time.time(),1)
#self.ts_inum = self.ts # inum increase time, is it used at all? NO!
self.ts_unsent = self.ts # last unsent chk
self.ts_udpsent=self.ts
self.ts_udpgot=self.ts
self.conn = sqlite3.connect(':memory:')
#self.cur=self.conn.cursor() # cursors to read data from tables / cursor can be local
self.makebuff() # create buffer table for unsent messages
self.setIP(self.ip)
self.setLogIP(self.loghost)
def setIP(self, invar):
''' Set the monitoring server ip address '''
self.ip = invar
self.saddr = (self.ip,self.port) # refresh needed
def setLogIP(self, invar):
''' Set the syslog monitor ip address '''
self.loghost = invar
self.logaddr = (self.loghost,self.logport) # refresh needed
def setPort(self, invar):
''' Set the monitoring server UDP port '''
self.port = invar
self.saddr = (self.ip,self.port) # refresh needed
def setID(self, invar):
''' Set the host id '''
self.host_id = invar
def setRetryDelay(self, invar):
''' Set the monitoring server UDP port '''
self.retrysend_delay = invar
def getTS(self):
        ''' returns timestamps of the last send attempt and the last successful receive '''
return self.ts_udpsent, self.ts_udpgot
def getID(self):
''' returns host id for this instance '''
return self.host_id
def getIP(self):
''' returns server ip for this instance '''
return self.ip
def getLogIP(self):
''' returns syslog server ip for this instance '''
return self.loghost
def get_traffic(self):
return self.traffic # tuple in, out
def set_traffic(self, bytes_in = None, bytes_out = None): # set UDP traffic counters (it is possible to update only one of them as well)
''' Restores UDP traffic counter'''
if bytes_in != None:
if not bytes_in < 0:
self.traffic[0] = bytes_in
else:
print('invalid bytes_in',bytes_in)
if bytes_out != None:
if not bytes_out < 0:
self.traffic[1] = bytes_out
else:
print('invalid bytes_out',bytes_out)
def set_inum(self,inum = 0): # set message counter
self.inum=inum
def get_inum(self): #get message counter
return self.inum
def get_ts_udpgot(self): #get ts of last ack from monitoring server
return self.ts_udpgot
def makebuff(self): # drops buffer table and creates
Cmd='drop table if exists '+self.table
sql="CREATE TABLE "+self.table+"(sta_reg,status NUMERIC,val_reg,value,ts_created NUMERIC,inum NUMERIC,ts_tried NUMERIC);" # semicolon needed for NPE for some reason!
try:
self.conn.execute(Cmd) # drop the table if it exists
self.conn.executescript(sql) # read table into database
self.conn.commit()
msg='sqlread: successfully (re)created table '+self.table
return 0
except:
msg='sqlread: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
time.sleep(1)
return 1
def delete_buffer(self): # empty buffer
Cmd='delete from '+self.table
try:
self.conn.execute(Cmd)
self.conn.commit()
print('buffer content deleted')
except:
traceback.print_exc()
def send(self, servicetuple): # store service components to buffer for send and resend
''' Adds service components into buffer table to be sent as a string message
the components are sta_reg = '', status = 0, val_reg = '', value = ''
'''
if servicetuple == None:
log.warning('ignored servicetuple with value None')
return 2
try:
sta_reg=str(servicetuple[0])
status=int(servicetuple[1])
val_reg=str(servicetuple[2])
value=str(servicetuple[3])
self.ts = round(time.time(),1)
Cmd="INSERT into "+self.table+" values('"+sta_reg+"',"+str(status)+",'"+val_reg+"','"+value+"',"+str(self.ts)+",0,0)" # inum and ts_tried left initially empty
#print(Cmd) # debug
self.conn.execute(Cmd)
return 0
except:
msg='FAILED to write svc into buffer'
#syslog(msg) # incl syslog
print(msg)
traceback.print_exc()
return 1
def unsent(self): # delete unsent for too long messages - otherwise the udp messages will contain older key:value duplicates!
''' Counts the non-acknowledged messages and removes older than 3 times retrysend_delay '''
if self.ts - self.ts_unsent < self.retrysend_delay / 2: # no need to recheck too early
return 0
self.ts = round(time.time(),1)
self.ts_unsent = self.ts
mintscreated=0
maxtscreated=0
try:
Cmd="BEGIN IMMEDIATE TRANSACTION" # buff2server
self.conn.execute(Cmd)
Cmd="SELECT count(sta_reg),min(ts_created),max(ts_created) from "+self.table+" where ts_created+0+"+str(3*self.retrysend_delay)+"<"+str(self.ts) # yle 3x regular notif
cur = self.conn.cursor()
cur.execute(Cmd)
for rida in cur: # only one line for count if any at all
delcount=rida[0] # int
if delcount>0: # stalled services found
#print repr(rida) # debug
mintscreated=rida[1]
maxtscreated=rida[2]
print(delcount,'services lines waiting ack for',10*self.retrysend_delay,' s to be deleted')
Cmd="delete from "+self.table+" where ts_created+0+"+str(10*self.retrysend_delay)+"<"+str(self.ts) # +" limit 10" # limit lisatud 23.03.2014 aga miks?
self.conn.execute(Cmd)
Cmd="SELECT count(sta_reg),min(ts_created),max(ts_created) from "+self.table
cur.execute(Cmd)
for rida in cur: # only one line for count if any at all
delcount=rida[0] # int
if delcount>50: # delete all!
Cmd="delete from "+self.table
self.conn.execute(Cmd)
msg='deleted '+str(delcount)+' unsent messages from '+self.table+'!'
print(msg)
#syslog(msg)
self.conn.commit() # buff2server transaction end
return delcount # 0
            #time.sleep(1) # just for testing
except:
msg='problem with unsent, '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
#sys.stdout.flush()
time.sleep(1)
return 1
#unsent() end
def buff2server(self): # send the buffer content
''' UDP monitoring message creation and sending (using udpsend)
based on already existing buff2server data, does the retransmits too if needed.
buff2server rows successfully send will be deleted by udpread() based on in: contained in the received message
'''
timetoretry = 0 # local
ts_created = 0 # local
svc_count = 0 # local
sendstring = ''
timetoretry=int(self.ts-self.retrysend_delay) # send again services older than that
Cmd = "BEGIN IMMEDIATE TRANSACTION" # buff2server
try:
self.conn.execute(Cmd)
except:
print('could not start transaction on self.conn, '+self.table)
traceback.print_exc()
Cmd = "SELECT * from "+self.table+" where ts_tried=0 or (ts_tried+0>1358756016 and ts_tried+0<"+str(self.ts)+"+0-"+str(timetoretry)+") AND status+0 != 3 order by ts_created asc limit 30"
try:
cur = self.conn.cursor()
cur.execute(Cmd)
for srow in cur:
#print(repr(srow)) # debug, what will be sent
if svc_count == 0: # on first row only increase the inum!
self.inum=self.inum+1 # increase the message number / WHY HERE? ACK WILL NOT DELETE THE ROWS!
if self.inum > 65535:
self.inum = 1 # avoid zero for sending
#self.ts_inum=self.ts # time to set new inum value
svc_count=svc_count+1
sta_reg=srow[0]
status=srow[1]
val_reg=srow[2]
value=srow[3]
ts_created=srow[4]
if val_reg != '':
sendstring += val_reg+":"+str(value)+"\n"
if sta_reg != '':
sendstring += sta_reg+":"+str(status)+"\n"
Cmd="update "+self.table+" set ts_tried="+str(int(self.ts))+",inum="+str(self.inum)+" where sta_reg='"+sta_reg+"' and status="+str(status)+" and ts_created="+str(ts_created)
#print "update Cmd=",Cmd # debug
self.conn.execute(Cmd)
if svc_count>0: # there is something (changed services) to be sent!
#print(svc_count,"services to send using inum",self.inum) # debug
self.udpsend(sendstring) # sending away
Cmd="SELECT count(inum) from "+self.table # unsent service count in buffer
cur.execute(Cmd) #
for srow in cur:
svc_count2=int(srow[0]) # total number of unsent messages
if svc_count2>30: # do not complain below 30
print(svc_count2,"SERVICES IN BUFFER waiting for ack from monitoring server")
except: # buff2server read unsuccessful. unlikely...
msg='problem with '+self.table+' read '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
#sys.stdout.flush()
time.sleep(1)
return 1
self.conn.commit() # buff2server transaction end
return 0
# udpmessage() end
# #################
def udpsend(self, sendstring = ''): # actual udp sending, no resend. give message as parameter. used by buff2server too.
''' Sends UDP data immediately, adding self.inum if >0. '''
if sendstring == '': # nothing to send
print('udpsend(): nothing to send!')
return 1
self.ts = round(time.time(),1)
        sendstring += "id:"+str(self.host_id)+"\n" # hoping that ts_created is roughly the same for these services...
if self.inum > 0: # "in:inum" to be added
sendstring += "in:"+str(self.inum)+","+str(int(round(self.ts)))+"\n"
self.traffic[1]=self.traffic[1]+len(sendstring) # adding to the outgoing UDP byte counter
try:
self.led.commLED(0) # off, blinking shows sending and time to ack
except:
pass
try:
            sendlen=self.UDPSock.sendto(sendstring.encode('utf-8'),self.saddr) # returns the number of bytes sent
self.traffic[1]=self.traffic[1]+sendlen # traffic counter udp out
msg='==>> sent ' +str(sendlen)+' bytes to '+str(repr(self.saddr))+' '+sendstring.replace('\n',' ') # show as one line
print(msg)
#syslog(msg)
sendstring=''
self.ts_udpsent=self.ts # last successful udp send
return sendlen
except:
msg='udp send failure in udpsend() to saddr '+repr(self.saddr)+', lasting s '+str(int(self.ts - self.ts_udpsent)) # cannot send, this means problem with connectivity
#syslog(msg)
print(msg)
traceback.print_exc()
try:
self.led.alarmLED(1) # send failure
except:
pass
return None
def read_buffer(self, mode = 0): # 0 prints content, 1 is silent but returns record count, min and max ts
''' reads the content of the buffer, debugging needs mainly.
Returns the number of waiting to be deleted messages, the earliest and the latest timestamps. '''
if mode == 0: # just print the waiting messages
Cmd ="SELECT * from "+self.table
cur = self.conn.cursor()
cur.execute(Cmd)
for row in cur:
print(repr(row))
elif mode == 1: # stats
Cmd ="SELECT count(ts_created),min(ts_created),max(ts_created) from "+self.table
cur = self.conn.cursor()
cur.execute(Cmd)
for row in cur:
return row[0],row[1],row[2] # print(repr(row))
def udpread(self):
''' Checks received data for monitoring server to see if the data contains key "in",
then deletes the rows with this inum in the sql table.
If the received datagram contains more data, these key:value pairs are
returned as dictionary.
'''
data=''
data_dict={} # possible setup and commands
sendstring = ''
try: # if anything is comes into udp buffer before timepout
buf=1024
rdata,raddr = self.UDPSock.recvfrom(buf)
data=rdata.decode("utf-8") # python3 related need due to mac in hex
except:
#print('no new udp data received') # debug
#traceback.print_exc()
return None
if len(data) > 0: # something arrived
#log.info('>>> got from receiver '+str(repr(raddr))+' '+str(repr(data)))
self.traffic[0]=self.traffic[0]+len(data) # adding top the incoming UDP byte counter
log.debug('<<<< got from receiver '+str(data.replace('\n', ' ')))
if (int(raddr[1]) < 1 or int(raddr[1]) > 65536):
msg='illegal remote port '+str(raddr[1])+' in the message received from '+raddr[0]
print(msg)
#syslog(msg)
if raddr[0] != self.ip:
msg='illegal sender '+str(raddr[0])+' of message: '+data+' at '+str(int(self.ts)) # ignore the data received!
print(msg)
#syslog(msg)
data='' # data destroy
if "id:" in data: # first check based on host id existence in the received message, must exist to be valid message!
in_id=data[data.find("id:")+3:].splitlines()[0]
if in_id != self.host_id:
log.warning("invalid id "+in_id+" in server message from "+str(raddr[0])) # this is not for us!
data=''
return data # error condition, traffic counter was still increased
else:
self.ts_udpgot=self.ts # timestamp of last udp received
try:
self.led.commLED(1) # data from server, comm OK
except:
pass
self.sk.up()
lines=data.splitlines() # split message into key:value lines
for i in range(len(lines)): # looking into every member of incoming message
if ":" in lines[i]:
#print " "+lines[i]
line = lines[i].split(':')
line = lines[i].split(':')
sregister = line[0] # setup reg name
svalue = line[1] # setup reg value
log.debug('processing key:value '+sregister+':'+svalue)
if sregister != 'in' and sregister != 'id': # may be setup or command (cmd:)
msg='got setup/cmd reg:val '+sregister+':'+svalue # need to reply in order to avoid retransmits of the command(s)
log.info(msg)
data_dict.update({ sregister : svalue }) # in and id are not included in dict
#udp.syslog(msg) # cannot use udp here
#sendstring += sregister+":"+svalue+"\n" # add to the answer - better to answer with real values immediately after change
else:
if sregister == "in": # one such a key in message
inumm=eval(data[data.find("in:")+3:].splitlines()[0].split(',')[0]) # loodaks integerit
if inumm >= 0 and inumm<65536: # valid inum, response to message sent if 1...65535. datagram including "in:0" is a server initiated "fast communication" message
#print "found valid inum",inum,"in the incoming message " # temporary
msg='got ack '+str(inumm)+' in message: '+data.replace('\n',' ')
log.debug(msg)
#syslog(msg)
Cmd="BEGIN IMMEDIATE TRANSACTION" # buff2server, to delete acknowledged rows from the buffer
self.conn.execute(Cmd) # buff2server ack transactioni algus, loeme ja kustutame saadetud read
Cmd="DELETE from "+self.table+" WHERE inum='"+str(inumm)+"'" # deleting all rows where inum matches server ack
try:
self.conn.execute(Cmd) # deleted
except:
msg='problem with '+Cmd+'\n'+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
time.sleep(1)
self.conn.commit() # buff2server transaction end
#if len(sendstring) > 0:
# self.udpsend(sendstring) # send the response right away to avoid multiple retransmits
# log.info('response to server: '+str(sendstring)) # this answers to the server but does not update the setup or service table yet!
#siin ei vasta
return data_dict # possible key:value pairs here for setup change or commands. returns {} for just ack with no cmd
else:
return None
def syslog(self, msg,logaddr=()): # sending out syslog message to self.logaddr.
msg=msg+"\n" # add newline to the end
#print('syslog send to',self.logaddr) # debug
dnsize=0
if self.logaddr == None and logaddr != ():
self.logaddr = logaddr
try: #
self.UDPlogSock.sendto(msg.encode('utf-8'),self.logaddr)
if not '255.255.' in self.logaddr[0] and not '10.0.' in self.logaddr[0] and not '192.168.' in self.logaddr[0]: # sending syslog out of local network
dnsize=len(msg) # udp out increase, payload only
except:
pass # kui udp ei toimi, ei toimi ka syslog
print('could NOT send syslog message to '+repr(self.logaddr))
traceback.print_exc()
self.traffic[1] += dnsize # udp traffic
return 0
def comm(self): # do this regularly, blocks for the time of socket timeout!
''' Communicates with monitoring server, listens to return cmd and setup key:value and sends waiting data. '''
self.ts = round(time.time(),1) # timestamp
self.unsent() # delete old records
udpgot = self.udpread() # check for incoming udp data
# parse_udp()
self.buff2server() # send away. the ack for this is available on next comm() hopefully
return udpgot
class TCPchannel(UDPchannel): # used this parent to share self.syslog()
''' Communication via TCP (pull, push, calendar) '''
def __init__(self, id = '000000000000', supporthost = 'www.itvilla.ee', directory = '/support/pyapp/', uploader='/upload.php', base64string='cHlhcHA6QkVMYXVwb2E='):
self.supporthost = supporthost
self.uploader=uploader
self.base64string=base64string
self.traffic = [0,0] # TCP bytes in, out
self.setID(id)
self.directory=directory
self.ts_cal=time.time()
self.conn = sqlite3.connect(':memory:') # for calendar table
self.makecalendar()
def setID(self, invar):
''' Set the host id '''
self.host_id = invar
def getID(self):
'''returns server ip for this instance '''
return self.host_id
def get_traffic(self): # TCP traffic counter
return self.traffic # tuple in, out
def set_traffic(self, bytes_in = None, bytes_out = None): # set TCP traffic counters (it is possible to update only one of them as well)
''' Restores TCP traffic counter [in, out] '''
if bytes_in != None:
if not bytes_in < 0:
self.traffic[0] = bytes_in
log.debug('set bytes_in to '+str(bytes_in))
else:
log.warning('invalid bytes_in '+str(bytes_in))
if bytes_out != None:
if not bytes_out < 0:
self.traffic[1] = bytes_out
log.debug('set bytes_out to '+str(bytes_in))
else:
print('invalid bytes_out',bytes_out)
log.warning('invalid bytes_out '+str(bytes_in))
def get_ts_cal(self): # last time calendar was accessed
return int(round(self.ts_cal))
def push(self, filename): # send (gzipped) file to supporthost
''' push file filename to supporthost directory using uploader and base64string (for basic auth) '''
if os.path.isfile(filename):
pass
else:
msg='push: found no file '+filename
print(msg)
return 2 # no such file
if '.gz' in filename or '.tgz' in filename: # packed already
pass
else: # lets unpack too
f_in = open(filename, 'rb')
f_out = gzip.open(filename+'.gz', 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
filename = filename+'.gz' # new filename to send
dnsize=os.stat(filename)[6] # file size to be sent
msg='the file was gzipped to '+filename+' with size '+str(dnsize) # the original file is kept!
print(msg)
#udp.syslog(msg)
try:
r = requests.post('http://'+self.supporthost+self.uploader,
files={'file': open(filename, 'rb')},
headers={'Authorization': 'Basic '+self.base64string},
data={'mac': self.directory+self.host_id+'/'}
)
print('post response:',r.text) # nothing?
msg='file '+filename+' with size '+str(dnsize)+' sent to '+self.directory+self.host_id+'/'
#udp.syslog(msg)
print(msg)
self.traffic[1] += dnsize
return 0
except:
msg='the file '+filename+' was NOT sent to '+self.directory+self.host_id+'/ '+str(sys.exc_info()[1])
#udp.syslog(msg)
print(msg)
#traceback.print_exc()
return 1
def pull(self, filename, filesize, start=0):
''' Retrieves file from support server via http get, uncompressing
too if filename contains .gz or tgz and succesfully retrieved.
Parameter start=0 normally, higher with resume.
'''
oksofar=1 # success flag
filename2='' # for uncompressed from the downloaded file
filepart=filename+'.part' # temporary, to be renamed to filename when complete
filebak=filename+'.bak'
dnsize=0 # size of downloaded file
if start>filesize:
msg='pull parameters: file '+filename+' start '+str(start)+' above filesize '+str(filesize)
log.debug(msg)
#udp.syslog(msg)
return 99 # illegal parameters or file bigger than stated during download resume
req = 'http://'+self.supporthost+self.directory+self.host_id+'/'+filename
pullheaders={'Range': 'bytes=%s-' % (start)} # with requests
msg='trying '+req+' from byte '+str(start)+' using '+repr(pullheaders)
log.info(msg)
#udp.syslog(msg)
try:
response = requests.get(req, headers=pullheaders) # with python3
output = open(filepart,'wb')
output.write(response.content)
output.close()
except:
msg='pull: partial or failed download of temporary file '+filepart+' '+str(sys.exc_info()[1])
log.warning(msg)
#udp.syslog(msg)
#traceback.print_exc()
try:
dnsize=os.stat(filepart)[6] # int(float(subexec('ls -l '+filename,1).split(' ')[4]))
except:
msg='pull: got no size for file '+os.getcwd()+'/'+filepart+' '+str(sys.exc_info()[1])
print(msg)
#udp.syslog(msg)
#traceback.print_exc()
oksofar=0
if dnsize == filesize: # ok
msg='pull: file '+filename+' download OK, size '+str(dnsize)
print(msg)
#udp.syslog(msg)
try:
os.rename(filename, filebak) # keep the previous version if exists
#msg='renamed '+filename+' to '+filebak
except:
#traceback.print_exc()
msg='FAILED to rename '+filename+' to '+filebak+' '+str(sys.exc_info()[1])
print(msg)
#udp.syslog(msg)
oksofar=0
try:
os.rename(filepart, filename) #rename filepart to filename2
#msg='renamed '+filepart+' to '+filename
except:
msg='FAILED to rename '+filepart+' to '+filename+' '+str(sys.exc_info()[1])
print(msg)
#udp.syslog(msg)
oksofar=0
#traceback.print_exc()
if oksofar == 0: # trouble, exit
self.traffic[0] += dnsize
return 1
if '.gz' in filename: # lets unpack too
filename2=filename.replace('.gz','')
try:
os.rename(filename2, filename2+'.bak') # keep the previous versioon if exists
except:
#traceback.print_exc()
pass
try:
f = gzip.open(filename,'rb')
output = open(filename2,'wb')
output.write(f.read());
output.close() # file with filename2 created
msg='pull: gz file '+filename+' unzipped to '+filename2+', previous file kept as '+filebak
print(msg)
except:
os.rename(filename2+'.bak', filename2) # restore the previous versioon if unzip failed
msg='pull: file '+filename+' unzipping failure, previous file '+filename2+' restored. '+str(sys.exc_info()[1])
#traceback.print_exc()
print(msg)
#udp.syslog(msg)
self.traffic[0] += dnsize
return 1
if '.tgz' in filename: # possibly contains a directory
try:
f = tarfile.open(filename,'r')
f.extractall() # extract all into the current directory
f.close()
msg='pull: tgz file '+filename+' successfully unpacked'
print(msg)
#udp.syslog(msg)
except:
msg='pull: tgz file '+filename+' unpacking failure! '+str(sys.exc_info()[1])
#traceback.print_exc()
print(msg)
#udp.syslog(msg)
self.traffic[0] += dnsize
return 1
# temporarely switching off this chmod feature, failing!!
#if '.py' in filename2 or '.sh' in filename2: # make it executable, only works with gzipped files!
# try:
# st = os.stat('filename2')
# os.chmod(filename2, st.st_mode | stat.S_IEXEC) # add +x for the owner
# msg='made the pulled file executable'
# print(msg)
# syslog(msg)
# return 0
# except:
# msg='FAILED to make pulled file executable!'
# print(msg)
## syslog(msg)
# traceback.print_exc()
# return 99
self.traffic[0] += dnsize
return 0
else:
if dnsize<filesize:
msg='pull: file '+filename+' received partially with size '+str(dnsize)
print(msg)
#udp.syslog(msg)
self.traffic[0] += dnsize
return 1 # next try will continue
else:
msg='pull: file '+filename+' received larger than unexpected, in size '+str(dnsize)
print(msg)
#udp.syslog(msg)
self.traffic[0] += dnsize
return 99
def makecalendar(self, table='calendar'): # creates buffer table in memory for calendar events
Cmd='drop table if exists '+table
sql="CREATE TABLE "+table+"(title,timestamp,value);CREATE INDEX ts_calendar on "+table+"(timestamp);" # semicolon needed for NPE for some reason!
try:
self.conn.execute(Cmd) # drop the table if it exists
self.conn.executescript(sql) # read table into database
self.conn.commit()
msg='successfully (re)created table '+table
return 0
except:
msg='sqlread: '+str(sys.exc_info()[1])
print(msg)
#udp.syslog(msg)
traceback.print_exc()
time.sleep(1)
return 1
def get_calendar(self, id, days = 3): # query to SUPPORTHOST, returning txt. started by cmd:GCAL too for testing
''' google calendar events via monitoring server '''
# example: http://www.itvilla.ee/cgi-bin/gcal.cgi?mac=000101000001&days=10
self.ts_cal=time.time() # calendar access timestamp
cur=self.conn.cursor()
req = 'http://www.itvilla.ee/cgi-bin/gcal.cgi?mac='+id+'&days='+str(days)+'&format=json'
headers={'Authorization': 'Basic YmFyaXg6Y29udHJvbGxlcg=='} # Base64$="YmFyaXg6Y29udHJvbGxlcg==" ' barix:controller
msg='starting gcal query '+req
print(msg) # debug
try:
response = requests.get(req, headers = headers)
except:
msg='gcal query '+req+' failed!'
traceback.print_exc()
print(msg)
#udp.syslog(msg)
return 1 # kui ei saa gcal yhendust, siis lopetab ja vana ei havita!
try:
events = eval(response.content) # string to list
except:
msg='getting calendar events failed for host id '+id
print(msg)
#udp.syslog(msg)
traceback.print_exc() # debug
return 1 # kui ei saa normaalseid syndmusi, siis ka lopetab
#print(repr(events)) # debug
Cmd = "BEGIN IMMEDIATE TRANSACTION"
try:
self.conn.execute(Cmd)
Cmd="delete from calendar"
self.conn.execute(Cmd)
for event in events:
#print('event',event) # debug
columns=str(list(event.keys())).replace('[','(').replace(']',')')
values=str(list(event.values())).replace('[','(').replace(']',')')
#columns=str(list(event.keys())).replace('{','(').replace('}',')')
#values=str(list(event.values())).replace('{','(').replace('}',')')
Cmd = "insert into calendar"+columns+" values"+values
print(Cmd) # debug
self.conn.execute(Cmd)
self.conn.commit()
msg='calendar table updated'
print(msg)
#udp.syslog(msg) # FIXME - syslog via UDPchannel does not work. syslog() is found, but not it's logaddr?
#self.syslog(msg) # common parent UDP TCP channel
return 0
except:
msg='delete + insert to calendar table failed!'
print(msg)
#udp.syslog(msg)
print('logaddr in tcp',self.logaddr)
#self.syslog(msg,logaddr=self.logaddr) # class UDPchannel is parent to TCPchannel
#UDPchannel.syslog(msg)
traceback.print_exc() # debug
return 1 # kui insert ei onnestu, siis ka delete ei toimu
def chk_calevents(self, title = ''): # set a new setpoint if found in table calendar (sharing database connection with setup)
''' Obsolete, functionality moved to gcal.py '''
ts=time.time()
cur=self.conn.cursor()
value='' # local string value
if title == '':
return None
Cmd = "BEGIN IMMEDIATE TRANSACTION"
try:
conn.execute(Cmd)
Cmd="select value from calendar where title='"+title+"' and timestamp+0<"+str(ts)+" order by timestamp asc" # find the last passed event value
cur.execute(Cmd)
for row in cur:
value=row[0] # overwrite with the last value before now
#print(Cmd4,', value',value) # debug. voib olla mitu rida, viimane value jaab iga title jaoks kehtima
self.conn.commit()
return value # last one for given title becomes effective. can be empty string too, then use default value for setpoint related to title
except:
traceback.print_exc()
return None
| gpl-3.0 | 1,857,257,954,211,608,300 | 41.280047 | 194 | 0.545951 | false | 4.105836 | false | false | false |
mauricioabreu/speakerfight | deck/models.py | 1 | 12968 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import models, transaction
from django.db.models import Count
from django.db.models.aggregates import Sum
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils import timezone, six
from django.utils.encoding import python_2_unicode_compatible
from django_extensions.db.fields import AutoSlugField
from allauth.account.signals import user_signed_up
from textwrap import dedent
from jury.models import Jury
class DeckBaseManager(models.QuerySet):
def cached_authors(self):
return super(DeckBaseManager, self).select_related('author')
def published_ones(self):
return self.cached_authors().filter(is_published=True)
def upcoming(self, published_only=True):
return self.filter(due_date__gte=timezone.now(), is_published=published_only)
def order_by_never_voted(self, user_id):
if self.model != Proposal:
raise AttributeError(
"%s object has no attribute %s" % (
self.model, 'order_by_never_voted'))
order_by_criteria = dedent("""
SELECT 1
FROM deck_vote
WHERE deck_vote.user_id = %s AND
deck_vote.proposal_id = deck_proposal.activity_ptr_id
LIMIT 1
""")
new_ordering = ['-never_voted']
if settings.DATABASES['default'].get('ENGINE') == 'django.db.backends.sqlite3':
new_ordering = ['never_voted']
new_ordering.extend(Proposal._meta.ordering)
return self.extra(
select=dict(never_voted=order_by_criteria % user_id),
order_by=new_ordering
)
@python_2_unicode_compatible
class DeckBaseModel(models.Model):
title = models.CharField(_('Title'), max_length=200)
slug = AutoSlugField(populate_from='title', overwrite=True,
max_length=200, unique=True, db_index=True)
description = models.TextField(
_('Description'), max_length=10000, blank=True)
created_at = models.DateTimeField(_('Created At'), auto_now_add=True)
is_published = models.BooleanField(_('Publish'), default=True)
# relations
author = models.ForeignKey(to=settings.AUTH_USER_MODEL,
related_name='%(class)ss')
# managers
objects = DeckBaseManager.as_manager()
class Meta:
abstract = True
def __str__(self):
return six.text_type(self.title)
@python_2_unicode_compatible
class Vote(models.Model):
ANGRY, SLEEPY, SAD, HAPPY, LAUGHING = range(-1, 4)
VOTE_TITLES = dict(
angry=_('Angry'), sad=_('Sad'),
sleepy=_('Sleepy'), happy=_('Happy'),
laughing=_('Laughing')
)
VOTE_RATES = ((ANGRY, 'angry'),
(SAD, 'sad'),
(SLEEPY, 'sleepy'),
(HAPPY, 'happy'),
(LAUGHING, 'laughing'))
rate = models.SmallIntegerField(_('Rate Index'), null=True, blank=True,
choices=VOTE_RATES)
# relations
proposal = models.ForeignKey(to='deck.Proposal', related_name='votes')
user = models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='votes')
class Meta:
verbose_name = _('Vote')
verbose_name_plural = _('Votes')
unique_together = (('proposal', 'user'),)
def __str__(self):
return six.text_type("{0.user}: {0.rate} in {0.proposal}".format(self))
def save(self, *args, **kwargs):
validation_message = None
user_is_in_jury = self.proposal.event.jury.users.filter(
pk=self.user.pk).exists()
if (self.user.is_superuser or user_is_in_jury):
pass
elif self.user == self.proposal.author:
validation_message = _(u'You cannot Rate your own proposals.')
elif not self.proposal.event.allow_public_voting:
validation_message = _(u"Proposal doesn't accept Public Voting.")
if validation_message:
raise ValidationError(_(validation_message))
return super(Vote, self).save(*args, **kwargs)
class Activity(DeckBaseModel):
PROPOSAL = 'proposal'
WORKSHOP = 'workshop'
OPENNING = 'openning'
COFFEEBREAK = 'coffee-break'
LUNCH = 'lunch'
LIGHTNINGTALKS = 'lightning-talks'
ENDING = 'ending'
ACTIVITY_TYPES = (
(PROPOSAL, _('Proposal')),
(WORKSHOP, _('Workshop')),
(OPENNING, _('Openning')),
(COFFEEBREAK, _('Coffee Break')),
(LUNCH, _('Lunch')),
(LIGHTNINGTALKS, _('Lightning Talks')),
(ENDING, _('Ending')),
)
start_timetable = models.TimeField(
_('Start Timetable'), null=True, blank=False)
end_timetable = models.TimeField(
_('End Timetable'), null=True, blank=False)
track_order = models.SmallIntegerField(_('Order'), null=True, blank=True)
activity_type = models.CharField(
_('Type'), choices=ACTIVITY_TYPES, default=PROPOSAL, max_length=50)
# relations
track = models.ForeignKey(to='deck.Track', related_name='activities',
null=True, blank=True)
class Meta:
ordering = ('track_order', 'start_timetable', 'pk')
verbose_name = _('Activity')
verbose_name_plural = _('Activities')
@property
def timetable(self):
if all([self.start_timetable is None, self.end_timetable is None]):
return '--:--'
return '{0} - {1}'.format(
self.start_timetable.strftime('%H:%M'),
self.end_timetable.strftime('%H:%M')
)
class Proposal(Activity):
is_approved = models.BooleanField(_('Is approved'), default=False)
more_information = models.TextField(
_('More information'), max_length=10000, null=True, blank=True)
# relations
event = models.ForeignKey(to='deck.Event', related_name='proposals')
class Meta:
ordering = ['title']
verbose_name = _('Proposal')
verbose_name_plural = _('Proposals')
def save(self, *args, **kwargs):
if not self.pk and self.event.due_date_is_passed:
raise ValidationError(
_("This Event doesn't accept Proposals anymore."))
return super(Proposal, self).save(*args, **kwargs)
@property
def get_rate(self):
rate = None
try:
rate = self.votes__rate__sum
except AttributeError:
rate = self.votes.aggregate(Sum('rate'))['rate__sum']
finally:
return rate or 0
def rate(self, user, rate):
rate_int = [r[0] for r in Vote.VOTE_RATES if rate in r][0]
with transaction.atomic():
self.votes.update_or_create(user=user, defaults={'rate': rate_int})
def user_already_voted(self, user):
if isinstance(user, AnonymousUser):
return False
return self.votes.filter(user=user).exists()
def user_can_vote(self, user):
can_vote = False
if self.author == user and not self.event.author == user:
pass
elif self.event.allow_public_voting:
can_vote = True
elif user.is_superuser:
can_vote = True
elif self.event.jury.users.filter(pk=user.pk).exists():
can_vote = True
return can_vote
def user_can_approve(self, user):
can_approve = False
if user.is_superuser:
can_approve = True
elif self.event.jury.users.filter(pk=user.pk).exists():
can_approve = True
return can_approve
def get_absolute_url(self):
return reverse('view_event', kwargs={'slug': self.event.slug}) + \
'#' + self.slug
def approve(self):
if self.is_approved:
raise ValidationError(_("This Proposal was already approved."))
self.is_approved = True
self.save()
def disapprove(self):
if not self.is_approved:
raise ValidationError(_("This Proposal was already disapproved."))
self.is_approved = False
self.save()
@python_2_unicode_compatible
class Track(models.Model):
title = models.CharField(_('Title'), max_length=200)
slug = AutoSlugField(populate_from='title', overwrite=True,
max_length=200, unique=True, db_index=True)
# relations
event = models.ForeignKey(to='deck.Event', related_name='tracks')
class Meta:
verbose_name = _('Track')
verbose_name_plural = _('Tracks')
def __str__(self):
return six.text_type('Track for: "%s"' % self.event.title)
@property
def proposals(self):
return Proposal.objects.filter(
pk__in=self.activities.values_list('pk', flat=True)
)
class Event(DeckBaseModel):
allow_public_voting = models.BooleanField(_('Allow Public Voting'),
default=True)
due_date = models.DateTimeField(null=False, blank=False)
slots = models.SmallIntegerField(_('Slots'), default=10)
# relations
jury = models.OneToOneField(to='jury.Jury', related_name='event',
null=True, blank=True)
anonymous_voting = models.BooleanField(
_('Anonymous Voting?'), default=False)
class Meta:
ordering = ['-due_date', '-created_at']
verbose_name = _('Event')
verbose_name_plural = _('Events')
@property
def due_date_is_passed(self):
return timezone.now() > self.due_date
@property
def due_date_is_close(self):
if self.due_date_is_passed:
return False
return timezone.now() > self.due_date - timezone.timedelta(days=7)
def get_absolute_url(self):
return reverse('view_event', kwargs={'slug': self.slug})
def user_can_see_proposals(self, user):
can_see_proposals = False
if user.is_superuser or self.author == user:
can_see_proposals = True
elif self.allow_public_voting:
can_see_proposals = True
elif (not user.is_anonymous() and
self.jury.users.filter(pk=user.pk).exists()):
can_see_proposals = True
return can_see_proposals
def get_proposers_count(self):
return self.proposals.values_list(
'author', flat=True).distinct().count()
def get_votes_count(self):
return self.proposals.values_list('votes', flat=True).count()
def get_votes_to_export(self):
return self.proposals.values(
'id', 'title', 'author__username', 'author__email'
).annotate(
Sum('votes__rate')
).annotate(Count('votes'))
def get_schedule(self):
schedule = Activity.objects.filter(track__event=self)\
.cached_authors()\
.annotate(Sum('proposal__votes__rate'))\
.extra(select=dict(track_isnull='track_id IS NULL'))\
.order_by('track_isnull', 'track_order',
'-proposal__votes__rate__sum')
return schedule
def get_not_approved_schedule(self):
return self.proposals\
.cached_authors()\
.filter(
models.Q(is_approved=False) |
models.Q(track__isnull=True))
@receiver(user_signed_up)
def send_welcome_mail(request, user, **kwargs):
if not settings.SEND_NOTIFICATIONS:
return
message = render_to_string('mailing/welcome.txt')
subject = _(u'Welcome')
recipients = [user.email]
send_mail(subject, message, settings.NO_REPLY_EMAIL, recipients)
@receiver(post_save, sender=Event)
def create_initial_jury(sender, instance, signal, created, **kwargs):
if not created:
return
jury = Jury()
jury.save()
jury.users.add(instance.author)
instance.jury = jury
instance.save()
@receiver(post_save, sender=Event)
def create_initial_track(sender, instance, signal, created, **kwargs):
if not created:
return
Track.objects.create(event=instance)
@receiver(post_delete, sender=Proposal)
def send_proposal_deleted_mail(sender, instance, **kwargs):
if not settings.SEND_NOTIFICATIONS:
return
context = {'event_title': instance.event.title,
'proposal_title': instance.title}
message = render_to_string('mailing/jury_deleted_proposal.txt', context)
subject = _(u'Proposal from %s just got deleted' % instance.event.title)
recipients = instance.event.jury.users.values_list('email', flat=True)
send_mail(subject, message, settings.NO_REPLY_EMAIL, recipients)
| mit | -5,524,374,505,757,374,000 | 32.683117 | 87 | 0.610117 | false | 3.814118 | false | false | false |
thomas-bottesch/fcl | python/utils/create_pca_vectors_from_dataset.py | 1 | 2284 | from __future__ import print_function
import fcl
import os
import time
from os.path import abspath, join, dirname, isfile
from fcl import kmeans
from fcl.datasets import load_sector_dataset, load_usps_dataset
from fcl.matrix.csr_matrix import get_csr_matrix_from_object, csr_matrix_to_libsvm_string
from sklearn.decomposition import TruncatedSVD, PCA
from scipy.sparse import csr_matrix
from sklearn.datasets import dump_svmlight_file
import numpy as np
import argparse
def get_pca_projection_csrmatrix(fcl_csr_input_matrix, component_ratio):
n_components = int(fcl_csr_input_matrix.annz * component_ratio)
p = TruncatedSVD(n_components = n_components)
start = time.time()
p.fit(fcl_csr_input_matrix.to_numpy())
# convert to millis
fin = (time.time() - start) * 1000
(n_samples, n_dim) = fcl_csr_input_matrix.shape
print("Truncated SVD took %.3fs to retrieve %s components for input_matrix with n_samples %d, n_dim %d" % (fin/1000.0, str(n_components), n_samples, n_dim))
return get_csr_matrix_from_object(p.components_)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create a pca matrix from an input matrix with given component ratio.')
parser.add_argument('path_input_dataset', type=str, help='Path to the input libsvm dataset')
parser.add_argument('path_output_dataset', type=str, help='Path to the input libsvm dataset')
parser.add_argument('--component_ratio', default=0.1, type=float, help='Percentage of the average non zero values of the input dataset to use as components.')
args = parser.parse_args()
if not isfile(args.path_input_dataset):
raise Exception("Unable to find path_input_dataset: %s" % args.path_input_dataset)
print("Loading data from %s" % args.path_input_dataset)
fcl_mtrx_input_dataset = get_csr_matrix_from_object(args.path_input_dataset)
print("Retrieving the pca projection matrix")
pca_mtrx = get_pca_projection_csrmatrix(fcl_mtrx_input_dataset, args.component_ratio)
print("Convert pca projection matrix to libsvm string")
pca_mtrx_lsvm_str = csr_matrix_to_libsvm_string(pca_mtrx)
print("Writing pca projection matrix libsvm string to file %s" % args.path_output_dataset)
with open(args.path_output_dataset, 'w') as f:
f.write(pca_mtrx_lsvm_str)
| mit | 1,100,592,844,665,426,300 | 45.612245 | 160 | 0.741243 | false | 3.286331 | false | false | false |
credativUK/connector-magento | __unported__/magentoerpconnect/partner.py | 1 | 25089 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import xmlrpclib
from collections import namedtuple
from openerp.osv import fields, orm
from openerp.addons.connector.queue.job import job
from openerp.addons.connector.connector import ConnectorUnit
from openerp.addons.connector.exception import MappingError
from openerp.addons.connector.unit.backend_adapter import BackendAdapter
from openerp.addons.connector.unit.mapper import (mapping,
only_create,
ImportMapper
)
from openerp.addons.connector.exception import IDMissingInBackend
from .unit.backend_adapter import GenericAdapter
from .unit.import_synchronizer import (DelayedBatchImport,
MagentoImportSynchronizer
)
from .backend import magento
from .connector import get_environment
_logger = logging.getLogger(__name__)
class res_partner(orm.Model):
_inherit = 'res.partner'
_columns = {
'magento_bind_ids': fields.one2many(
'magento.res.partner', 'openerp_id',
string="Magento Bindings"),
'magento_address_bind_ids': fields.one2many(
'magento.address', 'openerp_id',
string="Magento Address Bindings"),
'birthday': fields.date('Birthday'),
'company': fields.char('Company'),
}
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default['magento_bind_ids'] = False
return super(res_partner, self).copy_data(cr, uid, id,
default=default,
context=context)
def _address_fields(self, cr, uid, context=None):
""" Returns the list of address fields that are synced from the parent
when the `use_parent_address` flag is set. """
fields = super(res_partner, self)._address_fields(cr, uid,
context=context)
fields.append('company')
return fields
class magento_res_partner(orm.Model):
_name = 'magento.res.partner'
_inherit = 'magento.binding'
_inherits = {'res.partner': 'openerp_id'}
_description = 'Magento Partner'
_rec_name = 'website_id'
def _get_mag_partner_from_website(self, cr, uid, ids, context=None):
mag_partner_obj = self.pool['magento.res.partner']
return mag_partner_obj.search(
cr, uid, [('website_id', 'in', ids)], context=context)
_columns = {
'openerp_id': fields.many2one('res.partner',
string='Partner',
required=True,
ondelete='cascade'),
'backend_id': fields.related(
'website_id', 'backend_id',
type='many2one',
relation='magento.backend',
string='Magento Backend',
store={
'magento.res.partner': (lambda self, cr, uid, ids, c=None: ids,
['website_id'], 10),
'magento.website': (_get_mag_partner_from_website,
['backend_id'], 20),
},
readonly=True),
'website_id': fields.many2one('magento.website',
string='Magento Website',
required=True,
ondelete='restrict'),
'group_id': fields.many2one('magento.res.partner.category',
string='Magento Group (Category)'),
'created_at': fields.datetime('Created At (on Magento)',
readonly=True),
'updated_at': fields.datetime('Updated At (on Magento)',
readonly=True),
'emailid': fields.char('E-mail address'),
'taxvat': fields.char('Magento VAT'),
'newsletter': fields.boolean('Newsletter'),
'guest_customer': fields.boolean('Guest Customer'),
'consider_as_company': fields.boolean(
'Considered as company',
help="An account imported with a 'company' in "
"the billing address is considered as a company.\n "
"The partner takes the name of the company and "
"is not merged with the billing address."),
}
_sql_constraints = [
('magento_uniq', 'unique(website_id, magento_id)',
'A partner with same ID on Magento already exists for this website.'),
]
class magento_address(orm.Model):
_name = 'magento.address'
_inherit = 'magento.binding'
_inherits = {'res.partner': 'openerp_id'}
_description = 'Magento Address'
_rec_name = 'backend_id'
def _get_mag_address_from_partner(self, cr, uid, ids, context=None):
mag_address_obj = self.pool['magento.address']
return mag_address_obj.search(
cr, uid, [('magento_partner_id', 'in', ids)], context=context)
_columns = {
'openerp_id': fields.many2one('res.partner',
string='Partner',
required=True,
ondelete='cascade'),
'created_at': fields.datetime('Created At (on Magento)',
readonly=True),
'updated_at': fields.datetime('Updated At (on Magento)',
readonly=True),
'is_default_billing': fields.boolean('Default Invoice'),
'is_default_shipping': fields.boolean('Default Shipping'),
'magento_partner_id': fields.many2one('magento.res.partner',
string='Magento Partner',
required=True,
ondelete='cascade'),
'backend_id': fields.related(
'magento_partner_id', 'backend_id',
type='many2one',
relation='magento.backend',
string='Magento Backend',
store={
'magento.address': (lambda self, cr, uid, ids, c=None: ids,
['magento_partner_id'], 10),
'magento.res.partner': (_get_mag_address_from_partner,
['backend_id', 'website_id'], 20),
},
readonly=True),
'website_id': fields.related(
'magento_partner_id', 'website_id',
type='many2one',
relation='magento.website',
string='Magento Website',
store={
'magento.address': (lambda self, cr, uid, ids, c=None: ids,
['magento_partner_id'], 10),
'magento.res.partner': (_get_mag_address_from_partner,
['website_id'], 20),
},
readonly=True),
'is_magento_order_address': fields.boolean(
'Address from a Magento Order'),
}
_sql_constraints = [
('magento_uniq', 'unique(backend_id, magento_id)',
'A partner address with same ID on Magento already exists.'),
]
@magento
class PartnerAdapter(GenericAdapter):
_model_name = 'magento.res.partner'
_magento_model = 'customer'
_admin_path = '/{model}/edit/id/{id}'
def _call(self, method, arguments):
try:
return super(PartnerAdapter, self)._call(method, arguments)
except xmlrpclib.Fault as err:
# this is the error in the Magento API
# when the customer does not exist
if err.faultCode == 102:
raise IDMissingInBackend
else:
raise
def search(self, filters=None, from_date=None, magento_website_ids=None):
""" Search records according to some criterias and returns a
list of ids
:rtype: list
"""
if filters is None:
filters = {}
if from_date is not None:
# updated_at include the created records
str_from_date = from_date.strftime('%Y/%m/%d %H:%M:%S')
filters['updated_at'] = {'from': str_from_date}
if magento_website_ids is not None:
filters['website_id'] = {'in': magento_website_ids}
# the search method is on ol_customer instead of customer
return self._call('ol_customer.search',
[filters] if filters else [{}])
@magento
class PartnerBatchImport(DelayedBatchImport):
""" Import the Magento Partners.
For every partner in the list, a delayed job is created.
"""
_model_name = ['magento.res.partner']
def run(self, filters=None):
""" Run the synchronization """
from_date = filters.pop('from_date', None)
magento_website_ids = [filters.pop('magento_website_id')]
record_ids = self.backend_adapter.search(filters,
from_date,
magento_website_ids)
_logger.info('search for magento partners %s returned %s',
filters, record_ids)
for record_id in record_ids:
self._import_record(record_id)
@magento
class PartnerImport(MagentoImportSynchronizer):
_model_name = ['magento.res.partner']
def _import_dependencies(self):
""" Import the dependencies for the record"""
record = self.magento_record
self._import_dependency(record['group_id'],
'magento.res.partner.category')
@property
def mapper(self):
""" Return an instance of ``Mapper`` for the synchronization.
The instanciation is delayed because some synchronisations do
not need such an unit and the unit may not exist.
For ``magento.res.partner``, we have a company mapper and
a mapper, ensure we find the correct one here.
:rtype: :py:class:`~.PartnerImportMapper`
"""
if self._mapper is None:
get_unit = self.environment.get_connector_unit
self._mapper = get_unit(PartnerImportMapper)
return self._mapper
def _after_import(self, partner_binding_id):
""" Import the addresses """
get_unit = self.get_connector_unit_for_model
book = get_unit(PartnerAddressBook, 'magento.address')
book.import_addresses(self.magento_id, partner_binding_id)
@magento
class PartnerImportMapper(ImportMapper):
_model_name = 'magento.res.partner'
direct = [
('email', 'email'),
('dob', 'birthday'),
('created_at', 'created_at'),
('updated_at', 'updated_at'),
('email', 'emailid'),
('taxvat', 'taxvat'),
('group_id', 'group_id'),
]
@only_create
@mapping
def is_company(self, record):
# partners are companies so we can bind
# addresses on them
return {'is_company': True}
@mapping
def names(self, record):
# TODO create a glue module for base_surname
parts = [part for part in (record['firstname'],
record['middlename'],
record['lastname']) if part]
return {'name': ' '.join(parts)}
@mapping
def customer_group_id(self, record):
# import customer groups
binder = self.get_binder_for_model('magento.res.partner.category')
category_id = binder.to_openerp(record['group_id'], unwrap=True)
if category_id is None:
raise MappingError("The partner category with "
"magento id %s does not exist" %
record['group_id'])
# FIXME: should remove the previous tag (all the other tags from
# the same backend)
return {'category_id': [(4, category_id)]}
@mapping
def website_id(self, record):
binder = self.get_binder_for_model('magento.website')
website_id = binder.to_openerp(record['website_id'])
return {'website_id': website_id}
@mapping
def lang(self, record):
binder = self.get_binder_for_model('magento.storeview')
binding_id = binder.to_openerp(record['store_id'])
if binding_id:
storeview = self.session.browse('magento.storeview',
binding_id)
if storeview.lang_id:
return {'lang': storeview.lang_id.code}
@only_create
@mapping
def customer(self, record):
return {'customer': True}
@mapping
def type(self, record):
return {'type': 'default'}
@only_create
@mapping
def openerp_id(self, record):
""" Will bind the customer on a existing partner
with the same email """
sess = self.session
partner_ids = sess.search('res.partner',
[('email', '=', record['email']),
('customer', '=', True),
# FIXME once it has been changed in openerp
('is_company', '=', True)])
if partner_ids:
return {'openerp_id': partner_ids[0]}
AddressInfos = namedtuple('AddressInfos', ['magento_record',
'partner_binding_id',
'merge'])
@magento
class PartnerAddressBook(ConnectorUnit):
""" Import all addresses from the address book of a customer.
This class is responsible to define which addresses should
be imported and how (merge with the partner or not...).
Then, it delegate the import to the appropriate importer.
This is really intricate. The datamodel are different between
Magento and OpenERP and we have many uses cases to cover.
The first thing is that:
- we do not import companies and individuals the same manner
- we do not know if an account is a company -> we assume that
if we found something in the company field of the billing
address, the whole account is a company.
Differences:
- Individuals: we merge the billing address with the partner,
so we'll end with 1 entity if the customer has 1 address
- Companies: we never merge the addresses with the partner,
but we use the company name of the billing address as name
of the partner. We also copy the address informations from
the billing address as default values.
More information on:
https://bugs.launchpad.net/openerp-connector/+bug/1193281
"""
_model_name = 'magento.address'
def import_addresses(self, magento_partner_id, partner_binding_id):
get_unit = self.get_connector_unit_for_model
addresses = self._get_address_infos(magento_partner_id,
partner_binding_id)
for address_id, infos in addresses:
importer = get_unit(MagentoImportSynchronizer)
importer.run(address_id, infos)
def _get_address_infos(self, magento_partner_id, partner_binding_id):
get_unit = self.get_connector_unit_for_model
adapter = get_unit(BackendAdapter)
mag_address_ids = adapter.search({'customer_id':
{'eq': magento_partner_id}})
if not mag_address_ids:
return
for address_id in mag_address_ids:
magento_record = adapter.read(address_id)
# defines if the billing address is merged with the partner
# or imported as a standalone contact
merge = False
if magento_record.get('is_default_billing'):
if magento_record.get('company'):
# when a company is there, we never merge the contact
# with the partner.
# Copy the billing address on the company
# and use the name of the company for the name
company_mapper = get_unit(CompanyImportMapper,
'magento.res.partner')
map_record = company_mapper.map_record(magento_record)
self.session.write('magento.res.partner',
partner_binding_id,
map_record.values())
else:
# for B2C individual customers, merge with the main
# partner
merge = True
# in the case if the billing address no longer
# has a company, reset the flag
self.session.write('magento.res.partner',
partner_binding_id,
{'consider_as_company': False})
address_infos = AddressInfos(magento_record=magento_record,
partner_binding_id=partner_binding_id,
merge=merge)
yield address_id, address_infos
class BaseAddressImportMapper(ImportMapper):
""" Defines the base mappings for the imports
in ``res.partner`` (state, country, ...)
"""
direct = [('postcode', 'zip'),
('city', 'city'),
('telephone', 'phone'),
('fax', 'fax'),
('company', 'company'),
]
@mapping
def state(self, record):
if not record.get('region'):
return
state_ids = self.session.search('res.country.state',
[('name', '=ilike', record['region'])])
if state_ids:
return {'state_id': state_ids[0]}
@mapping
def country(self, record):
if not record.get('country_id'):
return
country_ids = self.session.search(
'res.country',
[('code', '=', record['country_id'])])
if country_ids:
return {'country_id': country_ids[0]}
@mapping
def street(self, record):
value = record['street']
lines = [line.strip() for line in value.split('\n') if line.strip()]
if len(lines) == 1:
result = {'street': lines[0], 'street2': False}
elif len(lines) >= 2:
result = {'street': lines[0], 'street2': u' - '.join(lines[1:])}
else:
result = {}
return result
@mapping
def title(self, record):
prefix = record['prefix']
title_id = False
if prefix:
title_ids = self.session.search('res.partner.title',
[('domain', '=', 'contact'),
('shortcut', 'ilike', prefix)])
if title_ids:
title_id = title_ids[0]
else:
title_id = self.session.create('res.partner.title',
{'domain': 'contact',
'shortcut': prefix,
'name': prefix})
return {'title': title_id}
@magento
class CompanyImportMapper(BaseAddressImportMapper):
""" Special mapping used when we import a company.
A company is considered as such when the billing address
of an account has something in the 'company' field.
This is a very special mapping not used in the same way
than the other.
The billing address will exist as a contact,
but we want to *copy* the data on the company.
The input record is the billing address.
The mapper returns data which will be written on the
main partner, in other words, the company.
The ``@only_create`` decorator would not have any
effect here because the mapper is always called
for updates.
"""
_model_name = 'magento.res.partner'
direct = BaseAddressImportMapper.direct + [
('company', 'name'),
]
@mapping
def consider_as_company(self, record):
return {'consider_as_company': True}
@magento
class AddressAdapter(GenericAdapter):
_model_name = 'magento.address'
_magento_model = 'customer_address'
def search(self, filters=None):
""" Search records according to some criterias
and returns a list of ids
:rtype: list
"""
return [int(row['customer_address_id']) for row
in self._call('%s.list' % self._magento_model,
[filters] if filters else [{}])]
@magento
class AddressImport(MagentoImportSynchronizer):
_model_name = ['magento.address']
def run(self, magento_id, address_infos):
""" Run the synchronization """
self.address_infos = address_infos
return super(AddressImport, self).run(magento_id)
def _get_magento_data(self):
""" Return the raw Magento data for ``self.magento_id`` """
# we already read the data from the Partner Importer
if self.address_infos.magento_record:
return self.address_infos.magento_record
else:
return super(AddressImport, self)._get_magento_data()
def _define_partner_relationship(self, data):
""" Link address with partner or parent company. """
partner_binding_id = self.address_infos.partner_binding_id
partner_id = self.session.read('magento.res.partner',
partner_binding_id,
['openerp_id'])['openerp_id'][0]
if self.address_infos.merge:
# it won't be imported as an independant address,
# but will be linked with the main res.partner
data['openerp_id'] = partner_id
data['type'] = 'default'
else:
data['parent_id'] = partner_id
partner = self.session.browse('res.partner', partner_id)
data['lang'] = partner.lang
data['magento_partner_id'] = self.address_infos.partner_binding_id
return data
def _create(self, data):
data = self._define_partner_relationship(data)
return super(AddressImport, self)._create(data)
@magento
class AddressImportMapper(BaseAddressImportMapper):
_model_name = 'magento.address'
# TODO fields not mapped:
# "suffix"=>"a",
# "vat_id"=>"12334",
direct = BaseAddressImportMapper.direct + [
('created_at', 'created_at'),
('updated_at', 'updated_at'),
('is_default_billing', 'is_default_billing'),
('is_default_shipping', 'is_default_shipping'),
('company', 'company'),
]
@mapping
def names(self, record):
# TODO create a glue module for base_surname
parts = [part for part in (record['firstname'],
record.get('middlename'),
record['lastname']) if part]
return {'name': ' '.join(parts)}
@mapping
def use_parent_address(self, record):
return {'use_parent_address': False}
@mapping
def type(self, record):
if record.get('is_default_billing'):
address_type = 'invoice'
elif record.get('is_default_shipping'):
address_type = 'delivery'
else:
address_type = 'contact'
return {'type': address_type}
@job
def partner_import_batch(session, model_name, backend_id, filters=None):
""" Prepare the import of partners modified on Magento """
if filters is None:
filters = {}
assert 'magento_website_id' in filters, (
'Missing information about Magento Website')
env = get_environment(session, model_name, backend_id)
importer = env.get_connector_unit(PartnerBatchImport)
importer.run(filters=filters)
| agpl-3.0 | -4,476,961,058,515,787,300 | 37.539171 | 79 | 0.544781 | false | 4.363304 | false | false | false |
luotao1/Paddle | python/paddle/static/io.py | 1 | 30947 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import errno
import inspect
import logging
import os
import warnings
import six
import numpy as np
import paddle
from paddle.fluid import (
core,
Variable,
CompiledProgram,
default_main_program,
Program,
layers,
unique_name,
program_guard, )
from paddle.fluid.io import prepend_feed_ops, append_fetch_ops
from paddle.fluid.framework import static_only, Parameter
from paddle.fluid.executor import Executor, global_scope
from paddle.fluid.log_helper import get_logger
__all__ = [
'save_inference_model',
'load_inference_model',
'serialize_program',
'serialize_persistables',
'save_to_file',
'deserialize_program',
'deserialize_persistables',
'load_from_file',
'normalize_program',
]
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
def _check_args(caller, args, supported_args=None, deprecated_args=None):
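    """
    Check that every name in ``args`` is a supported argument of ``caller``;
    raise ValueError for deprecated or unknown argument names.
    """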
supported_args = [] if supported_args is None else supported_args
deprecated_args = [] if deprecated_args is None else deprecated_args
for arg in args:
if arg in deprecated_args:
raise ValueError(
"argument '{}' in function '{}' is deprecated, only {} are supported.".
format(arg, caller, supported_args))
elif arg not in supported_args:
raise ValueError(
"function '{}' doesn't support argument '{}',\n only {} are supported.".
format(caller, arg, supported_args))
def _check_vars(name, var_list):
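    """
    Check that ``var_list`` is a Variable or a non-empty list of Variable,
    otherwise raise ValueError.
    """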
if not isinstance(var_list, list):
var_list = [var_list]
if not var_list or not all([isinstance(var, Variable) for var in var_list]):
raise ValueError(
"'{}' should be a Variable or a list of Variable.".format(name))
def _normalize_path_prefix(path_prefix):
"""
convert path_prefix to absolute path.
"""
if not isinstance(path_prefix, six.string_types):
raise ValueError("'path_prefix' should be a string.")
if path_prefix.endswith("/"):
raise ValueError("'path_prefix' should not be a directory")
path_prefix = os.path.normpath(path_prefix)
path_prefix = os.path.abspath(path_prefix)
return path_prefix
def _get_valid_program(program=None):
"""
return default main program if program is None.
"""
if program is None:
program = default_main_program()
elif isinstance(program, CompiledProgram):
program = program._program
if program is None:
raise TypeError(
"The type of input program is invalid, expected tyep is Program, but received None"
)
warnings.warn(
"The input is a CompiledProgram, this is not recommended.")
if not isinstance(program, Program):
raise TypeError(
"The type of input program is invalid, expected type is fluid.Program, but received %s"
% type(program))
return program
def _clone_var_in_block(block, var):
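    """
    Create a persistable copy of ``var`` in ``block``, keeping the lod_level
    only for LOD_TENSOR variables.
    """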
assert isinstance(var, Variable)
if var.desc.type() == core.VarDesc.VarType.LOD_TENSOR:
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True)
else:
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
persistable=True)
def normalize_program(program, feed_vars, fetch_vars):
"""
:api_attr: Static Graph
Normalize/Optimize a program according to feed_vars and fetch_vars.
Args:
program(Program): Specify a program you want to optimize.
feed_vars(Variable | list[Variable]): Variables needed by inference.
fetch_vars(Variable | list[Variable]): Variables returned by inference.
Returns:
Program: Normalized/Optimized program.
Raises:
TypeError: If `program` is not a Program, an exception is thrown.
TypeError: If `feed_vars` is not a Variable or a list of Variable, an exception is thrown.
TypeError: If `fetch_vars` is not a Variable or a list of Variable, an exception is thrown.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# normalize main program.
            program = paddle.static.default_main_program()
normalized_program = paddle.static.normalize_program(program, [image], [predict])
"""
if not isinstance(program, Program):
raise TypeError(
"program type must be `fluid.Program`, but received `%s`" %
type(program))
if not isinstance(feed_vars, list):
feed_vars = [feed_vars]
if not all(isinstance(v, Variable) for v in feed_vars):
raise TypeError(
"feed_vars type must be a Variable or a list of Variable.")
if not isinstance(fetch_vars, list):
fetch_vars = [fetch_vars]
if not all(isinstance(v, Variable) for v in fetch_vars):
raise TypeError(
"fetch_vars type must be a Variable or a list of Variable.")
    # walk the ops: clear each op's device attribute and remind users to set
    # auc_states to 0 if an auc op is found.
for op in program.global_block().ops:
# clear device of Op
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
op._set_attr(device_attr_name, "")
if op.type == 'auc':
warnings.warn("Be sure that you have set auc states to 0 "
"before saving inference model.")
break
# fix the bug that the activation op's output as target will be pruned.
# will affect the inference performance.
# TODO(Superjomn) add an IR pass to remove 1-scale op.
with program_guard(program):
uniq_fetch_vars = []
for i, var in enumerate(fetch_vars):
var = layers.scale(
var, 1., name="save_infer_model/scale_{}".format(i))
uniq_fetch_vars.append(var)
fetch_vars = uniq_fetch_vars
    # clone the program and prune it down to the inference graph
copy_program = program.clone()
global_block = copy_program.global_block()
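    # clear target flags and drop existing feed/fetch ops; fresh feed/fetch ops
    # are added back below for the given feed/fetch vars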
remove_op_idx = []
for i, op in enumerate(global_block.ops):
op.desc.set_is_target(False)
if op.type == "feed" or op.type == "fetch":
remove_op_idx.append(i)
for idx in remove_op_idx[::-1]:
global_block._remove_op(idx)
copy_program.desc.flush()
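    # prune ops and vars that are unreachable from the fetch targets given the
    # feeds, then apply inference-time optimizations (e.g. remove read ops)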
feed_var_names = [var.name for var in feed_vars]
copy_program = copy_program._prune_with_input(
feeded_var_names=feed_var_names, targets=fetch_vars)
copy_program = copy_program._inference_optimize(prune_read_op=True)
fetch_var_names = [var.name for var in fetch_vars]
prepend_feed_ops(copy_program, feed_var_names)
append_fetch_ops(copy_program, fetch_var_names)
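    # record the current program format version in the cloned program desc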
copy_program.desc._set_version()
return copy_program
def is_persistable(var):
"""
Check whether the given variable is persistable.
Args:
var(Variable): The variable to be checked.
Returns:
bool: True if the given `var` is persistable
False if not.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
param = fluid.default_main_program().global_block().var('fc.b')
res = fluid.io.is_persistable(param)
"""
if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \
var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
var.desc.type() == core.VarDesc.VarType.READER:
return False
return var.persistable
@static_only
def serialize_program(feed_vars, fetch_vars, **kwargs):
"""
:api_attr: Static Graph
Serialize default main program according to feed_vars and fetch_vars.
Args:
feed_vars(Variable | list[Variable]): Variables needed by inference.
fetch_vars(Variable | list[Variable]): Variables returned by inference.
        kwargs: Supported keys include 'program'. Note that kwargs is mainly used for backward compatibility.
- program(Program): specify a program if you don't want to use default main program.
Returns:
bytes: serialized program.
Raises:
ValueError: If `feed_vars` is not a Variable or a list of Variable, an exception is thrown.
ValueError: If `fetch_vars` is not a Variable or a list of Variable, an exception is thrown.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# serialize the default main program to bytes.
serialized_program = paddle.static.serialize_program([image], [predict])
# deserialize bytes to program
deserialized_program = paddle.static.deserialize_program(serialized_program)
"""
# verify feed_vars
_check_vars('feed_vars', feed_vars)
# verify fetch_vars
_check_vars('fetch_vars', fetch_vars)
program = _get_valid_program(kwargs.get('program', None))
program = normalize_program(program, feed_vars, fetch_vars)
return _serialize_program(program)
def _serialize_program(program):
"""
serialize given program to bytes.
"""
return program.desc.serialize_to_string()
@static_only
def serialize_persistables(feed_vars, fetch_vars, executor, **kwargs):
"""
:api_attr: Static Graph
Serialize parameters using given executor and default main program according to feed_vars and fetch_vars.
Args:
feed_vars(Variable | list[Variable]): Variables needed by inference.
fetch_vars(Variable | list[Variable]): Variables returned by inference.
        kwargs: Supported keys include 'program'. Note that kwargs is mainly used for backward compatibility.
- program(Program): specify a program if you don't want to use default main program.
Returns:
        bytes: serialized parameters.
Raises:
ValueError: If `feed_vars` is not a Variable or a list of Variable, an exception is thrown.
ValueError: If `fetch_vars` is not a Variable or a list of Variable, an exception is thrown.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# serialize parameters to bytes.
serialized_params = paddle.static.serialize_persistables([image], [predict], exe)
# deserialize bytes to parameters.
main_program = paddle.static.default_main_program()
deserialized_params = paddle.static.deserialize_persistables(main_program, serialized_params, exe)
"""
# verify feed_vars
_check_vars('feed_vars', feed_vars)
# verify fetch_vars
_check_vars('fetch_vars', fetch_vars)
program = _get_valid_program(kwargs.get('program', None))
program = normalize_program(program, feed_vars, fetch_vars)
return _serialize_persistables(program, executor)
def _serialize_persistables(program, executor):
"""
Serialize parameters using given program and executor.
"""
vars_ = list(filter(is_persistable, program.list_vars()))
# warn if no variable found in model
if len(vars_) == 0:
warnings.warn("no variable in your model, please ensure there are any "
"variables in your model to save")
return None
    # create a new program and clone persistable vars to it
save_program = Program()
save_block = save_program.global_block()
save_var_map = {}
for var in vars_:
if var.type != core.VarDesc.VarType.RAW:
var_copy = _clone_var_in_block(save_block, var)
save_var_map[var_copy.name] = var
# create in_vars and out_var, then append a save_combine op to save_program
in_vars = []
for name in sorted(save_var_map.keys()):
in_vars.append(save_var_map[name])
out_var_name = unique_name.generate("out_var")
out_var = save_block.create_var(
type=core.VarDesc.VarType.RAW, name=out_var_name)
out_var.desc.set_persistable(True)
save_block.append_op(
type='save_combine',
inputs={'X': in_vars},
outputs={'Y': out_var},
attrs={'file_path': '',
'save_to_memory': True})
# run save_program to save vars
# NOTE(zhiqiu): save op will add variable kLookupTablePath to save_program.desc,
# which leads to diff between save_program and its desc. Call _sync_with_cpp
# to keep consistency.
save_program._sync_with_cpp()
executor.run(save_program)
# return serialized bytes in out_var
return global_scope().find_var(out_var_name).get_bytes()
def save_to_file(path, content):
"""
Save content to given path.
Args:
path(str): Path to write content to.
content(bytes): Content to write.
Returns:
None
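    Examples:
        .. code-block:: python
            # A minimal illustrative sketch (not part of the original docstring);
            # the file name and byte content below are placeholders.
            from paddle.fluid.io import save_to_file
            save_to_file("./program_bytes.bin", b"serialized bytes")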
"""
if not isinstance(content, bytes):
raise ValueError("'content' type should be bytes.")
with open(path, "wb") as f:
f.write(content)
@static_only
def save_inference_model(path_prefix, feed_vars, fetch_vars, executor,
**kwargs):
"""
:api_attr: Static Graph
Save current model and its parameters to given path. i.e.
Given path_prefix = "/path/to/modelname", after invoking
save_inference_model(path_prefix, feed_vars, fetch_vars, executor),
you will find two files named modelname.pdmodel and modelname.pdiparams
under "/path/to", which represent your model and parameters respectively.
Args:
path_prefix(str): Directory path to save model + model name without suffix.
feed_vars(Variable | list[Variable]): Variables needed by inference.
fetch_vars(Variable | list[Variable]): Variables returned by inference.
executor(Executor): The executor that saves the inference model. You can refer
to :ref:`api_guide_executor_en` for more details.
        kwargs: Supported keys include 'program'. Note that kwargs is mainly used for backward compatibility.
- program(Program): specify a program if you don't want to use default main program.
Returns:
None
Raises:
ValueError: If `feed_vars` is not a Variable or a list of Variable, an exception is thrown.
ValueError: If `fetch_vars` is not a Variable or a list of Variable, an exception is thrown.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# Feed data and train process
# Save inference model. Note we don't save label and loss in this example
paddle.static.save_inference_model(path_prefix, [image], [predict], exe)
            # In this example, save_inference_model will prune the default
            # main program according to the network's input node (img) and output node (predict).
# The pruned inference program is going to be saved in file "./infer_model.pdmodel"
# and parameters are going to be saved in file "./infer_model.pdiparams".
"""
# check path_prefix, set model_path and params_path
path_prefix = _normalize_path_prefix(path_prefix)
try:
# mkdir may conflict if pserver and trainer are running on the same machine
dirname = os.path.dirname(path_prefix)
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
model_path = path_prefix + ".pdmodel"
params_path = path_prefix + ".pdiparams"
if os.path.isdir(model_path):
raise ValueError("'{}' is an existing directory.".format(model_path))
if os.path.isdir(params_path):
raise ValueError("'{}' is an existing directory.".format(params_path))
# verify feed_vars
_check_vars('feed_vars', feed_vars)
# verify fetch_vars
_check_vars('fetch_vars', fetch_vars)
program = _get_valid_program(kwargs.get('program', None))
program = normalize_program(program, feed_vars, fetch_vars)
# serialize and save program
program_bytes = _serialize_program(program)
save_to_file(model_path, program_bytes)
# serialize and save params
params_bytes = _serialize_persistables(program, executor)
save_to_file(params_path, params_bytes)
@static_only
def deserialize_program(data):
"""
:api_attr: Static Graph
Deserialize given data to a program.
Args:
data(bytes): serialized program.
Returns:
Program: deserialized program.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# serialize the default main program to bytes.
serialized_program = paddle.static.serialize_program([image], [predict])
# deserialize bytes to program
deserialized_program = paddle.static.deserialize_program(serialized_program)
"""
program = Program.parse_from_string(data)
if not core._is_program_version_supported(program._version()):
raise ValueError("Unsupported program version: %d\n" %
program._version())
return program
@static_only
def deserialize_persistables(program, data, executor):
"""
:api_attr: Static Graph
Deserialize given data to parameters according to given program and executor.
Args:
program(Program): program that contains parameter names (to deserialize).
data(bytes): serialized parameters.
executor(Executor): executor used to run load op.
Returns:
        None. The deserialized parameters are loaded into the scope used by the given executor.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
path_prefix = "./infer_model"
            # User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
predict = paddle.static.nn.fc(image, 10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(predict, label)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
# serialize parameters to bytes.
serialized_params = paddle.static.serialize_persistables([image], [predict], exe)
# deserialize bytes to parameters.
main_program = paddle.static.default_main_program()
deserialized_params = paddle.static.deserialize_persistables(main_program, serialized_params, exe)
"""
if not isinstance(program, Program):
raise TypeError(
"program type must be `fluid.Program`, but received `%s`" %
type(program))
# load params to a tmp program
load_program = Program()
load_block = load_program.global_block()
vars_ = list(filter(is_persistable, program.list_vars()))
origin_shape_map = {}
load_var_map = {}
check_vars = []
sparse_vars = []
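    # Clone each persistable variable into the temporary load program, recording
    # the original shape of every Parameter so it can be validated after loading;
    # SELECTED_ROWS (sparse) variables are collected separately.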
for var in vars_:
assert isinstance(var, Variable)
if var.type == core.VarDesc.VarType.RAW:
continue
if isinstance(var, Parameter):
origin_shape_map[var.name] = tuple(var.desc.get_shape())
if var.type == core.VarDesc.VarType.SELECTED_ROWS:
sparse_vars.append(var)
continue
var_copy = _clone_var_in_block(load_block, var)
check_vars.append(var)
load_var_map[var_copy.name] = var_copy
# append load_combine op to load parameters,
load_var_list = []
for name in sorted(load_var_map.keys()):
load_var_list.append(load_var_map[name])
load_block.append_op(
type='load_combine',
inputs={},
outputs={"Out": load_var_list},
# if load from memory, file_path is data
attrs={'file_path': data,
'model_from_memory': True})
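    # Running the temporary program executes load_combine, which materializes the
    # parameter tensors in the global scope from the serialized bytes.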
executor.run(load_program)
# check var shape
for var in check_vars:
if not isinstance(var, Parameter):
continue
var_tmp = paddle.fluid.global_scope().find_var(var.name)
        assert var_tmp is not None, "cannot find var: " + var.name
new_shape = (np.array(var_tmp.get_tensor())).shape
assert var.name in origin_shape_map, var.name + " MUST in var list."
origin_shape = origin_shape_map.get(var.name)
if new_shape != origin_shape:
raise RuntimeError(
"Shape mismatch, program needs a parameter with shape ({}), "
"but the loaded parameter ('{}') has a shape of ({}).".format(
origin_shape, var.name, new_shape))
def load_from_file(path):
"""
Load file in binary mode.
Args:
        path(str): Path of an existing file.
Returns:
bytes: Content of file.
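    Examples:
        .. code-block:: python
            # A minimal illustrative sketch (not part of the original docstring);
            # the path below is a placeholder for a previously saved model file.
            from paddle.fluid.io import load_from_file
            program_bytes = load_from_file("./infer_model.pdmodel")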
"""
with open(path, 'rb') as f:
data = f.read()
return data
@static_only
def load_inference_model(path_prefix, executor, **kwargs):
"""
:api_attr: Static Graph
Load inference model from a given path. By this API, you can get the model
    structure (Inference Program) and model parameters.
Args:
path_prefix(str | None): One of the following:
          - Directory path of the saved model + model name without suffix.
- Set to None when reading the model from memory.
executor(Executor): The executor to run for loading inference model.
See :ref:`api_guide_executor_en` for more details about it.
        kwargs: Supported keys include 'model_filename', 'params_filename'. Note that kwargs is mainly used for backward compatibility.
- model_filename(str): specify model_filename if you don't want to use default name.
- params_filename(str): specify params_filename if you don't want to use default name.
Returns:
list: The return of this API is a list with three elements:
(program, feed_target_names, fetch_targets). The `program` is a
``Program`` (refer to :ref:`api_guide_Program_en`), which is used for inference.
The `feed_target_names` is a list of ``str``, which contains names of variables
that need to feed data in the inference program. The `fetch_targets` is a list of
``Variable`` (refer to :ref:`api_guide_Program_en`). It contains variables from which
we can get inference results.
Raises:
ValueError: If `path_prefix.pdmodel` or `path_prefix.pdiparams` doesn't exist.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_static()
# Build the model
startup_prog = paddle.static.default_startup_program()
main_prog = paddle.static.default_main_program()
with paddle.static.program_guard(main_prog, startup_prog):
image = paddle.static.data(name="img", shape=[64, 784])
w = paddle.create_parameter(shape=[784, 200], dtype='float32')
b = paddle.create_parameter(shape=[200], dtype='float32')
hidden_w = paddle.matmul(x=image, y=w)
hidden_b = paddle.add(hidden_w, b)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
# Save the inference model
path_prefix = "./infer_model"
paddle.static.save_inference_model(path_prefix, [image], [hidden_b], exe)
[inference_program, feed_target_names, fetch_targets] = (
paddle.static.load_inference_model(path_prefix, exe))
tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32)
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
# In this example, the inference program was saved in file
# "./infer_model.pdmodel" and parameters were saved in file
# " ./infer_model.pdiparams".
# By the inference program, feed_target_names and
# fetch_targets, we can use an executor to run the inference
# program to get the inference result.
"""
# check kwargs
supported_args = ('model_filename', 'params_filename')
deprecated_args = ('pserver_endpoints', )
caller = inspect.currentframe().f_code.co_name
_check_args(caller, kwargs, supported_args, deprecated_args)
# load from memory
if path_prefix is None:
_logger.warning("Load inference model from memory is deprecated.")
model_filename = kwargs.get('model_filename', None)
params_filename = kwargs.get('params_filename', None)
if params_filename is None:
raise ValueError(
"params_filename cannot be None when path_prefix is None.")
load_dirname = ''
program_bytes = model_filename
params_filename = params_filename
# load from file
else:
# check and norm path_prefix
path_prefix = _normalize_path_prefix(path_prefix)
# set model_path and params_path in new way,
# path_prefix represents a file path without suffix in this case.
if not kwargs:
model_path = path_prefix + ".pdmodel"
params_path = path_prefix + ".pdiparams"
# set model_path and params_path in old way for compatible,
# path_prefix represents a directory path.
else:
model_filename = kwargs.get('model_filename', None)
params_filename = kwargs.get('params_filename', None)
# set model_path
if model_filename is None:
model_path = os.path.join(path_prefix, "__model__")
else:
model_path = os.path.join(path_prefix,
model_filename + ".pdmodel")
if not os.path.exists(model_path):
model_path = os.path.join(path_prefix, model_filename)
# set params_path
if params_filename is None:
params_path = os.path.join(path_prefix, "")
else:
params_path = os.path.join(path_prefix,
params_filename + ".pdiparams")
if not os.path.exists(params_path):
params_path = os.path.join(path_prefix, params_filename)
_logger.warning("The old way to load inference model is deprecated."
" model path: {}, params path: {}".format(
model_path, params_path))
program_bytes = load_from_file(model_path)
load_dirname = os.path.dirname(params_path)
params_filename = os.path.basename(params_path)
# deserialize bytes to program
program = deserialize_program(program_bytes)
# load params data
params_path = os.path.join(load_dirname, params_filename)
params_bytes = load_from_file(params_path)
# deserialize bytes to params
deserialize_persistables(program, params_bytes, executor)
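    # Recover the feed/fetch target names recorded in the serialized program desc.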
feed_target_names = program.desc.get_feed_target_names()
fetch_target_names = program.desc.get_fetch_target_names()
fetch_targets = [
program.global_block().var(name) for name in fetch_target_names
]
return [program, feed_target_names, fetch_targets]
| apache-2.0 | -2,991,404,676,168,279,000 | 36.602673 | 144 | 0.626587 | false | 4.095144 | false | false | false |