# repo: hdinsight/hue
# file: desktop/core/ext-py/lxml-3.4.4/src/lxml/html/tests/test_frames.py
# license: apache-2.0
import unittest, sys
from lxml.tests.common_imports import make_doctest, doctest
import lxml.html
from lxml.html import html_parser, XHTML_NAMESPACE
class FrameTest(unittest.TestCase):
def test_parse_fragments_fromstring(self):
parser = lxml.html.HTMLParser(encoding='utf-8', remove_comments=True)
html = """<frameset>
<frame src="main.php" name="srcpg" id="srcpg" frameborder="0" rolling="Auto" marginwidth="" marginheight="0">
</frameset>"""
etree_document = lxml.html.fragments_fromstring(html, parser=parser)
self.assertEqual(len(etree_document), 1)
root = etree_document[0]
self.assertEqual(root.tag, "frameset")
frame_element = root[0]
self.assertEqual(frame_element.tag, 'frame')
def test_parse_fromstring(self):
parser = lxml.html.HTMLParser(encoding='utf-8', remove_comments=True)
html = """<html><frameset>
<frame src="main.php" name="srcpg" id="srcpg" frameborder="0" rolling="Auto" marginwidth="" marginheight="0">
</frameset></html>"""
etree_document = lxml.html.fromstring(html, parser=parser)
self.assertEqual(etree_document.tag, 'html')
self.assertEqual(len(etree_document), 1)
frameset_element = etree_document[0]
self.assertEqual(len(frameset_element), 1)
frame_element = frameset_element[0]
self.assertEqual(frame_element.tag, 'frame')
def test_suite():
loader = unittest.TestLoader()
return loader.loadTestsFromModule(sys.modules[__name__]) | apache-2.0 | -7,609,780,043,351,632,000 | -7,635,432,057,526,556,000 | 42.277778 | 121 | 0.66474 | false |
Silmathoron/nest-simulator | pynest/examples/if_curve.py | 5 | 5195 | # -*- coding: utf-8 -*-
#
# if_curve.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""IF curve example
----------------------
This example illustrates how to measure the I-F curve of a neuron.
The program creates a small group of neurons and injects a noisy current
:math:`I(t) = I_{mean} + I_{std} \cdot W(t)`
where :math:`W(t)` is a white noise process.
The program systematically drives the current through a series of values in
the two-dimensional `(I_mean, I_std)` space and measures the firing rate of
the neurons.
In this example, we measure the I-F curve of the adaptive exponential
integrate and fire neuron (``aeif_cond_exp``), but any other neuron model that
accepts current inputs is possible. The model and its parameters are
supplied when the IF_curve object is created.
"""
import numpy
import nest
import shelve
###############################################################################
# Here we define which model and the neuron parameters to use for measuring
# the transfer function.
model = 'aeif_cond_exp'
params = {'a': 4.0,
'b': 80.8,
'V_th': -50.4,
'Delta_T': 2.0,
'I_e': 0.0,
'C_m': 281.0,
'g_L': 30.0,
'V_reset': -70.6,
'tau_w': 144.0,
't_ref': 5.0,
'V_peak': -40.0,
'E_L': -70.6,
'E_ex': 0.,
'E_in': -70.}
class IF_curve():
t_inter_trial = 200. # Interval between two successive measurement trials
t_sim = 1000. # Duration of a measurement trial
n_neurons = 100 # Number of neurons
    n_threads = 4  # Number of threads to run the simulation
def __init__(self, model, params=False):
self.model = model
self.params = params
self.build()
self.connect()
def build(self):
#######################################################################
# We reset NEST to delete information from previous simulations
# and adjust the number of threads.
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': self.n_threads})
#######################################################################
# We set the default parameters of the neuron model to those
# defined above and create neurons and devices.
if self.params:
nest.SetDefaults(self.model, self.params)
self.neuron = nest.Create(self.model, self.n_neurons)
self.noise = nest.Create('noise_generator')
self.spike_detector = nest.Create('spike_detector')
def connect(self):
#######################################################################
# We connect the noisy current to the neurons and the neurons to
# the spike detectors.
nest.Connect(self.noise, self.neuron, 'all_to_all')
nest.Connect(self.neuron, self.spike_detector, 'all_to_all')
def output_rate(self, mean, std):
self.build()
self.connect()
#######################################################################
# We adjust the parameters of the noise according to the current
# values.
self.noise.set(mean=mean, std=std, start=0.0, stop=1000., origin=0.)
# We simulate the network and calculate the rate.
nest.Simulate(self.t_sim)
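        # n_events counts spikes across all neurons; t_sim is in ms, so the
        # factor of 1000 converts the count to a mean rate in spikes/s per neuron.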
rate = self.spike_detector.n_events * 1000. / (1. * self.n_neurons * self.t_sim)
return rate
def compute_transfer(self, i_mean=(400.0, 900.0, 50.0),
i_std=(0.0, 600.0, 50.0)):
#######################################################################
# We loop through all possible combinations of `(I_mean, I_sigma)`
# and measure the output rate of the neuron.
self.i_range = numpy.arange(*i_mean)
self.std_range = numpy.arange(*i_std)
self.rate = numpy.zeros((self.i_range.size, self.std_range.size))
nest.set_verbosity('M_WARNING')
for n, i in enumerate(self.i_range):
print('I = {0}'.format(i))
for m, std in enumerate(self.std_range):
self.rate[n, m] = self.output_rate(i, std)
transfer = IF_curve(model, params)
transfer.compute_transfer()
###############################################################################
# After the simulation is finished we store the data into a file for
# later analysis.
with shelve.open(model + '_transfer.dat') as dat:
dat['I_mean'] = transfer.i_range
dat['I_std'] = transfer.std_range
dat['rate'] = transfer.rate

# repo: Eficent/odoomrp-wip
# file: stock_quant_valuation/tests/test_stock_quant_valuation.py
# license: agpl-3.0
# -*- coding: utf-8 -*-
# (c) 2016 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import openerp.tests.common as common
from openerp import fields
class TestStockQuantValuation(common.TransactionCase):
def setUp(self):
super(TestStockQuantValuation, self).setUp()
self.quant_model = self.env['stock.quant']
self.hist_model = self.env['stock.history']
self.location_model = self.env['stock.location']
self.product = self.env.ref('product.product_product_34')
self.location = self.location_model.search([('usage', '=',
'internal')], limit=1)
def test_quant_valuation(self):
self.product.sudo().write({'cost_method': 'real',
'standard_price': 20,
'manual_standard_cost': 35})
quant = self.quant_model.create(
{'product_id': self.product.id,
'cost': 20,
'location_id': self.location.id,
'qty': 5})
self.assertEqual(quant.manual_value, (35 * 5),
"Incorrect Manual Value for quant.")
self.assertEqual(quant.real_value, (20 * 5),
"Incorrect Real Value for quant.")
def test_stock_history(self):
hist_line = self.hist_model.search([], limit=1)
hist_line.product_id.sudo().manual_standard_cost = 40
self.assertEqual(hist_line.manual_value, (hist_line.quantity * 40),
"Incorrect Manual Value for history line.")
self.assertEqual(hist_line.real_value,
(hist_line.quantity * hist_line.price_unit_on_quant),
"Incorrect Real Value for history line.")
def test_stock_history_read_group(self):
gfields = ['product_id', 'location_id', 'move_id', 'date', 'source',
'quantity', 'inventory_value', 'manual_value', 'real_value']
groupby = ['product_id']
domain = [('date', '<=', fields.Date.today())]
res = self.hist_model.read_group(
domain=domain, fields=gfields, groupby=groupby, offset=0,
limit=None, orderby=False, lazy=True)
if res:
line = res[0]
line_domain = line.get('__domain', domain)
group_lines = self.hist_model.search(line_domain)
sum_real = sum([x.real_value for x in group_lines])
sum_manual = sum([x.manual_value for x in group_lines])
self.assertEqual(line['real_value'], sum_real,
"Real value not correct sum.")
self.assertEqual(line['manual_value'], sum_manual,
"Manual value not correct sum.")

# repo: L3nzian/AutoPermit
# file: ap_connections.py
# license: gpl-2.0
#import pyodbc
import odbc
import _winreg
import ConfigParser
config_file = 'AutoPermit.ini'
def get_config_dict(config_file, section):
"""
Reads a config file
:param config_file: a config file usable by ConfigParser
:param section: a section in the config file
:return: a dictionary section's entries
"""
dict1 = {}
config = ConfigParser.ConfigParser()
config.optionxform = str # Override optionxform so that keys remain case-sensitive
config.read(config_file)
options = config.options(section)
for option in options:
try:
dict1[option] = config.get(section, option)
if dict1[option] == -1:
print 'not an option'
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
def get_odbc_connection_string(con_string):
"""
:param con_string: Name of the odbc connection
:return: odbc connection name/string from config_file
"""
odbc_connections = get_config_dict(config_file, 'odbc_connections')
return odbc_connections[con_string]
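# A minimal sketch of the AutoPermit.ini layout assumed by the helpers above
# (the DSN values shown are illustrative, not taken from the real file):
#
#   [odbc_connections]
#   DMMESDEConnection = DSN=DMMESDEConnection;Trusted_Connection=Yes
#   Dmlr_DB = DSN=Dmlr_DB;Trusted_Connection=Yes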
#print get_odbc_connection_string("DMMESDEConnection")
# def MakeConnection():
# try:
# strKeyPath = "SOFTWARE\ODBC\ODBC.INI\ODBC Data Sources"
# odbckey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, strKeyPath)
# except Exception as e:
# pass
# try:
# strValueName = "Dmlr_DB"
# strValue = "SQL Server"
# _winreg.SetValueEx(odbckey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# strKeyPath = "SOFTWARE\ODBC\ODBC.INI\Dmlr_DB"
# valKey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, strKeyPath)
# strValueName = "Database"
# strValue = "dmlr"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Driver"
# strValue = "C:\WINDOWS\System32\SQLSRV32.dll"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Server"
# strValue = "wsq00796"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Trusted_Connection"
# strValue = "Yes"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# _winreg.CloseKey(valKey)
# except:
# pass
# try:
# strValueName = "DMMESDEConnection"
# strValue = "SQL Server"
# _winreg.SetValueEx(odbckey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# strKeyPath = "SOFTWARE\ODBC\ODBC.INI\DMMESDEConnection"
# valKey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, strKeyPath)
# strValueName = "Database"
# strValue = "Ep"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Driver"
# strValue = "C:\WINDOWS\System32\SQLSRV32.dll"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Server"
# strValue = "wsq00796"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Trusted_Connection"
# strValue = "Yes"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# _winreg.CloseKey(valKey)
# except:
# pass
# try:
# strValueName = "WaterQuality"
# strValue = "SQL Server"
# _winreg.SetValueEx(odbckey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# strKeyPath = "SOFTWARE\ODBC\ODBC.INI\WaterQuality"
# valKey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, strKeyPath)
# strValueName = "Database"
# strValue = "WaterQuality"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Driver"
# strValue = "C:\WINDOWS\System32\SQLSRV32.dll"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Server"
# strValue = "DMMESDE"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Trusted_Connection"
# strValue = "Yes"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# _winreg.CloseKey(valKey)
# except:
# pass
# try:
# strValueName = "H2O"
# strValue = "SQL Server"
# _winreg.SetValueEx(odbckey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# strKeyPath = "SOFTWARE\ODBC\ODBC.INI\H2O"
# valKey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, strKeyPath)
# strValueName = "Database"
# strValue = "H2OSpatial"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Driver"
# strValue = "C:\WINDOWS\System32\SQLSRV32.dll"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Server"
# strValue = "DMMESDE"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Trusted_Connection"
# strValue = "Yes"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# _winreg.CloseKey(valKey)
# except:
# pass
# try:
# _winreg.CloseKey(odbckey)
# except:
# pass
# try:
# conn = odbc.odbc("Dmlr_DB")
# conn2 = odbc.odbc("WaterQuality")
# conn3 = odbc.odbc("DMMESDEConnection")
# except:
# try:
# conn = odbc.odbc("DSN=Dmlr_DB2;UID=ags_service;PWD=Welcome1234")
# conn2 = odbc.odbc("DSN=WaterQuality2;UID=wq_guest;PWD=wq_guest")
# conn3 = odbc.odbc("DSN=DMMESDEConnection2;UID=ags_service;PWD=Welcome1234")
# except:
# conn = 1
# conn2 = 1
# conn3 = 1
# connList = [conn, conn2, conn3]
# return connList
#
#
# def get_ep_cursor():
# con = pyodbc.connect('Trusted_Connection=yes',
# driver='{SQL Server}',
# server='wsq00796',
# database='EP')
# return con.cursor()
#
#
# def get_dmlr_cursor():
# con = pyodbc.connect('Trusted_Connection=yes',
# driver='{SQL Server}',
# server='wsq00796',
# database='dmlr')
# return con.cursor()
#
#
# def get_wq_cursor():
# con = pyodbc.connect('Trusted_Connection=yes',
# driver='{SQL Server}',
# server='dmmesde',
# database='WaterQuality')
# return con.cursor()

# repo: luoyetx/mxnet
# file: example/ssd/tools/caffe_converter/compare_layers.py
# license: apache-2.0
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test converted models layer by layer
"""
import os
import argparse
import logging
import mxnet as mx
import cv2
import numpy as np
logging.basicConfig(level=logging.INFO)
def read_image(img_path, image_dims=None, mean=None):
"""
Reads an image from file path or URL, optionally resizing to given image dimensions and
subtracting mean.
:param img_path: path to file, or url to download
:param image_dims: image dimensions to resize to, or None
:param mean: mean file to subtract, or None
:return: loaded image, in RGB format
"""
import urllib
filename = img_path.split("/")[-1]
if img_path.startswith('http'):
urllib.urlretrieve(img_path, filename)
img = cv2.imread(filename)
else:
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if image_dims is not None:
img = cv2.resize(img, image_dims) # resize to image_dims to fit model
img = np.rollaxis(img, 2) # change to (c, h, w) order
img = img[np.newaxis, :] # extend to (n, c, h, w)
if mean is not None:
mean = np.array(mean)
if mean.shape == (3,):
mean = mean[np.newaxis, :, np.newaxis, np.newaxis] # extend to (n, c, 1, 1)
img = img.astype(np.float32) - mean # subtract mean
return img
def _ch_dev(arg_params, aux_params, ctx):
"""
Changes device of given mxnet arguments
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param ctx: new device context
:return: arguments and auxiliary parameters on new device
"""
new_args = dict()
new_auxs = dict()
for k, v in arg_params.items():
new_args[k] = v.as_in_context(ctx)
for k, v in aux_params.items():
new_auxs[k] = v.as_in_context(ctx)
return new_args, new_auxs
def convert_and_compare_caffe_to_mxnet(image_url, gpu, caffe_prototxt_path, caffe_model_path,
caffe_mean, mean_diff_allowed, max_diff_allowed):
"""
Run the layer comparison on a caffe model, given its prototxt, weights and mean.
The comparison is done by inferring on a given image using both caffe and mxnet model
:param image_url: image file or url to run inference on
:param gpu: gpu to use, -1 for cpu
:param caffe_prototxt_path: path to caffe prototxt
:param caffe_model_path: path to caffe weights
:param caffe_mean: path to caffe mean file
"""
import caffe
from caffe_proto_utils import read_network_dag, process_network_proto, read_caffe_mean
from convert_model import convert_model
if isinstance(caffe_mean, str):
caffe_mean = read_caffe_mean(caffe_mean)
elif caffe_mean is None:
pass
elif len(caffe_mean) == 3:
# swap channels from Caffe BGR to RGB
caffe_mean = caffe_mean[::-1]
# get caffe root location, this is needed to run the upgrade network utility, so we only need
# to support parsing of latest caffe
caffe_root = os.path.dirname(os.path.dirname(caffe.__path__[0]))
caffe_prototxt_path = process_network_proto(caffe_root, caffe_prototxt_path)
_, layer_name_to_record, top_to_layers = read_network_dag(caffe_prototxt_path)
caffe.set_mode_cpu()
caffe_net = caffe.Net(caffe_prototxt_path, caffe_model_path, caffe.TEST)
image_dims = tuple(caffe_net.blobs['data'].shape)[2:4]
logging.info('getting image %s', image_url)
img_rgb = read_image(image_url, image_dims, caffe_mean)
img_bgr = img_rgb[:, ::-1, :, :]
caffe_net.blobs['data'].reshape(*img_bgr.shape)
caffe_net.blobs['data'].data[...] = img_bgr
_ = caffe_net.forward()
# read sym and add all outputs
sym, arg_params, aux_params, _ = convert_model(caffe_prototxt_path, caffe_model_path)
sym = sym.get_internals()
# now mxnet
if gpu < 0:
ctx = mx.cpu(0)
else:
ctx = mx.gpu(gpu)
arg_params, aux_params = _ch_dev(arg_params, aux_params, ctx)
arg_params["data"] = mx.nd.array(img_rgb, ctx)
arg_params["prob_label"] = mx.nd.empty((1,), ctx)
exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
exe.forward(is_train=False)
compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed)
return
def _bfs(root_node, process_node):
"""
Implementation of Breadth-first search (BFS) on caffe network DAG
:param root_node: root node of caffe network DAG
:param process_node: function to run on each node
"""
from collections import deque
seen_nodes = set()
next_nodes = deque()
seen_nodes.add(root_node)
next_nodes.append(root_node)
while next_nodes:
current_node = next_nodes.popleft()
# process current node
process_node(current_node)
for child_node in current_node.children:
if child_node not in seen_nodes:
seen_nodes.add(child_node)
next_nodes.append(child_node)
def compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed):
"""
Compare layer by layer of a caffe network with mxnet network
:param caffe_net: loaded caffe network
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param exe: mxnet model
:param layer_name_to_record: map between caffe layer and information record
:param top_to_layers: map between caffe blob name to layers which outputs it (including inplace)
:param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob
:param max_diff_allowed: max difference allowed between caffe blob and mxnet blob
"""
import re
log_format = ' {0:<40} {1:<40} {2:<8} {3:>10} {4:>10} {5:<1}'
compare_layers_from_nets.is_first_convolution = True
def _compare_blob(caf_blob, mx_blob, caf_name, mx_name, blob_type, note):
diff = np.abs(mx_blob - caf_blob)
diff_mean = diff.mean()
diff_max = diff.max()
logging.info(log_format.format(caf_name, mx_name, blob_type, '%4.5f' % diff_mean,
'%4.5f' % diff_max, note))
assert diff_mean < mean_diff_allowed
assert diff_max < max_diff_allowed
def _process_layer_parameters(layer):
logging.debug('processing layer %s of type %s', layer.name, layer.type)
normalized_layer_name = re.sub('[-/]', '_', layer.name)
# handle weight and bias of convolution and fully-connected layers
if layer.name in caffe_net.params and layer.type in ['Convolution', 'InnerProduct',
'Deconvolution']:
has_bias = len(caffe_net.params[layer.name]) > 1
mx_name_weight = '{}_weight'.format(normalized_layer_name)
mx_beta = arg_params[mx_name_weight].asnumpy()
# first convolution should change from BGR to RGB
if layer.type == 'Convolution' and compare_layers_from_nets.is_first_convolution:
compare_layers_from_nets.is_first_convolution = False
# if RGB or RGBA
if mx_beta.shape[1] == 3 or mx_beta.shape[1] == 4:
# Swapping BGR of caffe into RGB in mxnet
mx_beta[:, [0, 2], :, :] = mx_beta[:, [2, 0], :, :]
caf_beta = caffe_net.params[layer.name][0].data
_compare_blob(caf_beta, mx_beta, layer.name, mx_name_weight, 'weight', '')
if has_bias:
mx_name_bias = '{}_bias'.format(normalized_layer_name)
mx_gamma = arg_params[mx_name_bias].asnumpy()
caf_gamma = caffe_net.params[layer.name][1].data
_compare_blob(caf_gamma, mx_gamma, layer.name, mx_name_bias, 'bias', '')
elif layer.name in caffe_net.params and layer.type == 'Scale':
if 'scale' in normalized_layer_name:
bn_name = normalized_layer_name.replace('scale', 'bn')
elif 'sc' in normalized_layer_name:
bn_name = normalized_layer_name.replace('sc', 'bn')
else:
assert False, 'Unknown name convention for bn/scale'
beta_name = '{}_beta'.format(bn_name)
gamma_name = '{}_gamma'.format(bn_name)
mx_beta = arg_params[beta_name].asnumpy()
caf_beta = caffe_net.params[layer.name][1].data
_compare_blob(caf_beta, mx_beta, layer.name, beta_name, 'mov_mean', '')
mx_gamma = arg_params[gamma_name].asnumpy()
caf_gamma = caffe_net.params[layer.name][0].data
_compare_blob(caf_gamma, mx_gamma, layer.name, gamma_name, 'mov_var', '')
elif layer.name in caffe_net.params and layer.type == 'BatchNorm':
mean_name = '{}_moving_mean'.format(normalized_layer_name)
var_name = '{}_moving_var'.format(normalized_layer_name)
caf_rescale_factor = caffe_net.params[layer.name][2].data
mx_mean = aux_params[mean_name].asnumpy()
caf_mean = caffe_net.params[layer.name][0].data / caf_rescale_factor
_compare_blob(caf_mean, mx_mean, layer.name, mean_name, 'mean', '')
mx_var = aux_params[var_name].asnumpy()
caf_var = caffe_net.params[layer.name][1].data / caf_rescale_factor
_compare_blob(caf_var, mx_var, layer.name, var_name, 'var',
'expect 1e-04 change due to cudnn eps')
elif layer.type in ['Input', 'Pooling', 'ReLU', 'Eltwise', 'Softmax', 'LRN', 'Concat',
'Dropout', 'Crop']:
# no parameters to check for these layers
pass
else:
logging.warn('No handling for layer %s of type %s, should we ignore it?', layer.name,
layer.type)
return
def _process_layer_output(caffe_blob_name):
logging.debug('processing blob %s', caffe_blob_name)
# skip blobs not originating from actual layers, e.g. artificial split layers added by caffe
if caffe_blob_name not in top_to_layers:
return
caf_blob = caffe_net.blobs[caffe_blob_name].data
# data should change from BGR to RGB
if caffe_blob_name == 'data':
# if RGB or RGBA
if caf_blob.shape[1] == 3 or caf_blob.shape[1] == 4:
# Swapping BGR of caffe into RGB in mxnet
caf_blob[:, [0, 2], :, :] = caf_blob[:, [2, 0], :, :]
mx_name = 'data'
else:
# get last layer name which outputs this blob name
last_layer_name = top_to_layers[caffe_blob_name][-1]
normalized_last_layer_name = re.sub('[-/]', '_', last_layer_name)
mx_name = '{}_output'.format(normalized_last_layer_name)
if 'scale' in mx_name:
mx_name = mx_name.replace('scale', 'bn')
elif 'sc' in mx_name:
mx_name = mx_name.replace('sc', 'bn')
if mx_name not in exe.output_dict:
logging.error('mxnet blob %s is missing, time to extend the compare tool..', mx_name)
return
mx_blob = exe.output_dict[mx_name].asnumpy()
_compare_blob(caf_blob, mx_blob, caffe_blob_name, mx_name, 'output', '')
return
# check layer parameters
logging.info('\n***** Network Parameters '.ljust(140, '*'))
logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
first_layer_name = layer_name_to_record.keys()[0]
_bfs(layer_name_to_record[first_layer_name], _process_layer_parameters)
# check layer output
logging.info('\n***** Network Outputs '.ljust(140, '*'))
logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
for caffe_blob_name in caffe_net.blobs.keys():
_process_layer_output(caffe_blob_name)
return
def main():
"""Entrypoint for compare_layers"""
parser = argparse.ArgumentParser(
description='Tool for testing caffe to mxnet conversion layer by layer')
parser.add_argument('--image_url', type=str,
default='http://writm.com/wp-content/uploads/2016/08/Cat-hd-wallpapers.jpg',
help='input image to test inference, can be either file path or url')
parser.add_argument('--caffe_prototxt_path', type=str,
default='./model.prototxt',
help='path to caffe prototxt')
parser.add_argument('--caffe_model_path', type=str,
default='./model.caffemodel',
help='path to caffe weights')
parser.add_argument('--caffe_mean', type=str,
default='./model_mean.binaryproto',
help='path to caffe mean file')
parser.add_argument('--mean_diff_allowed', type=int, default=1e-03,
help='mean difference allowed between caffe blob and mxnet blob')
parser.add_argument('--max_diff_allowed', type=int, default=1e-01,
help='max difference allowed between caffe blob and mxnet blob')
parser.add_argument('--gpu', type=int, default=-1, help='the gpu id used for predict')
args = parser.parse_args()
convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu, args.caffe_prototxt_path,
args.caffe_model_path, args.caffe_mean,
args.mean_diff_allowed, args.max_diff_allowed)
if __name__ == '__main__':
main()

# repo: icgc-dcc/dcc-storage
# file: docs/conf.py
# license: gpl-3.0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# score documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 27 15:44:32 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.mathjax',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'score'
copyright = '2018, andricdu'
author = 'andricdu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.4.0'
# The full version, including alpha/beta/rc tags.
release = '1.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'scoredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'score.tex', 'score Documentation',
'andricdu', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'score', 'score Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'score', 'score Documentation',
author, 'score', 'One line description of project.',
'Miscellaneous'),
]

# repo: mezz64/home-assistant
# file: tests/components/august/test_config_flow.py
# license: apache-2.0
"""Test the August config flow."""
from august.authenticator import ValidationResult
from homeassistant import config_entries, setup
from homeassistant.components.august.const import (
CONF_ACCESS_TOKEN_CACHE_FILE,
CONF_INSTALL_ID,
CONF_LOGIN_METHOD,
DOMAIN,
VERIFICATION_CODE_KEY,
)
from homeassistant.components.august.exceptions import (
CannotConnect,
InvalidAuth,
RequireValidation,
)
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
return_value=True,
), patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "[email protected]"
assert result2["data"] == {
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
CONF_INSTALL_ID: None,
CONF_TIMEOUT: 10,
CONF_ACCESS_TOKEN_CACHE_FILE: "[email protected]",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=InvalidAuth,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_user_unexpected_exception(hass):
"""Test we handle an unexpected exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=ValueError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=CannotConnect,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_needs_validate(hass):
"""Test we present validation when we need to validate."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=RequireValidation,
), patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
return_value=True,
) as mock_send_verification_code:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert len(mock_send_verification_code.mock_calls) == 1
assert result2["type"] == "form"
assert result2["errors"] is None
assert result2["step_id"] == "validation"
# Try with the WRONG verification code give us the form back again
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=RequireValidation,
), patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_validate_verification_code",
return_value=ValidationResult.INVALID_VERIFICATION_CODE,
) as mock_validate_verification_code, patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
return_value=True,
) as mock_send_verification_code, patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry", return_value=True
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{VERIFICATION_CODE_KEY: "incorrect"},
)
# Make sure we do not resend the code again
# so they have a chance to retry
assert len(mock_send_verification_code.mock_calls) == 0
assert len(mock_validate_verification_code.mock_calls) == 1
assert result3["type"] == "form"
assert result3["errors"] is None
assert result3["step_id"] == "validation"
# Try with the CORRECT verification code and we setup
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
return_value=True,
), patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_validate_verification_code",
return_value=ValidationResult.VALIDATED,
) as mock_validate_verification_code, patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
return_value=True,
) as mock_send_verification_code, patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry", return_value=True
) as mock_setup_entry:
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{VERIFICATION_CODE_KEY: "correct"},
)
await hass.async_block_till_done()
assert len(mock_send_verification_code.mock_calls) == 0
assert len(mock_validate_verification_code.mock_calls) == 1
assert result4["type"] == "create_entry"
assert result4["title"] == "[email protected]"
assert result4["data"] == {
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
CONF_INSTALL_ID: None,
CONF_TIMEOUT: 10,
CONF_ACCESS_TOKEN_CACHE_FILE: "[email protected]",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_reauth(hass):
"""Test reauthenticate."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
CONF_INSTALL_ID: None,
CONF_TIMEOUT: 10,
CONF_ACCESS_TOKEN_CACHE_FILE: "[email protected]",
},
unique_id="[email protected]",
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=entry.data
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
return_value=True,
), patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_PASSWORD: "new-test-password",
},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "reauth_successful"
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1

# repo: MingdaZhou/gnuradio
# file: gr-wxgui/python/wxgui/const_window.py
# license: gpl-3.0
#
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
##################################################
# Imports
##################################################
import plotter
import common
import wx
import numpy
import math
import pubsub
from constants import *
from gnuradio import gr #for gr.prefs
import forms
##################################################
# Constants
##################################################
SLIDER_STEPS = 200
LOOP_BW_MIN_EXP, LOOP_BW_MAX_EXP = -6, 0.0
GAIN_MU_MIN_EXP, GAIN_MU_MAX_EXP = -6, -0.301
DEFAULT_FRAME_RATE = gr.prefs().get_long('wxgui', 'const_rate', 5)
DEFAULT_WIN_SIZE = (500, 400)
DEFAULT_CONST_SIZE = gr.prefs().get_long('wxgui', 'const_size', 2048)
CONST_PLOT_COLOR_SPEC = (0, 0, 1)
MARKER_TYPES = (
('Dot Small', 1.0),
('Dot Medium', 2.0),
('Dot Large', 3.0),
('Line Link', None),
)
DEFAULT_MARKER_TYPE = 2.0
##################################################
# Constellation window control panel
##################################################
class control_panel(wx.Panel):
"""
A control panel with wx widgits to control the plotter.
"""
def __init__(self, parent):
"""
Create a new control panel.
Args:
parent: the wx parent window
"""
self.parent = parent
wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
parent[SHOW_CONTROL_PANEL_KEY] = True
parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
control_box = forms.static_box_sizer(
parent=self, label='Options',
bold=True, orient=wx.VERTICAL,
)
#loop_bw
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Loop Bandwidth',
converter=forms.float_converter(),
ps=parent, key=LOOP_BW_KEY,
)
forms.log_slider(
sizer=control_box, parent=self,
min_exp=LOOP_BW_MIN_EXP,
max_exp=LOOP_BW_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=LOOP_BW_KEY,
)
#gain_mu
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Gain Mu',
converter=forms.float_converter(),
ps=parent, key=GAIN_MU_KEY,
)
forms.log_slider(
sizer=control_box, parent=self,
min_exp=GAIN_MU_MIN_EXP,
max_exp=GAIN_MU_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=GAIN_MU_KEY,
)
#marker
control_box.AddStretchSpacer()
forms.drop_down(
sizer=control_box, parent=self,
ps=parent, key=MARKER_KEY, label='Marker',
choices=map(lambda x: x[1], MARKER_TYPES),
labels=map(lambda x: x[0], MARKER_TYPES),
)
#run/stop
control_box.AddStretchSpacer()
forms.toggle_button(
sizer=control_box, parent=self,
true_label='Stop', false_label='Run',
ps=parent, key=RUNNING_KEY,
)
#set sizer
self.SetSizerAndFit(control_box)
##################################################
# Constellation window with plotter and control panel
##################################################
class const_window(wx.Panel, pubsub.pubsub):
def __init__(
self,
parent,
controller,
size,
title,
msg_key,
loop_bw_key,
gain_mu_key,
gain_omega_key,
omega_key,
sample_rate_key,
):
pubsub.pubsub.__init__(self)
#proxy the keys
self.proxy(MSG_KEY, controller, msg_key)
self.proxy(LOOP_BW_KEY, controller, loop_bw_key)
self.proxy(GAIN_MU_KEY, controller, gain_mu_key)
self.proxy(GAIN_OMEGA_KEY, controller, gain_omega_key)
self.proxy(OMEGA_KEY, controller, omega_key)
self.proxy(SAMPLE_RATE_KEY, controller, sample_rate_key)
#initialize values
self[RUNNING_KEY] = True
self[X_DIVS_KEY] = 8
self[Y_DIVS_KEY] = 8
self[MARKER_KEY] = DEFAULT_MARKER_TYPE
#init panel and plot
wx.Panel.__init__(self, parent, style=wx.SIMPLE_BORDER)
self.plotter = plotter.channel_plotter(self)
self.plotter.SetSize(wx.Size(*size))
self.plotter.SetSizeHints(*size)
self.plotter.set_title(title)
self.plotter.set_x_label('Inphase')
self.plotter.set_y_label('Quadrature')
self.plotter.enable_point_label(True)
self.plotter.enable_grid_lines(True)
#setup the box with plot and controls
self.control_panel = control_panel(self)
main_box = wx.BoxSizer(wx.HORIZONTAL)
main_box.Add(self.plotter, 1, wx.EXPAND)
main_box.Add(self.control_panel, 0, wx.EXPAND)
self.SetSizerAndFit(main_box)
#alpha and gain mu 2nd orders
def set_gain_omega(gain_mu): self[GAIN_OMEGA_KEY] = .25*gain_mu**2
self.subscribe(GAIN_MU_KEY, set_gain_omega)
#register events
self.subscribe(MSG_KEY, self.handle_msg)
self.subscribe(X_DIVS_KEY, self.update_grid)
self.subscribe(Y_DIVS_KEY, self.update_grid)
#initial update
self.update_grid()
def handle_msg(self, msg):
"""
Plot the samples onto the complex grid.
Args:
msg: the array of complex samples
"""
if not self[RUNNING_KEY]: return
#convert to complex floating point numbers
samples = numpy.fromstring(msg, numpy.complex64)
real = numpy.real(samples)
imag = numpy.imag(samples)
#plot
self.plotter.set_waveform(
channel=0,
samples=(real, imag),
color_spec=CONST_PLOT_COLOR_SPEC,
marker=self[MARKER_KEY],
)
#update the plotter
self.plotter.update()
def update_grid(self):
#update the x axis
x_max = 2.0
self.plotter.set_x_grid(-x_max, x_max, common.get_clean_num(2.0*x_max/self[X_DIVS_KEY]))
#update the y axis
y_max = 2.0
self.plotter.set_y_grid(-y_max, y_max, common.get_clean_num(2.0*y_max/self[Y_DIVS_KEY]))
#update plotter
self.plotter.update()

# repo: manasapte/pants
# file: src/python/pants/bin/remote_pants_runner.py
# license: apache-2.0
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import signal
import sys
from contextlib import contextmanager
from pants.java.nailgun_client import NailgunClient
from pants.java.nailgun_protocol import NailgunProtocol
from pants.pantsd.process_manager import ProcessMetadataManager
class RemotePantsRunner(object):
"""A thin client variant of PantsRunner."""
class PortNotFound(Exception): pass
PANTS_COMMAND = 'pants'
RECOVERABLE_EXCEPTIONS = (PortNotFound, NailgunClient.NailgunConnectionError)
def __init__(self, exiter, args, env, process_metadata_dir=None,
stdin=None, stdout=None, stderr=None):
"""
:param Exiter exiter: The Exiter instance to use for this run.
:param list args: The arguments (e.g. sys.argv) for this run.
:param dict env: The environment (e.g. os.environ) for this run.
:param str process_metadata_dir: The directory in which process metadata is kept.
:param file stdin: The stream representing stdin.
:param file stdout: The stream representing stdout.
:param file stderr: The stream representing stderr.
"""
self._exiter = exiter
self._args = args
self._env = env
self._process_metadata_dir = process_metadata_dir
self._stdin = stdin or sys.stdin
self._stdout = stdout or sys.stdout
self._stderr = stderr or sys.stderr
self._port = self._retrieve_pailgun_port()
if not self._port:
raise self.PortNotFound('unable to locate pailgun port!')
@staticmethod
def _combine_dicts(*dicts):
"""Combine one or more dicts into a new, unified dict (dicts to the right take precedence)."""
return {k: v for d in dicts for k, v in d.items()}
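  # For example, _combine_dicts({'a': 1}, {'a': 2, 'b': 3}) yields {'a': 2, 'b': 3}:
  # later dicts win on key collisions, matching the docstring above.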
@contextmanager
def _trapped_control_c(self, client):
"""A contextmanager that overrides the SIGINT (control-c) handler and handles it remotely."""
def handle_control_c(signum, frame):
client.send_control_c()
existing_sigint_handler = signal.signal(signal.SIGINT, handle_control_c)
signal.siginterrupt(signal.SIGINT, False) # Retry interrupted system calls.
try:
yield
finally:
signal.signal(signal.SIGINT, existing_sigint_handler)
def _retrieve_pailgun_port(self):
return ProcessMetadataManager(
self._process_metadata_dir).read_metadata_by_name('pantsd', 'socket_pailgun', int)
def run(self, args=None):
# Merge the nailgun TTY capability environment variables with the passed environment dict.
ng_env = NailgunProtocol.isatty_to_env(self._stdin, self._stdout, self._stderr)
modified_env = self._combine_dicts(self._env, ng_env)
# Instantiate a NailgunClient.
client = NailgunClient(port=self._port, ins=self._stdin, out=self._stdout, err=self._stderr)
with self._trapped_control_c(client):
# Execute the command on the pailgun.
result = client.execute(self.PANTS_COMMAND, *self._args, **modified_env)
# Exit.
self._exiter.exit(result)

# repo: darkleons/odoo
# file: addons/account/account_move_line.py
# license: agpl-3.0
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp import workflow
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp import tools
from openerp.report import report_sxw
import openerp
class account_move_line(osv.osv):
_name = "account.move.line"
_description = "Journal Items"
def _query_get(self, cr, uid, obj='l', context=None):
fiscalyear_obj = self.pool.get('account.fiscalyear')
fiscalperiod_obj = self.pool.get('account.period')
account_obj = self.pool.get('account.account')
fiscalyear_ids = []
context = dict(context or {})
initial_bal = context.get('initial_bal', False)
company_clause = " "
if context.get('company_id', False):
company_clause = " AND " +obj+".company_id = %s" % context.get('company_id', False)
if not context.get('fiscalyear', False):
if context.get('all_fiscalyear', False):
#this option is needed by the aged balance report because otherwise, if we search only the draft ones, an open invoice of a closed fiscalyear won't be displayed
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
else:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
else:
#for initial balance as well as for normal query, we check only the selected FY because the best practice is to generate the FY opening entries
fiscalyear_ids = [context['fiscalyear']]
fiscalyear_clause = (','.join([str(x) for x in fiscalyear_ids])) or '0'
state = context.get('state', False)
where_move_state = ''
where_move_lines_by_date = ''
if context.get('date_from', False) and context.get('date_to', False):
if initial_bal:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date < '" +context['date_from']+"')"
else:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= '" +context['date_from']+"' AND date <= '"+context['date_to']+"')"
if state:
if state.lower() not in ['all']:
where_move_state= " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state = '"+state+"')"
if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False):
if initial_bal:
period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id)], order='date_start', limit=1)[0]
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period, context['period_from'])
else:
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
if context.get('periods', False):
if initial_bal:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
period_ids = fiscalperiod_obj.search(cr, uid, [('id', 'in', context['periods'])], order='date_start', limit=1)
if period_ids and period_ids[0]:
first_period = fiscalperiod_obj.browse(cr, uid, period_ids[0], context=context)
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND date_start <= '%s' AND id NOT IN (%s)) %s %s" % (fiscalyear_clause, first_period.date_start, ids, where_move_state, where_move_lines_by_date)
else:
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) %s %s" % (fiscalyear_clause, ids, where_move_state, where_move_lines_by_date)
else:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
if initial_bal and not context.get('periods', False) and not where_move_lines_by_date:
#we didn't pass any filter in the context, and the initial balance can't be computed using only the fiscalyear otherwise entries will be summed twice
#so we have to invalidate this query
raise osv.except_osv(_('Warning!'),_("You have not supplied enough arguments to compute the initial balance, please select a period and a journal in the context."))
if context.get('journal_ids', False):
query += ' AND '+obj+'.journal_id IN (%s)' % ','.join(map(str, context['journal_ids']))
if context.get('chart_account_id', False):
child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
query += ' AND '+obj+'.account_id IN (%s)' % ','.join(map(str, child_ids))
query += company_clause
return query
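    # A hedged usage sketch (not part of the original file): callers are
    # expected to splice the returned string into a WHERE clause, with the
    # table alias matching the ``obj`` parameter, e.g.:
    #
    #   query = self.pool.get('account.move.line')._query_get(cr, uid, obj='l', context=context)
    #   cr.execute("SELECT l.account_id, SUM(l.debit - l.credit) "
    #              "FROM account_move_line l "
    #              "WHERE " + query + " GROUP BY l.account_id")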
def _amount_residual(self, cr, uid, ids, field_names, args, context=None):
"""
This function returns the residual amount on a receivable or payable account.move.line.
By default, it returns an amount in the currency of this journal entry (maybe different
of the company currency), but if you pass 'residual_in_company_currency' = True in the
context then the returned amount will be in company currency.
"""
res = {}
if context is None:
context = {}
cur_obj = self.pool.get('res.currency')
for move_line in self.browse(cr, uid, ids, context=context):
res[move_line.id] = {
'amount_residual': 0.0,
'amount_residual_currency': 0.0,
}
if move_line.reconcile_id:
continue
if not move_line.account_id.reconcile:
#this function does not suport to be used on move lines not related to a reconcilable account
continue
if move_line.currency_id:
move_line_total = move_line.amount_currency
sign = move_line.amount_currency < 0 and -1 or 1
else:
move_line_total = move_line.debit - move_line.credit
sign = (move_line.debit - move_line.credit) < 0 and -1 or 1
line_total_in_company_currency = move_line.debit - move_line.credit
context_unreconciled = context.copy()
if move_line.reconcile_partial_id:
for payment_line in move_line.reconcile_partial_id.line_partial_ids:
if payment_line.id == move_line.id:
continue
if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:
move_line_total += payment_line.amount_currency
else:
if move_line.currency_id:
context_unreconciled.update({'date': payment_line.date})
amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)
move_line_total += amount_in_foreign_currency
else:
move_line_total += (payment_line.debit - payment_line.credit)
line_total_in_company_currency += (payment_line.debit - payment_line.credit)
result = move_line_total
res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)
res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency
return res
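    # Sketch of how the functional fields computed above are typically read
    # (an illustration with invented values, not code from the original file):
    #
    #   res = self._amount_residual(cr, uid, [line_id],
    #       ['amount_residual', 'amount_residual_currency'], None, context={})
    #   # res looks like:
    #   # {line_id: {'amount_residual': 70.0, 'amount_residual_currency': 0.0}}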
def default_get(self, cr, uid, fields, context=None):
data = self._default_get(cr, uid, fields, context=context)
for f in data.keys():
if f not in fields:
del data[f]
return data
def _prepare_analytic_line(self, cr, uid, obj_line, context=None):
"""
Prepare the values given at the create() of account.analytic.line upon the validation of a journal item having
an analytic account. This method is intended to be extended in other modules.
:param obj_line: browse record of the account.move.line that triggered the analytic line creation
"""
return {'name': obj_line.name,
'date': obj_line.date,
'account_id': obj_line.analytic_account_id.id,
'unit_amount': obj_line.quantity,
'product_id': obj_line.product_id and obj_line.product_id.id or False,
'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,
'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),
'general_account_id': obj_line.account_id.id,
'journal_id': obj_line.journal_id.analytic_journal_id.id,
'ref': obj_line.ref,
'move_id': obj_line.id,
'user_id': uid,
}
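    # Since _prepare_analytic_line is an extension hook, a depending module
    # would usually override it and enrich the returned dict. A minimal sketch
    # (hypothetical module, not part of this file):
    #
    #   class account_move_line(osv.osv):
    #       _inherit = 'account.move.line'
    #
    #       def _prepare_analytic_line(self, cr, uid, obj_line, context=None):
    #           res = super(account_move_line, self)._prepare_analytic_line(
    #               cr, uid, obj_line, context=context)
    #           res['ref'] = obj_line.move_id.name  # hypothetical extra value
    #           return res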
def create_analytic_lines(self, cr, uid, ids, context=None):
acc_ana_line_obj = self.pool.get('account.analytic.line')
for obj_line in self.browse(cr, uid, ids, context=context):
if obj_line.analytic_account_id:
if not obj_line.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (obj_line.journal_id.name, ))
if obj_line.analytic_lines:
acc_ana_line_obj.unlink(cr,uid,[obj.id for obj in obj_line.analytic_lines])
vals_line = self._prepare_analytic_line(cr, uid, obj_line, context=context)
acc_ana_line_obj.create(cr, uid, vals_line)
return True
def _default_get_move_form_hook(self, cursor, user, data):
        '''Called at the end of the default_get method for manual entries in the account_move form'''
        if 'analytic_account_id' in data:
            del data['analytic_account_id']
        if 'account_tax_id' in data:
            del data['account_tax_id']
return data
def convert_to_period(self, cr, uid, context=None):
if context is None:
context = {}
period_obj = self.pool.get('account.period')
#check if the period_id changed in the context from client side
if context.get('period_id', False):
period_id = context.get('period_id')
            if isinstance(period_id, basestring):
ids = period_obj.search(cr, uid, [('name', 'ilike', period_id)])
context = dict(context, period_id=ids and ids[0] or False)
return context
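    # Example of the conversion performed above (the period name is an
    # invented value): when the client sends a period name instead of an id,
    #
    #   ctx = self.convert_to_period(cr, uid, context={'period_id': '03/2014'})
    #
    # returns a copy of the context where ctx['period_id'] is the id of the
    # first account.period whose name matches '03/2014', or False if none does.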
def _default_get(self, cr, uid, fields, context=None):
#default_get should only do the following:
# -propose the next amount in debit/credit in order to balance the move
# -propose the next account from the journal (default debit/credit account) accordingly
context = dict(context or {})
account_obj = self.pool.get('account.account')
period_obj = self.pool.get('account.period')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
tax_obj = self.pool.get('account.tax')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
if not context.get('journal_id', False):
context['journal_id'] = context.get('search_default_journal_id', False)
if not context.get('period_id', False):
context['period_id'] = context.get('search_default_period_id', False)
context = self.convert_to_period(cr, uid, context)
# Compute simple values
data = super(account_move_line, self).default_get(cr, uid, fields, context=context)
if context.get('journal_id'):
total = 0.0
#in account.move form view, it is not possible to compute total debit and credit using
#a browse record. So we must use the context to pass the whole one2many field and compute the total
if context.get('line_id'):
for move_line_dict in move_obj.resolve_2many_commands(cr, uid, 'line_id', context.get('line_id'), context=context):
data['name'] = data.get('name') or move_line_dict.get('name')
data['partner_id'] = data.get('partner_id') or move_line_dict.get('partner_id')
total += move_line_dict.get('debit', 0.0) - move_line_dict.get('credit', 0.0)
elif context.get('period_id'):
#find the date and the ID of the last unbalanced account.move encoded by the current user in that journal and period
move_id = False
cr.execute('''SELECT move_id, date FROM account_move_line
WHERE journal_id = %s AND period_id = %s AND create_uid = %s AND state = %s
ORDER BY id DESC limit 1''', (context['journal_id'], context['period_id'], uid, 'draft'))
res = cr.fetchone()
move_id = res and res[0] or False
data['date'] = res and res[1] or period_obj.browse(cr, uid, context['period_id'], context=context).date_start
data['move_id'] = move_id
if move_id:
#if there exist some unbalanced accounting entries that match the journal and the period,
#we propose to continue the same move by copying the ref, the name, the partner...
move = move_obj.browse(cr, uid, move_id, context=context)
data.setdefault('name', move.line_id[-1].name)
for l in move.line_id:
data['partner_id'] = data.get('partner_id') or l.partner_id.id
data['ref'] = data.get('ref') or l.ref
total += (l.debit or 0.0) - (l.credit or 0.0)
#compute the total of current move
data['debit'] = total < 0 and -total or 0.0
data['credit'] = total > 0 and total or 0.0
            #pick the right account on the journal depending on whether the next proposed line will be a debit or a credit
journal_data = journal_obj.browse(cr, uid, context['journal_id'], context=context)
account = total > 0 and journal_data.default_credit_account_id or journal_data.default_debit_account_id
#map the account using the fiscal position of the partner, if needed
part = data.get('partner_id') and partner_obj.browse(cr, uid, data['partner_id'], context=context) or False
if account and data.get('partner_id'):
account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
account = account_obj.browse(cr, uid, account, context=context)
data['account_id'] = account and account.id or False
#compute the amount in secondary currency of the account, if needed
if account and account.currency_id:
data['currency_id'] = account.currency_id.id
#set the context for the multi currency change
compute_ctx = context.copy()
compute_ctx.update({
                    #the following 2 parameters are used to choose the currency rate, in case the account
                    #doesn't use the 'at date' currency rate method but 'average'
'res.currency.compute.account': account,
'res.currency.compute.account_invert': True,
})
if data.get('date'):
compute_ctx.update({'date': data['date']})
data['amount_currency'] = currency_obj.compute(cr, uid, account.company_id.currency_id.id, data['currency_id'], -total, context=compute_ctx)
data = self._default_get_move_form_hook(cr, uid, data)
return data
def on_create_write(self, cr, uid, id, context=None):
if not id:
return []
ml = self.browse(cr, uid, id, context=context)
return map(lambda x: x.id, ml.move_id.line_id)
def _balance(self, cr, uid, ids, name, arg, context=None):
if context is None:
context = {}
c = context.copy()
c['initital_bal'] = True
sql = """SELECT l1.id, COALESCE(SUM(l2.debit-l2.credit), 0)
FROM account_move_line l1 LEFT JOIN account_move_line l2
ON (l1.account_id = l2.account_id
AND l2.id <= l1.id
AND """ + \
self._query_get(cr, uid, obj='l2', context=c) + \
") WHERE l1.id IN %s GROUP BY l1.id"
cr.execute(sql, [tuple(ids)])
return dict(cr.fetchall())
def _invoice(self, cursor, user, ids, name, arg, context=None):
invoice_obj = self.pool.get('account.invoice')
res = {}
for line_id in ids:
res[line_id] = False
cursor.execute('SELECT l.id, i.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' \
'AND l.id IN %s',
(tuple(ids),))
invoice_ids = []
for line_id, invoice_id in cursor.fetchall():
res[line_id] = invoice_id
invoice_ids.append(invoice_id)
invoice_names = {False: ''}
for invoice_id, name in invoice_obj.name_get(cursor, user, invoice_ids, context=context):
invoice_names[invoice_id] = name
for line_id in res.keys():
invoice_id = res[line_id]
res[line_id] = (invoice_id, invoice_names[invoice_id])
return res
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for line in self.browse(cr, uid, ids, context=context):
if line.ref:
result.append((line.id, (line.move_id.name or '')+' ('+line.ref+')'))
else:
result.append((line.id, line.move_id.name))
return result
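    # Sketch of the display names produced above (illustrative values only):
    #
    #   self.name_get(cr, uid, [7, 8])
    #   # -> [(7, 'SAJ/2014/002 (INV/2014/0042)'),   # line with a ref
    #   #     (8, 'SAJ/2014/003')]                   # line without a ref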
def _balance_search(self, cursor, user, obj, name, args, domain=None, context=None):
if context is None:
context = {}
if not args:
return []
where = ' AND '.join(map(lambda x: '(abs(sum(debit-credit))'+x[1]+str(x[2])+')',args))
cursor.execute('SELECT id, SUM(debit-credit) FROM account_move_line \
GROUP BY id, debit, credit having '+where)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
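    # The fnct_search above lets the functional 'balance' field be used in
    # search domains. A hedged usage sketch (domain values are examples):
    #
    #   ids = self.search(cr, uid, [('balance', '>', 0)])
    #   # each domain tuple is turned into a HAVING clause such as
    #   # (abs(sum(debit-credit))>0) by the lambda above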
def _invoice_search(self, cursor, user, obj, name, args, context=None):
if not args:
return []
invoice_obj = self.pool.get('account.invoice')
i = 0
while i < len(args):
fargs = args[i][0].split('.', 1)
if len(fargs) > 1:
args[i] = (fargs[0], 'in', invoice_obj.search(cursor, user,
[(fargs[1], args[i][1], args[i][2])]))
i += 1
continue
if isinstance(args[i][2], basestring):
res_ids = invoice_obj.name_search(cursor, user, args[i][2], [],
args[i][1])
args[i] = (args[i][0], 'in', [x[0] for x in res_ids])
i += 1
qu1, qu2 = [], []
for x in args:
if x[1] != 'in':
if (x[2] is False) and (x[1] == '='):
qu1.append('(i.id IS NULL)')
elif (x[2] is False) and (x[1] == '<>' or x[1] == '!='):
qu1.append('(i.id IS NOT NULL)')
else:
qu1.append('(i.id %s %s)' % (x[1], '%s'))
qu2.append(x[2])
elif x[1] == 'in':
if len(x[2]) > 0:
qu1.append('(i.id IN (%s))' % (','.join(['%s'] * len(x[2]))))
qu2 += x[2]
else:
qu1.append(' (False)')
if qu1:
qu1 = ' AND' + ' AND'.join(qu1)
else:
qu1 = ''
cursor.execute('SELECT l.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' + qu1, qu2)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _get_move_lines(self, cr, uid, ids, context=None):
result = []
for move in self.pool.get('account.move').browse(cr, uid, ids, context=context):
for line in move.line_id:
result.append(line.id)
return result
    def _get_reconcile(self, cr, uid, ids, name, unknow_none, context=None):
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.reconcile_id:
res[line.id] = str(line.reconcile_id.name)
elif line.reconcile_partial_id:
res[line.id] = str(line.reconcile_partial_id.name)
return res
def _get_move_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
move_line_ids = []
if move:
            move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('move_id', 'in', move.keys())], context=context)
return move_line_ids
_columns = {
'name': fields.char('Name', required=True),
'quantity': fields.float('Quantity', digits=(16,2), help="The optional quantity expressed by this line, eg: number of product sold. The quantity is not a legal requirement but is very useful for some reports."),
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade", domain=[('type','<>','view'), ('type', '<>', 'closed')], select=2),
'move_id': fields.many2one('account.move', 'Journal Entry', ondelete="cascade", help="The move of this entry line.", select=2, required=True),
'narration': fields.related('move_id','narration', type='text', relation='account.move', string='Internal Note'),
'ref': fields.related('move_id', 'ref', string='Reference', type='char', store=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement', help="The bank statement used for bank reconciliation", select=1, copy=False),
'reconcile_id': fields.many2one('account.move.reconcile', 'Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_partial_id': fields.many2one('account.move.reconcile', 'Partial Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_ref': fields.function(_get_reconcile, type='char', string='Reconcile Ref', oldname='reconcile', store={
'account.move.line': (lambda self, cr, uid, ids, c={}: ids, ['reconcile_id','reconcile_partial_id'], 50),'account.move.reconcile': (_get_move_from_reconcile, None, 50)}),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
'amount_residual_currency': fields.function(_amount_residual, string='Residual Amount in Currency', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in its currency (maybe different of the company currency)."),
'amount_residual': fields.function(_amount_residual, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in the company currency."),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'journal_id': fields.related('move_id', 'journal_id', string='Journal', type='many2one', relation='account.journal', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['journal_id'], 20)
}),
'period_id': fields.related('move_id', 'period_id', string='Period', type='many2one', relation='account.period', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['period_id'], 20)
}),
'blocked': fields.boolean('No Follow-up', help="You can check this box to mark this journal item as a litigation with the associated partner"),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, ondelete='restrict'),
'date_maturity': fields.date('Due date', select=True ,help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line."),
'date': fields.related('move_id','date', string='Effective date', type='date', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['date'], 20)
}),
'date_created': fields.date('Creation date', select=True),
'analytic_lines': fields.one2many('account.analytic.line', 'move_id', 'Analytic lines'),
'centralisation': fields.selection([('normal','Normal'),('credit','Credit Centralisation'),('debit','Debit Centralisation'),('currency','Currency Adjustment')], 'Centralisation', size=8),
'balance': fields.function(_balance, fnct_search=_balance_search, string='Balance'),
'state': fields.selection([('draft','Unbalanced'), ('valid','Balanced')], 'Status', readonly=True, copy=False),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Account', help="The Account can either be a base tax code or a tax code account."),
'tax_amount': fields.float('Tax/Base Amount', digits_compute=dp.get_precision('Account'), select=True, help="If the Tax account is a tax code account, this field will contain the taxed amount.If the tax account is base tax code, "\
"this field will contain the basic amount(without tax)."),
'invoice': fields.function(_invoice, string='Invoice',
type='many2one', relation='account.invoice', fnct_search=_invoice_search),
'account_tax_id':fields.many2one('account.tax', 'Tax', copy=False),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company',
string='Company', store=True, readonly=True)
}
def _get_date(self, cr, uid, context=None):
        if context is None:
            context = {}
period_obj = self.pool.get('account.period')
dt = time.strftime('%Y-%m-%d')
if context.get('journal_id') and context.get('period_id'):
cr.execute('SELECT date FROM account_move_line ' \
'WHERE journal_id = %s AND period_id = %s ' \
'ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id']))
res = cr.fetchone()
if res:
dt = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'], context=context)
dt = period.date_start
return dt
def _get_currency(self, cr, uid, context=None):
if context is None:
context = {}
if not context.get('journal_id', False):
return False
cur = self.pool.get('account.journal').browse(cr, uid, context['journal_id']).currency
return cur and cur.id or False
def _get_period(self, cr, uid, context=None):
"""
Return default account period value
"""
context = context or {}
if context.get('period_id', False):
return context['period_id']
account_period_obj = self.pool.get('account.period')
ids = account_period_obj.find(cr, uid, context=context)
period_id = False
if ids:
period_id = ids[0]
return period_id
def _get_journal(self, cr, uid, context=None):
"""
Return journal based on the journal type
"""
context = context or {}
if context.get('journal_id', False):
return context['journal_id']
journal_id = False
journal_pool = self.pool.get('account.journal')
if context.get('journal_type', False):
jids = journal_pool.search(cr, uid, [('type','=', context.get('journal_type'))])
if not jids:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_journal_form')
msg = _("""Cannot find any account journal of "%s" type for this company, You should create one.\n Please go to Journal Configuration""") % context.get('journal_type').replace('_', ' ').title()
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
journal_id = jids[0]
return journal_id
_defaults = {
'blocked': False,
'centralisation': 'normal',
'date': _get_date,
'date_created': fields.date.context_today,
'state': 'draft',
'currency_id': _get_currency,
'journal_id': _get_journal,
'credit': 0.0,
'debit': 0.0,
'amount_currency': 0.0,
'account_id': lambda self, cr, uid, c: c.get('account_id', False),
'period_id': _get_period,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.move.line', context=c)
}
_order = "date desc, id desc"
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
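    # Worked example of the two CHECK constraints above (illustrative values):
    # credit*debit=0 forces at most one side of a line to be non-zero, and
    # credit+debit>=0 rejects lines where both sides would be negative.
    #
    #   {'debit': 100.0, 'credit': 0.0}    # ok: 100*0 = 0 and 100+0 >= 0
    #   {'debit': 100.0, 'credit': 50.0}   # rejected: 100*50 != 0
    #   {'debit': -10.0, 'credit': 0.0}    # rejected: -10+0 < 0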
def _auto_init(self, cr, context=None):
res = super(account_move_line, self)._auto_init(cr, context=context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'account_move_line_journal_id_period_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_journal_id_period_id_index ON account_move_line (journal_id, period_id)')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('account_move_line_date_id_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_date_id_index ON account_move_line (date DESC, id desc)')
return res
def _check_no_view(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type in ('view', 'consolidation'):
return False
return True
def _check_no_closed(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'closed':
raise osv.except_osv(_('Error!'), _('You cannot create journal items on a closed account %s %s.') % (l.account_id.code, l.account_id.name))
return True
def _check_company_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.company_id != l.account_id.company_id or l.company_id != l.period_id.company_id:
return False
return True
def _check_date(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.journal_id.allow_date:
if not time.strptime(l.date[:10],'%Y-%m-%d') >= time.strptime(l.period_id.date_start, '%Y-%m-%d') or not time.strptime(l.date[:10], '%Y-%m-%d') <= time.strptime(l.period_id.date_stop, '%Y-%m-%d'):
return False
return True
def _check_currency(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.account_id.currency_id:
if not l.currency_id or not l.currency_id.id == l.account_id.currency_id.id:
return False
return True
def _check_currency_and_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if (l.amount_currency and not l.currency_id):
return False
return True
def _check_currency_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.amount_currency:
if (l.amount_currency > 0.0 and l.credit > 0.0) or (l.amount_currency < 0.0 and l.debit > 0.0):
return False
return True
def _check_currency_company(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.currency_id.id == l.company_id.currency_id.id:
return False
return True
_constraints = [
(_check_no_view, 'You cannot create journal items on an account of type view or consolidation.', ['account_id']),
(_check_no_closed, 'You cannot create journal items on closed account.', ['account_id']),
(_check_company_id, 'Account and Period must belong to the same company.', ['company_id']),
(_check_date, 'The date of your Journal Entry is not in the defined period! You should change the date or remove this constraint from the journal.', ['date']),
(_check_currency, 'The selected account of your Journal Entry forces to provide a secondary currency. You should remove the secondary currency on the account or select a multi-currency view on the journal.', ['currency_id']),
(_check_currency_and_amount, "You cannot create journal items with a secondary currency without recording both 'currency' and 'amount currency' field.", ['currency_id','amount_currency']),
(_check_currency_amount, 'The amount expressed in the secondary currency must be positive when account is debited and negative when account is credited.', ['amount_currency']),
(_check_currency_company, "You cannot provide a secondary currency if it is the same than the company one." , ['currency_id']),
]
#TODO: ONCHANGE_ACCOUNT_ID: set account_tax_id
def onchange_currency(self, cr, uid, ids, account_id, amount, currency_id, date=False, journal=False, context=None):
if context is None:
context = {}
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
currency_obj = self.pool.get('res.currency')
if (not currency_id) or (not account_id):
return {}
result = {}
acc = account_obj.browse(cr, uid, account_id, context=context)
if (amount>0) and journal:
x = journal_obj.browse(cr, uid, journal).default_credit_account_id
if x: acc = x
context = dict(context)
context.update({
'date': date,
'res.currency.compute.account': acc,
})
v = currency_obj.compute(cr, uid, currency_id, acc.company_id.currency_id.id, amount, context=context)
result['value'] = {
'debit': v > 0 and v or 0.0,
'credit': v < 0 and -v or 0.0
}
return result
def onchange_partner_id(self, cr, uid, ids, move_id, partner_id, account_id=None, debit=0, credit=0, date=False, journal=False, context=None):
partner_obj = self.pool.get('res.partner')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
val['date_maturity'] = False
if not partner_id:
return {'value':val}
if not date:
date = datetime.now().strftime('%Y-%m-%d')
jt = False
if journal:
jt = journal_obj.browse(cr, uid, journal, context=context).type
part = partner_obj.browse(cr, uid, partner_id, context=context)
payment_term_id = False
if jt and jt in ('purchase', 'purchase_refund') and part.property_supplier_payment_term:
payment_term_id = part.property_supplier_payment_term.id
elif jt and part.property_payment_term:
payment_term_id = part.property_payment_term.id
if payment_term_id:
res = payment_term_obj.compute(cr, uid, payment_term_id, 100, date)
if res:
val['date_maturity'] = res[0][0]
if not account_id:
id1 = part.property_account_payable.id
id2 = part.property_account_receivable.id
if jt:
if jt in ('sale', 'purchase_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif jt in ('purchase', 'sale_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
elif jt in ('general', 'bank', 'cash'):
if part.customer:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif part.supplier:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
if val.get('account_id', False):
d = self.onchange_account_id(cr, uid, ids, account_id=val['account_id'], partner_id=part.id, context=context)
val.update(d['value'])
return {'value':val}
def onchange_account_id(self, cr, uid, ids, account_id=False, partner_id=False, context=None):
account_obj = self.pool.get('account.account')
partner_obj = self.pool.get('res.partner')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
if account_id:
res = account_obj.browse(cr, uid, account_id, context=context)
tax_ids = res.tax_ids
if tax_ids and partner_id:
part = partner_obj.browse(cr, uid, partner_id, context=context)
tax_id = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, tax_ids)[0]
else:
tax_id = tax_ids and tax_ids[0].id or False
val['account_tax_id'] = tax_id
return {'value': val}
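    # Sketch of the onchange payload shape returned above (ids are invented):
    #
    #   self.onchange_account_id(cr, uid, [], account_id=12, partner_id=5)
    #   # -> {'value': {'account_tax_id': 42}}   # the account's default tax,
    #   #                                        # mapped through the partner's
    #   #                                        # fiscal position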
#
    # type: the type of reconciliation (no logic behind this field, for info)
    #
    # writeoff: entry generated for the difference between the lines
#
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('fiscalyear'):
args.append(('period_id.fiscalyear_id', '=', context.get('fiscalyear', False)))
if context and context.get('next_partner_only', False):
if not context.get('partner_id', False):
partner = self.list_partners_to_reconcile(cr, uid, context=context)
if partner:
partner = partner[0]
else:
partner = context.get('partner_id', False)
if not partner:
return []
args.append(('partner_id', '=', partner[0]))
return super(account_move_line, self).search(cr, uid, args, offset, limit, order, context, count)
def prepare_move_lines_for_reconciliation_widget(self, cr, uid, lines, target_currency=False, target_date=False, context=None):
""" Returns move lines formatted for the manual/bank reconciliation widget
            :param target_currency: currency you want the move line debit/credit converted into
:param target_date: date to use for the monetary conversion
"""
if not lines:
return []
if context is None:
context = {}
ctx = context.copy()
currency_obj = self.pool.get('res.currency')
company_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
rml_parser = report_sxw.rml_parse(cr, uid, 'reconciliation_widget_aml', context=context)
ret = []
for line in lines:
partial_reconciliation_siblings_ids = []
if line.reconcile_partial_id:
partial_reconciliation_siblings_ids = self.search(cr, uid, [('reconcile_partial_id', '=', line.reconcile_partial_id.id)], context=context)
partial_reconciliation_siblings_ids.remove(line.id)
ret_line = {
'id': line.id,
'name': line.name != '/' and line.move_id.name + ': ' + line.name or line.move_id.name,
'ref': line.move_id.ref,
'account_code': line.account_id.code,
'account_name': line.account_id.name,
'account_type': line.account_id.type,
'date_maturity': line.date_maturity,
'date': line.date,
'period_name': line.period_id.name,
'journal_name': line.journal_id.name,
'partner_id': line.partner_id.id,
'partner_name': line.partner_id.name,
'is_partially_reconciled': bool(line.reconcile_partial_id),
'partial_reconciliation_siblings_ids': partial_reconciliation_siblings_ids,
}
# Amount residual can be negative
debit = line.debit
credit = line.credit
total_amount = abs(debit - credit)
total_amount_currency = line.amount_currency
amount_residual = line.amount_residual
amount_residual_currency = line.amount_residual_currency
if line.amount_residual < 0:
debit, credit = credit, debit
amount_residual = -amount_residual
amount_residual_currency = -amount_residual_currency
# Get right debit / credit:
line_currency = line.currency_id or company_currency
amount_currency_str = ""
total_amount_currency_str = ""
if line.currency_id and line.amount_currency:
amount_currency_str = rml_parser.formatLang(amount_residual_currency, currency_obj=line.currency_id)
total_amount_currency_str = rml_parser.formatLang(total_amount_currency, currency_obj=line.currency_id)
if target_currency and line_currency == target_currency and target_currency != company_currency:
debit = debit > 0 and amount_residual_currency or 0.0
credit = credit > 0 and amount_residual_currency or 0.0
amount_currency_str = rml_parser.formatLang(amount_residual, currency_obj=company_currency)
total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=company_currency)
amount_str = rml_parser.formatLang(debit or credit, currency_obj=target_currency)
total_amount_str = rml_parser.formatLang(total_amount_currency, currency_obj=target_currency)
else:
debit = debit > 0 and amount_residual or 0.0
credit = credit > 0 and amount_residual or 0.0
amount_str = rml_parser.formatLang(debit or credit, currency_obj=company_currency)
total_amount_str = rml_parser.formatLang(total_amount, currency_obj=company_currency)
if target_currency and target_currency != company_currency:
amount_currency_str = rml_parser.formatLang(debit or credit, currency_obj=line_currency)
total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=line_currency)
ctx = context.copy()
if target_date:
ctx.update({'date': target_date})
debit = currency_obj.compute(cr, uid, company_currency.id, target_currency.id, debit, context=ctx)
credit = currency_obj.compute(cr, uid, company_currency.id, target_currency.id, credit, context=ctx)
amount_str = rml_parser.formatLang(debit or credit, currency_obj=target_currency)
total_amount = currency_obj.compute(cr, uid, company_currency.id, target_currency.id, total_amount, context=ctx)
total_amount_str = rml_parser.formatLang(total_amount, currency_obj=target_currency)
ret_line['credit'] = credit
ret_line['debit'] = debit
ret_line['amount_str'] = amount_str
ret_line['amount_currency_str'] = amount_currency_str
ret_line['total_amount_str'] = total_amount_str # For partial reconciliations
ret_line['total_amount_currency_str'] = total_amount_currency_str
ret.append(ret_line)
return ret
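    # A minimal sketch of one formatted line returned to the reconciliation
    # widget (keys abridged, values invented for illustration):
    #
    #   {'id': 5, 'name': 'BNK1/2014/001: /', 'account_code': '411000',
    #    'debit': 100.0, 'credit': 0.0, 'amount_str': '100.00',
    #    'is_partially_reconciled': False, ...}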
def list_partners_to_reconcile(self, cr, uid, context=None):
cr.execute(
"""SELECT partner_id FROM (
SELECT l.partner_id, p.last_reconciliation_date, SUM(l.debit) AS debit, SUM(l.credit) AS credit, MAX(l.create_date) AS max_date
FROM account_move_line l
RIGHT JOIN account_account a ON (a.id = l.account_id)
RIGHT JOIN res_partner p ON (l.partner_id = p.id)
WHERE a.reconcile IS TRUE
AND l.reconcile_id IS NULL
AND l.state <> 'draft'
GROUP BY l.partner_id, p.last_reconciliation_date
) AS s
WHERE debit > 0 AND credit > 0 AND (last_reconciliation_date IS NULL OR max_date > last_reconciliation_date)
ORDER BY last_reconciliation_date""")
ids = [x[0] for x in cr.fetchall()]
if not ids:
return []
# To apply the ir_rules
partner_obj = self.pool.get('res.partner')
ids = partner_obj.search(cr, uid, [('id', 'in', ids)], context=context)
return partner_obj.name_get(cr, uid, ids, context=context)
def reconcile_partial(self, cr, uid, ids, type='auto', context=None, writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False):
move_rec_obj = self.pool.get('account.move.reconcile')
merges = []
unmerge = []
total = 0.0
merges_rec = []
company_list = []
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
            if company_list and line.company_id.id not in company_list:
                raise osv.except_osv(_('Warning!'), _('To reconcile the entries, the company should be the same for all entries.'))
company_list.append(line.company_id.id)
for line in self.browse(cr, uid, ids, context=context):
if line.account_id.currency_id:
currency_id = line.account_id.currency_id
else:
currency_id = line.company_id.currency_id
if line.reconcile_id:
raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s), Move '%s' is already reconciled!") % (line.name, line.id, line.move_id.name))
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
if line2.state != 'valid':
raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s) cannot be used in a reconciliation as it is not balanced!") % (line2.name, line2.id))
if not line2.reconcile_id:
if line2.id not in merges:
merges.append(line2.id)
if line2.account_id.currency_id:
total += line2.amount_currency
else:
total += (line2.debit or 0.0) - (line2.credit or 0.0)
merges_rec.append(line.reconcile_partial_id.id)
else:
unmerge.append(line.id)
if line.account_id.currency_id:
total += line.amount_currency
else:
total += (line.debit or 0.0) - (line.credit or 0.0)
if self.pool.get('res.currency').is_zero(cr, uid, currency_id, total):
res = self.reconcile(cr, uid, merges+unmerge, context=context, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id)
return res
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_partial_ids': map(lambda x: (4,x,False), merges+unmerge)
}, context=reconcile_context)
move_rec_obj.reconcile_partial_check(cr, uid, [r_id] + merges_rec, context=reconcile_context)
return r_id
def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
account_obj = self.pool.get('account.account')
move_obj = self.pool.get('account.move')
move_rec_obj = self.pool.get('account.move.reconcile')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
lines = self.browse(cr, uid, ids, context=context)
unrec_lines = filter(lambda x: not x['reconcile_id'], lines)
credit = debit = 0.0
currency = 0.0
account_id = False
partner_id = False
if context is None:
context = {}
company_list = []
for line in self.browse(cr, uid, ids, context=context):
            if company_list and line.company_id.id not in company_list:
                raise osv.except_osv(_('Warning!'), _('To reconcile the entries, the company should be the same for all entries.'))
company_list.append(line.company_id.id)
for line in unrec_lines:
            if line.state != 'valid':
raise osv.except_osv(_('Error!'),
_('Entry "%s" is not valid !') % line.name)
credit += line['credit']
debit += line['debit']
currency += line['amount_currency'] or 0.0
account_id = line['account_id']['id']
partner_id = (line['partner_id'] and line['partner_id']['id']) or False
writeoff = debit - credit
        # If date_p is given in the context, use that date
        if context.get('date_p'):
            date = context['date_p']
else:
date = time.strftime('%Y-%m-%d')
cr.execute('SELECT account_id, reconcile_id '\
'FROM account_move_line '\
'WHERE id IN %s '\
'GROUP BY account_id,reconcile_id',
(tuple(ids), ))
r = cr.fetchall()
#TODO: move this check to a constraint in the account_move_reconcile object
        if len(r) != 1:
            raise osv.except_osv(_('Error'), _('Entries are not of the same account or are already reconciled!'))
if not unrec_lines:
raise osv.except_osv(_('Error!'), _('Entry is already reconciled.'))
account = account_obj.browse(cr, uid, account_id, context=context)
if not account.reconcile:
            raise osv.except_osv(_('Error'), _('This account is not marked as reconcilable!'))
        if r[0][1] is not None:
raise osv.except_osv(_('Error!'), _('Some entries are already reconciled.'))
if (not currency_obj.is_zero(cr, uid, account.company_id.currency_id, writeoff)) or \
(account.currency_id and (not currency_obj.is_zero(cr, uid, account.currency_id, currency))):
if not writeoff_acc_id:
raise osv.except_osv(_('Warning!'), _('You have to provide an account for the write off/exchange difference entry.'))
if writeoff > 0:
debit = writeoff
credit = 0.0
self_credit = writeoff
self_debit = 0.0
else:
debit = 0.0
credit = -writeoff
self_credit = 0.0
self_debit = -writeoff
# If comment exist in context, take it
if 'comment' in context and context['comment']:
libelle = context['comment']
else:
libelle = _('Write-Off')
cur_obj = self.pool.get('res.currency')
cur_id = False
amount_currency_writeoff = 0.0
if context.get('company_currency_id',False) != context.get('currency_id',False):
cur_id = context.get('currency_id',False)
for line in unrec_lines:
if line.currency_id and line.currency_id.id == context.get('currency_id',False):
amount_currency_writeoff += line.amount_currency
else:
tmp_amount = cur_obj.compute(cr, uid, line.account_id.company_id.currency_id.id, context.get('currency_id',False), abs(line.debit-line.credit), context={'date': line.date})
amount_currency_writeoff += (line.debit > 0) and tmp_amount or -tmp_amount
writeoff_lines = [
(0, 0, {
'name': libelle,
'debit': self_debit,
'credit': self_credit,
'account_id': account_id,
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and -1 * amount_currency_writeoff or (account.currency_id.id and -1 * currency or 0.0)
}),
(0, 0, {
'name': libelle,
'debit': debit,
'credit': credit,
'account_id': writeoff_acc_id,
'analytic_account_id': context.get('analytic_id', False),
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and amount_currency_writeoff or (account.currency_id.id and currency or 0.0)
})
]
writeoff_move_id = move_obj.create(cr, uid, {
'period_id': writeoff_period_id,
'journal_id': writeoff_journal_id,
'date':date,
'state': 'draft',
'line_id': writeoff_lines
})
writeoff_line_ids = self.search(cr, uid, [('move_id', '=', writeoff_move_id), ('account_id', '=', account_id)])
if account_id == writeoff_acc_id:
writeoff_line_ids = [writeoff_line_ids[1]]
ids += writeoff_line_ids
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_id': map(lambda x: (4, x, False), ids),
'line_partial_ids': map(lambda x: (3, x, False), ids)
}, context=reconcile_context)
# the id of the move.reconcile is written in the move.line (self) by the create method above
# because of the way the line_id are defined: (4, x, False)
for id in ids:
workflow.trg_trigger(uid, 'account.move.line', id, cr)
if lines and lines[0]:
partner_id = lines[0].partner_id and lines[0].partner_id.id or False
if partner_id and not partner_obj.has_something_to_reconcile(cr, uid, partner_id, context=context):
partner_obj.mark_as_reconciled(cr, uid, [partner_id], context=context)
return r_id
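    # Hedged usage sketch (all ids are assumptions): fully reconciling an
    # invoice line against a payment line, writing the difference off on a
    # dedicated account:
    #
    #   self.reconcile(cr, uid, [invoice_line_id, payment_line_id],
    #       type='manual', writeoff_acc_id=writeoff_account_id,
    #       writeoff_period_id=period_id, writeoff_journal_id=misc_journal_id,
    #       context={'comment': 'Rounding difference'})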
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
context = self.convert_to_period(cr, user, context=context)
if context.get('account_id', False):
cr.execute('SELECT code FROM account_account WHERE id = %s', (context['account_id'], ))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
if (not context.get('journal_id', False)) or (not context.get('period_id', False)):
return False
if context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
cr.execute('SELECT code FROM account_journal WHERE id = %s', (context['journal_id'], ))
j = cr.fetchone()[0] or ''
cr.execute('SELECT code FROM account_period WHERE id = %s', (context['period_id'], ))
p = cr.fetchone()[0] or ''
if j or p:
return j + (p and (':' + p) or '')
return False
def onchange_date(self, cr, user, ids, date, context=None):
"""
Returns a dict that contains new values and context
@param cr: A database cursor
@param user: ID of the user currently logged in
@param date: latest value from user input for field date
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
if context is None:
context = {}
period_pool = self.pool.get('account.period')
pids = period_pool.find(cr, user, date, context=context)
if pids:
res.update({'period_id':pids[0]})
context = dict(context, period_id=pids[0])
return {
'value':res,
'context':context,
}
def _check_moves(self, cr, uid, context=None):
# use the first move ever created for this journal and period
if context is None:
context = {}
cr.execute('SELECT id, state, name FROM account_move WHERE journal_id = %s AND period_id = %s ORDER BY id limit 1', (context['journal_id'],context['period_id']))
res = cr.fetchone()
if res:
if res[1] != 'draft':
raise osv.except_osv(_('User Error!'),
_('The account move (%s) for centralisation ' \
'has been confirmed.') % res[2])
return res
def _remove_move_reconcile(self, cr, uid, move_ids=None, opening_reconciliation=False, context=None):
        # Remove the move reconcile ids related to the given moves
obj_move_line = self.pool.get('account.move.line')
obj_move_rec = self.pool.get('account.move.reconcile')
unlink_ids = []
if not move_ids:
return True
recs = obj_move_line.read(cr, uid, move_ids, ['reconcile_id', 'reconcile_partial_id'])
full_recs = filter(lambda x: x['reconcile_id'], recs)
rec_ids = [rec['reconcile_id'][0] for rec in full_recs]
part_recs = filter(lambda x: x['reconcile_partial_id'], recs)
part_rec_ids = [rec['reconcile_partial_id'][0] for rec in part_recs]
unlink_ids += rec_ids
unlink_ids += part_rec_ids
all_moves = obj_move_line.search(cr, uid, ['|',('reconcile_id', 'in', unlink_ids),('reconcile_partial_id', 'in', unlink_ids)])
all_moves = list(set(all_moves) - set(move_ids))
if unlink_ids:
if opening_reconciliation:
raise osv.except_osv(_('Warning!'),
_('Opening Entries have already been generated. Please run "Cancel Closing Entries" wizard to cancel those entries and then run this wizard.'))
obj_move_rec.write(cr, uid, unlink_ids, {'opening_reconciliation': False})
obj_move_rec.unlink(cr, uid, unlink_ids)
if len(all_moves) >= 2:
obj_move_line.reconcile_partial(cr, uid, all_moves, 'auto',context=context)
return True
def unlink(self, cr, uid, ids, context=None, check=True):
if context is None:
context = {}
move_obj = self.pool.get('account.move')
self._update_check(cr, uid, ids, context)
result = False
move_ids = set()
for line in self.browse(cr, uid, ids, context=context):
move_ids.add(line.move_id.id)
context['journal_id'] = line.journal_id.id
context['period_id'] = line.period_id.id
result = super(account_move_line, self).unlink(cr, uid, [line.id], context=context)
move_ids = list(move_ids)
if check and move_ids:
move_obj.validate(cr, uid, move_ids, context=context)
return result
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
if context is None:
context={}
move_obj = self.pool.get('account.move')
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
if isinstance(ids, (int, long)):
ids = [ids]
if vals.get('account_tax_id', False):
raise osv.except_osv(_('Unable to change tax!'), _('You cannot change the tax, you should remove and recreate lines.'))
if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
if update_check:
if ('account_id' in vals) or ('journal_id' in vals) or ('period_id' in vals) or ('move_id' in vals) or ('debit' in vals) or ('credit' in vals) or ('date' in vals):
self._update_check(cr, uid, ids, context)
todo_date = None
if vals.get('date', False):
todo_date = vals['date']
del vals['date']
for line in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
if not ctx.get('journal_id'):
if line.move_id:
ctx['journal_id'] = line.move_id.journal_id.id
else:
ctx['journal_id'] = line.journal_id.id
if not ctx.get('period_id'):
if line.move_id:
ctx['period_id'] = line.move_id.period_id.id
else:
ctx['period_id'] = line.period_id.id
#Check for centralisation
journal = journal_obj.browse(cr, uid, ctx['journal_id'], context=ctx)
if journal.centralisation:
self._check_moves(cr, uid, context=ctx)
result = super(account_move_line, self).write(cr, uid, ids, vals, context)
if check:
done = []
for line in self.browse(cr, uid, ids):
if line.move_id.id not in done:
done.append(line.move_id.id)
move_obj.validate(cr, uid, [line.move_id.id], context)
if todo_date:
move_obj.write(cr, uid, [line.move_id.id], {'date': todo_date}, context=context)
return result
def _update_journal_check(self, cr, uid, journal_id, period_id, context=None):
journal_obj = self.pool.get('account.journal')
period_obj = self.pool.get('account.period')
jour_period_obj = self.pool.get('account.journal.period')
cr.execute('SELECT state FROM account_journal_period WHERE journal_id = %s AND period_id = %s', (journal_id, period_id))
result = cr.fetchall()
journal = journal_obj.browse(cr, uid, journal_id, context=context)
period = period_obj.browse(cr, uid, period_id, context=context)
for (state,) in result:
if state == 'done':
                raise osv.except_osv(_('Error!'), _('You cannot add/modify entries in a closed period %s of journal %s.') % (period.name, journal.name))
if not result:
jour_period_obj.create(cr, uid, {
'name': (journal.code or journal.name)+':'+(period.name or ''),
'journal_id': journal.id,
'period_id': period.id
})
return True
def _update_check(self, cr, uid, ids, context=None):
done = {}
for line in self.browse(cr, uid, ids, context=context):
err_msg = _('Move name (id): %s (%s)') % (line.move_id.name, str(line.move_id.id))
            if line.move_id.state != 'draft' and (not line.journal_id.entry_posted):
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a confirmed entry. You can just change some non legal fields or you must unconfirm the journal entry first.\n%s.') % err_msg)
if line.reconcile_id:
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a reconciled entry. You can just change some non legal fields or you must unreconcile first.\n%s.') % err_msg)
t = (line.journal_id.id, line.period_id.id)
if t not in done:
self._update_journal_check(cr, uid, line.journal_id.id, line.period_id.id, context)
done[t] = True
return True
def create(self, cr, uid, vals, context=None, check=True):
account_obj = self.pool.get('account.account')
tax_obj = self.pool.get('account.tax')
move_obj = self.pool.get('account.move')
cur_obj = self.pool.get('res.currency')
journal_obj = self.pool.get('account.journal')
context = dict(context or {})
if vals.get('move_id', False):
move = self.pool.get('account.move').browse(cr, uid, vals['move_id'], context=context)
if move.company_id:
vals['company_id'] = move.company_id.id
if move.date and not vals.get('date'):
vals['date'] = move.date
if ('account_id' in vals) and not account_obj.read(cr, uid, [vals['account_id']], ['active'])[0]['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
if 'journal_id' in vals and vals['journal_id']:
context['journal_id'] = vals['journal_id']
if 'period_id' in vals and vals['period_id']:
context['period_id'] = vals['period_id']
if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']:
m = move_obj.browse(cr, uid, vals['move_id'])
context['journal_id'] = m.journal_id.id
context['period_id'] = m.period_id.id
#we need to treat the case where a value is given in the context for period_id as a string
if 'period_id' in context and not isinstance(context.get('period_id', ''), (int, long)):
period_candidate_ids = self.pool.get('account.period').name_search(cr, uid, name=context.get('period_id',''))
if len(period_candidate_ids) != 1:
raise osv.except_osv(_('Error!'), _('No period found or more than one period found for the given date.'))
context['period_id'] = period_candidate_ids[0][0]
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
self._update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
move_id = vals.get('move_id', False)
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
vals['journal_id'] = vals.get('journal_id') or context.get('journal_id')
vals['period_id'] = vals.get('period_id') or context.get('period_id')
vals['date'] = vals.get('date') or context.get('date')
if not move_id:
if journal.centralisation:
#Check for centralisation
res = self._check_moves(cr, uid, context)
if res:
vals['move_id'] = res[0]
if not vals.get('move_id', False):
if journal.sequence_id:
#name = self.pool.get('ir.sequence').next_by_id(cr, uid, journal.sequence_id.id)
v = {
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'period_id': context['period_id'],
'journal_id': context['journal_id']
}
if vals.get('ref', ''):
v.update({'ref': vals['ref']})
move_id = move_obj.create(cr, uid, v, context)
vals['move_id'] = move_id
else:
raise osv.except_osv(_('No Piece Number!'), _('Cannot create an automatic sequence for this piece.\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
ok = not (journal.type_control_ids or journal.account_control_ids)
if ('account_id' in vals):
account = account_obj.browse(cr, uid, vals['account_id'], context=context)
if journal.type_control_ids:
type = account.user_type
for t in journal.type_control_ids:
if type.code == t.code:
ok = True
break
if journal.account_control_ids and not ok:
for a in journal.account_control_ids:
if a.id == vals['account_id']:
ok = True
break
# Automatically convert in the account's secondary currency if there is one and
# the provided values were not already multi-currency
if account.currency_id and 'amount_currency' not in vals and account.currency_id.id != account.company_id.currency_id.id:
vals['currency_id'] = account.currency_id.id
ctx = {}
if 'date' in vals:
ctx['date'] = vals['date']
vals['amount_currency'] = cur_obj.compute(cr, uid, account.company_id.currency_id.id,
account.currency_id.id, vals.get('debit', 0.0)-vals.get('credit', 0.0), context=ctx)
if not ok:
raise osv.except_osv(_('Bad Account!'), _('You cannot use this general account in this journal, check the tab \'Entry Controls\' on the related journal.'))
result = super(account_move_line, self).create(cr, uid, vals, context=context)
# CREATE Taxes
if vals.get('account_tax_id', False):
tax_id = tax_obj.browse(cr, uid, vals['account_tax_id'])
total = vals['debit'] - vals['credit']
base_code = 'base_code_id'
tax_code = 'tax_code_id'
account_id = 'account_collected_id'
base_sign = 'base_sign'
tax_sign = 'tax_sign'
if journal.type in ('purchase_refund', 'sale_refund') or (journal.type in ('cash', 'bank') and total < 0):
base_code = 'ref_base_code_id'
tax_code = 'ref_tax_code_id'
account_id = 'account_paid_id'
base_sign = 'ref_base_sign'
tax_sign = 'ref_tax_sign'
tmp_cnt = 0
for tax in tax_obj.compute_all(cr, uid, [tax_id], total, 1.00, force_excluded=False).get('taxes'):
#create the base movement
if tmp_cnt == 0:
if tax[base_code]:
tmp_cnt += 1
if tax_id.price_include:
total = tax['price_unit']
newvals = {
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
}
if tax_id.price_include:
if tax['price_unit'] < 0:
newvals['credit'] = abs(tax['price_unit'])
else:
newvals['debit'] = tax['price_unit']
self.write(cr, uid, [result], newvals, context=context)
else:
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id', False),
'ref': vals.get('ref', False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
'account_id': vals['account_id'],
'credit': 0.0,
'debit': 0.0,
}
if data['tax_code_id']:
self.create(cr, uid, data, context)
#create the Tax movement
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id',False),
'ref': vals.get('ref',False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[tax_code],
'tax_amount': tax[tax_sign] * abs(tax['amount']),
'account_id': tax[account_id] or vals['account_id'],
'credit': tax['amount']<0 and -tax['amount'] or 0.0,
'debit': tax['amount']>0 and tax['amount'] or 0.0,
}
if data['tax_code_id']:
self.create(cr, uid, data, context)
del vals['account_tax_id']
if check and not context.get('novalidate') and (context.get('recompute', True) or journal.entry_posted):
tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
if journal.entry_posted and tmp:
move_obj.button_validate(cr,uid, [vals['move_id']], context)
return result
def list_periods(self, cr, uid, context=None):
ids = self.pool.get('account.period').search(cr,uid,[])
return self.pool.get('account.period').name_get(cr, uid, ids, context=context)
def list_journals(self, cr, uid, context=None):
ng = dict(self.pool.get('account.journal').name_search(cr,uid,'',[]))
ids = ng.keys()
result = []
for journal in self.pool.get('account.journal').browse(cr, uid, ids, context=context):
result.append((journal.id,ng[journal.id],journal.type,
bool(journal.currency),bool(journal.analytic_journal_id)))
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,685,565,673,740,274,000 | -17,290,871,717,163,108 | 54.165843 | 280 | 0.565588 | false |
fajoy/nova | nova/virt/baremetal/vif_driver.py | 2 | 3058 | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import context
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.baremetal import db as bmdb
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class BareMetalVIFDriver(object):
def _after_plug(self, instance, network, mapping, pif):
pass
def _after_unplug(self, instance, network, mapping, pif):
pass
def plug(self, instance, vif):
LOG.debug(_("plug: instance_uuid=%(uuid)s vif=%(vif)s")
% {'uuid': instance['uuid'], 'vif': vif})
network, mapping = vif
vif_uuid = mapping['vif_uuid']
ctx = context.get_admin_context()
node = bmdb.bm_node_get_by_instance_uuid(ctx, instance['uuid'])
# TODO(deva): optimize this database query
# this is just searching for a free physical interface
pifs = bmdb.bm_interface_get_all_by_bm_node_id(ctx, node['id'])
for pif in pifs:
if not pif['vif_uuid']:
bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], vif_uuid)
LOG.debug(_("pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)")
% {'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_plug(instance, network, mapping, pif)
return
# NOTE(deva): should this really be raising an exception
# when there are no physical interfaces left?
raise exception.NovaException(_(
"Baremetal node: %(id)s has no available physical interface"
" for virtual interface %(vif_uuid)s")
% {'id': node['id'], 'vif_uuid': vif_uuid})
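    # Illustrative sketch (not from the original file): the compute manager
    # drives this driver with legacy (network, mapping) vif tuples, roughly:
    #
    #   driver = BareMetalVIFDriver()
    #   vif = (network_info, {'vif_uuid': 'de305d54-75b4-431b-adb2-eb6b9e546014'})
    #   driver.plug(instance, vif)
    #   ...                      # instance runs
    #   driver.unplug(instance, vif)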
def unplug(self, instance, vif):
LOG.debug(_("unplug: instance_uuid=%(uuid)s vif=%(vif)s"),
{'uuid': instance['uuid'], 'vif': vif})
network, mapping = vif
vif_uuid = mapping['vif_uuid']
ctx = context.get_admin_context()
try:
pif = bmdb.bm_interface_get_by_vif_uuid(ctx, vif_uuid)
bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], None)
LOG.debug(_("pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)")
% {'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_unplug(instance, network, mapping, pif)
except exception.NovaException:
LOG.warn(_("no pif for vif_uuid=%s") % vif_uuid)
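# Illustrative sketch (not part of the original driver): how a caller is
# expected to drive plug()/unplug(). The vif tuple layout --
# (network, mapping) with mapping['vif_uuid'] -- mirrors what plug()
# unpacks above; the instance dict and uuid values below are hypothetical.
def _example_plug_unplug(driver, instance):
    vif = ({'id': 'fake-network'}, {'vif_uuid': 'fake-vif-uuid'})
    driver.plug(instance, vif)
    driver.unplug(instance, vif)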
| apache-2.0 | 1,917,987,362,197,263,400 | -7,245,324,590,478,735,000 | 40.324324 | 78 | 0.600065 | false |
Batterfii/django | tests/utils_tests/test_jslex.py | 153 | 9837 | # -*- coding: utf-8 -*-
"""Tests for jslex."""
# originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils.jslex import JsLexer, prepare_js_for_gettext
class JsTokensTest(SimpleTestCase):
LEX_CASES = [
# ids
("a ABC $ _ a123", ["id a", "id ABC", "id $", "id _", "id a123"]),
("\\u1234 abc\\u0020 \\u0065_\\u0067", ["id \\u1234", "id abc\\u0020", "id \\u0065_\\u0067"]),
# numbers
("123 1.234 0.123e-3 0 1E+40 1e1 .123", [
"dnum 123", "dnum 1.234", "dnum 0.123e-3", "dnum 0", "dnum 1E+40",
"dnum 1e1", "dnum .123",
]),
("0x1 0xabCD 0XABcd", ["hnum 0x1", "hnum 0xabCD", "hnum 0XABcd"]),
("010 0377 090", ["onum 010", "onum 0377", "dnum 0", "dnum 90"]),
("0xa123ghi", ["hnum 0xa123", "id ghi"]),
# keywords
("function Function FUNCTION", ["keyword function", "id Function", "id FUNCTION"]),
("const constructor in inherits", ["keyword const", "id constructor", "keyword in", "id inherits"]),
("true true_enough", ["reserved true", "id true_enough"]),
# strings
(''' 'hello' "hello" ''', ["string 'hello'", 'string "hello"']),
(r""" 'don\'t' "don\"t" '"' "'" '\'' "\"" """, [
r"""string 'don\'t'""", r'''string "don\"t"''', r"""string '"'""",
r'''string "'"''', r"""string '\''""", r'''string "\""'''
]),
(r'"ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""', [r'string "ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""']),
# comments
("a//b", ["id a", "linecomment //b"]),
("/****/a/=2//hello", ["comment /****/", "id a", "punct /=", "dnum 2", "linecomment //hello"]),
("/*\n * Header\n */\na=1;", ["comment /*\n * Header\n */", "id a", "punct =", "dnum 1", "punct ;"]),
# punctuation
("a+++b", ["id a", "punct ++", "punct +", "id b"]),
# regex
(r"a=/a*/,1", ["id a", "punct =", "regex /a*/", "punct ,", "dnum 1"]),
(r"a=/a*[^/]+/,1", ["id a", "punct =", "regex /a*[^/]+/", "punct ,", "dnum 1"]),
(r"a=/a*\[^/,1", ["id a", "punct =", r"regex /a*\[^/", "punct ,", "dnum 1"]),
(r"a=/\//,1", ["id a", "punct =", r"regex /\//", "punct ,", "dnum 1"]),
# next two are from http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
("""for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}""",
["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
"id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
"punct :", "regex /x:3;x<5;y</g", "punct /", "id i", "punct )", "punct {",
"id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
("""for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}""",
["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
"id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
"punct /", "id x", "punct :", "dnum 3", "punct ;", "id x", "punct <", "dnum 5",
"punct ;", "id y", "punct <", "regex /g/i", "punct )", "punct {",
"id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
# Various "illegal" regexes that are valid according to the std.
(r"""/????/, /++++/, /[----]/ """, ["regex /????/", "punct ,", "regex /++++/", "punct ,", "regex /[----]/"]),
# Stress cases from http://stackoverflow.com/questions/5533925/what-javascript-constructs-does-jslex-incorrectly-lex/5573409#5573409 # NOQA
(r"""/\[/""", [r"""regex /\[/"""]),
(r"""/[i]/""", [r"""regex /[i]/"""]),
(r"""/[\]]/""", [r"""regex /[\]]/"""]),
(r"""/a[\]]/""", [r"""regex /a[\]]/"""]),
(r"""/a[\]]b/""", [r"""regex /a[\]]b/"""]),
(r"""/[\]/]/gi""", [r"""regex /[\]/]/gi"""]),
(r"""/\[[^\]]+\]/gi""", [r"""regex /\[[^\]]+\]/gi"""]),
("""
rexl.re = {
NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
""", # NOQA
["id rexl", "punct .", "id re", "punct =", "punct {",
"id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
"id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
"punct ,",
"id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
"id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
"id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""", # NOQA
"punct }", "punct ;"
]),
("""
rexl.re = {
NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
str = '"';
""", # NOQA
["id rexl", "punct .", "id re", "punct =", "punct {",
"id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
"id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
"punct ,",
"id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
"id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
"id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""", # NOQA
"punct }", "punct ;",
"id str", "punct =", """string '"'""", "punct ;",
]),
(r""" this._js = "e.str(\"" + this.value.replace(/\\/g, "\\\\").replace(/"/g, "\\\"") + "\")"; """,
["keyword this", "punct .", "id _js", "punct =", r'''string "e.str(\""''', "punct +", "keyword this",
"punct .", "id value", "punct .", "id replace", "punct (", r"regex /\\/g", "punct ,", r'string "\\\\"',
"punct )",
"punct .", "id replace", "punct (", r'regex /"/g', "punct ,", r'string "\\\""', "punct )", "punct +",
r'string "\")"', "punct ;"]),
]
def make_function(input, toks):
def test_func(self):
lexer = JsLexer()
result = ["%s %s" % (name, tok) for name, tok in lexer.lex(input) if name != 'ws']
self.assertListEqual(result, toks)
return test_func
for i, (input, toks) in enumerate(JsTokensTest.LEX_CASES):
setattr(JsTokensTest, "test_case_%d" % i, make_function(input, toks))
GETTEXT_CASES = (
(
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
""",
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
"""
), (
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
""",
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
"""
), (
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
""",
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
"""
), (
r"""
s = "Hello \"th/foo/ere\"";
s = 'He\x23llo \'th/foo/ere\'';
s = 'slash quote \", just quote "';
""",
r"""
s = "Hello \"th/foo/ere\"";
s = "He\x23llo \'th/foo/ere\'";
s = "slash quote \", just quote \"";
"""
), (
r"""
s = "Line continuation\
continued /hello/ still the string";/hello/;
""",
r"""
s = "Line continuation\
continued /hello/ still the string";"REGEX";
"""
), (
r"""
var regex = /pattern/;
var regex2 = /matter/gm;
var regex3 = /[*/]+/gm.foo("hey");
""",
r"""
var regex = "REGEX";
var regex2 = "REGEX";
var regex3 = "REGEX".foo("hey");
"""
), (
r"""
for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}
""",
r"""
for (var x = a in foo && "</x>" || mot ? z:"REGEX"/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y<"REGEX") {xyz(x++);}
"""
), (
"""
\\u1234xyz = gettext('Hello there');
""", r"""
Uu1234xyz = gettext("Hello there");
"""
)
)
class JsToCForGettextTest(SimpleTestCase):
pass
def make_function(js, c):
def test_func(self):
self.assertMultiLineEqual(prepare_js_for_gettext(js), c)
return test_func
for i, pair in enumerate(GETTEXT_CASES):
setattr(JsToCForGettextTest, "test_case_%d" % i, make_function(*pair))
| bsd-3-clause | 1,253,794,266,116,648,000 | 4,900,511,676,797,187,000 | 42.030702 | 168 | 0.366222 | false |
ashang/calibre | src/calibre/gui2/tweak_book/editor/syntax/javascript.py | 14 | 3917 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from pygments.lexer import RegexLexer, default, include
from pygments.token import Comment, Punctuation, Number, Keyword, Text, String, Operator, Name
import pygments.unistring as uni
from calibre.gui2.tweak_book.editor.syntax.pygments_highlighter import create_highlighter
JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') +
']|\\\\u[a-fA-F0-9]{4})')
JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Mn', 'Mc', 'Nd', 'Pc') +
u'\u200c\u200d]|\\\\u[a-fA-F0-9]{4})')
JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*'
class JavascriptLexer(RegexLexer):
"""
For JavaScript source code. This is based on the pygments JS highlighter,
but that does not handle multi-line comments in streaming mode, so we had to
modify it.
"""
flags = re.UNICODE | re.MULTILINE
tokens = {
b'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, b'comment')
],
b'comment': [
(r'[^*/]+', Comment.Multiline),
(r'\*/', Comment.Multiline, b'#pop'),
(r'[*/]', Comment.Multiline),
],
b'slashstartsregex': [
include(b'commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, b'#pop'),
(r'(?=/)', Text, (b'#pop', b'badregex')),
default(b'#pop')
],
b'badregex': [
(r'\n', Text, b'#pop')
],
b'root': [
(r'\A#! ?/.*?\n', Comment), # shebang lines are recognized by node.js
(r'^(?=\s|/|<!--)', Text, b'slashstartsregex'),
include(b'commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, b'slashstartsregex'),
(r'[{(\[;,]', Punctuation, b'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|yield|'
r'this)\b', Keyword, b'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, b'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(JS_IDENT, Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
Highlighter = create_highlighter('JavascriptHighlighter', JavascriptLexer)
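# Minimal usage sketch (illustrative, not calibre API): stream tokens from
# the lexer defined above. Because 'comment' is a separate state, a
# /* ... */ block that spans lines is emitted incrementally rather than
# requiring the whole comment to be buffered up front.
def _demo_lex(code='a = 1; /* spans\nlines */ b = /re/g;'):
    return list(JavascriptLexer().get_tokens(code))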
if __name__ == '__main__':
from calibre.gui2.tweak_book.editor.widget import launch_editor
launch_editor(P('viewer/images.js'), syntax='javascript')
| gpl-3.0 | -1,903,568,804,431,643,000 | -8,899,629,518,105,537,000 | 41.576087 | 94 | 0.529997 | false |
gartung/dxr | dxr/plugins/python/tests/test_imports/test_imports.py | 9 | 2924 | from dxr.testing import DxrInstanceTestCase
class ImportTests(DxrInstanceTestCase):
def test_bases_from_import(self):
"""Make sure the bases: filter matches base classes imported
from another file using `from x import y`.
"""
self.found_line_eq('bases:child.FromImportChildClass',
'class <b>ParentClass</b>(object):', 1)
def test_bases_from_import_alias(self):
"""Make sure the bases: filter matches base classes imported
from another file using `from x import y as z`.
"""
self.found_line_eq('bases:child.FromImportAsChildClass',
'class <b>ParentClass</b>(object):', 1)
def test_bases_import(self):
"""Make sure the bases: filter matches base classes imported
from another file using `import x`.
"""
self.found_line_eq('bases:child.ImportChildClass',
'class <b>ParentClass</b>(object):', 1)
def test_bases_import_as(self):
"""Make sure the bases: filter matches base classes imported
from another file using `import x as y`.
"""
self.found_line_eq('bases:child.ImportAsChildClass',
'class <b>ParentClass</b>(object):', 1)
def test_bases_relative_import(self):
"""Make sure the bases: filter matches base classes imported
from another file using `from . import x`.
"""
self.found_line_eq('bases:child.RelativeImportChildClass',
'class <b>ParentClass</b>(object):', 1)
def test_derived(self):
"""Make sure the derived: filter matches child classes that
import from another file in a variety of ways.
"""
self.found_lines_eq('derived:parent.ParentClass', [
('class <b>FromImportChildClass</b>(ParentClass):', 8),
('class <b>ImportChildClass</b>(parent.ParentClass):', 12),
('class <b>FromImportAsChildClass</b>(PClass):', 16),
('class <b>ImportAsChildClass</b>(blarent.ParentClass):', 20),
('class <b>RelativeImportChildClass</b>(carent.ParentClass):', 24),
])
# Edge cases for the code in `package`.
def test_submodule_import_from(self):
"""Make sure we handle `from package import submodule` as
well as `import package.submodule`.
"""
self.found_lines_eq('derived:package.submodule.MyClass', [
('class <b>FirstDerivedFromSubmodule</b>(submodule.MyClass):', 4),
('class <b>SecondDerivedFromSubmodule</b>(package.submodule.MyClass):', 9),
])
def test_submodule_name_collision(self):
"""Make sure we handle `from package.sub import sub`."""
self.found_line_eq('derived:package.test_import_name_collision.MyClass',
'class <b>DerivedFromInaccessibleClass</b>(MyClass):', 24)
| mit | 5,470,652,515,668,725,000 | -7,425,183,198,745,315,000 | 39.611111 | 87 | 0.603283 | false |
rogerhu/django | django/db/migrations/operations/models.py | 5 | 9571 | from .base import Operation
from django.utils import six
from django.db import models, router
from django.db.models.options import normalize_unique_together
from django.db.migrations.state import ModelState
class CreateModel(Operation):
"""
Create a model's table.
"""
def __init__(self, name, fields, options=None, bases=None):
self.name = name
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()] = ModelState(app_label, self.name, self.fields, self.options, self.bases)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
app_cache = to_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
app_cache = from_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create model %s" % (self.name, )
def references_model(self, name, app_label=None):
strings_to_check = [self.name]
# Check we didn't inherit from the model
for base in self.bases:
if isinstance(base, six.string_types):
strings_to_check.append(base.split(".")[-1])
# Check we have no FKs/M2Ms with it
for fname, field in self.fields:
if field.rel:
if isinstance(field.rel.to, six.string_types):
strings_to_check.append(field.rel.to.split(".")[-1])
# Now go over all the strings and compare them
for string in strings_to_check:
if string.lower() == name.lower():
return True
return False
def __eq__(self, other):
return (
(self.__class__ == other.__class__) and
(self.name == other.name) and
(self.options == other.options) and
(self.bases == other.bases) and
([(k, f.deconstruct()[1:]) for k, f in self.fields] == [(k, f.deconstruct()[1:]) for k, f in other.fields])
)
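# Example (illustrative; the model and field are hypothetical) of how a
# CreateModel operation is declared inside a migration's ``operations`` list:
def _example_create_model():
    return CreateModel(
        name='Pony',
        fields=[('id', models.AutoField(primary_key=True))],
    )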
class DeleteModel(Operation):
"""
Drops a model's table.
"""
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
del state.models[app_label, self.name.lower()]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
app_cache = from_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
app_cache = to_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Delete model %s" % (self.name, )
class RenameModel(Operation):
"""
Renames a model.
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
def state_forwards(self, app_label, state):
state.models[app_label, self.new_name.lower()] = state.models[app_label, self.old_name.lower()]
state.models[app_label, self.new_name.lower()].name = self.new_name
del state.models[app_label, self.old_name.lower()]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.old_name)
new_model = new_app_cache.get_model(app_label, self.new_name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.new_name)
new_model = new_app_cache.get_model(app_label, self.old_name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
def references_model(self, name, app_label=None):
return (
name.lower() == self.old_name.lower() or
name.lower() == self.new_name.lower()
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
class AlterModelTable(Operation):
"""
Renames a model's table
"""
def __init__(self, name, table):
self.name = name
self.table = table
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()].options["db_table"] = self.table
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.name)
new_model = new_app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Rename table for %s to %s" % (self.name, self.table)
class AlterUniqueTogether(Operation):
"""
Changes the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
def __init__(self, name, unique_together):
self.name = name
unique_together = normalize_unique_together(unique_together)
self.unique_together = set(tuple(cons) for cons in unique_together)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options["unique_together"] = self.unique_together
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.name)
new_model = new_app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_unique_together(
new_model,
getattr(old_model._meta, "unique_together", set()),
getattr(new_model._meta, "unique_together", set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Alter unique_together for %s (%s constraints)" % (self.name, len(self.unique_together))
class AlterIndexTogether(Operation):
"""
Changes the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
def __init__(self, name, index_together):
self.name = name
self.index_together = set(tuple(cons) for cons in index_together)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options["index_together"] = self.index_together
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.name)
new_model = new_app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_index_together(
new_model,
getattr(old_model._meta, "index_together", set()),
getattr(new_model._meta, "index_together", set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Alter index_together for %s (%s constraints)" % (self.name, len(self.index_together))
| bsd-3-clause | 2,663,045,117,216,115,700 | -2,724,936,156,083,599,000 | 37.906504 | 124 | 0.620625 | false |
cemoody/chainer | chainer/links/connection/lstm.py | 2 | 2516 | from chainer.functions.activation import lstm
from chainer import link
from chainer.links.connection import linear
from chainer import variable
class LSTM(link.Chain):
"""Fully-connected LSTM layer.
This is a fully-connected LSTM layer as a chain. Unlike the
:func:`~chainer.functions.lstm` function, which is defined as a stateless
activation function, this chain holds upward and lateral connections as
child links.
It also maintains *states*, including the cell state and the output
at the previous time step. Therefore, it can be used as a *stateful LSTM*.
Args:
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of output vectors.
Attributes:
upward (chainer.links.Linear): Linear layer of upward connections.
lateral (chainer.links.Linear): Linear layer of lateral connections.
c (chainer.Variable): Cell states of LSTM units.
h (chainer.Variable): Output at the previous timestep.
"""
def __init__(self, in_size, out_size):
super(LSTM, self).__init__(
upward=linear.Linear(in_size, 4 * out_size),
lateral=linear.Linear(out_size, 4 * out_size, nobias=True),
)
self.state_size = out_size
self.reset_state()
def to_cpu(self):
super(LSTM, self).to_cpu()
if self.c is not None:
self.c.to_cpu()
if self.h is not None:
self.h.to_cpu()
def to_gpu(self, device=None):
super(LSTM, self).to_gpu(device)
if self.c is not None:
self.c.to_gpu(device)
if self.h is not None:
self.h.to_gpu(device)
def reset_state(self):
"""Resets the internal state.
It sets None to the :attr:`c` and :attr:`h` attributes.
"""
self.c = self.h = None
def __call__(self, x):
"""Updates the internal state and returns the LSTM outputs.
Args:
x (~chainer.Variable): A new batch from the input sequence.
Returns:
~chainer.Variable: Outputs of updated LSTM units.
"""
lstm_in = self.upward(x)
if self.h is not None:
lstm_in += self.lateral(self.h)
if self.c is None:
xp = self.xp
self.c = variable.Variable(
xp.zeros((len(x.data), self.state_size), dtype=x.data.dtype),
volatile='auto')
self.c, self.h = lstm.lstm(self.c, lstm_in)
return self.h
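# Usage sketch (sizes are illustrative): the link carries (c, h) across
# calls, so feeding a sequence one step at a time accumulates state;
# reset_state() starts the next sequence from scratch.
def _example_rollout(xs):
    lstm_link = LSTM(in_size=10, out_size=20)
    lstm_link.reset_state()
    return [lstm_link(x) for x in xs]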
| mit | 4,722,924,653,745,789,000 | -4,014,989,316,471,371,300 | 30.848101 | 78 | 0.600954 | false |
bwall/BAMF | IntegrationQueue/static/cifstrap.py | 1 | 6896 | import socket
import time
import struct
import sys
import threading
import datetime
def PrintLog(message):
message = "[" + str(datetime.datetime.now()) + "] " + message
print message
f = open("hashlog.txt", "a")
f.write(message + "\n")
f.close()
class Handler(threading.Thread):
def __init__(self, conn, addr):
threading.Thread.__init__(self)
self.conn = conn
self.addr = addr
def run(self):
try:
#get negotiate_protocol_request
negotiate_protocol_request = self.conn.recv(1024)
if not negotiate_protocol_request:
self.conn.close()
return
dialect_location = 40
dialect_index = 0
dialect_name = ""
while dialect_location < negotiate_protocol_request.__len__():
dialect_name = ""
while ord(negotiate_protocol_request[dialect_location]) != 0x00:
if ord(negotiate_protocol_request[dialect_location]) != 0x02:
dialect_name += negotiate_protocol_request[dialect_location]
dialect_location += 1
if dialect_name == "NT LM 0.12":
break
dialect_index += 1
dialect_location += 1
#netbios session service
negotiate_protocol_response = "\x00\x00\x00\x51"
#SMB Header
#Server Component
negotiate_protocol_response += "\xff\x53\x4d\x42"
#SMB Command
negotiate_protocol_response += "\x72"
#NT Status
negotiate_protocol_response += "\x00\x00\x00\x00"
#Flags
negotiate_protocol_response += "\x88"
#Flags2
negotiate_protocol_response += "\x01\xc0"
#Process ID High
negotiate_protocol_response += "\x00\x00"
#Signature
negotiate_protocol_response += "\x00\x00\x00\x00\x00\x00\x00\x00"
#Reserved
negotiate_protocol_response += "\x00\x00"
#Tree ID
negotiate_protocol_response += negotiate_protocol_request[28] + negotiate_protocol_request[29]
#Process ID
negotiate_protocol_response += negotiate_protocol_request[30] + negotiate_protocol_request[31]
#User ID
negotiate_protocol_response += negotiate_protocol_request[32] + negotiate_protocol_request[33]
#Multiplex ID
negotiate_protocol_response += negotiate_protocol_request[34] + negotiate_protocol_request[35]
#Negotiate Protocol Response
#Word Count
negotiate_protocol_response += "\x11"
#Dialect Index
negotiate_protocol_response += chr(dialect_index) + "\x00"
#Security Mode
negotiate_protocol_response += "\x03"
#Max Mpx Count
negotiate_protocol_response += "\x02\x00"
#Max VCs
negotiate_protocol_response += "\x01\x00"
#Max Buffer Size
negotiate_protocol_response += "\x04\x11\x00\x00"
#Max Raw Buffer
negotiate_protocol_response += "\x00\x00\x01\x00"
#Session Key
negotiate_protocol_response += "\x00\x00\x00\x00"
#Capabilities
negotiate_protocol_response += "\xfd\xe3\x00\x00"
#System Time
negotiate_protocol_response += "\x00" * 8#struct.pack('L', long(time.time()) * 1000L)
#UTC Offset in minutes
negotiate_protocol_response += "\x00\x00"
#Key Length
negotiate_protocol_response += "\x08"
#Byte Count
negotiate_protocol_response += "\x0c\x00"
#Encryption Key
negotiate_protocol_response += "\x11\x22\x33\x44\x55\x66\x77\x88"
#Primary Domain
negotiate_protocol_response += "\x00\x00"
#Server
negotiate_protocol_response += "\x00\x00"
self.conn.sendall(negotiate_protocol_response)
for x in range(0, 2):
ntlmssp_request = self.conn.recv(1024)
if ntlmssp_request.__len__() < 89 + 32 + 8 + 16:
continue
hmac = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89, 89 + 16))
header = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89 + 16, 89 + 20))
challenge = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89 + 24, 89 + 32 + 8))
tail = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89 + 32 + 8, 89 + 32 + 8 + 16))
tindex = 89 + 32 + 8 + 16 + 1
account = ""
while ord(ntlmssp_request[tindex]) != 0x00:
account += chr(ord(ntlmssp_request[tindex]))
tindex += 2
tindex += 2
domain = ""
while ord(ntlmssp_request[tindex]) != 0x00:
domain += chr(ord(ntlmssp_request[tindex]))
tindex += 2
PrintLog(account + "::" + domain + ":1122334455667788:" + hmac + ":" + header + "00000000" + challenge + tail)
#netbios session service
ntlmssp_failed = "\x00\x00\x00\x23"
#SMB Header
#Server Component
ntlmssp_failed += "\xff\x53\x4d\x42"
#SMB Command
ntlmssp_failed += "\x73"
#NT Status
ntlmssp_failed += "\x6d\x00\x00\xc0"
#Flags
ntlmssp_failed += "\x88"
#Flags2
ntlmssp_failed += "\x01\xc8"
#Process ID High
ntlmssp_failed += "\x00\x00"
#Signature
ntlmssp_failed += "\x00\x00\x00\x00\x00\x00\x00\x00"
#Reserved
ntlmssp_failed += "\x00\x00"
#Tree ID
ntlmssp_failed += ntlmssp_request[28] + ntlmssp_request[29]
#Process ID
ntlmssp_failed += ntlmssp_request[30] + ntlmssp_request[31]
#User ID
ntlmssp_failed += ntlmssp_request[32] + ntlmssp_request[33]
#Multiplex ID
ntlmssp_failed += ntlmssp_request[34] + ntlmssp_request[35]
#Negotiate Protocol Response
#Word Count
ntlmssp_failed += "\x00\x00\x00"
self.conn.sendall(ntlmssp_failed)
self.conn.close()
except:
pass
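# Note (an observation, not part of the original script): the line logged
# above matches the common NetNTLMv2 convention
# (user::domain:challenge:proof:blob), which offline cracking tools
# consume directly.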
HOST = ''
PORT = 445
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
while True:
conn, addr = s.accept()
PrintLog('Connected by' + str(addr))
handler = Handler(conn, addr)
handler.start()
| mit | -8,328,270,960,482,443,000 | -571,024,563,027,543,040 | 37.311111 | 127 | 0.518997 | false |
heistermann/wradvis | wradvis/gui.py | 1 | 3615 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, wradlib Development Team. All Rights Reserved.
# Distributed under the MIT License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
#!/usr/bin/env python
from PyQt4 import QtGui, QtCore
# other wradvis imports
from wradvis.glcanvas import RadolanWidget
from wradvis.mplcanvas import MplWidget
from wradvis.properties import PropertiesWidget
from wradvis import utils
from wradvis.config import conf
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.resize(825, 500)
self.setWindowTitle('RADOLAN Viewer')
self._need_canvas_refresh = False
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.reload)
# initialize RadolanCanvas
self.rwidget = RadolanWidget()
self.iwidget = self.rwidget
# initialize MplWidget
self.mwidget = MplWidget()
# canvas swapper
self.swapper = []
self.swapper.append(self.rwidget)
self.swapper.append(self.mwidget)
# need some tracer for the mouse position
self.rwidget.canvas.mouse_moved.connect(self.mouse_moved)
self.rwidget.canvas.key_pressed.connect(self.keyPressEvent)
# add PropertiesWidget
self.props = PropertiesWidget()
self.props.signal_slider_changed.connect(self.slider_changed)
self.props.signal_playpause_changed.connect(self.start_stop)
self.props.signal_speed_changed.connect(self.speed)
# add Horizontal Splitter and the three widgets
self.splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
self.splitter.addWidget(self.props)
self.splitter.addWidget(self.swapper[0])
self.splitter.addWidget(self.swapper[1])
self.swapper[1].hide()
self.setCentralWidget(self.splitter)
# finish init
self.slider_changed()
def reload(self):
if self.props.slider.value() == self.props.slider.maximum():
self.props.slider.setValue(1)
else:
self.props.slider.setValue(self.props.slider.value() + 1)
def start_stop(self):
if self.timer.isActive():
self.timer.stop()
else:
self.timer.start()
def speed(self):
self.timer.setInterval(self.props.speed.value())
def slider_changed(self):
try:
self.data, self.meta = utils.read_radolan(self.props.filelist[self.props.actualFrame])
except IndexError:
print("Could not read any data.")
else:
scantime = self.meta['datetime']
self.props.sliderLabel.setText(scantime.strftime("%H:%M"))
self.props.date.setText(scantime.strftime("%Y-%m-%d"))
self.iwidget.set_data(self.data)
def mouse_moved(self, event):
self.props.show_mouse(self.rwidget.canvas._mouse_position)
def keyPressEvent(self, event):
if isinstance(event, QtGui.QKeyEvent):
text = event.text()
else:
text = event.text
if text == 'c':
self.swapper = self.swapper[::-1]
self.iwidget = self.swapper[0]
self.swapper[0].show()
self.swapper[1].hide()
def start(arg):
appQt = QtGui.QApplication(arg.argv)
win = MainWindow()
win.show()
appQt.exec_()
if __name__ == '__main__':
print('wradview: Calling module <gui> as main...')
| mit | -866,592,014,783,641,200 | -3,998,479,516,546,655,700 | 31.567568 | 98 | 0.604149 | false |
jeltz/rust-debian-package | src/llvm/tools/clang/utils/analyzer/SATestAdd.py | 48 | 3075 | #!/usr/bin/env python
"""
Static Analyzer qualification infrastructure: adding a new project to
the Repository Directory.
Add a new project for testing: build it and add to the Project Map file.
Assumes it's being run from the Repository Directory.
The project directory should be added inside the Repository Directory and
have the same name as the project ID
The project should use the following files for set up:
- pre_run_static_analyzer.sh - prepare the build environment.
Ex: make clean can be a part of it.
- run_static_analyzer.cmd - a list of commands to run through scan-build.
Each command should be on a separate line.
Choose from: configure, make, xcodebuild
"""
import SATestBuild
import os
import csv
import sys
def isExistingProject(PMapFile, projectID) :
PMapReader = csv.reader(PMapFile)
for I in PMapReader:
if projectID == I[0]:
return True
return False
# Add a new project for testing: build it and add to the Project Map file.
# Params:
# Dir is the directory where the sources are.
# ID is a short string used to identify a project.
def addNewProject(ID, BuildMode) :
CurDir = os.path.abspath(os.curdir)
Dir = SATestBuild.getProjectDir(ID)
if not os.path.exists(Dir):
print "Error: Project directory is missing: %s" % Dir
sys.exit(-1)
# Build the project.
SATestBuild.testProject(ID, BuildMode, IsReferenceBuild=True, Dir=Dir)
# Add the project ID to the project map.
ProjectMapPath = os.path.join(CurDir, SATestBuild.ProjectMapFile)
if os.path.exists(ProjectMapPath):
PMapFile = open(ProjectMapPath, "r+b")
else:
print "Warning: Creating the Project Map file!!"
PMapFile = open(ProjectMapPath, "w+b")
try:
if (isExistingProject(PMapFile, ID)) :
print >> sys.stdout, 'Warning: Project with ID \'', ID, \
'\' already exists.'
print >> sys.stdout, "Reference output has been regenerated."
else:
PMapWriter = csv.writer(PMapFile)
PMapWriter.writerow( (ID, int(BuildMode)) );
print "The project map is updated: ", ProjectMapPath
finally:
PMapFile.close()
# TODO: Add an option not to build.
# TODO: Set the path to the Repository directory.
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'Usage: ', sys.argv[0],\
'project_ID <mode>' \
'mode - 0 for single file project; ' \
'1 for scan_build; ' \
'2 for single file c++11 project'
sys.exit(-1)
BuildMode = 1
if (len(sys.argv) >= 3):
BuildMode = int(sys.argv[2])
assert((BuildMode == 0) | (BuildMode == 1) | (BuildMode == 2))
addNewProject(sys.argv[1], BuildMode)
| apache-2.0 | -6,330,127,467,291,917,000 | -4,565,668,270,639,601,700 | 36.5 | 79 | 0.589268 | false |
grlee77/nipype | nipype/interfaces/utility.py | 9 | 20150 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Various utilities
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
import os
import re
from cPickle import dumps, loads
import numpy as np
import nibabel as nb
from nipype.external import six
from nipype.utils.filemanip import (filename_to_list, copyfile, split_filename)
from nipype.interfaces.base import (traits, TraitedSpec, DynamicTraitedSpec, File,
Undefined, isdefined, OutputMultiPath,
InputMultiPath, BaseInterface, BaseInterfaceInputSpec)
from nipype.interfaces.io import IOBase, add_traits
from nipype.testing import assert_equal
from nipype.utils.misc import getsource, create_function_from_source
class IdentityInterface(IOBase):
"""Basic interface class generates identity mappings
Examples
--------
>>> from nipype.interfaces.utility import IdentityInterface
>>> ii = IdentityInterface(fields=['a', 'b'], mandatory_inputs=False)
>>> ii.inputs.a
<undefined>
>>> ii.inputs.a = 'foo'
>>> out = ii._outputs()
>>> out.a
<undefined>
>>> out = ii.run()
>>> out.outputs.a
'foo'
>>> ii2 = IdentityInterface(fields=['a', 'b'], mandatory_inputs=True)
>>> ii2.inputs.a = 'foo'
>>> out = ii2.run() # doctest: +SKIP
ValueError: IdentityInterface requires a value for input 'b' because it was listed in 'fields' Interface IdentityInterface failed to run.
"""
input_spec = DynamicTraitedSpec
output_spec = DynamicTraitedSpec
def __init__(self, fields=None, mandatory_inputs=True, **inputs):
super(IdentityInterface, self).__init__(**inputs)
if fields is None or not fields:
raise ValueError('Identity Interface fields must be a non-empty list')
# Each input must be in the fields.
for in_field in inputs:
if in_field not in fields:
raise ValueError('Identity Interface input is not in the fields: %s' % in_field)
self._fields = fields
self._mandatory_inputs = mandatory_inputs
add_traits(self.inputs, fields)
# Adding any traits wipes out all input values set in superclass initialization,
# even if the trait is not in the add_traits argument. The work-around is to reset
# the values after adding the traits.
self.inputs.set(**inputs)
def _add_output_traits(self, base):
undefined_traits = {}
for key in self._fields:
base.add_trait(key, traits.Any)
undefined_traits[key] = Undefined
base.trait_set(trait_change_notify=False, **undefined_traits)
return base
def _list_outputs(self):
#manual mandatory inputs check
if self._fields and self._mandatory_inputs:
for key in self._fields:
value = getattr(self.inputs, key)
if not isdefined(value):
msg = "%s requires a value for input '%s' because it was listed in 'fields'. \
You can turn off mandatory inputs checking by passing mandatory_inputs = False to the constructor." % \
(self.__class__.__name__, key)
raise ValueError(msg)
outputs = self._outputs().get()
for key in self._fields:
val = getattr(self.inputs, key)
if isdefined(val):
outputs[key] = val
return outputs
class MergeInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
axis = traits.Enum('vstack', 'hstack', usedefault=True,
desc='direction in which to merge, hstack requires same number of elements in each input')
no_flatten = traits.Bool(False, usedefault=True, desc='append to outlist instead of extending in vstack mode')
class MergeOutputSpec(TraitedSpec):
out = traits.List(desc='Merged output')
class Merge(IOBase):
"""Basic interface class to merge inputs into a single list
Examples
--------
>>> from nipype.interfaces.utility import Merge
>>> mi = Merge(3)
>>> mi.inputs.in1 = 1
>>> mi.inputs.in2 = [2, 5]
>>> mi.inputs.in3 = 3
>>> out = mi.run()
>>> out.outputs.out
[1, 2, 5, 3]
"""
input_spec = MergeInputSpec
output_spec = MergeOutputSpec
def __init__(self, numinputs=0, **inputs):
super(Merge, self).__init__(**inputs)
self._numinputs = numinputs
add_traits(self.inputs, ['in%d' % (i + 1) for i in range(numinputs)])
def _list_outputs(self):
outputs = self._outputs().get()
out = []
if self.inputs.axis == 'vstack':
for idx in range(self._numinputs):
value = getattr(self.inputs, 'in%d' % (idx + 1))
if isdefined(value):
if isinstance(value, list) and not self.inputs.no_flatten:
out.extend(value)
else:
out.append(value)
else:
for i in range(len(filename_to_list(self.inputs.in1))):
out.insert(i, [])
for j in range(self._numinputs):
out[i].append(filename_to_list(getattr(self.inputs, 'in%d' % (j + 1)))[i])
if out:
outputs['out'] = out
return outputs
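# Illustrative sketch of the ``no_flatten`` flag documented above: list
# inputs are appended whole instead of extended, so the result here is
# [[1, 2], [3]] rather than [1, 2, 5, 3]-style flattening.
def _example_merge_no_flatten():
    mi = Merge(2)
    mi.inputs.no_flatten = True
    mi.inputs.in1 = [1, 2]
    mi.inputs.in2 = [3]
    return mi.run().outputs.out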
class RenameInputSpec(DynamicTraitedSpec):
in_file = File(exists=True, mandatory=True, desc="file to rename")
keep_ext = traits.Bool(desc=("Keep in_file extension, replace "
"non-extension component of name"))
format_string = traits.String(mandatory=True,
desc=("Python formatting string for output "
"template"))
parse_string = traits.String(desc=("Python regexp parse string to define "
"replacement inputs"))
use_fullpath = traits.Bool(False, usedefault=True,
desc="Use full path as input to regex parser")
class RenameOutputSpec(TraitedSpec):
out_file = traits.File(exists=True, desc="softlink to original file with new name")
class Rename(IOBase):
"""Change the name of a file based on a mapped format string.
To use additional inputs that will be defined at run-time, the class
constructor must be called with the format template, and the fields
identified will become inputs to the interface.
Additionally, you may set the parse_string input, which will be run
over the input filename with a regular expression search, and will
fill in additional input fields from matched groups. Fields set with
inputs have precedence over fields filled in with the regexp match.
Examples
--------
>>> from nipype.interfaces.utility import Rename
>>> rename1 = Rename()
>>> rename1.inputs.in_file = "zstat1.nii.gz"
>>> rename1.inputs.format_string = "Faces-Scenes.nii.gz"
>>> res = rename1.run() # doctest: +SKIP
>>> print res.outputs.out_file # doctest: +SKIP
'Faces-Scenes.nii.gz" # doctest: +SKIP
>>> rename2 = Rename(format_string="%(subject_id)s_func_run%(run)02d")
>>> rename2.inputs.in_file = "functional.nii"
>>> rename2.inputs.keep_ext = True
>>> rename2.inputs.subject_id = "subj_201"
>>> rename2.inputs.run = 2
>>> res = rename2.run() # doctest: +SKIP
>>> print res.outputs.out_file # doctest: +SKIP
'subj_201_func_run02.nii' # doctest: +SKIP
>>> rename3 = Rename(format_string="%(subject_id)s_%(seq)s_run%(run)02d.nii")
>>> rename3.inputs.in_file = "func_epi_1_1.nii"
>>> rename3.inputs.parse_string = "func_(?P<seq>\w*)_.*"
>>> rename3.inputs.subject_id = "subj_201"
>>> rename3.inputs.run = 2
>>> res = rename3.run() # doctest: +SKIP
>>> print res.outputs.out_file # doctest: +SKIP
'subj_201_epi_run02.nii' # doctest: +SKIP
"""
input_spec = RenameInputSpec
output_spec = RenameOutputSpec
def __init__(self, format_string=None, **inputs):
super(Rename, self).__init__(**inputs)
if format_string is not None:
self.inputs.format_string = format_string
self.fmt_fields = re.findall(r"%\((.+?)\)", format_string)
add_traits(self.inputs, self.fmt_fields)
else:
self.fmt_fields = []
def _rename(self):
fmt_dict = dict()
if isdefined(self.inputs.parse_string):
if isdefined(self.inputs.use_fullpath) and self.inputs.use_fullpath:
m = re.search(self.inputs.parse_string,
self.inputs.in_file)
else:
m = re.search(self.inputs.parse_string,
os.path.split(self.inputs.in_file)[1])
if m:
fmt_dict.update(m.groupdict())
for field in self.fmt_fields:
val = getattr(self.inputs, field)
if isdefined(val):
fmt_dict[field] = getattr(self.inputs, field)
if self.inputs.keep_ext:
fmt_string = "".join([self.inputs.format_string,
split_filename(self.inputs.in_file)[2]])
else:
fmt_string = self.inputs.format_string
return fmt_string % fmt_dict
def _run_interface(self, runtime):
runtime.returncode = 0
_ = copyfile(self.inputs.in_file, os.path.join(os.getcwd(),
self._rename()))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = os.path.join(os.getcwd(), self._rename())
return outputs
class SplitInputSpec(BaseInterfaceInputSpec):
inlist = traits.List(traits.Any, mandatory=True,
desc='list of values to split')
splits = traits.List(traits.Int, mandatory=True,
desc='Number of elements in each split - should sum to the number of elements in inlist')
squeeze = traits.Bool(False, usedefault=True,
desc='unfold one-element splits removing the list')
class Split(IOBase):
"""Basic interface class to split lists into multiple outputs
Examples
--------
>>> from nipype.interfaces.utility import Split
>>> sp = Split()
>>> _ = sp.inputs.set(inlist=[1, 2, 3], splits=[2, 1])
>>> out = sp.run()
>>> out.outputs.out1
[1, 2]
"""
input_spec = SplitInputSpec
output_spec = DynamicTraitedSpec
def _add_output_traits(self, base):
undefined_traits = {}
for i in range(len(self.inputs.splits)):
key = 'out%d' % (i + 1)
base.add_trait(key, traits.Any)
undefined_traits[key] = Undefined
base.trait_set(trait_change_notify=False, **undefined_traits)
return base
def _list_outputs(self):
outputs = self._outputs().get()
if isdefined(self.inputs.splits):
if sum(self.inputs.splits) != len(self.inputs.inlist):
raise RuntimeError('sum of splits != num of list elements')
splits = [0]
splits.extend(self.inputs.splits)
splits = np.cumsum(splits)
for i in range(len(splits) - 1):
val = np.array(self.inputs.inlist)[splits[i]:splits[i + 1]].tolist()
if self.inputs.squeeze and len(val) == 1:
val = val[0]
outputs['out%d' % (i + 1)] = val
return outputs
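# Illustrative sketch of ``squeeze``: the one-element second split is
# unwrapped, so out1 == [1, 2] and out2 == 3 (not [3]).
def _example_split_squeeze():
    sp = Split()
    sp.inputs.set(inlist=[1, 2, 3], splits=[2, 1], squeeze=True)
    return sp.run().outputs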
class SelectInputSpec(BaseInterfaceInputSpec):
inlist = InputMultiPath(traits.Any, mandatory=True,
desc='list of values to choose from')
index = InputMultiPath(traits.Int, mandatory=True,
desc='0-based indices of values to choose')
class SelectOutputSpec(TraitedSpec):
out = OutputMultiPath(traits.Any, desc='list of selected values')
class Select(IOBase):
"""Basic interface class to select specific elements from a list
Examples
--------
>>> from nipype.interfaces.utility import Select
>>> sl = Select()
>>> _ = sl.inputs.set(inlist=[1, 2, 3, 4, 5], index=[3])
>>> out = sl.run()
>>> out.outputs.out
4
>>> _ = sl.inputs.set(inlist=[1, 2, 3, 4, 5], index=[3, 4])
>>> out = sl.run()
>>> out.outputs.out
[4, 5]
"""
input_spec = SelectInputSpec
output_spec = SelectOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
out = np.array(self.inputs.inlist)[np.array(self.inputs.index)].tolist()
outputs['out'] = out
return outputs
class FunctionInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
function_str = traits.Str(mandatory=True, desc='code for function')
class Function(IOBase):
"""Runs arbitrary function as an interface
Examples
--------
>>> func = 'def func(arg1, arg2=5): return arg1 + arg2'
>>> fi = Function(input_names=['arg1', 'arg2'], output_names=['out'])
>>> fi.inputs.function_str = func
>>> res = fi.run(arg1=1)
>>> res.outputs.out
6
"""
input_spec = FunctionInputSpec
output_spec = DynamicTraitedSpec
def __init__(self, input_names, output_names, function=None, imports=None,
**inputs):
"""
Parameters
----------
input_names: single str or list
names corresponding to function inputs
output_names: single str or list
names corresponding to function outputs.
has to match the number of outputs
function : callable
callable python object. must be able to execute in an
isolated namespace (possibly in concert with the ``imports``
parameter)
imports : list of strings
list of import statements that allow the function to execute
in an otherwise empty namespace
"""
super(Function, self).__init__(**inputs)
if function:
if hasattr(function, '__call__'):
try:
self.inputs.function_str = getsource(function)
except IOError:
raise Exception('Interface Function does not accept ' \
'function objects defined interactively ' \
'in a python session')
elif isinstance(function, six.string_types):
self.inputs.function_str = dumps(function)
else:
raise Exception('Unknown type of function')
self.inputs.on_trait_change(self._set_function_string,
'function_str')
self._input_names = filename_to_list(input_names)
self._output_names = filename_to_list(output_names)
add_traits(self.inputs, [name for name in self._input_names])
self.imports = imports
self._out = {}
for name in self._output_names:
self._out[name] = None
def _set_function_string(self, obj, name, old, new):
if name == 'function_str':
if hasattr(new, '__call__'):
function_source = getsource(new)
elif isinstance(new, six.string_types):
function_source = dumps(new)
self.inputs.trait_set(trait_change_notify=False,
**{'%s' % name: function_source})
def _add_output_traits(self, base):
undefined_traits = {}
for key in self._output_names:
base.add_trait(key, traits.Any)
undefined_traits[key] = Undefined
base.trait_set(trait_change_notify=False, **undefined_traits)
return base
def _run_interface(self, runtime):
function_handle = create_function_from_source(self.inputs.function_str,
self.imports)
args = {}
for name in self._input_names:
value = getattr(self.inputs, name)
if isdefined(value):
args[name] = value
out = function_handle(**args)
if len(self._output_names) == 1:
self._out[self._output_names[0]] = out
else:
if isinstance(out, tuple) and (len(out) != len(self._output_names)):
raise RuntimeError('Mismatch in number of expected outputs')
else:
for idx, name in enumerate(self._output_names):
self._out[name] = out[idx]
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
for key in self._output_names:
outputs[key] = self._out[key]
return outputs
class AssertEqualInputSpec(BaseInterfaceInputSpec):
volume1 = File(exists=True, mandatory=True)
volume2 = File(exists=True, mandatory=True)
class AssertEqual(BaseInterface):
input_spec = AssertEqualInputSpec
def _run_interface(self, runtime):
data1 = nb.load(self.inputs.volume1).get_data()
data2 = nb.load(self.inputs.volume2).get_data()
assert_equal(data1, data2)
return runtime
class CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec):
in_file = File(exists=True, mandatory=True, desc='Input comma-separated value (CSV) file')
header = traits.Bool(False, usedefault=True, desc='True if the first line is a column header')
class CSVReader(BaseInterface):
"""
Examples
--------
>>> reader = CSVReader() # doctest: +SKIP
>>> reader.inputs.in_file = 'noHeader.csv' # doctest: +SKIP
>>> out = reader.run() # doctest: +SKIP
>>> out.outputs.column_0 == ['foo', 'bar', 'baz'] # doctest: +SKIP
True
>>> out.outputs.column_1 == ['hello', 'world', 'goodbye'] # doctest: +SKIP
True
>>> out.outputs.column_2 == ['300.1', '5', '0.3'] # doctest: +SKIP
True
>>> reader = CSVReader() # doctest: +SKIP
>>> reader.inputs.in_file = 'header.csv' # doctest: +SKIP
>>> reader.inputs.header = True # doctest: +SKIP
>>> out = reader.run() # doctest: +SKIP
>>> out.outputs.files == ['foo', 'bar', 'baz'] # doctest: +SKIP
True
>>> out.outputs.labels == ['hello', 'world', 'goodbye'] # doctest: +SKIP
True
>>> out.outputs.erosion == ['300.1', '5', '0.3'] # doctest: +SKIP
True
"""
input_spec = CSVReaderInputSpec
output_spec = DynamicTraitedSpec
_always_run = True
def _append_entry(self, outputs, entry):
for key, value in zip(self._outfields, entry):
outputs[key].append(value)
return outputs
def _parse_line(self, line):
line = line.replace('\n', '')
entry = [x.strip() for x in line.split(',')]
return entry
def _get_outfields(self):
with open(self.inputs.in_file, 'r') as fid:
entry = self._parse_line(fid.readline())
if self.inputs.header:
self._outfields = tuple(entry)
else:
self._outfields = tuple(['column_' + str(x) for x in range(len(entry))])
return self._outfields
def _run_interface(self, runtime):
self._get_outfields()
return runtime
def _outputs(self):
return self._add_output_traits(super(CSVReader, self)._outputs())
def _add_output_traits(self, base):
return add_traits(base, self._get_outfields())
def _list_outputs(self):
outputs = self.output_spec().get()
isHeader = True
for key in self._outfields:
outputs[key] = [] # initialize outfields
with open(self.inputs.in_file, 'r') as fid:
for line in fid.readlines():
if self.inputs.header and isHeader: # skip header line
isHeader = False
continue
entry = self._parse_line(line)
outputs = self._append_entry(outputs, entry)
return outputs
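# Minimal driver sketch mirroring the skipped doctests above (the file name
# is hypothetical and must exist on disk for run() to succeed):
def _example_csvreader(path='header.csv'):
    reader = CSVReader()
    reader.inputs.in_file = path
    reader.inputs.header = True
    return reader.run().outputs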
| bsd-3-clause | 5,829,993,984,256,638,000 | 6,318,556,667,434,796,000 | 34.918004 | 141 | 0.582134 | false |
psiwczak/openstack | nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py | 6 | 3228 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import compute
from nova.api.openstack.compute import extensions
from nova.api.openstack import wsgi
import nova.db.api
import nova.rpc
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
UUID = fakes.FAKE_UUID
class SchedulerHintsTestCase(test.TestCase):
def setUp(self):
super(SchedulerHintsTestCase, self).setUp()
self.fake_instance = fakes.stub_instance(1, uuid=UUID)
self.app = compute.APIRouter()
def test_create_server_without_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {})
return ([self.fake_instance], '')
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
}}
req.body = utils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_with_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {'a': 'b'})
return ([self.fake_instance], '')
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
},
'os:scheduler_hints': {'a': 'b'},
}
req.body = utils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_bad_hints(self):
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
},
'os:scheduler_hints': 'here',
}
req.body = utils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
| apache-2.0 | -2,731,187,334,925,443,600 | 8,312,461,482,365,322,000 | 31.938776 | 78 | 0.599133 | false |
rabernat/xrft | setup.py | 1 | 1391 | import os
import versioneer
from setuptools import setup, find_packages
PACKAGES = find_packages()
DISTNAME = 'xrft'
LICENSE = 'MIT'
AUTHOR = 'xrft Developers'
AUTHOR_EMAIL = '[email protected]'
URL = 'https://github.com/xgcm/xrft'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
]
INSTALL_REQUIRES = ['xarray', 'dask', 'numpy', 'pandas', 'scipy']
EXTRAS_REQUIRE = ['cftime']
SETUP_REQUIRES = ['pytest-runner']
TESTS_REQUIRE = ['pytest >= 2.8', 'coverage']
DESCRIPTION = "Discrete Fourier Transform with xarray"
def readme():
with open('README.rst') as f:
return f.read()
setup(name=DISTNAME,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=readme(),
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
tests_require=TESTS_REQUIRE,
url=URL,
packages=find_packages())
| mit | -5,044,889,346,086,749,000 | -7,712,717,616,524,255,000 | 29.23913 | 65 | 0.670022 | false |
SexualHealthInnovations/callisto-core | callisto_core/tests/notification/test_views.py | 2 | 1698 | from unittest import skip
from unittest.mock import ANY, call, patch
from callisto_core.reporting.views import ReportingConfirmationView
from callisto_core.tests.test_base import ReportFlowHelper as ReportFlowTestCase
from callisto_core.tests.utils.api import CustomNotificationApi
@skip("disabled for 2019 summer maintenance - record creation is no longer supported")
class NotificationViewTest(ReportFlowTestCase):
def test_submit_confirmation_admin_email(self):
with patch.object(CustomNotificationApi, "_logging") as api_logging:
self.client_post_report_creation()
self.client_post_reporting_end_step()
api_logging.assert_has_calls(
[
call(
notification_name=ReportingConfirmationView.admin_email_template_name
),
call(report=self.report),
],
any_order=True,
)
def test_submit_confirmation_user_email(self):
with patch.object(CustomNotificationApi, "_logging") as api_logging:
self.client_post_report_creation()
self.client_post_reporting_end_step()
api_logging.assert_has_calls(
[call(notification_name="submit_confirmation")], any_order=True
)
def test_submit_confirmation_slack_notification(self):
with patch.object(CustomNotificationApi, "slack_notification") as api_logging:
self.client_post_report_creation()
self.client_post_reporting_end_step()
api_logging.assert_has_calls(
[call(msg=ANY, type="submit_confirmation")], any_order=True
)
| agpl-3.0 | 5,885,852,827,893,802,000 | -5,250,573,198,389,822 | 42.538462 | 93 | 0.643698 | false |
lordB8r/polls | ENV/lib/python2.7/site-packages/django/contrib/sites/tests.py | 94 | 2574 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.sites.models import Site, RequestSite, get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
from django.test.utils import override_settings
class SitesFrameworkTests(TestCase):
def setUp(self):
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
self.old_Site_meta_installed = Site._meta.installed
Site._meta.installed = True
def tearDown(self):
Site._meta.installed = self.old_Site_meta_installed
def test_save_another(self):
# Regression for #17415
# On some backends the sequence needs reset after save with explicit ID.
# Test that there is no sequence collisions by saving another site.
Site(domain="example2.com", name="example2.com").save()
def test_site_manager(self):
# Make sure that get_current() does not return a deleted Site object.
s = Site.objects.get_current()
self.assertTrue(isinstance(s, Site))
s.delete()
self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)
def test_site_cache(self):
# After updating a Site object (e.g. via the admin), we shouldn't return a
# bogus value from the SITE_CACHE.
site = Site.objects.get_current()
self.assertEqual("example.com", site.name)
s2 = Site.objects.get(id=settings.SITE_ID)
s2.name = "Example site"
s2.save()
site = Site.objects.get_current()
self.assertEqual("Example site", site.name)
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_current_site(self):
# Test that the correct Site object is returned
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
site = get_current_site(request)
self.assertTrue(isinstance(site, Site))
self.assertEqual(site.id, settings.SITE_ID)
# Test that an exception is raised if the sites framework is installed
# but there is no matching Site
site.delete()
self.assertRaises(ObjectDoesNotExist, get_current_site, request)
# A RequestSite is returned if the sites framework is not installed
Site._meta.installed = False
site = get_current_site(request)
self.assertTrue(isinstance(site, RequestSite))
self.assertEqual(site.name, "example.com")
| mit | -3,535,289,956,972,125,000 | 6,248,838,620,908,719,000 | 38 | 82 | 0.665113 | false |
JshWright/home-assistant | homeassistant/components/http/auth.py | 3 | 1964 | """Authentication for HTTP component."""
import asyncio
import hmac
import logging
from homeassistant.const import HTTP_HEADER_HA_AUTH
from .util import get_real_ip
from .const import KEY_TRUSTED_NETWORKS, KEY_AUTHENTICATED
DATA_API_PASSWORD = 'api_password'
_LOGGER = logging.getLogger(__name__)
@asyncio.coroutine
def auth_middleware(app, handler):
"""Authenticate as middleware."""
# If no password set, just always set authenticated=True
if app['hass'].http.api_password is None:
@asyncio.coroutine
def no_auth_middleware_handler(request):
"""Auth middleware to approve all requests."""
request[KEY_AUTHENTICATED] = True
return handler(request)
return no_auth_middleware_handler
@asyncio.coroutine
def auth_middleware_handler(request):
"""Auth middleware to check authentication."""
# Auth code verbose on purpose
authenticated = False
if (HTTP_HEADER_HA_AUTH in request.headers and
validate_password(
request, request.headers[HTTP_HEADER_HA_AUTH])):
# A valid auth header has been set
authenticated = True
elif (DATA_API_PASSWORD in request.GET and
validate_password(request, request.GET[DATA_API_PASSWORD])):
authenticated = True
elif is_trusted_ip(request):
authenticated = True
request[KEY_AUTHENTICATED] = authenticated
return handler(request)
return auth_middleware_handler
def is_trusted_ip(request):
"""Test if request is from a trusted ip."""
ip_addr = get_real_ip(request)
return ip_addr and any(
ip_addr in trusted_network for trusted_network
in request.app[KEY_TRUSTED_NETWORKS])
def validate_password(request, api_password):
"""Test if password is valid."""
return hmac.compare_digest(
api_password, request.app['hass'].http.api_password)
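# Illustrative note (not part of the original module): hmac.compare_digest is
# used instead of ``==`` so the comparison runs in constant time, avoiding a
# timing side channel that would leak how many leading characters matched:
#
#     hmac.compare_digest('secret', 'secret')   # True
#     hmac.compare_digest('secret', 'Secret')   # False, same timing profile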
| apache-2.0 | 2,304,244,339,539,581,000 | -664,294,140,000,021,100 | 28.757576 | 74 | 0.65835 | false |
xlzdew/seleniumpr | py/selenium/webdriver/support/color.py | 49 | 11161 | #!/usr/bin/python
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
RGB_PATTERN = r"^\s*rgb\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*\)\s*$"
RGB_PCT_PATTERN = r"^\s*rgb\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*\)\s*$"
RGBA_PATTERN = r"^\s*rgba\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
RGBA_PCT_PATTERN = r"^\s*rgba\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
HEX_PATTERN = r"#([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})"
HEX3_PATTERN = r"#([A-Fa-f0-9])([A-Fa-f0-9])([A-Fa-f0-9])"
HSL_PATTERN = r"^\s*hsl\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*\)\s*$"
HSLA_PATTERN = r"^\s*hsla\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
class Color(object):
"""
Color conversion support class
Example:
.. code-block:: python
from selenium.webdriver.support.color import Color
print(Color.from_string('#00ff33').rgba)
print(Color.from_string('rgb(1, 255, 3)').hex)
print(Color.from_string('blue').rgba)
"""
@staticmethod
def from_string(str_):
import re
class Matcher(object):
def __init__(self):
self.match_obj = None
def match(self, pattern, str_):
self.match_obj = re.match(pattern, str_)
return self.match_obj
@property
def groups(self):
return () if self.match_obj is None else self.match_obj.groups()
m = Matcher()
if m.match(RGB_PATTERN, str_):
return Color(*m.groups)
elif m.match(RGB_PCT_PATTERN, str_):
rgb = tuple([float(each) / 100 * 255 for each in m.groups])
return Color(*rgb)
elif m.match(RGBA_PATTERN, str_):
return Color(*m.groups)
elif m.match(RGBA_PCT_PATTERN, str_):
rgba = tuple([float(each) / 100 * 255 for each in m.groups[:3]] + [m.groups[3]])
return Color(*rgba)
elif m.match(HEX_PATTERN, str_):
rgb = tuple([int(each, 16) for each in m.groups])
return Color(*rgb)
elif m.match(HEX3_PATTERN, str_):
rgb = tuple([int(each * 2, 16) for each in m.groups])
return Color(*rgb)
elif m.match(HSL_PATTERN, str_) or m.match(HSLA_PATTERN, str_):
return Color._from_hsl(*m.groups)
elif str_.upper() in Colors.keys():
return Colors[str_.upper()]
else:
raise ValueError("Could not convert %s into color" % str_)
@staticmethod
def _from_hsl(h, s, l, a=1):
h = float(h) / 360
s = float(s) / 100
l = float(l) / 100
if s == 0:
r = l
g = r
b = r
else:
            luminosity2 = l * (1 + s) if l < 0.5 else l + s - l * s
            luminosity1 = 2 * l - luminosity2
def hue_to_rgb(lum1, lum2, hue):
if hue < 0.0:
hue += 1
if hue > 1.0:
hue -= 1
if hue < 1.0 / 6.0:
return (lum1 + (lum2 - lum1) * 6.0 * hue)
elif hue < 1.0 / 2.0:
return lum2
elif hue < 2.0 / 3.0:
return lum1 + (lum2 - lum1) * ((2.0 / 3.0) - hue) * 6.0
else:
return lum1
            r = hue_to_rgb(luminosity1, luminosity2, h + 1.0 / 3.0)
            g = hue_to_rgb(luminosity1, luminosity2, h)
            b = hue_to_rgb(luminosity1, luminosity2, h - 1.0 / 3.0)
        # scale to the 0..255 range; multiplying by 256 can yield an
        # out-of-range value of 256 for full-intensity channels
        return Color(r * 255, g * 255, b * 255, a)
def __init__(self, red, green, blue, alpha=1):
self.red = int(red)
self.green = int(green)
self.blue = int(blue)
self.alpha = "1" if float(alpha) == 1 else str(float(alpha) or 0)
@property
def rgb(self):
return "rgb(%d, %d, %d)" % (self.red, self.green, self.blue)
@property
def rgba(self):
return "rgba(%d, %d, %d, %s)" % (self.red, self.green, self.blue, self.alpha)
@property
def hex(self):
return "#%02x%02x%02x" % (self.red, self.green, self.blue)
def __eq__(self, other):
if isinstance(other, Color):
return self.rgba == other.rgba
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __hash__(self):
return hash((self.red, self.green, self.blue, self.alpha))
def __repr__(self):
return "Color(red=%d, green=%d, blue=%d, alpha=%s)" % (self.red, self.green, self.blue, self.alpha)
def __str__(self):
return "Color: %s" % self.rgba
# Basic, extended and transparent colour keywords as defined by the W3C HTML4 spec
# See http://www.w3.org/TR/css3-color/#html4
Colors = {
"TRANSPARENT": Color(0, 0, 0, 0),
"ALICEBLUE": Color(240, 248, 255),
"ANTIQUEWHITE": Color(250, 235, 215),
"AQUA": Color(0, 255, 255),
"AQUAMARINE": Color(127, 255, 212),
"AZURE": Color(240, 255, 255),
"BEIGE": Color(245, 245, 220),
"BISQUE": Color(255, 228, 196),
"BLACK": Color(0, 0, 0),
"BLANCHEDALMOND": Color(255, 235, 205),
"BLUE": Color(0, 0, 255),
"BLUEVIOLET": Color(138, 43, 226),
"BROWN": Color(165, 42, 42),
"BURLYWOOD": Color(222, 184, 135),
"CADETBLUE": Color(95, 158, 160),
"CHARTREUSE": Color(127, 255, 0),
"CHOCOLATE": Color(210, 105, 30),
"CORAL": Color(255, 127, 80),
"CORNFLOWERBLUE": Color(100, 149, 237),
"CORNSILK": Color(255, 248, 220),
"CRIMSON": Color(220, 20, 60),
"CYAN": Color(0, 255, 255),
"DARKBLUE": Color(0, 0, 139),
"DARKCYAN": Color(0, 139, 139),
"DARKGOLDENROD": Color(184, 134, 11),
"DARKGRAY": Color(169, 169, 169),
"DARKGREEN": Color(0, 100, 0),
"DARKGREY": Color(169, 169, 169),
"DARKKHAKI": Color(189, 183, 107),
"DARKMAGENTA": Color(139, 0, 139),
"DARKOLIVEGREEN": Color(85, 107, 47),
"DARKORANGE": Color(255, 140, 0),
"DARKORCHID": Color(153, 50, 204),
"DARKRED": Color(139, 0, 0),
"DARKSALMON": Color(233, 150, 122),
"DARKSEAGREEN": Color(143, 188, 143),
"DARKSLATEBLUE": Color(72, 61, 139),
"DARKSLATEGRAY": Color(47, 79, 79),
"DARKSLATEGREY": Color(47, 79, 79),
"DARKTURQUOISE": Color(0, 206, 209),
"DARKVIOLET": Color(148, 0, 211),
"DEEPPINK": Color(255, 20, 147),
"DEEPSKYBLUE": Color(0, 191, 255),
"DIMGRAY": Color(105, 105, 105),
"DIMGREY": Color(105, 105, 105),
"DODGERBLUE": Color(30, 144, 255),
"FIREBRICK": Color(178, 34, 34),
"FLORALWHITE": Color(255, 250, 240),
"FORESTGREEN": Color(34, 139, 34),
"FUCHSIA": Color(255, 0, 255),
"GAINSBORO": Color(220, 220, 220),
"GHOSTWHITE": Color(248, 248, 255),
"GOLD": Color(255, 215, 0),
"GOLDENROD": Color(218, 165, 32),
"GRAY": Color(128, 128, 128),
"GREY": Color(128, 128, 128),
"GREEN": Color(0, 128, 0),
"GREENYELLOW": Color(173, 255, 47),
"HONEYDEW": Color(240, 255, 240),
"HOTPINK": Color(255, 105, 180),
"INDIANRED": Color(205, 92, 92),
"INDIGO": Color(75, 0, 130),
"IVORY": Color(255, 255, 240),
"KHAKI": Color(240, 230, 140),
"LAVENDER": Color(230, 230, 250),
"LAVENDERBLUSH": Color(255, 240, 245),
"LAWNGREEN": Color(124, 252, 0),
"LEMONCHIFFON": Color(255, 250, 205),
"LIGHTBLUE": Color(173, 216, 230),
"LIGHTCORAL": Color(240, 128, 128),
"LIGHTCYAN": Color(224, 255, 255),
"LIGHTGOLDENRODYELLOW": Color(250, 250, 210),
"LIGHTGRAY": Color(211, 211, 211),
"LIGHTGREEN": Color(144, 238, 144),
"LIGHTGREY": Color(211, 211, 211),
"LIGHTPINK": Color(255, 182, 193),
"LIGHTSALMON": Color(255, 160, 122),
"LIGHTSEAGREEN": Color(32, 178, 170),
"LIGHTSKYBLUE": Color(135, 206, 250),
"LIGHTSLATEGRAY": Color(119, 136, 153),
"LIGHTSLATEGREY": Color(119, 136, 153),
"LIGHTSTEELBLUE": Color(176, 196, 222),
"LIGHTYELLOW": Color(255, 255, 224),
"LIME": Color(0, 255, 0),
"LIMEGREEN": Color(50, 205, 50),
"LINEN": Color(250, 240, 230),
"MAGENTA": Color(255, 0, 255),
"MAROON": Color(128, 0, 0),
"MEDIUMAQUAMARINE": Color(102, 205, 170),
"MEDIUMBLUE": Color(0, 0, 205),
"MEDIUMORCHID": Color(186, 85, 211),
"MEDIUMPURPLE": Color(147, 112, 219),
"MEDIUMSEAGREEN": Color(60, 179, 113),
"MEDIUMSLATEBLUE": Color(123, 104, 238),
"MEDIUMSPRINGGREEN": Color(0, 250, 154),
"MEDIUMTURQUOISE": Color(72, 209, 204),
"MEDIUMVIOLETRED": Color(199, 21, 133),
"MIDNIGHTBLUE": Color(25, 25, 112),
"MINTCREAM": Color(245, 255, 250),
"MISTYROSE": Color(255, 228, 225),
"MOCCASIN": Color(255, 228, 181),
"NAVAJOWHITE": Color(255, 222, 173),
"NAVY": Color(0, 0, 128),
"OLDLACE": Color(253, 245, 230),
"OLIVE": Color(128, 128, 0),
"OLIVEDRAB": Color(107, 142, 35),
"ORANGE": Color(255, 165, 0),
"ORANGERED": Color(255, 69, 0),
"ORCHID": Color(218, 112, 214),
"PALEGOLDENROD": Color(238, 232, 170),
"PALEGREEN": Color(152, 251, 152),
"PALETURQUOISE": Color(175, 238, 238),
"PALEVIOLETRED": Color(219, 112, 147),
"PAPAYAWHIP": Color(255, 239, 213),
"PEACHPUFF": Color(255, 218, 185),
"PERU": Color(205, 133, 63),
"PINK": Color(255, 192, 203),
"PLUM": Color(221, 160, 221),
"POWDERBLUE": Color(176, 224, 230),
"PURPLE": Color(128, 0, 128),
"RED": Color(255, 0, 0),
"ROSYBROWN": Color(188, 143, 143),
"ROYALBLUE": Color(65, 105, 225),
"SADDLEBROWN": Color(139, 69, 19),
"SALMON": Color(250, 128, 114),
"SANDYBROWN": Color(244, 164, 96),
"SEAGREEN": Color(46, 139, 87),
"SEASHELL": Color(255, 245, 238),
"SIENNA": Color(160, 82, 45),
"SILVER": Color(192, 192, 192),
"SKYBLUE": Color(135, 206, 235),
"SLATEBLUE": Color(106, 90, 205),
"SLATEGRAY": Color(112, 128, 144),
"SLATEGREY": Color(112, 128, 144),
"SNOW": Color(255, 250, 250),
"SPRINGGREEN": Color(0, 255, 127),
"STEELBLUE": Color(70, 130, 180),
"TAN": Color(210, 180, 140),
"TEAL": Color(0, 128, 128),
"THISTLE": Color(216, 191, 216),
"TOMATO": Color(255, 99, 71),
"TURQUOISE": Color(64, 224, 208),
"VIOLET": Color(238, 130, 238),
"WHEAT": Color(245, 222, 179),
"WHITE": Color(255, 255, 255),
"WHITESMOKE": Color(245, 245, 245),
"YELLOW": Color(255, 255, 0),
"YELLOWGREEN": Color(154, 205, 50)
}
| apache-2.0 | -8,434,272,665,839,985,000 | -8,938,886,343,718,755,000 | 35.237013 | 146 | 0.547084 | false |
sertac/django | django/contrib/admin/options.py | 126 | 81190 | import copy
import operator
from collections import OrderedDict
from functools import partial, reduce, update_wrapper
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks,
)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects, flatten_fieldsets, get_deleted_objects,
lookup_needs_distinct, model_format_dict, quote, unquote,
)
from django.contrib.auth import get_permission_codename
from django.core.exceptions import (
FieldDoesNotExist, FieldError, PermissionDenied, ValidationError,
)
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet, inlineformset_factory, modelform_defines_fields,
modelform_factory, modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import Http404, HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, escapejs
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, get_text_list
from django.utils.translation import string_concat, ugettext as _, ungettext
from django.views.decorators.csrf import csrf_protect
from django.views.generic import RedirectView
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
return 'radiolist' if radio_style == VERTICAL else 'radiolist inline'
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
'form_class': forms.SplitDateTimeField,
'widget': widgets.AdminSplitDateTime
},
models.DateField: {'widget': widgets.AdminDateWidget},
models.TimeField: {'widget': widgets.AdminTimeWidget},
models.TextField: {'widget': widgets.AdminTextareaWidget},
models.URLField: {'widget': widgets.AdminURLFieldWidget},
models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
models.CharField: {'widget': widgets.AdminTextInputWidget},
models.ImageField: {'widget': widgets.AdminFileWidget},
models.FileField: {'widget': widgets.AdminFileWidget},
models.EmailField: {'widget': widgets.AdminEmailInputWidget},
}
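# Illustrative example (not part of the original module): a ModelAdmin
# subclass can extend these defaults per model-field class, e.g.
#
#     class ArticleAdmin(ModelAdmin):
#         formfield_overrides = {
#             models.TextField: {'widget': forms.Textarea(attrs={'rows': 4})},
#         }
#
# Entries declared that way are merged over FORMFIELD_FOR_DBFIELD_DEFAULTS in
# BaseModelAdmin.__init__ below.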
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)):
"""Functionality common to both ModelAdmin and InlineAdmin."""
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
view_on_site = True
show_full_result_count = True
checks_class = BaseModelAdminChecks
@classmethod
def check(cls, model, **kwargs):
return cls.checks_class().check(cls, model, **kwargs)
def __init__(self):
overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
overrides.update(self.formfield_overrides)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
request = kwargs.pop("request", None)
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(db_field.remote_field.model)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(request),
can_delete_related=related_modeladmin.has_delete_permission(request),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs)
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request=None, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
        ordering. Otherwise don't specify the queryset; let the field decide
(returns None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.remote_field.model)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.remote_field.model._default_manager.using(db).order_by(*ordering)
return None
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.remote_field,
self.admin_site, using=db)
elif db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = _('None') if db_field.blank else None
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
return db_field.formfield(**kwargs)
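    # Illustrative override (not part of the original module): subclasses
    # commonly restrict the queryset for one foreign key; ``Car`` and its
    # ``owner`` field are assumed names here.
    #
    #     class CarAdmin(ModelAdmin):
    #         def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
    #             if db_field.name == 'car':
    #                 kwargs['queryset'] = Car.objects.filter(owner=request.user)
    #             return super(CarAdmin, self).formfield_for_foreignkey(
    #                 db_field, request, **kwargs)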
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.remote_field.through._meta.auto_created:
return None
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.remote_field,
self.admin_site, using=db)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(
db_field.verbose_name,
db_field.name in self.filter_vertical
)
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
form_field = db_field.formfield(**kwargs)
if isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, CheckboxSelectMultiple):
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
help_text = form_field.help_text
form_field.help_text = string_concat(help_text, ' ', msg) if help_text else msg
return form_field
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
# use the ContentType lookup if view_on_site is True
return reverse('admin:view_on_site', kwargs={
'content_type_id': get_content_type_for_model(obj).pk,
'object_id': obj.pk
})
def get_empty_value_display(self):
"""
Return the empty_value_display set on ModelAdmin or AdminSite.
"""
try:
return mark_safe(self.empty_value_display)
except AttributeError:
return mark_safe(self.admin_site.empty_value_display)
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
return self.fields
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
if self.fieldsets:
return self.fieldsets
return [(None, {'fields': self.get_fields(request, obj)})]
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(l):
l = l()
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
relation_parts = []
prev_field = None
for part in lookup.split(LOOKUP_SEP):
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
break
# It is allowed to filter on values that would be found from local
# model anyways. For example, if you filter on employee__department__id,
# then the id value would be found already from employee__department_id.
if not prev_field or (prev_field.concrete and
field not in prev_field.get_path_info()[-1].target_fields):
relation_parts.append(part)
if not getattr(field, 'get_path_info', None):
# This is not a relational field, so further parts
# must be transforms.
break
prev_field = field
model = field.get_path_info()[-1].to_opts.model
if len(relation_parts) <= 1:
# Either a local field filter, or no fields at all.
return True
clean_lookup = LOOKUP_SEP.join(relation_parts)
valid_lookups = [self.date_hierarchy]
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
valid_lookups.append(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.append(filter_item[0])
else:
valid_lookups.append(filter_item)
return clean_lookup in valid_lookups
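    # Illustrative behaviour (not part of the original module): with
    # list_filter = ('user__email',) a changelist query string such as
    # ?user__email=a@b.c is accepted, while an unrelated traversal like
    # ?user__password__startswith=x is rejected by the checks above.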
def to_field_allowed(self, request, to_field):
"""
Returns True if the model associated with this admin should be
allowed to be referenced by the specified field.
"""
opts = self.model._meta
try:
field = opts.get_field(to_field)
except FieldDoesNotExist:
return False
# Always allow referencing the primary key since it's already possible
# to get this information from the change view URL.
if field.primary_key:
return True
# Allow reverse relationships to models defining m2m fields if they
# target the specified field.
for many_to_many in opts.many_to_many:
if many_to_many.m2m_target_field_name() == to_field:
return True
# Make sure at least one of the models registered for this site
# references this field through a FK or a M2M relationship.
registered_models = set()
for model, admin in self.admin_site._registry.items():
registered_models.add(model)
for inline in admin.inlines:
registered_models.add(inline.model)
related_objects = (
f for f in opts.get_fields(include_hidden=True)
if (f.auto_created and not f.concrete)
)
for related_object in related_objects:
related_model = related_object.related_model
if (any(issubclass(model, related_model) for model in registered_models) and
related_object.field.remote_field.get_related_field() == field):
return True
return False
def has_add_permission(self, request):
"""
Returns True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename('add', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
        Django model instance. The default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to delete the given
        Django model instance. The default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_module_permission(self, request):
"""
Returns True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
@python_2_unicode_compatible
class ModelAdmin(BaseModelAdmin):
"Encapsulates all admin options and functionality for a given model."
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
date_hierarchy = None
save_as = False
save_on_top = False
paginator = Paginator
preserve_filters = True
inlines = []
# Custom templates (designed to be over-ridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
# Actions
actions = []
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
checks_class = ModelAdminChecks
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super(ModelAdmin, self).__init__()
def __str__(self):
return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_add_permission(request) or
inline.has_change_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_urls(self):
from django.conf.urls import url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
wrapper.model_admin = self
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
urlpatterns = [
url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
url(r'^(.+)/change/$', wrap(self.change_view), name='%s_%s_change' % info),
# For backwards compatibility (was the change url before 1.9)
url(r'^(.+)/$', wrap(RedirectView.as_view(
pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info)
))),
]
return urlpatterns
    @property
    def urls(self):
        return self.get_urls()
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'core.js',
'admin/RelatedObjectLookups.js',
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'actions%s.js' % extra,
'urlify.js',
'prepopulate%s.js' % extra,
'vendor/xregexp/xregexp.min.js',
]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_model_perms(self, request):
"""
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
"""
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
}
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_form(request, obj, fields=None)
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
readonly_fields = self.get_readonly_fields(request, obj)
exclude.extend(readonly_fields)
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
# Remove declared form fields which are in readonly_fields.
new_attrs = OrderedDict(
(f, None) for f in readonly_fields
if f in self.form.declared_fields
)
form = type(self.form.__name__, (self.form,), new_attrs)
defaults = {
"form": form,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
% (e, self.__class__.__name__))
def get_changelist(self, request, **kwargs):
"""
Returns the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_object(self, request, object_id, from_field=None):
"""
        Returns an instance matching the field and value provided; the primary
key is used if no field is provided. Returns ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = model._meta.pk if from_field is None else model._meta.get_field(from_field)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Returns a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if (defaults.get('fields') is None
and not modelform_defines_fields(defaults.get('form'))):
defaults['fields'] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Returns a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelformset_factory(self.model,
self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults)
def get_formsets_with_inlines(self, request, obj=None):
"""
Yields formsets and the corresponding inlines.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj), inline
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, object, message):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=ADDITION,
change_message=message,
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=CHANGE,
change_message=message,
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=object_repr,
action_flag=DELETION,
)
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk))
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is explicitly set to None that means that we don't
# want *any* actions enabled on this page.
if self.actions is None or IS_POPUP_VAR in request.GET:
return OrderedDict()
actions = []
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
description = getattr(func, 'short_description', name.replace('_', ' '))
actions.append((func, name, description))
# Then gather them from the model admin and all parent classes,
# starting with self and working back up.
for klass in self.__class__.mro()[::-1]:
class_actions = getattr(klass, 'actions', [])
# Avoid trying to iterate over None
if not class_actions:
continue
actions.extend(self.get_action(action) for action in class_actions)
# get_action might have returned None, so filter any of those out.
actions = filter(None, actions)
# Convert the actions into an OrderedDict keyed by name.
actions = OrderedDict(
(name, (func, name, desc))
for func, name, desc in actions
)
return actions
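    # Illustrative example (not part of the original module): an action picked
    # up by get_actions() can be declared on a ModelAdmin subclass as
    #
    #     def make_published(self, request, queryset):
    #         queryset.update(status='p')   # a ``status`` field is assumed
    #     make_published.short_description = "Mark selected items as published"
    #
    #     actions = ['make_published']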
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in six.itervalues(self.get_actions(request)):
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return a given action from a parameter, which can either be a callable,
or the name of a method on the ModelAdmin. Return is a tuple of
(callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or self.list_display_links is None or not list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
def get_list_filter(self, request):
"""
Returns a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_list_select_related(self, request):
"""
Returns a list of fields to add to the select_related() part of the
changelist items query.
"""
return self.list_select_related
def get_search_fields(self, request):
"""
Returns a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
def get_search_results(self, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
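    # Illustrative mapping used by construct_search() above (not part of the
    # original module): search_fields = ('=sku', '^name', '@body', 'notes')
    # produces sku__iexact, name__istartswith, body__search and
    # notes__icontains lookups, OR-ed together per whitespace-separated term.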
def get_preserved_filters(self, request):
"""
Returns the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
opts = self.model._meta
current_url = '%s:%s' % (match.app_name, match.url_name)
changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get('_changelist_filters')
if preserved_filters:
return urlencode({'_changelist_filters': preserved_filters})
return ''
def construct_change_message(self, request, form, formsets, add=False):
"""
Construct a change message from a changed object.
"""
change_message = []
if add:
change_message.append(_('Added.'))
elif form.changed_data:
change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
if formsets:
for formset in formsets:
for added_object in formset.new_objects:
change_message.append(_('Added %(name)s "%(object)s".')
% {'name': force_text(added_object._meta.verbose_name),
'object': force_text(added_object)})
for changed_object, changed_fields in formset.changed_objects:
change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
% {'list': get_text_list(changed_fields, _('and')),
'name': force_text(changed_object._meta.verbose_name),
'object': force_text(changed_object)})
for deleted_object in formset.deleted_objects:
change_message.append(_('Deleted %(name)s "%(object)s".')
% {'name': force_text(deleted_object._meta.verbose_name),
'object': force_text(deleted_object)})
change_message = ' '.join(change_message)
return change_message or _('No fields changed.')
def message_user(self, request, message, level=messages.INFO, extra_tags='',
fail_silently=False):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
Exposes almost the same API as messages.add_message(), but accepts the
positional arguments in a different order to maintain backwards
compatibility. For convenience, it accepts the `level` argument as
a string rather than the usual level number.
"""
if not isinstance(level, int):
# attempt to get the level if passed a string
try:
level = getattr(messages.constants, level.upper())
except AttributeError:
levels = messages.constants.DEFAULT_TAGS.values()
levels_repr = ', '.join('`%s`' % l for l in levels)
raise ValueError('Bad message level string: `%s`. '
'Possible values are: %s' % (level, levels_repr))
messages.add_message(request, level, message, extra_tags=extra_tags,
fail_silently=fail_silently)
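    # Illustrative usage (not part of the original module):
    #
    #     self.message_user(request, "Archived.", level=messages.WARNING)
    #     self.message_user(request, "Archived.", level='warning')  # string form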
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def delete_model(self, request, obj):
"""
Given a model instance delete it from the database.
"""
obj.delete()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
preserved_filters = self.get_preserved_filters(request)
form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
view_on_site_url = self.get_view_on_site_url(obj)
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': view_on_site_url is not None,
'absolute_url': view_on_site_url,
'form_url': form_url,
'opts': opts,
'content_type_id': get_content_type_for_model(self.model).pk,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'to_field_var': TO_FIELD_VAR,
'is_popup_var': IS_POPUP_VAR,
'app_label': app_label,
})
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
request.current_app = self.admin_site.name
return TemplateResponse(request, form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.model_name),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'value': value,
'obj': obj,
})
elif "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
if post_url_continue is None:
post_url_continue = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(quote(pk_value),),
current_app=self.admin_site.name)
post_url_continue = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts},
post_url_continue
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else obj._meta.pk.attname
# Retrieve the `object_id` from the resolved pattern arguments.
value = request.resolver_match.args[0]
new_value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'change',
'value': escape(value),
'obj': escapejs(obj),
'new_value': escape(new_value),
})
opts = self.model._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
if "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_add' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
            # If we didn't get an action from the chosen form, the POST data
            # is invalid; with no 'action' key it will fail the validation
            # check below, so there is nothing more to do here.
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func = self.get_actions(request)[action][0]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse-like object, which will be
# used as the response from the POST. If not, we'll be a good
# little HTTP citizen and redirect back to the changelist page.
if isinstance(response, HttpResponseBase):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg, messages.WARNING)
return None
def response_delete(self, request, obj_display, obj_id):
"""
Determines the HttpResponse for the delete_view stage.
"""
opts = self.model._meta
if IS_POPUP_VAR in request.POST:
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'delete',
'value': escape(obj_id),
})
self.message_user(request,
_('The %(name)s "%(obj)s" was deleted successfully.') % {
'name': force_text(opts.verbose_name),
'obj': force_text(obj_display),
}, messages.SUCCESS)
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts}, post_url
)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
)
return TemplateResponse(request,
self.delete_confirmation_template or [
"admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html"
], context)
def get_inline_formsets(self, request, formsets, inline_instances,
obj=None):
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data.
Unless overridden, this populates from the GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
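    # Worked example (hypothetical field names): a GET such as
    # ?title=Foo&tags=1,2 on a model whose 'tags' is a ManyToManyField yields
    # {'title': 'Foo', 'tags': ['1', '2']} as the initial form data.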
@csrf_protect_m
@transaction.atomic
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
model = self.model
opts = model._meta
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and "_saveasnew" in request.POST:
object_id = None
obj = None
ModelForm = self.get_form(request, obj)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=not add)
else:
form_validated = False
new_object = form.instance
formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(request, form, formsets, add)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(request, form.instance, change=False)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(request, obj, change=True)
adminForm = helpers.AdminForm(
form,
list(self.get_fieldsets(request, obj)),
self.get_prepopulated_fields(request, obj),
self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
for inline_formset in inline_formsets:
media = media + inline_formset.media
context = dict(self.admin_site.each_context(request),
title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
adminform=adminForm,
object_id=object_id,
original=obj,
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
media=media,
inline_admin_formsets=inline_formsets,
errors=helpers.AdminErrorList(form, formsets),
preserved_filters=self.get_preserved_filters(request),
)
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST:
context['show_save'] = False
context['show_save_and_continue'] = False
context.update(extra_context or {})
return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def add_view(self, request, form_url='', extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
list_filter = self.get_list_filter(request)
search_fields = self.get_search_fields(request)
list_select_related = self.get_list_select_related(request)
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
if actions:
# Add the action checkboxes if there are any actions available.
list_display = ['action_checkbox'] + list(list_display)
ChangeList = self.get_changelist(request)
try:
cl = ChangeList(request, self.model, list_display,
list_display_links, list_filter, self.date_hierarchy,
search_fields, list_select_related, self.list_per_page,
self.list_max_show_all, self.list_editable, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET.keys():
return SimpleTemplateResponse('admin/invalid_setup.html', {
'title': _('Database error'),
})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if (request.method == "POST" and cl.list_editable and
'_save' in request.POST and not action_failed):
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
if changecount == 1:
name = force_text(opts.verbose_name)
else:
name = force_text(opts.verbose_name_plural)
msg = ungettext("%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount) % {'count': changecount,
'name': name,
'obj': force_text(obj)}
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif cl.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = dict(
self.admin_site.each_context(request),
module_name=force_text(opts.verbose_name_plural),
selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
selection_note_all=selection_note_all % {'total_count': cl.result_count},
title=cl.title,
is_popup=cl.is_popup,
to_field=cl.to_field,
cl=cl,
media=media,
has_add_permission=self.has_add_permission(request),
opts=cl.opts,
action_form=action_form,
actions_on_top=self.actions_on_top,
actions_on_bottom=self.actions_on_bottom,
actions_selection_counter=self.actions_selection_counter,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context)
@csrf_protect_m
@transaction.atomic
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(opts.verbose_name), 'key': escape(object_id)}
)
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_text(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = force_text(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = dict(
self.admin_site.each_context(request),
title=title,
object_name=object_name,
object=obj,
deleted_objects=deleted_objects,
model_count=dict(model_count).items(),
perms_lacking=perms_needed,
protected=protected,
opts=opts,
app_label=app_label,
preserved_filters=self.get_preserved_filters(request),
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
)
context.update(extra_context or {})
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(model._meta.verbose_name),
'key': escape(object_id),
})
if not self.has_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
context = dict(self.admin_site.each_context(request),
title=_('Change history: %s') % force_text(obj),
action_list=action_list,
module_name=capfirst(force_text(opts.verbose_name_plural)),
object=obj,
opts=opts,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context)
def _create_formsets(self, request, obj, change):
"Helper function to generate formsets for add/change_view."
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if change:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = {
'instance': obj,
'prefix': prefix,
'queryset': inline.get_queryset(request),
}
if request.method == 'POST':
formset_params.update({
'data': request.POST,
'files': request.FILES,
'save_as_new': '_saveasnew' in request.POST
})
formsets.append(FormSet(**formset_params))
inline_instances.append(inline)
return formsets, inline_instances
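    # Worked example of the prefix bookkeeping above: if two inlines share the
    # default prefix 'item_set' (a hypothetical name), the first occurrence
    # keeps 'item_set' while the second is renamed to 'item_set-2', so their
    # POSTed formset data cannot collide.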
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super(InlineModelAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = ['vendor/jquery/jquery%s.js' % extra, 'jquery.init.js',
'inlines%s.js' % extra]
if self.filter_vertical or self.filter_horizontal:
js.extend(['SelectBox.js', 'SelectFilter2.js'])
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
}
defaults.update(kwargs)
base_model_form = defaults['form']
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because on
templates it's not rendered using the field information, but
just using a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance.pk is None:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance representation,
# suitable to be an item in a list.
_('%(class_name)s %(instance)s') % {
'class_name': p._meta.verbose_name,
'instance': p}
)
params = {'class_name': self._meta.model._meta.verbose_name,
'instance': self.instance,
'related_objects': get_text_list(objs, _('and'))}
msg = _("Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s")
raise ValidationError(msg, code='deleting_protected', params=params)
def is_valid(self):
result = super(DeleteProtectedModelForm, self).is_valid()
self.hand_clean_DELETE()
return result
defaults['form'] = DeleteProtectedModelForm
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_formset(request, obj, fields=None).form
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_queryset(self, request):
queryset = super(InlineModelAdmin, self).get_queryset(request)
if not self.has_change_permission(request):
queryset = queryset.none()
return queryset
def has_add_permission(self, request):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request)
return super(InlineModelAdmin, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
opts = self.opts
if opts.auto_created:
# The model was auto-created as intermediary for a
# ManyToMany-relationship, find the target model
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request, obj)
return super(InlineModelAdmin, self).has_delete_permission(request, obj)
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
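# Illustrative usage (not part of this module): wiring an inline into a
# ModelAdmin; 'Book' and 'Author' are hypothetical models.
#
#     class BookInline(TabularInline):
#         model = Book
#         extra = 1
#
#     class AuthorAdmin(ModelAdmin):
#         inlines = [BookInline]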
| bsd-3-clause | 8,347,044,838,934,949,000 | 3,080,727,607,490,320,400 | 41.374739 | 119 | 0.591563 | false |
ppriest/mame | 3rdparty/bgfx/3rdparty/scintilla/scripts/ScintillaData.py | 69 | 8599 | # ScintillaData.py - implemented 2013 by Neil Hodgson [email protected]
# Released to the public domain.
# Common code used by Scintilla and SciTE for source file regeneration.
# The ScintillaData object exposes information about Scintilla as properties:
# Version properties
# version
# versionDotted
# versionCommad
#
# Date last modified
# dateModified
# yearModified
# mdyModified
# dmyModified
# myModified
#
# Information about lexers and properties defined in lexers
# lexFiles
# sorted list of lexer files
# lexerModules
# sorted list of module names
# lexerProperties
# sorted list of lexer properties
# propertyDocuments
# dictionary of property documentation { name: document string }
# This file can be run to see the data it provides.
# Requires Python 2.5 or later
from __future__ import with_statement
import codecs, datetime, glob, os, sys, textwrap
import FileGenerator
def FindModules(lexFile):
modules = []
with open(lexFile) as f:
for l in f.readlines():
if l.startswith("LexerModule"):
l = l.replace("(", " ")
modules.append(l.split()[1])
return modules
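# Example (assumed lexer source): a line such as
#     LexerModule lmAbaqus(SCLEX_ABAQUS, ColouriseABAQUSDoc, "abaqus");
# makes FindModules() return ["lmAbaqus"]: "(" is replaced by a space and the
# second whitespace-separated token is taken as the module name.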
# Properties that start with lexer. or fold. are automatically found but there are some
# older properties that don't follow this pattern so must be explicitly listed.
knownIrregularProperties = [
"fold",
"styling.within.preprocessor",
"tab.timmy.whinge.level",
"asp.default.language",
"html.tags.case.sensitive",
"ps.level",
"ps.tokenize",
"sql.backslash.escapes",
"nsis.uservars",
"nsis.ignorecase"
]
def FindProperties(lexFile):
properties = {}
with open(lexFile) as f:
for l in f.readlines():
if ("GetProperty" in l or "DefineProperty" in l) and "\"" in l:
l = l.strip()
if not l.startswith("//"): # Drop comments
propertyName = l.split("\"")[1]
if propertyName.lower() == propertyName:
# Only allow lower case property names
if propertyName in knownIrregularProperties or \
propertyName.startswith("fold.") or \
propertyName.startswith("lexer."):
properties[propertyName] = 1
return properties
def FindPropertyDocumentation(lexFile):
documents = {}
with open(lexFile) as f:
name = ""
for l in f.readlines():
l = l.strip()
if "// property " in l:
propertyName = l.split()[2]
if propertyName.lower() == propertyName:
# Only allow lower case property names
name = propertyName
documents[name] = ""
elif "DefineProperty" in l and "\"" in l:
propertyName = l.split("\"")[1]
if propertyName.lower() == propertyName:
# Only allow lower case property names
name = propertyName
documents[name] = ""
elif name:
if l.startswith("//"):
if documents[name]:
documents[name] += " "
documents[name] += l[2:].strip()
elif l.startswith("\""):
l = l[1:].strip()
if l.endswith(";"):
l = l[:-1].strip()
if l.endswith(")"):
l = l[:-1].strip()
if l.endswith("\""):
l = l[:-1]
# Fix escaped double quotes
l = l.replace("\\\"", "\"")
documents[name] += l
else:
name = ""
for name in list(documents.keys()):
if documents[name] == "":
del documents[name]
return documents
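# Example (assumed lexer source): a comment block such as
#     // property fold.comment
#     // This option enables folding multi-line comments.
# makes FindPropertyDocumentation() return
#     {"fold.comment": "This option enables folding multi-line comments."}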
def FindCredits(historyFile):
credits = []
stage = 0
with codecs.open(historyFile, "r", "utf-8") as f:
for l in f.readlines():
l = l.strip()
if stage == 0 and l == "<table>":
stage = 1
elif stage == 1 and l == "</table>":
stage = 2
if stage == 1 and l.startswith("<td>"):
credit = l[4:-5]
if "<a" in l:
title, a, rest = credit.partition("<a href=")
urlplus, bracket, end = rest.partition(">")
name = end.split("<")[0]
url = urlplus[1:-1]
credit = title.strip()
if credit:
credit += " "
credit += name + " " + url
credits.append(credit)
return credits
def ciCompare(a,b):
return cmp(a.lower(), b.lower())
def ciKey(a):
return a.lower()
def SortListInsensitive(l):
try: # Try key function
l.sort(key=ciKey)
except TypeError: # Earlier version of Python, so use comparison function
l.sort(ciCompare)
class ScintillaData:
def __init__(self, scintillaRoot):
        # Discover version information
with open(scintillaRoot + "version.txt") as f:
self.version = f.read().strip()
self.versionDotted = self.version[0] + '.' + self.version[1] + '.' + \
self.version[2]
self.versionCommad = self.version[0] + ', ' + self.version[1] + ', ' + \
self.version[2] + ', 0'
with open(scintillaRoot + "doc/index.html") as f:
self.dateModified = [l for l in f.readlines() if "Date.Modified" in l]\
[0].split('\"')[3]
# 20130602
# index.html, SciTE.html
dtModified = datetime.datetime.strptime(self.dateModified, "%Y%m%d")
self.yearModified = self.dateModified[0:4]
monthModified = dtModified.strftime("%B")
dayModified = "%d" % dtModified.day
self.mdyModified = monthModified + " " + dayModified + " " + self.yearModified
# May 22 2013
# index.html, SciTE.html
self.dmyModified = dayModified + " " + monthModified + " " + self.yearModified
# 22 May 2013
# ScintillaHistory.html -- only first should change
self.myModified = monthModified + " " + self.yearModified
# Find all the lexer source code files
lexFilePaths = glob.glob(scintillaRoot + "lexers/Lex*.cxx")
SortListInsensitive(lexFilePaths)
self.lexFiles = [os.path.basename(f)[:-4] for f in lexFilePaths]
self.lexerModules = []
lexerProperties = set()
self.propertyDocuments = {}
for lexFile in lexFilePaths:
self.lexerModules.extend(FindModules(lexFile))
for k in FindProperties(lexFile).keys():
lexerProperties.add(k)
documents = FindPropertyDocumentation(lexFile)
for k in documents.keys():
if k not in self.propertyDocuments:
self.propertyDocuments[k] = documents[k]
SortListInsensitive(self.lexerModules)
self.lexerProperties = list(lexerProperties)
SortListInsensitive(self.lexerProperties)
self.credits = FindCredits(scintillaRoot + "doc/ScintillaHistory.html")
def printWrapped(text):
print(textwrap.fill(text, subsequent_indent=" "))
if __name__=="__main__":
sci = ScintillaData("../")
print("Version %s %s %s" % (sci.version, sci.versionDotted, sci.versionCommad))
print("Date last modified %s %s %s %s %s" % (
sci.dateModified, sci.yearModified, sci.mdyModified, sci.dmyModified, sci.myModified))
printWrapped(str(len(sci.lexFiles)) + " lexer files: " + ", ".join(sci.lexFiles))
printWrapped(str(len(sci.lexerModules)) + " lexer modules: " + ", ".join(sci.lexerModules))
printWrapped("Lexer properties: " + ", ".join(sci.lexerProperties))
print("Lexer property documentation:")
documentProperties = list(sci.propertyDocuments.keys())
SortListInsensitive(documentProperties)
for k in documentProperties:
print(" " + k)
print(textwrap.fill(sci.propertyDocuments[k], initial_indent=" ",
subsequent_indent=" "))
print("Credits:")
for c in sci.credits:
if sys.version_info[0] == 2:
print(" " + c.encode("utf-8"))
else:
sys.stdout.buffer.write(b" " + c.encode("utf-8") + b"\n")
| gpl-2.0 | 8,426,283,756,333,913,000 | -3,965,469,492,295,725,000 | 37.217778 | 95 | 0.545063 | false |
ijoewahjoedi/instant-press | modules/widgets.py | 6 | 28658 | # -*- coding: utf-8 -*-
#
# Instant Press. Instant sites. CMS developed in Web2py Framework
# Site: http://www.instant2press.com
#
# Copyright (c) 2010 Mulone, Pablo Martín
#
# License Code: GPL, General Public License v. 2.0
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# http://groups.google.com/group/web2py-usuarios
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from gluon.html import *
from gluon.http import *
from gluon.validators import *
from gluon.sqlhtml import *
import gluon.contrib.simplejson as sj
#local
from utils import *
class Widgets(object):
def __init__(self, i2p):
self.i2p = i2p
def get_menu(self):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
articles = self.i2p.articles
        #this lists the top-level pages
        #the maximum display length of a title is 25 characters
xml_pages=""
trunk_title = 25
(pages, pages_count) = articles.get_last_pages(1)
for page in pages:
(url, notvalid) = (IS_URL()(page.post_url))
if notvalid: #this is a normal post
link_page = articles.get_page_permanent_link(page.id, \
page.title[:trunk_title])
else: #this is a url-post
link_page = A(page.title[:trunk_title], _href=page.post_url)
xml_page = '<li>%s</li>' % link_page.xml()
#xml_pages += sanitate_string(xml_page)
xml_pages += xml_page
link_home = A(T('Home'), _href=URL(request.application,\
config.controller_default,\
'index'))
xml_menu='<ul><li>%s</li> %s </ul>' % (link_home,xml_pages)
return xml_menu
def front(self):
config = self.i2p.config
siteinfo = self.i2p.siteinfo
if config.front_enabled:
welcome_description = '<div class="entry">%s</div>' \
% siteinfo._get_frontpage()
xml = '<div class="post"> %s </div>' % welcome_description
else:
xml=""
return xml
def sidebar_aboutme(self):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
siteinfo = self.i2p.siteinfo
about_xml = ""
if config.about_enabled:
about_caption = T('About')
about_info = siteinfo._get_aboutme()
if about_info != "":
about_description = '%s' % about_info
about_xml = '<div id="sidebar-about"><h2>%s</h2> %s </div>' \
% (about_caption, about_description)
return about_xml
def sidebar_archive(self):
config = self.i2p.config
articles = self.i2p.articles
archive_xml = ""
if config.archive_enabled:
archive_generate=""
if not config.widgets_ajax:
archive_generate = articles.get_list_archives()
archive_xml = '<div id="sidebar-archive"> %s </div>' % archive_generate
return archive_xml
def footer_archive(self):
config = self.i2p.config
articles = self.i2p.articles
archive_xml = ""
if config.archive_enabled:
archive_generate=""
if not config.widgets_ajax:
archive_generate = articles.get_list_archives()
archive_xml = '<div id="footer-widgets-archives" class="footer-columns"> %s </div>' \
% archive_generate
return archive_xml
def get_pages(self):
T = self.i2p.environment.T
pages_caption = T('Pages')
pages_generate = self.get_menu()
xml_pages = '<h2>%s</h2> %s' % (pages_caption,pages_generate)
return xml_pages
def sidebar_pages(self):
config = self.i2p.config
pages_xml = ""
if config.pages_enabled:
pages_generate=""
if not config.widgets_ajax:
pages_generate = self.get_pages()
pages_xml = '<div id="sidebar-pages"> %s </div>' % (pages_generate)
return pages_xml
def footer_pages(self):
config = self.i2p.config
pages_xml = ""
if config.pages_enabled:
pages_generate=""
if not config.widgets_ajax:
pages_generate = self.get_pages()
pages_xml = '<div id="footer-widgets-pages" class="footer-columns"> %s </div>' \
% (pages_generate)
return pages_xml
def sidebar_links(self):
config = self.i2p.config
articles = self.i2p.articles
links_xml = ""
if config.pages_enabled:
links_generate=""
if not config.widgets_ajax:
links_generate = articles.get_list_links()
links_xml = '<div id="sidebar-links"> %s </div>' % (links_generate)
return links_xml
def load_last_comments(self, page=1):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_comments=""
last_comments=""
header = T('Last comments')
(limit_inf, limit_sup) = get_query_limits(page, 5) #the last five comments
query = (db.comments.id>0)
last_comments = db(query).select(db.comments.ALL,\
orderby=~db.comments.comment_on,\
limitby=(limit_inf, limit_sup))
for comment in last_comments:
#author_avatar = IMG(_src=URL(r=request,c='static',f='images/avatar.png'), alt="avatar", style="padding: 5px; float:left;")
            author_style = ""  # e.g. "padding: 5px; float:left;"
            author_avatar = self.i2p.comments._get_avatar(comment.author_id,\
                                                          style=author_style)
text_comment = comment.comment[:60]
comment_user = self.i2p.users.get_user_title(comment.author_id)
comment_time = comment.comment_on.strftime("%B %d, %Y:%I:%M %p")
link_post = self.i2p.articles.get_post_permanent_link(comment.post_id)
xml_comment = '<li><div style="float:left">%s</div> %s say: %s on %s on article %s</li>' \
% (author_avatar.xml(), comment_user, text_comment, \
comment_time, link_post.xml())
#xml_comments += sanitate_string(xml_comment)
xml_comments += xml_comment
if xml_comments!="":
last_comments="<h2>%s</h2><ul>%s</ul>" % (header,xml_comments)
return last_comments
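    # Note: get_query_limits() comes from the project's utils module (imported
    # above with a star import and not shown here). From its use it presumably
    # maps a 1-based page number to a limitby tuple, roughly:
    #
    #     def get_query_limits(page, per_page):
    #         return ((page - 1) * per_page, page * per_page)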
def sidebar_last_comments(self):
comments_xml = ""
config = self.i2p.config
if config.comments_method in ['Disqus']:
comments_xml = '<div id="sidebar-last-comments"> %s </div>' % self._disqus_last_comments()
elif config.comments_method in ['Enabled']:
comments_generate=""
if not config.widgets_ajax:
comments_generate = self.load_last_comments()
comments_xml = '<div id="sidebar-last-comments"> %s </div>' % (comments_generate)
return comments_xml
def sidebar_tags(self):
config = self.i2p.config
articles = self.i2p.articles
tags_xml = ""
if config.tags_enabled:
tags_generate=""
if not config.widgets_ajax:
tags_generate = articles.get_popular_tags()
tags_xml = '<div id="sidebar-tags">%s</div><div style="clear: both; float: none;"></div>' \
% tags_generate
return tags_xml
def sidebar_feed(self):
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
feed_xml = ""
if config.feed_enabled:
feed_caption = T('Rss')
icon_feed_url = URL(request.application,'static','images/feed.png')
img_feed = IMG(_src=icon_feed_url, _alt="Feed", _style="padding-left: 5px;")
link_feedposts = A(T("Rss last posts"), \
_href=URL(request.application,\
config.controller_default,\
'feed_articles.rss' ))
link_feedcomments = A(T("Rss last comments"), \
_href=URL(request.application,\
config.controller_default,\
'feed_comments.rss' ))
feed_posts = '<li>%s %s</li>' % (link_feedposts, img_feed.xml())
feed_comments = '<li>%s %s</li>' % (link_feedcomments, img_feed.xml())
feed_xml = '<div id="sidebar-feed"><h2>%s</h2> <ul> %s %s </ul> </div>' \
% (feed_caption, feed_posts, feed_comments)
return feed_xml
def load_last_posts(self):
articles = self.i2p.articles
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_posts=""
last_posts=""
last_entries = T('Last entries')
(posts, post_count) = articles.get_last_posts(1)
for post in posts:
link_post = articles.get_post_permanent_link(post.id, \
post.title)
xml_post = '<li>%s</li>' % link_post.xml()
xml_posts += xml_post
if xml_posts!="":
last_posts="<h2>%s</h2><ul>%s</ul>" % (last_entries,xml_posts)
return last_posts
def sidebar_last_posts(self):
config = self.i2p.config
last_posts=''
if config.last_post_enabled:
xml_posts=""
if not config.widgets_ajax:
xml_posts = self.load_last_posts()
last_posts='<div id="sidebar-last-posts">%s</div>' % xml_posts
return last_posts
def footer_last_posts(self):
config = self.i2p.config
last_posts=''
if config.last_post_enabled:
xml_posts=""
if not config.widgets_ajax:
xml_posts = self.load_last_posts()
last_posts='<div id="footer-widgets-last-posts" class="footer-columns">%s</div>' \
% xml_posts
return last_posts
def load_categories(self):
config = self.i2p.config
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_cats=""
categories=""
cats = self.i2p.categories.get_list(page=1, limit=30)
for cat in cats:
post_count = db(db.posts.categories.contains(str(cat.id))).count() #contains bug in web2py
text_cat = " %s (%s)" % (cat.title,post_count)
link_cat = A(text_cat,_title="%s"%cat.description,\
_href=URL(request.application,\
config.controller_default,\
'category/by_id', args=[unicode(cat.id)] ))
xml_cat = '<li>%s</li>' % link_cat.xml()
xml_cats += xml_cat
if xml_cats!="":
categories = "<h2>%s</h2>"%T('Categories')
categories += "<ul>%s</ul>"%xml_cats
return categories
def sidebar_categories(self):
config = self.i2p.config
xml_categories = ""
if config.categories_enabled:
xml_cats=""
if not config.widgets_ajax:
xml_cats = self.load_categories()
xml_categories='<div id="sidebar-categories">%s</div>' % xml_cats
return xml_categories
def footer_categories(self):
config = self.i2p.config
xml_categories = ""
if config.categories_enabled:
xml_cats=""
if not config.widgets_ajax:
xml_cats = self.load_categories()
xml_categories='<div id="footer-widgets-categories" class="footer-columns">%s</div>' \
% xml_cats
return xml_categories
def sidebar_search(self):
config = self.i2p.config
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_content = ""
if config.search_enabled:
title_search = T('Search')
search_url = URL(request.application, \
config.controller_default,\
'search')
xml_content = '''<div id="sidebar-search" >
<h2>%s</h2>
<form method="get" action="%s">
<div><input type="text" name="q" id="sidebar-search-text" value="" /></div>
<div><input type="submit" id="sidebar-search-submit" value="Search" /></div>
</form>
</div>
''' % (title_search,search_url)
return xml_content
def add_this(self, url="",title="",description=""):
config = self.i2p.config
#need fix: need to escape to somethin like: &
if title!='':
addthis_title = 'addthis:title="%s"' % clean_html(title)
else:
addthis_title = ''
if url!='':
addthis_url = 'addthis:url="%s"' % url
else:
addthis_url = ''
if description!='':
addthis_description = 'addthis:description="%s"' % clean_html(description)
else:
addthis_description = ''
addthis = '''<!-- AddThis Button BEGIN -->
<div class="addthis_toolbox addthis_default_style">
<a href="http://www.addthis.com/bookmark.php?v=250&username=%(username)s" class="addthis_button_compact" %(url)s %(title)s %(description)s >Share</a>
<span class="addthis_separator">|</span>
<a class="addthis_button_facebook" %(url)s %(title)s %(description)s></a>
<a class="addthis_button_myspace" %(url)s %(title)s %(description)s></a>
<a class="addthis_button_google" %(url)s %(title)s %(description)s></a>
<a class="addthis_button_twitter" %(url)s %(title)s %(description)s></a>
</div>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#username=%(username)s"></script>
<!-- AddThis Button END --> ''' % {'username': config.addthis_user, 'url': addthis_url, 'title': addthis_title, 'description': addthis_description}
return addthis
def post_meta(self, post):
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
articles = self.i2p.articles
users = self.i2p.users
post_author_caption = '<span class="author">%s</span>' \
% users.get_user_title(post.created_by_id)
post_category = articles.get_post_category(post.id)
if post_category=="":
post_category = T("uncategorized")
in_category = T('in')
else:
in_category = T('in categories')
#post_time = post.published_on.strftime("%B %d, %Y at %I:%M")
post_time = post.published_on.strftime("%Y-%m-%d %I:%M")
year_full = post.published_on.strftime("%Y")
month = post.published_on.strftime("%m")
link_time = A(post_time, _href=URL(request.application,\
config.controller_default,\
'archives',args=[year_full,month]))
posted_by = T('By')
updated_on = T('Published on')
byline = '%s %s %s %s %s %s' % (updated_on, link_time.xml(), posted_by, \
post_author_caption, in_category, post_category)
return byline
def post_extract(self, post):
config = self.i2p.config
T = self.i2p.environment.T
request = self.i2p.environment.request
comments = self.i2p.comments
articles = self.i2p.articles
label_comments = "%s" % T('Comments')
if config.comments_method in ['Enabled'] and not config.widgets_ajax:
comments_count = comments._get_comment_count(post.id)
elif config.comments_method in ['Disqus']:
comments_count = ""
else:
comments_count = 0
if config.comments_method in ['Disqus']:
link_comments = articles.get_post_permanent_link(post.id, \
'Comments', \
'disqus_thread')
else:
link_comments = articles.get_post_permanent_link(post.id, \
label_comments, \
'comments')
link_readmore = articles.get_post_permanent_link(post.id, T("Read more"))
base_http = 'http://' + str(request.env.http_host)
url_permanent = articles.get_post_permanent_link(post.id, only_url=True )
url_post = str(base_http + url_permanent)
if config.addthis_enabled:
#add_this = self.add_this(url_post,post.title,post.text_slice[:100]) #need to pass: title, url, description
add_this = self.add_this(url_permanent,post.title,post.text_slice[:100]) #need to pass: title, url, description
else:
add_this = ""
xml_post = '<div class="post">'
xml_post +='<h2 class="title">%s</h2>' \
% articles.get_post_permanent_link(post.id).xml()
xml_post +='''<div class="meta">%s -
<span class="comments-count" id="comments-count_%s"> %s </span>
%s
</div>''' \
% (self.post_meta(post), post.id, comments_count, link_comments.xml())
if config.editor_language in ['Markmin']:
text_slice = MARKMIN(post.text_slice)
else:
text_slice = post.text_slice
xml_post +='<div class="entry">%s</div>' % text_slice
xml_post +='''<div class="links">
<div class="readmore"> %s </div>
<div class="addthis"> %s </div>
<div style="float:none; clear:both;"></div>
</div>''' % (link_readmore.xml(), add_this)
xml_post +='</div>'
return xml_post
def last_posts(self, page):
articles = self.i2p.articles
(posts, count_posts) = articles.get_last_posts(page)
xml_posts = articles.get_xml_results_from_posts(posts)
xml_posts += articles.pagination_last_post(page, count_posts)
return xml_posts
def disqus_comments(self):
config = self.i2p.config
if config.disqus_dev:
            developer = 'var disqus_developer = 1;'
        else:
            developer = ''
script = '''
<div id="disqus_thread"></div>
<script type="text/javascript">
%(developer)s
/**
* var disqus_identifier; [Optional but recommended: Define a unique identifier (e.g. post id or slug) for this thread]
*/
(function() {
var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
dsq.src = 'http://%(site)s.disqus.com/embed.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
})();
</script>
<noscript>Please enable JavaScript to view the <a href="http://disqus.com/?ref_noscript=%(site)s">comments powered by Disqus.</a></noscript>
<a href="http://disqus.com" class="dsq-brlink">blog comments powered by <span class="logo-disqus">Disqus</span></a>
''' % {'developer': developer, 'site': config.disqus_site}
return script
def disqus_comments_count(self):
config = self.i2p.config
script = '''
<script type="text/javascript">
var disqus_shortname = '%(site)s';
(function () {
var s = document.createElement('script'); s.async = true;
s.src = 'http://disqus.com/forums/%(site)s/count.js';
(document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s);
}());
</script>
''' % {'site': config.disqus_site}
return script
def _disqus_last_comments(self):
T = self.i2p.environment.T
config = self.i2p.config
if self.i2p.config.avatars_enabled:
hide_avatars = 0
else:
hide_avatars = 1
avatar_size = self.i2p.config.avatar_size
recent_comments=T("Recent Comments")
num_items = 5
script = '''
<div id="recentcomments" class="dsq-widget">
<h2 class="dsq-widget-title">%(recent_comments)s</h2>
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/recent_comments_widget.js?num_items=%(num_items)s&hide_avatars=%(hide_avatars)s&avatar_size=%(avatar_size)s&excerpt_length=200">
</script>
</div>
<a href="http://disqus.com/">Powered by Disqus</a>
''' % {'site': config.disqus_site, 'recent_comments': recent_comments, 'avatar_size': avatar_size, 'hide_avatars': hide_avatars, 'num_items': num_items}
return script
def sidebar_popular_threads(self):
config = self.i2p.config
popular_threads=''
#for now only in disqus
if config.comments_method in ['Disqus']:
popular_threads='<div id="sidebar-popular-threads">%s</div>' % self._disqus_popular_threads()
return popular_threads
def _disqus_popular_threads(self):
config = self.i2p.config
T = self.i2p.environment.T
popular_threads=T("Popular Threads")
script = '''
<div id="popularthreads" class="dsq-widget">
<h2 class="dsq-widget-title">%(popular_threads)s</h2>
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/popular_threads_widget.js?num_items=5">
</script>
</div>
<a href="http://disqus.com/">Powered by Disqus</a>
'''% {'site': config.disqus_site,'popular_threads':popular_threads}
return script
def sidebar_top_commenters(self):
config = self.i2p.config
top_commenters=''
#for now only in disqus
if config.comments_method in ['Disqus']:
top_commenters='<div id="sidebar-top-commenters">%s</div>' % self._disqus_top_commenters()
return top_commenters
def _disqus_top_commenters(self):
T = self.i2p.environment.T
config = self.i2p.config
avatar_size = self.i2p.config.avatar_size
if self.i2p.config.avatars_enabled:
hide_avatars = 0
else:
hide_avatars = 1
num_items = 5
top_commenters=T('Top Commenters')
script = '''
<div id="topcommenters" class="dsq-widget">
<h2 class="dsq-widget-title">%(top_commenters)s</h2>
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/top_commenters_widget.js?num_items=%(num_items)s&hide_mods=0&hide_avatars=%(hide_avatars)s&avatar_size=%(avatar_size)s">
</script>
</div>
<a href="http://disqus.com/">Powered by Disqus</a>
'''% {'site': config.disqus_site, 'top_commenters':top_commenters, 'avatar_size':avatar_size, 'hide_avatars':hide_avatars, 'num_items':num_items}
return script
def sidebar_combination(self):
config = self.i2p.config
T = self.i2p.environment.T
head_combination = T('Posts')
combination=''
#for now only in disqus
if config.comments_method in ['Disqus']:
combination='<div id="sidebar-combination"><h2>%s</h2>%s</div>' % (head_combination, self._disqus_combination())
return combination
def _disqus_combination(self):
config = self.i2p.config
num_items = 5
script = '''
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/combination_widget.js?num_items=%(num_items)s&hide_mods=0&color=grey&default_tab=recent&excerpt_length=200">
</script>
<a href="http://disqus.com/">Powered by Disqus</a>
'''% {'site': config.disqus_site, 'num_items':num_items}
return script
def ga_script(self):
config = self.i2p.config
script=""
if config.ga_enabled:
script = '''
<script>
var _gaq = [['_setAccount', '%(ga_id)s'], ['_trackPageview']];
(function(d, t) {
var g = d.createElement(t),
s = d.getElementsByTagName(t)[0];
g.async = true;
g.src = '//www.google-analytics.com/ga.js';
s.parentNode.insertBefore(g, s);
})(document, 'script');
</script>
''' % {'ga_id': config.ga_id}
return script
| gpl-2.0 | -2,681,283,982,646,796,000 | -3,302,876,512,384,342,500 | 34.291872 | 206 | 0.491363 | false |
umitproject/tease-o-matic | django/db/models/query_utils.py | 240 | 5799 | """
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
import weakref
from django.utils.copycompat import deepcopy
from django.db.backends import util
from django.utils import tree
from django.utils.datastructures import SortedDict
class InvalidQuery(Exception):
"""
The query passed to raw isn't a safe query to use with raw.
"""
pass
class QueryWrapper(object):
"""
    A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
def __init__(self, sql, params):
self.data = sql, params
def as_sql(self, qn=None, connection=None):
return self.data
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
& and |).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + kwargs.items())
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
obj = type(self)()
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
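# Example usage: Q objects combine with &, | and ~ before being handed to a
# queryset; 'Entry', 'author' and 'status' below are hypothetical names.
#
#     q = Q(author='alice') | ~Q(status='draft')
#     Entry.objects.filter(q)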
class DeferredAttribute(object):
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name, model):
self.field_name = field_name
self.model_ref = weakref.ref(model)
self.loaded = False
def __get__(self, instance, owner):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
from django.db.models.fields import FieldDoesNotExist
assert instance is not None
cls = self.model_ref()
data = instance.__dict__
if data.get(self.field_name, self) is self:
# self.field_name is the attname of the field, but only() takes the
# actual name, so we need to translate it here.
try:
cls._meta.get_field_by_name(self.field_name)
name = self.field_name
except FieldDoesNotExist:
name = [f.name for f in cls._meta.fields
if f.attname == self.field_name][0]
# We use only() instead of values() here because we want the
        # various data coercion methods (to_python(), etc.) to be called
# here.
val = getattr(
cls._base_manager.filter(pk=instance.pk).only(name).using(
instance._state.db).get(),
self.field_name
)
data[self.field_name] = val
return data[self.field_name]
def __set__(self, instance, value):
"""
Deferred loading attributes can be set normally (which means there will
        never be a database lookup involved).
"""
instance.__dict__[self.field_name] = value
def select_related_descend(field, restricted, requested, reverse=False):
"""
Returns True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_cached_row()).
Arguments:
* field - the field to be checked
* restricted - a boolean field, indicating if the field list has been
       manually restricted using a requested clause
* requested - The select_related() dictionary.
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.rel:
return False
if field.rel.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
return True
# This function is needed because data descriptors must be defined on a class
# object, not an instance, to have any effect.
def deferred_class_factory(model, attrs):
"""
Returns a class object that is a copy of "model" with the specified "attrs"
being replaced with DeferredAttribute objects. The "pk_value" ties the
deferred attributes to a particular instance of the model.
"""
class Meta:
proxy = True
app_label = model._meta.app_label
# The app_cache wants a unique name for each model, otherwise the new class
# won't be created (we get an old one back). Therefore, we generate the
# name using the passed in attrs. It's OK to reuse an existing class
# object if the attrs are identical.
name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(list(attrs))))
name = util.truncate_name(name, 80, 32)
overrides = dict([(attr, DeferredAttribute(attr, model))
for attr in attrs])
overrides["Meta"] = Meta
overrides["__module__"] = model.__module__
overrides["_deferred"] = True
return type(name, (model,), overrides)
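# Example (hedged, with a hypothetical model and field): deferring 'biography'
# on Author produces a proxy class whose attribute is fetched lazily on first
# access.
#
#     DeferredAuthor = deferred_class_factory(Author, {'biography'})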
# The above function is also used to unpickle model instances with deferred
# fields.
deferred_class_factory.__safe_for_unpickling__ = True
| bsd-3-clause | 5,329,621,790,847,933,000 | -8,788,031,979,759,438,000 | 32.912281 | 79 | 0.630798 | false |
chachan/nodeshot | nodeshot/community/participation/views.py | 5 | 2627 | from django.utils.translation import ugettext_lazy as _
from django.shortcuts import get_object_or_404
from django.contrib.auth import get_user_model
User = get_user_model()
from rest_framework import permissions, authentication, generics
from rest_framework.response import Response
from nodeshot.core.nodes.models import Node
from .models import Rating, Vote, Comment
from .serializers import * # noqa
class NodeRelationViewMixin(object):
def initial(self, request, *args, **kwargs):
"""
Custom initial method:
* ensure node exists and store it in an instance attribute
* change queryset to return only comments of current node
"""
super(NodeRelationViewMixin, self).initial(request, *args, **kwargs)
self.node = get_object_or_404(Node, **{'slug': self.kwargs['slug']})
self.queryset = self.model.objects.filter(node_id=self.node.id)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class NodeComments(NodeRelationViewMixin, generics.ListCreateAPIView):
"""
### GET
Retrieve a **list** of comments for the specified node
### POST
Add a comment to the specified node
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
serializer_class = CommentSerializer
model = Comment
node_comments = NodeComments.as_view()
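# Illustrative URLconf entry (not part of this module; the URL pattern is an
# assumption):
#
#     url(r'^nodes/(?P<slug>[-\w]+)/comments/$', node_comments)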
class NodeRatings(NodeRelationViewMixin, generics.CreateAPIView):
"""
### POST
Rate the specified node
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
serializer_class = RatingSerializer
model = Rating
node_ratings = NodeRatings.as_view()
class NodeVotes(NodeRelationViewMixin, generics.CreateAPIView):
"""
### POST
Vote for the specified node
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
serializer_class = VoteSerializer
model = Vote
def delete(self, *args, **kwargs):
votes = Vote.objects.filter(node_id=self.node.id, user_id=self.request.user.id)
        if votes:
for vote in votes:
vote.delete()
message = _('Vote for this node removed')
status = 200
else:
message = _('User has not voted this node yet')
status = 400
return Response({'details': message}, status=status)
node_votes = NodeVotes.as_view()
| gpl-3.0 | -7,412,497,793,414,597,000 | -5,881,218,467,300,082,000 | 30.27381 | 87 | 0.684812 | false |
ColOfAbRiX/ansible | lib/ansible/plugins/filter/json_query.py | 93 | 1583 | # (c) 2015, Filipe Niero Felisbino <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
try:
import jmespath
HAS_LIB = True
except ImportError:
HAS_LIB = False
def json_query(data, expr):
'''Query data using jmespath query language ( http://jmespath.org ). Example:
    - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
'''
if not HAS_LIB:
raise AnsibleError('You need to install "jmespath" prior to running '
'json_query filter')
return jmespath.search(expr, data)
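# Minimal sketch of the filter's behaviour outside of Ansible (assumes the
# "jmespath" package is installed; the sample data below is made up):
#
#   data = {'tagged_instances': [
#       {'block_device_mapping': {'sda': {'volume_id': 'vol-1'}}}]}
#   json_query(data, 'tagged_instances[*].block_device_mapping.*.volume_id')
#   # -> [['vol-1']]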
class FilterModule(object):
''' Query filter '''
def filters(self):
return {
'json_query': json_query
}
| gpl-3.0 | 6,663,393,622,510,470,000 | -5,814,489,595,260,109,000 | 31.979167 | 101 | 0.700569 | false |
IPMITMO/statan | coala-bears/bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py | 3 | 7854 | import functools
from itertools import combinations
from bears.c_languages.ClangBear import clang_available, ClangBear
from bears.c_languages.codeclone_detection.ClangCountingConditions import (
condition_dict)
from bears.c_languages.codeclone_detection.ClangCountVectorCreator import (
ClangCountVectorCreator)
from bears.c_languages.codeclone_detection.CloneDetectionRoutines import (
compare_functions, get_count_matrices)
from coala_utils.string_processing.StringConverter import StringConverter
from coalib.bears.GlobalBear import GlobalBear
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.collecting.Collectors import collect_dirs
from coalib.results.HiddenResult import HiddenResult
from coalib.settings.Setting import path_list, typed_ordered_dict
# counting_condition_dict is a function object generated by
# typed_ordered_dict. This function takes a setting and creates an ordered
# dictionary out of it while it converts all keys to counting condition
# function objects (via the condition_dict) and all values to floats, with
# unset values defaulting to 1.
counting_condition_dict = typed_ordered_dict(
lambda setting: condition_dict[str(setting).lower()],
float,
1)
default_cc_dict = counting_condition_dict(StringConverter(
"""
used: 0,
returned: 1.4,
is_condition: 0,
in_condition: 1.4,
in_second_level_condition: 1.4,
in_third_level_condition: 1.0,
is_assignee: 0,
is_assigner: 0.6,
loop_content: 0,
second_level_loop_content,
third_level_loop_content,
is_param: 2,
is_called: 1.4,
is_call_param: 0.0,
in_sum: 2.0,
in_product: 0,
in_binary_operation,
member_accessed"""))
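# For illustration (a sketch, not executed here): a call like
#   counting_condition_dict(StringConverter('used: 0.5, is_assignee'))
# yields an ordered dict mapping condition functions to weightings, roughly
#   {condition_dict['used']: 0.5, condition_dict['is_assignee']: 1.0},
# since unset weightings default to 1.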
def get_difference(function_pair,
count_matrices,
average_calculation,
poly_postprocessing,
exp_postprocessing):
"""
Retrieves the difference between two functions using the munkres algorithm.
:param function_pair: A tuple containing both indices for the
count_matrices dictionary.
:param count_matrices: A dictionary holding CMs.
:param average_calculation: If set to true the difference calculation
function will take the average of all variable
differences as the difference, else it will
normalize the function as a whole and thus
weighting in variables dependent on their size.
:param poly_postprocessing: If set to true, the difference value of big
function pairs will be reduced using a
polynomial approach.
:param exp_postprocessing: If set to true, the difference value of big
function pairs will be reduced using an
exponential approach.
:return: A tuple containing both function ids and their
difference.
"""
function_1, function_2 = function_pair
return (function_1,
function_2,
compare_functions(count_matrices[function_1],
count_matrices[function_2],
average_calculation,
poly_postprocessing,
exp_postprocessing))
class ClangFunctionDifferenceBear(GlobalBear):
check_prerequisites = classmethod(clang_available)
LANGUAGES = ClangBear.LANGUAGES
REQUIREMENTS = ClangBear.REQUIREMENTS | {PipRequirement('munkres3', '1.0')}
def run(self,
counting_conditions: counting_condition_dict=default_cc_dict,
average_calculation: bool=False,
poly_postprocessing: bool=True,
exp_postprocessing: bool=False,
extra_include_paths: path_list=()):
"""
Retrieves similarities for code clone detection. Those can be reused in
another bear to produce results.
Postprocessing may be done because small functions are less likely to
be clones at the same difference value than big functions which may
provide a better refactoring opportunity for the user.
        :param counting_conditions: A comma separated list of counting
conditions. Possible values are: used,
returned, is_condition, in_condition,
in_second_level_condition,
in_third_level_condition, is_assignee,
is_assigner, loop_content,
second_level_loop_content,
third_level_loop_content, is_param,
in_sum, in_product, in_binary_operation,
member_accessed.
                                    Weightings can be assigned to each
                                    condition by providing a dict
                                    value; e.g. weighting `used` half
                                    as much as the other conditions
                                    would simply be:
                                    "used: 0.5, is_assignee".
Weightings default to 1 if unset.
:param average_calculation: If set to true the difference calculation
function will take the average of all
variable differences as the difference,
else it will normalize the function as a
whole and thus weighting in variables
dependent on their size.
:param poly_postprocessing: If set to true, the difference value of big
function pairs will be reduced using a
polynomial approach.
:param extra_include_paths: A list containing additional include paths.
:param exp_postprocessing: If set to true, the difference value of big
function pairs will be reduced using an
exponential approach.
"""
self.debug('Using the following counting conditions:')
for key, val in counting_conditions.items():
self.debug(' *', key.__name__, '(weighting: {})'.format(val))
self.debug('Creating count matrices...')
count_matrices = get_count_matrices(
ClangCountVectorCreator(list(counting_conditions.keys()),
list(counting_conditions.values())),
list(self.file_dict.keys()),
lambda prog: self.debug('{:2.4f}%...'.format(prog)),
self.section['files'].origin,
collect_dirs(extra_include_paths))
self.debug('Calculating differences...')
differences = []
function_count = len(count_matrices)
        # That's n choose 2, hardcoded to simplify the calculation
combination_length = function_count * (function_count-1) / 2
partial_get_difference = functools.partial(
get_difference,
count_matrices=count_matrices,
average_calculation=average_calculation,
poly_postprocessing=poly_postprocessing,
exp_postprocessing=exp_postprocessing)
for i, elem in enumerate(
map(partial_get_difference,
[(f1, f2) for f1, f2 in combinations(count_matrices, 2)])):
if i % 50 == 0:
self.debug('{:2.4f}%...'.format(100*i/combination_length))
differences.append(elem)
yield HiddenResult(self, differences)
yield HiddenResult(self, count_matrices)
| mit | 4,211,391,398,262,315,500 | -4,517,187,287,277,231,600 | 46.02994 | 79 | 0.590018 | false |
chiluf/visvis.dev | backends/backend_fltk.py | 5 | 9139 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
""" The FLTK backend.
"""
import visvis
from visvis import BaseFigure, events, constants
import fltk
KEYMAP = { fltk.FL_SHIFT: constants.KEY_SHIFT,
fltk.FL_Shift_L: constants.KEY_SHIFT,
fltk.FL_Shift_R: constants.KEY_SHIFT,
fltk.FL_ALT : constants.KEY_ALT,
fltk.FL_Alt_L : constants.KEY_ALT,
fltk.FL_Alt_R : constants.KEY_ALT,
fltk.FL_Control_L: constants.KEY_CONTROL,
fltk.FL_Control_R: constants.KEY_CONTROL,
fltk.FL_Left: constants.KEY_LEFT,
fltk.FL_Up: constants.KEY_UP,
fltk.FL_Right: constants.KEY_RIGHT,
fltk.FL_Down: constants.KEY_DOWN,
fltk.FL_Page_Up: constants.KEY_PAGEUP,
fltk.FL_Page_Down: constants.KEY_PAGEDOWN,
fltk.FL_Delete: constants.KEY_DELETE
}
# Map uppercase letters to their lowercase equivalents
# (ord('Z') + 1 so that 'Z' itself is included in the range)
for i in range(ord('A'), ord('Z') + 1):
    KEYMAP[i] = i + 32
def modifiers():
"""Convert the fltk modifier state into a tuple of active modifier keys."""
mod = ()
fltkmod = fltk.Fl.event_state()
if fltk.FL_SHIFT & fltkmod:
mod += constants.KEY_SHIFT,
if fltk.FL_CTRL & fltkmod:
mod += constants.KEY_CONTROL,
if fltk.FL_ALT & fltkmod:
mod += constants.KEY_ALT,
return mod
class GLWidget(fltk.Fl_Gl_Window):
""" Implementation of the GL_window, which passes a number of
events to the Figure object that wraps it.
"""
def __init__(self, figure, *args, **kwargs):
fltk.Fl_Gl_Window.__init__(self, *args, **kwargs)
self.figure = figure
# Callback when closed
self.callback(self.OnClose)
def handle(self, event):
""" All events come in here. """
if not self.figure:
return 1
# map fltk buttons to visvis buttons
buttons = [0,1,0,2,0,0,0]
if event == fltk.FL_PUSH:
x, y = fltk.Fl.event_x(), fltk.Fl.event_y()
but = buttons[fltk.Fl.event_button()]
self.figure._GenerateMouseEvent('down', x, y, but, modifiers())
elif event == fltk.FL_RELEASE:
x, y = fltk.Fl.event_x(), fltk.Fl.event_y()
but = buttons[fltk.Fl.event_button()]
if fltk.Fl.event_clicks() == 1:
# double click
self.figure._GenerateMouseEvent('double', x, y, but, modifiers())
else:
# normal release
self.figure._GenerateMouseEvent('up', x, y, but, modifiers())
elif event in [fltk.FL_MOVE, fltk.FL_DRAG]:
self.OnMotion(None)
elif event == fltk.FL_ENTER:
ev = self.figure.eventEnter
ev.Set(0,0,0)
ev.Fire()
elif event == fltk.FL_LEAVE:
ev = self.figure.eventLeave
ev.Set(0,0,0)
ev.Fire()
elif event == fltk.FL_KEYDOWN:
self.OnKeyDown(None)
elif event == fltk.FL_KEYUP:
self.OnKeyUp(None)
elif event == fltk.FL_CLOSE:
self.OnClose(None)
elif event == fltk.FL_FOCUS:
self.OnFocus(None)
else:
return 1 # maybe someone else knows what to do with it
return 1 # event was handled.
def resize(self, x, y, w, h):
# Overload resize function to also draw after resizing
if self.figure:
fltk.Fl_Gl_Window.resize(self, x, y, w, h)
self.figure._OnResize()
def draw(self):
# Do the draw commands now
self.figure.OnDraw()
def OnMotion(self, event):
# prepare and fire event
x, y = fltk.Fl.event_x(), fltk.Fl.event_y()
self.figure._GenerateMouseEvent('motion', x, y, 0, modifiers())
def OnKeyDown(self, event):
key, text = self._ProcessKey()
ev = self.figure.eventKeyDown
ev.Set(key, text, modifiers())
ev.Fire()
def OnKeyUp(self, event):
key, text = self._ProcessKey()
ev = self.figure.eventKeyUp
ev.Set(key, text, modifiers())
ev.Fire()
    def _ProcessKey(self):
        """ Evaluates the keycode of fltk and transforms it to a visvis key.
        Also produces the text version.
        Returns (key, text). """
key = fltk.Fl.event_key()
if isinstance(key,basestring):
key = ord(key)
# special cases for shift control and alt -> map to 17 18 19
if key in KEYMAP:
return KEYMAP[key], ''
else:
# other key, try producing text
#print key, self._shiftDown
if (97 <= key <= 122) and fltk.Fl.event_shift():
key -= 32
try:
return key, chr(key)
except ValueError:
return key, ''
def OnClose(self, event=None):
if self.figure:
self.figure.Destroy()
self.hide()
def OnFocus(self, event):
BaseFigure._currentNr = self.figure.nr
class Figure(BaseFigure):
""" This is the fltk implementation of the figure class.
A Figure represents the OpenGl context and is the root
of the visualization tree; a Figure Wibject does not have a parent.
A Figure can be created with the function vv.figure() or vv.gcf().
"""
def __init__(self, *args, **kwargs):
# Make sure there is a native app and the timer is started
# (also when embedded)
app.Create()
# create widget
self._widget = GLWidget(self, *args, **kwargs)
# call original init AFTER we created the widget
BaseFigure.__init__(self)
def _SetCurrent(self):
""" make this scene the current context """
if self._widget:
self._widget.make_current()
def _SwapBuffers(self):
""" Swap the memory and screen buffer such that
what we rendered appears on the screen """
self._widget.swap_buffers()
def _SetTitle(self, title):
""" Set the title of the figure... """
window = self._widget
if hasattr(window,'label'):
window.label(title)
def _SetPosition(self, x, y, w, h):
""" Set the position of the widget. """
# select widget to resize. If it
widget = self._widget
# apply
widget.position(x,y)
widget.size(w, h)
def _GetPosition(self):
""" Get the position of the widget. """
# select widget to resize. If it
widget = self._widget
# get and return
return widget.x(), widget.y(), widget.w(), widget.h()
def _RedrawGui(self):
if self._widget:
self._widget.redraw()
def _ProcessGuiEvents(self):
fltk.Fl.wait(0)
def _Close(self, widget=None):
if widget is None:
widget = self._widget
if widget:
widget.OnClose()
def newFigure():
""" Create a window with a figure widget.
"""
# Create figure
size = visvis.settings.figureSize
figure = Figure(size[0], size[1], "Figure")
figure._widget.size_range(100,100,0,0,0,0)
figure._widget.show() # Show AFTER canvas is added
# Make OpenGl Initialize and return
# Also call draw(), otherwise it will not really draw and crash on Linux
figure.DrawNow()
figure._widget.draw()
return figure
class VisvisEventsTimer:
""" Timer that can be started and stopped.
"""
def __init__(self):
self._running = False
def Start(self):
if not self._running:
self._running = True
self._PostOneShot()
def Stop(self):
self._running = False
def _PostOneShot(self):
fltk.Fl.add_timeout(0.01, self._Fire)
def _Fire(self):
if self._running:
events.processVisvisEvents()
self._PostOneShot() # Repost
class App(events.App):
""" App()
Application class to wrap the GUI applications in a class
with a simple interface that is the same for all backends.
This is the fltk implementation.
"""
def __init__(self):
# Init timer
self._timer = VisvisEventsTimer()
def _GetNativeApp(self):
self._timer.Start()
return fltk.Fl
def _ProcessEvents(self):
app = self._GetNativeApp()
app.wait(0)
def _Run(self):
app = self._GetNativeApp()
if hasattr(app, '_in_event_loop') and app._in_event_loop:
pass # Already in event loop
else:
app.run()
# Create application instance now
app = App()
| bsd-3-clause | -6,311,855,534,014,458,000 | 6,375,875,937,794,185,000 | 28.76873 | 81 | 0.541525 | false |
havatv/QGIS | python/plugins/processing/algs/qgis/RasterLayerHistogram.py | 15 | 4036 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RasterLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
import warnings
from qgis.core import (QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterNumber,
QgsProcessingParameterFileDestination,
QgsProcessingException)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import raster
from qgis.PyQt.QtCore import QCoreApplication
class RasterLayerHistogram(QgisAlgorithm):
INPUT = 'INPUT'
BINS = 'BINS'
OUTPUT = 'OUTPUT'
BAND = 'BAND'
def group(self):
return self.tr('Plots')
def groupId(self):
return 'plots'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BAND,
self.tr('Band number'),
1,
self.INPUT))
self.addParameter(QgsProcessingParameterNumber(self.BINS,
self.tr('number of bins'), minValue=2, defaultValue=10))
self.addParameter(QgsProcessingParameterFileDestination(self.OUTPUT, self.tr('Histogram'), self.tr('HTML files (*.html)')))
def name(self):
return 'rasterlayerhistogram'
def displayName(self):
return self.tr('Raster layer histogram')
def processAlgorithm(self, parameters, context, feedback):
try:
# importing plotly throws Python warnings from within the library - filter these out
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ResourceWarning)
warnings.filterwarnings("ignore", category=ImportWarning)
import plotly as plt
import plotly.graph_objs as go
except ImportError:
raise QgsProcessingException(QCoreApplication.translate('RasterLayerHistogram', 'This algorithm requires the Python “plotly” library. Please install this library and try again.'))
layer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
band = self.parameterAsInt(parameters, self.BAND, context)
nbins = self.parameterAsInt(parameters, self.BINS, context)
output = self.parameterAsFileOutput(parameters, self.OUTPUT, context)
# ALERT: this is potentially blocking if the layer is too big
values = raster.scanraster(layer, feedback, band)
valueslist = []
for v in values:
if v is not None:
valueslist.append(v)
data = [go.Histogram(x=valueslist,
nbinsx=nbins)]
plt.offline.plot(data, filename=output, auto_open=False)
return {self.OUTPUT: output}
| gpl-2.0 | 6,546,487,428,204,044,000 | -3,442,776,703,296,687,000 | 39.32 | 191 | 0.536706 | false |
40223202/test | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/event.py | 603 | 19086 | #!/usr/bin/env python
'''Pygame module for interacting with events and queues.
Pygame handles all its event messaging through an event queue. The routines
in this module help you manage that event queue. The input queue is heavily
dependent on the pygame display module. If the display has not been
initialized and a video mode not set, the event queue will not really work.
The queue is a regular queue of Event objects, and there are a variety of
ways to access the events it contains, from simply checking for the
existence of events to grabbing them directly off the stack.
All events have a type identifier. This event type is in between the values
of NOEVENT and NUMEVENTS. All user defined events can have the value of
USEREVENT or higher. It is recommended to make sure your event ids follow
this system.
To get the state of various input devices, you can forego the event queue
and access the input devices directly with their appropriate modules; mouse,
key, and joystick. If you use this method, remember that pygame requires some
form of communication with the system window manager and other parts of the
platform. To keep pygame in synch with the system, you will need to call
pygame.event.pump() to keep everything current. You'll want to call this
function usually once per game loop.
The event queue offers some simple filtering. This can help performance
slightly by blocking certain event types from the queue, use the
pygame.event.set_allowed() and pygame.event.set_blocked() to work with
this filtering. All events default to allowed.
Joysticks will not send any events until the device has been initialized.
An Event object contains an event type and a readonly set of member data.
The Event object contains no method functions, just member data. Event
objects are retrieved from the pygame event queue. You can create your
own new events with the pygame.event.Event() function.
Your program must take steps to keep the event queue from overflowing. If the
program is not clearing or getting all events off the queue at regular
intervals, it can overflow. When the queue overflows an exception is thrown.
All Event objects contain an event type identifier in the Event.type member.
You may also get full access to the Event's member data through the Event.dict
method. All other member lookups will be passed through to the Event's
dictionary values.
While debugging and experimenting, you can print the Event objects for a
quick display of its type and members. Events that come from the system
will have a guaranteed set of member items based on the type. Here is a
list of the Event members that are defined with each type.
QUIT
(none)
ACTIVEEVENT
gain, state
KEYDOWN
unicode, key, mod
KEYUP
key, mod
MOUSEMOTION
pos, rel, buttons
MOUSEBUTTONUP
pos, button
MOUSEBUTTONDOWN
pos, button
JOYAXISMOTION
joy, axis, value
JOYBALLMOTION
joy, ball, rel
JOYHATMOTION
joy, hat, value
JOYBUTTONUP
joy, button
JOYBUTTONDOWN
joy, button
VIDEORESIZE
size, w, h
VIDEOEXPOSE
(none)
USEREVENT
code
'''
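# A typical per-frame event loop looks like the following (a sketch; it
# assumes a display mode has already been set via pygame.display.set_mode()):
#
#   running = True
#   while running:
#       for e in pygame.event.get():
#           if e.type == pygame.locals.QUIT:
#               running = False
#           elif e.type == pygame.locals.KEYDOWN:
#               print 'key pressed:', e.key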
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from copy import copy
#from ctypes import * #brython
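# NOTE: with the ctypes import commented out for the Brython port, the
# ctypes-based user-event helpers used further below (c_void_p, cast,
# pointer, POINTER) are unavailable; those code paths assume a ctypes-backed
# SDL wrapper.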
from SDL import *
import pygame.base
import pygame.locals
import pygame.display
def pump():
'''Internally process pygame event handlers.
For each frame of your game, you will need to make some sort of call to
the event queue. This ensures your program can internally interact with
the rest of the operating system. If you are not using other event
functions in your game, you should call pygame.event.pump() to allow
pygame to handle internal actions.
This function is not necessary if your program is consistently processing
events on the queue through the other pygame.event functions.
There are important things that must be dealt with internally in the event
queue. The main window may need to be repainted or respond to the system.
If you fail to make a call to the event queue for too long, the system may
decide your program has locked up.
'''
pygame.display._video_init_check()
SDL_PumpEvents()
def get(typelist=None):
'''Get events from the queue.
pygame.event.get(): return Eventlist
pygame.event.get(type): return Eventlist
pygame.event.get(typelist): return Eventlist
This will get all the messages and remove them from the queue. If a type
or sequence of types is given only those messages will be removed from the
queue.
If you are only taking specific events from the queue, be aware that the
queue could eventually fill up with the events you are not interested.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types that can be returned.
:rtype: list of `Event`
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
            mask = SDL_EVENTMASK(int(typelist))
SDL_PumpEvents()
events = []
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
events.append(Event(0, sdl_event=new_events[0]))
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
return events
def poll():
'''Get a single event from the queue.
Returns a single event from the queue. If the event queue is empty an event
of type pygame.NOEVENT will be returned immediately. The returned event is
removed from the queue.
:rtype: Event
'''
pygame.display._video_init_check()
event = SDL_PollEventAndReturn()
if event:
return Event(0, sdl_event=event, keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT)
def wait():
'''Wait for a single event from the queue.
Returns a single event from the queue. If the queue is empty this function
will wait until one is created. While the program is waiting it will sleep
in an idle state. This is important for programs that want to share the
system with other applications.
:rtype: Event
'''
pygame.display._video_init_check()
return Event(0, sdl_event=SDL_WaitEventAndReturn())
def peek(typelist=None):
'''Test if event types are waiting on the queue.
Returns true if there are any events of the given type waiting on the
queue. If a sequence of event types is passed, this will return True if
any of those events are on the queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to look for.
:rtype: bool
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = SDL_EVENTMASK(int(typelist))
SDL_PumpEvents()
events = SDL_PeepEvents(1, SDL_PEEKEVENT, mask)
if typelist is None:
if events:
return Event(0, sdl_event=events[0], keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT) # XXX deviation from pygame
return len(events) > 0
def clear(typelist=None):
'''Remove all events from the queue.
Remove all events or events of a specific type from the queue. This has the
same effect as `get` except nothing is returned. This can be slightly more
effecient when clearing a full event queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to remove.
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
            mask = SDL_EVENTMASK(int(typelist))
SDL_PumpEvents()
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
_event_names = {
SDL_ACTIVEEVENT: 'ActiveEvent',
SDL_KEYDOWN: 'KeyDown',
SDL_KEYUP: 'KeyUp',
SDL_MOUSEMOTION: 'MouseMotion',
SDL_MOUSEBUTTONDOWN:'MouseButtonDown',
SDL_MOUSEBUTTONUP: 'MouseButtonUp',
SDL_JOYAXISMOTION: 'JoyAxisMotion',
SDL_JOYBALLMOTION: 'JoyBallMotion',
SDL_JOYHATMOTION: 'JoyHatMotion',
SDL_JOYBUTTONUP: 'JoyButtonUp',
SDL_JOYBUTTONDOWN: 'JoyButtonDown',
SDL_QUIT: 'Quit',
SDL_SYSWMEVENT: 'SysWMEvent',
SDL_VIDEORESIZE: 'VideoResize',
SDL_VIDEOEXPOSE: 'VideoExpose',
SDL_NOEVENT: 'NoEvent'
}
def event_name(event_type):
'''Get the string name from an event id.
Pygame uses integer ids to represent the event types. If you want to
report these types to the user they should be converted to strings. This
will return a the simple name for an event type. The string is in the
CamelCase style.
:Parameters:
- `event_type`: int
:rtype: str
'''
if event_type >= SDL_USEREVENT and event_type < SDL_NUMEVENTS:
return 'UserEvent'
return _event_names.get(event_type, 'Unknown')
def set_blocked(typelist):
'''Control which events are allowed on the queue.
The given event types are not allowed to appear on the event queue. By
default all events can be placed on the queue. It is safe to disable an
event type multiple times.
If None is passed as the argument, this has the opposite effect and none of
the event types are allowed to be placed on the queue.
:note: events posted with `post` will not be blocked.
:Parameters:
`typelist` : int or sequence of int or None
Event type or list of event types to disallow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_IGNORE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_IGNORE)
else:
SDL_EventState(typelist, SDL_IGNORE)
def set_allowed(typelist):
'''Control which events are allowed on the queue.
The given event types are allowed to appear on the event queue. By default
all events can be placed on the queue. It is safe to enable an event type
multiple times.
If None is passed as the argument, this has the opposite effect and all of
the event types are allowed to be placed on the queue.
:Parameters:
`typelist` : int or sequence of int or None
Event type or list of event types to disallow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_ENABLE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_ENABLE)
else:
SDL_EventState(typelist, SDL_ENABLE)
def get_blocked(typelist):
'''Test if a type of event is blocked from the queue.
Returns true if the given event type is blocked from the queue.
:Parameters:
        - `typelist`: int or sequence of int or None
    :rtype: bool
'''
pygame.display._video_init_check()
    if typelist is None:
return SDL_EventState(SDL_ALLEVENTS, SDL_QUERY) == SDL_ENABLE
elif hasattr(typelist, '__len__'): # XXX undocumented behaviour
for val in typelist:
if SDL_EventState(val, SDL_QUERY) == SDL_ENABLE:
return True
return False
else:
return SDL_EventState(typelist, SDL_QUERY) == SDL_ENABLE
def set_grab(grab):
'''Control the sharing of input devices with other applications.
When your program runs in a windowed environment, it will share the mouse
and keyboard devices with other applications that have focus. If your
program sets the event grab to True, it will lock all input into your
program.
It is best to not always grab the input, since it prevents the user from
doing other things on their system.
:Parameters:
- `grab`: bool
'''
pygame.display._video_init_check()
if grab:
SDL_WM_GrabInput(SDL_GRAB_ON)
else:
SDL_WM_GrabInput(SDL_GRAB_OFF)
def get_grab():
'''Test if the program is sharing input devices.
Returns true when the input events are grabbed for this application. Use
`set_grab` to control this state.
:rtype: bool
'''
pygame.display._video_init_check()
return SDL_WM_GrabInput(SDL_GRAB_QUERY) == SDL_GRAB_ON
_USEROBJECT_CHECK1 = int(0xdeadbeef) # signed
_USEROBJECT_CHECK2 = 0xfeedf00d
_user_event_objects = {}
_user_event_nextid = 1
def post(event):
'''Place a new event on the queue.
This places a new event at the end of the event queue. These Events will
later be retrieved from the other queue functions.
This is usually used for placing pygame.USEREVENT events on the queue.
Although any type of event can be placed, if using the sytem event types
your program should be sure to create the standard attributes with
appropriate values.
:Parameters:
`event` : Event
Event to add to the queue.
'''
global _user_event_nextid
pygame.display._video_init_check()
sdl_event = SDL_Event(event.type)
sdl_event.user.code = _USEROBJECT_CHECK1
sdl_event.user.data1 = c_void_p(_USEROBJECT_CHECK2)
sdl_event.user.data2 = c_void_p(_user_event_nextid)
_user_event_objects[_user_event_nextid] = event
_user_event_nextid += 1
SDL_PushEvent(sdl_event)
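# Example (a sketch; the attribute names below are made up for illustration):
#
#   import pygame.locals
#   ev = Event(pygame.locals.USEREVENT, code='refresh', payload=42)
#   post(ev)
#   # ...the event later comes back out of get()/poll() with ev.code and
#   # ev.payload intact.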
class Event:
def __init__(self, event_type, event_dict=None, sdl_event=None,
keep_userdata=False, **attributes):
'''Create a new event object.
Creates a new event with the given type. The event is created with the
given attributes and values. The attributes can come from a dictionary
argument, or as string keys from a dictionary.
The given attributes will be readonly attributes on the new event
object itself. These are the only attributes on the Event object,
there are no methods attached to Event objects.
:Parameters:
`event_type` : int
Event type to create
`event_dict` : dict
Dictionary of attributes to assign.
`sdl_event` : `SDL_Event`
Construct a Pygame event from the given SDL_Event; used
internally.
`keep_userdata` : bool
Used internally.
`attributes` : additional keyword arguments
Additional attributes to assign to the event.
'''
if sdl_event:
uevent = cast(pointer(sdl_event), POINTER(SDL_UserEvent)).contents
if uevent.code == _USEROBJECT_CHECK1 and \
uevent.data1 == _USEROBJECT_CHECK2 and \
uevent.data2 in _user_event_objects:
# An event that was posted; grab dict from local store.
                id = uevent.data2
for key, value in _user_event_objects[id].__dict__.items():
setattr(self, key, value)
# Free memory unless just peeking
if not keep_userdata:
del _user_event_objects[id]
else:
# Standard SDL event
self.type = sdl_event.type
if self.type == SDL_QUIT:
pass
elif self.type == SDL_ACTIVEEVENT:
self.gain = sdl_event.gain
self.state = sdl_event.state
elif self.type == SDL_KEYDOWN:
self.unicode = sdl_event.keysym.unicode
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_KEYUP:
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_MOUSEMOTION:
self.pos = (sdl_event.x, sdl_event.y)
self.rel = (sdl_event.xrel, sdl_event.yrel)
self.buttons = (sdl_event.state & SDL_BUTTON(1) != 0,
sdl_event.state & SDL_BUTTON(2) != 0,
sdl_event.state & SDL_BUTTON(3) != 0)
elif self.type in (SDL_MOUSEBUTTONDOWN, SDL_MOUSEBUTTONUP):
self.pos = (sdl_event.x, sdl_event.y)
self.button = sdl_event.button
elif self.type == SDL_JOYAXISMOTION:
self.joy = sdl_event.which
self.axis = sdl_event.axis
self.value = sdl_event.value / 32767.0
elif self.type == SDL_JOYBALLMOTION:
self.joy = sdl_event.which
self.ball = sdl_event.ball
self.rel = (sdl_event.xrel, sdl_event.yrel)
elif self.type == SDL_JOYHATMOTION:
self.joy = sdl_event.which
self.hat = sdl_event.hat
hx = hy = 0
if sdl_event.value & SDL_HAT_UP:
hy = 1
if sdl_event.value & SDL_HAT_DOWN:
hy = -1
if sdl_event.value & SDL_HAT_RIGHT:
hx = 1
if sdl_event.value & SDL_HAT_LEFT:
hx = -1
self.value = (hx, hy)
elif self.type in (SDL_JOYBUTTONUP, SDL_JOYBUTTONDOWN):
self.joy = sdl_event.which
self.button = sdl_event.button
elif self.type == SDL_VIDEORESIZE:
self.size = (sdl_event.w, sdl_event.h)
self.w = sdl_event.w
self.h = sdl_event.h
elif self.type == SDL_VIDEOEXPOSE:
pass
elif self.type == SDL_SYSWMEVENT:
pass ### XXX: not implemented
elif self.type >= SDL_USEREVENT and self.type < SDL_NUMEVENTS:
self.code = sdl_event.code
else:
# Create an event (not from event queue)
self.type = event_type
if event_dict:
for key, value in event_dict.items():
setattr(self, key, value)
for key, value in attributes.items():
setattr(self, key, value)
# Bizarre undocumented but used by some people.
self.dict = self.__dict__
def __repr__(self):
d = copy(self.__dict__)
del d['type']
return '<Event(%d-%s %r)>' % \
(self.type, event_name(self.type), d)
def __nonzero__(self):
return self.type != SDL_NOEVENT
EventType = Event
| agpl-3.0 | 516,833,392,905,349,000 | -6,167,475,423,006,733,000 | 34.084559 | 79 | 0.627947 | false |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/parallel/controller/dependency.py | 2 | 7155 | """Dependency utilities
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from types import ModuleType
from IPython.parallel.client.asyncresult import AsyncResult
from IPython.parallel.error import UnmetDependency
from IPython.parallel.util import interactive
from IPython.utils import py3compat
from IPython.utils.pickleutil import can, uncan
class depend(object):
"""Dependency decorator, for use with tasks.
`@depend` lets you define a function for engine dependencies
just like you use `apply` for tasks.
Examples
--------
::
@depend(df, a,b, c=5)
        def f(m,n,p):
            ...
view.apply(f, 1,2,3)
will call df(a,b,c=5) on the engine, and if it returns False or
raises an UnmetDependency error, then the task will not be run
and another engine will be tried.
"""
def __init__(self, f, *args, **kwargs):
self.f = f
self.args = args
self.kwargs = kwargs
def __call__(self, f):
return dependent(f, self.f, *self.args, **self.kwargs)
class dependent(object):
"""A function that depends on another function.
    This is implemented as an object rather than via the closure a
    traditional decorator would use, because closures are not picklable.
"""
def __init__(self, f, df, *dargs, **dkwargs):
self.f = f
self.func_name = getattr(f, '__name__', 'f')
self.df = df
self.dargs = dargs
self.dkwargs = dkwargs
def check_dependency(self):
if self.df(*self.dargs, **self.dkwargs) is False:
raise UnmetDependency()
def __call__(self, *args, **kwargs):
return self.f(*args, **kwargs)
if not py3compat.PY3:
@property
def __name__(self):
return self.func_name
@interactive
def _require(*modules, **mapping):
"""Helper for @require decorator."""
from IPython.parallel.error import UnmetDependency
from IPython.utils.pickleutil import uncan
user_ns = globals()
for name in modules:
try:
exec 'import %s' % name in user_ns
except ImportError:
raise UnmetDependency(name)
for name, cobj in mapping.items():
user_ns[name] = uncan(cobj, user_ns)
return True
def require(*objects, **mapping):
"""Simple decorator for requiring local objects and modules to be available
when the decorated function is called on the engine.
Modules specified by name or passed directly will be imported
prior to calling the decorated function.
Objects other than modules will be pushed as a part of the task.
Functions can be passed positionally,
and will be pushed to the engine with their __name__.
Other objects can be passed by keyword arg.
Examples
--------
In [1]: @require('numpy')
...: def norm(a):
...: return numpy.linalg.norm(a,2)
In [2]: foo = lambda x: x*x
In [3]: @require(foo)
...: def bar(a):
...: return foo(1-a)
"""
names = []
for obj in objects:
if isinstance(obj, ModuleType):
obj = obj.__name__
if isinstance(obj, basestring):
names.append(obj)
elif hasattr(obj, '__name__'):
mapping[obj.__name__] = obj
else:
raise TypeError("Objects other than modules and functions "
"must be passed by kwarg, but got: %s" % type(obj)
)
for name, obj in mapping.items():
mapping[name] = can(obj)
return depend(_require, *names, **mapping)
class Dependency(set):
"""An object for representing a set of msg_id dependencies.
Subclassed from set().
Parameters
----------
dependencies: list/set of msg_ids or AsyncResult objects or output of Dependency.as_dict()
The msg_ids to depend on
all : bool [default True]
Whether the dependency should be considered met when *all* depending tasks have completed
or only when *any* have been completed.
success : bool [default True]
Whether to consider successes as fulfilling dependencies.
failure : bool [default False]
Whether to consider failures as fulfilling dependencies.
If `all=success=True` and `failure=False`, then the task will fail with an ImpossibleDependency
as soon as the first depended-upon task fails.
"""
all=True
success=True
failure=True
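    # For illustration (a sketch): a dependency that is met once both
    # msg_ids have completed successfully.
    #
    #   dep = Dependency(['msg-a', 'msg-b'], all=True,
    #                    success=True, failure=False)
    #   dep.check(completed={'msg-a', 'msg-b'})   # -> True
    #   dep.check(completed={'msg-a'})            # -> False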
def __init__(self, dependencies=[], all=True, success=True, failure=False):
if isinstance(dependencies, dict):
# load from dict
all = dependencies.get('all', True)
success = dependencies.get('success', success)
failure = dependencies.get('failure', failure)
dependencies = dependencies.get('dependencies', [])
ids = []
# extract ids from various sources:
if isinstance(dependencies, (basestring, AsyncResult)):
dependencies = [dependencies]
for d in dependencies:
if isinstance(d, basestring):
ids.append(d)
elif isinstance(d, AsyncResult):
ids.extend(d.msg_ids)
else:
raise TypeError("invalid dependency type: %r"%type(d))
set.__init__(self, ids)
self.all = all
if not (success or failure):
raise ValueError("Must depend on at least one of successes or failures!")
self.success=success
self.failure = failure
def check(self, completed, failed=None):
"""check whether our dependencies have been met."""
if len(self) == 0:
return True
against = set()
if self.success:
against = completed
if failed is not None and self.failure:
against = against.union(failed)
if self.all:
return self.issubset(against)
else:
return not self.isdisjoint(against)
def unreachable(self, completed, failed=None):
"""return whether this dependency has become impossible."""
if len(self) == 0:
return False
against = set()
if not self.success:
against = completed
if failed is not None and not self.failure:
against = against.union(failed)
if self.all:
return not self.isdisjoint(against)
else:
return self.issubset(against)
def as_dict(self):
"""Represent this dependency as a dict. For json compatibility."""
return dict(
dependencies=list(self),
all=self.all,
success=self.success,
failure=self.failure
)
__all__ = ['depend', 'require', 'dependent', 'Dependency']
| apache-2.0 | -9,105,054,983,908,929,000 | 2,271,436,290,797,649,700 | 30.8 | 99 | 0.58211 | false |
jeremiahyan/odoo | odoo/addons/base/tests/test_res_users.py | 4 | 4686 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase, Form, tagged
class TestUsers(TransactionCase):
def test_name_search(self):
""" Check name_search on user. """
User = self.env['res.users']
test_user = User.create({'name': 'Flad the Impaler', 'login': 'vlad'})
like_user = User.create({'name': 'Wlad the Impaler', 'login': 'vladi'})
other_user = User.create({'name': 'Nothing similar', 'login': 'nothing similar'})
all_users = test_user | like_user | other_user
res = User.name_search('vlad', operator='ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, test_user)
res = User.name_search('vlad', operator='not ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, all_users)
res = User.name_search('', operator='ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, all_users)
res = User.name_search('', operator='not ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, User)
res = User.name_search('lad', operator='ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, test_user | like_user)
res = User.name_search('lad', operator='not ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, other_user)
def test_user_partner(self):
""" Check that the user partner is well created """
User = self.env['res.users']
Partner = self.env['res.partner']
Company = self.env['res.company']
company_1 = Company.create({'name': 'company_1'})
company_2 = Company.create({'name': 'company_2'})
partner = Partner.create({
'name': 'Bob Partner',
'company_id': company_2.id
})
# case 1 : the user has no partner
test_user = User.create({
'name': 'John Smith',
'login': 'jsmith',
'company_ids': [company_1.id],
'company_id': company_1.id
})
self.assertFalse(
test_user.partner_id.company_id,
"The partner_id linked to a user should be created without any company_id")
# case 2 : the user has a partner
test_user = User.create({
'name': 'Bob Smith',
'login': 'bsmith',
'company_ids': [company_1.id],
'company_id': company_1.id,
'partner_id': partner.id
})
self.assertEqual(
test_user.partner_id.company_id,
company_1,
"If the partner_id of a user has already a company, it is replaced by the user company"
)
def test_change_user_company(self):
""" Check the partner company update when the user company is changed """
User = self.env['res.users']
Company = self.env['res.company']
test_user = User.create({'name': 'John Smith', 'login': 'jsmith'})
company_1 = Company.create({'name': 'company_1'})
company_2 = Company.create({'name': 'company_2'})
test_user.company_ids += company_1
test_user.company_ids += company_2
# 1: the partner has no company_id, no modification
test_user.write({
'company_id': company_1.id
})
self.assertFalse(
test_user.partner_id.company_id,
"On user company change, if its partner_id has no company_id,"
"the company_id of the partner_id shall NOT be updated")
# 2: the partner has a company_id different from the new one, update it
test_user.partner_id.write({
'company_id': company_1.id
})
test_user.write({
'company_id': company_2.id
})
self.assertEqual(
test_user.partner_id.company_id,
company_2,
"On user company change, if its partner_id has already a company_id,"
"the company_id of the partner_id shall be updated"
)
@tagged('post_install', '-at_install')
class TestUsers2(TransactionCase):
def test_reified_groups(self):
""" The groups handler doesn't use the "real" view with pseudo-fields
during installation, so it always works (because it uses the normal
groups_id field).
"""
# use the specific views which has the pseudo-fields
f = Form(self.env['res.users'], view='base.view_users_form')
f.name = "bob"
f.login = "bob"
user = f.save()
self.assertIn(self.env.ref('base.group_user'), user.groups_id)
| gpl-3.0 | 2,274,143,095,645,616,000 | 4,039,819,943,217,177,000 | 34.770992 | 99 | 0.577892 | false |
klyone/PacketTesterXML | PTXML_server.py | 1 | 1324 | #! /usr/bin/python
# ******************************************************************************
# @file PTXML_server.py
# @brief Example using PTXMLServer
#
# Copyright (C) 2014
#
# @author Miguel Jimenez Lopez <[email protected]>
#
# ******************************************************************************
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http//www.gnu.org/licenses/>.
# ******************************************************************************
import sys
import PacketTesterXML
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Usage: PTXML_server.py <XML server configuration file>"
sys.exit(-1)
file_xml = sys.argv[1]
server = PacketTesterXML.PTXMLServer(file_xml)
server.run(1)
| lgpl-3.0 | -1,858,336,988,028,273,200 | -3,573,268,709,772,024,300 | 32.948718 | 81 | 0.592145 | false |
shikhar394/uptane | tests/test_der_encoder.py | 1 | 21951 | """
<Program Name>
test_der_encoder.py
<Purpose>
Unit testing for DER Encoding, uptane/encoding/asn1_codec.py
"""
from __future__ import print_function
from __future__ import unicode_literals
import uptane # Import before TUF modules; may change tuf.conf values.
import tuf.formats
import tuf.keys
import tuf.repository_tool as repo_tool
import uptane.formats
import uptane.common
import uptane.encoding.asn1_codec as asn1_codec
import uptane.encoding.timeserver_asn1_coder as timeserver_asn1_coder
import uptane.encoding.ecu_manifest_asn1_coder as ecu_manifest_asn1_coder
import uptane.encoding.asn1_definitions as asn1_spec
import pyasn1.codec.der.encoder as p_der_encoder
import pyasn1.codec.der.decoder as p_der_decoder
from pyasn1.type import tag, univ
import sys # to test Python version 2 vs 3, for byte string behavior
import hashlib
import unittest
import os
import time
import copy
import shutil
# For temporary convenience
import demo # for generate_key, import_public_key, import_private_key
TEST_DATA_DIR = os.path.join(uptane.WORKING_DIR, 'tests', 'test_data')
TEMP_TEST_DIR = os.path.join(TEST_DATA_DIR, 'temp_test_encoding')
test_signing_key = None
def destroy_temp_dir():
# Clean up anything that may currently exist in the temp test directory.
if os.path.exists(TEMP_TEST_DIR):
shutil.rmtree(TEMP_TEST_DIR)
def setUpModule():
"""
This is run once for the full module, before all tests.
It prepares some globals, including a single Primary ECU client instance.
  Eventually, it will also start up an OEM Repository Server,
  Director Server, and Time Server; currently, it requires them to be already
  running.
"""
global primary_ecu_key
destroy_temp_dir()
private_key_fname = os.path.join(
os.getcwd(), 'demo', 'keys', 'director')
global test_signing_key
test_signing_key = repo_tool.import_ed25519_privatekey_from_file(
private_key_fname, 'pw')
def tearDownModule():
"""This is run once for the full module, after all tests."""
destroy_temp_dir()
class TestASN1(unittest.TestCase):
"""
"unittest"-style test class for the Primary module in the reference
implementation
Please note that these tests are NOT entirely independent of each other.
Several of them build on the results of previous tests. This is an unusual
pattern but saves code and works at least for now.
"""
def test_01_encode_token(self):
# Test the ASN.1 data definitions in asn1_spec.
# Begin with a very simple object, which simply encodes an integer.
# Create ASN.1 object for a token (nonce-like int provided by a Secondary
# in order to validate recency of the time returned).
t = asn1_spec.Token(42)
# Encode as DER
t_der = p_der_encoder.encode(t)
# Decode back into ASN1
# decode() returns a 2-tuple. The first element is the decoded portion.
# The second element (which should be empty) is any remainder after the
# decoding.
(decoded, remainder) = p_der_decoder.decode(
t_der, asn1_spec.Token())
self.assertFalse(remainder)
self.assertEqual(t, decoded)
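    # For reference: since a Token is just an INTEGER, the DER encoding of
    # Token(42) is the 3-byte TLV b'\x02\x01*' (tag 0x02, length 1, 42 == '*').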
def test_02_encode_tokens(self):
# Continue to test the ASN.1 data definitions in asn1_spec.
# This will be an array of tokens that each encode an integer.
# We create a Tokens object with the subtype definition required by the
# classes that contain Tokens objects.
tokens = asn1_spec.Tokens().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
# Add a first token to the list.
t = asn1_spec.Token(42)
    tokens.setComponentByPosition(0, t, False)  # the third argument is pyasn1's verifyConstraints; False skips constraint checking
# tokens should look like:
# Tokens(tagSet=TagSet((), Tag(tagClass=128, tagFormat=32, tagId=1))).setComponents(Token(42))
tokens_der = p_der_encoder.encode(tokens)
# tokens_der should look like: '\xa1\x03\x02\x01*'
(tokens_again, remainder) = p_der_decoder.decode(
tokens_der,
asn1_spec.Tokens().subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 1)))
self.assertFalse(remainder)
self.assertEqual(tokens, tokens_again)
def test_03_encode_tokensandtimestamp(self):
"""
Using lower level code (than asn1_codec), test translation of a timeserver
attestation from standard Uptane Python dictionary format to an ASN.1
representation, then into a DER encoding, and then back to the original
form.
"""
sample_attestation = {
'nonces': [15856], 'time': '2017-03-01T19:30:45Z'}
uptane.formats.TIMESERVER_ATTESTATION_SCHEMA.check_match(sample_attestation)
asn1_attestation = timeserver_asn1_coder.get_asn_signed(sample_attestation)
der_attestation = p_der_encoder.encode(asn1_attestation)
self.assertTrue(is_valid_nonempty_der(der_attestation))
# Decoding requires that we provide an object with typing that precisely
# matches what we expect to decode. This is such an object.
exemplar_object = asn1_spec.TokensAndTimestamp().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatConstructed,
0))
(asn1_attestation_again, remainder) = p_der_decoder.decode(
der_attestation, asn1Spec=exemplar_object)
self.assertFalse(remainder)
self.assertEqual(asn1_attestation, asn1_attestation_again)
def test_04_encode_full_signable_attestation(self):
"""
Tests the conversion of signable time attestations from Python dictionaries
to ASN.1/DER objects, and back.
Similar to test 03 above, except that it encodes the signable dictionary
(signed and signatures) instead of what is effectively just the 'signed'
portion.
Employing the asn1_codec code instead of using
- the lower level code from pyasn1.der, asn1_spec, and
timeserver_asn1_coder.
- using a signable version of the time attestation (encapsulated in
'signed', with 'signatures')
This test doesn't re-sign the attestation over the hash of the DER encoding.
One of the other tests below does that. The signatures here remain over
the human-readable internal representation of the time attestation.
"""
# Using str() here because in Python2, I'll get u'' if I don't, and the
# self.assertEqual(signable_attestation, attestation_again) will fail
# because they won't both have u' prefixes.
signable_attestation = {
str('signatures'): [
{str('keyid'):
str('79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'),
str('sig'):
str('a5ea6a3b685ad64f96c8c12145beda4efafddfac60bcdb45def35fe43c7d1150a182a1b50a1463bfffb0ef8d30b6203aa8b5365b0b7176312e1e9d7e355e550e'),
str('method'): str('ed25519')}],
str('signed'): {str('nonces'): [1],
str('time'): str('2017-03-08T17:09:56Z')}}
# str('signed'): {str('nonces'): [834845858], str('time'): str('2017-03-01T19:30:45Z')},
# str('signatures'): [{str('keyid'): str('12'), str('method'): str('ed25519'), str('sig'): str('123495')}]}
# u'signed': {u'nonces': [834845858], u'time': u'2017-03-01T19:30:45Z'},
# u'signatures': [{u'keyid': u'12', u'method': u'ed25519', u'sig': u'12345'}]}
uptane.formats.SIGNABLE_TIMESERVER_ATTESTATION_SCHEMA.check_match(
signable_attestation)
# Converts it, without re-signing (default for this method).
der_attestation = asn1_codec.convert_signed_metadata_to_der(
signable_attestation)
self.assertTrue(is_valid_nonempty_der(der_attestation))
attestation_again = asn1_codec.convert_signed_der_to_dersigned_json(
der_attestation)
self.assertEqual(attestation_again, signable_attestation)
def test_05_encode_full_signable_attestation_manual(self):
"""
Similar to test 04 above, but performs much of the work manually.
Useful separately only for debugging.
Instead of just employing asn1_codec, uses lower level code from pyasn1.der,
asn1_spec, and timeserver_asn1_coder.
"""
signable_attestation = {
'signatures': [
{'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e',
'sig': 'a5ea6a3b685ad64f96c8c12145beda4efafddfac60bcdb45def35fe43c7d1150a182a1b50a1463bfffb0ef8d30b6203aa8b5365b0b7176312e1e9d7e355e550e',
'method': 'ed25519'}],
'signed': {'nonces': [1], 'time': '2017-03-08T17:09:56Z'}}
uptane.formats.SIGNABLE_TIMESERVER_ATTESTATION_SCHEMA.check_match(
signable_attestation)
json_signed = signable_attestation['signed']
asn_signed = timeserver_asn1_coder.get_asn_signed(json_signed)
asn_signatures_list = asn1_spec.Signatures().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 2))
i = 0 # Index for iterating through asn signatures
for pydict_sig in signable_attestation['signatures']:
asn_sig = asn1_spec.Signature()
asn_sig['keyid'] = asn1_spec.Keyid().subtype(
explicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatConstructed, 0))
asn_sig['keyid']['octetString'] = univ.OctetString(
hexValue=pydict_sig['keyid']).subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 1))
asn_sig['method'] = int(asn1_spec.SignatureMethod(
pydict_sig['method']))
asn_sig['value'] = asn1_spec.BinaryData().subtype(
explicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatConstructed, 2))
asn_sig['value']['octetString'] = univ.OctetString(
hexValue=pydict_sig['sig']).subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 1))
asn_signatures_list[i] = asn_sig # has no append method
i += 1
asn_signable = asn1_spec.TokensAndTimestampSignable()#.subtype(implicitTag=tag.Tag(
#tag.tagClassContext, tag.tagFormatConstructed, 3))
asn_signable['signed'] = asn_signed #considering using der_signed instead - requires changes
asn_signable['signatures'] = asn_signatures_list # TODO: Support multiple sigs, or integrate with TUF.
asn_signable['numberOfSignatures'] = len(asn_signatures_list)
der_attestation = p_der_encoder.encode(asn_signable)
self.assertTrue(is_valid_nonempty_der(der_attestation))
# Decoding requires that we provide an object with typing that precisely
# matches what we expect to decode. This is such an object.
exemplar_object = asn1_spec.TokensAndTimestampSignable()#.subtype(implicitTag=tag.Tag(
#tag.tagClassContext, tag.tagFormatConstructed, 3))
(asn1_attestation_again, remainder) = p_der_decoder.decode(
der_attestation, asn1Spec=exemplar_object)
self.assertFalse(remainder)
self.assertEqual(asn1_attestation_again, asn_signable)
# TODO: Test rest of the way back: ASN1 to Python dictionary.
# (This is in addition to the next test, which does that with the higher
# level code in asn1_codec.)
def test_06_encode_and_validate_resigned_time_attestation(self):
"""
Test timeserver attestation encoding and decoding, with signing over DER
('re-sign' functionality in asn1_codec) and signature validation.
"""
signable_attestation = {
str('signatures'): [
{str('keyid'):
str('79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'),
str('sig'):
str('a5ea6a3b685ad64f96c8c12145beda4efafddfac60bcdb45def35fe43c7d1150a182a1b50a1463bfffb0ef8d30b6203aa8b5365b0b7176312e1e9d7e355e550e'),
str('method'): str('ed25519')}],
str('signed'): {str('nonces'): [1],
str('time'): str('2017-03-08T17:09:56Z')}}
# Load the timeserver's private key to sign a time attestation, and public
# key to verify that signature.
timeserver_key = demo.import_private_key('timeserver')
timeserver_key_pub = demo.import_public_key('timeserver')
tuf.formats.ANYKEY_SCHEMA.check_match(timeserver_key)
tuf.formats.ANYKEY_SCHEMA.check_match(timeserver_key_pub)
# First, calculate what we'll be verifying at the end of this test.
# The re-signing in the previous line produces a signature over the SHA256
# hash of the DER encoding of the ASN.1 format of the 'signed' portion of
# signable_attestation. We produce it here so that we can check it against
# the result of encoding, resigning, and decoding.
der_signed = asn1_codec.convert_signed_metadata_to_der(
signable_attestation, only_signed=True)
der_signed_hash = hashlib.sha256(der_signed).digest()
# Now perform the actual conversion to ASN.1/DER of the full
# signable_attestation, replacing the signature (which was given as
# signatures over the Python 'signed' dictionary) with a signature over
# the hash of the DER encoding of the 'signed' ASN.1 data.
# This is the final product to be distributed back to a Primary client.
der_attestation = asn1_codec.convert_signed_metadata_to_der(
signable_attestation, private_key=timeserver_key, resign=True)
# Now, in order to test the final product, decode it back from DER into
# pyasn1 ASN.1, and convert back into Uptane's standard Python dictionary
# form.
pydict_again = asn1_codec.convert_signed_der_to_dersigned_json(
der_attestation)
# Check the extracted signature against the hash we produced earlier.
self.assertTrue(tuf.keys.verify_signature(
timeserver_key_pub,
pydict_again['signatures'][0],
der_signed_hash))
def test_07_encode_and_validate_resigned_time_attestation_again(self):
"""
This is a redundant test, repeating much of test 06 above with a slightly
different method.
"""
signable_attestation = {
str('signatures'): [
{str('keyid'):
str('79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'),
str('sig'):
str('a5ea6a3b685ad64f96c8c12145beda4efafddfac60bcdb45def35fe43c7d1150a182a1b50a1463bfffb0ef8d30b6203aa8b5365b0b7176312e1e9d7e355e550e'),
str('method'): str('ed25519')}],
str('signed'): {str('nonces'): [1],
str('time'): str('2017-03-08T17:09:56Z')}}
conversion_tester(
signable_attestation, 'time_attestation', self)
def test_10_ecu_manifest_asn1_conversion(self):
# First try the low-level asn1 conversion.
asn_signed = ecu_manifest_asn1_coder.get_asn_signed(
SAMPLE_ECU_MANIFEST_SIGNABLE['signed'])#ecu_manifest_signed_component)
# Convert back to a basic Python dictionary.
json_signed = ecu_manifest_asn1_coder.get_json_signed({'signed': asn_signed})
# Make sure that the result of conversion to ASN.1 and back is the same
# as the original.
self.assertEqual(json_signed, SAMPLE_ECU_MANIFEST_SIGNABLE['signed'])
def test_11_ecu_manifest_der_conversion(self):
conversion_tester(
SAMPLE_ECU_MANIFEST_SIGNABLE, 'ecu_manifest', self)
# Redundant tests. The above call should cover all of the following tests.
der_ecu_manifest = asn1_codec.convert_signed_metadata_to_der(
SAMPLE_ECU_MANIFEST_SIGNABLE, only_signed=True, datatype='ecu_manifest')
der_ecu_manifest = asn1_codec.convert_signed_metadata_to_der(
SAMPLE_ECU_MANIFEST_SIGNABLE, datatype='ecu_manifest')
pydict_ecu_manifest = asn1_codec.convert_signed_der_to_dersigned_json(
der_ecu_manifest, datatype='ecu_manifest')
self.assertEqual(
pydict_ecu_manifest, SAMPLE_ECU_MANIFEST_SIGNABLE)
def test_20_vehicle_manifest_der_conversion(self):
uptane.formats.SIGNABLE_VEHICLE_VERSION_MANIFEST_SCHEMA.check_match(
SAMPLE_VEHICLE_MANIFEST_SIGNABLE)
conversion_tester(
SAMPLE_VEHICLE_MANIFEST_SIGNABLE, 'vehicle_manifest', self)
def conversion_tester(signable_pydict, datatype, cls): # cls: clunky
"""
Tests each of the different kinds of conversions into ASN.1/DER, and tests
converting back. In one type of conversion, compares to make sure the data
has not changed.
This function takes as a third parameter the unittest.TestCase object whose
functions (assertTrue etc) it can use. This is awkward and inappropriate. :P
Find a different means of providing modularity instead of this one.
(Can't just have this method in the class above because it would be run as
a test. Could have default parameters and do that, but that's clunky, too.)
Does unittest allow/test private functions in UnitTest classes?
"""
# Test type 1: only-signed
# Convert and return only the 'signed' portion, the metadata payload itself,
# without including any signatures.
signed_der = asn1_codec.convert_signed_metadata_to_der(
signable_pydict, only_signed=True, datatype=datatype)
cls.assertTrue(is_valid_nonempty_der(signed_der))
# TODO: Add function to asn1_codec that will convert signed-only DER back to
# Python dictionary. Might be useful, and is useful for testing only_signed
# in any case.
# Test type 2: full conversion
# Convert the full signable ('signed' and 'signatures'), maintaining the
# existing signature in a new format and encoding.
signable_der = asn1_codec.convert_signed_metadata_to_der(
signable_pydict, datatype=datatype)
cls.assertTrue(is_valid_nonempty_der(signable_der))
# Convert it back.
signable_reverted = asn1_codec.convert_signed_der_to_dersigned_json(
signable_der, datatype=datatype)
# Ensure the original is equal to what is converted back.
cls.assertEqual(signable_pydict, signable_reverted)
# Test type 3: full conversion with re-signing
# Convert the full signable ('signed' and 'signatures'), but discarding the
# original signatures and re-signing over, instead, the hash of the converted,
# ASN.1/DER 'signed' element.
resigned_der = asn1_codec.convert_signed_metadata_to_der(
signable_pydict, resign=True, private_key=test_signing_key,
datatype=datatype)
cls.assertTrue(is_valid_nonempty_der(resigned_der))
# Convert the re-signed DER manifest back in order to split it up.
resigned_reverted = asn1_codec.convert_signed_der_to_dersigned_json(
resigned_der, datatype=datatype)
resigned_signature = resigned_reverted['signatures'][0]
# Check the signature on the re-signed DER manifest:
cls.assertTrue(uptane.common.verify_signature_over_metadata(
test_signing_key,
resigned_signature,
resigned_reverted['signed'],
datatype=datatype,
metadata_format='der'))
# The signatures will not match, because a new signature was made, but the
# 'signed' elements should match when converted back.
cls.assertEqual(
signable_pydict['signed'], resigned_reverted['signed'])
def is_valid_nonempty_der(der_string):
"""
Currently a hacky test to see if the result is a non-empty byte string.
This CAN raise false failures, stochastically, in Python2. In Python2,
where bytes and str are the same type, we check to see if, anywhere in the
string, there is a character requiring a \\x escape, as would almost
certainly happen in an adequately long DER string of bytes. As a result,
short or very regular strings may raise false failures in Python2.
The best way to really do this test is to decode the DER and see if
believable ASN.1 has come out of it.
"""
if not der_string:
return False
elif sys.version_info.major < 3:
    return '\\x' in repr(der_string)
else:
return isinstance(der_string, bytes)
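# Illustrative sketch of the heuristic above (not part of the test suite):
#   >>> is_valid_nonempty_der(b'0\x82\x01\x00')  # plausible DER header bytes
#   True   # on Python 2 via the '\\x'-in-repr check; on Python 3 via isinstance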
SAMPLE_ECU_MANIFEST_SIGNABLE = {
'signed': {
'timeserver_time': '2017-03-27T16:19:17Z',
'previous_timeserver_time': '2017-03-27T16:19:17Z',
'ecu_serial': '22222',
'attacks_detected': '',
'installed_image': {
'filepath': '/secondary_firmware.txt',
'fileinfo': {
'length': 37,
'hashes': {
'sha256': '6b9f987226610bfed08b824c93bf8b2f59521fce9a2adef80c495f363c1c9c44',
'sha512': '706c283972c5ae69864b199e1cdd9b4b8babc14f5a454d0fd4d3b35396a04ca0b40af731671b74020a738b5108a78deb032332c36d6ae9f31fae2f8a70f7e1ce'}}}},
'signatures': [{
'sig': '42e6f4b398dbad0404cca847786a926972b54ca2ae71c7334f3d87fc62a10d08e01f7b5aa481cd3add61ef36f2037a9f68beca9f6ea26d2f9edc6f4ba0ba2a06',
'method': 'ed25519',
'keyid': '49309f114b857e4b29bfbff1c1c75df59f154fbc45539b2eb30c8a867843b2cb'}]}
SAMPLE_VEHICLE_MANIFEST_SIGNABLE = {
'signed': {
'primary_ecu_serial': '11111',
'vin': '111',
'ecu_version_manifests': {
'22222': [{
'signed': {
'previous_timeserver_time': '2017-03-31T15:48:31Z',
'timeserver_time': '2017-03-31T15:48:31Z',
'ecu_serial': '22222',
'attacks_detected': '',
'installed_image': {
'filepath': '/secondary_firmware.txt',
'fileinfo': {
'length': 37,
'hashes': {
'sha256': '6b9f987226610bfed08b824c93bf8b2f59521fce9a2adef80c495f363c1c9c44',
'sha512': '706c283972c5ae69864b199e1cdd9b4b8babc14f5a454d0fd4d3b35396a04ca0b40af731671b74020a738b5108a78deb032332c36d6ae9f31fae2f8a70f7e1ce'}}}},
'signatures': [{
'keyid': '49309f114b857e4b29bfbff1c1c75df59f154fbc45539b2eb30c8a867843b2cb',
'sig': '40069be1dd6f3fc091300307d61bc1646683a3ab8ebefac855bec0082c6fa067136800b744a2276564d9216cfcaafdea3976fc7f2128d2454d8d46bac79ebe05',
'method': 'ed25519'}]}]}},
'signatures': [{
'keyid': '9a406d99e362e7c93e7acfe1e4d6585221315be817f350c026bbee84ada260da',
'sig': '222e3fe0f3aa4fd14e163ec68f61954a9f4714d6d91d7114190e0a19a5ecc1cc43d9684e99dd8082c519815a01dd2e55a7a63d1467612cfb360937178586530c',
'method': 'ed25519'}]}
# Run unit tests.
if __name__ == '__main__':
unittest.main()
| mit | 127,348,835,078,281,060 | -8,766,238,894,575,504,000 | 35.342715 | 163 | 0.70589 | false |
Filechaser/nzbToMedia | libs/beetsplug/permissions.py | 7 | 3087 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
permissions:
file: 644
dir: 755
"""
import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry
def convert_perm(perm):
"""If the perm is a int it will first convert it to a string and back
to an oct int. Else it just converts it to oct.
"""
if isinstance(perm, int):
return int(bytes(perm), 8)
else:
return int(perm, 8)
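# Example sketch of the conversion above: integer and string permissions are
# both read as octal, so these agree.
#   >>> convert_perm(644) == convert_perm('644') == 0o644
#   True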
def check_permissions(path, permission):
"""Checks the permissions of a path.
"""
return oct(os.stat(path).st_mode & 0o777) == oct(permission)
def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path.
"""
return [ancestor
for ancestor in ancestry(item)
if ancestor.startswith(library)][1:]
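# Sketch of the ancestry walk above, assuming a library rooted at '/music':
# an item at '/music/artist/album/track.mp3' yields
# ['/music/artist', '/music/artist/album'] (the library root itself is
# dropped by the [1:] slice).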
class Permissions(BeetsPlugin):
def __init__(self):
super(Permissions, self).__init__()
# Adding defaults.
self.config.add({
u'file': 644,
u'dir': 755
})
self.register_listener('item_imported', permissions)
self.register_listener('album_imported', permissions)
def permissions(lib, item=None, album=None):
"""Running the permission fixer.
"""
# Getting the config.
file_perm = config['permissions']['file'].get()
dir_perm = config['permissions']['dir'].get()
# Converts permissions to oct.
file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm)
# Create chmod_queue.
file_chmod_queue = []
if item:
file_chmod_queue.append(item.path)
elif album:
for album_item in album.items():
file_chmod_queue.append(album_item.path)
# A set of directories to change permissions for.
dir_chmod_queue = set()
for path in file_chmod_queue:
# Changing permissions on the destination file.
os.chmod(util.bytestring_path(path), file_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), file_perm):
message = u'There was a problem setting permission on {}'.format(
path)
print(message)
# Adding directories to the directory chmod queue.
dir_chmod_queue.update(
dirs_in_library(lib.directory,
path))
# Change permissions for the directories.
for path in dir_chmod_queue:
        # Changing permissions on the destination directory.
os.chmod(util.bytestring_path(path), dir_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), dir_perm):
message = u'There was a problem setting permission on {}'.format(
path)
print(message)
| gpl-3.0 | -750,689,252,959,655,600 | 7,130,677,503,105,125,000 | 29.264706 | 78 | 0.629738 | false |
kiddinn/plaso | tests/test_lib.py | 1 | 6573 | # -*- coding: utf-8 -*-
"""Shared functions and classes for testing."""
import io
import os
import re
import shutil
import tempfile
import unittest
from dfdatetime import time_elements
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver
# The path to top of the Plaso source tree.
PROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# The paths below are all derived from the project path directory.
# They are enumerated explicitly here so that they can be overwritten for
# compatibility with different build systems.
ANALYSIS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'analysis')
ANALYZERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'analyzers')
CLI_HELPERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'cli', 'helpers')
CONTAINERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'containers')
DATA_PATH = os.path.join(PROJECT_PATH, 'data')
OUTPUT_PATH = os.path.join(PROJECT_PATH, 'plaso', 'output')
PARSERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'parsers')
PREPROCESSORS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'preprocessors')
TEST_DATA_PATH = os.path.join(PROJECT_PATH, 'test_data')
def GetTestFilePath(path_segments):
"""Retrieves the path of a file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(TEST_DATA_PATH, *path_segments)
def CopyTimestampFromString(time_string):
"""Copies a date and time string to a Plaso timestamp.
Args:
time_string (str): a date and time string formatted as:
"YYYY-MM-DD hh:mm:ss.######[+-]##:##", where # are numeric digits
ranging from 0 to 9 and the seconds fraction can be either 3 or 6
digits. The time of day, seconds fraction and timezone offset are
optional. The default timezone is UTC.
Returns:
int: timestamp which contains the number of microseconds since January 1,
1970, 00:00:00 UTC.
Raises:
ValueError: if the time string is invalid or not supported.
"""
date_time = time_elements.TimeElementsInMicroseconds()
date_time.CopyFromDateTimeString(time_string)
return date_time.GetPlasoTimestamp()
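# Example sketch of the helper above (the result is the number of
# microseconds since 1970-01-01 00:00:00 UTC):
#   timestamp = CopyTimestampFromString('2017-03-08 17:09:56.123456')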
class BaseTestCase(unittest.TestCase):
"""The base test case."""
# Show full diff results, part of TestCase so does not follow our naming
# conventions.
maxDiff = None
def _GetDataFilePath(self, path_segments):
"""Retrieves the path of a file in the data directory.
Args:
path_segments (list[str]): path segments inside the data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(DATA_PATH, *path_segments)
def _GetTestFileEntry(self, path_segments):
"""Creates a file entry that references a file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
dfvfs.FileEntry: file entry.
Raises:
SkipTest: if the path inside the test data directory does not exist and
the test should be skipped.
"""
path_spec = self._GetTestFilePathSpec(path_segments)
return path_spec_resolver.Resolver.OpenFileEntry(path_spec)
def _GetTestFilePath(self, path_segments):
"""Retrieves the path of a file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(TEST_DATA_PATH, *path_segments)
def _GetTestFilePathSpec(self, path_segments):
"""Retrieves the path specification of a file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
dfvfs.PathSpec: path specification.
Raises:
SkipTest: if the path does not exist and the test should be skipped.
"""
test_file_path = self._GetTestFilePath(path_segments)
self._SkipIfPathNotExists(test_file_path)
return path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
def _SkipIfPathNotExists(self, path):
"""Skips the test if the path does not exist.
Args:
path (str): path of a test file.
Raises:
SkipTest: if the path does not exist and the test should be skipped.
"""
if not os.path.exists(path):
filename = os.path.basename(path)
raise unittest.SkipTest('missing test file: {0:s}'.format(filename))
class ImportCheckTestCase(BaseTestCase):
"""Base class for tests that check modules are imported correctly."""
_FILENAME_REGEXP = re.compile(r'^[^_].*\.py$')
def _AssertFilesImportedInInit(self, path, ignorable_files):
"""Checks that files in path are imported in __init__.py
Args:
path (str): path to directory containing an __init__.py file and other
Python files which should be imported.
ignorable_files (list[str]): names of Python files that don't need to
appear in __init__.py. For example, 'manager.py'.
"""
init_path = '{0:s}/__init__.py'.format(path)
with io.open(init_path, mode='r', encoding='utf-8') as init_file:
init_content = init_file.read()
for file_path in os.listdir(path):
filename = os.path.basename(file_path)
if filename in ignorable_files:
continue
if self._FILENAME_REGEXP.search(filename):
module_name, _, _ = filename.partition('.')
import_expression = re.compile(r' import {0:s}\b'.format(module_name))
self.assertRegex(
init_content, import_expression,
'{0:s} not imported in {1:s}'.format(module_name, init_path))
class TempDirectory(object):
"""Class that implements a temporary directory."""
def __init__(self):
"""Initializes a temporary directory."""
super(TempDirectory, self).__init__()
self.name = ''
def __enter__(self):
"""Make this work with the 'with' statement."""
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exception_type, value, traceback):
"""Make this work with the 'with' statement."""
shutil.rmtree(self.name, True)
| apache-2.0 | -5,113,445,265,390,143,000 | -1,190,783,801,151,678,700 | 32.365482 | 78 | 0.685836 | false |
synergeticsedx/deployment-wipro | common/djangoapps/third_party_auth/tests/test_pipeline.py | 77 | 1807 | """Unit tests for third_party_auth/pipeline.py."""
import random
from third_party_auth import pipeline
from third_party_auth.tests import testutil
import unittest
# Allow tests access to protected methods (or module-protected methods) under test.
# pylint: disable=protected-access
class MakeRandomPasswordTest(testutil.TestCase):
"""Tests formation of random placeholder passwords."""
def setUp(self):
super(MakeRandomPasswordTest, self).setUp()
self.seed = 1
def test_default_args(self):
self.assertEqual(pipeline._DEFAULT_RANDOM_PASSWORD_LENGTH, len(pipeline.make_random_password()))
def test_probably_only_uses_charset(self):
        # This is ultimately probabilistic since we could randomly select a good character 100000 consecutive times.
for char in pipeline.make_random_password(length=100000):
self.assertIn(char, pipeline._PASSWORD_CHARSET)
def test_pseudorandomly_picks_chars_from_charset(self):
random_instance = random.Random(self.seed)
expected = ''.join(
random_instance.choice(pipeline._PASSWORD_CHARSET)
for _ in xrange(pipeline._DEFAULT_RANDOM_PASSWORD_LENGTH))
random_instance.seed(self.seed)
self.assertEqual(expected, pipeline.make_random_password(choice_fn=random_instance.choice))
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class ProviderUserStateTestCase(testutil.TestCase):
"""Tests ProviderUserState behavior."""
def test_get_unlink_form_name(self):
google_provider = self.configure_google_provider(enabled=True)
state = pipeline.ProviderUserState(google_provider, object(), None)
self.assertEqual(google_provider.provider_id + '_unlink_form', state.get_unlink_form_name())
| agpl-3.0 | -5,462,436,470,614,007,000 | 2,732,942,854,089,889,300 | 39.155556 | 115 | 0.726065 | false |
sander76/home-assistant | tests/components/kira/test_sensor.py | 5 | 1611 | """The tests for Kira sensor platform."""
import unittest
from unittest.mock import MagicMock
from homeassistant.components.kira import sensor as kira
from tests.common import get_test_home_assistant
TEST_CONFIG = {kira.DOMAIN: {"sensors": [{"host": "127.0.0.1", "port": 17324}]}}
DISCOVERY_INFO = {"name": "kira", "device": "kira"}
class TestKiraSensor(unittest.TestCase):
"""Tests the Kira Sensor platform."""
# pylint: disable=invalid-name
DEVICES = []
def add_entities(self, devices):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
mock_kira = MagicMock()
self.hass.data[kira.DOMAIN] = {kira.CONF_SENSOR: {}}
self.hass.data[kira.DOMAIN][kira.CONF_SENSOR]["kira"] = mock_kira
self.addCleanup(self.hass.stop)
# pylint: disable=protected-access
def test_kira_sensor_callback(self):
"""Ensure Kira sensor properly updates its attributes from callback."""
kira.setup_platform(self.hass, TEST_CONFIG, self.add_entities, DISCOVERY_INFO)
assert len(self.DEVICES) == 1
sensor = self.DEVICES[0]
assert sensor.name == "kira"
sensor.hass = self.hass
codeName = "FAKE_CODE"
deviceName = "FAKE_DEVICE"
codeTuple = (codeName, deviceName)
sensor._update_callback(codeTuple)
assert sensor.state == codeName
assert sensor.extra_state_attributes == {kira.CONF_DEVICE: deviceName}
| apache-2.0 | -3,357,284,828,869,289,500 | -2,586,769,200,859,749,000 | 31.22 | 86 | 0.646803 | false |
dragon788/powerline | powerline/lint/markedjson/events.py | 38 | 2157 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
# Abstract classes.
class Event(object):
def __init__(self, start_mark=None, end_mark=None):
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
attributes = [
key for key in ['implicit', 'value']
if hasattr(self, key)
]
arguments = ', '.join([
'%s=%r' % (key, getattr(self, key))
for key in attributes
])
return '%s(%s)' % (self.__class__.__name__, arguments)
class NodeEvent(Event):
def __init__(self, start_mark=None, end_mark=None):
self.start_mark = start_mark
self.end_mark = end_mark
class CollectionStartEvent(NodeEvent):
def __init__(self, implicit, start_mark=None, end_mark=None, flow_style=None):
self.tag = None
self.implicit = implicit
self.start_mark = start_mark
self.end_mark = end_mark
self.flow_style = flow_style
class CollectionEndEvent(Event):
pass
# Implementations.
class StreamStartEvent(Event):
def __init__(self, start_mark=None, end_mark=None, encoding=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.encoding = encoding
class StreamEndEvent(Event):
pass
class DocumentStartEvent(Event):
def __init__(self, start_mark=None, end_mark=None, explicit=None, version=None, tags=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.explicit = explicit
self.version = version
self.tags = tags
class DocumentEndEvent(Event):
def __init__(self, start_mark=None, end_mark=None, explicit=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.explicit = explicit
class AliasEvent(NodeEvent):
pass
class ScalarEvent(NodeEvent):
def __init__(self, implicit, value, start_mark=None, end_mark=None, style=None):
self.tag = None
self.implicit = implicit
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
class SequenceStartEvent(CollectionStartEvent):
pass
class SequenceEndEvent(CollectionEndEvent):
pass
class MappingStartEvent(CollectionStartEvent):
pass
class MappingEndEvent(CollectionEndEvent):
pass
| mit | -6,242,988,551,014,659,000 | 3,765,471,292,564,360,000 | 21.237113 | 92 | 0.707 | false |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Klout/User/ShowUser.py | 5 | 3067 | # -*- coding: utf-8 -*-
###############################################################################
#
# ShowUser
# Retrieves information for a specified Klout user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ShowUser(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ShowUser Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ShowUser, self).__init__(temboo_session, '/Library/Klout/User/ShowUser')
def new_input_set(self):
return ShowUserInputSet()
def _make_result_set(self, result, path):
return ShowUserResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ShowUserChoreographyExecution(session, exec_id, path)
class ShowUserInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ShowUser
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Klout.)
"""
super(ShowUserInputSet, self)._set_input('APIKey', value)
def set_KloutID(self, value):
"""
Set the value of the KloutID input for this Choreo. ((required, string) The id for a Klout user to retrieve.)
"""
super(ShowUserInputSet, self)._set_input('KloutID', value)
class ShowUserResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ShowUser Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Klout.)
"""
return self._output.get('Response', None)
class ShowUserChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ShowUserResultSet(response, path)
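# Hedged usage sketch (assumes a valid TembooSession named `session`; the API
# key and Klout id values below are placeholders):
#
#   choreo = ShowUser(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('YOUR_KLOUT_API_KEY')
#   inputs.set_KloutID('635263')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())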
| gpl-2.0 | -2,800,344,179,173,842,400 | -7,795,770,099,867,020,000 | 34.252874 | 117 | 0.664493 | false |
brian-l/django-1.4.10 | django/contrib/gis/management/commands/inspectdb.py | 315 | 1466 | from django.core.management.commands.inspectdb import Command as InspectDBCommand
class Command(InspectDBCommand):
db_module = 'django.contrib.gis.db'
gis_tables = {}
def get_field_type(self, connection, table_name, row):
field_type, field_params, field_notes = super(Command, self).get_field_type(connection, table_name, row)
if field_type == 'GeometryField':
geo_col = row[0]
# Getting a more specific field type and any additional parameters
# from the `get_geometry_type` routine for the spatial backend.
field_type, geo_params = connection.introspection.get_geometry_type(table_name, geo_col)
field_params.update(geo_params)
# Adding the table name and column to the `gis_tables` dictionary, this
# allows us to track which tables need a GeoManager.
if table_name in self.gis_tables:
self.gis_tables[table_name].append(geo_col)
else:
self.gis_tables[table_name] = [geo_col]
return field_type, field_params, field_notes
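    # For instance (a sketch), a PostGIS POINT column typically comes back
    # from get_geometry_type() as field_type 'PointField', with any extra
    # keyword parameters (e.g. a non-default 'srid') in geo_params; the
    # owning table is queued in `gis_tables` so get_meta() below can attach
    # a GeoManager.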
def get_meta(self, table_name):
meta_lines = super(Command, self).get_meta(table_name)
if table_name in self.gis_tables:
# If the table is a geographic one, then we need make
# GeoManager the default manager for the model.
meta_lines.insert(0, ' objects = models.GeoManager()')
return meta_lines
| bsd-3-clause | -2,186,140,204,358,013,000 | -8,651,395,481,850,750,000 | 49.551724 | 112 | 0.639154 | false |
40423130/2016fallcadp_hw | plugin/render_math/math.py | 283 | 14202 | # -*- coding: utf-8 -*-
"""
Math Render Plugin for Pelican
==============================
This plugin allows your site to render Math. It uses
the MathJax JavaScript engine.
For markdown, the plugin works by creating a Markdown
extension which is used during the markdown compilation
stage. Math therefore gets treated like a "first class
citizen" in Pelican
For reStructuredText, the plugin instructs the rst engine
to output Mathjax for all math.
The mathjax script is by default automatically inserted
into the HTML.
Typogrify Compatibility
-----------------------
This plugin now plays nicely with Typogrify, but it
requires Typogrify version 2.07 or above.
User Settings
-------------
Users are also able to pass a dictionary of settings
in the settings file which will control how the MathJax
library renders things. This could be very useful for
template builders that want to adjust the look and feel of
the math. See README for more details.
"""
import os
import sys
from pelican import signals, generators
try:
from bs4 import BeautifulSoup
except ImportError as e:
BeautifulSoup = None
try:
from . pelican_mathjax_markdown_extension import PelicanMathJaxExtension
except ImportError as e:
PelicanMathJaxExtension = None
def process_settings(pelicanobj):
"""Sets user specified MathJax settings (see README for more details)"""
mathjax_settings = {}
# NOTE TO FUTURE DEVELOPERS: Look at the README and what is happening in
# this function if any additional changes to the mathjax settings need to
# be incorporated. Also, please inline comment what the variables
# will be used for
# Default settings
mathjax_settings['auto_insert'] = True # if set to true, it will insert mathjax script automatically into content without needing to alter the template.
mathjax_settings['align'] = 'center' # controls alignment of of displayed equations (values can be: left, right, center)
mathjax_settings['indent'] = '0em' # if above is not set to 'center', then this setting acts as an indent
mathjax_settings['show_menu'] = 'true' # controls whether to attach mathjax contextual menu
mathjax_settings['process_escapes'] = 'true' # controls whether escapes are processed
    mathjax_settings['latex_preview'] = 'TeX' # controls what user sees while waiting for LaTeX to render
mathjax_settings['color'] = 'inherit' # controls color math is rendered in
mathjax_settings['linebreak_automatic'] = 'false' # Set to false by default for performance reasons (see http://docs.mathjax.org/en/latest/output.html#automatic-line-breaking)
mathjax_settings['tex_extensions'] = '' # latex extensions that can be embedded inside mathjax (see http://docs.mathjax.org/en/latest/tex.html#tex-and-latex-extensions)
mathjax_settings['responsive'] = 'false' # Tries to make displayed math responsive
mathjax_settings['responsive_break'] = '768' # The break point at which it math is responsively aligned (in pixels)
mathjax_settings['mathjax_font'] = 'default' # forces mathjax to use the specified font.
mathjax_settings['process_summary'] = BeautifulSoup is not None # will fix up summaries if math is cut off. Requires beautiful soup
mathjax_settings['force_tls'] = 'false' # will force mathjax to be served by https - if set as False, it will only use https if site is served using https
mathjax_settings['message_style'] = 'normal' # This value controls the verbosity of the messages in the lower left-hand corner. Set it to "none" to eliminate all messages
# Source for MathJax
mathjax_settings['source'] = "'//cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'"
# Get the user specified settings
try:
settings = pelicanobj.settings['MATH_JAX']
except:
settings = None
# If no settings have been specified, then return the defaults
if not isinstance(settings, dict):
return mathjax_settings
# The following mathjax settings can be set via the settings dictionary
for key, value in ((key, settings[key]) for key in settings):
# Iterate over dictionary in a way that is compatible with both version 2
# and 3 of python
if key == 'align':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
if value == 'left' or value == 'right' or value == 'center':
mathjax_settings[key] = value
else:
mathjax_settings[key] = 'center'
if key == 'indent':
mathjax_settings[key] = value
if key == 'show_menu' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'message_style':
mathjax_settings[key] = value if value is not None else 'none'
if key == 'auto_insert' and isinstance(value, bool):
mathjax_settings[key] = value
if key == 'process_escapes' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'latex_preview':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
mathjax_settings[key] = value
if key == 'color':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
mathjax_settings[key] = value
if key == 'linebreak_automatic' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'process_summary' and isinstance(value, bool):
if value and BeautifulSoup is None:
print("BeautifulSoup4 is needed for summaries to be processed by render_math\nPlease install it")
value = False
mathjax_settings[key] = value
if key == 'responsive' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'force_tls' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'responsive_break' and isinstance(value, int):
mathjax_settings[key] = str(value)
if key == 'tex_extensions' and isinstance(value, list):
# filter string values, then add '' to them
try:
value = filter(lambda string: isinstance(string, basestring), value)
except NameError:
value = filter(lambda string: isinstance(string, str), value)
value = map(lambda string: "'%s'" % string, value)
mathjax_settings[key] = ',' + ','.join(value)
if key == 'mathjax_font':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
value = value.lower()
if value == 'sanserif':
value = 'SansSerif'
elif value == 'fraktur':
value = 'Fraktur'
elif value == 'typewriter':
value = 'Typewriter'
else:
value = 'default'
mathjax_settings[key] = value
return mathjax_settings
def process_summary(article):
"""Ensures summaries are not cut off. Also inserts
mathjax script so that math will be rendered"""
summary = article._get_summary()
summary_parsed = BeautifulSoup(summary, 'html.parser')
math = summary_parsed.find_all(class_='math')
if len(math) > 0:
last_math_text = math[-1].get_text()
if len(last_math_text) > 3 and last_math_text[-3:] == '...':
content_parsed = BeautifulSoup(article._content, 'html.parser')
full_text = content_parsed.find_all(class_='math')[len(math)-1].get_text()
math[-1].string = "%s ..." % full_text
summary = summary_parsed.decode()
article._summary = "%s<script type='text/javascript'>%s</script>" % (summary, process_summary.mathjax_script)
def configure_typogrify(pelicanobj, mathjax_settings):
"""Instructs Typogrify to ignore math tags - which allows Typogrify
to play nicely with math related content"""
# If Typogrify is not being used, then just exit
if not pelicanobj.settings.get('TYPOGRIFY', False):
return
try:
import typogrify
from distutils.version import LooseVersion
if LooseVersion(typogrify.__version__) < LooseVersion('2.0.7'):
raise TypeError('Incorrect version of Typogrify')
from typogrify.filters import typogrify
# At this point, we are happy to use Typogrify, meaning
# it is installed and it is a recent enough version
# that can be used to ignore all math
# Instantiate markdown extension and append it to the current extensions
pelicanobj.settings['TYPOGRIFY_IGNORE_TAGS'].extend(['.math', 'script']) # ignore math class and script
except (ImportError, TypeError) as e:
pelicanobj.settings['TYPOGRIFY'] = False # disable Typogrify
if isinstance(e, ImportError):
print("\nTypogrify is not installed, so it is being ignored.\nIf you want to use it, please install via: pip install typogrify\n")
if isinstance(e, TypeError):
print("\nA more recent version of Typogrify is needed for the render_math module.\nPlease upgrade Typogrify to the latest version (anything equal or above version 2.0.7 is okay).\nTypogrify will be turned off due to this reason.\n")
def process_mathjax_script(mathjax_settings):
"""Load the mathjax script template from file, and render with the settings"""
# Read the mathjax javascript template from file
    with open(os.path.dirname(os.path.realpath(__file__))
              + '/mathjax_script_template', 'r') as mathjax_script_template:
        mathjax_template = mathjax_script_template.read()
return mathjax_template.format(**mathjax_settings)
def mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings):
"""Instantiates a customized markdown extension for handling mathjax
related content"""
# Create the configuration for the markdown template
config = {}
config['mathjax_script'] = mathjax_script
config['math_tag_class'] = 'math'
config['auto_insert'] = mathjax_settings['auto_insert']
# Instantiate markdown extension and append it to the current extensions
try:
pelicanobj.settings['MD_EXTENSIONS'].append(PelicanMathJaxExtension(config))
except:
sys.excepthook(*sys.exc_info())
sys.stderr.write("\nError - the pelican mathjax markdown extension failed to configure. MathJax is non-functional.\n")
sys.stderr.flush()
def mathjax_for_rst(pelicanobj, mathjax_script):
"""Setup math for RST"""
docutils_settings = pelicanobj.settings.get('DOCUTILS_SETTINGS', {})
docutils_settings['math_output'] = 'MathJax'
pelicanobj.settings['DOCUTILS_SETTINGS'] = docutils_settings
rst_add_mathjax.mathjax_script = mathjax_script
def pelican_init(pelicanobj):
"""
Loads the mathjax script according to the settings.
Instantiate the Python markdown extension, passing in the mathjax
script as config parameter.
"""
# Process settings, and set global var
mathjax_settings = process_settings(pelicanobj)
# Generate mathjax script
mathjax_script = process_mathjax_script(mathjax_settings)
# Configure Typogrify
configure_typogrify(pelicanobj, mathjax_settings)
# Configure Mathjax For Markdown
if PelicanMathJaxExtension:
mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings)
# Configure Mathjax For RST
mathjax_for_rst(pelicanobj, mathjax_script)
# Set process_summary's mathjax_script variable
process_summary.mathjax_script = None
if mathjax_settings['process_summary']:
process_summary.mathjax_script = mathjax_script
def rst_add_mathjax(content):
"""Adds mathjax script for reStructuredText"""
# .rst is the only valid extension for reStructuredText files
_, ext = os.path.splitext(os.path.basename(content.source_path))
if ext != '.rst':
return
# If math class is present in text, add the javascript
# note that RST hardwires mathjax to be class "math"
if 'class="math"' in content._content:
content._content += "<script type='text/javascript'>%s</script>" % rst_add_mathjax.mathjax_script
def process_rst_and_summaries(content_generators):
"""
Ensure mathjax script is applied to RST and summaries are
corrected if specified in user settings.
Handles content attached to ArticleGenerator and PageGenerator objects,
since the plugin doesn't know how to handle other Generator types.
For reStructuredText content, examine both articles and pages.
If article or page is reStructuredText and there is math present,
append the mathjax script.
Also process summaries if present (only applies to articles)
and user wants summaries processed (via user settings)
"""
for generator in content_generators:
if isinstance(generator, generators.ArticlesGenerator):
for article in generator.articles + generator.translations:
rst_add_mathjax(article)
#optionally fix truncated formulae in summaries.
if process_summary.mathjax_script is not None:
process_summary(article)
elif isinstance(generator, generators.PagesGenerator):
for page in generator.pages:
rst_add_mathjax(page)
def register():
"""Plugin registration"""
signals.initialized.connect(pelican_init)
signals.all_generators_finalized.connect(process_rst_and_summaries)
| agpl-3.0 | -6,810,960,395,018,657,000 | 2,022,757,531,409,152,500 | 39.232295 | 244 | 0.662019 | false |
mtndesign/mtnvim | myvim/bundle/ropevim/ftplugin/python/libs/rope/refactor/multiproject.py | 91 | 2639 | """This module can be used for performing cross-project refactorings
See the "cross-project refactorings" section of ``docs/library.txt``
file.
"""
from rope.base import resources, project, libutils
class MultiProjectRefactoring(object):
def __init__(self, refactoring, projects, addpath=True):
"""Create a multiproject proxy for the main refactoring
        `projects` are the other projects.
"""
self.refactoring = refactoring
self.projects = projects
self.addpath = addpath
def __call__(self, project, *args, **kwds):
"""Create the refactoring"""
return _MultiRefactoring(self.refactoring, self.projects,
self.addpath, project, *args, **kwds)
class _MultiRefactoring(object):
def __init__(self, refactoring, other_projects, addpath,
project, *args, **kwds):
self.refactoring = refactoring
self.projects = [project] + other_projects
for other_project in other_projects:
for folder in self.project.pycore.get_source_folders():
other_project.get_prefs().add('python_path', folder.real_path)
self.refactorings = []
for other in self.projects:
args, kwds = self._resources_for_args(other, args, kwds)
self.refactorings.append(
self.refactoring(other, *args, **kwds))
def get_all_changes(self, *args, **kwds):
"""Get a project to changes dict"""
result = []
for project, refactoring in zip(self.projects, self.refactorings):
args, kwds = self._resources_for_args(project, args, kwds)
result.append((project, refactoring.get_changes(*args, **kwds)))
return result
def __getattr__(self, name):
return getattr(self.main_refactoring, name)
def _resources_for_args(self, project, args, kwds):
newargs = [self._change_project_resource(project, arg) for arg in args]
newkwds = dict((name, self._change_project_resource(project, value))
for name, value in kwds.items())
return newargs, newkwds
def _change_project_resource(self, project, obj):
if isinstance(obj, resources.Resource) and \
obj.project != project:
return libutils.path_to_resource(project, obj.real_path)
return obj
@property
def project(self):
return self.projects[0]
@property
def main_refactoring(self):
return self.refactorings[0]
def perform(project_changes):
for project, changes in project_changes:
project.do(changes)
| gpl-2.0 | 5,588,333,754,360,353,000 | -5,435,732,415,712,314,000 | 32.833333 | 79 | 0.622584 | false |
marcellodesales/svnedge-console | ext/solaris-sparc/pkg-toolkit/pkg/vendor-packages/mako/codegen.py | 6 | 32514 | # codegen.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer [email protected]
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters
MAGIC_NUMBER = 4
def compile(node, uri, filename=None, default_filters=None, buffer_filters=None, imports=None, source_encoding=None, generate_unicode=True):
"""generate module source code given a parsetree node, uri, and optional source filename"""
buf = util.FastEncodingBuffer(unicode=generate_unicode)
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer, _CompileContext(uri, filename, default_filters, buffer_filters, imports, source_encoding, generate_unicode), node)
return buf.getvalue()
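# Hedged usage sketch: `compile` consumes a parse tree produced by mako's
# lexer (names and signatures assumed for this mako version):
#
#   from mako import lexer
#   node = lexer.Lexer(u"hello ${name}!", 'memory').parse()
#   module_source = compile(node, 'memory')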
class _CompileContext(object):
def __init__(self, uri, filename, default_filters, buffer_filters, imports, source_encoding, generate_unicode):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_unicode = generate_unicode
class _GenerateRenderMethod(object):
"""a template visitor object which generates the full module source for a template."""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, parsetree.DefTag)
if self.in_def:
name = "render_" + node.name
args = node.function_decl.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(pagetag or node, name, args, buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
identifiers = property(lambda self:self.identifier_stack[-1])
def write_toplevel(self):
"""traverse a template structure for module-level directives and generate the
start of module-level code."""
inherit = []
namespaces = {}
module_code = []
encoding =[None]
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = util.Set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers()
module_identifiers.declared = module_ident
# module-level names, python code
if not self.compiler.generate_unicode and self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" % self.compiler.source_encoding)
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %s" % repr(MAGIC_NUMBER))
self.printer.writeline("_modified_time = %s" % repr(time.time()))
self.printer.writeline("_template_filename=%s" % repr(self.compiler.filename))
self.printer.writeline("_template_uri=%s" % repr(self.compiler.uri))
self.printer.writeline("_template_cache=cache.Cache(__name__, _modified_time)")
self.printer.writeline("_source_encoding=%s" % repr(self.compiler.source_encoding))
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(buf, source='', lineno=0, pos=0, filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = module_identifiers.topleveldefs.union(main_identifiers.topleveldefs)
[module_identifiers.declared.add(x) for x in ["UNDEFINED"]]
if impcode:
[module_identifiers.declared.add(x) for x in impcode.declared_identifiers]
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %s" % repr([n.name for n in main_identifiers.topleveldefs.values()]))
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
return main_identifiers.topleveldefs.values()
def write_render_callable(self, node, name, args, buffered, filtered, cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(self.compiler.identifiers.branch(self.node))
if not self.in_def and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared)>0):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" % ','.join(["%s=%s" % (x, x) for x in self.identifiers.argument_declared]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write("\n\n")
if cached:
self.write_cache_decorator(node, name, args, buffered, self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which is enclosed in <%! %> tags
in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" % (node.parsed_attributes['file']),
None
)
def write_namespaces(self, namespaces):
"""write the module-level namespace-generating callable."""
self.printer.writelines(
"def _mako_get_namespace(context, name):",
"try:",
"return context.namespaces[(__name__, name)]",
"except KeyError:",
"_mako_generate_namespaces(context)",
"return context.namespaces[(__name__, name)]",
None,None
)
self.printer.writeline("def _mako_generate_namespaces(context):")
for node in namespaces.values():
if node.attributes.has_key('import'):
self.compiler.has_ns_imports = True
self.write_source_comment(node)
if len(node.nodes):
self.printer.writeline("def make_namespace():")
export = []
identifiers = self.compiler.identifiers.branch(node)
class NSDefVisitor(object):
def visitDefTag(s, node):
self.write_inline_def(node, identifiers, nested=False)
export.append(node.name)
vis = NSDefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.printer.writeline("return [%s]" % (','.join(export)))
self.printer.writeline(None)
callable_name = "make_namespace()"
else:
callable_name = "None"
self.printer.writeline("ns = runtime.Namespace(%s, context._clean_inheritance_tokens(), templateuri=%s, callables=%s, calling_uri=_template_uri, module=%s)" % (repr(node.name), node.parsed_attributes.get('file', 'None'), callable_name, node.parsed_attributes.get('module', 'None')))
if eval(node.attributes.get('inheritable', "False")):
self.printer.writeline("context['self'].%s = ns" % (node.name))
self.printer.writeline("context.namespaces[(__name__, %s)] = ns" % repr(node.name))
self.printer.write("\n")
if not len(namespaces):
self.printer.writeline("pass")
self.printer.writeline(None)
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable definitions for defs and/or
name lookup within the function's context argument. the names declared are based on the
names that are referenced in the function body, which don't otherwise have any explicit
assignment operation. names that are assigned within the body are assumed to be
locally-scoped variables and are not separately declared.
for def callable definitions, if the def is a top-level callable then a
'stub' callable is generated which wraps the current Context into a closure. if the def
is not top-level, it is fully rendered as a local closure."""
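        # For instance (a sketch), an undeclared name `foo` referenced in the
        # template body typically renders as:
        #     foo = context.get('foo', UNDEFINED)
        # while a top-level def gets a local stub via write_def_decl() that
        # closes over the current context.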
# collection of all defs available to us in this scope
comp_idents = dict([(c.name, c) for c in identifiers.defs])
to_write = util.Set()
# write "context.get()" for all variables we are going to need that arent in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define right here
to_write = to_write.union(util.Set([c.name for c in identifiers.closuredefs.values()]))
# remove identifiers that are declared in the argument signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to. in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
# if a limiting set was sent, constraint to those items in that list
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.iteritems():
if ns.attributes.has_key('import'):
self.printer.writeline("_mako_get_namespace(context, %s)._populate(_import_ns, %s)" % (repr(ident), repr(re.split(r'\s*,\s*', ns.attributes['import']))))
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline("%s = _mako_get_namespace(context, %s)" % (ident, repr(ident)))
else:
if getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("%s = _import_ns.get(%s, context.get(%s, UNDEFINED))" % (ident, repr(ident), repr(ident)))
else:
self.printer.writeline("%s = context.get(%s, UNDEFINED)" % (ident, repr(ident)))
self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.function_decl.funcname
namedecls = node.function_decl.get_argument_expressions()
nameargs = node.function_decl.get_argument_expressions(include_defaults=False)
if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context.locals_(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline("return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
"""write a locally-available def callable inside an enclosing def."""
namedecls = node.function_decl.get_argument_expressions()
self.printer.writeline("def %s(%s):" % (node.name, ",".join(namedecls)))
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
self.printer.writelines(
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writelines(
"context._push_buffer()",
)
identifiers = identifiers.branch(node, nested=nested)
self.write_variable_declares(identifiers)
self.identifier_stack.append(identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, filtered, cached)
self.printer.writeline(None)
if cached:
self.write_cache_decorator(node, node.name, namedecls, False, identifiers, inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached, callstack=True):
"""write the end section of a rendering function, either outermost or inline.
this takes into account if the rendering function was filtered, buffered, etc.
and closes the corresponding try: block if any, and writes code to retrieve captured content,
apply filters, send proper return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
                # implementation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s, False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
)
def write_cache_decorator(self, node_or_pagetag, name, args, buffered, identifiers, inline=False, toplevel=False):
"""write a post-function decorator to replace a rendering callable with a cached version of itself."""
self.printer.writeline("__M_%s = %s" % (name, name))
cachekey = node_or_pagetag.parsed_attributes.get('cache_key', repr(name))
cacheargs = {}
for arg in (('cache_type', 'type'), ('cache_dir', 'data_dir'), ('cache_timeout', 'expiretime'), ('cache_url', 'url')):
val = node_or_pagetag.parsed_attributes.get(arg[0], None)
if val is not None:
if arg[1] == 'expiretime':
cacheargs[arg[1]] = int(eval(val))
else:
cacheargs[arg[1]] = val
else:
if self.compiler.pagetag is not None:
val = self.compiler.pagetag.parsed_attributes.get(arg[0], None)
if val is not None:
if arg[1] == 'expiretime':
                            cacheargs[arg[1]] = int(eval(val))
else:
cacheargs[arg[1]] = val
self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
# form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
pass_args = [ '=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a for a in args]
self.write_variable_declares(identifiers, toplevel=toplevel, limit=node_or_pagetag.undeclared_identifiers())
if buffered:
s = "context.get('local').get_cached(%s, %screatefunc=lambda:__M_%s(%s))" % (cachekey, ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]), name, ','.join(pass_args))
# apply buffer_filters
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
self.printer.writelines("return " + s,None)
else:
self.printer.writelines(
"__M_writer(context.get('local').get_cached(%s, %screatefunc=lambda:__M_%s(%s)))" % (cachekey, ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]), name, ','.join(pass_args)),
"return ''",
None
)
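    # Note: the pattern above stashes the original callable as __M_<name> and
    # emits a same-signature wrapper delegating to
    # context.get('local').get_cached(...), so cache misses fall through to
    # the original render function (historically backed by Beaker).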
def create_filter_callable(self, args, target, is_expression):
"""write a filter-applying expression based on the filters present in the given
filter names, adjusting for the global 'default' filter aliases as needed."""
def locate_encode(name):
if re.match(r'decode\..+', name):
return "filters." + name
else:
return filters.DEFAULT_ESCAPES.get(name, name)
if 'n' not in args:
if is_expression:
if self.compiler.pagetag:
args = self.compiler.pagetag.filter_args.args + args
if self.compiler.default_filters:
args = self.compiler.default_filters + args
for e in args:
# if filter given as a function, get just the identifier portion
if e == 'n':
continue
m = re.match(r'(.+?)(\(.*\))', e)
if m:
(ident, fargs) = m.group(1,2)
f = locate_encode(ident)
e = f + fargs
else:
x = e
e = locate_encode(e)
assert e is not None
target = "%s(%s)" % (e, target)
return target
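    # Example (a sketch, assuming the stock escape aliases): an expression
    # like ${ caption | h, trim } expands left-to-right, innermost first,
    # into roughly trim(filters.html_escape(caption)).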
def visitExpression(self, node):
self.write_source_comment(node)
if len(node.escapes) or (self.compiler.pagetag is not None and len(self.compiler.pagetag.filter_args.args)) or len(self.compiler.default_filters):
s = self.create_filter_callable(node.escapes_code.args, "%s" % node.text, True)
self.printer.writeline("__M_writer(%s)" % s)
else:
self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
if node.isend:
self.printer.writeline(None)
else:
self.write_source_comment(node)
self.printer.writeline(node.text)
def visitText(self, node):
self.write_source_comment(node)
self.printer.writeline("__M_writer(%s)" % repr(node.content))
def visitTextTag(self, node):
filtered = len(node.filter_args.args) > 0
if filtered:
self.printer.writelines(
"__M_writer = context._push_writer()",
"try:",
)
for n in node.nodes:
n.accept_visitor(self)
if filtered:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()",
"__M_writer(%s)" % self.create_filter_callable(node.filter_args.args, "__M_buf.getvalue()", False),
None
)
def visitCode(self, node):
if not node.ismodule:
self.write_source_comment(node)
self.printer.write_indented_block(node.text)
if not self.in_def and len(self.identifiers.locally_assigned) > 0:
# if we are the "template" def, fudge locally declared/modified variables into the "__M_locals" dictionary,
# which is used for def calls within the same template, to simulate "enclosing scope"
self.printer.writeline('__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin()[__M_key]) for __M_key in [%s] if __M_key in __M_locals_builtin()]))' % ','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
self.write_source_comment(node)
args = node.attributes.get('args')
if args:
self.printer.writeline("runtime._include_file(context, %s, _template_uri, %s)" % (node.parsed_attributes['file'], args))
else:
self.printer.writeline("runtime._include_file(context, %s, _template_uri)" % (node.parsed_attributes['file']))
def visitNamespaceTag(self, node):
pass
def visitDefTag(self, node):
pass
def visitCallTag(self, node):
self.printer.writeline("def ccall(caller):")
export = ['body']
callable_identifiers = self.identifiers.branch(node, nested=True)
body_identifiers = callable_identifiers.branch(node, nested=False)
# we want the 'caller' passed to ccall to be used for the body() function,
# but for other non-body() <%def>s within <%call> we want the current caller off the call stack (if any)
body_identifiers.add_declared('caller')
self.identifier_stack.append(body_identifiers)
class DefVisitor(object):
def visitDefTag(s, node):
self.write_inline_def(node, callable_identifiers, nested=False)
export.append(node.name)
# remove defs that are within the <%call> from the "closuredefs" defined
                    # in the body, so they don't render twice
if node.name in body_identifiers.closuredefs:
del body_identifiers.closuredefs[node.name]
vis = DefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.identifier_stack.pop()
bodyargs = node.body_decl.get_argument_expressions()
self.printer.writeline("def body(%s):" % ','.join(bodyargs))
# TODO: figure out best way to specify buffering/nonbuffering (at call time would be better)
buffered = False
if buffered:
self.printer.writelines(
"context._push_buffer()",
"try:"
)
self.write_variable_declares(body_identifiers)
self.identifier_stack.append(body_identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, False, False, callstack=False)
self.printer.writelines(
None,
"return [%s]" % (','.join(export)),
None
)
self.printer.writelines(
# get local reference to current caller, if any
"caller = context.caller_stack._get_caller()",
# push on caller for nested call
"context.caller_stack.nextcaller = runtime.Namespace('caller', context, callables=ccall(caller))",
"try:")
self.write_source_comment(node)
self.printer.writelines(
"__M_writer(%s)" % self.create_filter_callable([], node.attributes['expr'], True),
"finally:",
"context.caller_stack.nextcaller = None",
None
)
class _Identifiers(object):
"""tracks the status of identifier names as template code is rendered."""
def __init__(self, node=None, parent=None, nested=False):
if parent is not None:
# things that have already been declared in an enclosing namespace (i.e. names we can just use)
self.declared = util.Set(parent.declared).union([c.name for c in parent.closuredefs.values()]).union(parent.locally_declared).union(parent.argument_declared)
# if these identifiers correspond to a "nested" scope, it means whatever the
# parent identifiers had as undeclared will have been declared by that parent,
# and therefore we have them in our scope.
if nested:
self.declared = self.declared.union(parent.undeclared)
# top level defs that are available
self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
else:
self.declared = util.Set()
self.topleveldefs = util.SetLikeDict()
# things within this level that are referenced before they are declared (e.g. assigned to)
self.undeclared = util.Set()
# things that are declared locally. some of these things could be in the "undeclared"
# list as well if they are referenced before declared
self.locally_declared = util.Set()
        # assignments made in explicit python blocks. these will be propagated to
# the context of local def calls.
self.locally_assigned = util.Set()
# things that are declared in the argument signature of the def callable
self.argument_declared = util.Set()
# closure defs that are defined in this level
self.closuredefs = util.SetLikeDict()
self.node = node
if node is not None:
node.accept_visitor(self)
def branch(self, node, **kwargs):
"""create a new Identifiers for a new Node, with this Identifiers as the parent."""
return _Identifiers(node, self, **kwargs)
defs = property(lambda self:util.Set(self.topleveldefs.union(self.closuredefs).values()))
def __repr__(self):
return "Identifiers(declared=%s, locally_declared=%s, undeclared=%s, topleveldefs=%s, closuredefs=%s, argumenetdeclared=%s)" % (repr(list(self.declared)), repr(list(self.locally_declared)), repr(list(self.undeclared)), repr([c.name for c in self.topleveldefs.values()]), repr([c.name for c in self.closuredefs.values()]), repr(self.argument_declared))
def check_declared(self, node):
"""update the state of this Identifiers with the undeclared and declared identifiers of the given node."""
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.locally_declared.add(ident)
def add_declared(self, ident):
self.declared.add(ident)
if ident in self.undeclared:
self.undeclared.remove(ident)
def visitExpression(self, node):
self.check_declared(node)
def visitControlLine(self, node):
self.check_declared(node)
def visitCode(self, node):
if not node.ismodule:
self.check_declared(node)
self.locally_assigned = self.locally_assigned.union(node.declared_identifiers())
def visitDefTag(self, node):
if node.is_root():
self.topleveldefs[node.name] = node
elif node is not self.node:
self.closuredefs[node.name] = node
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
# visit defs only one level deep
if node is self.node:
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitIncludeTag(self, node):
self.check_declared(node)
def visitPageTag(self, node):
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
self.check_declared(node)
def visitCallTag(self, node):
if node is self.node:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
else:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
| agpl-3.0 | 5,907,763,463,747,091,000 | -4,411,369,704,955,959,300 | 45.648494 | 359 | 0.583749 | false |
jcoady9/python-for-android | python-modules/twisted/twisted/conch/ssh/forwarding.py | 61 | 5659 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""
This module contains the implementation of the TCP forwarding, which allows
clients and servers to forward arbitrary TCP data across the connection.
Maintainer: Paul Swartz
"""
import struct
from twisted.internet import protocol, reactor
from twisted.python import log
import common, channel
class SSHListenForwardingFactory(protocol.Factory):
def __init__(self, connection, hostport, klass):
self.conn = connection
self.hostport = hostport # tuple
self.klass = klass
def buildProtocol(self, addr):
channel = self.klass(conn = self.conn)
client = SSHForwardingClient(channel)
channel.client = client
addrTuple = (addr.host, addr.port)
channelOpenData = packOpen_direct_tcpip(self.hostport, addrTuple)
self.conn.openChannel(channel, channelOpenData)
return client
class SSHListenForwardingChannel(channel.SSHChannel):
def channelOpen(self, specificData):
log.msg('opened forwarding channel %s' % self.id)
if len(self.client.buf)>1:
b = self.client.buf[1:]
self.write(b)
self.client.buf = ''
def openFailed(self, reason):
self.closed()
def dataReceived(self, data):
self.client.transport.write(data)
def eofReceived(self):
self.client.transport.loseConnection()
def closed(self):
if hasattr(self, 'client'):
log.msg('closing local forwarding channel %s' % self.id)
self.client.transport.loseConnection()
del self.client
class SSHListenClientForwardingChannel(SSHListenForwardingChannel):
name = 'direct-tcpip'
class SSHListenServerForwardingChannel(SSHListenForwardingChannel):
name = 'forwarded-tcpip'
class SSHConnectForwardingChannel(channel.SSHChannel):
def __init__(self, hostport, *args, **kw):
channel.SSHChannel.__init__(self, *args, **kw)
self.hostport = hostport
self.client = None
self.clientBuf = ''
def channelOpen(self, specificData):
cc = protocol.ClientCreator(reactor, SSHForwardingClient, self)
log.msg("connecting to %s:%i" % self.hostport)
cc.connectTCP(*self.hostport).addCallbacks(self._setClient, self._close)
def _setClient(self, client):
self.client = client
log.msg("connected to %s:%i" % self.hostport)
if self.clientBuf:
self.client.transport.write(self.clientBuf)
self.clientBuf = None
if self.client.buf[1:]:
self.write(self.client.buf[1:])
self.client.buf = ''
def _close(self, reason):
log.msg("failed to connect: %s" % reason)
self.loseConnection()
def dataReceived(self, data):
if self.client:
self.client.transport.write(data)
else:
self.clientBuf += data
def closed(self):
if self.client:
log.msg('closed remote forwarding channel %s' % self.id)
if self.client.channel:
self.loseConnection()
self.client.transport.loseConnection()
del self.client
def openConnectForwardingClient(remoteWindow, remoteMaxPacket, data, avatar):
remoteHP, origHP = unpackOpen_direct_tcpip(data)
return SSHConnectForwardingChannel(remoteHP,
remoteWindow=remoteWindow,
remoteMaxPacket=remoteMaxPacket,
avatar=avatar)
class SSHForwardingClient(protocol.Protocol):
def __init__(self, channel):
self.channel = channel
self.buf = '\000'
def dataReceived(self, data):
if self.buf:
self.buf += data
else:
self.channel.write(data)
def connectionLost(self, reason):
if self.channel:
self.channel.loseConnection()
self.channel = None
def packOpen_direct_tcpip((connHost, connPort), (origHost, origPort)):
"""Pack the data suitable for sending in a CHANNEL_OPEN packet.
"""
conn = common.NS(connHost) + struct.pack('>L', connPort)
orig = common.NS(origHost) + struct.pack('>L', origPort)
return conn + orig
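# Wire layout (per RFC 4254, section 7.2): common.NS() emits a
# length-prefixed string and struct.pack('>L', ...) a big-endian uint32, so
# the payload reads: string host to connect, uint32 port to connect,
# string originator IP address, uint32 originator port.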
packOpen_forwarded_tcpip = packOpen_direct_tcpip
def unpackOpen_direct_tcpip(data):
"""Unpack the data to a usable format.
"""
connHost, rest = common.getNS(data)
connPort = int(struct.unpack('>L', rest[:4])[0])
origHost, rest = common.getNS(rest[4:])
origPort = int(struct.unpack('>L', rest[:4])[0])
return (connHost, connPort), (origHost, origPort)
unpackOpen_forwarded_tcpip = unpackOpen_direct_tcpip
def packGlobal_tcpip_forward((host, port)):
return common.NS(host) + struct.pack('>L', port)
def unpackGlobal_tcpip_forward(data):
host, rest = common.getNS(data)
port = int(struct.unpack('>L', rest[:4])[0])
return host, port
"""This is how the data -> eof -> close stuff /should/ work.
debug3: channel 1: waiting for connection
debug1: channel 1: connected
debug1: channel 1: read<=0 rfd 7 len 0
debug1: channel 1: read failed
debug1: channel 1: close_read
debug1: channel 1: input open -> drain
debug1: channel 1: ibuf empty
debug1: channel 1: send eof
debug1: channel 1: input drain -> closed
debug1: channel 1: rcvd eof
debug1: channel 1: output open -> drain
debug1: channel 1: obuf empty
debug1: channel 1: close_write
debug1: channel 1: output drain -> closed
debug1: channel 1: rcvd close
debug3: channel 1: will not send data after close
debug1: channel 1: send close
debug1: channel 1: is dead
"""
| apache-2.0 | 5,269,135,339,849,662,000 | -6,898,995,009,259,117,000 | 30.265193 | 80 | 0.649761 | false |
fclesio/learning-space | Python/ims24/ims24/contrators/requester.py | 1 | 3740 | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
#
# requester.py
# @Author : Gustavo F ([email protected])
# @Link : https://github.com/sharkguto
# @Date : 17/02/2019 11:06:12
import typing
import requests
from requests_futures.sessions import FuturesSession
from ims24 import logger
from ims24.configuration.environments import BASE_URL_CRAWLER
class RequesterCrawler(object):
"""
Interface to implement in service
"""
def __init__(self, base_url: str = ""):
"""
        Constructor: load the request session and set env variables.
:param self: itself
"""
self._requester_url = base_url or BASE_URL_CRAWLER
self._default_headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0",
"content-type": "text/html",
}
self.reset_session()
def reset_session(self):
"""
        Reset the request session; also called from the constructor to bootstrap it.
:param self: itself
"""
self._requester_session = requests.Session()
self._requester_session.headers.update(self._default_headers)
def get_list(
self, endpoints: tuple, headers: dict = {}, params: dict = {}, **path_params
) -> typing.Tuple[object, int]:
"""
send multiple requests and group by endpoints
:param self: itself
:param endpoints:tuple: tuple list with endpoints
:param headers:dict={}:
:param params:dict={}:
:param **path_params:
"""
dict_responses = {}
result = {}
status_code = 200
wrk_num = self.max_workers_value(endpoints)
with FuturesSession(max_workers=wrk_num) as session_async:
session_async.headers.update(self._default_headers)
for i in endpoints:
dict_responses[i] = {"url": self._endpoints[i]}
dict_responses[i]["response"] = session_async.get(
dict_responses[i]["url"], params=params, timeout=20
)
try:
for i in endpoints:
response = dict_responses[i][
"response"
                ].result() # block IO... wait for the response
if "application/json" in response.headers["content-type"].lower():
result[i] = response.json()
                else: # non-JSON: the service's html_parser() handles it
result[i] = self.html_parser(response.text)
logger.info(
f"finish request {dict_responses[i]['url']} {response.status_code}"
)
except requests.exceptions.ConnectionError as ex:
logger.error(f"{ex} \n key : {i} on request {dict_responses[i]}")
result = f"call procedure {i} failed -> Connection to requester failed"
raise ex
except Exception as ex:
logger.error(f"{ex} \n key : {i} on request {dict_responses[i]}")
result = f"call procedure {i} failed -> {response.text}"
raise ex
return result
def max_workers_value(self, endpoints: tuple):
"""
check how many endpoints we have... if have more than 50
trigger 50 tasks per requests loop
:param self: itself
:param endpoints:tuple: ("1","2","3")
"""
max_conn = len(endpoints) or 1
if max_conn > 50:
max_conn = 50
return max_conn
def html_parser(self, text: str):
raise NotImplementedError("Needs to be implement in your service")
| gpl-2.0 | 3,559,019,908,104,796,700 | -1,211,299,627,197,707,300 | 33.62963 | 105 | 0.55 | false |
Glignos/inspire-next | inspirehep/modules/fixtures/files.py | 4 | 2972 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Functions for searching ES and returning the results."""
from __future__ import absolute_import, division, print_function
import os
from flask import current_app
from invenio_db import db
from invenio_files_rest.models import Location
def init_default_storage_path():
"""Init default file store location."""
try:
uri = current_app.config['BASE_FILES_LOCATION']
if uri.startswith('/') and not os.path.exists(uri):
os.makedirs(uri)
loc = Location(
name="default",
uri=uri,
default=True
)
db.session.add(loc)
db.session.commit()
return loc
except Exception:
db.session.rollback()
raise
def init_workflows_storage_path(default=False):
"""Init workflows file store location."""
try:
uri = current_app.config['WORKFLOWS_FILE_LOCATION']
if uri.startswith('/') and not os.path.exists(uri):
os.makedirs(uri)
loc = Location(
name=current_app.config["WORKFLOWS_DEFAULT_FILE_LOCATION_NAME"],
uri=uri,
            default=default
)
db.session.add(loc)
db.session.commit()
return loc
except Exception:
db.session.rollback()
raise
def init_records_files_storage_path(default=False):
"""Init records file store location."""
try:
uri = os.path.join(
current_app.config['BASE_FILES_LOCATION'],
"records", "files"
)
if uri.startswith('/') and not os.path.exists(uri):
os.makedirs(uri)
loc = Location(
name=current_app.config["RECORDS_DEFAULT_FILE_LOCATION_NAME"],
uri=uri,
            default=default
)
db.session.add(loc)
db.session.commit()
return loc
except Exception:
db.session.rollback()
raise
def init_all_storage_paths():
"""Init all storage paths."""
init_default_storage_path()
init_workflows_storage_path()
init_records_files_storage_path()
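# Usage sketch: these helpers assume an active Flask application context,
# since both current_app and the invenio_db session require one:
#
#   with app.app_context():
#       init_all_storage_paths()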
| gpl-3.0 | -4,847,642,396,189,879,000 | -4,069,809,848,300,579,000 | 28.72 | 77 | 0.638964 | false |
pbmanis/acq4 | acq4/analysis/scripts/eventExplorer.py | 3 | 15464 | from __future__ import print_function
from acq4.util import Qt
import acq4.Manager
import acq4.pyqtgraph as pg
import acq4.pyqtgraph.opengl as gl
import numpy as np
import acq4.util.functions as fn
import re
man = acq4.Manager.getManager()
## update DB field to reflect dir meta info
#for i in db.select('Cell', ['rowid']):
#d = db.getDir('Cell', i[0])
#typ = d.info().get('type', '')
#db.update('Cell', {'type': typ}, rowid=i[0])
#print d, typ
global eventView, siteView, cells
eventView = 'events_view'
siteView = 'sites_view'
firstRun = False
if 'events' not in locals():
global events
events = {}
firstRun = True
win = Qt.QMainWindow()
#cw = Qt.QWidget()
layout = pg.LayoutWidget()
#layout = Qt.QGridLayout()
#layout.setContentsMargins(0,0,0,0)
#layout.setSpacing(0)
#cw.setLayout(layout)
win.setCentralWidget(layout)
cellCombo = Qt.QComboBox()
cellCombo.setSizeAdjustPolicy(cellCombo.AdjustToContents)
layout.addWidget(cellCombo)
reloadBtn = Qt.QPushButton('reload')
layout.addWidget(reloadBtn)
separateCheck = Qt.QCheckBox("color pre/post")
layout.addWidget(separateCheck)
colorCheck = Qt.QCheckBox("color y position")
layout.addWidget(colorCheck)
errLimitSpin = pg.SpinBox(value=0.7, step=0.1)
layout.addWidget(errLimitSpin)
lengthRatioLimitSpin = pg.SpinBox(value=1.5, step=0.1)
layout.addWidget(lengthRatioLimitSpin)
postRgnStartSpin = pg.SpinBox(value=0.500, step=0.01, siPrefix=True, suffix='s')
layout.addWidget(postRgnStartSpin)
postRgnStopSpin = pg.SpinBox(value=0.700, step=0.01, siPrefix=True, suffix='s')
layout.addWidget(postRgnStopSpin)
spl1 = Qt.QSplitter()
spl1.setOrientation(Qt.Qt.Vertical)
layout.addWidget(spl1, row=1, col=0, rowspan=1, colspan=8)
pw1 = pg.PlotWidget()
spl1.addWidget(pw1)
pw1.setLabel('left', 'Amplitude', 'A')
pw1.setLabel('bottom', 'Decay Tau', 's')
spl2 = Qt.QSplitter()
spl2.setOrientation(Qt.Qt.Horizontal)
spl1.addWidget(spl2)
pw2 = pg.PlotWidget(labels={'bottom': ('time', 's')})
spl2.addWidget(pw2)
tab = Qt.QTabWidget()
spl2.addWidget(tab)
## For viewing cell morphology
gv = pg.GraphicsView()
gv.setBackgroundBrush(pg.mkBrush('w'))
image = pg.ImageItem()
gv.addItem(image)
gv.enableMouse()
gv.setAspectLocked(True)
tab.addTab(gv, 'Morphology')
## 3D atlas
import acq4.analysis.atlas.CochlearNucleus as CN
atlas = CN.CNAtlasDisplayWidget()
atlas.showLabel('DCN')
atlas.showLabel('AVCN')
atlas.showLabel('PVCN')
tab.addTab(atlas, 'Atlas')
atlasPoints = gl.GLScatterPlotItem()
atlas.addItem(atlasPoints)
win.show()
win.resize(1000,800)
sp1 = pw1.scatterPlot([], pen=pg.mkPen(None), brush=(200,200,255,70), identical=True, size=8)
sp2 = pw1.scatterPlot([], pen=pg.mkPen(None), brush=(255,200,200,70), identical=True, size=8)
sp3 = pw1.scatterPlot([], pen=pg.mkPen(None), brush=(100,255,100,70), identical=True, size=8)
sp4 = pw1.scatterPlot([], pen=pg.mkPen(None), size=8)
print("Reading cell list...")
#import os, pickle
#md = os.path.abspath(os.path.split(__file__)[0])
#cacheFile = os.path.join(md, 'eventCache.p')
#if os.path.isfile(cacheFile):
#print "Read from cache..."
#ev = pickle.load(open(cacheFile, 'r'))
#else:
#pickle.dump(ev, open(cacheFile, 'w'))
## create views that link cell information to events/sites
db = man.getModule('Data Manager').currentDatabase()
if not db.hasTable(siteView):
print("Creating DB views.")
db.createView(siteView, ['photostim_sites', 'DirTable_Protocol', 'DirTable_Cell']) ## seems to be unused.
if not db.hasTable(eventView):
db.createView(eventView, ['photostim_events', 'DirTable_Protocol', 'DirTable_Cell'])
cells = db.select(siteView, ['CellDir'], distinct=True)
cells = [c['CellDir'] for c in cells]
cells.sort(lambda a,b: cmp(a.name(), b.name()))
cellCombo.addItem('')
for c in cells:
cellCombo.addItem(c.name(relativeTo=man.baseDir))
#cellSpin.setMaximum(len(cells)-1)
print("Done.")
def loadCell(cell, reloadData=False):
global events
if reloadData:
events.pop(cell, None)
if cell in events:
return
db = man.getModule('Data Manager').currentDatabase()
mod = man.dataModel
allEvents = []
hvals = {}
nEv = 0
positionCache = {}
tcache = {}
print("Loading all events for cell", cell)
tot = db.select(eventView, 'count()', where={'CellDir': cell})[0]['count()']
print(tot, "total events..")
with pg.ProgressDialog('Loading event data...', maximum=tot, wait=0) as dlg:
for ev in db.iterSelect(eventView, ['ProtocolSequenceDir', 'SourceFile', 'fitAmplitude', 'fitTime', 'fitDecayTau', 'fitRiseTau', 'fitTimeToPeak', 'fitLengthOverDecay', 'fitFractionalError', 'userTransform', 'CellType', 'CellDir', 'ProtocolDir'], where={'CellDir': cell}, toArray=True, chunkSize=200):
extra = np.empty(ev.shape, dtype=[('right', float), ('anterior', float), ('dorsal', float), ('holding', float)])
## insert holding levels
for i in range(len(ev)):
sd = ev[i]['ProtocolSequenceDir']
if sd not in hvals:
cf = ev[i]['SourceFile']
hvals[sd] = mod.getClampHoldingLevel(cf)
#print hvals[sd], cf
extra[i]['holding'] = hvals[sd]
## insert positions
for i in range(len(ev)):
protoDir = ev[i]['SourceFile'].parent()
key = protoDir
#key = (ev[i]['ProtocolSequenceDir'], ev[i]['SourceFile'])
if key not in positionCache:
#try:
#dh = ev[i]['ProtocolDir']
#p1 = pg.Point(dh.info()['Scanner']['position'])
#if key[0] not in tcache:
#tr = pg.SRTTransform()
#tr.restoreState(dh.parent().info()['userTransform'])
#tcache[key[0]] = tr
#trans = tcache[key[0]]
#p2 = trans.map(p1)
#pcache[key] = (p2.x(),p2.y())
#except:
#print key
#raise
rec = db.select('CochlearNucleus_Protocol', where={'ProtocolDir': protoDir})
if len(rec) == 0:
pos = (None, None, None)
elif len(rec) == 1:
pos = (rec[0]['right'], rec[0]['anterior'], rec[0]['dorsal'])
elif len(rec) == 2:
raise Exception("Multiple position records for %s!" % str(protoDir))
positionCache[key] = pos
extra[i]['right'] = positionCache[key][0]
extra[i]['anterior'] = positionCache[key][1]
extra[i]['dorsal'] = positionCache[key][2]
ev = fn.concatenateColumns([ev, extra])
allEvents.append(ev)
nEv += len(ev)
dlg.setValue(nEv)
if dlg.wasCanceled():
raise Exception('Canceled by user.')
ev = np.concatenate(allEvents)
numExSites = 0
numInSites = 0
for site in db.select(siteView, 'ProtocolSequenceDir', where={'CellDir': cell}):
h = hvals.get(site['ProtocolSequenceDir'],None)
if h is None:
continue
if h > -0.02:
numInSites += 1
elif h < -0.04:
numExSites += 1
events[cell] = (ev, numExSites, numInSites)
def init():
if not firstRun:
return
cellCombo.currentIndexChanged.connect(showCell)
separateCheck.toggled.connect(showCell)
colorCheck.toggled.connect(showCell)
errLimitSpin.valueChanged.connect(showCell)
lengthRatioLimitSpin.valueChanged.connect(showCell)
reloadBtn.clicked.connect(reloadCell)
for s in [sp1, sp2, sp3, sp4]:
s.sigPointsClicked.connect(plotClicked)
def plotClicked(plt, pts):
pt = pts[0]
#(id, fn, time) = pt.data
#[['SourceFile', 'ProtocolSequenceDir', 'fitTime']]
#fh = db.getDir('ProtocolSequence', id)[fn]
fh = pt.data()['SourceFile']
id = pt.data()['ProtocolSequenceDir']
time = pt.data()['fitTime']
data = fh.read()['Channel':'primary']
data = fn.besselFilter(data, 8e3)
p = pw2.plot(data, clear=True)
pos = time / data.xvals('Time')[-1]
arrow = pg.CurveArrow(p, pos=pos)
xr = pw2.viewRect().left(), pw2.viewRect().right()
if time < xr[0] or time > xr[1]:
w = xr[1]-xr[0]
pw2.setXRange(time-w/5., time+4*w/5., padding=0)
fitLen = pt.data()['fitDecayTau']*pt.data()['fitLengthOverDecay']
x = np.linspace(time, time+fitLen, fitLen * 50e3)
v = [pt.data()['fitAmplitude'], pt.data()['fitTime'], pt.data()['fitRiseTau'], pt.data()['fitDecayTau']]
y = fn.pspFunc(v, x, risePower=2.0) + data[np.argwhere(data.xvals('Time')>time)[0]-1]
pw2.plot(x, y, pen='b')
#plot.addItem(arrow)
def select(ev, ex=True):
#if source is not None:
#ev = ev[ev['CellDir']==source]
if ex:
ev = ev[ev['holding'] < -0.04] # excitatory events
ev = ev[(ev['fitAmplitude'] < 0) * (ev['fitAmplitude'] > -2e-10)]
else:
ev = ev[(ev['holding'] >= -0.02) * (ev['holding'] <= 0.01)] ## inhibitory events
ev = ev[(ev['fitAmplitude'] > 0) * (ev['fitAmplitude'] < 2e-10)]
ev = ev[(0 < ev['fitDecayTau']) * (ev['fitDecayTau'] < 0.2)] # select decay region
ev = ev[ev['fitFractionalError'] < errLimitSpin.value()]
ev = ev[ev['fitLengthOverDecay'] > lengthRatioLimitSpin.value()]
return ev
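# Reading select() above: holding level splits sweeps into excitatory
# (< -40 mV) and inhibitory (-20 mV..+10 mV) groups, amplitude and decay-tau
# windows bound the fit, and the error/length-ratio spin boxes gate fit
# quality.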
def reloadCell():
showCell(reloadData=True)
def showCell(**kwds):
pw2.clear()
reloadData = kwds.get('reloadData', False)
#global lock
#if lock:
#return
#lock = True
Qt.QApplication.processEvents() ## prevents double-spin
#lock = False
cell = cells[cellCombo.currentIndex()-1]
dh = cell #db.getDir('Cell', cell)
loadCell(dh, reloadData=reloadData)
try:
image.setImage(dh['morphology.png'].read())
gv.setRange(image.sceneBoundingRect())
except:
image.setImage(np.zeros((2,2)))
pass
ev, numExSites, numInSites = events[cell]
ev2 = select(ev, ex=True)
ev3 = select(ev, ex=False)
if colorCheck.isChecked():
sp1.hide()
sp2.hide()
sp3.hide()
sp4.show()
start = postRgnStart()
stop = postRgnStop()
ev2post = ev2[(ev2['fitTime']>start) * (ev2['fitTime']<stop)]
ev3post = ev3[(ev3['fitTime']>start) * (ev3['fitTime']<stop)]
ev4 = np.concatenate([ev2post, ev3post])
yMax = ev4['dorsal'].max()
yMin = ev4['dorsal'].min()
brushes = []
for i in range(len(ev4)):
hue = 0.6*((ev4[i]['dorsal']-yMin) / (yMax-yMin))
brushes.append(pg.hsvColor(hue, 1.0, 1.0, 0.3))
#pts.append({
#'pos': (ev4[i]['fitDecayTau'], ev4[i]['fitAmplitude']),
#'brush': pg.hsvColor(hue, 1, 1, 0.3),
#'data': ev4[i]
#})
sp4.setData(x=ev4['fitDecayTau'], y=ev4['fitAmplitude'], symbolBrush=brushes, data=ev4)
else:
sp1.show()
sp2.show()
#sp3.show()
sp4.hide()
## excitatory
if separateCheck.isChecked():
pre = ev2[ev2['fitTime']< preRgnStop()]
post = ev2[(ev2['fitTime'] > postRgnStart()) * (ev2['fitTime'] < postRgnStop())]
else:
pre = ev2
sp1.setData(x=pre['fitDecayTau'], y=pre['fitAmplitude'], data=pre);
#print "Cell ", cell
#print " excitatory:", np.median(ev2['fitDecayTau']), np.median(ev2['fitAmplitude'])
## inhibitory
if separateCheck.isChecked():
pre = ev3[ev3['fitTime']< preRgnStop()]
post2 = ev3[(ev3['fitTime'] > postRgnStart()) * (ev3['fitTime'] < postRgnStop())]
post = np.concatenate([post, post2])
else:
pre = ev3
sp2.setData(x=pre['fitDecayTau'], y=pre['fitAmplitude'], data=pre);
#print " inhibitory:", np.median(ev2['fitDecayTau']), np.median(ev2['fitAmplitude'])
if separateCheck.isChecked():
sp3.setData(x=post['fitDecayTau'], y=post['fitAmplitude'], data=post)
sp3.show()
else:
sp3.hide()
try:
typ = ev2[0]['CellType']
except:
typ = ev3[0]['CellType']
sr = spontRate(ev2, numExSites)
sri = spontRate(ev3, numInSites)
title = "%s -- %s --- <span style='color: #99F;'>ex:</span> %s %s %s %0.1fHz --- <span style='color: #F99;'>in:</span> %s %s %s %0.1fHz" % (
dh.name(relativeTo=dh.parent().parent().parent()),
typ,
pg.siFormat(np.median(ev2['fitTimeToPeak']), error=np.std(ev2['fitTimeToPeak']), space=False, suffix='s'),
pg.siFormat(np.median(ev2['fitDecayTau']), error=np.std(ev2['fitDecayTau']), space=False, suffix='s'),
pg.siFormat(np.median(ev2['fitAmplitude']), error=np.std(ev2['fitAmplitude']), space=False, suffix='A'),
sr,
pg.siFormat(np.median(ev3['fitTimeToPeak']), error=np.std(ev3['fitTimeToPeak']), space=False, suffix='s'),
pg.siFormat(np.median(ev3['fitDecayTau']), error=np.std(ev3['fitDecayTau']), space=False, suffix='s'),
pg.siFormat(np.median(ev3['fitAmplitude']), error=np.std(ev3['fitAmplitude']), space=False, suffix='A'),
sri)
print(re.sub(r'<[^>]+>', '', title))
pw1.setTitle(title)
### show cell in atlas
#rec = db.select('CochlearNucleus_Cell', where={'CellDir': cell})
#pts = []
#if len(rec) > 0:
#pos = (rec[0]['right'], rec[0]['anterior'], rec[0]['dorsal'])
#pts = [{'pos': pos, 'size': 100e-6, 'color': (0.7, 0.7, 1.0, 1.0)}]
### show event positions
evSpots = {}
for rec in ev:
p = (rec['right'], rec['anterior'], rec['dorsal'])
evSpots[p] = None
pos = np.array(list(evSpots.keys()))
atlasPoints.setData(pos=pos, )
def spontRate(ev, n):
## This is broken. It does not take into account recordings that had no events.
ev = ev[ev['fitTime'] < preRgnStop()]
#count = {}
#dirs = set()
#for i in range(len(ev)):
#key = (ev[i]['ProtocolSequenceDir'], ev[i]['SourceFile'])
#dirs.add(set)
#if key not in count:
#count[key] = 0
#count[key] += 1
#sr = np.mean([v/(preRgnStop()) for v in count.itervalues()])
if n == 0:
return 0
return len(ev) / (preRgnStop() * n)
def preRgnStop():
return postRgnStartSpin.value() - 0.002
def postRgnStart():
return postRgnStartSpin.value() + 0.002
def postRgnStop():
return postRgnStopSpin.value()
init() | mit | -4,206,170,899,507,962,400 | 9,049,453,428,758,714,000 | 33.752809 | 308 | 0.559946 | false |
iddqd1/django-cms | cms/tests/test_check.py | 1 | 7102 | # -*- coding: utf-8 -*-
from copy import deepcopy
import os
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template import base
from django.test import SimpleTestCase, TestCase
from cms.api import add_plugin
from cms.models.pluginmodel import CMSPlugin
from cms.models.placeholdermodel import Placeholder
from cms.test_utils.project.pluginapp.plugins.manytomany_rel.models import ArticlePluginModel
from cms.test_utils.project.extensionapp.models import MyPageExtension
from cms.utils.check import FileOutputWrapper, check, FileSectionWrapper
from djangocms_text_ckeditor.cms_plugins import TextPlugin
class TestOutput(FileOutputWrapper):
def __init__(self):
super(TestOutput, self).__init__(None, None)
self.section_wrapper = TestSectionOutput
def write(self, message):
pass
def write_stderr(self, message):
pass
class TestSectionOutput(FileSectionWrapper):
def write(self, message):
pass
def write_stderr(self, message):
pass
class CheckAssertMixin(object):
def assertCheck(self, successful, **assertions):
"""
asserts that checks are successful or not
Assertions is a mapping of numbers to check (eg successes=5)
"""
output = TestOutput()
check(output)
self.assertEqual(output.successful, successful)
for key, value in assertions.items():
self.assertEqual(getattr(output, key), value, "%s %s expected, got %s" % (value, key, getattr(output, key)))
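        # e.g. self.assertCheck(True, warnings=1, errors=0) asserts a
        # successful check run that emitted exactly one warning.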
class CheckTests(CheckAssertMixin, SimpleTestCase):
def test_test_confs(self):
self.assertCheck(True, errors=0, warnings=0)
def test_cms_moderator_deprecated(self):
with self.settings(CMS_MODERATOR=True):
self.assertCheck(True, warnings=1, errors=0)
def test_cms_flat_urls_deprecated(self):
with self.settings(CMS_FLAT_URLS=True):
self.assertCheck(True, warnings=1, errors=0)
def test_no_sekizai(self):
base.get_templatetags_modules.cache_clear()
apps.set_available_apps(['cms', 'menus'])
self.assertCheck(False, errors=2)
apps.unset_available_apps()
def test_no_sekizai_template_context_processor(self):
override = {'TEMPLATES': deepcopy(settings.TEMPLATES)}
override['TEMPLATES'][0]['OPTIONS']['context_processors'] = []
with self.settings(**override):
self.assertCheck(False, errors=2)
def test_old_style_i18n_settings(self):
with self.settings(CMS_LANGUAGES=[('en', 'English')]):
self.assertRaises(ImproperlyConfigured, self.assertCheck, True, warnings=1, errors=0)
def test_cms_hide_untranslated_deprecated(self):
with self.settings(CMS_HIDE_UNTRANSLATED=True):
self.assertCheck(True, warnings=1, errors=0)
def test_cms_language_fallback_deprecated(self):
with self.settings(CMS_LANGUAGE_FALLBACK=True):
self.assertCheck(True, warnings=1, errors=0)
def test_cms_language_conf_deprecated(self):
with self.settings(CMS_LANGUAGE_CONF=True):
self.assertCheck(True, warnings=1, errors=0)
def test_middlewares(self):
MIDDLEWARE_CLASSES=[
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
self.assertCheck(True, warnings=0, errors=0)
with self.settings(MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES):
self.assertCheck(False, warnings=0, errors=2)
def test_cms_site_languages_deprecated(self):
with self.settings(CMS_SITE_LANGUAGES=True):
self.assertCheck(True, warnings=1, errors=0)
def test_cms_frontend_languages_deprecated(self):
with self.settings(CMS_FRONTEND_LANGUAGES=True):
self.assertCheck(True, warnings=1, errors=0)
def test_copy_relations_fk_check(self):
"""
this is ugly, feel free to come up with a better test
"""
self.assertCheck(True, warnings=0, errors=0)
copy_rel = ArticlePluginModel.copy_relations
del ArticlePluginModel.copy_relations
self.assertCheck(True, warnings=2, errors=0)
ArticlePluginModel.copy_relations = copy_rel
def test_copy_relations_on_page_extension(self):
"""
Agreed. It is ugly, but it works.
"""
self.assertCheck(True, warnings=0, errors=0)
copy_rel = MyPageExtension.copy_relations
del MyPageExtension.copy_relations
self.assertCheck(True, warnings=1, errors=0)
MyPageExtension.copy_relations = copy_rel
def test_placeholder_tag_deprecation(self):
self.assertCheck(True, warnings=0, errors=0)
alt_dir = os.path.join(
os.path.dirname(__file__),
'..',
'test_utils',
'project',
'alt_templates'
)
NEWTEMPLATES = deepcopy(settings.TEMPLATES)
NEWTEMPLATES[0]['DIRS'] = [alt_dir]
with self.settings(TEMPLATES=NEWTEMPLATES, CMS_TEMPLATES=[]):
self.assertCheck(True, warnings=1, errors=0)
def test_non_numeric_site_id(self):
self.assertCheck(True, warnings=0, errors=0)
with self.settings(SITE_ID='broken'):
self.assertCheck(False, warnings=0, errors=1)
class CheckWithDatabaseTests(CheckAssertMixin, TestCase):
def test_check_plugin_instances(self):
        self.assertCheck(True, warnings=0, errors=0)
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp", "treebeard"]
with self.settings(INSTALLED_APPS=apps):
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, "LinkPlugin", "en",
name="A Link", url="https://www.django-cms.org")
# create a CMSPlugin with an unsaved instance
instanceless_plugin = CMSPlugin(language="en", plugin_type="TextPlugin")
instanceless_plugin.save()
self.assertCheck(False, warnings=0, errors=2)
# create a bogus CMSPlugin to simulate one which used to exist but
# is no longer installed
bogus_plugin = CMSPlugin(language="en", plugin_type="BogusPlugin")
bogus_plugin.save()
self.assertCheck(False, warnings=0, errors=3)
| bsd-3-clause | 3,094,205,849,490,816,000 | -366,018,599,675,483,600 | 37.808743 | 120 | 0.661222 | false |
vmindru/ansible | lib/ansible/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py | 31 | 2619 | #!/usr/bin/python
# Copyright: (c) 2018, Juergen Wiebe <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: utm_network_interface_address_info
author:
- Juergen Wiebe (@steamx)
short_description: Get info for a network/interface_address object
description:
- Get info for a network/interface_address object in SOPHOS UTM.
version_added: "2.8"
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: true
extends_documentation_fragment:
- utm
"""
EXAMPLES = """
- name: utm network interface address
utm_proxy_interface_address_info:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestNetworkInterfaceAddress
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: string
_locked:
description: Whether or not the object is currently locked
type: boolean
_type:
description: The type of the object
type: string
name:
description: The name of the object
type: string
address:
description: The ip4 address of the network/interface_address object
type: string
address6:
description: The ip6 address of the network/interface_address object
type: string
comment:
description: The comment string
type: string
        resolved:
            description: Whether or not the IPv4 address is resolved
            type: boolean
        resolved6:
            description: Whether or not the IPv6 address is resolved
            type: boolean
"""
from ansible.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
endpoint = "network/interface_address"
key_to_check_for_changes = []
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True)
)
)
try:
UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 | -4,739,559,390,284,978,000 | 5,401,091,273,334,584,000 | 24.930693 | 92 | 0.624666 | false |
enigmampc/catalyst | catalyst/data/continuous_future_reader.py | 1 | 12198 | import numpy as np
import pandas as pd
from catalyst.data.session_bars import SessionBarReader
class ContinuousFutureSessionBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
        columns : list of str
            'open', 'high', 'low', 'close', 'volume', or 'sid'
        start_date : pd.Timestamp
            Beginning of the window range.
        end_date : pd.Timestamp
            End of the window range.
        assets : list of ContinuousFuture
            The continuous future assets in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol, start_date, end_date, asset.offset)
num_sessions = len(
self.trading_calendar.sessions_in_range(start_date, end_date))
shape = num_sessions, len(assets)
results = []
tc = self._bar_reader.trading_calendar
sessions = tc.sessions_in_range(start_date, end_date)
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = sessions.get_loc(start)
if roll_date is not None:
end = roll_date - sessions.freq
end_loc = sessions.get_loc(end)
else:
end = end_date
end_loc = len(sessions) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start = sessions[end_loc + 1]
for column in columns:
if column != 'volume' and column != 'sid':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.int64)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
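    # Usage sketch (names hypothetical): for continuous futures `cfs` built
    # by the asset finder, a session-aligned close matrix is
    #   closes = reader.load_raw_arrays(['close'], start, end, cfs)[0]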
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the catalyst.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
        continuous_future : ContinuousFuture
            The continuous future for which to look up the value.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
The OHLVC name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : catalyst.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
            All session labels (unioning the ranges for all assets) which the
reader can provide.
"""
return self._bar_reader.sessions
class ContinuousFutureMinuteBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
        columns : list of str
            'open', 'high', 'low', 'close', 'volume', or 'sid'
        start_date : pd.Timestamp
            Beginning of the window range.
        end_date : pd.Timestamp
            End of the window range.
        assets : list of ContinuousFuture
            The continuous future assets in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
tc = self.trading_calendar
start_session = tc.minute_to_session_label(start_date)
end_session = tc.minute_to_session_label(end_date)
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol,
start_session,
end_session, asset.offset)
sessions = tc.sessions_in_range(start_date, end_date)
minutes = tc.minutes_in_range(start_date, end_date)
num_minutes = len(minutes)
shape = num_minutes, len(assets)
results = []
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = minutes.searchsorted(start)
if roll_date is not None:
_, end = tc.open_and_close_for_session(
roll_date - sessions.freq)
end_loc = minutes.searchsorted(end)
else:
end = end_date
end_loc = len(minutes) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start, _ = tc.open_and_close_for_session(
tc.minute_to_session_label(minutes[end_loc + 1]))
for column in columns:
if column != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the catalyst.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
        continuous_future : ContinuousFuture
            The continuous future for which to look up the value.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
The OHLVC name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
session (in daily mode) according to this reader's tradingcalendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : catalyst.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
return self._bar_reader.sessions
| apache-2.0 | 7,178,889,856,516,369,000 | -2,151,858,677,917,952,500 | 32.977716 | 79 | 0.523856 | false |
NaturalGIS/naturalgis_qgis | python/plugins/processing/gui/BatchAlgorithmDialog.py | 25 | 11235 | # -*- coding: utf-8 -*-
"""
***************************************************************************
BatchAlgorithmDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
from pprint import pformat
import time
from qgis.PyQt.QtWidgets import QPushButton, QDialogButtonBox
from qgis.PyQt.QtCore import Qt, QCoreApplication
from qgis.core import (QgsProcessingOutputHtml,
QgsProcessingOutputNumber,
QgsProcessingOutputString,
QgsProcessingOutputBoolean,
QgsProject,
QgsProcessingMultiStepFeedback,
QgsScopedProxyProgressTask,
QgsProcessingException)
from qgis.gui import QgsProcessingAlgorithmDialogBase
from qgis.utils import OverrideCursor, iface
from processing.gui.BatchPanel import BatchPanel
from processing.gui.AlgorithmExecutor import execute
from processing.gui.Postprocessing import handleAlgorithmResults
from processing.core.ProcessingResults import resultsList
from processing.tools.system import getTempFilename
from processing.tools import dataobjects
import codecs
class BatchFeedback(QgsProcessingMultiStepFeedback):
def __init__(self, steps, feedback):
super().__init__(steps, feedback)
self.errors = []
def reportError(self, error: str, fatalError: bool = False):
self.errors.append(error)
super().reportError(error, fatalError)
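# BatchFeedback accumulates every error reported during one row's run so a
# per-row error summary can be assembled after the whole batch finishes.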
class BatchAlgorithmDialog(QgsProcessingAlgorithmDialogBase):
def __init__(self, alg, parent=None):
super().__init__(parent)
self.setAlgorithm(alg)
self.setWindowTitle(self.tr('Batch Processing - {0}').format(self.algorithm().displayName()))
self.setMainWidget(BatchPanel(self, self.algorithm()))
self.hideShortHelp()
self.btnRunSingle = QPushButton(QCoreApplication.translate('BatchAlgorithmDialog', "Run as Single Process…"))
self.btnRunSingle.clicked.connect(self.runAsSingle)
self.buttonBox().addButton(self.btnRunSingle, QDialogButtonBox.ResetRole) # reset role to ensure left alignment
self.updateRunButtonVisibility()
def runAsSingle(self):
self.close()
from processing.gui.AlgorithmDialog import AlgorithmDialog
dlg = AlgorithmDialog(self.algorithm().create(), parent=iface.mainWindow())
dlg.show()
dlg.exec_()
def resetAdditionalGui(self):
self.btnRunSingle.setEnabled(True)
def blockAdditionalControlsWhileRunning(self):
self.btnRunSingle.setEnabled(False)
def runAlgorithm(self):
alg_parameters = []
feedback = self.createFeedback()
load_layers = self.mainWidget().checkLoadLayersOnCompletion.isChecked()
project = QgsProject.instance() if load_layers else None
for row in range(self.mainWidget().batchRowCount()):
parameters, ok = self.mainWidget().parametersForRow(row, destinationProject=project, warnOnInvalid=True)
if ok:
alg_parameters.append(parameters)
if not alg_parameters:
return
task = QgsScopedProxyProgressTask(self.tr('Batch Processing - {0}').format(self.algorithm().displayName()))
multi_feedback = BatchFeedback(len(alg_parameters), feedback)
feedback.progressChanged.connect(task.setProgress)
algorithm_results = []
errors = []
with OverrideCursor(Qt.WaitCursor):
self.blockControlsWhileRunning()
self.setExecutedAnyResult(True)
self.cancelButton().setEnabled(True)
# Make sure the Log tab is visible before executing the algorithm
try:
self.showLog()
self.repaint()
except Exception: # FIXME which one?
pass
start_time = time.time()
for count, parameters in enumerate(alg_parameters):
if feedback.isCanceled():
break
self.setProgressText(
QCoreApplication.translate('BatchAlgorithmDialog', '\nProcessing algorithm {0}/{1}…').format(
count + 1, len(alg_parameters)))
self.setInfo(self.tr('<b>Algorithm {0} starting…</b>').format(self.algorithm().displayName()),
escapeHtml=False)
multi_feedback.setCurrentStep(count)
parameters = self.algorithm().preprocessParameters(parameters)
feedback.pushInfo(self.tr('Input parameters:'))
feedback.pushCommandInfo(pformat(parameters))
feedback.pushInfo('')
# important - we create a new context for each iteration
# this avoids holding onto resources and layers from earlier iterations,
# and allows batch processing of many more items then is possible
# if we hold on to these layers
context = dataobjects.createContext(feedback)
alg_start_time = time.time()
multi_feedback.errors = []
results, ok = self.algorithm().run(parameters, context, multi_feedback)
if ok:
self.setInfo(
QCoreApplication.translate('BatchAlgorithmDialog', 'Algorithm {0} correctly executed…').format(
self.algorithm().displayName()), escapeHtml=False)
feedback.pushInfo(
self.tr('Execution completed in {0:0.2f} seconds'.format(time.time() - alg_start_time)))
feedback.pushInfo(self.tr('Results:'))
feedback.pushCommandInfo(pformat(results))
feedback.pushInfo('')
algorithm_results.append({'parameters': parameters, 'results': results})
handleAlgorithmResults(self.algorithm(), context, multi_feedback, False, parameters)
else:
err = [e for e in multi_feedback.errors]
self.setInfo(
QCoreApplication.translate('BatchAlgorithmDialog', 'Algorithm {0} failed…').format(
self.algorithm().displayName()), escapeHtml=False)
feedback.reportError(
self.tr('Execution failed after {0:0.2f} seconds'.format(time.time() - alg_start_time)),
fatalError=False)
errors.append({'parameters': parameters, 'errors': err})
feedback.pushInfo(self.tr('Batch execution completed in {0:0.2f} seconds'.format(time.time() - start_time)))
if errors:
feedback.reportError(self.tr('{} executions failed. See log for further details.').format(len(errors)), fatalError=True)
task = None
self.finish(algorithm_results, errors)
self.cancelButton().setEnabled(False)
def finish(self, algorithm_results, errors):
for count, results in enumerate(algorithm_results):
self.loadHTMLResults(results['results'], count)
self.createSummaryTable(algorithm_results, errors)
self.resetGui()
def loadHTMLResults(self, results, num):
for out in self.algorithm().outputDefinitions():
if isinstance(out, QgsProcessingOutputHtml) and out.name() in results and results[out.name()]:
resultsList.addResult(icon=self.algorithm().icon(), name='{} [{}]'.format(out.description(), num),
result=results[out.name()])
def createSummaryTable(self, algorithm_results, errors):
createTable = False
for out in self.algorithm().outputDefinitions():
if isinstance(out, (QgsProcessingOutputNumber, QgsProcessingOutputString, QgsProcessingOutputBoolean)):
createTable = True
break
if not createTable and not errors:
return
outputFile = getTempFilename('html')
with codecs.open(outputFile, 'w', encoding='utf-8') as f:
if createTable:
for i, res in enumerate(algorithm_results):
results = res['results']
params = res['parameters']
if i > 0:
f.write('<hr>\n')
f.write(self.tr('<h3>Parameters</h3>\n'))
f.write('<table>\n')
for param in self.algorithm().parameterDefinitions():
if not param.isDestination():
if param.name() in params:
f.write('<tr><th>{}</th><td>{}</td></tr>\n'.format(param.description(),
params[param.name()]))
f.write('</table>\n')
f.write(self.tr('<h3>Results</h3>\n'))
f.write('<table>\n')
for out in self.algorithm().outputDefinitions():
if out.name() in results:
f.write('<tr><th>{}</th><td>{}</td></tr>\n'.format(out.description(), results[out.name()]))
f.write('</table>\n')
if errors:
f.write('<h2 style="color: red">{}</h2>\n'.format(self.tr('Errors')))
for i, res in enumerate(errors):
                    error_messages = res['errors']
params = res['parameters']
if i > 0:
f.write('<hr>\n')
f.write(self.tr('<h3>Parameters</h3>\n'))
f.write('<table>\n')
for param in self.algorithm().parameterDefinitions():
if not param.isDestination():
if param.name() in params:
f.write(
'<tr><th>{}</th><td>{}</td></tr>\n'.format(param.description(), params[param.name()]))
f.write('</table>\n')
f.write('<h3>{}</h3>\n'.format(self.tr('Error')))
                    f.write('<p style="color: red">{}</p>\n'.format('<br>'.join(error_messages)))
resultsList.addResult(icon=self.algorithm().icon(),
name='{} [summary]'.format(self.algorithm().name()), timestamp=time.localtime(),
result=outputFile)
| gpl-2.0 | -2,690,309,149,378,765,000 | -7,251,473,017,268,078,000 | 43.200787 | 132 | 0.553398 | false |
vfasky/BlogCatke | virtualenv.bundle/requests/packages/chardet2/jpcntx.py | 25 | 19197 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
NUM_OF_CATEGORY = 6
DONT_KNOW = -1
ENOUGH_REL_THRESHOLD = 100
MAX_REL_THRESHOLD = 1000
MINIMUM_DATA_THRESHOLD = 4
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = ( \
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
def __init__(self):
self.reset()
def reset(self):
        self._mTotalRel = 0 # total sequences received
        self._mRelSample = [0] * NUM_OF_CATEGORY # category counters; each integer counts sequences in its category
        self._mNeedToSkipCharNum = 0 # if the last byte in the current buffer is not the last byte of a character, this records how many bytes to skip in the next buffer
        self._mLastCharOrder = -1 # the order of the previous char
        self._mDone = False # if this flag is set to True, detection is done and a conclusion has been made
def feed(self, aBuf, aLen):
if self._mDone: return
        # The buffer we got is byte oriented, and a character may span more than
        # one buffer. In case the last one or two bytes of the previous buffer were
        # not complete, we recorded how many bytes were needed to complete that
        # character, and we skip those bytes here. We could record those bytes and
        # analyse the character once it is complete, but since one character will
        # not make much difference, simply skipping it simplifies our logic and
        # improves performance.
i = self._mNeedToSkipCharNum
while i < aLen:
order, charLen = self.get_order(aBuf[i:i+2])
i += charLen
if i > aLen:
self._mNeedToSkipCharNum = i - aLen
self._mLastCharOrder = -1
else:
if (order != -1) and (self._mLastCharOrder != -1):
self._mTotalRel += 1
if self._mTotalRel > MAX_REL_THRESHOLD:
self._mDone = True
break
self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
self._mLastCharOrder = order
def got_enough_data(self):
return self._mTotalRel > ENOUGH_REL_THRESHOLD
def get_confidence(self):
# This is just one way to calculate confidence. It works well for me.
if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
else:
return DONT_KNOW
def get_order(self, aBuf):
return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
def get_order(self, aBuf):
if not aBuf: return -1, 1
# find out current char's byte length
if ((aBuf[0] >= 0x81) and (aBuf[0] <= 0x9F)) or \
((aBuf[0] >= 0xE0) and (aBuf[0] <= 0xFC)):
charLen = 2
else:
charLen = 1
        # return its order if it is hiragana
        # (hiragana lead bytes in Shift_JIS are 0x82; the Python 2 original
        # compared against octal '\202', i.e. 0x82 == 130, so the decimal
        # literal 202 that appeared here was a porting slip)
        if len(aBuf) > 1:
            if (aBuf[0] == 0x82) and \
               (aBuf[1] >= 0x9F) and \
               (aBuf[1] <= 0xF1):
                return aBuf[1] - 0x9F, charLen
return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
def get_order(self, aBuf):
if not aBuf: return -1, 1
# find out current char's byte length
if (aBuf[0] == 0x8E) or \
((aBuf[0] >= 0xA1) and (aBuf[0] <= 0xFE)):
charLen = 2
elif aBuf[0] == 0x8F:
charLen = 3
else:
charLen = 1
# return its order if it is hiragana
if len(aBuf) > 1:
if (aBuf[0] == 0xA4) and \
(aBuf[1] >= 0xA1) and \
(aBuf[1] <= 0xF3):
return aBuf[1] - 0xA1, charLen
return -1, charLen
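

# Editor's sketch (not part of the original module): a minimal illustration of
# driving one of the analysers above. The sample bytes are hiragana "a i"
# (0x82A0 0x82A2 in Shift_JIS) repeated; jp2CharContext[prev_order][cur_order]
# selects the frequency category that feed() accumulates in _mRelSample.
#
#     analyser = SJISContextAnalysis()
#     sample = b'\x82\xa0\x82\xa2' * 200
#     analyser.feed(sample, len(sample))
#     if analyser.got_enough_data():
#         print(analyser.get_confidence())   # near 1.0 for real Japanese text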
| apache-2.0 | 1,823,897,016,795,689,500 | 7,725,425,439,652,399,000 | 89.414286 | 168 | 0.514247 | false |
FreeScienceCommunity/or-tools | examples/python/volsay2.py | 34 | 2029 | # Copyright 2011 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Volsay problem in Google or-tools.
From the OPL model volsay.mod
Using arrays.
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.linear_solver import pywraplp
def main(unused_argv):
# Create the solver.
# using GLPK
solver = pywraplp.Solver('CoinsGridGLPK',
pywraplp.Solver.GLPK_LINEAR_PROGRAMMING)
# Using CLP
# solver = pywraplp.Solver('CoinsGridCLP',
# pywraplp.Solver.CLP_LINEAR_PROGRAMMING)
# data
num_products = 2
Gas = 0
Chloride = 1
products = ['Gas', 'Chloride']
# declare variables
production = [solver.NumVar(0, 100000, 'production[%i]' % i)
for i in range(num_products)]
#
# constraints
#
solver.Add(production[Gas] + production[Chloride] <= 50)
solver.Add(3 * production[Gas] + 4 * production[Chloride] <= 180)
# objective
objective = solver.Maximize(40 * production[Gas] + 50 * production[Chloride])
print 'NumConstraints:', solver.NumConstraints()
#
# solution and search
#
solver.Solve()
print
print 'objective = ', solver.Objective().Value()
for i in range(num_products):
print products[i], '=', production[i].SolutionValue(),
print 'ReducedCost = ', production[i].ReducedCost()
if __name__ == '__main__':
main('Volsay')
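
# Editor's note (worked check, not in the original file): the optimum of this
# LP lies where production[Gas] + production[Chloride] = 50 meets
# 3*production[Gas] + 4*production[Chloride] = 180, i.e. Gas = 20 and
# Chloride = 30, giving an objective of 40*20 + 50*30 = 2300.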
| apache-2.0 | 9,091,180,088,616,003,000 | 5,064,056,157,640,439,000 | 25.697368 | 79 | 0.682109 | false |
mandeepdhami/nova | nova/tests/functional/v3/test_pci.py | 24 | 7238 | # Copyright 2013 Intel.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
import testtools
from nova import db
from nova import objects
from nova.objects import pci_device_pool
from nova.tests.functional.v3 import api_sample_base
from nova.tests.functional.v3 import test_servers
skip_msg = "Bug 1426241"
fake_db_dev_1 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': '0000:04:10.0',
'vendor_id': '8086',
'numa_node': 0,
'product_id': '1520',
'dev_type': 'type-VF',
'status': 'available',
'dev_id': 'pci_0000_04_10_0',
'label': 'label_8086_1520',
'instance_uuid': '69ba1044-0766-4ec0-b60d-09595de034a1',
'request_id': None,
'extra_info': '{"key1": "value1", "key2": "value2"}'
}
fake_db_dev_2 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 2,
'compute_node_id': 1,
'address': '0000:04:10.1',
'vendor_id': '8086',
'numa_node': 1,
'product_id': '1520',
'dev_type': 'type-VF',
'status': 'available',
'dev_id': 'pci_0000_04_10_1',
'label': 'label_8086_1520',
'instance_uuid': 'd5b446a6-a1b4-4d01-b4f0-eac37b3a62fc',
'request_id': None,
'extra_info': '{"key3": "value3", "key4": "value4"}'
}
class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase):
extension_name = "os-pci"
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extra_extensions_to_load = ['os-hypervisors']
extension_name = 'os-pci'
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
cpu_info = collections.OrderedDict([
('arch', 'x86_64'),
('model', 'Nehalem'),
('vendor', 'Intel'),
('features', ['pge', 'clflush']),
('topology', {
'cores': 1,
'threads': 1,
'sockets': 4,
}),
])
self.fake_compute_node = objects.ComputeNode(
cpu_info=jsonutils.dumps(cpu_info),
current_workload=0,
disk_available_least=0,
host_ip="1.1.1.1",
state="up",
status="enabled",
free_disk_gb=1028,
free_ram_mb=7680,
hypervisor_hostname="fake-mini",
hypervisor_type="fake",
hypervisor_version=1000,
id=1,
local_gb=1028,
local_gb_used=0,
memory_mb=8192,
memory_mb_used=512,
running_vms=0,
vcpus=1,
vcpus_used=0,
service_id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
pci_device_pools=pci_device_pool.from_pci_stats(
{"count": 5,
"vendor_id": "8086",
"product_id": "1520",
"keya": "valuea",
"key1": "value1",
"numa_node": 1}),)
self.fake_service = objects.Service(
id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
disabled=False,
disabled_reason=None)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.objects.Service.get_by_compute_host")
@mock.patch("nova.objects.ComputeNode.get_by_id")
def test_pci_show(self, mock_obj, mock_svc_get, mock_service):
mock_obj.return_value = self.fake_compute_node
mock_svc_get.return_value = self.fake_service
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-show-resp',
subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.objects.Service.get_by_compute_host")
@mock.patch("nova.objects.ComputeNodeList.get_all")
def test_pci_detail(self, mock_obj, mock_svc_get, mock_service):
mock_obj.return_value = [self.fake_compute_node]
mock_svc_get.return_value = self.fake_service
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/detail')
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-detail-resp',
subs, response, 200)
class PciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-pci"
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
def _fake_pci_device_get_by_id(self, context, id):
return fake_db_dev_1
def _fake_pci_device_get_all_by_node(self, context, id):
return [fake_db_dev_1, fake_db_dev_2]
def test_pci_show(self):
self.stubs.Set(db, 'pci_device_get_by_id',
self._fake_pci_device_get_by_id)
response = self._do_get('os-pci/1')
subs = self._get_regexes()
self._verify_response('pci-show-resp', subs, response, 200)
def test_pci_index(self):
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
response = self._do_get('os-pci')
subs = self._get_regexes()
self._verify_response('pci-index-resp', subs, response, 200)
def test_pci_detail(self):
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
response = self._do_get('os-pci/detail')
subs = self._get_regexes()
self._verify_response('pci-detail-resp', subs, response, 200)
| apache-2.0 | -6,354,000,469,038,902,000 | 530,937,808,216,504,640 | 33.631579 | 78 | 0.572119 | false |
hbrunn/OCB | openerp/service/model.py | 62 | 8421 | # -*- coding: utf-8 -*-
from functools import wraps
import logging
from psycopg2 import IntegrityError, OperationalError, errorcodes
import random
import threading
import time
import openerp
from openerp.tools.translate import translate
from openerp.osv.orm import except_orm
from contextlib import contextmanager
import security
_logger = logging.getLogger(__name__)
PG_CONCURRENCY_ERRORS_TO_RETRY = (errorcodes.LOCK_NOT_AVAILABLE, errorcodes.SERIALIZATION_FAILURE, errorcodes.DEADLOCK_DETECTED)
MAX_TRIES_ON_CONCURRENCY_FAILURE = 5
def dispatch(method, params):
(db, uid, passwd ) = params[0:3]
# set uid tracker - cleaned up at the WSGI
# dispatching phase in openerp.service.wsgi_server.application
threading.current_thread().uid = uid
params = params[3:]
if method == 'obj_list':
raise NameError("obj_list has been discontinued via RPC as of 6.0, please query ir.model directly!")
if method not in ['execute', 'execute_kw', 'exec_workflow']:
raise NameError("Method not available %s" % method)
security.check(db,uid,passwd)
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
fn = globals()[method]
res = fn(db, uid, *params)
openerp.modules.registry.RegistryManager.signal_caches_change(db)
return res
def check(f):
@wraps(f)
def wrapper(___dbname, *args, **kwargs):
""" Wraps around OSV functions and normalises a few exceptions
"""
        dbname = ___dbname  # NOTE: this forbids using "___dbname" as an argument in http routes
def tr(src, ttype):
# We try to do the same as the _(), but without the frame
            # inspection, since we are already wrapping an osv function
# trans_obj = self.get('ir.translation') cannot work yet :(
ctx = {}
if not kwargs:
if args and isinstance(args[-1], dict):
ctx = args[-1]
elif isinstance(kwargs, dict):
if 'context' in kwargs:
ctx = kwargs['context']
elif 'kwargs' in kwargs:
# http entry points such as call_kw()
ctx = kwargs['kwargs'].get('context')
uid = 1
if args and isinstance(args[0], (long, int)):
uid = args[0]
lang = ctx and ctx.get('lang')
if not (lang or hasattr(src, '__call__')):
return src
# We open a *new* cursor here, one reason is that failed SQL
# queries (as in IntegrityError) will invalidate the current one.
cr = False
if hasattr(src, '__call__'):
# callable. We need to find the right parameters to call
# the orm._sql_message(self, cr, uid, ids, context) function,
# or we skip..
# our signature is f(registry, dbname [,uid, obj, method, args])
try:
if args and len(args) > 1:
                        # TODO: self doesn't exist, but this was already wrong before (it was not a registry but just the object_service).
obj = self.get(args[1])
if len(args) > 3 and isinstance(args[3], (long, int, list)):
ids = args[3]
else:
ids = []
cr = openerp.sql_db.db_connect(dbname).cursor()
return src(obj, cr, uid, ids, context=(ctx or {}))
except Exception:
pass
finally:
if cr: cr.close()
return False # so that the original SQL error will
# be returned, it is the best we have.
try:
cr = openerp.sql_db.db_connect(dbname).cursor()
res = translate(cr, name=False, source_type=ttype,
lang=lang, source=src)
if res:
return res
else:
return src
finally:
if cr: cr.close()
def _(src):
return tr(src, 'code')
tries = 0
while True:
try:
if openerp.registry(dbname)._init and not openerp.tools.config['test_enable']:
raise openerp.exceptions.Warning('Currently, this database is not fully loaded and can not be used.')
return f(dbname, *args, **kwargs)
except OperationalError, e:
# Automatically retry the typical transaction serialization errors
if e.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
raise
if tries >= MAX_TRIES_ON_CONCURRENCY_FAILURE:
_logger.warning("%s, maximum number of tries reached" % errorcodes.lookup(e.pgcode))
raise
wait_time = random.uniform(0.0, 2 ** tries)
tries += 1
_logger.info("%s, retry %d/%d in %.04f sec..." % (errorcodes.lookup(e.pgcode), tries, MAX_TRIES_ON_CONCURRENCY_FAILURE, wait_time))
time.sleep(wait_time)
except IntegrityError, inst:
registry = openerp.registry(dbname)
for key in registry._sql_error.keys():
if key in inst[0]:
raise openerp.osv.orm.except_orm(_('Constraint Error'), tr(registry._sql_error[key], 'sql_constraint') or inst[0])
if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION, errorcodes.FOREIGN_KEY_VIOLATION, errorcodes.RESTRICT_VIOLATION):
msg = _('The operation cannot be completed, probably due to the following:\n- deletion: you may be trying to delete a record while other records still reference it\n- creation/update: a mandatory field is not correctly set')
_logger.debug("IntegrityError", exc_info=True)
try:
errortxt = inst.pgerror.replace('«','"').replace('»','"')
if '"public".' in errortxt:
context = errortxt.split('"public".')[1]
model_name = table = context.split('"')[1]
else:
last_quote_end = errortxt.rfind('"')
last_quote_begin = errortxt.rfind('"', 0, last_quote_end)
model_name = table = errortxt[last_quote_begin+1:last_quote_end].strip()
model = table.replace("_",".")
if model in registry:
model_obj = registry[model]
model_name = model_obj._description or model_obj._name
msg += _('\n\n[object with reference: %s - %s]') % (model_name, model)
except Exception:
pass
raise openerp.osv.orm.except_orm(_('Integrity Error'), msg)
else:
raise openerp.osv.orm.except_orm(_('Integrity Error'), inst[0])
return wrapper
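
# Editor's sketch (illustrative only; do_work() is a hypothetical stand-in):
# the retry loop inside check() is randomized exponential backoff for
# PostgreSQL concurrency errors. The bare pattern looks like:
#
#     tries = 0
#     while True:
#         try:
#             return do_work()
#         except OperationalError as e:
#             if e.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY \
#                     or tries >= MAX_TRIES_ON_CONCURRENCY_FAILURE:
#                 raise
#             tries += 1
#             time.sleep(random.uniform(0.0, 2 ** tries))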
def execute_cr(cr, uid, obj, method, *args, **kw):
object = openerp.registry(cr.dbname).get(obj)
if object is None:
raise except_orm('Object Error', "Object %s doesn't exist" % obj)
return getattr(object, method)(cr, uid, *args, **kw)
def execute_kw(db, uid, obj, method, args, kw=None):
return execute(db, uid, obj, method, *args, **kw or {})
@check
def execute(db, uid, obj, method, *args, **kw):
threading.currentThread().dbname = db
with openerp.registry(db).cursor() as cr:
if method.startswith('_'):
raise except_orm('Access Denied', 'Private methods (such as %s) cannot be called remotely.' % (method,))
res = execute_cr(cr, uid, obj, method, *args, **kw)
if res is None:
            _logger.warning('The method %s of the object %s cannot return `None`!', method, obj)
return res
def exec_workflow_cr(cr, uid, obj, signal, *args):
res_id = args[0]
return execute_cr(cr, uid, obj, 'signal_workflow', [res_id], signal)[res_id]
@check
def exec_workflow(db, uid, obj, signal, *args):
with openerp.registry(db).cursor() as cr:
return exec_workflow_cr(cr, uid, obj, signal, *args)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,927,022,696,167,436,000 | -8,506,779,484,062,492,000 | 43.310526 | 244 | 0.547809 | false |
ocaisa/easybuild-framework | easybuild/toolchains/iompi.py | 5 | 1519 | ##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for iompi compiler toolchain (includes Intel compilers (icc, ifort) and OpenMPI).
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.compiler.inteliccifort import IntelIccIfort
from easybuild.toolchains.mpi.openmpi import OpenMPI
class Iompi(IntelIccIfort, OpenMPI):
"""
Compiler toolchain with Intel compilers (icc/ifort) and OpenMPI.
"""
NAME = 'iompi'
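
# Editor's note (usage sketch, not in the original file; the version shown is
# illustrative): an easyconfig selects this toolchain via its ``toolchain``
# line, e.g.
#
#     toolchain = {'name': 'iompi', 'version': '2015.03'}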
| gpl-2.0 | -6,261,990,513,993,238,000 | -8,762,174,965,577,495,000 | 36.975 | 98 | 0.755102 | false |
shadyueh/pyranking | env/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py | 252 | 106466 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
PY3 = sys.version_info > (3,)
PY2 = not PY3
if PY3:
from urllib.parse import urlparse, urlunparse
if PY2:
from urlparse import urlparse, urlunparse
if PY3:
string_types = str,
else:
string_types = str, eval('unicode')
iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems()
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib.machinery as importlib_machinery
else:
importlib_machinery = None
try:
import parser
except ImportError:
pass
import pip._vendor.packaging.version
import pip._vendor.packaging.specifiers
packaging = pip._vendor.packaging
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*'+part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
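
# Example (editor's sketch): parse_version() returns objects ordered by
# PEP 440 rules, with a legacy fallback for non-conforming strings:
#
#     >>> parse_version('1.10') > parse_version('1.9')
#     True
#     >>> parse_version('1.0.dev1') < parse_version('1.0')
#     True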
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
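
# Example (editor's sketch): an egg built for an older Mac OS X minor release
# may run on a newer one with the same architecture, but not the reverse:
#
#     >>> compatible_platforms('macosx-10.3-ppc', 'macosx-10.4-ppc')
#     True
#     >>> compatible_platforms('macosx-10.4-ppc', 'macosx-10.3-ppc')
#     False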
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
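# --- Illustrative sketch (added for exposition; not part of the original
# pkg_resources API). A minimal tour of the WorkingSet methods defined
# above; 'somepackage' is a hypothetical requirement, and this function is
# never called by the module itself.
def _example_working_set_usage():
    ws = WorkingSet()
    # Enumerate advertised console scripts across all active distributions.
    for ep in ws.iter_entry_points('console_scripts'):
        print('%s = %s' % (ep.name, ep.module_name))
    # Resolve a requirement into the distributions that must be activated;
    # raises DistributionNotFound or VersionConflict on failure.
    for dist in ws.resolve(parse_requirements('somepackage')):
        ws.add(dist)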
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
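# Illustrative sketch (added): snapshotting and querying an Environment.
# Results depend entirely on what is installed where this runs.
def _example_environment_usage():
    env = Environment()  # scans sys.path by default
    for project_name in env:
        # per-project lists are kept sorted newest-first
        newest = env[project_name][0]
        print(project_name, newest.version)
    # environments support merging via '+' and '+='
    return env + Environment([])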
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
        except Exception:
            self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
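# Illustrative sketch (added): reading package data through a
# ResourceManager. The package name 'pip' and resource '__init__.py' are
# merely plausible placeholders for any importable package and file.
def _example_resource_manager_usage():
    manager = ResourceManager()
    if manager.resource_exists('pip', '__init__.py'):
        data = manager.resource_string('pip', '__init__.py')
        print('%d bytes' % len(data))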
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
# XXX this may be locale-specific!
app_data = 'Application Data'
app_homes = [
# best option, should be locale-safe
(('APPDATA',), None),
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
# 95/98/ME
(('WINDIR',), app_data),
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
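# Illustrative sketch (added): the normalization helpers above in action.
# The safe_version() expectations assume the vendored 'packaging' module
# normalizes pre-release versions per PEP 440.
def _example_name_helpers():
    assert safe_name('My Project!') == 'My-Project-'
    assert safe_version('2.1-beta') == '2.1b0'       # PEP 440 normalization
    assert safe_version('1.0 alpha') == '1.0.alpha'  # legacy fallback path
    assert safe_extra('Feature One') == 'feature_one'
    assert to_filename('my-project') == 'my_project'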
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': platform.python_version,
'python_version': lambda: platform.python_version()[:3],
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError as e:
return cls.normalize_exception(e)
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error
message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error
messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.and_, items)
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.or_, items)
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@classmethod
def comparison(cls, nodelist):
if len(nodelist) > 4:
msg = "Chained comparison not allowed in environment markers"
raise SyntaxError(msg)
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
msg = repr(cop) + " operator not allowed in environment markers"
raise SyntaxError(msg)
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'>': operator.gt,
'<=': operator.le,
'>=': operator.ge,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
        This implementation uses the 'parser' module, which is not implemented
        on Jython and has been superseded by the 'ast' module in Python 2.6
        and later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
from pip._vendor import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError as e:
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
        while len(nodelist) == 2:
            nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
        while len(nodelist) == 2:
            nodelist = nodelist[1]
        kind = nodelist[0]
        name = nodelist[1]
        if kind == token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
        if kind == token.STRING:
s = nodelist[1]
if not cls._safe_string(s):
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@staticmethod
def _safe_string(cand):
return (
cand[:1] in "'\"" and
not cand.startswith('"""') and
not cand.startswith("'''") and
'\\' not in cand
)
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
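# Illustrative sketch (added): evaluating environment markers. Only names
# listed in MarkerEvaluation.values may appear; results are platform-
# dependent, so the printed values here are examples, not guarantees.
def _example_marker_evaluation():
    print(evaluate_marker("os_name == 'posix'"))   # True on POSIX systems
    print(invalid_marker("os_name =="))            # a SyntaxError instance
    print(invalid_marker("os_name == 'posix'"))    # False (marker is valid)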
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
register_loader_type(type(None), DefaultProvider)
if importlib_machinery is not None:
register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre+zip_path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name=='nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name=='PKG-INFO'
def get_metadata(self, name):
if name=='PKG-INFO':
with open(self.path,'rU') as f:
metadata = f.read()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if importlib_machinery is not None:
register_finder(importlib_machinery.FileFinder, find_on_path)
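# Illustrative sketch (added): discovering distributions on sys.path
# entries via the finders registered above.
def _example_find_distributions():
    for entry in sys.path:
        for dist in find_distributions(entry):
            print(repr(dist), 'from', entry)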
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
for path_item in path:
if path_item not in module.__path__:
module.__path__.append(path_item)
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if importlib_machinery is not None:
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
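# Illustrative sketch (added): yield_lines() flattens strings and nested
# sequences while dropping blank lines and '#' comments.
def _example_yield_lines():
    text = "first\n# a comment\n\n  second  "
    assert list(yield_lines(text)) == ['first', 'second']
    assert list(yield_lines([text, ['third']])) == ['first', 'second', 'third']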
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
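# Illustrative sketch (added): EGG_NAME decomposes an egg basename (with
# the .egg extension already stripped) into its optional components.
def _example_egg_name():
    match = EGG_NAME('FooBar-1.2-py2.7-linux-x86_64')
    assert match.group('name') == 'FooBar'
    assert match.group('ver') == '1.2'
    assert match.group('pyver') == '2.7'
    assert match.group('plat') == 'linux-x86_64'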
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
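# Illustrative sketch (added): parsing a single entry point specification;
# the module and attribute names are hypothetical.
def _example_entry_point_parse():
    ep = EntryPoint.parse('main = some.module:SomeClass.factory [extra1]')
    assert ep.name == 'main'
    assert ep.module_name == 'some.module'
    assert ep.attrs == ('SomeClass', 'factory')
    assert tuple(ep.extras) == ('extra1',)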
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs=[]
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc=None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
break
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
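# Illustrative sketch (added): a Distribution can be constructed from an
# egg filename alone; no such file needs to exist for parsing to work.
def _example_distribution_from_filename():
    dist = Distribution.from_filename('FooBar-1.2-py2.7.egg')
    assert dist.project_name == 'FooBar'
    assert dist.key == 'foobar'
    assert dist.version == '1.2'
    assert dist.py_version == '2.7'
    print(dist.as_requirement())  # FooBar==1.2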
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
necessary, and remove parenthesis.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from pip._vendor._markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
items = []
while not TERMINATOR(line, p):
if CONTINUE(line, p):
try:
line = next(lines)
p = 0
except StopIteration:
msg = "\\ must not appear on the last nonblank line"
raise RequirementParseError(msg)
match = ITEM(line, p)
if not match:
msg = "Expected " + item_name + " in"
raise RequirementParseError(msg, line, "at", line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line, p)
if match:
# skip the comma
p = match.end()
elif not TERMINATOR(line, p):
msg = "Expected ',' or end-of-list in"
raise RequirementParseError(msg, line, "at", line[p:])
match = TERMINATOR(line, p)
# skip the terminator, if any
if match:
p = match.end()
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise RequirementParseError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line, p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
"version spec")
specs = [(op, val) for op, val in specs]
yield Requirement(project_name, specs, extras)
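# Illustrative usage of parse_requirements (added for clarity; not part of the
# original source; the expected output assumes this legacy parser's behavior):
#   >>> [str(r) for r in parse_requirements("foo>=1.0\nbar[extra]==2.0")]
#   ['foo>=1.0', 'bar[extra]==2.0']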
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
self.specifier = packaging.specifiers.SpecifierSet(
",".join(["".join([x, y]) for x, y in specs])
)
self.specs = specs
self.extras = tuple(map(safe_extra, extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
)
self.__hash = hash(self.hashCmp)
def __str__(self):
extras = ','.join(self.extras)
if extras:
extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, self.specifier)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs) == 1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
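# Illustrative usage of split_sections (added for clarity; not in the
# original source):
#   >>> list(split_sections("[console_scripts]\nfoo = foo:main"))
#   [('console_scripts', ['foo = foo:main'])]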
def _mkstemp(*args,**kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args,**kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
    working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
| mit | 4,894,710,860,950,594,000 | -2,496,744,992,790,011,400 | 33.266495 | 82 | 0.594518 | false |
KohlsTechnology/ansible | lib/ansible/plugins/filter/ipaddr.py | 15 | 29588 | # (c) 2014, Maciej Delmanowski <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from functools import partial
import types
try:
import netaddr
except ImportError:
# in this case, we'll make the filters return error messages (see bottom)
netaddr = None
else:
class mac_linux(netaddr.mac_unix):
pass
mac_linux.word_fmt = '%.2x'
from ansible import errors
# ---- IP address and network query helpers ----
def _empty_ipaddr_query(v, vtype):
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
def _first_last(v):
if v.size == 2:
first_usable = int(netaddr.IPAddress(v.first))
last_usable = int(netaddr.IPAddress(v.last))
return first_usable, last_usable
elif v.size > 1:
first_usable = int(netaddr.IPAddress(v.first + 1))
last_usable = int(netaddr.IPAddress(v.last - 1))
return first_usable, last_usable
def _6to4_query(v, vtype, value):
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
def _ip_query(v):
if v.size == 1:
return str(v.ip)
if v.size > 1:
# /31 networks in netaddr have no broadcast address
if v.ip != v.network or not v.broadcast:
return str(v.ip)
def _gateway_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _address_prefix_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _bool_ipaddr_query(v):
if v:
return True
def _broadcast_query(v):
if v.size > 2:
return str(v.broadcast)
def _cidr_query(v):
return str(v)
def _cidr_lookup_query(v, iplist, value):
try:
if v in iplist:
return value
except:
return False
def _first_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size == 2:
return str(netaddr.IPAddress(int(v.network)))
elif v.size > 1:
return str(netaddr.IPAddress(int(v.network) + 1))
def _host_query(v):
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _hostmask_query(v):
return str(v.hostmask)
def _int_query(v, vtype):
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
def _ip_prefix_query(v):
if v.size == 2:
return str(v.ip) + '/' + str(v.prefixlen)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _ip_netmask_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.netmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.netmask)
'''
def _ip_wildcard_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.hostmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.hostmask)
'''
def _ipv4_query(v, value):
if v.version == 6:
try:
return str(v.ipv4())
except:
return False
else:
return value
def _ipv6_query(v, value):
if v.version == 4:
return str(v.ipv6())
else:
return value
def _last_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
return str(netaddr.IPAddress(last_usable))
def _link_local_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v.version == 4:
if ipaddr(str(v_ip), '169.254.0.0/24'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
def _loopback_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_loopback():
return value
def _multicast_query(v, value):
if v.is_multicast():
return value
def _net_query(v):
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
def _netmask_query(v):
return str(v.netmask)
def _network_query(v):
'''Return the network of a given IP or subnet'''
if v.size > 1:
return str(v.network)
def _network_id_query(v):
'''Return the network of a given IP or subnet'''
return str(v.network)
def _network_netmask_query(v):
return str(v.network) + ' ' + str(v.netmask)
def _network_wildcard_query(v):
return str(v.network) + ' ' + str(v.hostmask)
def _next_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
next_ip = int(netaddr.IPAddress(int(v.ip) + 1))
if next_ip >= first_usable and next_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + 1))
def _prefix_query(v):
return int(v.prefixlen)
def _previous_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
previous_ip = int(netaddr.IPAddress(int(v.ip) - 1))
if previous_ip >= first_usable and previous_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - 1))
def _private_query(v, value):
if v.is_private():
return value
def _public_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if (v_ip.is_unicast() and not v_ip.is_private() and
not v_ip.is_loopback() and not v_ip.is_netmask() and
not v_ip.is_hostmask()):
return value
def _range_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
first_usable = str(netaddr.IPAddress(first_usable))
last_usable = str(netaddr.IPAddress(last_usable))
return "{0}-{1}".format(first_usable, last_usable)
def _revdns_query(v):
v_ip = netaddr.IPAddress(str(v.ip))
return v_ip.reverse_dns
def _size_query(v):
return v.size
def _size_usable_query(v):
if v.size == 1:
return 0
elif v.size == 2:
return 2
return v.size - 2
def _subnet_query(v):
return str(v.cidr)
def _type_query(v):
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
def _unicast_query(v, value):
if v.is_unicast():
return value
def _version_query(v):
return v.version
def _wrap_query(v, vtype, value):
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return value
# ---- HWaddr query helpers ----
def _bare_query(v):
v.dialect = netaddr.mac_bare
return str(v)
def _bool_hwaddr_query(v):
if v:
return True
def _int_hwaddr_query(v):
return int(v)
def _cisco_query(v):
v.dialect = netaddr.mac_cisco
return str(v)
def _empty_hwaddr_query(v, value):
if v:
return value
def _linux_query(v):
v.dialect = mac_linux
return str(v)
def _postgresql_query(v):
v.dialect = netaddr.mac_pgsql
return str(v)
def _unix_query(v):
v.dialect = netaddr.mac_unix
return str(v)
def _win_query(v):
v.dialect = netaddr.mac_eui48
return str(v)
# ---- IP address and network filters ----
def ipaddr(value, query='', version=False, alias='ipaddr'):
''' Check if string is an IP address or network and filter it '''
query_func_extra_args = {
'': ('vtype',),
'6to4': ('vtype', 'value'),
'cidr_lookup': ('iplist', 'value'),
'first_usable': ('vtype',),
'int': ('vtype',),
'ipv4': ('value',),
'ipv6': ('value',),
'last_usable': ('vtype',),
'link-local': ('value',),
'loopback': ('value',),
'lo': ('value',),
'multicast': ('value',),
'next_usable': ('vtype',),
'previous_usable': ('vtype',),
'private': ('value',),
'public': ('value',),
'unicast': ('value',),
'range_usable': ('vtype',),
'wrap': ('vtype', 'value'),
}
query_func_map = {
'': _empty_ipaddr_query,
'6to4': _6to4_query,
'address': _ip_query,
'address/prefix': _address_prefix_query, # deprecate
'bool': _bool_ipaddr_query,
'broadcast': _broadcast_query,
'cidr': _cidr_query,
'cidr_lookup': _cidr_lookup_query,
'first_usable': _first_usable_query,
'gateway': _gateway_query, # deprecate
'gw': _gateway_query, # deprecate
'host': _host_query,
'host/prefix': _address_prefix_query, # deprecate
'hostmask': _hostmask_query,
'hostnet': _gateway_query, # deprecate
'int': _int_query,
'ip': _ip_query,
'ip/prefix': _ip_prefix_query,
'ip_netmask': _ip_netmask_query,
        # 'ip_wildcard': _ip_wildcard_query,  # built, but no clear use case was found
'ipv4': _ipv4_query,
'ipv6': _ipv6_query,
'last_usable': _last_usable_query,
'link-local': _link_local_query,
'lo': _loopback_query,
'loopback': _loopback_query,
'multicast': _multicast_query,
'net': _net_query,
'next_usable': _next_usable_query,
'netmask': _netmask_query,
'network': _network_query,
'network_id': _network_id_query,
'network/prefix': _subnet_query,
'network_netmask': _network_netmask_query,
'network_wildcard': _network_wildcard_query,
'prefix': _prefix_query,
'previous_usable': _previous_usable_query,
'private': _private_query,
'public': _public_query,
'range_usable': _range_usable_query,
'revdns': _revdns_query,
'router': _gateway_query, # deprecate
'size': _size_query,
'size_usable': _size_usable_query,
'subnet': _subnet_query,
'type': _type_query,
'unicast': _unicast_query,
'v4': _ipv4_query,
'v6': _ipv6_query,
'version': _version_query,
'wildcard': _hostmask_query,
'wrap': _wrap_query,
}
vtype = None
if not value:
return False
elif value is True:
return False
# Check if value is a list and parse each element
elif isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, str(query), version):
_ret.append(ipaddr(element, str(query), version))
if _ret:
return _ret
else:
return list()
# Check if value is a number and convert it to an IP address
elif str(value).isdigit():
# We don't know what IP version to assume, so let's check IPv4 first,
# then IPv6
try:
if ((not version) or (version and version == 4)):
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = int(value)
v.prefixlen = 32
elif version and version == 6:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# IPv4 didn't work the first time, so it definitely has to be IPv6
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# The value is too big for IPv6. Are you a nanobot?
except:
return False
# We got an IP address, let's mark it as such
value = str(v)
vtype = 'address'
# value has not been recognized, check if it's a valid IP string
else:
try:
v = netaddr.IPNetwork(value)
# value is a valid IP string, check if user specified
# CIDR prefix or just an IP address, this will indicate default
# output format
try:
address, prefix = value.split('/')
vtype = 'network'
except:
vtype = 'address'
# value hasn't been recognized, maybe it's a numerical CIDR?
except:
try:
address, prefix = value.split('/')
address.isdigit()
address = int(address)
prefix.isdigit()
prefix = int(prefix)
# It's not numerical CIDR, give up
except:
return False
# It is something, so let's try and build a CIDR from the parts
try:
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv4 CIDR
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv6 CIDR. Give up.
except:
return False
# We have a valid CIDR, so let's write it in correct format
value = str(v)
vtype = 'network'
# We have a query string but it's not in the known query types. Check if
# that string is a valid subnet, if so, we can check later if given IP
# address/network is inside that specific subnet
try:
# ?? 6to4 and link-local were True here before. Should they still?
if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
query = 'cidr_lookup'
except:
pass
    # This code checks if value matches the IP version the user wants, i.e. if
# it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
# If version does not match, return False
if version and v.version != version:
return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
try:
float(query)
if v.size == 1:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif v.size > 1:
try:
return str(v[query]) + '/' + str(v.prefixlen)
except:
return False
else:
return value
except:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
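# Illustrative template usage of the ipaddr filter (examples added for
# clarity; not part of the original source):
#   {{ '192.0.2.1/24' | ipaddr('address') }}  ->  192.0.2.1
#   {{ '192.0.2.1/24' | ipaddr('network') }}  ->  192.0.2.0
#   {{ '192.0.2.1/24' | ipaddr('prefix') }}   ->  24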
def ipwrap(value, query=''):
try:
if isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, query, version=False, alias='ipwrap'):
_ret.append(ipaddr(element, 'wrap'))
else:
_ret.append(element)
return _ret
else:
_ret = ipaddr(value, query, version=False, alias='ipwrap')
if _ret:
return ipaddr(_ret, 'wrap')
else:
return value
except:
return value
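# Illustrative usage (added for clarity; not in the original source): ipwrap
# brackets IPv6 addresses and leaves IPv4 addresses untouched:
#   {{ 'fe80::1' | ipwrap }}    ->  [fe80::1]
#   {{ '192.0.2.1' | ipwrap }}  ->  192.0.2.1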
def ipv4(value, query=''):
return ipaddr(value, query, version=4, alias='ipv4')
def ipv6(value, query=''):
return ipaddr(value, query, version=6, alias='ipv6')
# Split given subnet into smaller subnets or find out the biggest subnet of
# a given IP address with given CIDR prefix
# Usage:
#
# - address or address/prefix | ipsubnet
# returns CIDR subnet of a given input
#
# - address/prefix | ipsubnet(cidr)
# returns number of possible subnets for given CIDR prefix
#
# - address/prefix | ipsubnet(cidr, index)
# returns new subnet with given CIDR prefix
#
# - address | ipsubnet(cidr)
# returns biggest subnet with given CIDR prefix that address belongs to
#
# - address | ipsubnet(cidr, index)
# returns next indexed subnet which contains given address
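#
# Illustrative values (added for clarity; not part of the original source):
#   '192.168.0.0/24' | ipsubnet(25)     ->  '2' (two possible /25 subnets)
#   '192.168.0.0/24' | ipsubnet(25, 1)  ->  '192.168.0.128/25'
#   '192.168.0.1' | ipsubnet(16)        ->  '192.168.0.0/16'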
def ipsubnet(value, query='', index='x'):
''' Manipulate IPv4/IPv6 subnets '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return str(value)
elif str(query).isdigit():
vsize = ipaddr(v, 'size')
query = int(query)
try:
float(index)
index = int(index)
if vsize > 1:
try:
return str(list(value.subnet(query))[index])
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[index])
except:
return False
except:
if vsize > 1:
try:
return str(len(list(value.subnet(query))))
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[0])
except:
return False
return False
# Returns the nth host within a network described by value.
# Usage:
#
# - address or address/prefix | nthhost(nth)
# returns the nth host within the given network
def nthhost(value, query=''):
''' Get the nth host within a given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
nth = int(query)
if value.size > nth:
return value[nth]
except ValueError:
return False
return False
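# Illustrative value (added for clarity; not in the original source):
#   '192.0.2.0/24' | nthhost(5)  ->  192.0.2.5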
# Returns the next nth usable IP within a network described by value.
def next_nth_usable(value, offset):
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
v = netaddr.IPNetwork(v)
except:
return False
if type(offset) != int:
raise errors.AnsibleFilterError('Must pass in an integer')
if v.size > 1:
first_usable, last_usable = _first_last(v)
nth_ip = int(netaddr.IPAddress(int(v.ip) + offset))
if nth_ip >= first_usable and nth_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + offset))
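# Illustrative value (added for clarity; not in the original source):
#   '192.168.0.1/24' | next_nth_usable(2)  ->  '192.168.0.3'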
# Returns the previous nth usable IP within a network described by value.
def previous_nth_usable(value, offset):
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
v = netaddr.IPNetwork(v)
except:
return False
if type(offset) != int:
raise errors.AnsibleFilterError('Must pass in an integer')
if v.size > 1:
first_usable, last_usable = _first_last(v)
nth_ip = int(netaddr.IPAddress(int(v.ip) - offset))
if nth_ip >= first_usable and nth_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - offset))
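# Illustrative value (added for clarity; not in the original source):
#   '192.168.0.5/24' | previous_nth_usable(2)  ->  '192.168.0.3'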
def _range_checker(ip_check, first, last):
'''
    Tests whether an IP address is within the bounds of the first and last address.
    :param ip_check: The IP to test if it is within first and last.
:param first: The first IP in the range to test against.
:param last: The last IP in the range to test against.
:return: bool
'''
if ip_check >= first and ip_check <= last:
return True
else:
return False
def _address_normalizer(value):
'''
Used to validate an address or network type and return it in a consistent format.
    It also anticipates future use cases, such as address ranges, that are not currently supported.
:param value: The string representation of an address or network.
:return: The address or network in the normalized form.
'''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address' or vtype == "network":
v = ipaddr(value, 'subnet')
except:
return False
return v
def network_in_usable(value, test):
'''
    Checks whether 'test' is a usable address or addresses in 'value'
    :param value: The string representation of an address or network to test against.
:param test: The string representation of an address or network to validate if it is within the range of 'value'.
:return: bool
'''
# normalize value and test variables into an ipaddr
v = _address_normalizer(value)
w = _address_normalizer(test)
    # get first and last addresses as integers to compare value and test; the 'or' fallback catches the /32 case
v_first = ipaddr(ipaddr(v, 'first_usable') or ipaddr(v, 'address'), 'int')
v_last = ipaddr(ipaddr(v, 'last_usable') or ipaddr(v, 'address'), 'int')
w_first = ipaddr(ipaddr(w, 'network') or ipaddr(w, 'address'), 'int')
w_last = ipaddr(ipaddr(w, 'broadcast') or ipaddr(w, 'address'), 'int')
if _range_checker(w_first, v_first, v_last) and _range_checker(w_last, v_first, v_last):
return True
else:
return False
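# Illustrative values (added for clarity; not in the original source):
#   '192.0.2.0/24' | network_in_usable('192.0.2.1')    ->  True
#   '192.0.2.0/24' | network_in_usable('192.0.2.255')  ->  False (broadcast)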
def network_in_network(value, test):
'''
Checks whether the 'test' address or addresses are in 'value', including broadcast and network
    :param value: The network address or range to test against.
:param test: The address or network to validate if it is within the range of 'value'.
:return: bool
'''
# normalize value and test variables into an ipaddr
v = _address_normalizer(value)
w = _address_normalizer(test)
    # get first and last addresses as integers to compare value and test; the 'or' fallback catches the /32 case
v_first = ipaddr(ipaddr(v, 'network') or ipaddr(v, 'address'), 'int')
v_last = ipaddr(ipaddr(v, 'broadcast') or ipaddr(v, 'address'), 'int')
w_first = ipaddr(ipaddr(w, 'network') or ipaddr(w, 'address'), 'int')
w_last = ipaddr(ipaddr(w, 'broadcast') or ipaddr(w, 'address'), 'int')
if _range_checker(w_first, v_first, v_last) and _range_checker(w_last, v_first, v_last):
return True
else:
return False
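# Illustrative values (added for clarity; not in the original source):
#   '192.0.2.0/24' | network_in_network('192.0.2.0/25')  ->  True
#   '192.0.2.0/24' | network_in_network('198.51.100.1')  ->  False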
def reduce_on_network(value, network):
'''
Reduces a list of addresses to only the addresses that match a given network.
    :param value: The list of addresses to filter on.
    :param network: The network to validate against.
:return: The reduced list of addresses.
'''
# normalize network variable into an ipaddr
n = _address_normalizer(network)
    # get first and last addresses as integers to compare value and test; the 'or' fallback catches the /32 case
n_first = ipaddr(ipaddr(n, 'network') or ipaddr(n, 'address'), 'int')
n_last = ipaddr(ipaddr(n, 'broadcast') or ipaddr(n, 'address'), 'int')
# create an empty list to fill and return
r = []
for address in value:
# normalize address variables into an ipaddr
a = _address_normalizer(address)
        # get first and last addresses as integers to compare value and test; the 'or' fallback catches the /32 case
a_first = ipaddr(ipaddr(a, 'network') or ipaddr(a, 'address'), 'int')
a_last = ipaddr(ipaddr(a, 'broadcast') or ipaddr(a, 'address'), 'int')
if _range_checker(a_first, n_first, n_last) and _range_checker(a_last, n_first, n_last):
r.append(address)
return r
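# Illustrative value (added for clarity; not in the original source):
#   ['192.0.2.1', '198.51.100.1'] | reduce_on_network('192.0.2.0/24')
#   ->  ['192.0.2.1']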
# Returns the SLAAC address within a network for a given HW/MAC address.
# Usage:
#
# - prefix | slaac(mac)
def slaac(value, query=''):
''' Get the SLAAC address within given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
if ipaddr(value, 'version') != 6:
return False
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
mac = hwaddr(query, alias='slaac')
eui = netaddr.EUI(mac)
except:
return False
return eui.ipv6(value.network)
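# Illustrative value (added for clarity; not in the original source; the
# expected result assumes netaddr's standard EUI-64 derivation):
#   'fdcf:1894:23b5:d38c::/64' | slaac('c2:31:b3:83:bf:2b')
#   ->  fdcf:1894:23b5:d38c:c031:b3ff:fe83:bf2b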
# ---- HWaddr / MAC address filters ----
def hwaddr(value, query='', alias='hwaddr'):
''' Check if string is a HW/MAC address and filter it '''
query_func_extra_args = {
'': ('value',),
}
query_func_map = {
'': _empty_hwaddr_query,
'bare': _bare_query,
'bool': _bool_hwaddr_query,
'int': _int_hwaddr_query,
'cisco': _cisco_query,
'eui48': _win_query,
'linux': _linux_query,
'pgsql': _postgresql_query,
'postgresql': _postgresql_query,
'psql': _postgresql_query,
'unix': _unix_query,
'win': _win_query,
}
try:
v = netaddr.EUI(value)
except:
if query and query != 'bool':
raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
def macaddr(value, query=''):
return hwaddr(value, query, alias='macaddr')
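# Illustrative values (added for clarity; not in the original source; exact
# casing/grouping follows netaddr's dialect definitions):
#   {{ '1a:2b:3c:4d:5e:6f' | hwaddr('cisco') }}  ->  1a2b.3c4d.5e6f
#   {{ '1a:2b:3c:4d:5e:6f' | hwaddr('bare') }}   ->  1A2B3C4D5E6F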
def _need_netaddr(f_name, *args, **kwargs):
    raise errors.AnsibleFilterError('The %s filter requires python-netaddr to be '
'installed on the ansible controller' % f_name)
def ip4_hex(arg, delimiter=''):
''' Convert an IPv4 address to Hexadecimal notation '''
numbers = list(map(int, arg.split('.')))
return '{0:02x}{sep}{1:02x}{sep}{2:02x}{sep}{3:02x}'.format(*numbers, sep=delimiter)
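# Illustrative values (added for clarity; not in the original source):
#   ip4_hex('192.0.2.24')       ->  'c0000218'
#   ip4_hex('192.0.2.24', ':')  ->  'c0:00:02:18'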
# ---- Ansible filters ----
class FilterModule(object):
''' IP address and network manipulation filters '''
filter_map = {
# IP addresses and networks
'ipaddr': ipaddr,
'ipwrap': ipwrap,
'ip4_hex': ip4_hex,
'ipv4': ipv4,
'ipv6': ipv6,
'ipsubnet': ipsubnet,
'next_nth_usable': next_nth_usable,
'network_in_network': network_in_network,
'network_in_usable': network_in_usable,
'reduce_on_network': reduce_on_network,
'nthhost': nthhost,
'previous_nth_usable': previous_nth_usable,
'slaac': slaac,
# MAC / HW addresses
'hwaddr': hwaddr,
'macaddr': macaddr
}
def filters(self):
if netaddr:
return self.filter_map
else:
# Need to install python-netaddr for these filters to work
return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
| gpl-3.0 | -4,882,947,598,030,905,000 | -670,275,023,654,991,700 | 27.098765 | 117 | 0.561782 | false |
varunnaganathan/django | tests/admin_widgets/tests.py | 2 | 61214 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gettext
import os
from datetime import datetime, timedelta
from importlib import import_module
from unittest import skipIf
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import CharField, DateField
from django.test import SimpleTestCase, TestCase, override_settings
from django.urls import reverse
from django.utils import six, translation
from . import models
from .widgetadmin import site as widget_admin_site
try:
import pytz
except ImportError:
pytz = None
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime(2007, 5, 30, 13, 20, 10)
)
cls.u2 = User.objects.create(
pk=101, username='testser', first_name='Add', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=False,
is_staff=True, last_login=datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime(2007, 5, 30, 13, 20, 10)
)
models.Car.objects.create(id=1, owner=cls.u1, make='Volkswagen', model='Passat')
models.Car.objects.create(id=2, owner=cls.u2, make='BMW', model='M3')
class SeleniumDataMixin(object):
def setUp(self):
self.u1 = User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime(2007, 5, 30, 13, 20, 10)
)
class AdminFormfieldForDBFieldTests(SimpleTestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin):
pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
# Check that we got a field of the right type
self.assertTrue(
isinstance(widget, widgetclass),
"Wrong widget for %s.%s: expected %s, got %s" % (
model.__class__.__name__,
fieldname,
widgetclass,
type(widget),
)
)
# Return the formfield so that other tests can continue
return ff
def test_DateField(self):
self.assertFormfield(models.Event, 'start_date', widgets.AdminDateWidget)
def test_DateTimeField(self):
self.assertFormfield(models.Member, 'birthdate', widgets.AdminSplitDateTime)
def test_TimeField(self):
self.assertFormfield(models.Event, 'start_time', widgets.AdminTimeWidget)
def test_TextField(self):
self.assertFormfield(models.Event, 'description', widgets.AdminTextareaWidget)
def test_URLField(self):
self.assertFormfield(models.Event, 'link', widgets.AdminURLFieldWidget)
def test_IntegerField(self):
self.assertFormfield(models.Event, 'min_age', widgets.AdminIntegerFieldWidget)
def test_CharField(self):
self.assertFormfield(models.Member, 'name', widgets.AdminTextInputWidget)
def test_EmailField(self):
self.assertFormfield(models.Member, 'email', widgets.AdminEmailInputWidget)
def test_FileField(self):
self.assertFormfield(models.Album, 'cover_art', widgets.AdminFileWidget)
def test_ForeignKey(self):
self.assertFormfield(models.Event, 'main_band', forms.Select)
def test_raw_id_ForeignKey(self):
self.assertFormfield(models.Event, 'main_band', widgets.ForeignKeyRawIdWidget,
raw_id_fields=['main_band'])
def test_radio_fields_ForeignKey(self):
ff = self.assertFormfield(models.Event, 'main_band', widgets.AdminRadioSelect,
radio_fields={'main_band': admin.VERTICAL})
self.assertEqual(ff.empty_label, None)
def test_many_to_many(self):
self.assertFormfield(models.Band, 'members', forms.SelectMultiple)
def test_raw_id_many_to_many(self):
self.assertFormfield(models.Band, 'members', widgets.ManyToManyRawIdWidget,
raw_id_fields=['members'])
def test_filtered_many_to_many(self):
self.assertFormfield(models.Band, 'members', widgets.FilteredSelectMultiple,
filter_vertical=['members'])
def test_formfield_overrides(self):
self.assertFormfield(models.Event, 'start_date', forms.TextInput,
formfield_overrides={DateField: {'widget': forms.TextInput}})
def test_formfield_overrides_widget_instances(self):
"""
Test that widget instances in formfield_overrides are not shared between
different fields. (#19423)
"""
class BandAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {'widget': forms.TextInput(attrs={'size': '10'})}
}
ma = BandAdmin(models.Band, admin.site)
f1 = ma.formfield_for_dbfield(models.Band._meta.get_field('name'), request=None)
f2 = ma.formfield_for_dbfield(models.Band._meta.get_field('style'), request=None)
self.assertNotEqual(f1.widget, f2.widget)
self.assertEqual(f1.widget.attrs['maxlength'], '100')
self.assertEqual(f2.widget.attrs['maxlength'], '20')
self.assertEqual(f2.widget.attrs['size'], '10')
def test_field_with_choices(self):
self.assertFormfield(models.Member, 'gender', forms.Select)
def test_choices_with_radio_fields(self):
self.assertFormfield(models.Member, 'gender', widgets.AdminRadioSelect,
radio_fields={'gender': admin.VERTICAL})
def test_inheritance(self):
self.assertFormfield(models.Album, 'backside_art', widgets.AdminFileWidget)
def test_m2m_widgets(self):
"""m2m fields help text as it applies to admin app (#9321)."""
class AdvisorAdmin(admin.ModelAdmin):
filter_vertical = ['companies']
self.assertFormfield(models.Advisor, 'companies', widgets.FilteredSelectMultiple,
filter_vertical=['companies'])
ma = AdvisorAdmin(models.Advisor, admin.site)
f = ma.formfield_for_dbfield(models.Advisor._meta.get_field('companies'), request=None)
self.assertEqual(
six.text_type(f.help_text),
'Hold down "Control", or "Command" on a Mac, to select more than one.'
)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminFormfieldForDBFieldWithRequestTests(TestDataMixin, TestCase):
def test_filter_choices_by_request_user(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.login(username="super", password="secret")
response = self.client.get(reverse('admin:admin_widgets_cartire_add'))
self.assertNotContains(response, "BMW M3")
self.assertContains(response, "Volkswagen Passat")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyWidgetChangeList(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username="super", password="secret")
def test_changelist_ForeignKey(self):
response = self.client.get(reverse('admin:admin_widgets_car_changelist'))
self.assertContains(response, '/auth/user/add/')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyRawIdWidget(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username="super", password="secret")
def test_nonexistent_target_id(self):
band = models.Band.objects.create(name='Bogey Blues')
pk = band.pk
band.delete()
post_data = {
"main_band": '%s' % pk,
}
# Try posting with a non-existent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), post_data)
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_invalid_target_id(self):
for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'),
{"main_band": test_str})
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_url_params_from_lookup_dict_any_iterable(self):
lookup1 = widgets.url_params_from_lookup_dict({'color__in': ('red', 'blue')})
lookup2 = widgets.url_params_from_lookup_dict({'color__in': ['red', 'blue']})
self.assertEqual(lookup1, {'color__in': 'red,blue'})
self.assertEqual(lookup1, lookup2)
def test_url_params_from_lookup_dict_callable(self):
def my_callable():
return 'works'
lookup1 = widgets.url_params_from_lookup_dict({'myfield': my_callable})
lookup2 = widgets.url_params_from_lookup_dict({'myfield': my_callable()})
self.assertEqual(lookup1, lookup2)
class FilteredSelectMultipleWidgetTest(SimpleTestCase):
def test_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', False)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilter" '
'data-field-name="test\\" data-is-stacked="0">\n</select>'
)
def test_stacked_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', True)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilterstacked" '
'data-field-name="test\\" data-is-stacked="1">\n</select>'
)
class AdminDateWidgetTest(SimpleTestCase):
def test_attrs(self):
"""
Ensure that user-supplied attrs are used.
Refs #12073.
"""
w = widgets.AdminDateWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="vDateField" name="test" size="10" />',
)
# pass attrs to widget
w = widgets.AdminDateWidget(attrs={'size': 20, 'class': 'myDateField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="myDateField" name="test" size="20" />',
)
class AdminTimeWidgetTest(SimpleTestCase):
def test_attrs(self):
"""
Ensure that user-supplied attrs are used.
Refs #12073.
"""
w = widgets.AdminTimeWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="vTimeField" name="test" size="8" />',
)
# pass attrs to widget
w = widgets.AdminTimeWidget(attrs={'size': 20, 'class': 'myTimeField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="myTimeField" name="test" size="20" />',
)
class AdminSplitDateTimeWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminSplitDateTime()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Date: <input value="2007-12-01" type="text" class="vDateField" '
'name="test_0" size="10" /><br />'
'Time: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8" /></p>'
)
def test_localization(self):
w = widgets.AdminSplitDateTime()
with self.settings(USE_L10N=True), translation.override('de-at'):
w.is_localized = True
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Datum: <input value="01.12.2007" type="text" '
'class="vDateField" name="test_0"size="10" /><br />'
'Zeit: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8" /></p>'
)
class AdminURLWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', ''),
'<input class="vURLField" name="test" type="url" />'
)
self.assertHTMLEqual(
w.render('test', 'http://example.com'),
'<p class="url">Currently:<a href="http://example.com">'
'http://example.com</a><br />'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example.com" /></p>'
)
def test_render_idn(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', 'http://example-äüö.com'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">'
'http://example-äüö.com</a><br />'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example-äüö.com" /></p>'
)
def test_render_quoting(self):
# WARNING: Don't use assertHTMLEqual in that testcase!
# assertHTMLEqual will get rid of some escapes which are tested here!
w = widgets.AdminURLFieldWidget()
self.assertEqual(
w.render('test', 'http://example.com/<sometag>some text</sometag>'),
'<p class="url">Currently: '
'<a href="http://example.com/%3Csometag%3Esome%20text%3C/sometag%3E">'
'http://example.com/<sometag>some text</sometag></a><br />'
'Change: <input class="vURLField" name="test" type="url" '
'value="http://example.com/<sometag>some text</sometag>" /></p>'
)
self.assertEqual(
w.render('test', 'http://example-äüö.com/<sometag>some text</sometag>'),
'<p class="url">Currently: '
'<a href="http://xn--example--7za4pnc.com/%3Csometag%3Esome%20text%3C/sometag%3E">'
'http://example-äüö.com/<sometag>some text</sometag></a><br />'
'Change: <input class="vURLField" name="test" type="url" '
'value="http://example-äüö.com/<sometag>some text</sometag>" /></p>'
)
self.assertEqual(
w.render('test', 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"'),
'<p class="url">Currently: '
'<a href="http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)%3C/script%3E%22">'
'http://www.example.com/%C3%A4"><script>'
'alert("XSS!")</script>"</a><br />'
'Change: <input class="vURLField" name="test" type="url" '
'value="http://www.example.com/%C3%A4"><script>'
'alert("XSS!")</script>"" /></p>'
)
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls',
)
class AdminFileWidgetTests(TestDataMixin, TestCase):
@classmethod
def setUpTestData(cls):
super(AdminFileWidgetTests, cls).setUpTestData()
band = models.Band.objects.create(name='Linkin Park')
cls.album = band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
def test_render(self):
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id" /> '
'<label for="test-clear_id">Clear</label></span><br />'
'Change: <input type="file" name="test" /></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
self.assertHTMLEqual(
w.render('test', SimpleUploadedFile('test', b'content')),
'<input type="file" name="test" />',
)
def test_readonly_fields(self):
"""
File widgets should render as a link when they're marked "read only."
"""
self.client.login(username="super", password="secret")
response = self.client.get(reverse('admin:admin_widgets_album_change', args=(self.album.id,)))
self.assertContains(
response,
'<p><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">'
'albums\hybrid_theory.jpg</a></p>' % {'STORAGE_URL': default_storage.url('')},
html=True,
)
self.assertNotContains(
response,
'<input type="file" name="cover_art" id="id_cover_art" />',
html=True,
)
response = self.client.get(reverse('admin:admin_widgets_album_add'))
self.assertContains(
response,
'<p></p>',
html=True,
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ForeignKeyRawIdWidgetTest(TestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
rel = models.Album._meta.get_field('band').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', band.pk, attrs={}),
'<input type="text" name="test" value="%(bandpk)s" '
'class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/band/?_to_field=id" class="related-lookup" '
'id="lookup_id_test" title="Lookup"></a> <strong>'
'<a href="/admin_widgets/band/%(bandpk)s/change/">Linkin Park</a>'
'</strong>' % {'bandpk': band.pk}
)
def test_relations_to_non_primary_key(self):
# Check that ForeignKeyRawIdWidget works with fields which aren't
# related to the model's primary key.
apple = models.Inventory.objects.create(barcode=86, name='Apple')
models.Inventory.objects.create(barcode=22, name='Pear')
core = models.Inventory.objects.create(
barcode=87, name='Core', parent=apple
)
rel = models.Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', core.parent_id, attrs={}),
'<input type="text" name="test" value="86" '
'class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Apple</a></strong>' % {'pk': apple.pk}
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = models.Honeycomb.objects.create(location='Old tree')
big_honeycomb.bee_set.create()
rel = models.Bee._meta.get_field('honeycomb').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('honeycomb_widget', big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s" />'
' <strong>Honeycomb object</strong>'
% {'hcombpk': big_honeycomb.pk}
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = models.Individual.objects.create(name='Subject #1')
models.Individual.objects.create(name='Child', parent=subject1)
rel = models.Individual._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('individual_widget', subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s" />'
' <strong>Individual object</strong>'
% {'subj1pk': subject1.pk}
)
def test_proper_manager_for_label_lookup(self):
# see #9258
rel = models.Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = models.Inventory.objects.create(
barcode=93, name='Hidden', hidden=True
)
child_of_hidden = models.Inventory.objects.create(
barcode=94, name='Child of hidden', parent=hidden
)
self.assertHTMLEqual(
w.render('test', child_of_hidden.parent_id, attrs={}),
'<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Hidden</a></strong>' % {'pk': hidden.pk}
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ManyToManyRawIdWidgetTest(TestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
m1 = models.Member.objects.create(name='Chester')
m2 = models.Member.objects.create(name='Mike')
band.members.add(m1, m2)
rel = models.Band._meta.get_field('members').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', [m1.pk, m2.pk], attrs={}), (
'<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField" />'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % dict(m1pk=m1.pk, m2pk=m2.pk)
)
self.assertHTMLEqual(
w.render('test', [m1.pk]), (
'<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % dict(m1pk=m1.pk)
)
def test_m2m_related_model_not_in_admin(self):
# M2M relationship with model not registered with admin site. Raw ID
# widget should have no magnifying glass link. See #16542
consultor1 = models.Advisor.objects.create(name='Rockstar Techie')
c1 = models.Company.objects.create(name='Doodle')
c2 = models.Company.objects.create(name='Pear')
consultor1.companies.add(c1, c2)
rel = models.Advisor._meta.get_field('companies').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('company_widget1', [c1.pk, c2.pk], attrs={}),
'<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s" />' % {'c1pk': c1.pk, 'c2pk': c2.pk}
)
self.assertHTMLEqual(
w.render('company_widget2', [c1.pk]),
'<input type="text" name="company_widget2" value="%(c1pk)s" />' % {'c1pk': c1.pk}
)
class RelatedFieldWidgetWrapperTests(SimpleTestCase):
def test_no_can_add_related(self):
rel = models.Individual._meta.get_field('parent').remote_field
w = widgets.AdminRadioSelect()
# Used to fail with a name error.
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site)
self.assertFalse(w.can_add_related)
def test_select_multiple_widget_cant_change_delete_related(self):
rel = models.Individual._meta.get_field('parent').remote_field
widget = forms.SelectMultiple()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertFalse(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_on_delete_cascade_rel_cant_delete_related(self):
rel = models.Individual._meta.get_field('soulmate').remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertTrue(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class DateTimePickerSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_show_hide_date_time_picker_widgets(self):
"""
Ensure that pressing the ESC key closes the date and time picker
widgets.
Refs #17064.
"""
from selenium.webdriver.common.keys import Keys
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# First, with the date picker widget ---------------------------------
# Check that the date picker is hidden
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'none')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Check that the date picker is visible
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'block')
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# Check that the date picker is hidden again
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'none')
# Then, with the time picker widget ----------------------------------
# Check that the time picker is hidden
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'none')
# Click the time icon
self.selenium.find_element_by_id('clocklink0').click()
# Check that the time picker is visible
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'block')
self.assertEqual(
[
x.text for x in
self.selenium.find_elements_by_xpath("//ul[@class='timelist']/li/a")
],
['Now', 'Midnight', '6 a.m.', 'Noon', '6 p.m.']
)
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# Check that the time picker is hidden again
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'none')
def test_calendar_nonday_class(self):
"""
Ensure cells that are not days of the month have the `nonday` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# make sure the first and last 6 cells have class nonday
for td in tds[:6] + tds[-6:]:
self.assertEqual(td.get_attribute('class'), 'nonday')
def test_calendar_selected_class(self):
"""
        Ensure the cell for the day in the input has the `selected` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify the selected cell
selected = tds[6]
self.assertEqual(selected.get_attribute('class'), 'selected')
self.assertEqual(selected.text, '1')
def test_calendar_no_selected_class(self):
"""
Ensure no cells are given the selected class when the field is empty.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify there are no cells with the selected class
selected = [td for td in tds if td.get_attribute('class') == 'selected']
self.assertEqual(len(selected), 0)
def test_calendar_show_date_from_input(self):
"""
        Ensure that the calendar shows the date from the input field for every
        locale supported by Django.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Enter test data
member = models.Member.objects.create(name='Bob', birthdate=datetime(1984, 5, 15), gender='M')
# Get month name translations for every locale
month_string = 'May'
path = os.path.join(os.path.dirname(import_module('django.contrib.admin').__file__), 'locale')
for language_code, language_name in settings.LANGUAGES:
try:
catalog = gettext.translation('djangojs', path, [language_code])
except IOError:
continue
if month_string in catalog._catalog:
month_name = catalog._catalog[month_string]
else:
month_name = month_string
# Get the expected caption
may_translation = month_name
expected_caption = '{0:s} {1:d}'.format(may_translation.upper(), 1984)
# Test with every locale
with override_settings(LANGUAGE_CODE=language_code, USE_L10N=True):
# Open a page that has a date picker widget
self.selenium.get('{}{}'.format(self.live_server_url,
reverse('admin:admin_widgets_member_change', args=(member.pk,))))
# Click on the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Make sure that the right month and year are displayed
self.wait_for_text('#calendarin0 caption', expected_caption)
class DateTimePickerSeleniumChromeTests(DateTimePickerSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerSeleniumIETests(DateTimePickerSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@skipIf(pytz is None, "this test requires pytz")
@override_settings(TIME_ZONE='Asia/Singapore')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class DateTimePickerShortcutsSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_date_time_picker_shortcuts(self):
"""
Ensure that date/time/datetime picker shortcuts work in the current time zone.
Refs #20663.
        This test case is fairly tricky: it relies on Selenium still running the browser
in the default time zone "America/Chicago" despite `override_settings` changing
the time zone to "Asia/Singapore".
"""
self.admin_login(username='super', password='secret', login_url='/')
error_margin = timedelta(seconds=10)
        # If we are near a DST transition, add an hour of error margin.
tz = pytz.timezone('America/Chicago')
utc_now = datetime.now(pytz.utc)
tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname()
tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname()
if tz_yesterday != tz_tomorrow:
error_margin += timedelta(hours=1)
now = datetime.now()
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
self.selenium.find_element_by_id('id_name').send_keys('test')
# Click on the "today" and "now" shortcuts.
shortcuts = self.selenium.find_elements_by_css_selector(
'.field-birthdate .datetimeshortcuts')
for shortcut in shortcuts:
shortcut.find_element_by_tag_name('a').click()
# Check that there is a time zone mismatch warning.
# Warning: This would effectively fail if the TIME_ZONE defined in the
# settings has the same UTC offset as "Asia/Singapore" because the
# mismatch warning would be rightfully missing from the page.
self.selenium.find_elements_by_css_selector(
'.field-birthdate .timezonewarning')
# Submit the form.
self.selenium.find_element_by_tag_name('form').submit()
self.wait_page_loaded()
# Make sure that "now" in javascript is within 10 seconds
# from "now" on the server side.
member = models.Member.objects.get(name='test')
self.assertGreater(member.birthdate, now - error_margin)
self.assertLess(member.birthdate, now + error_margin)
class DateTimePickerShortcutsSeleniumChromeTests(DateTimePickerShortcutsSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerShortcutsSeleniumIETests(DateTimePickerShortcutsSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
# The above tests run with Asia/Singapore which are on the positive side of
# UTC. Here we test with a timezone on the negative side.
@override_settings(TIME_ZONE='US/Eastern')
class DateTimePickerAltTimezoneSeleniumFirefoxTests(DateTimePickerShortcutsSeleniumFirefoxTests):
pass
class DateTimePickerAltTimezoneSeleniumChromeTests(DateTimePickerAltTimezoneSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerAltTimezoneSeleniumIETests(DateTimePickerAltTimezoneSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class HorizontalVerticalFilterSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
super(HorizontalVerticalFilterSeleniumFirefoxTests, self).setUp()
self.lisa = models.Student.objects.create(name='Lisa')
self.john = models.Student.objects.create(name='John')
self.bob = models.Student.objects.create(name='Bob')
self.peter = models.Student.objects.create(name='Peter')
self.jenny = models.Student.objects.create(name='Jenny')
self.jason = models.Student.objects.create(name='Jason')
self.cliff = models.Student.objects.create(name='Cliff')
self.arthur = models.Student.objects.create(name='Arthur')
self.school = models.School.objects.create(name='School of Awesome')
def assertActiveButtons(self, mode, field_name, choose, remove,
choose_all=None, remove_all=None):
choose_link = '#id_%s_add_link' % field_name
choose_all_link = '#id_%s_add_all_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
remove_all_link = '#id_%s_remove_all_link' % field_name
self.assertEqual(self.has_css_class(choose_link, 'active'), choose)
self.assertEqual(self.has_css_class(remove_link, 'active'), remove)
if mode == 'horizontal':
self.assertEqual(self.has_css_class(choose_all_link, 'active'), choose_all)
self.assertEqual(self.has_css_class(remove_all_link, 'active'), remove_all)
def execute_basic_operations(self, mode, field_name):
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
choose_all_link = 'id_%s_add_all_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
remove_all_link = 'id_%s_remove_all_link' % field_name
# Initial positions ---------------------------------------------------
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id)])
self.assertActiveButtons(mode, field_name, False, False, True, True)
# Click 'Choose all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(choose_all_link).click()
elif mode == 'vertical':
            # There's no 'Choose all' button in vertical mode, so individually
# select all options and click 'Choose'.
for option in self.selenium.find_elements_by_css_selector(from_box + ' > option'):
option.click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertActiveButtons(mode, field_name, False, False, False, True)
# Click 'Remove all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(remove_all_link).click()
elif mode == 'vertical':
            # There's no 'Remove all' button in vertical mode, so individually
# select all options and click 'Remove'.
for option in self.selenium.find_elements_by_css_selector(to_box + ' > option'):
option.click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box,
[str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertSelectOptions(to_box, [])
self.assertActiveButtons(mode, field_name, False, False, True, False)
# Choose some options ------------------------------------------------
from_lisa_select_option = self.get_select_option(from_box, str(self.lisa.id))
# Check the title attribute is there for tool tips: ticket #20821
self.assertEqual(from_lisa_select_option.get_attribute('title'), from_lisa_select_option.get_attribute('text'))
from_lisa_select_option.click()
self.get_select_option(from_box, str(self.jason.id)).click()
self.get_select_option(from_box, str(self.bob.id)).click()
self.get_select_option(from_box, str(self.john.id)).click()
self.assertActiveButtons(mode, field_name, True, False, True, False)
self.selenium.find_element_by_id(choose_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.bob.id),
str(self.jason.id), str(self.john.id)])
# Check the tooltip is still there after moving: ticket #20821
to_lisa_select_option = self.get_select_option(to_box, str(self.lisa.id))
self.assertEqual(to_lisa_select_option.get_attribute('title'), to_lisa_select_option.get_attribute('text'))
# Remove some options -------------------------------------------------
self.get_select_option(to_box, str(self.lisa.id)).click()
self.get_select_option(to_box, str(self.bob.id)).click()
self.assertActiveButtons(mode, field_name, False, True, True, True)
self.selenium.find_element_by_id(remove_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)])
self.assertSelectOptions(to_box,
[str(self.jason.id), str(self.john.id)])
# Choose some more options --------------------------------------------
self.get_select_option(from_box, str(self.arthur.id)).click()
self.get_select_option(from_box, str(self.cliff.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)])
self.assertSelectOptions(to_box,
[str(self.jason.id), str(self.john.id),
str(self.arthur.id), str(self.cliff.id)])
def test_basic(self):
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get('%s%s' % (
self.live_server_url, reverse('admin:admin_widgets_school_change', args=(self.school.id,))))
self.wait_page_loaded()
self.execute_basic_operations('vertical', 'students')
self.execute_basic_operations('horizontal', 'alumni')
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = models.School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()),
[self.arthur, self.cliff, self.jason, self.john])
self.assertEqual(list(self.school.alumni.all()),
[self.arthur, self.cliff, self.jason, self.john])
def test_filter(self):
"""
Ensure that typing in the search box filters out options displayed in
the 'from' box.
"""
from selenium.webdriver.common.keys import Keys
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, reverse('admin:admin_widgets_school_change', args=(self.school.id,))))
for field_name in ['students', 'alumni']:
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
input = self.selenium.find_element_by_id('id_%s_input' % field_name)
# Initial values
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
# Typing in some characters filters out non-matching options
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys('R')
self.assertSelectOptions(from_box, [str(self.arthur.id)])
# Clearing the text box makes the other options reappear
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
# -----------------------------------------------------------------
# Check that choosing a filtered option sends it properly to the
# 'to' box.
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
self.get_select_option(from_box, str(self.jason.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id),
str(self.jason.id)])
self.get_select_option(to_box, str(self.lisa.id)).click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.lisa.id)])
self.assertSelectOptions(to_box,
[str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE]) # Clear text box
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jenny.id),
str(self.john.id), str(self.lisa.id)])
self.assertSelectOptions(to_box,
[str(self.peter.id), str(self.jason.id)])
# -----------------------------------------------------------------
# Check that pressing enter on a filtered option sends it properly
# to the 'to' box.
self.get_select_option(to_box, str(self.jason.id)).click()
self.selenium.find_element_by_id(remove_link).click()
input.send_keys('ja')
self.assertSelectOptions(from_box, [str(self.jason.id)])
input.send_keys([Keys.ENTER])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE])
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = models.School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()),
[self.jason, self.peter])
self.assertEqual(list(self.school.alumni.all()),
[self.jason, self.peter])
def test_back_button_bug(self):
"""
Some browsers had a bug where navigating away from the change page
and then clicking the browser's back button would clear the
filter_horizontal/filter_vertical widgets (#13614).
"""
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
# Navigate away and go back to the change form page.
self.selenium.find_element_by_link_text('Home').click()
self.selenium.back()
expected_unselected_values = [
str(self.arthur.id), str(self.bob.id), str(self.cliff.id),
str(self.jason.id), str(self.jenny.id), str(self.john.id),
]
expected_selected_values = [str(self.lisa.id), str(self.peter.id)]
# Check that everything is still in place
self.assertSelectOptions('#id_students_from', expected_unselected_values)
self.assertSelectOptions('#id_students_to', expected_selected_values)
self.assertSelectOptions('#id_alumni_from', expected_unselected_values)
self.assertSelectOptions('#id_alumni_to', expected_selected_values)
def test_refresh_page(self):
"""
Horizontal and vertical filter widgets keep selected options on page
reload (#22955).
"""
self.school.students.add(self.arthur, self.jason)
self.school.alumni.add(self.arthur, self.jason)
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
# self.selenium.refresh() or send_keys(Keys.F5) does hard reload and
# doesn't replicate what happens when a user clicks the browser's
# 'Refresh' button.
self.selenium.execute_script("location.reload()")
self.wait_page_loaded()
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
class HorizontalVerticalFilterSeleniumChromeTests(HorizontalVerticalFilterSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class HorizontalVerticalFilterSeleniumIETests(HorizontalVerticalFilterSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminRawIdWidgetSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
super(AdminRawIdWidgetSeleniumFirefoxTests, self).setUp()
models.Band.objects.create(id=42, name='Bogey Blues')
models.Band.objects.create(id=98, name='Green Potatoes')
def test_ForeignKey(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, reverse('admin:admin_widgets_event_add')))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element_by_id('id_main_band').get_attribute('value'),
'')
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_main_band')
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_main_band')
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the other selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '98')
def test_many_to_many(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, reverse('admin:admin_widgets_event_add')))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element_by_id('id_supporting_bands').get_attribute('value'),
'')
# Help text for the field is displayed
self.assertEqual(
self.selenium.find_element_by_css_selector('.field-supporting_bands p.help').text,
'Supporting Bands.'
)
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_supporting_bands')
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_supporting_bands')
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the two selected bands' ids
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42,98')
class AdminRawIdWidgetSeleniumChromeTests(AdminRawIdWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class AdminRawIdWidgetSeleniumIETests(AdminRawIdWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class RelatedFieldWidgetSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_ForeignKey_using_to_field(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get('%s%s' % (
self.live_server_url,
reverse('admin:admin_widgets_profile_add')))
main_window = self.selenium.current_window_handle
# Click the Add User button to add new
self.selenium.find_element_by_id('add_id_user').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_user')
password_field = self.selenium.find_element_by_id('id_password')
password_field.send_keys('password')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'newuser'
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
# The field now contains the new user
self.selenium.find_element_by_css_selector('#id_user option[value=newuser]')
# Click the Change User button to change it
self.selenium.find_element_by_id('change_id_user').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_user')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'changednewuser'
username_field.clear()
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
self.selenium.find_element_by_css_selector('#id_user option[value=changednewuser]')
# Go ahead and submit the form to make sure it works
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.wait_for_text('li.success', 'The profile "changednewuser" was added successfully.')
profiles = models.Profile.objects.all()
self.assertEqual(len(profiles), 1)
self.assertEqual(profiles[0].user.username, username_value)
class RelatedFieldWidgetSeleniumChromeTests(RelatedFieldWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class RelatedFieldWidgetSeleniumIETests(RelatedFieldWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| bsd-3-clause | 5,863,632,413,345,973,000 | 3,305,657,480,018,475,500 | 44.058174 | 119 | 0.621076 | false |
home-assistant/home-assistant | homeassistant/components/yeelight/config_flow.py | 1 | 9179 | """Config flow for Yeelight integration."""
import logging
import voluptuous as vol
import yeelight
from homeassistant import config_entries, exceptions
from homeassistant.components.dhcp import IP_ADDRESS
from homeassistant.const import CONF_DEVICE, CONF_HOST, CONF_ID, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import (
CONF_MODE_MUSIC,
CONF_MODEL,
CONF_NIGHTLIGHT_SWITCH,
CONF_NIGHTLIGHT_SWITCH_TYPE,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DOMAIN,
NIGHTLIGHT_SWITCH_TYPE_LIGHT,
_async_unique_name,
)
MODEL_UNKNOWN = "unknown"
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Yeelight."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Return the options flow."""
return OptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the config flow."""
self._discovered_devices = {}
self._discovered_model = None
self._discovered_ip = None
async def async_step_homekit(self, discovery_info):
"""Handle discovery from homekit."""
self._discovered_ip = discovery_info["host"]
return await self._async_handle_discovery()
async def async_step_dhcp(self, discovery_info):
"""Handle discovery from dhcp."""
self._discovered_ip = discovery_info[IP_ADDRESS]
return await self._async_handle_discovery()
async def _async_handle_discovery(self):
"""Handle any discovery."""
self.context[CONF_HOST] = self._discovered_ip
for progress in self._async_in_progress():
if progress.get("context", {}).get(CONF_HOST) == self._discovered_ip:
return self.async_abort(reason="already_in_progress")
try:
self._discovered_model = await self._async_try_connect(self._discovered_ip)
except CannotConnect:
return self.async_abort(reason="cannot_connect")
if not self.unique_id:
return self.async_abort(reason="cannot_connect")
self._abort_if_unique_id_configured(
updates={CONF_HOST: self._discovered_ip}, reload_on_update=False
)
return await self.async_step_discovery_confirm()
async def async_step_discovery_confirm(self, user_input=None):
"""Confirm discovery."""
if user_input is not None:
return self.async_create_entry(
title=f"{self._discovered_model} {self.unique_id}",
data={CONF_ID: self.unique_id, CONF_HOST: self._discovered_ip},
)
self._set_confirm_only()
placeholders = {
"model": self._discovered_model,
"host": self._discovered_ip,
}
self.context["title_placeholders"] = placeholders
return self.async_show_form(
step_id="discovery_confirm", description_placeholders=placeholders
)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
if not user_input.get(CONF_HOST):
return await self.async_step_pick_device()
try:
model = await self._async_try_connect(user_input[CONF_HOST])
except CannotConnect:
errors["base"] = "cannot_connect"
else:
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=f"{model} {self.unique_id}",
data=user_input,
)
user_input = user_input or {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, "")): str}
),
errors=errors,
)
async def async_step_pick_device(self, user_input=None):
"""Handle the step to pick discovered device."""
if user_input is not None:
unique_id = user_input[CONF_DEVICE]
capabilities = self._discovered_devices[unique_id]
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=_async_unique_name(capabilities),
data={CONF_ID: unique_id},
)
configured_devices = {
entry.data[CONF_ID]
for entry in self._async_current_entries()
if entry.data[CONF_ID]
}
devices_name = {}
# Run 3 times as packets can get lost
for _ in range(3):
devices = await self.hass.async_add_executor_job(yeelight.discover_bulbs)
for device in devices:
capabilities = device["capabilities"]
unique_id = capabilities["id"]
if unique_id in configured_devices:
continue # ignore configured devices
model = capabilities["model"]
host = device["ip"]
name = f"{host} {model} {unique_id}"
self._discovered_devices[unique_id] = capabilities
devices_name[unique_id] = name
# Check if there is at least one device
if not devices_name:
return self.async_abort(reason="no_devices_found")
return self.async_show_form(
step_id="pick_device",
data_schema=vol.Schema({vol.Required(CONF_DEVICE): vol.In(devices_name)}),
)
async def async_step_import(self, user_input=None):
"""Handle import step."""
host = user_input[CONF_HOST]
try:
await self._async_try_connect(host)
except CannotConnect:
_LOGGER.error("Failed to import %s: cannot connect", host)
return self.async_abort(reason="cannot_connect")
if CONF_NIGHTLIGHT_SWITCH_TYPE in user_input:
user_input[CONF_NIGHTLIGHT_SWITCH] = (
user_input.pop(CONF_NIGHTLIGHT_SWITCH_TYPE)
== NIGHTLIGHT_SWITCH_TYPE_LIGHT
)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=user_input[CONF_NAME], data=user_input)
async def _async_try_connect(self, host):
"""Set up with options."""
self._async_abort_entries_match({CONF_HOST: host})
bulb = yeelight.Bulb(host)
try:
capabilities = await self.hass.async_add_executor_job(bulb.get_capabilities)
if capabilities is None: # timeout
_LOGGER.debug("Failed to get capabilities from %s: timeout", host)
else:
_LOGGER.debug("Get capabilities: %s", capabilities)
await self.async_set_unique_id(capabilities["id"])
return capabilities["model"]
except OSError as err:
_LOGGER.debug("Failed to get capabilities from %s: %s", host, err)
# Ignore the error since get_capabilities uses UDP discovery packet
# which does not work in all network environments
# Fallback to get properties
try:
await self.hass.async_add_executor_job(bulb.get_properties)
except yeelight.BulbException as err:
_LOGGER.error("Failed to get properties from %s: %s", host, err)
raise CannotConnect from err
_LOGGER.debug("Get properties: %s", bulb.last_properties)
return MODEL_UNKNOWN
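# For reference, a successful ``get_capabilities()`` reply is a dict along
# these lines (values here are illustrative, not from a real device):
#     {"id": "0x0000000002dfb19a", "model": "color", "fw_ver": "65", ...}
# which is why the flow keys the unique id on ``id`` and the entry title on
# ``model``.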
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for Yeelight."""
def __init__(self, config_entry):
"""Initialize the option flow."""
self._config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle the initial step."""
if user_input is not None:
options = {**self._config_entry.options}
options.update(user_input)
return self.async_create_entry(title="", data=options)
options = self._config_entry.options
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(CONF_MODEL, default=options[CONF_MODEL]): str,
vol.Required(
CONF_TRANSITION,
default=options[CONF_TRANSITION],
): cv.positive_int,
vol.Required(
CONF_MODE_MUSIC, default=options[CONF_MODE_MUSIC]
): bool,
vol.Required(
CONF_SAVE_ON_CHANGE,
default=options[CONF_SAVE_ON_CHANGE],
): bool,
vol.Required(
CONF_NIGHTLIGHT_SWITCH,
default=options[CONF_NIGHTLIGHT_SWITCH],
): bool,
}
),
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
| apache-2.0 | 161,076,037,189,978,100 | 2,514,556,139,693,665,300 | 36.313008 | 88 | 0.577514 | false |
GeographicaGS/moocng | moocng/courses/migrations/0015_calculate_stats.py | 2 | 14058 | # -*- coding: utf-8 -*-
# Copyright 2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from south.v2 import DataMigration
from moocng.mongodb import get_db
from moocng.portal.stats import calculate_all_stats
class Migration(DataMigration):
def forwards(self, orm):
        def callback(step='', counter=0, total=0):
            # Report progress roughly once per percent; keep the ``step``
            # argument intact, since it names the phase being reported.
            interval = max(1, int(total / 100))
            if counter % interval == 0:
                if step == 'calculating':
                    print 'Processed %d of %d users' % (counter, total)
                elif step == 'storing':
                    print 'Saved %d of %d statistics entries' % (counter, total)
calculate_all_stats(
user_objects=orm['auth.user'].objects,
kq_objects=orm['courses.knowledgequantum'].objects,
callback=callback)
def backwards(self, orm):
connector = get_db()
db = connector.get_database()
db.stats_course.drop()
db.stats_unit.drop()
db.stats_kq.drop()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'badges.badge': {
'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Badge'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courses.announcement': {
'Meta': {'object_name': 'Announcement'},
'content': ('tinymce.models.HTMLField', [], {}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'courses.attachment': {
'Meta': {'object_name': 'Attachment'},
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']"})
},
'courses.course': {
'Meta': {'ordering': "['order']", 'object_name': 'Course'},
'certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'certification_banner': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'completion_badge': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'course'", 'unique': 'True', 'null': 'True', 'to': "orm['badges.Badge']"}),
'description': ('tinymce.models.HTMLField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_method': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '200'}),
'estimated_effort': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intended_audience': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'max_reservations_pending': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'max_reservations_total': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'courses_as_owner'", 'to': "orm['auth.User']"}),
'promotion_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'promotion_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'requirements': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'courses_as_student'", 'blank': 'True', 'to': "orm['auth.User']"}),
'teachers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'courses_as_teacher'", 'symmetrical': 'False', 'through': "orm['courses.CourseTeacher']", 'to': "orm['auth.User']"}),
'threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'})
},
'courses.courseteacher': {
'Meta': {'ordering': "['order']", 'object_name': 'CourseTeacher'},
'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'courses.knowledgequantum': {
'Meta': {'ordering': "['order']", 'object_name': 'KnowledgeQuantum'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'supplementary_material': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'teacher_comments': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unit': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Unit']"}),
'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'courses.option': {
'Meta': {'object_name': 'Option'},
'feedback': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'optiontype': ('django.db.models.fields.CharField', [], {'default': "'t'", 'max_length': '1'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Question']"}),
'solution': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '100'}),
'x': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'y': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'courses.question': {
'Meta': {'object_name': 'Question'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']", 'unique': 'True'}),
'last_frame': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'solution_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'solution_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'solution_text': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'use_last_frame': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'courses.unit': {
'Meta': {'ordering': "['order']", 'object_name': 'Unit'},
'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unittype': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
}
}
complete_apps = ['courses']
symmetrical = True
| apache-2.0 | -7,857,038,485,134,419,000 | 5,091,533,556,135,619,000 | 72.989474 | 215 | 0.556125 | false |
aerickson/ansible | test/units/plugins/cache/test_cache.py | 91 | 4078 | # (c) 2012-2015, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest, mock
from ansible.errors import AnsibleError
from ansible.plugins.cache import FactCache
from ansible.plugins.cache.base import BaseCacheModule
from ansible.plugins.cache.memory import CacheModule as MemoryCache
HAVE_MEMCACHED = True
try:
import memcache
except ImportError:
HAVE_MEMCACHED = False
else:
# Use an else so that the only reason we skip this is for lack of
# memcached, not errors importing the plugin
from ansible.plugins.cache.memcached import CacheModule as MemcachedCache
HAVE_REDIS = True
try:
import redis
except ImportError:
HAVE_REDIS = False
else:
from ansible.plugins.cache.redis import CacheModule as RedisCache
class TestFactCache(unittest.TestCase):
def setUp(self):
with mock.patch('ansible.constants.CACHE_PLUGIN', 'memory'):
self.cache = FactCache()
def test_copy(self):
self.cache['avocado'] = 'fruit'
self.cache['daisy'] = 'flower'
a_copy = self.cache.copy()
self.assertEqual(type(a_copy), dict)
self.assertEqual(a_copy, dict(avocado='fruit', daisy='flower'))
def test_plugin_load_failure(self):
# See https://github.com/ansible/ansible/issues/18751
# Note no fact_connection config set, so this will fail
with mock.patch('ansible.constants.CACHE_PLUGIN', 'json'):
self.assertRaisesRegexp(AnsibleError,
"Unable to load the facts cache plugin.*json.*",
FactCache)
class TestAbstractClass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_subclass_error(self):
class CacheModule1(BaseCacheModule):
pass
with self.assertRaises(TypeError):
CacheModule1()
class CacheModule2(BaseCacheModule):
def get(self, key):
super(CacheModule2, self).get(key)
with self.assertRaises(TypeError):
CacheModule2()
def test_subclass_success(self):
class CacheModule3(BaseCacheModule):
def get(self, key):
super(CacheModule3, self).get(key)
def set(self, key, value):
super(CacheModule3, self).set(key, value)
def keys(self):
super(CacheModule3, self).keys()
def contains(self, key):
super(CacheModule3, self).contains(key)
def delete(self, key):
super(CacheModule3, self).delete(key)
def flush(self):
super(CacheModule3, self).flush()
def copy(self):
super(CacheModule3, self).copy()
self.assertIsInstance(CacheModule3(), CacheModule3)
@unittest.skipUnless(HAVE_MEMCACHED, 'python-memcached module not installed')
def test_memcached_cachemodule(self):
self.assertIsInstance(MemcachedCache(), MemcachedCache)
def test_memory_cachemodule(self):
self.assertIsInstance(MemoryCache(), MemoryCache)
@unittest.skipUnless(HAVE_REDIS, 'Redis python module not installed')
def test_redis_cachemodule(self):
self.assertIsInstance(RedisCache(), RedisCache)
| gpl-3.0 | 7,614,867,294,633,416,000 | 8,828,171,498,711,579,000 | 31.887097 | 84 | 0.663315 | false |
j5shi/ST3_Config | Packages/Anaconda/commands/mccabe.py | 6 | 2445 |
# Copyright (C) 2013 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib.callback import Callback
from ..anaconda_lib.helpers import get_settings, active_view
class AnacondaMcCabe(sublime_plugin.WindowCommand):
"""Execute McCabe complexity checker
"""
def run(self):
view = self.window.active_view()
code = view.substr(sublime.Region(0, view.size()))
data = {
'code': code,
'threshold': get_settings(view, 'mccabe_threshold', 7),
'filename': active_view().file_name(),
'method': 'mccabe',
'handler': 'qa'
}
Worker().execute(Callback(on_success=self.prepare_data), **data)
def is_enabled(self):
"""Determine if this command is enabled or not
"""
view = self.window.active_view()
location = view.sel()[0].begin()
matcher = 'source.python'
return view.match_selector(location, matcher)
def prepare_data(self, data):
"""Prepare the data to present in the quick panel
"""
if not data['success'] or data['errors'] is None:
sublime.status_message('Unable to run McCabe checker...')
return
        if len(data['errors']) == 0:
            view = self.window.active_view()
            threshold = get_settings(view, 'mccabe_threshold', 7)
            sublime.status_message(
                'No code complexity beyond {} was found'.format(threshold)
            )
            # Nothing to show; avoid opening an empty quick panel.
            return
self._show_options(data['errors'])
def _show_options(self, options):
"""Show a dropdown quickpanel with options to jump
"""
self.options = []
for option in options:
self.options.append(
[option['message'], 'line: {}'.format(option['line'])]
)
self.window.show_quick_panel(self.options, self._jump)
def _jump(self, item):
"""Jump to a line in the view buffer
"""
if item == -1:
return
lineno = int(self.options[item][1].split(':')[1].strip()) - 1
pt = self.window.active_view().text_point(lineno, 0)
self.window.active_view().sel().clear()
self.window.active_view().sel().add(sublime.Region(pt))
self.window.active_view().show(pt)
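# Usage sketch (hypothetical invocation; Sublime Text derives the command
# name from the class name):
#
#     sublime.active_window().run_command('anaconda_mc_cabe')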
| mit | 8,896,764,222,401,885,000 | -6,517,807,355,799,045,000 | 29.5625 | 74 | 0.580777 | false |
RobertWWong/WebDev | djangoApp/ENV/lib/python3.5/site-packages/django/contrib/gis/serializers/geojson.py | 36 | 2813 | from __future__ import unicode_literals
from django.contrib.gis.gdal import CoordTransform, SpatialReference
from django.core.serializers.base import SerializerDoesNotExist
from django.core.serializers.json import Serializer as JSONSerializer
class Serializer(JSONSerializer):
"""
Convert a queryset to GeoJSON, http://geojson.org/
"""
def _init_options(self):
super(Serializer, self)._init_options()
self.geometry_field = self.json_kwargs.pop('geometry_field', None)
self.srid = self.json_kwargs.pop('srid', 4326)
if (self.selected_fields is not None and self.geometry_field is not None and
self.geometry_field not in self.selected_fields):
self.selected_fields = list(self.selected_fields) + [self.geometry_field]
def start_serialization(self):
self._init_options()
self._cts = {} # cache of CoordTransform's
self.stream.write(
'{"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "EPSG:%d"}},'
' "features": [' % self.srid)
def end_serialization(self):
self.stream.write(']}')
def start_object(self, obj):
super(Serializer, self).start_object(obj)
self._geometry = None
if self.geometry_field is None:
# Find the first declared geometry field
for field in obj._meta.fields:
if hasattr(field, 'geom_type'):
self.geometry_field = field.name
break
def get_dump_object(self, obj):
data = {
"type": "Feature",
"properties": self._current,
}
if ((self.selected_fields is None or 'pk' in self.selected_fields) and
'pk' not in data["properties"]):
data["properties"]["pk"] = obj._meta.pk.value_to_string(obj)
if self._geometry:
if self._geometry.srid != self.srid:
# If needed, transform the geometry in the srid of the global geojson srid
if self._geometry.srid not in self._cts:
srs = SpatialReference(self.srid)
self._cts[self._geometry.srid] = CoordTransform(self._geometry.srs, srs)
self._geometry.transform(self._cts[self._geometry.srid])
data["geometry"] = eval(self._geometry.geojson)
else:
data["geometry"] = None
return data
def handle_field(self, obj, field):
if field.name == self.geometry_field:
self._geometry = field.value_from_object(obj)
else:
super(Serializer, self).handle_field(obj, field)
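# Minimal usage sketch; ``City`` and its ``point`` geometry field are
# hypothetical models, while the keyword arguments are the ones consumed
# above:
#
#     from django.core import serializers
#     geojson = serializers.serialize(
#         "geojson", City.objects.all(),
#         geometry_field="point", fields=("name",),
#     )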
class Deserializer(object):
def __init__(self, *args, **kwargs):
raise SerializerDoesNotExist("geojson is a serialization-only serializer")
| mit | 5,432,127,367,195,075,000 | 7,063,348,964,677,814,000 | 39.768116 | 103 | 0.601849 | false |
wenqf11/huhamhire-hosts | util/retrievedata.py | 24 | 10221 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# retrievedata.py : Retrieve data from the hosts data file.
#
# Copyleft (C) 2014 - huhamhire hosts team <[email protected]>
# =====================================================================
# Licensed under the GNU General Public License, version 3. You should
# have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING
# THE WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE.
# =====================================================================
__author__ = "huhamhire <[email protected]>"
import os
import sqlite3
import zipfile
DATAFILE = "./hostslist.data"
DATABASE = "./hostslist.s3db"
class RetrieveData(object):
"""
RetrieveData class contains a set of tools to retrieve information from
the hosts data file.
.. note:: All methods from this class are declared as `classmethod`.
:ivar sqlite3.connect conn: An instance of :class:`sqlite3.connect`
object to set up connection to a SQLite database.
:ivar sqlite3.connect.cursor _cur: An instance of
:class:`sqlite3.connect.cursor` object to process SQL queries in the
database.
:ivar str _db: Filename of a SQLite database file.
"""
conn = None
_cur = None
_database = None
@classmethod
def db_exists(cls, database=DATABASE):
"""
Check whether the :attr:`database` file exists or not.
.. note:: This is a `classmethod`.
:param database: Path to a SQLite database file.
`./hostslist.s3db` by default.
:type database: str
:return: A flag indicating whether the database file exists or not.
:rtype: bool
"""
return os.path.isfile(database)
@classmethod
def connect_db(cls, database=DATABASE):
"""
Set up connection with a SQLite :attr:`database`.
.. note:: This is a `classmethod`.
:param database: Path to a SQLite database file.
`./hostslist.s3db` by default.
:type database: str
"""
cls.conn = sqlite3.connect(database)
cls._cur = cls.conn.cursor()
cls._database = database
@classmethod
def disconnect_db(cls):
"""
Close the connection with a SQLite database.
.. note:: This is a `classmethod`.
"""
cls.conn.close()
@classmethod
def get_info(cls):
"""
Retrieve the metadata of current data file.
.. note:: This is a `classmethod`.
        :return: Metadata of current data file. The metadata is a dictionary
            whose keys are the `Section Name` entries and whose values are
            the `Information` entries defined in the hosts data file.
:rtype: dict
"""
cls._cur.execute("SELECT sect, info FROM info;")
info = dict(cls._cur.fetchall())
return info
@classmethod
def get_head(cls):
"""
Retrieve the head information from hosts data file.
.. note:: This is a `classmethod`.
:return: Lines of hosts head information.
:rtype: list
"""
cls._cur.execute("SELECT str FROM hosts_head ORDER BY ln;")
head = cls._cur.fetchall()
return head
@classmethod
def get_ids(cls, id_cfg):
"""
Calculate the id numbers covered by config word :attr:`id_cfg`.
.. note:: This is a `classmethod`.
:param id_cfg: A hexadecimal config word of id selections.
:type id_cfg: int
:return: ID numbers covered by config word.
:rtype: list
"""
cfg = bin(id_cfg)[:1:-1]
ids = []
for i, id_en in enumerate(cfg):
if int(id_en):
ids.append(0b1 << i)
return ids
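    # Illustrative note (added comment): get_ids(0b10110) returns
    # [0b10, 0b100, 0b10000], i.e. each set bit of the config word as a
    # separate id number.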
@classmethod
def get_host(cls, part_id, mod_id):
"""
Retrieve the hosts module specified by :attr:`mod_id` from a part
specified by :attr:`part_id` in the data file.
.. note:: This is a `classmethod`.
:param part_id: ID number of a specified part from the hosts data
file.
.. note:: ID number is usually an 8-bit control byte.
:type part_id: int
:param mod_id: ID number of a specified module from a specified part.
.. note:: ID number is usually an 8-bit control byte.
:type mod_id: int
.. seealso:: :attr:`make_cfg` in
:class:`~tui.curses_d.CursesDaemon` class.
:return: hosts, mod_name
* hosts(`list`): Hosts entries from a specified module.
* mod_name(`str`): Name of a specified module.
:rtype: list, str
"""
if part_id == 0x04:
return None, "customize"
cls._cur.execute("""
SELECT part_name FROM parts
WHERE part_id=:part_id;
""", (part_id, ))
part_name = cls._cur.fetchone()[0]
cls._cur.execute("""
SELECT ip, host FROM %s
WHERE cate=%s;
""" % (part_name, mod_id))
hosts = cls._cur.fetchall()
cls._cur.execute("""
SELECT mod_name FROM modules
WHERE part_id=:part_id AND mod_id=:mod_id;
""", (part_id, mod_id))
mod_name = cls._cur.fetchone()[0]
return hosts, mod_name
@classmethod
def get_choice(cls, flag_v6=False):
"""
Retrieve module selection items from the hosts data file with default
selection for users.
.. note:: This is a `classmethod`.
:param flag_v6: A flag indicating whether to receive IPv6 hosts
entries or the IPv4 ones. Default by `False`.
=============== =======
:attr:`flag_v6` hosts
=============== =======
True IPv6
False IPv4
=============== =======
:type flag_v6: bool
:return: modules, defaults, slices
* modules(`list`): Information of modules for users to select.
* defaults(`dict`): Default selection config for selected parts.
* slices(`list`): Numbers of modules in each part.
:rtype: list, dict, list
"""
ch_parts = (0x08, 0x20 if flag_v6 else 0x10, 0x40)
cls._cur.execute("""
SELECT * FROM modules
WHERE part_id IN (:id_shared, :id_ipv, :id_adblock);
""", ch_parts)
modules = cls._cur.fetchall()
cls._cur.execute("""
SELECT part_id, part_default FROM parts
WHERE part_id IN (:id_shared, :id_ipv, :id_adblock);
""", ch_parts)
default_cfg = cls._cur.fetchall()
defaults = {}
for default in default_cfg:
defaults[default[0]] = cls.get_ids(default[1])
slices = [0]
for ch_part in ch_parts:
cls._cur.execute("""
SELECT COUNT(mod_id) FROM modules
WHERE part_id=:ch_part;
""", (ch_part, ))
slices.append(cls._cur.fetchone()[0])
for s in range(1, len(slices)):
slices[s] += slices[s - 1]
return modules, defaults, slices
@classmethod
def chk_mutex(cls, part_id, mod_cfg):
"""
Check if there is conflict in user selections :attr:`mod_cfg` from a
part specified by :attr:`part_id` in the data file.
        .. note:: A conflict may happen when one selected module is declared
            in the `mutex` word of another module selected at the same time.
.. note:: This is a `classmethod`.
:param part_id: ID number of a specified part from the hosts data
file.
.. note:: ID number is usually an 8-bit control byte.
.. seealso:: :meth:`~util.retrievedata.get_host`.
:type part_id: int
:param mod_cfg: A 16-bit config word indicating module selections of a
specified part.
.. note::
If modules in specified part whose IDs are `0x0002` and
`0x0010`, the value here should be `0x0002 + 0x0010 = 0x0012`,
which is `0b0000000000000010 + 0b0000000000010000 =
0b0000000000010010` in binary.
        :type mod_cfg: int
.. seealso:: :attr:`make_cfg` in
:class:`~tui.curses_d.CursesDaemon` class.
        :return: A flag indicating whether the selection is free of mutex
            conflicts or not.
            ====== ============
            Return Status
            ====== ============
            True   No conflicts
            False  Conflict
            ====== ============
:rtype: bool
"""
if part_id == 0x04:
return True
cls._cur.execute("""
SELECT mod_id, mutex FROM modules
WHERE part_id=:part_id;
""", (part_id, ))
mutex_tuple = dict(cls._cur.fetchall())
mutex_info = []
mod_info = cls.get_ids(mod_cfg)
for mod_id in mod_info:
mutex_info.extend(cls.get_ids(mutex_tuple[mod_id]))
mutex_info = set(mutex_info)
for mod_id in mod_info:
if mod_id in mutex_info:
return False
return True
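    # Hypothetical usage (added note; part and module ids are illustrative):
    #     RetrieveData.connect_db()
    #     ok = RetrieveData.chk_mutex(0x08, 0x0002 + 0x0010)
    # ok is True when the two selected modules do not exclude each other.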
@classmethod
def unpack(cls, datafile=DATAFILE, database=DATABASE):
"""
Unzip the archived :attr:`datafile` to a SQLite database file
:attr:`database`.
.. note:: This is a `classmethod`.
:param datafile: Path to the zipped data file. `./hostslist.data` by
default.
:type datafile: str
:param database: Path to a SQLite database file. `./hostslist.s3db` by
default.
:type database: str
"""
datafile = zipfile.ZipFile(datafile, "r")
path, filename = os.path.split(database)
datafile.extract(filename, path)
@classmethod
def clear(cls):
"""
Close connection to the database and delete the database file.
.. note:: This is a `classmethod`.
"""
cls.conn.close()
os.remove(cls._database)
| gpl-3.0 | 2,927,765,234,434,598,400 | -5,463,761,747,695,035,000 | 31.242902 | 78 | 0.545739 | false |
CIFASIS/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py | 39 | 5044 | """
WRITEME
"""
import logging
from ..linear import LinearTransform
from .unshared_conv import FilterActs, ImgActs
from theano.compat.six.moves import xrange
from theano.sandbox import cuda
if cuda.cuda_available:
import gpu_unshared_conv # register optimizations
import numpy as np
import warnings
try:
import matplotlib.pyplot as plt
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
logger = logging.getLogger(__name__)
class LocalDot(LinearTransform):
"""
    LocalDot is a linear operation computationally similar to
    convolution in the spatial domain, except that whereas convolution
    applies a single filter or set of filters across an image,
    LocalDot has different filterbanks for different points in the image.
Mathematically, this is a general linear transform except for a
restriction that filters are 0 outside of a spatially localized patch
within the image.
Image shape is 5-tuple:
color_groups
colors_per_group
rows
cols
images
Filterbank shape is 7-tuple (!)
0 row_positions
1 col_positions
2 colors_per_group
3 height
4 width
5 color_groups
6 filters_per_group
    The result of left-multiplication is a 5-tuple with shape:
filter_groups
filters_per_group
row_positions
col_positions
images
Parameters
----------
filters : WRITEME
irows : WRITEME
Image rows
icols : WRITEME
Image columns
subsample : WRITEME
padding_start : WRITEME
filters_shape : WRITEME
message : WRITEME
"""
def __init__(self, filters, irows, icols=None,
subsample=(1, 1),
padding_start=None,
filters_shape=None,
message=""):
LinearTransform.__init__(self, [filters])
self._filters = filters
if filters_shape is None:
self._filters_shape = tuple(filters.get_value(borrow=True).shape)
else:
self._filters_shape = tuple(filters_shape)
self._irows = irows
if icols is None:
self._icols = irows
else:
self._icols = icols
if self._icols != self._irows:
raise NotImplementedError('GPU code at least needs square imgs')
self._subsample = tuple(subsample)
self._padding_start = padding_start
if len(self._filters_shape) != 7:
raise TypeError('need 7-tuple filter shape', self._filters_shape)
if self._subsample[0] != self._subsample[1]:
raise ValueError('subsampling must be same in rows and cols')
self._filter_acts = FilterActs(self._subsample[0])
self._img_acts = ImgActs(module_stride=self._subsample[0])
if message:
self._message = message
else:
self._message = filters.name
def rmul(self, x):
"""
.. todo::
WRITEME
"""
assert x.ndim == 5
return self._filter_acts(x, self._filters)
def rmul_T(self, x):
"""
.. todo::
WRITEME
"""
return self._img_acts(self._filters, x, self._irows, self._icols)
def col_shape(self):
"""
.. todo::
WRITEME
"""
ishape = self.row_shape() + (-99,)
fshape = self._filters_shape
hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))
assert hshape[-1] == -99
return hshape[:-1]
def row_shape(self):
"""
.. todo::
WRITEME
"""
fshape = self._filters_shape
fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]
fgroups, filters_per_group = fshape[-2:]
return fgroups, fcolors, self._irows, self._icols
def print_status(self):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("TODO: fix dependence on non-existent "
"ndarray_status function")
"""print ndarray_status(
self._filters.get_value(borrow=True),
msg='%s{%s}'% (self.__class__.__name__,
self._message))
"""
def imshow_gray(self):
"""
.. todo::
WRITEME
"""
filters = self._filters.get_value()
modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape
logger.info(filters.shape)
rval = np.zeros((
modR * (rows + 1) - 1,
modC * (cols + 1) - 1,
))
for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]
plt.imshow(rval, cmap='gray')
return rval
| bsd-3-clause | -2,658,232,515,192,964,000 | -5,183,423,722,447,084,000 | 26.714286 | 89 | 0.568596 | false |
adityaduggal/erpnext | erpnext/patches/v5_0/update_item_description_and_image.py | 102 | 1951 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe.website.utils import find_first_image
from frappe.utils import cstr
import re
def execute():
item_details = frappe._dict()
for d in frappe.db.sql("select name, description_html, description from `tabItem`", as_dict=1):
description = cstr(d.description_html).strip() or cstr(d.description).strip()
image_url, new_desc = extract_image_and_description(description)
item_details.setdefault(d.name, frappe._dict({
"old_description": description,
"new_description": new_desc,
"image_url": image_url
}))
dt_list= ["Purchase Order Item","Supplier Quotation Item", "BOM", "BOM Explosion Item" , \
"BOM Item", "Opportunity Item" , "Quotation Item" , "Sales Order Item" , "Delivery Note Item" , \
"Material Request Item" , "Purchase Receipt Item" , "Stock Entry Detail"]
for dt in dt_list:
frappe.reload_doctype(dt)
records = frappe.db.sql("""select name, `{0}` as item_code, description from `tab{1}`
where description is not null and image is null and description like '%%<img%%'"""
.format("item" if dt=="BOM" else "item_code", dt), as_dict=1)
count = 1
for d in records:
if d.item_code and item_details.get(d.item_code) \
and cstr(d.description) == item_details.get(d.item_code).old_description:
image_url = item_details.get(d.item_code).image_url
desc = item_details.get(d.item_code).new_description
else:
image_url, desc = extract_image_and_description(cstr(d.description))
if image_url:
frappe.db.sql("""update `tab{0}` set description = %s, image = %s
where name = %s """.format(dt), (desc, image_url, d.name))
count += 1
if count % 500 == 0:
frappe.db.commit()
def extract_image_and_description(data):
image_url = find_first_image(data)
	desc = re.sub(r"<img[^>]+>", "", data)
return image_url, desc
| gpl-3.0 | 4,206,955,078,727,566,000 | 5,374,770,596,637,524,000 | 35.811321 | 98 | 0.681189 | false |
doomsterinc/odoo | addons/multi_company/__init__.py | 886 | 1054 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,977,003,438,067,410,000 | -8,304,469,798,453,500,000 | 46.909091 | 79 | 0.606262 | false |
pcamarillor/Exploratory3D | external/gtest-1.5.0/test/gtest_break_on_failure_unittest.py | 1050 | 7214 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit | 4,408,726,085,710,113,300 | 6,330,119,353,466,206,000 | 32.091743 | 79 | 0.656224 | false |
rbdavid/MolecDynamics | Analysis/data_collector.py | 2 | 1272 | #!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# USAGE:
# PREAMBLE:
import numpy as np
import sys
log_file = sys.argv[1]
system = sys.argv[2]
# SUBROUTINES
# MAIN PROGRAM:
lf = open('%s' %(log_file), 'r')
tf = open('%s.time.dat' %(system), 'w')
ef = open('%s.tot_en.dat' %(system), 'w')
pf = open('%s.pot_en.dat' %(system), 'w')
kf = open('%s.kin_en.dat' %(system), 'w')
tempf = open('%s.temp.dat' %(system), 'w')
for line in lf: # for statement that flows through all lines in the .log file
if line.startswith('Step'): # collect value for time conversion unit (1 step per n fs)
t = line.split() # creates a list that contains all components of the line
step = float(t[-1])
tf.write('%15.6f \n' %(step*0.002))
if line.startswith('Total Energy'):
a = line.split()
en = float(a[-1])
ef.write('%15.6f \n' %(en))
if line.startswith('Total Potential'):
b = line.split()
pot = float(b[-1])
pf.write('%15.6f \n' %(pot))
if line.startswith('Total Kinetic'):
c = line.split()
kin = float(c[-1])
kf.write('%15.6f \n' %(kin))
if line.startswith('Instantaneous'):
d = line.split()
temp = float(d[-1])
tempf.write('%15.6f \n' %(temp))
lf.close()
tf.close()
ef.close()
pf.close()
kf.close()
tempf.close()
| mit | 5,598,725,959,627,088,000 | 654,522,661,849,088,800 | 21.315789 | 91 | 0.610849 | false |
pschwartz/ansible | v1/ansible/cache/__init__.py | 133 | 1828 | # (c) 2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from collections import MutableMapping
from ansible import utils
from ansible import constants as C
from ansible import errors
class FactCache(MutableMapping):
def __init__(self, *args, **kwargs):
self._plugin = utils.plugins.cache_loader.get(C.CACHE_PLUGIN)
if self._plugin is None:
return
def __getitem__(self, key):
if key not in self:
raise KeyError
return self._plugin.get(key)
def __setitem__(self, key, value):
self._plugin.set(key, value)
def __delitem__(self, key):
self._plugin.delete(key)
def __contains__(self, key):
return self._plugin.contains(key)
def __iter__(self):
return iter(self._plugin.keys())
def __len__(self):
return len(self._plugin.keys())
def copy(self):
""" Return a primitive copy of the keys and values from the cache. """
return dict([(k, v) for (k, v) in self.iteritems()])
def keys(self):
return self._plugin.keys()
def flush(self):
""" Flush the fact cache of all keys. """
self._plugin.flush()
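# Hypothetical usage (added comment; key and fact names are illustrative):
#   cache = FactCache()
#   cache['host1'] = {'ansible_os_family': 'Debian'}
#   facts = cache['host1']
# The concrete storage behavior depends on the plugin named by C.CACHE_PLUGIN.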
| gpl-3.0 | 1,543,298,269,498,515,700 | 280,481,088,737,987,780 | 28.967213 | 78 | 0.657002 | false |
Awingu/open-ovf | py/ovf/env/PlatformSection.py | 1 | 2180 | # vi: ts=4 expandtab syntax=python
##############################################################################
# Copyright (c) 2008 IBM Corporation
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Murillo Fernandes Bernardes (IBM) - initial implementation
##############################################################################
"""OVF Environment PlatformSection representation"""
ORDERED_ELEMENTS = ("Kind", "Version", "Vendor", "Locale", "Timezone")
#:List of ordered elements that can be members of Platform Section.
class PlatformSection(object):
"""OVF Environment PlatformSection class"""
__slots__ = ORDERED_ELEMENTS
platDict = {} #: Store the elements as dict.
def __init__(self, data=None):
""" Initialize the PlatformSection."""
if data:
self.update(data)
def update(self, data):
"""Method to update attributes based in the dictionary
passed as argument
@param data: dictionary with new values.
@type data: dict
"""
for key, value in data.iteritems():
setattr(self, key, value)
self.platDict[key] = value
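    # Illustrative example (added comment): update({"Kind": "ESX Server",
    # "Version": "3.0.1"}) sets the matching slots, after which iteritems()
    # yields the pairs in the schema-required element order.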
def iteritems(self):
"""Make this object iterable, like in a dict.
The iterator is ordered as required in the schema.
@return: iterable (key, value) pairs
"""
ret = []
for key in self.__slots__:
try:
value = getattr(self, key)
ret.append((key, value))
except AttributeError:
continue
return ret
def __setattr__(self, name, value):
"""Overrides method in order to ensure Timezone's type
as required in the OVF spec.
"""
if name == "Timezone":
try:
int(value)
except ValueError:
raise ValueError("Timezone must be an integer-like value")
object.__setattr__(self, name, value)
| epl-1.0 | -2,657,741,073,321,035,300 | -5,983,985,760,004,645,000 | 30.142857 | 78 | 0.563303 | false |
jeandet/meson | mesonbuild/mlog.py | 1 | 5948 | # Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
import sys
import time
import platform
from contextlib import contextmanager
"""This is (mostly) a standalone module used to write logging
information about Meson runs. Some output goes to screen,
some to logging dir and some goes to both."""
def _windows_ansi():
from ctypes import windll, byref
from ctypes.wintypes import DWORD
kernel = windll.kernel32
stdout = kernel.GetStdHandle(-11)
mode = DWORD()
if not kernel.GetConsoleMode(stdout, byref(mode)):
return False
# ENABLE_VIRTUAL_TERMINAL_PROCESSING == 0x4
# If the call to enable VT processing fails (returns 0), we fallback to
# original behavior
return kernel.SetConsoleMode(stdout, mode.value | 0x4) or os.environ.get('ANSICON')
if platform.system().lower() == 'windows':
colorize_console = os.isatty(sys.stdout.fileno()) and _windows_ansi()
else:
colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('TERM') != 'dumb'
log_dir = None
log_file = None
log_fname = 'meson-log.txt'
log_depth = 0
log_timestamp_start = None
def initialize(logdir):
global log_dir, log_file
log_dir = logdir
log_file = open(os.path.join(logdir, log_fname), 'w', encoding='utf8')
def set_timestamp_start(start):
global log_timestamp_start
log_timestamp_start = start
def shutdown():
global log_file
if log_file is not None:
path = log_file.name
exception_around_goer = log_file
log_file = None
exception_around_goer.close()
return path
return None
class AnsiDecorator:
plain_code = "\033[0m"
def __init__(self, text, code, quoted=False):
self.text = text
self.code = code
self.quoted = quoted
def get_text(self, with_codes):
text = self.text
if with_codes:
text = self.code + self.text + AnsiDecorator.plain_code
if self.quoted:
text = '"{}"'.format(text)
return text
def bold(text, quoted=False):
return AnsiDecorator(text, "\033[1m", quoted=quoted)
def red(text):
return AnsiDecorator(text, "\033[1;31m")
def green(text):
return AnsiDecorator(text, "\033[1;32m")
def yellow(text):
return AnsiDecorator(text, "\033[1;33m")
def cyan(text):
return AnsiDecorator(text, "\033[1;36m")
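# Illustrative usage (added comment): log(bold('Compiler:'), green('gcc'))
# writes plain text to the log file and ANSI-colored text to the console
# when colorize_console is true.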
def process_markup(args, keep):
arr = []
if log_timestamp_start is not None:
arr = ['[{:.3f}]'.format(time.monotonic() - log_timestamp_start)]
for arg in args:
if isinstance(arg, str):
arr.append(arg)
elif isinstance(arg, AnsiDecorator):
arr.append(arg.get_text(keep))
else:
arr.append(str(arg))
return arr
def force_print(*args, **kwargs):
iostr = io.StringIO()
kwargs['file'] = iostr
print(*args, **kwargs)
raw = iostr.getvalue()
if log_depth > 0:
prepend = '|' * log_depth
raw = prepend + raw.replace('\n', '\n' + prepend, raw.count('\n') - 1)
# _Something_ is going to get printed.
try:
print(raw, end='')
except UnicodeEncodeError:
cleaned = raw.encode('ascii', 'replace').decode('ascii')
print(cleaned, end='')
def debug(*args, **kwargs):
arr = process_markup(args, False)
if log_file is not None:
print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.
log_file.flush()
def log(*args, **kwargs):
arr = process_markup(args, False)
if log_file is not None:
print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.
log_file.flush()
if colorize_console:
arr = process_markup(args, True)
force_print(*arr, **kwargs)
def _log_error(severity, *args, **kwargs):
from .mesonlib import get_error_location_string
from .environment import build_filename
if severity == 'warning':
args = (yellow('WARNING:'),) + args
elif severity == 'error':
args = (red('ERROR:'),) + args
elif severity == 'deprecation':
args = (red('DEPRECATION:'),) + args
else:
assert False, 'Invalid severity ' + severity
location = kwargs.pop('location', None)
if location is not None:
location_file = os.path.join(location.subdir, build_filename)
location_str = get_error_location_string(location_file, location.lineno)
args = (location_str,) + args
log(*args, **kwargs)
def error(*args, **kwargs):
return _log_error('error', *args, **kwargs)
def warning(*args, **kwargs):
return _log_error('warning', *args, **kwargs)
def deprecation(*args, **kwargs):
return _log_error('deprecation', *args, **kwargs)
def exception(e):
log()
if hasattr(e, 'file') and hasattr(e, 'lineno') and hasattr(e, 'colno'):
log('%s:%d:%d:' % (e.file, e.lineno, e.colno), red('ERROR: '), e)
else:
log(red('ERROR:'), e)
# Format a list for logging purposes as a string. It separates
# all but the last item with commas, and the last with 'and'.
def format_list(list):
l = len(list)
if l > 2:
return ' and '.join([', '.join(list[:-1]), list[-1]])
elif l == 2:
return ' and '.join(list)
elif l == 1:
return list[0]
else:
return ''
@contextmanager
def nested():
global log_depth
log_depth += 1
try:
yield
finally:
log_depth -= 1
| apache-2.0 | 6,108,136,551,445,948,000 | 6,867,474,748,403,956,000 | 28.59204 | 90 | 0.632313 | false |
Endika/django | django/contrib/sitemaps/__init__.py | 15 | 5550 | from django.apps import apps as django_apps
from django.conf import settings
from django.core import paginator
from django.core.exceptions import ImproperlyConfigured
from django.urls import NoReverseMatch, reverse
from django.utils import translation
from django.utils.six.moves.urllib.parse import urlencode
from django.utils.six.moves.urllib.request import urlopen
PING_URL = "https://www.google.com/webmasters/tools/ping"
class SitemapNotFound(Exception):
pass
def ping_google(sitemap_url=None, ping_url=PING_URL):
"""
Alerts Google that the sitemap for the current site has been updated.
If sitemap_url is provided, it should be an absolute path to the sitemap
for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
function will attempt to deduce it by using urls.reverse().
"""
if sitemap_url is None:
try:
# First, try to get the "index" sitemap URL.
sitemap_url = reverse('django.contrib.sitemaps.views.index')
except NoReverseMatch:
try:
# Next, try for the "global" sitemap URL.
sitemap_url = reverse('django.contrib.sitemaps.views.sitemap')
except NoReverseMatch:
pass
if sitemap_url is None:
raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")
if not django_apps.is_installed('django.contrib.sites'):
raise ImproperlyConfigured("ping_google requires django.contrib.sites, which isn't installed.")
Site = django_apps.get_model('sites.Site')
current_site = Site.objects.get_current()
url = "http://%s%s" % (current_site.domain, sitemap_url)
params = urlencode({'sitemap': url})
urlopen("%s?%s" % (ping_url, params))
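# Hypothetical usage (added comment; the sitemap path is illustrative):
#   from django.contrib.sitemaps import ping_google
#   ping_google('/sitemap.xml')
# resolves the current Site's domain and submits the sitemap URL to PING_URL.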
class Sitemap(object):
# This limit is defined by Google. See the index documentation at
# http://sitemaps.org/protocol.php#index.
limit = 50000
# If protocol is None, the URLs in the sitemap will use the protocol
# with which the sitemap was requested.
protocol = None
def __get(self, name, obj, default=None):
try:
attr = getattr(self, name)
except AttributeError:
return default
if callable(attr):
return attr(obj)
return attr
def items(self):
return []
def location(self, obj):
return obj.get_absolute_url()
def _get_paginator(self):
return paginator.Paginator(self.items(), self.limit)
paginator = property(_get_paginator)
def get_urls(self, page=1, site=None, protocol=None):
# Determine protocol
if self.protocol is not None:
protocol = self.protocol
if protocol is None:
protocol = 'http'
# Determine domain
if site is None:
if django_apps.is_installed('django.contrib.sites'):
Site = django_apps.get_model('sites.Site')
try:
site = Site.objects.get_current()
except Site.DoesNotExist:
pass
if site is None:
raise ImproperlyConfigured(
"To use sitemaps, either enable the sites framework or pass "
"a Site/RequestSite object in your view."
)
domain = site.domain
if getattr(self, 'i18n', False):
urls = []
current_lang_code = translation.get_language()
for lang_code, lang_name in settings.LANGUAGES:
translation.activate(lang_code)
urls += self._urls(page, protocol, domain)
translation.activate(current_lang_code)
else:
urls = self._urls(page, protocol, domain)
return urls
def _urls(self, page, protocol, domain):
urls = []
latest_lastmod = None
all_items_lastmod = True # track if all items have a lastmod
for item in self.paginator.page(page).object_list:
loc = "%s://%s%s" % (protocol, domain, self.__get('location', item))
priority = self.__get('priority', item)
lastmod = self.__get('lastmod', item)
if all_items_lastmod:
all_items_lastmod = lastmod is not None
if (all_items_lastmod and
(latest_lastmod is None or lastmod > latest_lastmod)):
latest_lastmod = lastmod
url_info = {
'item': item,
'location': loc,
'lastmod': lastmod,
'changefreq': self.__get('changefreq', item),
'priority': str(priority if priority is not None else ''),
}
urls.append(url_info)
if all_items_lastmod and latest_lastmod:
self.latest_lastmod = latest_lastmod
return urls
class GenericSitemap(Sitemap):
priority = None
changefreq = None
def __init__(self, info_dict, priority=None, changefreq=None):
self.queryset = info_dict['queryset']
self.date_field = info_dict.get('date_field')
self.priority = priority
self.changefreq = changefreq
def items(self):
# Make sure to return a clone; we don't want premature evaluation.
return self.queryset.filter()
def lastmod(self, item):
if self.date_field is not None:
return getattr(item, self.date_field)
return None
default_app_config = 'django.contrib.sitemaps.apps.SiteMapsConfig'
| bsd-3-clause | -4,901,414,263,857,693,000 | 9,137,200,679,483,542,000 | 34.806452 | 113 | 0.603784 | false |
ARG-TLQ/Red-DiscordBot | redbot/core/errors.py | 3 | 2555 | import importlib.machinery
import discord
from redbot.core.utils.chat_formatting import humanize_number
from .i18n import Translator
_ = Translator(__name__, __file__)
class RedError(Exception):
"""Base error class for Red-related errors."""
class PackageAlreadyLoaded(RedError):
"""Raised when trying to load an already-loaded package."""
def __init__(self, spec: importlib.machinery.ModuleSpec, *args, **kwargs):
super().__init__(*args, **kwargs)
self.spec: importlib.machinery.ModuleSpec = spec
def __str__(self) -> str:
return f"There is already a package named {self.spec.name.split('.')[-1]} loaded"
class CogLoadError(RedError):
"""Raised by a cog when it cannot load itself.
The message will be sent to the user."""
pass
class BankError(RedError):
"""Base error class for bank-related errors."""
class BalanceTooHigh(BankError, OverflowError):
"""Raised when trying to set a user's balance to higher than the maximum."""
def __init__(
self, user: discord.abc.User, max_balance: int, currency_name: str, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.user = user
self.max_balance = max_balance
self.currency_name = currency_name
def __str__(self) -> str:
return _("{user}'s balance cannot rise above {max} {currency}.").format(
user=self.user, max=humanize_number(self.max_balance), currency=self.currency_name
)
class BankPruneError(BankError):
"""Raised when trying to prune a local bank and no server is specified."""
class MissingExtraRequirements(RedError):
"""Raised when an extra requirement is missing but required."""
class ConfigError(RedError):
"""Error in a Config operation."""
class StoredTypeError(ConfigError, TypeError):
"""A TypeError pertaining to stored Config data.
This error may arise when, for example, trying to increment a value
which is not a number, or trying to toggle a value which is not a
boolean.
"""
class CannotSetSubfield(StoredTypeError):
"""Tried to set sub-field of an invalid data structure.
This would occur in the following example::
>>> import asyncio
>>> from redbot.core import Config
>>> config = Config.get_conf(None, 1234, cog_name="Example")
>>> async def example():
... await config.foo.set(True)
... await config.set_raw("foo", "bar", False) # Should raise here
...
>>> asyncio.run(example())
"""
| gpl-3.0 | -1,271,731,554,486,145,500 | -1,343,154,997,348,409,000 | 27.707865 | 94 | 0.652055 | false |
chrrrles/ansible | lib/ansible/plugins/action/set_fact.py | 71 | 1710 | # Copyright 2013 Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.vars import isidentifier
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
facts = dict()
if self._task.args:
for (k, v) in iteritems(self._task.args):
k = self._templar.template(k)
if not isidentifier(k):
return dict(failed=True, msg="The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only letters, numbers and underscores." % k)
if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
v = boolean(v)
facts[k] = v
return dict(changed=False, ansible_facts=facts)
| gpl-3.0 | -6,835,392,930,096,186,000 | 1,712,598,948,705,257,700 | 37 | 203 | 0.687719 | false |
mariusor/tlcal | plusfw/scraper/html.py | 1 | 5805 | import plusfw
from datetime import datetime
from requests import get, post, codes, exceptions
import json
class Html:
"""
"""
base_uris = {
plusfw.LABEL_PFW: "https://www.plusforward.net",
plusfw.LABEL_QLIVE: "https://www.plusforward.net",
plusfw.LABEL_QIV: "https://www.plusforward.net",
plusfw.LABEL_QIII: "https://www.plusforward.net",
plusfw.LABEL_QII: "https://www.plusforward.net",
plusfw.LABEL_QWORLD: "https://www.plusforward.net",
plusfw.LABEL_DIABOT: "https://www.plusforward.net",
plusfw.LABEL_DOOM: "https://www.plusforward.net",
plusfw.LABEL_REFLEX: "https://www.plusforward.net",
plusfw.LABEL_OWATCH: "https://www.plusforward.net",
plusfw.LABEL_GG: "https://www.plusforward.net",
plusfw.LABEL_UNREAL: "https://www.plusforward.net",
plusfw.LABEL_WARSOW: "https://www.plusforward.net",
plusfw.LABEL_DBMB: "https://www.plusforward.net",
plusfw.LABEL_XONOT: "https://www.plusforward.net",
plusfw.LABEL_QCHAMP: "https://www.plusforward.net",
plusfw.LABEL_QCPMA: "https://www.plusforward.net",
}
calendar_path = {
plusfw.LABEL_PFW: "/calendar/",
plusfw.LABEL_QLIVE: "/calendar/",
plusfw.LABEL_QIV: "/calendar/",
plusfw.LABEL_QIII: "/calendar/",
plusfw.LABEL_QII: "/calendar/",
plusfw.LABEL_QWORLD: "/calendar/",
plusfw.LABEL_DIABOT: "/calendar/",
plusfw.LABEL_DOOM: "/calendar/",
plusfw.LABEL_REFLEX: "/calendar/",
plusfw.LABEL_OWATCH: "/calendar/",
plusfw.LABEL_GG: "/calendar/",
plusfw.LABEL_UNREAL: "/calendar/",
plusfw.LABEL_WARSOW: "/calendar/",
plusfw.LABEL_DBMB: "/calendar/",
plusfw.LABEL_XONOT: "/calendar/",
plusfw.LABEL_QCHAMP: "/calendar/",
plusfw.LABEL_QCPMA: "/calendar/",
}
event_path = {
plusfw.LABEL_PFW: "/calendar/manage/",
plusfw.LABEL_QLIVE: "/calendar/manage/",
plusfw.LABEL_QIV: "/calendar/manage/",
plusfw.LABEL_QIII: "/calendar/manage/",
plusfw.LABEL_QII: "/calendar/manage/",
plusfw.LABEL_QWORLD: "/calendar/manage/",
plusfw.LABEL_DIABOT: "/calendar/manage/",
plusfw.LABEL_DOOM: "/calendar/manage/",
plusfw.LABEL_REFLEX: "/calendar/manage/",
plusfw.LABEL_OWATCH: "/calendar/manage/",
plusfw.LABEL_GG: "/calendar/manage/",
plusfw.LABEL_UNREAL: "/calendar/manage/",
plusfw.LABEL_WARSOW: "/calendar/manage/",
plusfw.LABEL_DBMB: "/calendar/manage/",
plusfw.LABEL_XONOT: "/calendar/manage/",
plusfw.LABEL_QCHAMP: "/calendar/manage/",
plusfw.LABEL_QCPMA: "/calendar/manage/",
}
calendar_type = {
plusfw.LABEL_QLIVE: 3,
plusfw.LABEL_QIV: 4,
plusfw.LABEL_QIII: 5,
plusfw.LABEL_QII: 6,
plusfw.LABEL_QWORLD: 7,
plusfw.LABEL_DIABOT: 8,
plusfw.LABEL_DOOM: 9,
plusfw.LABEL_REFLEX: 10,
plusfw.LABEL_OWATCH: 13,
plusfw.LABEL_GG: 14,
plusfw.LABEL_UNREAL: 15,
plusfw.LABEL_WARSOW: 16,
plusfw.LABEL_DBMB: 17,
plusfw.LABEL_XONOT: 18,
plusfw.LABEL_QCHAMP: 20,
plusfw.LABEL_QCPMA: 21,
}
class UriBuilder:
"""
"""
@staticmethod
def get_uri(calendar=plusfw.LABEL_PFW):
return Html.base_uris[calendar]
@staticmethod
def get_calendar_uri(calendar=plusfw.LABEL_PFW, by_week=True, date=None):
if by_week:
view_by = "week"
else:
view_by = "month"
if date is None:
date = datetime.now()
fmt = date.strftime
url = Html.base_uris[calendar] + Html.calendar_path[calendar] + \
'?view=%s&year=%s&month=%s&day=%s¤t=0' % (view_by, fmt("%Y"), fmt("%m"), fmt("%d"))
return url
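        # Illustrative result (added comment; the date is hypothetical): for
        # calendar=plusfw.LABEL_QCHAMP and date 2017-01-15 this yields
        # "https://www.plusforward.net/calendar/?view=week&year=2017&month=01&day=15&current=0"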
@staticmethod
def get_event_uri(calendar=plusfw.LABEL_PFW):
return Html.base_uris[calendar] + Html.event_path[calendar]
@staticmethod
def get_calendar(calendar="+fw", by_week=True, date=None, debug=False):
"""
        :param calendar: calendar label, one of the plusfw.LABEL_* values
        :param by_week: fetch the week view when True, the month view otherwise
        :param date: datetime the view is centered on (defaults to now)
        :param debug: print the calendar URI before loading when True
        :return: str
"""
calendar_uri = Html.UriBuilder.get_calendar_uri(calendar, by_week, date)
if debug:
print("Loading calendar from: %s" % calendar_uri)
post_data = None
if calendar in Html.calendar_type:
post_data = {
"cat": str(Html.calendar_type[calendar])
}
try:
if post_data:
tl_response = post(calendar_uri, post_data)
else:
tl_response = get(calendar_uri)
except exceptions.ConnectionError as h:
return "" % h
if tl_response.status_code == codes.ok:
return tl_response.content
else:
return ""
@staticmethod
def get_event(calendar=plusfw.LABEL_QCHAMP, event_id=None, debug=True):
if event_id is None:
return ""
if debug:
print(event_id, end=" ", flush=True)
html = ""
event_uri = Html.UriBuilder.get_event_uri(calendar)
post_data = {
"action": "view-event-popup",
"event_id": event_id
}
tl_response = post(event_uri, post_data)
if tl_response.status_code == codes.ok and tl_response.headers.get('content-type') == "application/json":
decoded_response = json.loads(tl_response.content.decode(encoding="UTF-8"))
if "html" in decoded_response:
html = decoded_response["html"]
return html
| mit | 638,844,250,963,191,200 | -4,633,035,103,520,935,000 | 32.554913 | 113 | 0.566925 | false |
paulhtremblay/boto_emr | boto_emr/get_s3_common_crawl_paths.py | 1 | 1380 | import boto3
import os
def list_objects(bucket_name, prefix):
final = []
session = boto3.Session()
client = session.client('s3')
next_token = 'init'
kwargs = {}
while next_token:
if next_token != 'init':
kwargs.update({'ContinuationToken': next_token})
response = client.list_objects_v2(Bucket=bucket_name, Prefix = prefix,
**kwargs)
next_token = response.get('NextContinuationToken')
for contents in response.get('Contents', []):
key = contents['Key']
yield key
def get_segments(bucket_name = 'commoncrawl', prefix =
'crawl-data/CC-MAIN-2016-50/segments'):
return list_objects(bucket_name = bucket_name, prefix = prefix)
def main():
"""
get one file for testing
"""
gen = list_objects(bucket_name = 'commoncrawl', prefix =
'crawl-data/CC-MAIN-2016-50/segments')
key = next(gen)
s3 = boto3.resource('s3')
bucket = s3.Bucket('commoncrawl')
bucket.download_file(key, '/tmp/warc_ex.gz')
def get_crawl_paths(the_type, recursion_limit = None):
assert the_type in ['crawldiagnostics']
return list(filter(lambda x: os.path.split(os.path.split(x)[0])[1] == the_type,
get_segments()))
if __name__ == '__main__':
#main()
l = get_crawl_paths(the_type = 'crawldiagnostics')
print(len(l))
| mit | 6,560,017,055,906,072,000 | -4,016,037,011,996,083,000 | 29.666667 | 83 | 0.604348 | false |
frohoff/Empire | lib/modules/python/situational_awareness/network/active_directory/get_domaincontrollers.py | 12 | 4374 | class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Get Domain Controllers',
# list of one or more authors for the module
'Author': ['@424f424f'],
# more verbose multi-line description of the module
            'Description': 'This module will list all domain controllers from Active Directory.',
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': ['']
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run on.',
'Required' : True,
'Value' : ''
},
'LDAPAddress' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'LDAP IP/Hostname',
'Required' : True,
'Value' : ''
},
'BindDN' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : '[email protected]',
'Required' : True,
'Value' : ''
},
'Password' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Password to connect to LDAP',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
LDAPAddress = self.options['LDAPAddress']['Value']
BindDN = self.options['BindDN']['Value']
password = self.options['Password']['Value']
# the Python script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
script = """
import sys, os, subprocess, re
BindDN = "%s"
LDAPAddress = "%s"
password = "%s"
regex = re.compile('.+@([^.]+)\..+')
global tld
match = re.match(regex, BindDN)
tld = match.group(1)
global ext
ext = BindDN.split('.')[1]
cmd = \"""ldapsearch -x -h {} -b "dc={},dc={}" -D {} -w {} "(&(objectcategory=Computer)(userAccountControl:1.2.840.113556.1.4.803:=8192))" ""\".format(LDAPAddress, tld, ext, BindDN, password)
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output2 = subprocess.Popen(["grep", "name:"],stdin=output.stdout, stdout=subprocess.PIPE,universal_newlines=True)
output.stdout.close()
out,err = output2.communicate()
print ""
print out
""" % (BindDN, LDAPAddress, password)
return script
| bsd-3-clause | -1,431,365,142,358,546,000 | -897,416,157,608,408,000 | 36.067797 | 191 | 0.549154 | false |
semplice/alan2 | alan-config.py | 1 | 5617 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# alan2 - An openbox menu builder
# Copyright (C) 2013 Semplice Project
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Authors:
# Eugenio "g7" Paolantonio <[email protected]>
#
import xml.etree.ElementTree as etree
import alan.core.tree_helpers as tree_helpers
import alan.core.config as config
import argparse
import os
import sys
# Create and parse arguments
parser = argparse.ArgumentParser(description="Modifies Openbox configuration to enable/disable alan2 modules.")
parser.add_argument(
"extension",
nargs="?",
help="the extension to process",
)
parser.add_argument(
"-i", "--directory",
help="directory where to look for configuration files (default: ~/.config)",
default="~/.config"
)
parser.add_argument(
"-p", "--profile",
help="the profile to use"
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-e", "--enable",
help="enable the extension",
action="store_true"
)
group.add_argument(
"-d", "--disable",
help="disable the extension",
action="store_true"
)
group.add_argument(
"-l", "--list",
help="list active estensions",
action="store_true"
)
group.add_argument(
"-s", "--setup",
help="setups alan2",
action="store_true"
)
args = parser.parse_args()
if not (args.list or args.setup) and not args.extension:
raise Exception("You must specify an extension!")
## Welcome to alan2!
DIRECTORY = os.path.expanduser(args.directory)
OPENBOX_CONFIGURATION_DIR = os.path.join(DIRECTORY, "openbox")
DEFAULT_PATH=os.path.join(DIRECTORY, "alan-menus")
# Open alan2 configuration
configuration = config.Configuration(args.extension, DIRECTORY, args.profile)
map_as_main = configuration.settings["alan"]["map_as_main"]
if args.extension:
args.extension = args.extension.split(" ")
# Check for map_as_main
if map_as_main in args.extension:
# Found it, ensure we put it at the end
if args.extension.index(map_as_main) != len(args.extension)-1:
# It isn't at the end, remove it and reappend again
args.extension.remove(map_as_main)
args.extension.append(map_as_main)
# Open openbox configuration
tree = etree.parse(os.path.join(OPENBOX_CONFIGURATION_DIR, "rc.xml"), tree_helpers.PIParser())
document = tree.getroot()
root = document.find("ob:openbox_config", namespaces={"ob":"http://openbox.org/3.5/rc"})
if root is None:
# openbox-3.4
root = document.find("ob3:openbox_config", namespaces={"ob3":"http://openbox.org/3.4/rc"})
namespaces = {"ob":"http://openbox.org/3.4/rc"}
else:
namespaces = {"ob":"http://openbox.org/3.5/rc"}
etree.register_namespace('',namespaces["ob"])
menu = root.find("ob:menu", namespaces=namespaces)
# Get and parse all enabled modules...
ENABLED_MODULES = {}
_openbox_menu = []
for child in menu.findall("ob:file", namespaces=namespaces):
if os.path.dirname(child.text) == DEFAULT_PATH:
# it's ours!
ENABLED_MODULES[".".join(os.path.basename(child.text).split(".")[:-1])] = child
elif os.path.basename(child.text) in ("menu.xml","menu-static.xml"):
# Openbox default? Maybe. Cache the child, so that we can use it
# if we are going to setup alan2.
_openbox_menu.append(child)
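# Illustrative rc.xml fragment handled above (added comment; the path is
# hypothetical):
#   <menu><file>/home/user/.config/alan-menus/launcher.xml</file></menu>
# Entries under DEFAULT_PATH are treated as alan2 modules, while menu.xml
# and menu-static.xml are cached as Openbox defaults.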
# What should we do?
if args.setup:
# Setup!
for _menu in _openbox_menu:
# Remove found menus, they will clash with alan2 *for sure*.
menu.remove(_menu)
# If there are some extensions in the arguments, it's a good thing
# to set args.enable to True so that we do not need to recall this
# executable to enable things...
if args.extension:
args.enable = True
if args.list:
# Should do a list!
for module, child in ENABLED_MODULES.items():
print("Extension '%s', cached menu file at %s." % (module, child.text))
sys.exit()
elif args.enable:
# Should enable a module!
for extension in args.extension:
if extension in ENABLED_MODULES:
# Already enabled!
raise Exception("Extension %s already enabled!" % extension)
ext = etree.SubElement(menu, "file")
ext.text = os.path.join(DEFAULT_PATH, "%s.xml" % extension)
elif args.disable:
# Should remove a module!
for extension in args.extension:
if not extension in ENABLED_MODULES:
# Not found :(
raise Exception("Extension %s is not currently enabled!" % extension)
elif extension == map_as_main:
			# Removing the main module would be pointless...
raise Exception("Extension %s is currently mapped as the main extension in alan2. Please change alan2's main module in order to disable this." % extension)
menu.remove(ENABLED_MODULES[extension])
del ENABLED_MODULES[extension]
# This is not a great thing to do, but we must: ensure we have the module
# mapped as main at the end of the subsection.
if map_as_main in ENABLED_MODULES:
# Ok, it's there, and it hasn't be touched (likely it is not the last anymore)
menu.remove(ENABLED_MODULES[map_as_main])
menu.append(ENABLED_MODULES[map_as_main])
tree_helpers.indent(root)
tree._setroot(root) # fixme?
tree.write(os.path.join(OPENBOX_CONFIGURATION_DIR, "rc.xml"),
xml_declaration=True,
encoding="utf-8",
method="xml"
)
| gpl-3.0 | 7,184,922,925,505,868,000 | 5,677,822,347,598,009,000 | 30.205556 | 158 | 0.718177 | false |
llamberson/git-cola | cola/widgets/highlighter.py | 11 | 3404 | from __future__ import division, absolute_import, unicode_literals
from PyQt4 import QtCore, QtGui
from cola.compat import ustr
have_pygments = True
try:
from pygments.styles import get_style_by_name
from pygments import lex
from pygments.util import ClassNotFound
from pygments.lexers import get_lexer_for_filename
except ImportError:
have_pygments = False
def highlight_document(edit, filename):
doc = edit.document()
if not have_pygments:
return
try:
lexer = get_lexer_for_filename(filename, stripnl=False)
except ClassNotFound:
return
style = get_style_by_name("default")
font = doc.defaultFont()
base_format = QtGui.QTextCharFormat()
base_format.setFont(font)
token_formats = {}
window = edit.window()
if hasattr(window, "processEvents"):
processEvents = window.processEvents
else:
processEvents = QtCore.QCoreApplication.processEvents
def get_token_format(token):
if token in token_formats:
return token_formats[token]
if token.parent:
parent_format = get_token_format(token.parent)
else:
parent_format = base_format
format = QtGui.QTextCharFormat(parent_format)
font = format.font()
if style.styles_token(token):
tstyle = style.style_for_token(token)
            if tstyle['color']:
                format.setForeground(QtGui.QColor("#" + tstyle['color']))
            if tstyle['bold']:
                font.setWeight(QtGui.QFont.Bold)
            if tstyle['italic']:
                font.setItalic(True)
            if tstyle['underline']:
                format.setFontUnderline(True)
            if tstyle['bgcolor']:
                format.setBackground(QtGui.QColor("#" + tstyle['bgcolor']))
            # format.font() returns a copy, so apply the modified font back
            # to the character format for the weight/italic changes to stick.
            format.setFont(font)
# No way to set this for a QTextCharFormat
#if tstyle['border']: format.
token_formats[token] = format
return format
text = ustr(doc.toPlainText())
block_count = 0
block = doc.firstBlock()
assert(isinstance(block, QtGui.QTextBlock))
block_pos = 0
block_len = block.length()
block_formats = []
for token, ttext in lex(text, lexer):
format_len = len(ttext)
format = get_token_format(token)
while format_len > 0:
format_range = QtGui.QTextLayout.FormatRange()
format_range.start = block_pos
format_range.length = min(format_len, block_len)
format_range.format = format
block_formats.append(format_range)
block_len -= format_range.length
format_len -= format_range.length
block_pos += format_range.length
if block_len == 0:
block.layout().setAdditionalFormats(block_formats)
doc.markContentsDirty(block.position(), block.length())
block = block.next()
block_pos = 0
block_len = block.length()
block_formats = []
block_count += 1
if block_count % 100 == 0:
processEvents()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
python = QtGui.QPlainTextEdit()
f = open(__file__, 'r')
python.setPlainText(f.read())
f.close()
python.setWindowTitle('python')
python.show()
highlight_document(python, __file__)
sys.exit(app.exec_())
| gpl-2.0 | -2,517,777,363,859,935,700 | 8,062,278,643,279,774,000 | 29.666667 | 92 | 0.601058 | false |
eharney/nova | nova/objects/utils.py | 14 | 4736 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for objects"""
import datetime
import iso8601
import netaddr
import six
from nova.network import model as network_model
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
def datetime_or_none(dt):
"""Validate a datetime or None value."""
if dt is None:
return None
elif isinstance(dt, datetime.datetime):
if dt.utcoffset() is None:
# NOTE(danms): Legacy objects from sqlalchemy are stored in UTC,
# but are returned without a timezone attached.
# As a transitional aid, assume a tz-naive object is in UTC.
return dt.replace(tzinfo=iso8601.iso8601.Utc())
else:
return dt
raise ValueError('A datetime.datetime is required here')
# NOTE(danms): Being tolerant of isotime strings here will help us
# during our objects transition
def datetime_or_str_or_none(val):
if isinstance(val, six.string_types):
return timeutils.parse_isotime(val)
return datetime_or_none(val)
def int_or_none(val):
"""Attempt to parse an integer value, or None."""
if val is None:
return val
else:
return int(val)
def str_value(val):
if val is None:
raise ValueError(_('None is not valid here'))
return unicode(val)
def str_or_none(val):
"""Attempt to stringify a value, or None."""
if val is None:
return val
else:
return str_value(val)
def cstring(val):
if val is None:
raise ValueError(_('None is not valid here'))
return str(val)
def ip_or_none(version):
"""Return a version-specific IP address validator."""
def validator(val, version=version):
if val is None:
return val
else:
return netaddr.IPAddress(val, version=version)
return validator
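# Illustrative usage (a sketch): ``ip_or_none(4)`` returns a validator that
# maps '10.0.0.1' to netaddr.IPAddress('10.0.0.1') and passes None through.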
def nested_object(objclass, none_ok=True):
def validator(val, objclass=objclass):
if none_ok and val is None:
return val
if isinstance(val, objclass):
return val
raise ValueError('An object of class %s is required here' % objclass)
return validator
def network_model_or_none(val):
"""Validate/Convert to a network_model.NetworkInfo, or None."""
if val is None:
return val
if isinstance(val, network_model.NetworkInfo):
return val
return network_model.NetworkInfo.hydrate(val)
def list_of_strings_or_none(val):
if val is None:
return val
if not isinstance(val, list):
raise ValueError(_('A list of strings is required here'))
if not all([isinstance(x, six.string_types) for x in val]):
raise ValueError(_('Invalid values found in list '
'(strings are required)'))
return val
def dict_of_strings_or_none(val):
if val is None:
return val
if not isinstance(val, dict):
try:
val = dict(val.iteritems())
except Exception:
raise ValueError(_('A dict of strings is required here'))
if not all([isinstance(x, six.string_types) for x in val.keys()]):
raise ValueError(_('Invalid keys found in dict '
'(strings are required)'))
if not all([isinstance(x, six.string_types) for x in val.values()]):
raise ValueError(_('Invalid values found in dict '
'(strings are required)'))
return val
def dt_serializer(name):
"""Return a datetime serializer for a named attribute."""
def serializer(self, name=name):
if getattr(self, name) is not None:
return timeutils.isotime(getattr(self, name))
else:
return None
return serializer
def dt_deserializer(instance, val):
"""A deserializer method for datetime attributes."""
if val is None:
return None
else:
return timeutils.parse_isotime(val)
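# Illustrative pairing of the two helpers (a sketch; the names below are
# placeholders, not part of this module):
#   serialize_created_at = dt_serializer('created_at')
#   isotime_str = serialize_created_at(obj)   # obj.created_at -> ISO 8601 str
#   dt = dt_deserializer(obj, isotime_str)    # back to a tz-aware datetime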
def obj_serializer(name):
def serializer(self, name=name):
if getattr(self, name) is not None:
return getattr(self, name).obj_to_primitive()
else:
return None
return serializer
| apache-2.0 | 5,113,214,749,341,501,000 | 8,573,290,537,670,814,000 | 28.6 | 78 | 0.639358 | false |
jhg/django | tests/gis_tests/inspectapp/tests.py | 17 | 8167 | from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.core.management import call_command
from django.db import connection, connections
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import modify_settings
from django.utils.six import StringIO
from ..test_data import TEST_DATA
if HAS_GDAL:
from django.contrib.gis.gdal import Driver, GDALException
from django.contrib.gis.utils.ogrinspect import ogrinspect
from .models import AllOGRFields
@skipUnless(HAS_GDAL, "InspectDbTests needs GDAL support")
class InspectDbTests(TestCase):
@skipUnlessDBFeature("gis_enabled")
def test_geom_columns(self):
"""
Test the geo-enabled inspectdb command.
"""
out = StringIO()
call_command(
'inspectdb',
table_name_filter=lambda tn: tn == 'inspectapp_allogrfields',
stdout=out
)
output = out.getvalue()
if connection.features.supports_geometry_field_introspection:
self.assertIn('geom = models.PolygonField()', output)
self.assertIn('point = models.PointField()', output)
else:
self.assertIn('geom = models.GeometryField(', output)
self.assertIn('point = models.GeometryField(', output)
self.assertIn('objects = models.GeoManager()', output)
@skipUnlessDBFeature("supports_3d_storage")
def test_3d_columns(self):
out = StringIO()
call_command(
'inspectdb',
table_name_filter=lambda tn: tn == 'inspectapp_fields3d',
stdout=out
)
output = out.getvalue()
if connection.features.supports_geometry_field_introspection:
self.assertIn('point = models.PointField(dim=3)', output)
self.assertIn('line = models.LineStringField(dim=3)', output)
self.assertIn('poly = models.PolygonField(dim=3)', output)
else:
self.assertIn('point = models.GeometryField(', output)
self.assertIn('line = models.GeometryField(', output)
self.assertIn('poly = models.GeometryField(', output)
self.assertIn('objects = models.GeoManager()', output)
@skipUnless(HAS_GDAL, "OGRInspectTest needs GDAL support")
@modify_settings(
INSTALLED_APPS={'append': 'django.contrib.gis'},
)
class OGRInspectTest(TestCase):
maxDiff = 1024
def test_poly(self):
shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp')
model_def = ogrinspect(shp_file, 'MyModel')
expected = [
'# This is an auto-generated Django model module created by ogrinspect.',
'from django.contrib.gis.db import models',
'',
'class MyModel(models.Model):',
' float = models.FloatField()',
' int = models.FloatField()',
' str = models.CharField(max_length=80)',
' geom = models.PolygonField(srid=-1)',
' objects = models.GeoManager()',
]
self.assertEqual(model_def, '\n'.join(expected))
def test_poly_multi(self):
shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp')
model_def = ogrinspect(shp_file, 'MyModel', multi_geom=True)
self.assertIn('geom = models.MultiPolygonField(srid=-1)', model_def)
# Same test with a 25D-type geometry field
shp_file = os.path.join(TEST_DATA, 'gas_lines', 'gas_leitung.shp')
model_def = ogrinspect(shp_file, 'MyModel', multi_geom=True)
self.assertIn('geom = models.MultiLineStringField(srid=-1)', model_def)
def test_date_field(self):
shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp')
model_def = ogrinspect(shp_file, 'City')
expected = [
'# This is an auto-generated Django model module created by ogrinspect.',
'from django.contrib.gis.db import models',
'',
'class City(models.Model):',
' name = models.CharField(max_length=80)',
' population = models.FloatField()',
' density = models.FloatField()',
' created = models.DateField()',
' geom = models.PointField(srid=-1)',
' objects = models.GeoManager()',
]
self.assertEqual(model_def, '\n'.join(expected))
def test_time_field(self):
        # Get the database identifier used by OGR; if None is returned,
        # GDAL does not have the required support compiled in.
ogr_db = get_ogr_db_string()
if not ogr_db:
self.skipTest("Unable to setup an OGR connection to your database")
try:
# Writing shapefiles via GDAL currently does not support writing OGRTime
# fields, so we need to actually use a database
model_def = ogrinspect(ogr_db, 'Measurement',
layer_key=AllOGRFields._meta.db_table,
decimal=['f_decimal'])
except GDALException:
self.skipTest("Unable to setup an OGR connection to your database")
self.assertTrue(model_def.startswith(
'# This is an auto-generated Django model module created by ogrinspect.\n'
'from django.contrib.gis.db import models\n'
'\n'
'class Measurement(models.Model):\n'
))
# The ordering of model fields might vary depending on several factors (version of GDAL, etc.)
self.assertIn(' f_decimal = models.DecimalField(max_digits=0, decimal_places=0)', model_def)
self.assertIn(' f_int = models.IntegerField()', model_def)
self.assertIn(' f_datetime = models.DateTimeField()', model_def)
self.assertIn(' f_time = models.TimeField()', model_def)
self.assertIn(' f_float = models.FloatField()', model_def)
self.assertIn(' f_char = models.CharField(max_length=10)', model_def)
self.assertIn(' f_date = models.DateField()', model_def)
self.assertIsNotNone(re.search(
r' geom = models.PolygonField\(([^\)])*\)\n' # Some backends may have srid=-1
r' objects = models.GeoManager\(\)', model_def))
def test_management_command(self):
shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp')
out = StringIO()
call_command('ogrinspect', shp_file, 'City', stdout=out)
output = out.getvalue()
self.assertIn('class City(models.Model):', output)
def get_ogr_db_string():
"""
Construct the DB string that GDAL will use to inspect the database.
GDAL will create its own connection to the database, so we re-use the
connection settings from the Django test.
"""
db = connections.databases['default']
# Map from the django backend into the OGR driver name and database identifier
# http://www.gdal.org/ogr/ogr_formats.html
#
# TODO: Support Oracle (OCI).
drivers = {
'django.contrib.gis.db.backends.postgis': ('PostgreSQL', "PG:dbname='%(db_name)s'", ' '),
'django.contrib.gis.db.backends.mysql': ('MySQL', 'MYSQL:"%(db_name)s"', ','),
'django.contrib.gis.db.backends.spatialite': ('SQLite', '%(db_name)s', '')
}
db_engine = db['ENGINE']
if db_engine not in drivers:
return None
drv_name, db_str, param_sep = drivers[db_engine]
# Ensure that GDAL library has driver support for the database.
try:
Driver(drv_name)
    except Exception:
return None
# SQLite/Spatialite in-memory databases
if db['NAME'] == ":memory:":
return None
# Build the params of the OGR database connection string
params = [db_str % {'db_name': db['NAME']}]
def add(key, template):
value = db.get(key, None)
# Don't add the parameter if it is not in django's settings
if value:
params.append(template % value)
add('HOST', "host='%s'")
add('PORT', "port='%s'")
add('USER', "user='%s'")
add('PASSWORD', "password='%s'")
return param_sep.join(params)
| bsd-3-clause | -1,789,920,805,498,746,400 | -7,217,248,963,428,180,000 | 38.076555 | 103 | 0.609771 | false |
helenst/django | django/contrib/gis/tests/relatedapp/models.py | 36 | 1892 | from django.contrib.gis.db import models
from django.utils.encoding import python_2_unicode_compatible
class SimpleModel(models.Model):
objects = models.GeoManager()
class Meta:
abstract = True
app_label = 'relatedapp'
@python_2_unicode_compatible
class Location(SimpleModel):
point = models.PointField()
def __str__(self):
return self.point.wkt
@python_2_unicode_compatible
class City(SimpleModel):
name = models.CharField(max_length=50)
state = models.CharField(max_length=2)
location = models.ForeignKey(Location)
def __str__(self):
return self.name
class AugmentedLocation(Location):
extra_text = models.TextField(blank=True)
objects = models.GeoManager()
class Meta:
app_label = 'relatedapp'
class DirectoryEntry(SimpleModel):
listing_text = models.CharField(max_length=50)
location = models.ForeignKey(AugmentedLocation)
@python_2_unicode_compatible
class Parcel(SimpleModel):
name = models.CharField(max_length=30)
city = models.ForeignKey(City)
center1 = models.PointField()
# Throwing a curveball w/`db_column` here.
center2 = models.PointField(srid=2276, db_column='mycenter')
border1 = models.PolygonField()
border2 = models.PolygonField(srid=2276)
def __str__(self):
return self.name
# These use the GeoManager but do not have any geographic fields.
class Author(SimpleModel):
name = models.CharField(max_length=100)
dob = models.DateField()
class Article(SimpleModel):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, unique=True)
class Book(SimpleModel):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, related_name='books', null=True)
class Event(SimpleModel):
name = models.CharField(max_length=100)
when = models.DateTimeField()
| bsd-3-clause | 8,864,165,096,081,388,000 | -2,588,959,469,318,402,000 | 23.25641 | 71 | 0.704017 | false |
DrKylstein/tiled-vorticons | keenlib/ted15.py | 1 | 3290 | '''
Commander Keen 1-3 level loader/saver
2014 <[email protected]>
'''
from __future__ import print_function
import math
import rlew
import file_gen_util as util
RLEW_FLAG = 0xFEFE
HEADER_FIELDS = ('width', 'height', 'plane_count', 'unk1', 'unk2', 'unk3',
'unk4', 'plane_size', 'unk5', 'unk6', 'unk7', 'unk8', 'unk9',
'unkA', 'unkB', 'unkC')
HEADER_LENGTH = len(HEADER_FIELDS)
def log(words):
for word in words:
print(word)
yield word
def load(file):
words = rlew.unrlew(util.first(util.uniform_file( "<H", open(file, 'rb'))),
0xFEFE)
for i in range(2):
words.next()
return _level(words)
def save(file, level):
_regen_header(level)
size = HEADER_LENGTH*2 + level['plane_size']*2
size = [size & 0xFFFF, size >> 16]
compressed = size+[item for item in rlew.rlew(_dump_level(level),
RLEW_FLAG)]
#compressed = size+[item for item in _dump_level(level)]
util.uniform_file_out('<H', open(file, 'wb'), util.tupelize(iter(
compressed)))
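# Illustrative round trip (a sketch; the file names are placeholders):
#   level = load('LEVEL01.CK1')
#   level['tiles'][0][0] = 0x0000       # edit the top-left tile
#   save('LEVEL01_EDITED.CK1', level)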
def _plane(width, height, remainder, words):
plane = [[words.next() for x in range(width)] for y in range(height)]
for i in range(remainder):
words.next()
return plane
def _regen_header(level):
if not 'plane_size' in level:
size = level['width']*level['height']*2
level['plane_size'] = int(math.ceil(size/16.0)*16)
if not 'plane_count' in level:
level['plane_count'] = 2
for key in HEADER_FIELDS:
if key not in level:
level[key] = 0
def _level(words):
level = {}
header_words = []
for i in range(HEADER_LENGTH):
header_words.append(words.next())
level.update(dict(zip(HEADER_FIELDS, header_words)))
remainder = _plane_padding(level)
level['tiles'] = _plane(level['width'], level['height'], remainder, words)
level['sprites'] = _plane(level['width'], level['height'], remainder,
words)
return level
def _dump_level(level):
for key in HEADER_FIELDS:
yield level[key]
remainder = _plane_padding(level)
for row in level['tiles']:
for tile in row:
yield tile
for i in range(remainder):
yield 0
for row in level['sprites']:
for sprite in row:
yield sprite
for i in range(remainder):
yield 0
def _plane_padding(level):
return level['plane_size']/2 - level['width']*level['height']
if __name__ == '__main__':
level = load('LEVEL80.CK1')
for key, item in level.items():
print(key, item)
print(level['width'], level['height'])
for row in level['tiles']:
for tile in row:
print("{:<7}".format(hex(tile)), end=' ')
print()
loop = _level(_dump_level(level))
if loop == level:
print("Great Success!")
else:
print("Fission Mailed!")
print(loop)
print(loop['width'], loop['height'])
for row in loop['tiles']:
for tile in row:
print("{:<7}".format(hex(tile)), end=' ')
print()
save('LEVEL99.CK1', level)
| mit | -5,049,402,394,613,196,000 | 2,771,636,892,217,241,000 | 25.111111 | 80 | 0.547416 | false |
gabriel-samfira/jrunner | jrunner/jobqueue/workers/simple.py | 1 | 1445 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jrunner.common.amqpclient.client import Consume
from jrunner.jobqueue.dispatch import dispatch
from jrunner.common import has_gevent_enabled
from jrunner.openstack.common import log as logging
from jrunner.jobqueue.workers import *
from oslo.config import cfg
logging.setup('jrunner')
opts = [
cfg.StrOpt(
'queue',
default='controller',
help='Worker queue'),
]
CONF = cfg.CONF
CONF.register_opts(opts, 'jobqueue')
LOG = logging.getLogger(__name__)
if has_gevent_enabled():
import gevent
sleep = gevent.sleep
else:
import time
sleep = time.sleep
def main():
while True:
try:
c = Consume(BROKER_EXCHANGE, CONF.jobqueue.queue)
c.consume(dispatch)
except Exception as err:
LOG.exception(err)
finally:
sleep(3)
| apache-2.0 | 1,755,302,202,652,290,000 | 364,421,435,665,588,200 | 26.264151 | 75 | 0.70519 | false |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/jupyter_client/channels.py | 9 | 6729 | """Base classes to manage a Client's interaction with a running kernel"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
import atexit
import errno
from threading import Thread
import time
import zmq
# import ZMQError in top-level namespace, to avoid ugly attribute-error messages
# during garbage collection of threads at exit:
from zmq import ZMQError
from jupyter_client import protocol_version_info
from .channelsabc import HBChannelABC
#-----------------------------------------------------------------------------
# Constants and exceptions
#-----------------------------------------------------------------------------
major_protocol_version = protocol_version_info[0]
class InvalidPortNumber(Exception):
pass
class HBChannel(Thread):
"""The heartbeat channel which monitors the kernel heartbeat.
Note that the heartbeat channel is paused by default. As long as you start
this channel, the kernel manager will ensure that it is paused and un-paused
as appropriate.
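    Illustrative usage (a sketch; ``session`` and ``hb_port`` stand in for a
    real Session instance and the kernel's heartbeat port)::
        hb = HBChannel(context=zmq.Context.instance(), session=session,
                       address=('127.0.0.1', hb_port))
        hb.start()               # begin pinging the kernel
        alive = hb.is_beating()  # False once the kernel stops responding
        hb.stop()                # terminate the thread cleanly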
"""
context = None
session = None
socket = None
address = None
_exiting = False
time_to_dead = 1.
poller = None
_running = None
_pause = None
_beating = None
def __init__(self, context=None, session=None, address=None):
"""Create the heartbeat monitor thread.
Parameters
----------
context : :class:`zmq.Context`
The ZMQ context to use.
session : :class:`session.Session`
The session to use.
address : zmq url
Standard (ip, port) tuple that the kernel is listening on.
"""
super(HBChannel, self).__init__()
self.daemon = True
self.context = context
self.session = session
if isinstance(address, tuple):
if address[1] == 0:
message = 'The port number for a channel cannot be 0.'
raise InvalidPortNumber(message)
address = "tcp://%s:%i" % address
self.address = address
atexit.register(self._notice_exit)
# running is False until `.start()` is called
self._running = False
# don't start paused
self._pause = False
self.poller = zmq.Poller()
def _notice_exit(self):
self._exiting = True
def _create_socket(self):
if self.socket is not None:
# close previous socket, before opening a new one
self.poller.unregister(self.socket)
self.socket.close()
self.socket = self.context.socket(zmq.REQ)
self.socket.linger = 1000
self.socket.connect(self.address)
self.poller.register(self.socket, zmq.POLLIN)
def _poll(self, start_time):
"""poll for heartbeat replies until we reach self.time_to_dead.
Ignores interrupts, and returns the result of poll(), which
will be an empty list if no messages arrived before the timeout,
or the event tuple if there is a message to receive.
"""
until_dead = self.time_to_dead - (time.time() - start_time)
# ensure poll at least once
until_dead = max(until_dead, 1e-3)
events = []
while True:
try:
events = self.poller.poll(1000 * until_dead)
except ZMQError as e:
if e.errno == errno.EINTR:
# ignore interrupts during heartbeat
# this may never actually happen
until_dead = self.time_to_dead - (time.time() - start_time)
until_dead = max(until_dead, 1e-3)
pass
else:
raise
except Exception:
if self._exiting:
break
else:
raise
else:
break
return events
def run(self):
"""The thread's main activity. Call start() instead."""
self._create_socket()
self._running = True
self._beating = True
while self._running:
if self._pause:
# just sleep, and skip the rest of the loop
time.sleep(self.time_to_dead)
continue
since_last_heartbeat = 0.0
# io.rprint('Ping from HB channel') # dbg
# no need to catch EFSM here, because the previous event was
# either a recv or connect, which cannot be followed by EFSM
self.socket.send(b'ping')
request_time = time.time()
ready = self._poll(request_time)
if ready:
self._beating = True
# the poll above guarantees we have something to recv
self.socket.recv()
# sleep the remainder of the cycle
remainder = self.time_to_dead - (time.time() - request_time)
if remainder > 0:
time.sleep(remainder)
continue
else:
# nothing was received within the time limit, signal heart failure
self._beating = False
since_last_heartbeat = time.time() - request_time
self.call_handlers(since_last_heartbeat)
# and close/reopen the socket, because the REQ/REP cycle has been broken
self._create_socket()
continue
def pause(self):
"""Pause the heartbeat."""
self._pause = True
def unpause(self):
"""Unpause the heartbeat."""
self._pause = False
def is_beating(self):
"""Is the heartbeat running and responsive (and not paused)."""
if self.is_alive() and not self._pause and self._beating:
return True
else:
return False
def stop(self):
"""Stop the channel's event loop and join its thread."""
self._running = False
self.join()
self.close()
def close(self):
if self.socket is not None:
try:
self.socket.close(linger=0)
except Exception:
pass
self.socket = None
def call_handlers(self, since_last_heartbeat):
"""This method is called in the ioloop thread when a message arrives.
Subclasses should override this method to handle incoming messages.
        It is important to remember that this method runs in the heartbeat
        thread, so some coordination is needed to ensure that application-level
        handlers are invoked in the application thread.
"""
pass
HBChannelABC.register(HBChannel)
| mit | 4,149,026,861,040,135,000 | -7,140,702,091,824,415,000 | 31.82439 | 88 | 0.561599 | false |
Diapolo/bitcoin | qa/rpc-tests/disablewallet.py | 102 | 1820 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Exercise API with -disablewallet.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class DisableWalletTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-disablewallet']])
self.is_network_split = False
self.sync_all()
def run_test (self):
# Check regression: https://github.com/bitcoin/bitcoin/issues/6963#issuecomment-154548880
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert(x['isvalid'] == True)
# Checking mining to an address without a wallet
try:
self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
except JSONRPCException as e:
assert("Invalid address" not in e.error['message'])
assert("ProcessNewBlock, block not accepted" not in e.error['message'])
assert("Couldn't create new block" not in e.error['message'])
try:
self.nodes[0].generatetoaddress(1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
raise AssertionError("Must not mine to invalid address!")
except JSONRPCException as e:
assert("Invalid address" in e.error['message'])
if __name__ == '__main__':
DisableWalletTest ().main ()
| mit | 964,313,444,175,554,700 | 1,936,937,021,881,380,600 | 36.916667 | 97 | 0.667033 | false |
poppogbr/genropy | legacy_packages/dev/main.py | 1 | 1554 | #!/usr/bin/env python
# encoding: utf-8
import sys
import os
class Package(object):
def config_attributes(self):
return dict(comment='dev package',
name_short='dev', name_long='dev', name_full='dev')
def config_db(self, pkg):
#developer table
t = pkg.table('developer', pkey='id')
t.column('id', size='22')
t.column('shortname', size='12')
t.column('name', size='30', validate_case='capitalize')
t.column('password', size='12', input_mode='hidden')
t.column('tags')
#customer table
t = pkg.table('customer', pkey='id')
t.column('id', size='22')
t.column('shortname', size='12')
t.column('name', size='50')
t.column('password', size='12')
t.column('tags')
#package table
t = pkg.table('package', pkey='id')
t.column('id', size='6')
t.column('description', size='40')
t.column('customer_id', size='22').relation('dev.customer.id')
#role table
t = pkg.table('role', pkey='code')
t.column('code', size='10')
t.column('description', size='30')
#package-developer table
t = pkg.table('package_developer')
t.column('id', size='22', pkey='id')
t.column('package_id', size='6').relation('dev.package.id')
t.column('developer_id', size='22').relation('dev.developer.id')
t.column('role', size='10').relation('dev.role.code')
| lgpl-2.1 | 2,510,057,313,607,591,000 | 4,123,003,205,361,443,300 | 30.714286 | 72 | 0.530245 | false |
kirca/odoo | openerp/tools/misc.py | 9 | 42537 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import zipfile
from collections import defaultdict, Mapping
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
def find_in_path(name):
try:
return which(name)
except IOError:
return None
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
return None
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
args2 = (prog,) + args
    # the null device must be opened writable, since it serves as stdout
    with open(os.devnull, 'w') as dn:
        return subprocess.call(args2, stdout=dn, stderr=subprocess.STDOUT)
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix"))
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix"))
return pop.stdin, pop.stdout
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
    >>> file_open('hr/report/timesheet.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
raise IOError('Report %r doesn\'t exist or deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis ([email protected])
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
        return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
class currency(float):
""" Deprecate
.. warning::
Don't use ! Use res.currency.round()
"""
def __init__(self, value, accuracy=2, rounding=None):
if rounding is None:
rounding=10**-accuracy
self.rounding=rounding
self.accuracy=accuracy
def __new__(cls, value, accuracy=2, rounding=None):
return float.__new__(cls, round(value, accuracy))
#def __str__(self):
# display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
# return str(display_value)
def to_xml(s):
return s.replace('&','&').replace('<','<').replace('>','>')
def get_iso_codes(lang):
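    """Shorten a locale code when the language and country parts coincide.
    Illustrative doctests (derived from the logic below):
    >>> get_iso_codes('fr_FR')
    'fr'
    >>> get_iso_codes('fr_BE')
    'fr_BE'
    """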
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
ALL_LANGUAGES = {
'ab_RU': u'Abkhazian / аҧсуа',
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BS': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_CA': u'English (CA)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_HN': u'Spanish (HN) / Español (HN)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_NI': u'Spanish (NI) / Español (NI)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PR': u'Spanish (PR) / Español (PR)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_SV': u'Spanish (SV) / Español (SV)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CA': u'French (CA) / Français (CA)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'iu_CA': u'Inuktitut / ᐃᓄᒃᑎᑐᑦ',
'ja_JP': u'Japanese / 日本語',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'ml_IN': u'Malayalam / മലയാളം',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Flemish (BE) / Vlaams (BE)',
'oc_FR': u'Occitan (FR, post 1500) / Occitan',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'si_LK': u'Sinhalese / සිංහල',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'ur_PK': u'Urdu / اردو',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
'tlh_TLH': u'Klingon',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
    Input number: account or invoice number
    Output: the same number completed with its recursive mod10 check
    digit
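    Illustrative doctest (hand-computed from the codec table below):
    >>> mod10r('12345')
    '123457'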
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
def human_size(sz):
"""
Return the size in a human readable format
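    Illustrative doctests (values follow from the 1024 divisor used below):
    >>> human_size(500)
    '500.00 bytes'
    >>> human_size(1024 * 1024)
    '1.00 Mb'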
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
global __icons_list
return [(x, x) for x in __icons_list ]
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
def get_win32_timezone():
"""Attempt to return the "standard name" of the current timezone on a win32 system.
@return the standard name of the current win32 timezone, or False if it cannot be found.
"""
res = False
if sys.platform == "win32":
try:
import _winreg
hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS)
res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code
_winreg.CloseKey(current_tz_key)
_winreg.CloseKey(hklm)
except Exception:
pass
return res
def detect_server_timezone():
"""Attempt to detect the timezone to use on the server side.
Defaults to UTC if no working timezone can be found.
@return the timezone identifier as expected by pytz.timezone.
"""
try:
import pytz
except Exception:
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
# Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
# Option 3: the environment variable TZ
sources = [ (config['timezone'], 'OpenERP configuration'),
(time.tzname[0], 'time.tzname'),
(os.environ.get('TZ',False),'TZ environment variable'), ]
# Option 4: OS-specific: /etc/timezone on Unix
if os.path.exists("/etc/timezone"):
tz_value = False
        try:
            f = open("/etc/timezone")
            try:
                tz_value = f.read(128).strip()
            finally:
                # closing here avoids a NameError on 'f' when open() itself
                # fails, which the previous try/finally layout could raise
                f.close()
        except Exception:
            pass
sources.append((tz_value,"/etc/timezone file"))
# Option 5: timezone info from registry on Win32
if sys.platform == "win32":
# Timezone info is stored in windows registry.
# However this is not likely to work very well as the standard name
# of timezones in windows is rarely something that is known to pytz.
# But that's ok, it is always possible to use a config option to set
# it explicitly.
sources.append((get_win32_timezone(),"Windows Registry"))
for (value,source) in sources:
if value:
try:
tz = pytz.timezone(value)
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():
return "UTC"
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is not tzinfo in a datetime value (e.g when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
# see comments above, and babel's format_datetime assumes an UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
# LDML date format patterns uses letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
tz_offset=True, ignore_unparsable_time=True):
"""
Convert a source timestamp string into a destination timestamp string, attempting to apply the
correct offset if both the server and local timezone are recognized, or no
offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).
WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
the client would not be able to format/offset it correctly. DO NOT use it for returning
date fields directly, these are supposed to be handled by the client!!
@param src_tstamp_str: the str value containing the timestamp in the server timezone.
@param src_format: the format to use when parsing the server timestamp.
@param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
@param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
@param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed
using src_format or formatted using dst_format.
@return local/client formatted timestamp, expressed in the local/client timezone if possible
and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
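    Illustrative example (server timezone is UTC here; actual output depends
    on the local tz database)::
        server_to_local_timestamp('2014-01-01 12:00:00',
                                  DEFAULT_SERVER_DATETIME_FORMAT,
                                  DEFAULT_SERVER_DATETIME_FORMAT,
                                  'Europe/Brussels')
        # -> '2014-01-01 13:00:00' (UTC+1 in winter)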
"""
if not src_tstamp_str:
return False
res = src_tstamp_str
if src_format and dst_format:
# find out server timezone
server_tz = get_server_timezone()
try:
# dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
dt_value = datetime.strptime(src_tstamp_str, src_format)
if tz_offset and dst_tz_name:
try:
import pytz
src_tz = pytz.timezone(server_tz)
dst_tz = pytz.timezone(dst_tz_name)
src_dt = src_tz.localize(dt_value, is_dst=True)
dt_value = src_dt.astimezone(dst_tz)
except Exception:
pass
res = dt_value.strftime(dst_format)
except Exception:
# Normal ways to end up here are if strptime or strftime failed
if not ignore_unparsable_time:
return False
return res
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
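    Illustrative doctest (the default ``piece_maker`` builds tuples):
    >>> list(split_every(2, range(5)))
    [(0, 1), (2, 3), (4,)]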
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
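# Illustrative usage (a sketch; the attribute names are placeholders):
#   get_name = attrgetter('partner_id.name')       # -> record.partner_id.name
#   get_pair = attrgetter('id', 'partner_id.name')
#   get_pair(record)                 # -> (record.id, record.partner_id.name)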
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
    or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
the ``active_id`` name:
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
        do_stuff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, basestring),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
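# Illustrative usage sketch (editor's addition, not part of the original
# module): the muted record below is swallowed by the logging filter, so
# nothing is emitted while the context is active.
def _demo_mute_logger():
    with mute_logger('openerp.demo'):
        logging.getLogger('openerp.demo').error('this record is suppressed')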
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
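# Illustrative usage sketch (editor's addition, not part of the original
# module): index starts before the first element and ends 1 past the stream.
def _demo_counting_stream():
    stream = CountingStream(['a', 'b'])
    assert stream.index == -1          # nothing consumed yet
    assert stream.next() == 'a'
    assert stream.index == 0
    list(stream)                       # drain the remaining elements
    assert stream.index == 2           # 1 past the last element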
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
    Useful as a default value for methods
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
        defaultdict updates its length for each individually requested key; is
        that really useful?
"""
return 0
def __iter__(self):
"""
        same as len: defaultdict updates its iterable keyset with each key
        requested; is there a point to this?
"""
return iter([])
def __getitem__(self, item):
return self._value
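# Illustrative usage sketch (editor's addition, not part of the original
# module): every lookup returns the same value, and no key is memoized.
def _demo_constant_mapping():
    frozen = ConstantMapping(42)
    assert frozen['any key at all'] == 42
    # unlike defaultdict, requested keys are not remembered
    assert len(frozen) == 0 and list(frozen) == []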
def dumpstacks(sig=None, frame=None):
""" Signal handler: dump a stack trace for each existing thread."""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
for th in threading.enumerate()])
for threadId, stack in sys._current_frames().items():
thread_info = threads_info.get(threadId)
code.append("\n# Thread: %s (id:%s) (uid:%s)" %
(thread_info and thread_info['name'] or 'n/a',
threadId,
thread_info and thread_info['uid'] or 'n/a'))
for line in extract_stack(stack):
code.append(line)
if openerp.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
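# Typical wiring (editor's sketch, not part of the original module): dump
# all stacks when the process receives SIGQUIT (Ctrl-\ in POSIX shells).
# Registration is wrapped in a helper so importing this module stays free
# of side effects; callers may invoke it explicitly on POSIX platforms.
def _install_dumpstacks_handler():
    import signal
    signal.signal(signal.SIGQUIT, dumpstacks)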
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,858,356,993,506,823,000 | 89,752,326,104,470,450 | 33.842539 | 133 | 0.579524 | false |
chachan/nodeshot | nodeshot/interop/sync/synchronizers/openwisp.py | 5 | 1809 | from __future__ import absolute_import
from django.contrib.gis.geos import Point
from .base import XMLParserMixin, GenericGisSynchronizer
class OpenWisp(XMLParserMixin, GenericGisSynchronizer):
""" OpenWisp GeoRSS synchronizer class """
def parse(self):
""" parse data """
super(OpenWisp, self).parse()
self.parsed_data = self.parsed_data.getElementsByTagName('item')
def parse_item(self, item):
guid = self.get_text(item, 'guid')
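        # guids are assumed to look like "<name>2013-05-10 ..." (inferred
        # from the split below): cutting at the first "201" separates the
        # name from a 201x timestamp, but breaks for names containing "201".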
try:
name, created_at = guid.split('201', 1) # ugly hack
except ValueError:
name = guid.replace('_', '-')
created_at = None
name = name.replace('_', ' ')
description = self.get_text(item, 'title')
address = self.get_text(item, 'description')
updated_at = self.get_text(item, 'updated')
# ensure created_at and updated_at are dates
if created_at:
created_at = "201%s" % created_at
else:
created_at = None
if updated_at == '0':
updated_at = None
try:
lat, lng = self.get_text(item, 'georss:point').split(' ')
except IndexError:
# detail view
lat = self.get_text(item, 'georss:lat')
lng = self.get_text(item, 'georss:long')
# point object
geometry = Point(float(lng), float(lat))
result = {
"name": name,
"status": None,
"address": address,
"is_published": True,
"user": None,
"geometry": geometry,
"elev": None,
"description": description,
"notes": guid,
"added": created_at,
"updated": updated_at,
"data": {}
}
return result
| gpl-3.0 | 8,608,123,995,215,913,000 | 3,363,773,648,737,984,000 | 29.661017 | 72 | 0.529022 | false |
songmonit/CTTMSONLINE_V8 | addons/hr/__openerp__.py | 260 | 2372 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Directory',
'version': '1.1',
'category': 'Human Resources',
'sequence': 21,
'summary': 'Jobs, Departments, Employees Details',
'description': """
Human Resources Management
==========================
This application enables you to manage important aspects of your company's staff and other details such as their skills, contacts, working time...
You can manage:
---------------
* Employees and hierarchies: you can define your employees, link them to users and display hierarchies
* HR Departments
* HR Jobs
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'depends': ['base_setup','mail', 'resource', 'board'],
'data': [
'security/hr_security.xml',
'security/ir.model.access.csv',
'hr_view.xml',
'hr_installer.xml',
'hr_data.xml',
'res_config_view.xml',
'mail_hr_view.xml',
'res_users_view.xml',
'views/hr.xml',
],
'demo': ['hr_demo.xml'],
'test': [
'test/hr_users.yml',
'test/open2recruit2close_job.yml',
'test/hr_demo.yml',
],
'installable': True,
'application': True,
'auto_install': False,
'qweb': [ 'static/src/xml/suggestions.xml' ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 92,601,253,638,028,260 | -3,464,412,649,111,783,400 | 33.882353 | 146 | 0.591484 | false |
wndhydrnt/airflow | airflow/example_dags/example_http_operator.py | 3 | 2871 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
### Example HTTP operator and sensor
"""
import json
from datetime import timedelta
import airflow
from airflow import DAG
from airflow.operators.http_operator import SimpleHttpOperator
from airflow.sensors.http_sensor import HttpSensor
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(2),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
}
dag = DAG('example_http_operator', default_args=default_args)
dag.doc_md = __doc__
# t1 through t5 are examples of tasks created by instantiating operators
t1 = SimpleHttpOperator(
task_id='post_op',
endpoint='api/v1.0/nodes',
data=json.dumps({"priority": 5}),
headers={"Content-Type": "application/json"},
    response_check=lambda response: len(response.json()) == 0,
dag=dag)
t5 = SimpleHttpOperator(
task_id='post_op_formenc',
endpoint='nodes/url',
data="name=Joe",
headers={"Content-Type": "application/x-www-form-urlencoded"},
dag=dag)
t2 = SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='api/v1.0/nodes',
data={"param1": "value1", "param2": "value2"},
headers={},
dag=dag)
t3 = SimpleHttpOperator(
task_id='put_op',
method='PUT',
endpoint='api/v1.0/nodes',
data=json.dumps({"priority": 5}),
headers={"Content-Type": "application/json"},
dag=dag)
t4 = SimpleHttpOperator(
task_id='del_op',
method='DELETE',
endpoint='api/v1.0/nodes',
data="some=data",
headers={"Content-Type": "application/x-www-form-urlencoded"},
dag=dag)
sensor = HttpSensor(
task_id='http_sensor_check',
http_conn_id='http_default',
endpoint='',
request_params={},
response_check=lambda response: True if "Google" in response.content else False,
poke_interval=5,
dag=dag)
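# Resulting dependency chain: sensor -> t1 -> t2 -> t3 -> t4 -> t5
# (equivalent to: sensor >> t1 >> t2 >> t3 >> t4 >> t5)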
t1.set_upstream(sensor)
t2.set_upstream(t1)
t3.set_upstream(t2)
t4.set_upstream(t3)
t5.set_upstream(t4)
| apache-2.0 | -6,550,288,117,697,154,000 | -2,289,592,675,823,699,500 | 28.295918 | 84 | 0.68861 | false |