repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
piiswrong/mxnet | example/ssd/tools/caffe_converter/compare_layers.py | 54 | 14536 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test converted models layer by layer
"""
import os
import argparse
import logging
import mxnet as mx
import cv2
import numpy as np
logging.basicConfig(level=logging.INFO)
def read_image(img_path, image_dims=None, mean=None):
"""
Reads an image from file path or URL, optionally resizing to given image dimensions and
subtracting mean.
:param img_path: path to file, or url to download
:param image_dims: image dimensions to resize to, or None
:param mean: mean file to subtract, or None
:return: loaded image, in RGB format
"""
import urllib
filename = img_path.split("/")[-1]
if img_path.startswith('http'):
urllib.urlretrieve(img_path, filename)
img = cv2.imread(filename)
else:
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if image_dims is not None:
img = cv2.resize(img, image_dims) # resize to image_dims to fit model
img = np.rollaxis(img, 2) # change to (c, h, w) order
img = img[np.newaxis, :] # extend to (n, c, h, w)
if mean is not None:
mean = np.array(mean)
if mean.shape == (3,):
mean = mean[np.newaxis, :, np.newaxis, np.newaxis] # extend to (n, c, 1, 1)
img = img.astype(np.float32) - mean # subtract mean
return img
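# Hedged usage sketch (not part of the original converter; the file name and mean values
# below are invented): read_image resizes to the model input size, moves channels first and,
# when a mean is given, subtracts it and returns float32 data in RGB order.
def _example_read_image():
    return read_image('cat.jpg', image_dims=(224, 224), mean=[123.68, 116.779, 103.939])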
def _ch_dev(arg_params, aux_params, ctx):
"""
Changes device of given mxnet arguments
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param ctx: new device context
:return: arguments and auxiliary parameters on new device
"""
new_args = dict()
new_auxs = dict()
for k, v in arg_params.items():
new_args[k] = v.as_in_context(ctx)
for k, v in aux_params.items():
new_auxs[k] = v.as_in_context(ctx)
return new_args, new_auxs
def convert_and_compare_caffe_to_mxnet(image_url, gpu, caffe_prototxt_path, caffe_model_path,
caffe_mean, mean_diff_allowed, max_diff_allowed):
"""
Run the layer comparison on a caffe model, given its prototxt, weights and mean.
The comparison is done by running inference on a given image with both the caffe and the mxnet model
:param image_url: image file or url to run inference on
:param gpu: gpu to use, -1 for cpu
:param caffe_prototxt_path: path to caffe prototxt
:param caffe_model_path: path to caffe weights
:param caffe_mean: path to caffe mean file
"""
import caffe
from caffe_proto_utils import read_network_dag, process_network_proto, read_caffe_mean
from convert_model import convert_model
if isinstance(caffe_mean, str):
caffe_mean = read_caffe_mean(caffe_mean)
elif caffe_mean is None:
pass
elif len(caffe_mean) == 3:
# swap channels from Caffe BGR to RGB
caffe_mean = caffe_mean[::-1]
# get caffe root location, this is needed to run the upgrade network utility, so we only need
# to support parsing of latest caffe
caffe_root = os.path.dirname(os.path.dirname(caffe.__path__[0]))
caffe_prototxt_path = process_network_proto(caffe_root, caffe_prototxt_path)
_, layer_name_to_record, top_to_layers = read_network_dag(caffe_prototxt_path)
caffe.set_mode_cpu()
caffe_net = caffe.Net(caffe_prototxt_path, caffe_model_path, caffe.TEST)
image_dims = tuple(caffe_net.blobs['data'].shape)[2:4]
logging.info('getting image %s', image_url)
img_rgb = read_image(image_url, image_dims, caffe_mean)
img_bgr = img_rgb[:, ::-1, :, :]
caffe_net.blobs['data'].reshape(*img_bgr.shape)
caffe_net.blobs['data'].data[...] = img_bgr
_ = caffe_net.forward()
# read sym and add all outputs
sym, arg_params, aux_params, _ = convert_model(caffe_prototxt_path, caffe_model_path)
sym = sym.get_internals()
# now mxnet
if gpu < 0:
ctx = mx.cpu(0)
else:
ctx = mx.gpu(gpu)
arg_params, aux_params = _ch_dev(arg_params, aux_params, ctx)
arg_params["data"] = mx.nd.array(img_rgb, ctx)
arg_params["prob_label"] = mx.nd.empty((1,), ctx)
exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
exe.forward(is_train=False)
compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed)
return
def _bfs(root_node, process_node):
"""
Implementation of Breadth-first search (BFS) on caffe network DAG
:param root_node: root node of caffe network DAG
:param process_node: function to run on each node
"""
from collections import deque
seen_nodes = set()
next_nodes = deque()
seen_nodes.add(root_node)
next_nodes.append(root_node)
while next_nodes:
current_node = next_nodes.popleft()
# process current node
process_node(current_node)
for child_node in current_node.children:
if child_node not in seen_nodes:
seen_nodes.add(child_node)
next_nodes.append(child_node)
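# Hedged illustration (not used by the tool): _bfs calls process_node exactly once per node
# reachable from root_node, so collecting the traversal order could look like this, assuming
# `root` is a record from read_network_dag with `children` and `name` attributes.
def _example_bfs_names(root):
    names = []
    _bfs(root, lambda node: names.append(node.name))
    return names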
def compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed):
"""
Compare layer by layer of a caffe network with mxnet network
:param caffe_net: loaded caffe network
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param exe: mxnet model
:param layer_name_to_record: map between caffe layer and information record
:param top_to_layers: map between caffe blob name to layers which outputs it (including inplace)
:param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob
:param max_diff_allowed: max difference allowed between caffe blob and mxnet blob
"""
import re
log_format = ' {0:<40} {1:<40} {2:<8} {3:>10} {4:>10} {5:<1}'
compare_layers_from_nets.is_first_convolution = True
def _compare_blob(caf_blob, mx_blob, caf_name, mx_name, blob_type, note):
diff = np.abs(mx_blob - caf_blob)
diff_mean = diff.mean()
diff_max = diff.max()
logging.info(log_format.format(caf_name, mx_name, blob_type, '%4.5f' % diff_mean,
'%4.5f' % diff_max, note))
assert diff_mean < mean_diff_allowed
assert diff_max < max_diff_allowed
def _process_layer_parameters(layer):
logging.debug('processing layer %s of type %s', layer.name, layer.type)
normalized_layer_name = re.sub('[-/]', '_', layer.name)
# handle weight and bias of convolution and fully-connected layers
if layer.name in caffe_net.params and layer.type in ['Convolution', 'InnerProduct',
'Deconvolution']:
has_bias = len(caffe_net.params[layer.name]) > 1
mx_name_weight = '{}_weight'.format(normalized_layer_name)
mx_beta = arg_params[mx_name_weight].asnumpy()
# first convolution should change from BGR to RGB
if layer.type == 'Convolution' and compare_layers_from_nets.is_first_convolution:
compare_layers_from_nets.is_first_convolution = False
# if RGB or RGBA
if mx_beta.shape[1] == 3 or mx_beta.shape[1] == 4:
# Swapping BGR of caffe into RGB in mxnet
mx_beta[:, [0, 2], :, :] = mx_beta[:, [2, 0], :, :]
caf_beta = caffe_net.params[layer.name][0].data
_compare_blob(caf_beta, mx_beta, layer.name, mx_name_weight, 'weight', '')
if has_bias:
mx_name_bias = '{}_bias'.format(normalized_layer_name)
mx_gamma = arg_params[mx_name_bias].asnumpy()
caf_gamma = caffe_net.params[layer.name][1].data
_compare_blob(caf_gamma, mx_gamma, layer.name, mx_name_bias, 'bias', '')
elif layer.name in caffe_net.params and layer.type == 'Scale':
if 'scale' in normalized_layer_name:
bn_name = normalized_layer_name.replace('scale', 'bn')
elif 'sc' in normalized_layer_name:
bn_name = normalized_layer_name.replace('sc', 'bn')
else:
assert False, 'Unknown name convention for bn/scale'
beta_name = '{}_beta'.format(bn_name)
gamma_name = '{}_gamma'.format(bn_name)
mx_beta = arg_params[beta_name].asnumpy()
caf_beta = caffe_net.params[layer.name][1].data
_compare_blob(caf_beta, mx_beta, layer.name, beta_name, 'mov_mean', '')
mx_gamma = arg_params[gamma_name].asnumpy()
caf_gamma = caffe_net.params[layer.name][0].data
_compare_blob(caf_gamma, mx_gamma, layer.name, gamma_name, 'mov_var', '')
elif layer.name in caffe_net.params and layer.type == 'BatchNorm':
mean_name = '{}_moving_mean'.format(normalized_layer_name)
var_name = '{}_moving_var'.format(normalized_layer_name)
caf_rescale_factor = caffe_net.params[layer.name][2].data
mx_mean = aux_params[mean_name].asnumpy()
caf_mean = caffe_net.params[layer.name][0].data / caf_rescale_factor
_compare_blob(caf_mean, mx_mean, layer.name, mean_name, 'mean', '')
mx_var = aux_params[var_name].asnumpy()
caf_var = caffe_net.params[layer.name][1].data / caf_rescale_factor
_compare_blob(caf_var, mx_var, layer.name, var_name, 'var',
'expect 1e-04 change due to cudnn eps')
elif layer.type in ['Input', 'Pooling', 'ReLU', 'Eltwise', 'Softmax', 'LRN', 'Concat',
'Dropout', 'Crop']:
# no parameters to check for these layers
pass
else:
logging.warn('No handling for layer %s of type %s, should we ignore it?', layer.name,
layer.type)
return
def _process_layer_output(caffe_blob_name):
logging.debug('processing blob %s', caffe_blob_name)
# skip blobs not originating from actual layers, e.g. artificial split layers added by caffe
if caffe_blob_name not in top_to_layers:
return
caf_blob = caffe_net.blobs[caffe_blob_name].data
# data should change from BGR to RGB
if caffe_blob_name == 'data':
# if RGB or RGBA
if caf_blob.shape[1] == 3 or caf_blob.shape[1] == 4:
# Swapping BGR of caffe into RGB in mxnet
caf_blob[:, [0, 2], :, :] = caf_blob[:, [2, 0], :, :]
mx_name = 'data'
else:
# get last layer name which outputs this blob name
last_layer_name = top_to_layers[caffe_blob_name][-1]
normalized_last_layer_name = re.sub('[-/]', '_', last_layer_name)
mx_name = '{}_output'.format(normalized_last_layer_name)
if 'scale' in mx_name:
mx_name = mx_name.replace('scale', 'bn')
elif 'sc' in mx_name:
mx_name = mx_name.replace('sc', 'bn')
if mx_name not in exe.output_dict:
logging.error('mxnet blob %s is missing, time to extend the compare tool..', mx_name)
return
mx_blob = exe.output_dict[mx_name].asnumpy()
_compare_blob(caf_blob, mx_blob, caffe_blob_name, mx_name, 'output', '')
return
# check layer parameters
logging.info('\n***** Network Parameters '.ljust(140, '*'))
logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
first_layer_name = layer_name_to_record.keys()[0]
_bfs(layer_name_to_record[first_layer_name], _process_layer_parameters)
# check layer output
logging.info('\n***** Network Outputs '.ljust(140, '*'))
logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
for caffe_blob_name in caffe_net.blobs.keys():
_process_layer_output(caffe_blob_name)
return
def main():
"""Entrypoint for compare_layers"""
parser = argparse.ArgumentParser(
description='Tool for testing caffe to mxnet conversion layer by layer')
parser.add_argument('--image_url', type=str,
default='http://writm.com/wp-content/uploads/2016/08/Cat-hd-wallpapers.jpg',
help='input image to test inference, can be either file path or url')
parser.add_argument('--caffe_prototxt_path', type=str,
default='./model.prototxt',
help='path to caffe prototxt')
parser.add_argument('--caffe_model_path', type=str,
default='./model.caffemodel',
help='path to caffe weights')
parser.add_argument('--caffe_mean', type=str,
default='./model_mean.binaryproto',
help='path to caffe mean file')
parser.add_argument('--mean_diff_allowed', type=int, default=1e-03,
help='mean difference allowed between caffe blob and mxnet blob')
parser.add_argument('--max_diff_allowed', type=int, default=1e-01,
help='max difference allowed between caffe blob and mxnet blob')
parser.add_argument('--gpu', type=int, default=-1, help='the gpu id used for predict')
args = parser.parse_args()
convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu, args.caffe_prototxt_path,
args.caffe_model_path, args.caffe_mean,
args.mean_diff_allowed, args.max_diff_allowed)
if __name__ == '__main__':
main()
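# Example invocation (hypothetical model paths; assumes caffe, mxnet and cv2 are importable):
#   python compare_layers.py --caffe_prototxt_path ./deploy.prototxt \
#       --caffe_model_path ./weights.caffemodel --caffe_mean ./mean.binaryproto --gpu -1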
| apache-2.0 | 5,746,854,504,313,520,000 | 38.934066 | 100 | 0.607939 | false |
nerdknight/beaglebone-black-stuff | n5110.py | 1 | 9178 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import Adafruit_BBIO.GPIO as GPIO
import time
PIN_SCE="P9_11" # LCD CS # Pin 3
PIN_RESET="P9_12" # LCD RST # Pin 4
PIN_DC="P9_13" # LCD Dat/Com # Pin 5
PIN_SDIN="P9_14" # LCD SPIDat # Pin 6
PIN_SCLK="P9_15" # LCD SPIClk# Pin 7
# LCD Gnd .... Pin 2
# LCD Vcc .... Pin 1
PIN_LEDs="P9_16" # LCD Vlcd ... Pin 8
LCD_C=GPIO.LOW
LCD_D=GPIO.HIGH
LCD_X=84
LCD_Y=48
LCD_YB=6
LCD_CMD=0
FREQ=0#.000001
ASCII=(
(0x00, 0x00, 0x00, 0x00, 0x00 ) # 20
,(
0x00, 0x00, 0x5f, 0x00, 0x00 ) # 21 !
,(
0x00, 0x07, 0x00, 0x07, 0x00 ) # 22 "
,(
0x14, 0x7f, 0x14, 0x7f, 0x14 ) # 23 #
,(
0x24, 0x2a, 0x7f, 0x2a, 0x12 ) # 24 $
,(
0x23, 0x13, 0x08, 0x64, 0x62 ) # 25 %
,(
0x36, 0x49, 0x55, 0x22, 0x50 ) # 26 &
,(
0x00, 0x05, 0x03, 0x00, 0x00 ) # 27 '
,(
0x00, 0x1c, 0x22, 0x41, 0x00 ) # 28 (
,(
0x00, 0x41, 0x22, 0x1c, 0x00 ) # 29 )
,(
0x14, 0x08, 0x3e, 0x08, 0x14 ) # 2a *
,(
0x08, 0x08, 0x3e, 0x08, 0x08 ) # 2b +
,(
0x00, 0x50, 0x30, 0x00, 0x00 ) # 2c ,
,(
0x08, 0x08, 0x08, 0x08, 0x08 ) # 2d -
,(
0x00, 0x60, 0x60, 0x00, 0x00 ) # 2e .
,(
0x20, 0x10, 0x08, 0x04, 0x02 ) # 2f /
,(
0x3e, 0x51, 0x49, 0x45, 0x3e ) # 30 0
,(
0x00, 0x42, 0x7f, 0x40, 0x00 ) # 31 1
,(
0x42, 0x61, 0x51, 0x49, 0x46 ) # 32 2
,(
0x21, 0x41, 0x45, 0x4b, 0x31 ) # 33 3
,(
0x18, 0x14, 0x12, 0x7f, 0x10 ) # 34 4
,(
0x27, 0x45, 0x45, 0x45, 0x39 ) # 35 5
,(
0x3c, 0x4a, 0x49, 0x49, 0x30 ) # 36 6
,(
0x01, 0x71, 0x09, 0x05, 0x03 ) # 37 7
,(
0x36, 0x49, 0x49, 0x49, 0x36 ) # 38 8
,(
0x06, 0x49, 0x49, 0x29, 0x1e ) # 39 9
,(
0x00, 0x36, 0x36, 0x00, 0x00 ) # 3a :
,(
0x00, 0x56, 0x36, 0x00, 0x00 ) # 3b ;
,(
0x08, 0x14, 0x22, 0x41, 0x00 ) # 3c <
,(
0x14, 0x14, 0x14, 0x14, 0x14 ) # 3d =
,(
0x00, 0x41, 0x22, 0x14, 0x08 ) # 3e >
,(
0x02, 0x01, 0x51, 0x09, 0x06 ) # 3f ?
,(
0x32, 0x49, 0x79, 0x41, 0x3e ) # 40 @
,(
0x7e, 0x11, 0x11, 0x11, 0x7e ) # 41 A
,(
0x7f, 0x49, 0x49, 0x49, 0x36 ) # 42 B
,(
0x3e, 0x41, 0x41, 0x41, 0x22 ) # 43 C
,(
0x7f, 0x41, 0x41, 0x22, 0x1c ) # 44 D
,(
0x7f, 0x49, 0x49, 0x49, 0x41 ) # 45 E
,(
0x7f, 0x09, 0x09, 0x09, 0x01 ) # 46 F
,(
0x3e, 0x41, 0x49, 0x49, 0x7a ) # 47 G
,(
0x7f, 0x08, 0x08, 0x08, 0x7f ) # 48 H
,(
0x00, 0x41, 0x7f, 0x41, 0x00 ) # 49 I
,(
0x20, 0x40, 0x41, 0x3f, 0x01 ) # 4a J
,(
0x7f, 0x08, 0x14, 0x22, 0x41 ) # 4b K
,(
0x7f, 0x40, 0x40, 0x40, 0x40 ) # 4c L
,(
0x7f, 0x02, 0x0c, 0x02, 0x7f ) # 4d M
,(
0x7f, 0x04, 0x08, 0x10, 0x7f ) # 4e N
,(
0x3e, 0x41, 0x41, 0x41, 0x3e ) # 4f O
,(
0x7f, 0x09, 0x09, 0x09, 0x06 ) # 50 P
,(
0x3e, 0x41, 0x51, 0x21, 0x5e ) # 51 Q
,(
0x7f, 0x09, 0x19, 0x29, 0x46 ) # 52 R
,(
0x46, 0x49, 0x49, 0x49, 0x31 ) # 53 S
,(
0x01, 0x01, 0x7f, 0x01, 0x01 ) # 54 T
,(
0x3f, 0x40, 0x40, 0x40, 0x3f ) # 55 U
,(
0x1f, 0x20, 0x40, 0x20, 0x1f ) # 56 V
,(
0x3f, 0x40, 0x38, 0x40, 0x3f ) # 57 W
,(
0x63, 0x14, 0x08, 0x14, 0x63 ) # 58 X
,(
0x07, 0x08, 0x70, 0x08, 0x07 ) # 59 Y
,(
0x61, 0x51, 0x49, 0x45, 0x43 ) # 5a Z
,(
0x00, 0x7f, 0x41, 0x41, 0x00 ) # 5b [
,(
0x02, 0x04, 0x08, 0x10, 0x20 ) # 5c ¥
,(
0x00, 0x41, 0x41, 0x7f, 0x00 ) # 5d ]
,(
0x04, 0x02, 0x01, 0x02, 0x04 ) # 5e ^
,(
0x40, 0x40, 0x40, 0x40, 0x40 ) # 5f _
,(
0x00, 0x01, 0x02, 0x04, 0x00 ) # 60 `
,(
0x20, 0x54, 0x54, 0x54, 0x78 ) # 61 a
,(
0x7f, 0x48, 0x44, 0x44, 0x38 ) # 62 b
,(
0x38, 0x44, 0x44, 0x44, 0x20 ) # 63 c
,(
0x38, 0x44, 0x44, 0x48, 0x7f ) # 64 d
,(
0x38, 0x54, 0x54, 0x54, 0x18 ) # 65 e
,(
0x08, 0x7e, 0x09, 0x01, 0x02 ) # 66 f
,(
0x0c, 0x52, 0x52, 0x52, 0x3e ) # 67 g
,(
0x7f, 0x08, 0x04, 0x04, 0x78 ) # 68 h
,(
0x00, 0x44, 0x7d, 0x40, 0x00 ) # 69 i
,(
0x20, 0x40, 0x44, 0x3d, 0x00 ) # 6a j
,(
0x7f, 0x10, 0x28, 0x44, 0x00 ) # 6b k
,(
0x00, 0x41, 0x7f, 0x40, 0x00 ) # 6c l
,(
0x7c, 0x04, 0x18, 0x04, 0x78 ) # 6d m
,(
0x7c, 0x08, 0x04, 0x04, 0x78 ) # 6e n
,(
0x38, 0x44, 0x44, 0x44, 0x38 ) # 6f o
,(
0x7c, 0x14, 0x14, 0x14, 0x08 ) # 70 p
,(
0x08, 0x14, 0x14, 0x18, 0x7c ) # 71 q
,(
0x7c, 0x08, 0x04, 0x04, 0x08 ) # 72 r
,(
0x48, 0x54, 0x54, 0x54, 0x20 ) # 73 s
,(
0x04, 0x3f, 0x44, 0x40, 0x20 ) # 74 t
,(
0x3c, 0x40, 0x40, 0x20, 0x7c ) # 75 u
,(
0x1c, 0x20, 0x40, 0x20, 0x1c ) # 76 v
,(
0x3c, 0x40, 0x30, 0x40, 0x3c ) # 77 w
,(
0x44, 0x28, 0x10, 0x28, 0x44 ) # 78 x
,(
0x0c, 0x50, 0x50, 0x50, 0x3c ) # 79 y
,(
0x44, 0x64, 0x54, 0x4c, 0x44 ) # 7a z
,(
0x00, 0x08, 0x36, 0x41, 0x00 ) # 7b (
,(
0x00, 0x00, 0x7f, 0x00, 0x00 ) # 7c |
,(
0x00, 0x41, 0x36, 0x08, 0x00 ) # 7d )
,(
0x10, 0x08, 0x08, 0x10, 0x08 ) # 7e ←
,(
0x00, 0x06, 0x09, 0x09, 0x06 ) # 7f →
)
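# Each ASCII entry above is one 5x8 glyph: five column bytes with the least significant bit at
# the top row, which is why drawChar later in this file tests (col >> i) & 1 to light pixel
# row i. For example, column 2 of '!' is 0x5f, lighting rows 0-4 and 6 (a bar with a gap).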
"""
bitOrder: "MSB","LSB"
"""
def shiftOut(dataPin, clockPin, bitOrder, val):
for i in range(8):
oval=GPIO.LOW
if bitOrder == "LSB":
oval=(val & (1<<i))>>i
else:
oval=(val & (1 << (7 - i)))>>(7 - i)
GPIO.output(dataPin,oval)
GPIO.output(clockPin, GPIO.HIGH)
time.sleep(FREQ)
GPIO.output(clockPin, GPIO.LOW)
time.sleep(FREQ)
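# Hedged example (the helper below is not called anywhere): shifting out 0xA5 MSB-first clocks
# bit 7 onto SDIN first, so the pin sees 1,0,1,0,0,1,0,1; "LSB" order would reverse that.
def _example_shift_byte():
    shiftOut(PIN_SDIN, PIN_SCLK, "MSB", 0xA5)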
class Pos:
def __init__(self,x=0,y=0):
# merged constructor: the original defined __init__ twice (coordinates and copy-from-Pos),
# so the first definition was silently shadowed; accept either form here
if isinstance(x, Pos):
self.x=x.x
self.y=x.y
else:
self.x=x
self.y=y
def equals(self,pos):
return self.x==pos.x and self.y==pos.y
def set(self,x,y):
self.x=x
self.y=y
class Screen:
def __init__(self):
self.frame=[[0 for i in range(6)] for j in range(84)]
def lcdBacklight(self,on):
if on:
GPIO.output(PIN_LEDs,GPIO.HIGH)
else:
GPIO.output(PIN_LEDs,GPIO.LOW)
def lcdClear(self):
for i in range(0,LCD_X * LCD_Y / 8):
self.lcdWrite(LCD_D, 0x00)
def lcdWrite(self,dc, data):
GPIO.output(PIN_DC, dc)
GPIO.output(PIN_SCE, GPIO.LOW)
shiftOut(PIN_SDIN, PIN_SCLK, "MSB", data);
GPIO.output(PIN_SCE, GPIO.HIGH)
def orVal(self,x,y,val):
#print "Ov: x="+str(x)+" y="+str(y)+" val="+str(val)
x=x%LCD_X
y=y%LCD_YB
self.frame[x][y]=self.frame[x][y]|val
def lcdString(self,characters):
for c in characters:
self.lcdCharacter(c)
def lcdCharacter(self,character):
if ord(character)-0x20 < 0x7f:
self.lcdWrite(LCD_D, 0x00)
for i in range(5):
self.lcdWrite(LCD_D, ASCII[ord(character) - 0x20][i])
self.lcdWrite(LCD_D, 0x00)
def lcdDisplayControl(self,dc):
self.lcdWrite(LCD_CMD,0x8|dc)
def lcdInitialise(self):
GPIO.setup(PIN_SCE, GPIO.OUT)
GPIO.setup(PIN_RESET, GPIO.OUT)
GPIO.setup(PIN_DC,GPIO.OUT)
GPIO.setup(PIN_SDIN,GPIO.OUT)
GPIO.setup(PIN_SCLK,GPIO.OUT)
GPIO.setup(PIN_LEDs,GPIO.OUT)
GPIO.output(PIN_SCE, GPIO.LOW)
GPIO.output(PIN_RESET, GPIO.LOW)
GPIO.output(PIN_DC,GPIO.LOW)
GPIO.output(PIN_SDIN,GPIO.LOW)
GPIO.output(PIN_SCLK,GPIO.LOW)
GPIO.output(PIN_RESET, GPIO.LOW)
time.sleep(FREQ)
GPIO.output(PIN_RESET, GPIO.HIGH)
time.sleep(FREQ)
self.lcdWrite( LCD_CMD, 0x21 ) # LCD Extended Commands.
self.lcdWrite( LCD_CMD, 0xBf ) # Set LCD Vop (Contrast). //B1
self.lcdWrite( LCD_CMD, 0x04 ) # Set Temp coefficent. //0x04
self.lcdWrite( LCD_CMD, 0x14 ) # LCD bias mode 1:48. //0x13
self.lcdWrite( LCD_CMD, 0x0C ) # LCD in normal mode. 0x0d for inverse
self.lcdWrite(LCD_C, 0x20)
self.lcdWrite(LCD_C, 0x0C)
self.lcdClear()
def lcdClose(self):
GPIO.cleanup()
"""
gotoXY routine to position cursor
x - range: 0 to 83
y - range: 0 to 5
"""
def gotoXY(self, x, y):
self.lcdWrite( 0, 0x80 | x) # Column.
self.lcdWrite( 0, 0x40 | y) # Row.
"""
Draws a pixel in pixel coords (84x48)
"""
def drawPixel(self, x, y):
x=x%LCD_X
y=y%LCD_Y
res=255&(1<<(y%8))
self.orVal(x,y/8,res)
def draw(self):
self.gotoXY(0,0)
for y in range(LCD_YB):
for x in range(LCD_X):
self.lcdWrite(1,self.frame[x][y])
def clear(self):
for y in range(LCD_YB):
for x in range(LCD_X):
self.frame[x][y]=0x0
def drawChar(self, x, y, c):
if y+8<=LCD_Y :
for index in range(5):
try:
col=ASCII[ord(c) - 0x20][index]
except:
print "I don't know char "+c
col=ASCII[ord("?")-0x20][index]
for i in range(8):
if (((col>>i)&1)>0):
self.drawPixel(x+index,y+i)
def drawString(self, x, y, string):
for i in range(len(string)):
c=string[i]
if(x+5>LCD_X or c=='\n'):
x=0
y+=8
if c != '\n':
self.drawChar(x,y,string[i])
x+=6
def setDCInverse(self):
self.lcdDisplayControl(5)
def setDCNormal(self):
self.lcdDisplayControl(1)
def setDCBlank(self):
self.lcdDisplayControl(0)
def setDCOn(self):
self.lcdDisplayControl(4)
if __name__ == '__main__':
screen= Screen()
screen.lcdInitialise()
screen.clear()
screen.drawString(0,0,"Nuno es un NERD PiNES")
screen.lcdBacklight(True)
#screen.clear()
#for i in range():
# screen.drawPixel(20+i,5)
screen.draw()
time.sleep(1)
screen.lcdBacklight(False)
time.sleep(1)
screen.lcdBacklight(True)
time.sleep(2)
screen.clear()
screen.draw()
screen.lcdClose()
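# Running the demo above on a BeagleBone Black (assumes the Adafruit_BBIO package is installed
# and the LCD is wired to the P9 header pins listed at the top of this file, usually as root):
#   sudo python n5110.py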
| gpl-2.0 | 5,284,405,530,558,225,000 | 22.460358 | 72 | 0.553036 | false |
dhhjx880713/GPy | GPy/plotting/gpy_plot/gp_plots.py | 3 | 23670 |
#===============================================================================
# Copyright (c) 2012-2015, GPy authors (see AUTHORS.txt).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from . import plotting_library as pl
from .plot_util import helper_for_plot_data, update_not_existing_kwargs, \
helper_predict_with_model, get_which_data_ycols, get_x_y_var
from .data_plots import _plot_data, _plot_inducing, _plot_data_error
def plot_mean(self, plot_limits=None, fixed_inputs=None,
resolution=None, plot_raw=False,
apply_link=False, visible_dims=None,
which_data_ycols='all',
levels=20, projection='2d',
label='gp mean',
predict_kw=None,
**kwargs):
"""
Plot the mean of the GP.
You can deactivate the legend for this one plot by supplying None to label.
Give the Y_metadata in the predict_kw if you need it.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
:type fixed_inputs: a list of tuples
:param int resolution: The resolution of the prediction [defaults are 1D:200, 2D:50]
:param bool plot_raw: plot the latent function (usually denoted f) only?
:param bool apply_link: whether to apply the link function of the GP to the raw prediction.
:param array-like which_data_ycols: which columns of y to plot (array-like or list of ints)
:param int levels: for 2D plotting, the number of contour levels to use
:param {'2d','3d'} projection: whether to plot in 2d or 3d. This only applies when plotting two dimensional inputs!
:param str label: the label for the plot.
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
"""
canvas, kwargs = pl().new_canvas(projection=projection, **kwargs)
X = get_x_y_var(self)[0]
helper_data = helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[2], plot_raw,
apply_link, None,
get_which_data_ycols(self, which_data_ycols),
predict_kw)
plots = _plot_mean(self, canvas, helper_data, helper_prediction,
levels, projection, label, **kwargs)
return pl().add_to_canvas(canvas, plots)
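# Hedged usage sketch (model and kernel names are illustrative): for a fitted 1D model
#   m = GPy.models.GPRegression(X, Y); m.plot_mean(plot_limits=[0., 10.])
# draws only the posterior mean, and predict_kw=dict(kern=some_kernel_part) restricts the
# prediction to a specific kernel, as described in the docstring above.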
def _plot_mean(self, canvas, helper_data, helper_prediction,
levels=20, projection='2d', label=None,
**kwargs):
_, free_dims, Xgrid, x, y, _, _, resolution = helper_data
if len(free_dims)<=2:
mu, _, _ = helper_prediction
if len(free_dims)==1:
# 1D plotting:
update_not_existing_kwargs(kwargs, pl().defaults.meanplot_1d) # @UndefinedVariable
plots = dict(gpmean=[pl().plot(canvas, Xgrid[:, free_dims], mu, label=label, **kwargs)])
else:
if projection.lower() in '2d':
update_not_existing_kwargs(kwargs, pl().defaults.meanplot_2d) # @UndefinedVariable
plots = dict(gpmean=[pl().contour(canvas, x[:,0], y[0,:],
mu.reshape(resolution, resolution).T,
levels=levels, label=label, **kwargs)])
elif projection.lower() in '3d':
update_not_existing_kwargs(kwargs, pl().defaults.meanplot_3d) # @UndefinedVariable
plots = dict(gpmean=[pl().surface(canvas, x, y,
mu.reshape(resolution, resolution),
label=label,
**kwargs)])
elif len(free_dims)==0:
pass # Nothing to plot!
else:
raise RuntimeError('Cannot plot mean in more than 2 input dimensions')
return plots
def plot_confidence(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=None,
resolution=None, plot_raw=False,
apply_link=False, visible_dims=None,
which_data_ycols='all', label='gp confidence',
predict_kw=None,
**kwargs):
"""
Plot the confidence interval between the percentiles lower and upper.
E.g. the 95% confidence interval is $2.5, 97.5$.
Note: Only implemented for one dimension!
You can deactivate the legend for this one plot by supplying None to label.
Give the Y_metadata in the predict_kw if you need it.
:param float lower: the lower percentile to plot
:param float upper: the upper percentile to plot
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
:type fixed_inputs: a list of tuples
:param int resolution: The resolution of the prediction [default:200]
:param bool plot_raw: plot the latent function (usually denoted f) only?
:param bool apply_link: whether to apply the link function of the GP to the raw prediction.
:param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints)
:param array-like which_data_ycols: which columns of the output y (!) to plot (array-like or list of ints)
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
"""
canvas, kwargs = pl().new_canvas(**kwargs)
ycols = get_which_data_ycols(self, which_data_ycols)
X = get_x_y_var(self)[0]
helper_data = helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[2], plot_raw, apply_link,
(lower, upper),
ycols, predict_kw)
plots = _plot_confidence(self, canvas, helper_data, helper_prediction, label, **kwargs)
return pl().add_to_canvas(canvas, plots, legend=label is not None)
def _plot_confidence(self, canvas, helper_data, helper_prediction, label, **kwargs):
_, free_dims, Xgrid, _, _, _, _, _ = helper_data
update_not_existing_kwargs(kwargs, pl().defaults.confidence_interval) # @UndefinedVariable
if len(free_dims)<=1:
if len(free_dims)==1:
percs = helper_prediction[1]
fills = []
for d in range(helper_prediction[0].shape[1]):
fills.append(pl().fill_between(canvas, Xgrid[:,free_dims[0]], percs[0][:,d], percs[1][:,d], label=label, **kwargs))
return dict(gpconfidence=fills)
else:
pass #Nothing to plot!
else:
raise RuntimeError('Can only plot confidence interval in one input dimension')
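# Hedged note: the (lower, upper) percentiles set the band width, e.g. lower=2.5, upper=97.5
# is the central 95% interval while lower=25, upper=75 gives the interquartile band:
#   m.plot_confidence(lower=25, upper=75)   # `m` is an illustrative fitted model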
def plot_samples(self, plot_limits=None, fixed_inputs=None,
resolution=None, plot_raw=True,
apply_link=False, visible_dims=None,
which_data_ycols='all',
samples=3, projection='2d', label='gp_samples',
predict_kw=None,
**kwargs):
"""
Plot samples drawn from the GP.
You can deactivate the legend for this one plot by supplying None to label.
Give the Y_metadata in the predict_kw if you need it.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
:type fixed_inputs: a list of tuples
:param int resolution: The resolution of the prediction [defaults are 1D:200, 2D:50]
:param bool plot_raw: plot the latent function (usually denoted f) only? This is usually what you want!
:param bool apply_link: whether to apply the link function of the GP to the raw prediction.
:param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints)
:param array-like which_data_ycols: which columns of y to plot (array-like or list of ints)
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
:param int samples: the number of samples to draw from the latent GP and plot
"""
canvas, kwargs = pl().new_canvas(projection=projection, **kwargs)
ycols = get_which_data_ycols(self, which_data_ycols)
X = get_x_y_var(self)[0]
helper_data = helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[2], plot_raw, apply_link,
None,
ycols, predict_kw, samples)
plots = _plot_samples(self, canvas, helper_data, helper_prediction,
projection, label, **kwargs)
return pl().add_to_canvas(canvas, plots)
def _plot_samples(self, canvas, helper_data, helper_prediction, projection,
label, **kwargs):
_, free_dims, Xgrid, x, y, _, _, resolution = helper_data
samples = helper_prediction[2]
if len(free_dims)<=2:
if len(free_dims)==1:
# 1D plotting:
update_not_existing_kwargs(kwargs, pl().defaults.samples_1d) # @UndefinedVariable
plots = [pl().plot(canvas, Xgrid[:, free_dims], samples[:, s], label=label if s==0 else None, **kwargs) for s in range(samples.shape[-1])]
elif len(free_dims)==2 and projection=='3d':
update_not_existing_kwargs(kwargs, pl().defaults.samples_3d) # @UndefinedVariable
plots = [pl().surface(canvas, x, y, samples[:, s].reshape(resolution, resolution), **kwargs) for s in range(samples.shape[-1])]
else:
pass # Nothing to plot!
return dict(gpmean=plots)
else:
raise RuntimeError('Cannot plot samples in more than 1 input dimension')
def plot_density(self, plot_limits=None, fixed_inputs=None,
resolution=None, plot_raw=False,
apply_link=False, visible_dims=None,
which_data_ycols='all',
levels=35, label='gp density',
predict_kw=None,
**kwargs):
"""
Plot the predictive density of the GP as a shaded gradient across percentile levels
(internally np.linspace(2.5, 97.5, levels*2) percentiles are used).
Note: Only implemented for one dimension!
You can deactivate the legend for this one plot by supplying None to label.
Give the Y_metadata in the predict_kw if you need it.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
:type fixed_inputs: a list of tuples
:param int resolution: The resolution of the prediction [default:200]
:param bool plot_raw: plot the latent function (usually denoted f) only?
:param bool apply_link: whether to apply the link function of the GP to the raw prediction.
:param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints)
:param array-like which_data_ycols: which columns of y to plot (array-like or list of ints)
:param int levels: the number of levels in the density (a number bigger than 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher than 50 if the result is not smooth enough for you.
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
"""
canvas, kwargs = pl().new_canvas(**kwargs)
X = get_x_y_var(self)[0]
helper_data = helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[2], plot_raw,
apply_link, np.linspace(2.5, 97.5, levels*2),
get_which_data_ycols(self, which_data_ycols),
predict_kw)
plots = _plot_density(self, canvas, helper_data, helper_prediction, label, **kwargs)
return pl().add_to_canvas(canvas, plots)
def _plot_density(self, canvas, helper_data, helper_prediction, label, **kwargs):
_, free_dims, Xgrid, _, _, _, _, _ = helper_data
mu, percs, _ = helper_prediction
update_not_existing_kwargs(kwargs, pl().defaults.density) # @UndefinedVariable
if len(free_dims)<=1:
if len(free_dims)==1:
# 1D plotting:
fills = []
for d in range(mu.shape[1]):
fills.append(pl().fill_gradient(canvas, Xgrid[:, free_dims[0]], [p[:,d] for p in percs], label=label, **kwargs))
return dict(gpdensity=fills)
else:
pass # Nothing to plot!
else:
raise RuntimeError('Can only plot density in one input dimension')
def plot(self, plot_limits=None, fixed_inputs=None,
resolution=None,
plot_raw=False, apply_link=False,
which_data_ycols='all', which_data_rows='all',
visible_dims=None,
levels=20, samples=0, samples_likelihood=0, lower=2.5, upper=97.5,
plot_data=True, plot_inducing=True, plot_density=False,
predict_kw=None, projection='2d', legend=True, **kwargs):
"""
Convenience function for plotting the fit of a GP.
You can deactivate the legend for this one plot by supplying None to label.
Give the Y_metadata in the predict_kw if you need it.
If you want fine-grained control, use the specific plotting functions supplied in the model.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
:type fixed_inputs: a list of tuples
:param int resolution: The resolution of the prediction [default:200]
:param bool plot_raw: plot the latent function (usually denoted f) only?
:param bool apply_link: whether to apply the link function of the GP to the raw prediction.
:param which_data_ycols: when the data has several columns (independent outputs), only plot these
:type which_data_ycols: 'all' or a list of integers
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice self.X, self.Y
:param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints)
:param int levels: the number of levels in the density (a number bigger than 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher than 50 if the result is not smooth enough for you.
:param int samples: the number of samples to draw from the GP and plot into the plot. This will always be samples from the latent function.
:param int samples_likelihood: the number of samples to draw from the GP and apply the likelihood noise. This is usually not what you want!
:param float lower: the lower percentile to plot
:param float upper: the upper percentile to plot
:param bool plot_data: plot the data into the plot?
:param bool plot_inducing: plot inducing inputs?
:param bool plot_density: plot density instead of the confidence interval?
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
:param {2d|3d} projection: plot in 2d or 3d?
:param bool legend: convenience, whether to put a legend on the plot or not.
"""
X = get_x_y_var(self)[0]
helper_data = helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution)
xmin, xmax = helper_data[5:7]
free_dims = helper_data[1]
if not 'xlim' in kwargs:
kwargs['xlim'] = (xmin[0], xmax[0])
if not 'ylim' in kwargs and len(free_dims) == 2:
kwargs['ylim'] = (xmin[1], xmax[1])
canvas, _ = pl().new_canvas(projection=projection, **kwargs)
helper_prediction = helper_predict_with_model(self, helper_data[2], plot_raw,
apply_link, np.linspace(2.5, 97.5, levels*2) if plot_density else (lower,upper),
get_which_data_ycols(self, which_data_ycols),
predict_kw, samples)
if plot_raw and not apply_link:
# It does not make sense to plot the data (which lives not in the latent function space) into latent function space.
plot_data = False
plots = {}
if hasattr(self, 'Z') and plot_inducing:
plots.update(_plot_inducing(self, canvas, visible_dims, projection, 'Inducing'))
if plot_data:
plots.update(_plot_data(self, canvas, which_data_rows, which_data_ycols, free_dims, projection, "Data"))
plots.update(_plot_data_error(self, canvas, which_data_rows, which_data_ycols, free_dims, projection, "Data Error"))
plots.update(_plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_inducing, plot_density, projection))
if plot_raw and (samples_likelihood > 0):
helper_prediction = helper_predict_with_model(self, helper_data[2], False,
apply_link, None,
get_which_data_ycols(self, which_data_ycols),
predict_kw, samples_likelihood)
plots.update(_plot_samples(self, canvas, helper_data, helper_prediction, projection, "Lik Samples"))
return pl().add_to_canvas(canvas, plots, legend=legend)
def plot_f(self, plot_limits=None, fixed_inputs=None,
resolution=None,
apply_link=False,
which_data_ycols='all', which_data_rows='all',
visible_dims=None,
levels=20, samples=0, lower=2.5, upper=97.5,
plot_density=False,
plot_data=True, plot_inducing=True,
projection='2d', legend=True,
predict_kw=None,
**kwargs):
"""
Convenience function for plotting the fit of a GP.
This is the same as plot, except it plots the latent function fit of the GP!
If you want fine-grained control, use the specific plotting functions supplied in the model.
You can deactivate the legend for this one plot by supplying None to label.
Give the Y_metadata in the predict_kw if you need it.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
:type fixed_inputs: a list of tuples
:param int resolution: The resolution of the prediction [default:200]
:param bool apply_link: whether to apply the link function of the GP to the raw prediction.
:param which_data_ycols: when the data has several columns (independent outputs), only plot these
:type which_data_ycols: 'all' or a list of integers
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice self.X, self.Y
:param array-like visible_dims: an array specifying the input dimensions to plot (maximum two)
:param int levels: the number of levels in the density (a number bigger than 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher than 50 if the result is not smooth enough for you.
:param int samples: the number of samples to draw from the GP and plot into the plot. This will always be samples from the latent function.
:param float lower: the lower percentile to plot
:param float upper: the upper percentile to plot
:param bool plot_data: plot the data into the plot?
:param bool plot_inducing: plot inducing inputs?
:param bool plot_density: plot density instead of the confidence interval?
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
:param dict error_kwargs: kwargs for the error plot for the plotting library you are using
:param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
"""
return plot(self, plot_limits, fixed_inputs, resolution, True,
apply_link, which_data_ycols, which_data_rows,
visible_dims, levels, samples, 0,
lower, upper, plot_data, plot_inducing,
plot_density, predict_kw, projection, legend, **kwargs)
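# Hedged example of fixed_inputs (names invented): for a model with three input dimensions,
#   m.plot_f(fixed_inputs=[(2, 0.5)], visible_dims=[0, 1], projection='3d')
# pins input dimension 2 at 0.5 and plots the latent surface over the two remaining dimensions.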
def _plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_inducing=True, plot_density=False, projection='2d'):
plots.update(_plot_mean(self, canvas, helper_data, helper_prediction, levels, projection, 'Mean'))
try:
if projection=='2d':
if not plot_density:
plots.update(_plot_confidence(self, canvas, helper_data, helper_prediction, "Confidence"))
else:
plots.update(_plot_density(self, canvas, helper_data, helper_prediction, "Density"))
except RuntimeError:
#plotting in 2d
pass
if helper_prediction[2] is not None:
plots.update(_plot_samples(self, canvas, helper_data, helper_prediction, projection, "Samples"))
return plots
| bsd-3-clause | 3,406,426,655,036,419,600 | 55.223278 | 209 | 0.644825 | false |
gangadhar-kadam/adb-erp | selling/doctype/lead/get_leads.py | 5 | 2503 |
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cstr, cint
from webnotes.utils.email_lib.receive import POP3Mailbox
from core.doctype.communication.communication import make
def add_sales_communication(subject, content, sender, real_name, mail=None,
status="Open", date=None):
def set_status(doctype, name):
w = webnotes.bean(doctype, name)
w.ignore_permissions = True
w.doc.status = is_system_user and "Replied" or status
w.doc.save()
if mail:
mail.save_attachments_in_doc(w.doc)
lead_name = webnotes.conn.get_value("Lead", {"email_id": sender})
contact_name = webnotes.conn.get_value("Contact", {"email_id": sender})
is_system_user = webnotes.conn.get_value("Profile", sender)
if not (lead_name or contact_name):
# none, create a new Lead
lead = webnotes.bean({
"doctype":"Lead",
"lead_name": real_name or sender,
"email_id": sender,
"status": status,
"source": "Email"
})
lead.ignore_permissions = True
lead.insert()
lead_name = lead.doc.name
make(content=content, sender=sender, subject=subject,
lead=lead_name, contact=contact_name, date=date)
if contact_name:
set_status("Contact", contact_name)
elif lead_name:
set_status("Lead", lead_name)
class SalesMailbox(POP3Mailbox):
def setup(self, args=None):
self.settings = args or webnotes.doc("Sales Email Settings", "Sales Email Settings")
def process_message(self, mail):
if mail.from_email == self.settings.email_id:
return
add_sales_communication(mail.mail.get("subject", "[No Subject]"), mail.content, mail.from_email,
mail.from_real_name, mail=mail, date=mail.date)
def get_leads():
if cint(webnotes.conn.get_value('Sales Email Settings', None, 'extract_emails')):
		SalesMailbox()
| agpl-3.0 | -4,528,631,096,231,753,700 | 33.777778 | 99 | 0.72473 | false |
bdr00/typedef | typedef/api.py | 1 | 3489 |
from typedef.type_definition import define_simple_type, define_array, define_struct, define_union
from typedef.constants import Endian, Arch
from typedef.errors import ArchDependentType, MissingField
def sizeof(t, target_arch=Arch.Unknown):
"""
return the number of bytes the complex-type instance represents
:param t: instance of struct/union/array
:param target_arch: target architecture, if required by type
:return: number of bytes the container has
"""
sizes = t.__size__
if sizes[0] == sizes[1]:
return sizes[0]
try:
arch = t.__arch__ + 0
except (AttributeError, TypeError):
arch = target_arch
if arch is Arch.Unknown:
raise ArchDependentType('type size depends on target arch')
return sizes[arch]
def offsetof(mem_name, t, target_arch=Arch.Unknown):
"""
return the offset within the container where `member` can be found
:param mem_name: string, name of member
:param t: instance of struct/union/array
:param target_arch: target architecture, if required by type
:return: number of bytes the container has
"""
try:
offsets = t.__offsets__[t.__names__.index(mem_name)]
except:
raise MissingField('`{}` cannot be found in type {}'.format(mem_name, repr(t)))
if offsets[0] == offsets[1]:
return offsets[0]
try:
arch = t.__arch__ + 0
except (AttributeError, TypeError):
arch = target_arch
if arch is Arch.Unknown:
raise ArchDependentType('type offset depends on target arch')
return offsets[arch]
def struct(members, name=''):
"""
define a struct
:param members: list of type-name tuples, or complex types (for anonymous/rval structs/unions)
:param name: name of the struct.
if the struct is a nested definition, this will be the accessor (attribute name) for the struct.
:return: a new struct definition
"""
accessor = name
if not name:
name = 'nameless_struct'
return define_struct(name, members, accessor)
def union(members, name=''):
"""
define a union
:param members: list of type-name tuples, or complex types (for anonymous/rval structs/unions)
:param name: name of the union
if the union is a nested definition, this will be the accessor(attribute name) for the union.
:return: a new union definition
"""
accessor = name
if not name:
name = 'nameless_union'
return define_union(name, members, accessor)
def array(t, count):
"""
define an array of a specific type
:param t: type for the array
:param count: size of the array
:return: a new array type
"""
name = 'array.{}.{}'.format(count, t.__name__)
return define_array(name, t, count)
def define(name, sizes, signed=False, end=Endian.Little):
"""
define a new simple-type
:param name: name of the type, will be shown when printing the type or when used in arrays
:param sizes: an integer specifying the size of the type.
if its size vary between the two architectures, a tuple of sizes should be supplied
:param signed: signed / unsigned
:param end: little / big endianess
:return: a new simple type
"""
sizes = sizes if type(sizes) in [tuple, list] else (sizes,) * 2
return define_simple_type(name, sizes, signed, end)
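# Hedged usage sketch (type and field names below are invented):
#   uint32 = define('uint32', 4)
#   POINT = struct([(uint32, 'x'), (uint32, 'y')], 'POINT')
#   sizeof(array(POINT, 8))   # -> 64 on either architecture, since uint32 has one fixed size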
| mit | 7,111,667,672,866,274,000 | 31.305556 | 116 | 0.637432 | false |
spark8103/dlop | app/assets_manage.py | 1 | 18143 |
#-*- coding: UTF-8 -*-
import os, datetime, bson.objectid
from flask import Blueprint, request, redirect, render_template, url_for, flash
from flask.views import MethodView
from wtforms import Form, TextField, validators, SelectField, TextAreaField, BooleanField, SelectMultipleField
from app.models import db
from app import app
from app.db_query import query_server, query_script, query_idc, query_module
assets = Blueprint('assets', __name__, template_folder='templates')
WTF_CSRF_SECRET_KEY = 'QPo3QVT0UyBrPmoqhf'
class IdcForm(Form):
idc_code = TextField(u'IDC编码', [validators.Required(), validators.Length(min=4, max=10)])
bandwidth = TextField(u'带宽', [validators.NumberRange])
rack_num = TextField(u'机柜数量', [validators.NumberRange])
rack_code = TextField(u'机柜编号')
lan_ip = TextField(u'内网IP段')
wan_ip = TextField(u'公网IP段')
location = TextField(u'地理位置')
telephone = TextField(u'联系电话')
auth_code = TextField(u'验证码')
tax = TextField(u'传真')
status = SelectField(u'状态', choices=[(u'running', u'running'), (u'closed', u'closed')])
desc = TextAreaField(u'备注')
class ServerForm(Form):
host = TextField(u'主机名', [validators.Required()])
assetsnumber = TextField(u'资产编号')
serialnumber = TextField(u'设备编号')
osrelease = TextField(u'系统版本')
cpu_model = TextField(u'CPU型号')
cpu_num = TextField(u'CPU核数')
mem_total = TextField(u'内存大小')
type = TextField(u'设备类型')
idc_code = SelectField(u'机房编号', )
#idc_code = SelectField(u'机房编号', choices=[(u'SHT01', u'SHT01'), (u'JHT01', u'JHT01')])
rack_code = TextField(u'机柜编号')
ip = TextField(u'IP地址')
vip = TextField(u'VIP地址')
roles = TextField(u'角色')
biosversion = TextField(u'BIOS版本')
manufacturer = TextField(u'厂商')
productname = TextField(u'设备型号')
status = SelectField(u'状态', choices=[(u'running', u'running'), (u'closed', u'closed')])
desc = TextAreaField(u'备注')
clone = BooleanField(u'克隆')
def __init__(self, formdata=None, obj=None, prefix='', **kwargs):
#kwargs.setdefault('idc_code', 'some value')
Form.__init__(self, formdata, obj, prefix, **kwargs)
list_idc = query_idc()
self.idc_code.choices = zip(list_idc, list_idc)
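# Note on the pattern above (illustrative): WTForms binds SelectField choices per form instance,
# so re-querying the IDC list inside __init__ keeps the dropdown in sync with the database:
#   form = ServerForm(request.form)   # choices are refreshed on every request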
class idc(MethodView):
def get(self):
db_idc = db.Idc.find()
return render_template('assets/idc.html', title=(u'资产管理'), IDC=list(db_idc))
class idc_add(MethodView):
def get(self):
form = IdcForm(request.form)
return render_template('assets/idc_add.html', title=(u'资产管理'), form=form)
def post(self):
form = IdcForm(request.form)
if form.validate():
db_idc = db.Idc.find({"idc_code": form.idc_code.data})
if not db_idc.count():
db_idc = db.Idc()
db_idc["idc_code"] = form.idc_code.data
try:
db_idc["bandwidth"] = int(form.bandwidth.data)
except:
db_idc["bandwidth"] = 0
try:
db_idc["rack_num"] = int(form.rack_num.data)
except:
db_idc["rack_num"] = 0
db_idc["lan_ip"] = form.lan_ip.data
db_idc["wan_ip"] = form.wan_ip.data
db_idc["location"] = form.location.data
db_idc["telephone"] = form.telephone.data
db_idc["auth_code"] = form.auth_code.data
db_idc["tax"] = form.tax.data
db_idc["status"] = form.status.data
db_idc["desc"] = form.desc.data
db_idc["creation"] = datetime.datetime.now()
db_idc.save()
flash(db_idc["idc_code"] + (u'添加成功!'))
else:
flash(form.idc_code.data + (u'重复!'))
else:
flash(u"添加IDC错误,请检查添加字段!")
return render_template('assets/idc_add.html', title=(u'资产管理'), form=form)
class idc_edit(MethodView):
def get(self, slug):
db_idc = db.Idc.find({"idc_code": slug})
form = IdcForm(request.form)
if db_idc.count():
form.idc_code.data = db_idc[0]["idc_code"]
form.bandwidth.data = db_idc[0]["bandwidth"]
form.rack_num.data = db_idc[0]["rack_num"]
form.rack_code.data = db_idc[0]["rack_code"]
form.lan_ip.data = db_idc[0]["lan_ip"]
form.wan_ip.data = db_idc[0]["wan_ip"]
form.location.data = db_idc[0]["location"]
form.telephone.data = db_idc[0]["telephone"]
form.auth_code.data = db_idc[0]["auth_code"]
form.tax.data = db_idc[0]["tax"]
form.status.data = db_idc[0]["status"]
form.desc.data = db_idc[0]["desc"]
else:
flash(u'您编辑的idc编码不存在!')
return render_template('assets/idc_edit.html', title=(u'资产管理'), slug=slug, form=form)
def post(self, slug):
form = IdcForm(request.form)
if form.validate():
db_idc = db.Idc()
db_idc["idc_code"] = form.idc_code.data
try:
db_idc["bandwidth"] = int(form.bandwidth.data)
except:
db_idc["bandwidth"] = 0
try:
db_idc["rack_num"] = int(form.rack_num.data)
except:
db_idc["rack_num"] = 0
db_idc["lan_ip"] = form.lan_ip.data
db_idc["wan_ip"] = form.wan_ip.data
db_idc["location"] = form.location.data
db_idc["telephone"] = form.telephone.data
db_idc["auth_code"] = form.auth_code.data
db_idc["tax"] = form.tax.data
db_idc["status"] = form.status.data
db_idc["desc"] = form.desc.data
db_idc["creation"] = datetime.datetime.now()
db.Idc.find_and_modify({"idc_code": slug}, db_idc)
flash(db_idc["idc_code"] + (u'更新成功!'))
else:
flash(u"更新IDC错误,请检查相关字段!")
return redirect(url_for('assets.idc_edit', slug=slug))
class List(MethodView):
def get(self):
#salt_assets = eval(os.popen("""python scripts/salt_ext.py""").readlines()[0])
salt_assets = query_server()
return render_template('assets/list.html', title=(u'资产管理'), salt_assets=salt_assets)
class switch(MethodView):
def get(self):
#db_switch = db.Switch.find()
db_switch = []
return render_template('assets/switch.html', title=(u'资产管理'), IDC=list(db_switch))
class server(MethodView):
def get(self):
db_server = db.Device.find()
return render_template('assets/server.html', title=(u'资产管理'), servers=list(db_server))
class server_add(MethodView):
def get(self):
form = ServerForm(request.form)
return render_template('assets/server_add.html', title=(u'资产管理'), form=form)
def post(self):
form = ServerForm(request.form)
if form.validate():
db_server = db.Device.find({"host": form.host.data})
if not db_server.count():
db_server = db.Device()
db_server["host"] = form.host.data
db_server["assetsnumber"] = form.assetsnumber.data
db_server["serialnumber"] = form.serialnumber.data
db_server["osrelease"] = form.osrelease.data
db_server["cpu_model"] = form.cpu_model.data
try:
db_server["cpu_num"] = int(form.cpu_num.data)
except:
db_server["cpu_num"] = 0
try:
db_server["mem_total"] = int(form.mem_total.data)
except:
db_server["mem_total"] = 0
db_server["type"] = 'server'
db_server["idc_code"] = form.idc_code.data
db_server["rack_code"] = form.rack_code.data
try:
db_server["ip"] = form.ip.data.split(',')
except:
db_server["ip"] = []
try:
db_server["vip"] = form.vip.data.split(',')
except:
db_server["vip"] = []
try:
db_server["roles"] = form.roles.data.split(',')
except:
db_server["roles"] = []
db_server["biosversion"] = form.biosversion.data
db_server["manufacturer"] = form.manufacturer.data
db_server["productname"] = form.productname.data
db_server["status"] = form.status.data
db_server["desc"] = form.desc.data
db_server["creation"] = datetime.datetime.now()
db_server.save()
flash(db_server["host"] + (u'添加成功!'))
else:
flash(form.host.data + (u'重复!'))
else:
flash(u"添加服务器错误,请检查相关字段!")
return render_template('assets/server_add.html', title=(u'资产管理'), form=form)
class server_edit(MethodView):
def get(self, slug):
form = ServerForm(request.form)
db_server = db.Device.find({"_id": bson.objectid.ObjectId(slug)})
if db_server.count():
form.host.data = db_server[0]["host"]
form.assetsnumber.data = db_server[0]["assetsnumber"]
form.serialnumber.data = db_server[0]["serialnumber"]
form.osrelease.data = db_server[0]["osrelease"]
form.cpu_model.data = db_server[0]["cpu_model"]
form.cpu_num.data = db_server[0]["cpu_num"]
form.mem_total.data = db_server[0]["mem_total"]
form.type.data = db_server[0]["type"]
form.idc_code.data = db_server[0]["idc_code"]
form.rack_code.data = db_server[0]["rack_code"]
form.ip.data = ','.join(db_server[0]["ip"])
form.vip.data = ','.join(db_server[0]["vip"])
form.roles.data = ','.join(db_server[0]["roles"])
form.biosversion.data = db_server[0]["biosversion"]
form.manufacturer.data = db_server[0]["manufacturer"]
form.productname.data = db_server[0]["productname"]
form.status.data = db_server[0]["status"]
form.desc.data = db_server[0]["desc"]
else:
flash(u'您编辑的服务器不存在!')
return render_template('assets/server_edit.html', title=(u'资产管理'), slug=slug, form=form)
def post(self, slug):
form = ServerForm(request.form)
if form.validate() and ( not form.clone.data):
db_server = db.Device()
db_server["host"] = form.host.data
db_server["assetsnumber"] = form.assetsnumber.data
db_server["serialnumber"] = form.serialnumber.data
db_server["osrelease"] = form.osrelease.data
db_server["cpu_model"] = form.cpu_model.data
try:
db_server["cpu_num"] = int(form.cpu_num.data)
except:
db_server["cpu_num"] = 0
try:
db_server["mem_total"] = int(form.mem_total.data)
except:
db_server["mem_total"] = 0
db_server["type"] = 'server'
db_server["idc_code"] = form.idc_code.data
db_server["rack_code"] = form.rack_code.data
try:
db_server["ip"] = form.ip.data.split(',')
except:
db_server["ip"] = []
try:
db_server["vip"] = form.vip.data.split(',')
except:
db_server["vip"] = []
try:
db_server["roles"] = form.roles.data.split(',')
except:
db_server["roles"] = []
db_server["biosversion"] = form.biosversion.data
db_server["manufacturer"] = form.manufacturer.data
db_server["productname"] = form.productname.data
db_server["status"] = form.status.data
db_server["desc"] = form.desc.data
db_server["creation"] = datetime.datetime.now()
db.Device.find_and_modify({"_id": bson.objectid.ObjectId(slug)}, db_server)
flash(form.host.data + (u'更新成功!'))
elif form.validate() and form.clone.data:
db_server = db.Device.find({"host": form.host.data})
if not db_server.count():
db_server = db.Device()
db_server["host"] = form.host.data
db_server["assetsnumber"] = form.assetsnumber.data
db_server["serialnumber"] = form.serialnumber.data
db_server["osrelease"] = form.osrelease.data
db_server["cpu_model"] = form.cpu_model.data
try:
db_server["cpu_num"] = int(form.cpu_num.data)
except:
db_server["cpu_num"] = 0
try:
db_server["mem_total"] = int(form.mem_total.data)
except:
db_server["mem_total"] = 0
db_server["type"] = 'server'
db_server["idc_code"] = form.idc_code.data
db_server["rack_code"] = form.rack_code.data
try:
db_server["ip"] = form.ip.data.split(',')
except:
db_server["ip"] = []
try:
db_server["vip"] = form.vip.data.split(',')
except:
db_server["vip"] = []
try:
db_server["roles"] = form.roles.data.split(',')
except:
db_server["roles"] = []
db_server["biosversion"] = form.biosversion.data
db_server["manufacturer"] = form.manufacturer.data
db_server["productname"] = form.productname.data
db_server["status"] = form.status.data
db_server["desc"] = form.desc.data
db_server["creation"] = datetime.datetime.now()
db_server.save()
flash(u'克隆' + form.host.data + (u'成功!'))
else:
flash(u'克隆' + form.host.data + (u'重复!'))
else:
flash(u"更新或克隆服务器错误,请检查相关字段!")
return redirect(url_for('assets.server_edit', slug=slug))
class server_del(MethodView):
def get(self, slug):
db_server = db.Device.find({"_id": bson.objectid.ObjectId(slug)})
        if db_server.count():
            server_name = db_server[0]["host"]
            db.device.remove({"_id": bson.objectid.ObjectId(slug)})
            flash(u'删除服务器' + server_name + u'成功!')
        else:
            flash(u'您要删除的服务器不存在!')
return redirect(url_for('assets.server'))
class server_salt_import(MethodView):
def get(self):
db_server = eval(os.popen("""python script/sys_salt_ext.py""").read())
return render_template('assets/server_salt_import.html', title=(u'资产管理'), servers=db_server)
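    # Illustrative sketch (an assumption, not part of the original code): the
    # helper script/sys_salt_ext.py is expected to print a Python dict literal
    # with one entry per salt minion, keyed by the checkbox value used in the
    # import form, e.g.:
    #
    #   {"0": {"host": "web01", "serialnumber": "ABC123", "os": "CentOS",
    #          "osrelease": "6.5", "cpu_model": "Xeon E5", "num_cpus": 8,
    #          "mem_total": 16384, "ipv4": ["10.0.0.11"], "biosversion": "1.0",
    #          "manufacturer": "Dell", "productname": "R620", "roles": ["web"]}}
    #
    # The field names are taken from the keys accessed in post() below; the
    # values shown here are purely hypothetical.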
def post(self):
re_host = []
assets = eval(os.popen("""python script/sys_salt_ext.py""").read())
servers = request.form.values()
if "checkedAll" in servers:
servers.remove("checkedAll")
for i in servers:
host = assets[i]
db_server = db.Device.find({"host": host["host"]})
if not db_server.count():
db_server = db.Device()
db_server["host"] = host["host"]
db_server["assetsnumber"] = ""
db_server["serialnumber"] = host["serialnumber"]
db_server["osrelease"] = host["os"] + " " + host["osrelease"]
db_server["cpu_model"] = host["cpu_model"]
db_server["cpu_num"] = host["num_cpus"]
db_server["mem_total"] = host["mem_total"]
db_server["type"] = 'server'
db_server["idc_code"] = ''
db_server["rack_code"] = ''
db_server["ip"] = host["ipv4"]
db_server["vip"] = []
if "roles" not in host:
db_server["roles"] = []
else:
db_server["roles"] = host["roles"]
db_server["biosversion"] = host["biosversion"]
db_server["manufacturer"] = host["manufacturer"]
db_server["productname"] = host["productname"]
db_server["status"] = 'running'
db_server["desc"] = ''
db_server["creation"] = datetime.datetime.now()
db_server.save()
re_host.append(host["host"] + u'成功!')
else:
re_host.append(host["host"] + u'重复!')
return render_template('assets/server_salt_import.html', title=(u'资产管理'), servers={}, host=re_host)
# Register the urls
assets.add_url_rule('/assets/', view_func=List.as_view('list'))
assets.add_url_rule('/assets/server/', view_func=server.as_view('server'))
assets.add_url_rule('/assets/server/add', view_func=server_add.as_view('server_add'))
assets.add_url_rule('/assets/server/edit_<slug>', view_func=server_edit.as_view('server_edit'))
assets.add_url_rule('/assets/server/del_<slug>', view_func=server_del.as_view('server_del'))
assets.add_url_rule('/assets/server_salt_import', view_func=server_salt_import.as_view('server_salt_import'))
assets.add_url_rule('/assets/switch/', view_func=switch.as_view('switch'))
assets.add_url_rule('/assets/idc/', view_func=idc.as_view('idc'))
assets.add_url_rule('/assets/idc/add', view_func=idc_add.as_view('idc_add'))
assets.add_url_rule('/assets/idc/edit_<slug>', view_func=idc_edit.as_view('idc_edit'))
| apache-2.0 | 1,741,695,170,881,628,700 | 43.148241 | 110 | 0.531842 | false |
liuzzfnst/tp-libvirt | libvirt/tests/src/multiqueue.py | 6 | 11761 | import logging
import time
import threading
import re
from autotest.client.shared import error, utils
from virttest import libvirt_vm, virsh, remote
from virttest.libvirt_xml import vm_xml
from virttest.utils_test import libvirt as utlv
def config_xml_multiqueue(vm_name, vcpu=1, multiqueue=4):
"""
Configure vCPU and interface for multiqueue test.
"""
vm_xml.VMXML.set_vm_vcpus(vm_name, int(vcpu), int(vcpu))
vm_xml.VMXML.set_multiqueues(vm_name, multiqueue)
logging.debug("XML:%s", virsh.dumpxml(vm_name))
def get_channel_info(vm, interface="eth0"):
"""
Get channel parameters of interface in vm.
"""
cmd = "ethtool -l %s" % interface
session = vm.wait_for_login()
s, o = session.cmd_status_output(cmd)
session.close()
if s:
raise error.TestFail("Get channel parameters for vm failed:%s"
% o)
maximum = {}
current = {}
# Just for temp
settings = {}
for line in o.splitlines():
if line.count("maximums"):
settings = maximum
elif line.count("Current"):
settings = current
parameter = line.split(':')[0].strip()
if parameter in ["RX", "TX", "Other", "Combined"]:
settings[parameter] = line.split(':')[1].strip()
return maximum, current
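# Illustrative note (not part of the original test): the `ethtool -l <iface>`
# output parsed by get_channel_info() typically looks like:
#
#   Channel parameters for eth0:
#   Pre-set maximums:
#   RX:             0
#   TX:             0
#   Other:          0
#   Combined:       4
#   Current hardware settings:
#   RX:             0
#   TX:             0
#   Other:          0
#   Combined:       2
#
# which the function would return as:
#   maximum = {'RX': '0', 'TX': '0', 'Other': '0', 'Combined': '4'}
#   current = {'RX': '0', 'TX': '0', 'Other': '0', 'Combined': '2'}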
def setting_channel(vm, interface, parameter, value):
"""
Setting channel parameters for interface.
"""
cmd = "ethtool -L %s %s %s" % (interface, parameter, value)
session = vm.wait_for_login()
s, o = session.cmd_status_output(cmd)
session.close()
if s:
logging.debug("Setting %s to %s failed:%s", parameter, value, o)
return False
maximum, current = get_channel_info(vm, interface)
try:
if int(current['Combined']) == int(value):
return True
except KeyError:
pass
logging.debug("Setting passed, but checking failed:%s", current)
return False
def get_vhost_pids(vm):
vmpid = vm.get_pid()
if vmpid is None:
raise error.TestFail("Couldn't get vm's pid, its state: %s"
% vm.state())
output = utils.run("ps aux | grep [v]host-%s | awk '{print $2}'"
% vmpid).stdout
return output.splitlines()
def top_vhost(pids, expected_running_vhosts=1, timeout=15):
"""
Use top tool to get vhost state.
"""
pids_str = ','.join(pids)
top_cmd = "top -n 1 -p %s -b" % pids_str
timeout = int(timeout)
while True:
output = utils.run(top_cmd).stdout
logging.debug(output)
process_cpus = []
for line in output.splitlines():
if line.count("vhost"):
process_cpus.append(line.split()[8])
if len(process_cpus) != len(pids):
raise error.TestFail("Couldn't get enough vhost processes.")
running_vhosts = 0
for cpu in process_cpus:
if float(cpu):
running_vhosts += 1
if running_vhosts == int(expected_running_vhosts):
break # Got expected result
else:
if timeout > 0:
timeout -= 3
time.sleep(3)
logging.debug("Trying again to avoid occassional...")
continue
else:
raise error.TestFail("Couldn't get enough running vhosts:%s "
"from all vhosts:%s, other CPU status of "
"vhost is 0."
% (running_vhosts, len(pids)))
def set_cpu_affinity(vm, affinity_cpu=0):
session = vm.wait_for_login()
try:
session.cmd("service irqbalance stop")
output = session.cmd_output("cat /proc/interrupts")
logging.debug(output)
interrupts = []
for line in output.splitlines():
if re.search("virtio.*input", line):
interrupts.append(line.split(':')[0].strip())
# Setting
for interrupt in interrupts:
cmd = ("echo %s > /proc/irq/%s/smp_affinity"
% (affinity_cpu, interrupt))
session.cmd(cmd)
finally:
session.close()
def get_cpu_affinity(vm):
"""
Get cpu affinity.
"""
session = vm.wait_for_login()
output = session.cmd_output("cat /proc/interrupts")
logging.debug(output)
session.close()
output = output.splitlines()
cpu_count = len(output[0].split())
cpu_state = {}
for line in output[1:]:
if re.search("virtio.*input", line):
rows = line.split()
interrupt_id = rows[0].strip().rstrip(':')
state = []
for count in range(cpu_count):
state.append(rows[count+1])
cpu_state[interrupt_id] = state
return cpu_state
def check_cpu_affinity(vm, affinity_cpu_number):
"""
    Compare per-CPU interrupt count growth over an interval to verify that
    virtio input interrupts are pinned to the expected CPU.
"""
cpu_state_before = get_cpu_affinity(vm)
time.sleep(10) # Set a distance
cpu_state_after = get_cpu_affinity(vm)
logging.debug("Before:%s", cpu_state_before)
logging.debug("After:%s", cpu_state_after)
if len(cpu_state_before) != len(cpu_state_after):
raise error.TestFail("Get unmatched virtio input interrupts.")
# Count cpu expanded size
expand_cpu = {}
for interrupt in cpu_state_before.keys():
before = cpu_state_before[interrupt]
after = cpu_state_after[interrupt]
expand_ipt = []
for i in range(len(before)):
expand_ipt.append(int(after[i]) - int(before[i]))
expand_cpu[interrupt] = expand_ipt
logging.debug("Distance: %s", expand_cpu)
# Check affinity
for interrupt in expand_cpu.keys():
expand_ipt = expand_cpu[interrupt]
if max(expand_ipt) != expand_ipt[int(affinity_cpu_number)]:
raise error.TestFail("Not affinity cpu to number %s: %s"
% (affinity_cpu_number, expand_ipt))
def get_vm_interface(vm, mac):
session = vm.wait_for_login()
links = session.cmd_output("ip link show")
session.close()
interface = None
for line in links.splitlines():
        if line.count("link/ether"):
            elems = line.split()
if elems[1] == mac.lower():
return interface
else:
elems = line.split(':')
interface = elems[1].strip()
return None
def prepare_vm_queue(vm, params):
"""
Set vm queue for following test.
"""
vcpu = params.get("vcpu_count", 1)
queue = params.get("queue_count", 4)
if vm.is_alive():
vm.destroy()
config_xml_multiqueue(vm.name, vcpu, queue)
if vm.is_dead():
vm.start()
mac = vm.get_mac_address()
interface = get_vm_interface(vm, mac)
maximum, _ = get_channel_info(vm, interface)
    if int(maximum.get("Combined")) != int(queue):
raise error.TestFail("Set maximum queue is not effective:%s" % maximum)
setting_channel(vm, interface, "combined", queue)
_, current = get_channel_info(vm, interface)
    if int(current.get("Combined")) != int(queue):
raise error.TestFail("Set current queue is not effective:%s" % current)
def start_iperf(vm, params):
"""
Start iperf server on host and run client in vm.
"""
def iperf_func(session, server_ip, iperf_type, prefix=None, suffix=None):
"""
Thread to run iperf.
"""
if iperf_type == "server":
cmd = "iperf -s"
elif iperf_type == "client":
cmd = "iperf -c %s -t 300" % server_ip
if prefix:
cmd = "%s %s" % (prefix, cmd)
if suffix:
cmd = "%s %s" % (cmd, suffix)
session.cmd(cmd)
server_ip = params.get("local_ip")
server_pwd = params.get("local_pwd")
queue_count = int(params.get("queue_count", 1))
vcpu_count = int(params.get("vcpu_count", 1))
if queue_count > vcpu_count:
client_count = queue_count
else:
client_count = queue_count * 2
host_session = remote.remote_login("ssh", server_ip, 22, "root",
server_pwd, "#")
# We may start several sessions for client
client_sessions = []
# Start server
prefix = params.get("iperf_prefix")
host_t = threading.Thread(target=iperf_func,
args=(host_session, server_ip, "server"))
client_ts = []
for count in range(client_count):
client_session = vm.wait_for_login()
client_t = threading.Thread(target=iperf_func,
args=(client_session, server_ip,
"client", prefix))
client_sessions.append(client_session)
client_ts.append(client_t)
host_t.start()
time.sleep(5)
for client_t in client_ts:
client_t.start()
time.sleep(5)
# Wait for iperf running
try:
if not host_t.isAlive():
raise error.TestFail("Start iperf on server failed.")
for client_t in client_ts:
if not client_t.isAlive():
raise error.TestFail("Start iperf on client failed.")
except error.TestFail:
host_session.close()
for client_session in client_sessions:
client_session.close()
host_t.join(2)
for client_t in client_ts:
client_t.join(2)
raise
# All iperf are working
return host_session, client_sessions
def run(test, params, env):
"""
Test multi function of vm devices.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
# To avoid dirty after starting new vm
if vm.is_alive():
vm.destroy()
new_vm_name = "mq_new_%s" % vm_name
utlv.define_new_vm(vm_name, new_vm_name)
# Create a new vm object for convenience
new_vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
vm.address_cache)
host_session = None
client_sessions = []
try:
# Config new vm for multiqueue
try:
prepare_vm_queue(new_vm, params)
except Exception:
if int(params.get("queue_count")) > 8:
params["queue_count"] = 8
prepare_vm_queue(new_vm, params)
# Start checking
vhost_pids = get_vhost_pids(new_vm)
logging.debug("vhosts: %s", vhost_pids)
if len(vhost_pids) != int(params.get("queue_count")):
raise error.TestFail("Vhost count is not matched with queue.")
affinity_cpu_number = params.get("affinity_cpu_number", '1')
# Here, cpu affinity should be in this format:
# 0001 means CPU0, 0010 means CPU1...
affinity_format = {'1': 2, '2': 4, '3': 8}
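            # For example, affinity_cpu_number '1' maps to bitmask 2 (0b0010,
            # i.e. CPU1), '2' to 4 (0b0100, CPU2) and '3' to 8 (0b1000, CPU3);
            # set_cpu_affinity() writes that mask to /proc/irq/<n>/smp_affinity.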
if params.get("application") == "affinity":
affinity_cpu = affinity_format[affinity_cpu_number]
set_cpu_affinity(vm, affinity_cpu)
# Run iperf
# Use iperf to make interface busy, otherwise we may not get
# expected results
host_session, client_sessions = start_iperf(vm, params)
# Wait for cpu consumed
time.sleep(10)
# Top to get vhost state or get cpu affinity
if params.get("application") == "iperf":
top_vhost(vhost_pids, params.get("vcpu_count", 1))
elif params.get("application") == "affinity":
check_cpu_affinity(vm, affinity_cpu_number)
finally:
if host_session:
host_session.close()
for client_session in client_sessions:
client_session.close()
if new_vm.is_alive():
new_vm.destroy()
new_vm.undefine()
| gpl-2.0 | 542,402,993,461,577,400 | 32.69914 | 79 | 0.567724 | false |
hanfi/AWS-EC2-Backup | backup.py | 1 | 2850 | import boto
import boto.utils
import boto.ec2
import boto.ec2.cloudwatch
import datetime
import properties
import logging
import time
logging.basicConfig(filename=properties.log_filename, level=properties.log_lvl)
log = logging.getLogger("Aws-EC2-Backup-Tool")
log.info("AwsBackupTool Start")
def format_name(meta_data):
return "Backup-" + meta_data["instance-id"] + "-" + str(datetime.datetime.now()).replace(" ", "_").replace(":", "-")
# Get instance metadata from the EC2 instance metadata service
instance_meta = boto.utils.get_instance_metadata()
# [:-1] strips the trailing AZ letter: the metadata exposes the availability
# zone (e.g. us-east-1a), not the region.
region = str(instance_meta['placement']['availability-zone'][:-1])
#format Backup name
name = format_name(instance_meta)
#aws ec2 connection object instance
ec2_conn = boto.ec2.connect_to_region(region)
#aws Cloudwatch connection object instance
cw_conn = boto.ec2.cloudwatch.connect_to_region(region)
try:
start_time = time.time()
#image creation
image_id = ec2_conn.create_image(instance_meta["instance-id"], name, name, True, None, properties.is_dry_run)
    # Wait 30 seconds because sometimes the AWS image isn't available immediately after its creation
time.sleep(30)
#get image object to add tags
image = ec2_conn.get_image(image_id)
    # Wait for the image creation to finish
    log.info("waiting for image creation to end")
while(str(image.state) == "pending"):
image = ec2_conn.get_image(image_id)
time.sleep(1)
time_spent = time.time() - start_time
log.info("created image : "+image_id+" created in "+str(time_spent)+" Seconds")
#tag the newly created image
image.add_tag("date_creation", str(datetime.datetime.now()))
image.add_tag("created_by", "AwsBackupTool")
image.add_tag("instance_name",properties.instance_name)
    # Publish the backup duration metric to CloudWatch
cw_conn.put_metric_data(properties.metric_name, properties.dimensions, value=time_spent, unit="Seconds", dimensions={"instance_id": instance_meta["instance-id"]})
#remove old images by the date_creation tag
    images = ec2_conn.get_all_images(filters={"tag:created_by": "AwsBackupTool", "tag:instance_name": properties.instance_name})
for image in images:
image_creation_date = datetime.datetime.strptime(image.__dict__["tags"]["date_creation"], "%Y-%m-%d %H:%M:%S.%f")
limit_date = datetime.datetime.now() - datetime.timedelta(days=properties.days_to_retain)
if image_creation_date < limit_date:
image.deregister()
log.info(str(image)+" deleted, because its retention period was exceeded ")
log.info("AwsBackupTool Ended")
except boto.exception.BotoServerError as ex:
log.error(ex)
cw_conn.put_metric_data(properties.metric_name, properties.dimensions, value="-1", unit="Seconds", dimensions={"instance_id": instance_meta["instance-id"]})
| apache-2.0 | 2,484,895,568,025,097,000 | 45.721311 | 166 | 0.708772 | false |
jut-io/jut-python-tools | tests/util.py | 1 | 10111 | """
testing utilities
"""
import os
import select
import subprocess
import tempfile
import time
from contextlib import contextmanager
from jut import config
from jut.api import accounts, auth, data_engine, deployments
from jut.common import info, error
# data service only knows about space changes every 30s
SPACE_CREATE_TIMEOUT = 30
@contextmanager
def temp_jut_tools_home():
"""
context manager to allow unit tests to switch the jut tools home and
therefore not collide with each other or existing jut-tools configurations
Usage:
with temp_jut_tools_home():
# do your thing
"""
home_override = os.environ.get('HOME_OVERRIDE')
try:
new_home = os.path.abspath(tempfile.mkdtemp())
os.environ['HOME_OVERRIDE'] = new_home
yield new_home
finally:
os.environ['HOME_OVERRIDE'] = home_override
def get_test_user_pass():
return os.environ.get('JUT_USER'), os.environ.get('JUT_PASS')
def get_test_app_url():
return os.environ.get('JUT_APP_URL')
class Spawn(object):
"""
    Expect-like wrapper around a spawned process, providing a limited but
    useful subset of expect functionality.
"""
def __init__(self, process):
self.process = process
def expect_output_eof(self):
"""
expect the stdout to be closed
"""
line = self.process.stdout.read()
if line != '':
raise Exception('Expected eof on stdout but got "%s"' % line)
def expect_error_eof(self):
"""
expect the stderr to be closed
"""
line = self.process.stderr.read()
if line != '':
raise Exception('Expected eof on stderr but got "%s"' % line)
def expect_eof(self):
"""
expect eof from stdout and stderr
"""
self.expect_output_eof()
self.expect_error_eof()
def expect_output(self, message):
"""
expect the stdout contains the following message in its output before
proceeding
"""
# use select to timeout when there is no output
read_ready, _, _ = select.select([self.process.stdout.fileno()], [], [], 5)
if read_ready:
length = len(message)
line = self.process.stdout.read(length)
if message == line:
return
info(self.read_output())
error(self.read_error())
raise Exception('Expected "%s" got "%s"' % (message, line))
else:
info(self.read_output())
error(self.read_error())
raise Exception('Expected "%s" got nothing' % message)
def expect_error(self, message):
"""
expect the stderr contains the following message in its output before
proceeding
"""
# use select to timeout when there is no output
read_ready, _, _ = select.select([self.process.stderr.fileno()], [], [], 5)
if read_ready:
length = len(message)
line = self.process.stderr.read(length)
if message == line:
return
info(self.read_output())
error(self.read_error())
raise Exception('Expected "%s" got "%s"' % (message, line))
else:
info(self.read_output())
error(self.read_error())
raise Exception('Expected "%s" got nothing' % message)
def read_output(self):
"""
read and return the whole stdout output
"""
data = None
result = ''
while data != '':
data = self.process.stdout.read()
result += data
return result
def read_error(self):
"""
read and return the whole stderr output
"""
data = None
result = ''
while data != '':
data = self.process.stderr.read()
result += data
return result
def send(self, message):
"""
send the exact message to the stdin of the running process
"""
self.process.stdin.write(message)
self.process.stdin.flush()
def wait(self):
"""
wait for the process to complete and return the exit status code
"""
return self.process.wait()
def status(self):
"""
return the exit status code for the process
"""
return self.process.wait()
def expect_status(self, expected_status):
"""
expect the provided status exit code otherwise output the full stdout
and stderr output at that specific point in time
"""
status = self.wait()
if status != expected_status:
info(self.read_output())
error(self.read_error())
raise Exception('Expected status %s, got %s' % (expected_status, status))
def send_signal(self, signal_name):
"""
send the signal provided to the currently running process
"""
self.process.send_signal(signal_name)
def jut(*args,
**kwargs):
"""
FOR TESTING ONLY:
    used to spawn a jut tools command line invocation directly from the source
    tree and interact with that command through an expect-like mechanism
"""
if 'stdin' in kwargs:
stdin = kwargs['stdin']
else:
stdin = subprocess.PIPE
jut_cmd = ['python', 'jut/cli.py'] + list(args)
return Spawn(subprocess.Popen(jut_cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE))
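# Minimal usage sketch (illustrative only; the sub-command and its output are
# hypothetical):
#
#   proc = jut('deployments', 'list')
#   proc.expect_status(0)     # dumps captured stdout/stderr on mismatch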
def create_user_in_default_deployment(name, username, email, password):
"""
"""
configuration = config.get_default()
app_url = configuration['app_url']
deployment_name = configuration['deployment_name']
client_id = configuration['client_id']
client_secret = configuration['client_secret']
token_manager = auth.TokenManager(client_id=client_id,
client_secret=client_secret,
app_url=app_url)
delete_user_from_default_deployment(username, password)
accounts.create_user(name,
username,
email,
password,
token_manager=token_manager,
app_url=app_url)
deployments.add_user(username,
deployment_name,
token_manager=token_manager,
app_url=app_url)
def delete_user_from_default_deployment(username, password):
"""
"""
configuration = config.get_default()
app_url = configuration['app_url']
client_id = configuration['client_id']
client_secret = configuration['client_secret']
token_manager = auth.TokenManager(client_id=client_id,
client_secret=client_secret,
app_url=app_url)
if accounts.user_exists(username,
token_manager=token_manager,
app_url=app_url):
delete_token_manager = auth.TokenManager(username=username,
password=password,
app_url=app_url)
accounts.delete_user(username,
token_manager=delete_token_manager,
app_url=app_url)
def get_webhook_url(space):
"""
"""
configuration = config.get_default()
deployment_name = configuration['deployment_name']
app_url = configuration['app_url']
client_id = configuration['client_id']
client_secret = configuration['client_secret']
token_manager = auth.TokenManager(client_id=client_id,
client_secret=client_secret,
app_url=app_url)
import_url = data_engine.get_import_data_url(deployment_name,
token_manager=token_manager,
app_url=app_url)
api_key = deployments.get_apikey(deployment_name,
token_manager=token_manager,
app_url=app_url)
return '%s/api/v1/import/webhook/?space=%s&data_source=webhook&apikey=%s' % \
(import_url, space, api_key)
def create_space_in_default_deployment(space_name):
configuration = config.get_default()
deployment_name = configuration['deployment_name']
app_url = configuration['app_url']
client_id = configuration['client_id']
client_secret = configuration['client_secret']
token_manager = auth.TokenManager(client_id=client_id,
client_secret=client_secret,
app_url=app_url)
if deployments.space_exists(deployment_name,
space_name,
token_manager=token_manager,
app_url=app_url):
delete_space_from_default_deployment(space_name)
deployments.create_space(deployment_name,
space_name,
token_manager=token_manager,
app_url=app_url)
time.sleep(SPACE_CREATE_TIMEOUT)
def delete_space_from_default_deployment(space_name):
configuration = config.get_default()
deployment_name = configuration['deployment_name']
app_url = configuration['app_url']
client_id = configuration['client_id']
client_secret = configuration['client_secret']
token_manager = auth.TokenManager(client_id=client_id,
client_secret=client_secret,
app_url=app_url)
deployments.delete_space(deployment_name,
space_name,
token_manager=token_manager,
app_url=app_url)
| mit | 6,053,857,919,009,569,000 | 26.853994 | 85 | 0.543962 | false |
alxgu/ansible | lib/ansible/modules/network/fortios/fortios_log_syslogd2_setting.py | 23 | 12038 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The fortiosapi library uses Python logging; its output can be enabled via the
# corresponding setting in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_syslogd2_setting
short_description: Global settings for remote syslog server in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_syslogd2 feature and setting category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_syslogd2_setting:
description:
- Global settings for remote syslog server.
default: null
suboptions:
certificate:
description:
- Certificate used to communicate with Syslog server. Source certificate.local.name.
custom-field-name:
description:
- Custom field name for CEF format logging.
suboptions:
custom:
description:
- Field custom name.
id:
description:
- Entry ID.
required: true
name:
description:
- Field name.
enc-algorithm:
description:
- Enable/disable reliable syslogging with TLS encryption.
choices:
- high-medium
- high
- low
- disable
facility:
description:
- Remote syslog facility.
choices:
- kernel
- user
- mail
- daemon
- auth
- syslog
- lpr
- news
- uucp
- cron
- authpriv
- ftp
- ntp
- audit
- alert
- clock
- local0
- local1
- local2
- local3
- local4
- local5
- local6
- local7
format:
description:
- Log format.
choices:
- default
- csv
- cef
mode:
description:
- Remote syslog logging over UDP/Reliable TCP.
choices:
- udp
- legacy-reliable
- reliable
port:
description:
- Server listen port.
server:
description:
- Address of remote syslog server.
source-ip:
description:
- Source IP address of syslog.
status:
description:
- Enable/disable remote syslog logging.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Global settings for remote syslog server.
fortios_log_syslogd2_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_syslogd2_setting:
certificate: "<your_own_value> (source certificate.local.name)"
custom-field-name:
-
custom: "<your_own_value>"
id: "6"
name: "default_name_7"
enc-algorithm: "high-medium"
facility: "kernel"
format: "default"
mode: "udp"
port: "12"
server: "192.168.100.40"
source-ip: "84.230.14.43"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_syslogd2_setting_data(json):
option_list = ['certificate', 'custom-field-name', 'enc-algorithm',
'facility', 'format', 'mode',
'port', 'server', 'source-ip',
'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def log_syslogd2_setting(data, fos):
vdom = data['vdom']
log_syslogd2_setting_data = data['log_syslogd2_setting']
flattened_data = flatten_multilists_attributes(log_syslogd2_setting_data)
filtered_data = filter_log_syslogd2_setting_data(flattened_data)
return fos.set('log.syslogd2',
'setting',
data=filtered_data,
vdom=vdom)
def fortios_log_syslogd2(data, fos):
login(data)
if data['log_syslogd2_setting']:
resp = log_syslogd2_setting(data, fos)
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"log_syslogd2_setting": {
"required": False, "type": "dict",
"options": {
"certificate": {"required": False, "type": "str"},
"custom-field-name": {"required": False, "type": "list",
"options": {
"custom": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"name": {"required": False, "type": "str"}
}},
"enc-algorithm": {"required": False, "type": "str",
"choices": ["high-medium", "high", "low",
"disable"]},
"facility": {"required": False, "type": "str",
"choices": ["kernel", "user", "mail",
"daemon", "auth", "syslog",
"lpr", "news", "uucp",
"cron", "authpriv", "ftp",
"ntp", "audit", "alert",
"clock", "local0", "local1",
"local2", "local3", "local4",
"local5", "local6", "local7"]},
"format": {"required": False, "type": "str",
"choices": ["default", "csv", "cef"]},
"mode": {"required": False, "type": "str",
"choices": ["udp", "legacy-reliable", "reliable"]},
"port": {"required": False, "type": "int"},
"server": {"required": False, "type": "str"},
"source-ip": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log_syslogd2(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,696,767,529,458,629,600 | 30.595801 | 104 | 0.508805 | false |
SiLab-Bonn/Scarce | scarce/testing/test_solver.py | 1 | 1458 | import unittest
import fipy
import numpy as np
from scarce.examples import potential_1D
from scarce import constant, fields
from scipy import constants
class TestSolver(unittest.TestCase):
def test_linear_poison_solver(self):
''' Compare the result of the poison solution with
analytical result.
'''
# Electrical properties
n_eff = 5e12 # Effective doping concentration
rho = constants.elementary_charge * n_eff * (1e-4) ** 3 # Charge density in C / um3
        epsilon = constant.epsilon_s * 1e-6  # Permittivity of silicon in F/um
# External voltages in V
V_read = -0
# Geometry
dx = 0.01 # Grid spacing / resolution
L = 200. # Length of simulation domain / width of sensor in um
# Create mesh
nx = L / dx # Number of space points
mesh = fipy.Grid1D(dx=np.ones((int(nx), )) * dx, nx=nx)
X = np.array(mesh.getFaceCenters()[0, :])
for V_bias in range(-10, -200, -20):
# Get 1D potential with numerical solver
potential = potential_1D.get_potential(mesh, rho, epsilon, L, V_read, V_bias)
# Get correct analytical solution
potential_a = fields.get_potential_planar_analytic_1D(X, V_bias=V_bias, V_readout=V_read, n_eff=n_eff, D=L)
self.assertTrue(np.allclose(potential, potential_a[:-1], atol=1e-1))
if __name__ == "__main__":
unittest.main()
| mit | -2,607,168,113,285,467,600 | 32.136364 | 119 | 0.613855 | false |
pnorman/mapnik | scons/scons-local-2.4.1/SCons/Tool/mwcc.py | 6 | 6871 | """SCons.Tool.mwcc
Tool-specific initialization for the Metrowerks CodeWarrior compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwcc.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog"
import os
import os.path
import SCons.Util
def set_vars(env):
"""Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars
MWCW_VERSIONS is set to a list of objects representing installed versions
MWCW_VERSION is set to the version object that will be used for building.
MWCW_VERSION can be set to a string during Environment
construction to influence which version is chosen, otherwise
the latest one from MWCW_VERSIONS is used.
Returns true if at least one version is found, false otherwise
"""
desired = env.get('MWCW_VERSION', '')
# return right away if the variables are already set
if isinstance(desired, MWVersion):
return 1
elif desired is None:
return 0
versions = find_versions()
version = None
if desired:
for v in versions:
if str(v) == desired:
version = v
elif versions:
version = versions[-1]
env['MWCW_VERSIONS'] = versions
env['MWCW_VERSION'] = version
if version is None:
return 0
env.PrependENVPath('PATH', version.clpath)
env.PrependENVPath('PATH', version.dllpath)
ENV = env['ENV']
ENV['CWFolder'] = version.path
ENV['LM_LICENSE_FILE'] = version.license
plus = lambda x: '+%s' % x
ENV['MWCIncludes'] = os.pathsep.join(map(plus, version.includes))
ENV['MWLibraries'] = os.pathsep.join(map(plus, version.libs))
return 1
def find_versions():
"""Return a list of MWVersion objects representing installed versions"""
versions = []
### This function finds CodeWarrior by reading from the registry on
### Windows. Some other method needs to be implemented for other
### platforms, maybe something that calls env.WhereIs('mwcc')
if SCons.Util.can_read_reg:
try:
HLM = SCons.Util.HKEY_LOCAL_MACHINE
product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions'
product_key = SCons.Util.RegOpenKeyEx(HLM, product)
i = 0
while True:
name = product + '\\' + SCons.Util.RegEnumKey(product_key, i)
name_key = SCons.Util.RegOpenKeyEx(HLM, name)
try:
version = SCons.Util.RegQueryValueEx(name_key, 'VERSION')
path = SCons.Util.RegQueryValueEx(name_key, 'PATH')
mwv = MWVersion(version[0], path[0], 'Win32-X86')
versions.append(mwv)
except SCons.Util.RegError:
pass
i = i + 1
except SCons.Util.RegError:
pass
return versions
class MWVersion(object):
def __init__(self, version, path, platform):
self.version = version
self.path = path
self.platform = platform
self.clpath = os.path.join(path, 'Other Metrowerks Tools',
'Command Line Tools')
self.dllpath = os.path.join(path, 'Bin')
# The Metrowerks tools don't store any configuration data so they
# are totally dumb when it comes to locating standard headers,
# libraries, and other files, expecting all the information
# to be handed to them in environment variables. The members set
# below control what information scons injects into the environment
### The paths below give a normal build environment in CodeWarrior for
### Windows, other versions of CodeWarrior might need different paths.
msl = os.path.join(path, 'MSL')
support = os.path.join(path, '%s Support' % platform)
self.license = os.path.join(path, 'license.dat')
self.includes = [msl, support]
self.libs = [msl, support]
def __str__(self):
return self.version
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def generate(env):
"""Add Builders and construction variables for the mwcc to an Environment."""
import SCons.Defaults
import SCons.Tool
set_vars(env)
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CXXAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'
env['CC'] = 'mwcc'
env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'
env['CXX'] = 'mwcc'
env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS'
env['SHCC'] = '$CC'
env['SHCCFLAGS'] = '$CCFLAGS'
env['SHCFLAGS'] = '$CFLAGS'
env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = '$CXXFLAGS'
env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'
env['CFILESUFFIX'] = '.c'
env['CXXFILESUFFIX'] = '.cpp'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
#env['PCH'] = ?
#env['PCHSTOP'] = ?
def exists(env):
return set_vars(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 | -7,673,003,812,350,718,000 | 32.193237 | 103 | 0.6411 | false |
aznashwan/heat2arm | heat2arm/constants.py | 1 | 1634 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Definitions of various Azure-related constants.
"""
ARM_API_2015_05_01_PREVIEW = "2015-05-01-preview"
ARM_SCHEMA_URL_2015_01_01 = ("https://schema.management.azure.com/schemas/"
"2015-01-01/deploymentTemplate.json#")
# the version of the API to be used in all definitions:
ARM_API_VERSION = ARM_API_2015_05_01_PREVIEW
# ARM_SCHEMA_URL is the default URL for fetching the ARM template JSON schema:
ARM_SCHEMA_URL = ARM_SCHEMA_URL_2015_01_01
# ARM_TEMPLATE_VERSION is the version the resulting template will be on:
ARM_TEMPLATE_VERSION = "1.0.0.0"
# DEFAULT_STORAGE_ACCOUNT_TYPE is the default type for the storage account to
# be created for the deployment if required.
DEFAULT_STORAGE_ACCOUNT_TYPE = "Standard_LRS"
# DEFAULT_STORAGE_CONTAINER_NAME is the default name for the
# storage container to be used.
DEFAULT_STORAGE_CONTAINER_NAME = "vhds"
# DEFAULT_LOCATION is the default location to be used for the deployment.
DEFAULT_LOCATION = "West US"
| apache-2.0 | -925,301,965,850,339,600 | 37.904762 | 78 | 0.73317 | false |
lthurlow/Boolean-Constrained-Routing | pycosat-0.6.0/setup.py | 1 | 1391 | import sys
from distutils.core import setup, Extension
version = '0.6.0'
ext_kwds = dict(
name = "pycosat",
sources = ["pycosat.c"],
define_macros = []
)
if sys.platform != 'win32':
ext_kwds['define_macros'].append(('PYCOSAT_VERSION', '"%s"' % version))
if '--inplace' in sys.argv:
ext_kwds['define_macros'].append(('DONT_INCLUDE_PICOSAT', 1))
ext_kwds['library_dirs'] = ['.']
ext_kwds['libraries'] = ['picosat']
setup(
name = "pycosat",
version = version,
author = "Ilan Schnell",
author_email = "[email protected]",
url = "https://github.com/ContinuumIO/pycosat",
license = "MIT",
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: C",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Utilities",
],
ext_modules = [Extension(**ext_kwds)],
py_modules = ['test_pycosat'],
description = "bindings to picosat (a SAT solver)",
long_description = open('README.rst').read(),
)
| mit | 344,584,170,238,616,960 | 29.23913 | 75 | 0.585909 | false |
yusaira-khan/Brain-Soother | OpenBCI_Python/scripts/stream_data.py | 5 | 4823 | import sys; sys.path.append('..') # help python find open_bci_v3.py relative to scripts folder
import open_bci_v3 as bci
import streamer_tcp_server
import time, timeit
from threading import Thread
# Transmit data to openvibe acquisition server, interpolating data (well, sort of) from 250Hz to 256Hz
# Listen to new connections every second using a separate thread.
# NB: Left here for resampling algorithm, prefer the use of user.py.
NB_CHANNELS = 8
# If > 0 will interpolate based on samples count, typically 1.024 to go from 250Hz to 256Hz
SAMPLING_FACTOR = -1.024
# If > 0 will interbolate based on elapsed time
SAMPLING_RATE = 256
SERVER_PORT=12345
SERVER_IP="localhost"
DEBUG=False
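# Worked example of the oversampling bookkeeping used below (comment only,
# assuming the default 250 Hz -> 256 Hz case): every incoming sample "owes" the
# clients 256/250 = 1.024 outgoing samples. leftover_duplications accumulates
# the 0.024 fractional surplus, so roughly every 1/0.024 ~= 42nd packet it
# exceeds 1 and one extra interpolated sample is emitted, keeping the outgoing
# rate near 256 Hz.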
# check packet drop
last_id = -1
# counter for sampling rate
nb_samples_in = -1
nb_samples_out = -1
# last seen values for interpolation
last_values = [0] * NB_CHANNELS
# counter to trigger duplications...
leftover_duplications = 0
tick=timeit.default_timer()
# try to ease work for main loop
class Monitor(Thread):
def __init__(self):
Thread.__init__(self)
self.nb_samples_in = -1
self.nb_samples_out = -1
# Init time to compute sampling rate
self.tick = timeit.default_timer()
self.start_tick = self.tick
def run(self):
while True:
# check FPS + listen for new connections
new_tick = timeit.default_timer()
elapsed_time = new_tick - self.tick
current_samples_in = nb_samples_in
current_samples_out = nb_samples_out
print "--- at t: ", (new_tick - self.start_tick), " ---"
print "elapsed_time: ", elapsed_time
print "nb_samples_in: ", current_samples_in - self.nb_samples_in
print "nb_samples_out: ", current_samples_out - self.nb_samples_out
self.tick = new_tick
self.nb_samples_in = nb_samples_in
self.nb_samples_out = nb_samples_out
# time to watch for connection
# FIXME: not so great with threads
server.check_connections()
time.sleep(1)
def streamData(sample):
global last_values
global tick
# check packet skipped
global last_id
# TODO: duplicate packet if skipped to stay sync
if sample.id != last_id + 1:
print "time", tick, ": paquet skipped!"
if sample.id == 255:
last_id = -1
else:
last_id = sample.id
# update counters
global nb_samples_in, nb_samples_out
nb_samples_in = nb_samples_in + 1
# check for duplication, by default 1 (...which is *no* duplication of the one current sample)
global leftover_duplications
# first method with sampling rate and elapsed time (depends on system clock accuracy)
if (SAMPLING_RATE > 0):
# elapsed time since last call, update tick
now = timeit.default_timer()
elapsed_time = now - tick;
# now we have to compute how many times we should send data to keep up with sample rate (oversampling)
leftover_duplications = SAMPLING_RATE * elapsed_time + leftover_duplications - 1
tick = now
    # second method with a sampling factor (depends on openbci accuracy)
elif SAMPLING_FACTOR > 0:
leftover_duplications = SAMPLING_FACTOR + leftover_duplications - 1
#print "needed_duplications: ", needed_duplications, "leftover_duplications: ", leftover_duplications
# If we need to insert values, will interpolate between current packet and last one
    # FIXME: because we do packet-per-packet treatment, only nb_duplications == 1 is handled; for more, the interpolation degrades and nothing is sent
if (leftover_duplications > 1):
leftover_duplications = leftover_duplications - 1
interpol_values = list(last_values)
for i in range(0,len(interpol_values)):
# OK, it's a very rough interpolation
interpol_values[i] = (last_values[i] + sample.channel_data[i]) / 2
if DEBUG:
print " --"
print " last values: ", last_values
print " interpolation: ", interpol_values
print " current sample: ", sample.channel_data
# send to clients interpolated sample
#leftover_duplications = 0
server.broadcast_values(interpol_values)
nb_samples_out = nb_samples_out + 1
# send to clients current sample
server.broadcast_values(sample.channel_data)
nb_samples_out = nb_samples_out + 1
# save current values for possible interpolation
last_values = list(sample.channel_data)
if __name__ == '__main__':
# init server
server = streamer_tcp_server.StreamerTCPServer(ip=SERVER_IP, port=SERVER_PORT, nb_channels=NB_CHANNELS)
# init board
port = '/dev/ttyUSB1'
baud = 115200
monit = Monitor()
# daemonize theard to terminate it altogether with the main when time will come
monit.daemon = True
monit.start()
board = bci.OpenBCIBoard(port=port, baud=baud, filter_data=False)
board.startStreaming(streamData)
| mit | -6,670,059,923,779,559,000 | 33.697842 | 153 | 0.686087 | false |
Ivoz/ajenti | plugins/filesystems/main.py | 17 | 5244 | from ajenti.ui import *
from ajenti.com import implements
from ajenti.api import *
from ajenti.utils import *
import backend
class FSPlugin(CategoryPlugin):
text = 'Filesystems'
icon = '/dl/filesystems/icon.png'
folder = 'system'
def on_init(self):
self.fstab = backend.read()
def on_session_start(self):
self._editing = -1
def get_ui(self):
ui = self.app.inflate('filesystems:main')
t = ui.find('list')
for u in self.fstab:
t.append(UI.DTR(
UI.Label(text=u.src, bold=True),
UI.Label(text=u.dst),
UI.Label(text=u.fs_type),
UI.Label(text=u.options),
UI.Label(text=str(u.dump_p)),
UI.Label(text=str(u.fsck_p)),
UI.HContainer(
UI.TipIcon(icon='/dl/core/ui/stock/edit.png', id='edit/'+str(self.fstab.index(u)), text='Edit'),
UI.TipIcon(icon='/dl/core/ui/stock/delete.png', id='del/'+str(self.fstab.index(u)), text='Delete', warning='Remove %s from fstab'%u.src)
),
))
if self._editing != -1:
try:
e = self.fstab[self._editing]
except:
e = backend.Entry()
e.src = '/dev/sda1'
e.dst = '/tmp'
e.options = 'none'
e.fs_type = 'none'
e.dump_p = 0
e.fsck_p = 0
self.setup_ui_edit(ui, e)
else:
ui.remove('dlgEdit')
return ui
def get_ui_sources_list(self, e):
lst = UI.Select(name='disk')
cst = True
for p in backend.list_partitions():
s = p
try:
s += ': %s partition %s' % (backend.get_disk_vendor(p), p[-1])
except:
pass
sel = e != None and e.src == p
cst &= not sel
lst.append(UI.SelectOption(value=p, text=s, selected=sel))
for p in backend.list_partitions():
u = backend.get_partition_uuid_by_name(p)
if u != '':
s = 'UUID=' + u
sel = e != None and e.src == s
cst &= not sel
lst.append(UI.SelectOption(value=s, text=p+': '+u , selected=sel))
lst.append(UI.SelectOption(text='proc', value='proc', selected=e.src=='proc'))
cst &= e.src != 'proc'
lst.append(UI.SelectOption(text='Custom', value='custom', selected=cst))
return lst, cst
def setup_ui_edit(self, ui, e):
opts = e.options.split(',')
bind = False
ro = False
loop = False
if 'bind' in opts:
opts.remove('bind')
bind = True
if 'ro' in opts:
opts.remove('ro')
ro = True
if 'loop' in opts:
opts.remove('loop')
loop = True
opts = ','.join(opts)
lst,cst = self.get_ui_sources_list(e)
ui.append('sources', lst)
ui.find('src').set('value', e.src if cst else '')
ui.find('mp').set('value', e.dst)
ui.find('fs').set('value', e.fs_type)
ui.find('opts').set('value', e.options)
ui.find('ro').set('checked', ro)
ui.find('bind').set('checked', bind)
ui.find('loop').set('checked', loop)
ui.find('dump_p').set('value', e.dump_p)
ui.find('fsck_p').set('value', e.fsck_p)
@event('button/click')
@event('linklabel/click')
def on_click(self, event, params, vars=None):
if params[0] == 'add':
self._editing = len(self.fstab)
if params[0] == 'edit':
self._editing = int(params[1])
if params[0] == 'del':
self.fstab.pop(int(params[1]))
backend.save(self.fstab)
@event('dialog/submit')
def on_submit(self, event, params, vars=None):
if params[0] == 'dlgEdit':
v = vars.getvalue('value', '')
if vars.getvalue('action', '') == 'OK':
e = backend.Entry()
if vars.getvalue('disk', 'custom') == 'custom':
e.src = vars.getvalue('src', 'none')
else:
e.src = vars.getvalue('disk', 'none')
e.dst = vars.getvalue('mp', 'none')
e.fs_type = vars.getvalue('fs', 'none')
e.options = vars.getvalue('opts', '')
if vars.getvalue('bind', '0') == '1':
e.options += ',bind'
if vars.getvalue('loop', '0') == '1':
e.options += ',loop'
if vars.getvalue('ro', '0') == '1':
e.options += ',ro'
e.options = e.options.strip(',')
if e.options.startswith('none,'):
e.options = e.options[5:]
e.dump_p = int(vars.getvalue('dump_p', '0'))
e.fsck_p = int(vars.getvalue('fsck_p', '0'))
try:
self.fstab[self._editing] = e
except:
self.fstab.append(e)
backend.save(self.fstab)
self._editing = -1
| lgpl-3.0 | 5,433,390,977,734,535,000 | 33.728477 | 160 | 0.46148 | false |
RondaStrauch/landlab | landlab/graph/dual.py | 3 | 6794 | """Define a graph of nodes-links-patches and its dual.
This class should not be used directly. Instead, it should be used as a base
class when defining other types of graphs.
"""
import numpy as np
import xarray as xr
from ..core.utils import as_id_array
from ..utils.decorators import store_result_in_grid, read_only_array
from .graph import Graph, find_perimeter_nodes
from .sort.sort import reverse_one_to_one
from .ugrid import DUAL_MESH_ATTRS
def _sort_dual_graph(graph):
from .sort.sort import reindex_by_xy
from .sort.ext.remap_element import remap_graph_element
sorted_dual = reindex_by_xy(graph._dual)
sorted = reindex_by_xy(graph)
node_at_cell = graph.ds['node_at_cell'].values
node_at_cell[:] = node_at_cell[sorted_dual[2]]
remap_graph_element(graph.node_at_cell,
as_id_array(np.argsort(sorted[0])))
def update_node_at_cell(ugrid, node_at_cell):
node_at_cell = xr.DataArray(
data=as_id_array(node_at_cell),
dims=('cell', ),
attrs={'cf_role': 'cell_node_connectivity',
'long_name': 'nodes centered at cells',
'start_index': 0})
ugrid.update({'node_at_cell': node_at_cell})
def update_nodes_at_face(ugrid, nodes_at_face):
nodes_at_face = xr.DataArray(
data=as_id_array(nodes_at_face),
dims=('face', 'Two'),
attrs={'cf_role': 'face_node_connectivity',
'long_name': 'nodes on either side of a face',
'start_index': 0})
ugrid.update({'nodes_at_face': nodes_at_face})
class DualGraph(Graph):
def __init__(self, **kwds):
node_at_cell = kwds.pop('node_at_cell', None)
nodes_at_face = kwds.pop('nodes_at_face', None)
update_node_at_cell(self.ds, node_at_cell)
update_nodes_at_face(self.ds, nodes_at_face)
rename = {
'mesh': 'dual',
'node': 'corner',
'link': 'face',
'patch': 'cell',
'x_of_node': 'x_of_corner',
'y_of_node': 'y_of_corner',
'nodes_at_link': 'corners_at_face',
'links_at_patch': 'faces_at_cell',
'max_patch_links': 'max_cell_faces',
}
self._ds = xr.merge([self._ds, self._dual.ds.rename(rename)])
self._origin = (0., 0.)
self._frozen = False
self.freeze()
if kwds.get('sort', True):
self.sort()
def sort(self):
from .sort.ext.remap_element import remap_graph_element
sorted_nodes, sorted_links, sorted_patches = Graph.sort(self)
sorted_corners, sorted_faces, sorted_cells = self.dual.sort()
with self.thawed():
self.node_at_cell[:] = self.node_at_cell[sorted_cells]
self.nodes_at_face[:] = self.nodes_at_face[sorted_faces]
remap_graph_element(as_id_array(self.node_at_cell),
as_id_array(np.argsort(sorted_nodes)))
remap_graph_element(as_id_array(self.nodes_at_face).reshape((-1, )),
as_id_array(np.argsort(sorted_nodes)))
def freeze(self):
Graph.freeze(self)
if hasattr(self, 'dual'):
self.dual.freeze()
def thaw(self):
Graph.thaw(self)
if hasattr(self, 'dual'):
self.dual.thaw()
@property
def dual(self):
return self._dual
@property
def node_at_cell(self):
return self.ds['node_at_cell'].values
@property
def nodes_at_face(self):
return self.ds['nodes_at_face'].values
@property
def cell_at_node(self):
try:
return self._cell_at_node
except AttributeError:
self._cell_at_node = reverse_one_to_one(
self.node_at_cell, minlength=self.number_of_nodes)
return self._cell_at_node
@property
def link_at_face(self):
try:
return self._link_at_face
except AttributeError:
return self._create_link_at_face()
def _create_link_at_face(self):
link_at_nodes = {}
for link, pair in enumerate(self.nodes_at_link):
# pair.sort()
link_at_nodes[tuple(np.sort(pair))] = link
link_at_face = np.full((self.number_of_faces, ), -1, dtype=int)
# for face, pair in enumerate(self._nodes_at_face):
for face, pair in enumerate(self.nodes_at_face):
# pair.sort()
link_at_face[face] = link_at_nodes[tuple(np.sort(pair))]
self._link_at_face = link_at_face
return self._link_at_face
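    # Illustrative note (not from the upstream docs): link_at_face[f] is the id
    # of the link that crosses face f, found by matching the sorted node pair
    # of each link against the sorted node pair stored for the face;
    # face_at_link below is simply the reverse mapping, with -1 for links that
    # have no face (e.g. perimeter links).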
@property
def face_at_link(self):
try:
return self._face_at_link
except AttributeError:
self._face_at_link = reverse_one_to_one(
self.link_at_face, minlength=self.number_of_links)
return self._face_at_link
@property
def x_of_corner(self):
return self._dual.x_of_node
@property
def y_of_corner(self):
return self._dual.y_of_node
@property
def xy_of_corner(self):
return self._dual.xy_of_node
@property
def xy_of_face(self):
return self._dual.xy_of_link
@property
def xy_of_cell(self):
return self._dual.xy_of_patch
@property
def corners(self):
return self._dual.nodes
@property
def number_of_corners(self):
return self._dual.number_of_nodes
@property
@store_result_in_grid()
@read_only_array
def perimeter_corners(self):
return find_perimeter_nodes(self.dual)
@property
def corners_at_face(self):
return self._dual.nodes_at_link
@property
def corner_at_face_tail(self):
return self._dual.node_at_link_tail
@property
def corner_at_face_head(self):
return self._dual.node_at_link_head
@property
def number_of_faces(self):
return self._dual.number_of_links
@property
def faces_at_cell(self):
return self._dual.links_at_patch
@property
def corners_at_cell(self):
return self._dual.nodes_at_patch
@property
def number_of_cells(self):
return self._dual.number_of_patches
@property
def faces_at_corner(self):
return self._dual.links_at_node
@property
def face_dirs_at_corner(self):
return self._dual.link_dirs_at_node
@property
def cells_at_face(self):
return self._dual.patches_at_link
@property
def cells_at_corner(self):
return self._dual.patches_at_node
@property
def width_of_face(self):
return self._dual.length_of_link
@property
def length_of_face(self):
return self._dual.length_of_link
@property
def area_of_cell(self):
return self._dual.area_of_patch
| mit | -5,141,477,720,348,146,000 | 26.617886 | 80 | 0.584486 | false |
prheenan/Research | Perkins/Projects/Protein/bacteriorhodopsin/2017-7-jcp-paper/Manuscript/fulfill_figures.py | 1 | 2727 | #! /usr/bin/env python3
"""Pandoc filter that replaces labels of format {#?:???}, where ? is a
single lower case character defining the type and ??? is an alphanumeric
label, with numbers. Different types are counted separately.
credit to: blog.hartleygroup.org/2015/11/08/numbering-figures-schemes-and-charts-in-pandoc/
"""
from pandocfilters import toJSONFilter, Str,Strong
import re,pickle
import figure_util
def label_text(label_or_ref,kind,label,known_labels):
"""
Given a raw label, returns how its text should be formatted
Args:
label_or_ref: either "label_" or "ref_"
    kind: one of the valid types (eq, tbl, sec, etc.)
    label: the label to use (e.g. "PerformanceFigure")
    known_labels: obtained from walk_figures
    Returns:
either Str or Strong element to replace in the AST
"""
is_label = label_or_ref is not None and "label" in label_or_ref
default_fmt = "{:s} {:s}"
sec_label = default_fmt
if (is_label):
default_fmt += "."
sec_label += ":"
num = known_labels[kind][label]
kind_to_label = dict(fig=default_fmt.format("Fig.",num),
eq=default_fmt.format("Eq.",num),
tbl=default_fmt.format("Table",num),
sec=sec_label.format("Section",num))
str_ele = [Str(kind_to_label[kind])]
if (is_label):
return [Strong(str_ele)]
else:
return str_ele
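# Hedged illustration (not part of the original filter): a minimal sketch of
# what label_text() returns, assuming a known_labels mapping in which
# walk_figures assigned the figure "PerformanceFigure" the number "1".
def _example_label_text():
    known_labels = {"fig": {"PerformanceFigure": "1"}}
    # a label comes back bolded with a trailing period: [Strong([Str("Fig. 1.")])]
    as_label = label_text("label_", "fig", "PerformanceFigure", known_labels)
    # a reference is plain text without the period: [Str("Fig. 1")]
    as_ref = label_text("ref_", "fig", "PerformanceFigure", known_labels)
    return as_label, as_ref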
def fulfill_references(key,val,fmt,meta,known_labels):
"""
loops through, replacing the references with their proper numbers
"""
if (key == 'Str') and figure_util.REF_PAT.match(val):
start, label_or_ref,kind,supp,label, end = \
figure_util.match_pattern(val)
if (label_or_ref is not None) and ("label" in label_or_ref):
# format labels as we want...
content = label_text(label_or_ref,kind,label,known_labels)
elif ( (kind not in known_labels) or
(label not in known_labels[kind])):
            # couldn't find the kind or the label for the reference
error = "XXX_Unknown-kind/label ({:s}:{:s})XXX".format(kind,label)
content = [Str(error)]
else:
# just a normal reference
content = label_text(label_or_ref,kind,label,known_labels)
return [Str(start)] + content + [Str(end)]
if __name__ == '__main__':
known_labels = figure_util.read_cache()
with open("tmp.txt",'w') as f:
f.write(str(known_labels))
m_func = lambda *args,**kwargs: \
fulfill_references(*args,
known_labels=known_labels,
**kwargs)
toJSONFilter(m_func)
| gpl-3.0 | -6,397,894,419,634,744,000 | 36.356164 | 91 | 0.591859 | false |
tensorflow/models | research/object_detection/utils/np_box_ops.py | 1 | 3431 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, 4] numpy arrays representing bounding boxes.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def area(boxes):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
    a numpy array with shape [N] representing box areas
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2)
area1 = area(boxes1)
area2 = area(boxes2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
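# Hedged usage sketch (not part of the upstream module): a tiny worked example
# of iou() on hand-made boxes in [y_min, x_min, y_max, x_max] order; the
# coordinates below are illustrative assumptions, not test fixtures.
def _example_iou():
  """Two unit boxes overlapping by half have IOU = 0.5 / 1.5 = 1/3."""
  boxes_a = np.array([[0.0, 0.0, 1.0, 1.0]])
  boxes_b = np.array([[0.0, 0.5, 1.0, 1.5]])
  # intersection area is 0.5 and union is 1 + 1 - 0.5 = 1.5
  return iou(boxes_a, boxes_b)  # -> array([[0.33333333]])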
def ioa(boxes1, boxes2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, IOA(box1, box2) != IOA(box2, box1).
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
intersect = intersection(boxes1, boxes2)
areas = np.expand_dims(area(boxes2), axis=0)
return intersect / areas
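# Hedged sketch (not part of the upstream module) of the asymmetry noted in the
# ioa() docstring: the same pair of boxes scores differently depending on which
# box supplies the denominator area.
def _example_ioa_asymmetry():
  small = np.array([[0.0, 0.0, 1.0, 1.0]])  # area 1
  large = np.array([[0.0, 0.0, 2.0, 2.0]])  # area 4
  # the intersection is the small box itself (area 1), so:
  ioa_small_in_large = ioa(small, large)  # 1 / 4 = 0.25
  ioa_large_in_small = ioa(large, small)  # 1 / 1 = 1.0
  return ioa_small_in_large, ioa_large_in_small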
| apache-2.0 | -3,927,373,574,512,050,700 | 32.637255 | 80 | 0.688429 | false |
destijl/grr | grr/lib/flows/general/filetypes.py | 1 | 1961 | #!/usr/bin/env python
"""File-type specific flows."""
from grr.client.client_actions import plist as plist_actions
from grr.lib import flow
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import flows_pb2
class PlistValueFilterArgs(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.PlistValueFilterArgs
class PlistValueFilter(flow.GRRFlow):
"""Obtains values from a plist based on a context and a query filter.
  This flow will parse a plist, obtain all the values under the path given
  in context, and then filter each of them against the given query, returning
  only those that match. I.e.:
plist = {
      'values': [13, 14, 15],
'items':
[
{'name': 'John',
'age': 33,
'children': ['John', 'Phil'],
},
{'name': 'Mike',
'age': 24,
'children': [],
},
],
}
A call to PlistValueFilter with context "items" and query "age > 25" will
return {'name': 'John', 'age': 33}.
  If you don't specify a context, the full plist will be matched and returned
  if the query succeeds. I.e.: a call to PlistValueFilter without a context but
  query "values contains 13" will return the full plist.
  If you don't specify a query, all the values under the context parameter will
  get returned. I.e.: a call to PlistValueFilter with context "items.children"
  and no query will return [['John', 'Phil'], []].
"""
category = "/FileTypes/"
args_type = PlistValueFilterArgs
@flow.StateHandler()
  def Start(self, unused_response):
    """Issue a request to query the plist."""
self.CallClient(
plist_actions.PlistQuery,
request=self.args.request,
next_state="Receive")
@flow.StateHandler()
def Receive(self, responses):
if not responses.success:
self.Error("Could not retrieve value: %s" % responses.status)
else:
for response in responses.First():
self.SendReply(response)
| apache-2.0 | 2,640,817,103,653,268,000 | 28.268657 | 79 | 0.663947 | false |
markuszoeller/openstack | scripts/launchpad/query_expired.py | 2 | 1982 | #!/usr/bin/env python
# Displays all Launchpad bugs for OpenStack/Nova which are potentially expired
# because they got reported for Nova versions which are not supported anymore.
#
# Copyright 2016 Markus Zoeller
import argparse
import datetime
import common
parser = argparse.ArgumentParser()
parser.add_argument('-p',
'--project-name',
required=True,
dest='project_name',
help='The LP project name.')
args = parser.parse_args()
PROJECT_NAME = args.project_name
DAYS_SINCE_CREATED = 30 * 18 # 18 months
STILL_VALID_FLAG = "CONFIRMED FOR: %(release_name)s" # UPPER CASE
client = common.get_project_client(PROJECT_NAME)
bug_tasks = client.searchTasks(status=["New", "Confirmed", "Triaged"],
omit_duplicates=True,
order_by="datecreated")
SUPPORTED_RELEASE_NAMES = []
SUPPORTED_RELEASE_NAMES.append(client.development_focus.name) # master name
for s in client.series:
if s.active:
SUPPORTED_RELEASE_NAMES.append(s.name) # stable branch names
print(SUPPORTED_RELEASE_NAMES)
print("potentially expired bugs:")
print("=========================")
today = datetime.datetime.today()
counter = 0
def bug_is_still_valid(bug):
for message in bug.messages:
for release_name in SUPPORTED_RELEASE_NAMES:
flag = STILL_VALID_FLAG % {'release_name': release_name.upper()}
if flag in message.content:
return True
return False
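# Hedged illustration (never run against Launchpad): bug_is_still_valid() only
# needs an object exposing ``messages`` whose items carry a ``content`` string,
# so the stand-in classes below are enough to show how the CONFIRMED FOR flag
# is matched; the release name "mitaka" is an assumed example.
class _FakeMessage(object):
    def __init__(self, content):
        self.content = content
class _FakeBug(object):
    def __init__(self, contents):
        self.messages = [_FakeMessage(c) for c in contents]
# With "mitaka" in SUPPORTED_RELEASE_NAMES, a comment containing
# "CONFIRMED FOR: MITAKA" keeps the report out of the expired list:
#   bug_is_still_valid(_FakeBug(["CONFIRMED FOR: MITAKA"])) -> True
#   bug_is_still_valid(_FakeBug(["just another comment"])) -> False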
for bug_task in bug_tasks:
# remove the timezone info as it disturbs the calculation of the diff
diff = today - bug_task.date_created.replace(tzinfo=None)
if diff.days < DAYS_SINCE_CREATED:
break
if bug_is_still_valid(bug_task.bug):
continue
print("%s (%d days)" %(bug_task.web_link, diff.days))
counter += 1
print("---------------------------------")
print("%s potentially expired bug reports" % counter) | mit | 7,359,826,331,495,939,000 | 29.507692 | 78 | 0.632694 | false |
goerz/pelican | pelican/tests/test_utils.py | 7 | 22455 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import
import logging
import shutil
import os
import time
import locale
from sys import platform
from tempfile import mkdtemp
import pytz
from pelican.generators import TemplatePagesGenerator
from pelican.writers import Writer
from pelican.settings import read_settings
from pelican import utils
from pelican.tests.support import get_article, LoggedTestCase, locale_available, unittest
class TestUtils(LoggedTestCase):
_new_attribute = 'new_value'
@utils.deprecated_attribute(
old='_old_attribute', new='_new_attribute',
since=(3, 1, 0), remove=(4, 1, 3))
def _old_attribute():
return None
def test_deprecated_attribute(self):
value = self._old_attribute
self.assertEqual(value, self._new_attribute)
self.assertLogCountEqual(
count=1,
msg=('_old_attribute has been deprecated since 3.1.0 and will be '
'removed by version 4.1.3. Use _new_attribute instead'),
level=logging.WARNING)
def test_get_date(self):
# valid ones
date = utils.SafeDatetime(year=2012, month=11, day=22)
date_hour = utils.SafeDatetime(
year=2012, month=11, day=22, hour=22, minute=11)
date_hour_z = utils.SafeDatetime(
year=2012, month=11, day=22, hour=22, minute=11,
tzinfo=pytz.timezone('UTC'))
date_hour_est = utils.SafeDatetime(
year=2012, month=11, day=22, hour=22, minute=11,
tzinfo=pytz.timezone('EST'))
date_hour_sec = utils.SafeDatetime(
year=2012, month=11, day=22, hour=22, minute=11, second=10)
date_hour_sec_z = utils.SafeDatetime(
year=2012, month=11, day=22, hour=22, minute=11, second=10,
tzinfo=pytz.timezone('UTC'))
date_hour_sec_est = utils.SafeDatetime(
year=2012, month=11, day=22, hour=22, minute=11, second=10,
tzinfo=pytz.timezone('EST'))
date_hour_sec_frac_z = utils.SafeDatetime(
year=2012, month=11, day=22, hour=22, minute=11, second=10,
microsecond=123000, tzinfo=pytz.timezone('UTC'))
dates = {
'2012-11-22': date,
'2012/11/22': date,
'2012-11-22 22:11': date_hour,
'2012/11/22 22:11': date_hour,
'22-11-2012': date,
'22/11/2012': date,
'22.11.2012': date,
'22.11.2012 22:11': date_hour,
'2012-11-22T22:11Z': date_hour_z,
'2012-11-22T22:11-0500': date_hour_est,
'2012-11-22 22:11:10': date_hour_sec,
'2012-11-22T22:11:10Z': date_hour_sec_z,
'2012-11-22T22:11:10-0500': date_hour_sec_est,
'2012-11-22T22:11:10.123Z': date_hour_sec_frac_z,
}
# examples from http://www.w3.org/TR/NOTE-datetime
iso_8601_date = utils.SafeDatetime(year=1997, month=7, day=16)
iso_8601_date_hour_tz = utils.SafeDatetime(
year=1997, month=7, day=16, hour=19, minute=20,
tzinfo=pytz.timezone('CET'))
iso_8601_date_hour_sec_tz = utils.SafeDatetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30,
tzinfo=pytz.timezone('CET'))
iso_8601_date_hour_sec_ms_tz = utils.SafeDatetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30,
microsecond=450000, tzinfo=pytz.timezone('CET'))
iso_8601 = {
'1997-07-16': iso_8601_date,
'1997-07-16T19:20+01:00': iso_8601_date_hour_tz,
'1997-07-16T19:20:30+01:00': iso_8601_date_hour_sec_tz,
'1997-07-16T19:20:30.45+01:00': iso_8601_date_hour_sec_ms_tz,
}
# invalid ones
invalid_dates = ['2010-110-12', 'yay']
for value, expected in dates.items():
self.assertEqual(utils.get_date(value), expected, value)
for value, expected in iso_8601.items():
self.assertEqual(utils.get_date(value), expected, value)
for item in invalid_dates:
self.assertRaises(ValueError, utils.get_date, item)
def test_slugify(self):
samples = (('this is a test', 'this-is-a-test'),
('this is a test', 'this-is-a-test'),
('this → is ← a ↑ test', 'this-is-a-test'),
('this--is---a test', 'this-is-a-test'),
('unicode測試許功蓋,你看到了嗎?',
'unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma'),
('大飯原発4号機、18日夜起動へ',
'da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he'),)
for value, expected in samples:
self.assertEqual(utils.slugify(value), expected)
def test_slugify_substitute(self):
samples = (('C++ is based on C', 'cpp-is-based-on-c'),
('C+++ test C+ test', 'cpp-test-c-test'),
('c++, c#, C#, C++', 'cpp-c-sharp-c-sharp-cpp'),
('c++-streams', 'cpp-streams'),)
subs = (('C++', 'CPP'), ('C#', 'C-SHARP'))
for value, expected in samples:
self.assertEqual(utils.slugify(value, subs), expected)
def test_get_relative_path(self):
samples = ((os.path.join('test', 'test.html'), os.pardir),
(os.path.join('test', 'test', 'test.html'),
os.path.join(os.pardir, os.pardir)),
('test.html', os.curdir),
(os.path.join('/test', 'test.html'), os.pardir),
(os.path.join('/test', 'test', 'test.html'),
os.path.join(os.pardir, os.pardir)),
('/test.html', os.curdir),)
for value, expected in samples:
self.assertEqual(utils.get_relative_path(value), expected)
def test_process_translations(self):
# create a bunch of articles
# 1: no translation metadata
fr_article1 = get_article(lang='fr', slug='yay', title='Un titre',
content='en français')
en_article1 = get_article(lang='en', slug='yay', title='A title',
content='in english')
# 2: reverse which one is the translation thanks to metadata
fr_article2 = get_article(lang='fr', slug='yay2', title='Un titre',
content='en français')
en_article2 = get_article(lang='en', slug='yay2', title='A title',
content='in english',
extra_metadata={'translation': 'true'})
# 3: back to default language detection if all items have the
# translation metadata
fr_article3 = get_article(lang='fr', slug='yay3', title='Un titre',
content='en français',
extra_metadata={'translation': 'yep'})
en_article3 = get_article(lang='en', slug='yay3', title='A title',
content='in english',
extra_metadata={'translation': 'yes'})
articles = [fr_article1, en_article1, fr_article2, en_article2,
fr_article3, en_article3]
index, trans = utils.process_translations(articles)
self.assertIn(en_article1, index)
self.assertIn(fr_article1, trans)
self.assertNotIn(en_article1, trans)
self.assertNotIn(fr_article1, index)
self.assertIn(fr_article2, index)
self.assertIn(en_article2, trans)
self.assertNotIn(fr_article2, trans)
self.assertNotIn(en_article2, index)
self.assertIn(en_article3, index)
self.assertIn(fr_article3, trans)
self.assertNotIn(en_article3, trans)
self.assertNotIn(fr_article3, index)
def test_watchers(self):
# Test if file changes are correctly detected
# Make sure to handle not getting any files correctly.
dirname = os.path.join(os.path.dirname(__file__), 'content')
folder_watcher = utils.folder_watcher(dirname, ['rst'])
path = os.path.join(dirname, 'article_with_metadata.rst')
file_watcher = utils.file_watcher(path)
# first check returns True
self.assertEqual(next(folder_watcher), True)
self.assertEqual(next(file_watcher), True)
# next check without modification returns False
self.assertEqual(next(folder_watcher), False)
self.assertEqual(next(file_watcher), False)
# after modification, returns True
t = time.time()
os.utime(path, (t, t))
self.assertEqual(next(folder_watcher), True)
self.assertEqual(next(file_watcher), True)
# file watcher with None or empty path should return None
self.assertEqual(next(utils.file_watcher('')), None)
self.assertEqual(next(utils.file_watcher(None)), None)
empty_path = os.path.join(os.path.dirname(__file__), 'empty')
try:
os.mkdir(empty_path)
os.mkdir(os.path.join(empty_path, "empty_folder"))
shutil.copy(__file__, empty_path)
# if no files of interest, returns None
watcher = utils.folder_watcher(empty_path, ['rst'])
self.assertEqual(next(watcher), None)
except OSError:
self.fail("OSError Exception in test_files_changed test")
finally:
shutil.rmtree(empty_path, True)
def test_clean_output_dir(self):
retention = ()
test_directory = os.path.join(os.path.dirname(__file__),
'clean_output')
content = os.path.join(os.path.dirname(__file__), 'content')
shutil.copytree(content, test_directory)
utils.clean_output_dir(test_directory, retention)
self.assertTrue(os.path.isdir(test_directory))
self.assertListEqual([], os.listdir(test_directory))
shutil.rmtree(test_directory)
def test_clean_output_dir_not_there(self):
retention = ()
test_directory = os.path.join(os.path.dirname(__file__),
'does_not_exist')
utils.clean_output_dir(test_directory, retention)
self.assertFalse(os.path.exists(test_directory))
def test_clean_output_dir_is_file(self):
retention = ()
test_directory = os.path.join(os.path.dirname(__file__),
'this_is_a_file')
f = open(test_directory, 'w')
f.write('')
f.close()
utils.clean_output_dir(test_directory, retention)
self.assertFalse(os.path.exists(test_directory))
def test_strftime(self):
d = utils.SafeDatetime(2012, 8, 29)
# simple formatting
self.assertEqual(utils.strftime(d, '%d/%m/%y'), '29/08/12')
self.assertEqual(utils.strftime(d, '%d/%m/%Y'), '29/08/2012')
# RFC 3339
        self.assertEqual(utils.strftime(d, '%Y-%m-%dT%H:%M:%SZ'),
                         '2012-08-29T00:00:00Z')
# % escaped
self.assertEqual(utils.strftime(d, '%d%%%m%%%y'), '29%08%12')
self.assertEqual(utils.strftime(d, '%d %% %m %% %y'), '29 % 08 % 12')
# not valid % formatter
self.assertEqual(utils.strftime(d, '10% reduction in %Y'),
'10% reduction in 2012')
self.assertEqual(utils.strftime(d, '%10 reduction in %Y'),
'%10 reduction in 2012')
# with text
self.assertEqual(utils.strftime(d, 'Published in %d-%m-%Y'),
'Published in 29-08-2012')
# with non-ascii text
self.assertEqual(utils.strftime(d, '%d/%m/%Y Øl trinken beim Besäufnis'),
'29/08/2012 Øl trinken beim Besäufnis')
# alternative formatting options
self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '29/8/12')
self.assertEqual(utils.strftime(d, '%-H:%-M:%-S'), '0:0:0')
d = utils.SafeDatetime(2012, 8, 9)
self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '9/8/12')
# test the output of utils.strftime in a different locale
# Turkish locale
@unittest.skipUnless(locale_available('tr_TR.UTF-8') or
locale_available('Turkish'),
'Turkish locale needed')
def test_strftime_locale_dependent_turkish(self):
# store current locale
old_locale = locale.setlocale(locale.LC_ALL)
if platform == 'win32':
locale.setlocale(locale.LC_ALL, str('Turkish'))
else:
locale.setlocale(locale.LC_ALL, str('tr_TR.UTF-8'))
d = utils.SafeDatetime(2012, 8, 29)
# simple
self.assertEqual(utils.strftime(d, '%d %B %Y'), '29 Ağustos 2012')
self.assertEqual(utils.strftime(d, '%A, %d %B %Y'),
'Çarşamba, 29 Ağustos 2012')
# with text
self.assertEqual(utils.strftime(d, 'Yayınlanma tarihi: %A, %d %B %Y'),
'Yayınlanma tarihi: Çarşamba, 29 Ağustos 2012')
# non-ascii format candidate (someone might pass it... for some reason)
self.assertEqual(utils.strftime(d, '%Y yılında %üretim artışı'),
'2012 yılında %üretim artışı')
# restore locale back
locale.setlocale(locale.LC_ALL, old_locale)
# test the output of utils.strftime in a different locale
# French locale
@unittest.skipUnless(locale_available('fr_FR.UTF-8') or
locale_available('French'),
'French locale needed')
def test_strftime_locale_dependent_french(self):
# store current locale
old_locale = locale.setlocale(locale.LC_ALL)
if platform == 'win32':
locale.setlocale(locale.LC_ALL, str('French'))
else:
locale.setlocale(locale.LC_ALL, str('fr_FR.UTF-8'))
d = utils.SafeDatetime(2012, 8, 29)
# simple
self.assertEqual(utils.strftime(d, '%d %B %Y'), '29 août 2012')
# depending on OS, the first letter is m or M
self.assertTrue(utils.strftime(d, '%A') in ('mercredi', 'Mercredi'))
# with text
self.assertEqual(utils.strftime(d, 'Écrit le %d %B %Y'),
'Écrit le 29 août 2012')
# non-ascii format candidate (someone might pass it... for some reason)
self.assertEqual(utils.strftime(d, '%écrits en %Y'),
'%écrits en 2012')
# restore locale back
locale.setlocale(locale.LC_ALL, old_locale)
class TestCopy(unittest.TestCase):
'''Tests the copy utility'''
def setUp(self):
self.root_dir = mkdtemp(prefix='pelicantests.')
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
def tearDown(self):
shutil.rmtree(self.root_dir)
locale.setlocale(locale.LC_ALL, self.old_locale)
def _create_file(self, *path):
with open(os.path.join(self.root_dir, *path), 'w') as f:
f.write('42\n')
def _create_dir(self, *path):
os.makedirs(os.path.join(self.root_dir, *path))
def _exist_file(self, *path):
path = os.path.join(self.root_dir, *path)
self.assertTrue(os.path.isfile(path), 'File does not exist: %s' % path)
def _exist_dir(self, *path):
path = os.path.join(self.root_dir, *path)
self.assertTrue(os.path.exists(path),
'Directory does not exist: %s' % path)
def test_copy_file_same_path(self):
self._create_file('a.txt')
utils.copy(os.path.join(self.root_dir, 'a.txt'),
os.path.join(self.root_dir, 'b.txt'))
self._exist_file('b.txt')
def test_copy_file_different_path(self):
self._create_dir('a')
self._create_dir('b')
self._create_file('a', 'a.txt')
utils.copy(os.path.join(self.root_dir, 'a', 'a.txt'),
os.path.join(self.root_dir, 'b', 'b.txt'))
self._exist_dir('b')
self._exist_file('b', 'b.txt')
def test_copy_file_create_dirs(self):
self._create_file('a.txt')
utils.copy(os.path.join(self.root_dir, 'a.txt'),
os.path.join(self.root_dir, 'b0', 'b1', 'b2', 'b3', 'b.txt'))
self._exist_dir('b0')
self._exist_dir('b0', 'b1')
self._exist_dir('b0', 'b1', 'b2')
self._exist_dir('b0', 'b1', 'b2', 'b3')
self._exist_file('b0', 'b1', 'b2', 'b3', 'b.txt')
def test_copy_dir_same_path(self):
self._create_dir('a')
self._create_file('a', 'a.txt')
utils.copy(os.path.join(self.root_dir, 'a'),
os.path.join(self.root_dir, 'b'))
self._exist_dir('b')
self._exist_file('b', 'a.txt')
def test_copy_dir_different_path(self):
self._create_dir('a0')
self._create_dir('a0', 'a1')
self._create_file('a0', 'a1', 'a.txt')
self._create_dir('b0')
utils.copy(os.path.join(self.root_dir, 'a0', 'a1'),
os.path.join(self.root_dir, 'b0', 'b1'))
self._exist_dir('b0', 'b1')
self._exist_file('b0', 'b1', 'a.txt')
def test_copy_dir_create_dirs(self):
self._create_dir('a')
self._create_file('a', 'a.txt')
utils.copy(os.path.join(self.root_dir, 'a'),
os.path.join(self.root_dir, 'b0', 'b1', 'b2', 'b3', 'b'))
self._exist_dir('b0')
self._exist_dir('b0', 'b1')
self._exist_dir('b0', 'b1', 'b2')
self._exist_dir('b0', 'b1', 'b2', 'b3')
self._exist_dir('b0', 'b1', 'b2', 'b3', 'b')
self._exist_file('b0', 'b1', 'b2', 'b3', 'b', 'a.txt')
class TestDateFormatter(unittest.TestCase):
    '''Tests that the output of the DateFormatter jinja filter is the same as
utils.strftime'''
def setUp(self):
# prepare a temp content and output folder
self.temp_content = mkdtemp(prefix='pelicantests.')
self.temp_output = mkdtemp(prefix='pelicantests.')
# prepare a template file
template_dir = os.path.join(self.temp_content, 'template')
template_path = os.path.join(template_dir, 'source.html')
os.makedirs(template_dir)
with open(template_path, 'w') as template_file:
template_file.write('date = {{ date|strftime("%A, %d %B %Y") }}')
self.date = utils.SafeDatetime(2012, 8, 29)
def tearDown(self):
shutil.rmtree(self.temp_content)
shutil.rmtree(self.temp_output)
# reset locale to default
locale.setlocale(locale.LC_ALL, '')
@unittest.skipUnless(locale_available('fr_FR.UTF-8') or
locale_available('French'),
'French locale needed')
def test_french_strftime(self):
# This test tries to reproduce an issue that occurred with python3.3 under macos10 only
if platform == 'win32':
locale.setlocale(locale.LC_ALL, str('French'))
else:
locale.setlocale(locale.LC_ALL, str('fr_FR.UTF-8'))
date = utils.SafeDatetime(2014,8,14)
# we compare the lower() dates since macos10 returns "Jeudi" for %A whereas linux reports "jeudi"
self.assertEqual( u'jeudi, 14 août 2014', utils.strftime(date, date_format="%A, %d %B %Y").lower() )
df = utils.DateFormatter()
self.assertEqual( u'jeudi, 14 août 2014', df(date, date_format="%A, %d %B %Y").lower() )
# Let us now set the global locale to C:
locale.setlocale(locale.LC_ALL, str('C'))
# DateFormatter should still work as expected since it is the whole point of DateFormatter
# (This is where pre-2014/4/15 code fails on macos10)
df_date = df(date, date_format="%A, %d %B %Y").lower()
self.assertEqual( u'jeudi, 14 août 2014', df_date )
@unittest.skipUnless(locale_available('fr_FR.UTF-8') or
locale_available('French'),
'French locale needed')
def test_french_locale(self):
if platform == 'win32':
locale_string = 'French'
else:
locale_string = 'fr_FR.UTF-8'
settings = read_settings(
override = {'LOCALE': locale_string,
'TEMPLATE_PAGES': {'template/source.html':
'generated/file.html'}})
generator = TemplatePagesGenerator(
{'date': self.date}, settings,
self.temp_content, '', self.temp_output)
generator.env.filters.update({'strftime': utils.DateFormatter()})
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_path = os.path.join(
self.temp_output, 'generated', 'file.html')
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with utils.pelican_open(output_path) as output_file:
self.assertEqual(output_file,
utils.strftime(self.date, 'date = %A, %d %B %Y'))
@unittest.skipUnless(locale_available('tr_TR.UTF-8') or
locale_available('Turkish'),
'Turkish locale needed')
def test_turkish_locale(self):
if platform == 'win32':
locale_string = 'Turkish'
else:
locale_string = 'tr_TR.UTF-8'
settings = read_settings(
override = {'LOCALE': locale_string,
'TEMPLATE_PAGES': {'template/source.html':
'generated/file.html'}})
generator = TemplatePagesGenerator(
{'date': self.date}, settings,
self.temp_content, '', self.temp_output)
generator.env.filters.update({'strftime': utils.DateFormatter()})
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_path = os.path.join(
self.temp_output, 'generated', 'file.html')
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with utils.pelican_open(output_path) as output_file:
self.assertEqual(output_file,
utils.strftime(self.date, 'date = %A, %d %B %Y'))
| agpl-3.0 | -249,963,560,626,379,070 | 39.284685 | 108 | 0.563512 | false |
pbrod/scipy | scipy/io/tests/test_netcdf.py | 16 | 16440 | ''' Tests for netcdf '''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
import shutil
import tempfile
import warnings
from io import BytesIO
from glob import glob
from contextlib import contextmanager
import numpy as np
from numpy.testing import (assert_, assert_allclose, assert_raises,
assert_equal, run_module_suite)
from scipy.io.netcdf import netcdf_file
from scipy._lib._tmpdirs import in_tempdir
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
N_EG_ELS = 11 # number of elements for example variable
VARTYPE_EG = 'b' # var type for example variable
@contextmanager
def make_simple(*args, **kwargs):
f = netcdf_file(*args, **kwargs)
f.history = 'Created for a test'
f.createDimension('time', N_EG_ELS)
time = f.createVariable('time', VARTYPE_EG, ('time',))
time[:] = np.arange(N_EG_ELS)
time.units = 'days since 2008-01-01'
f.flush()
yield f
f.close()
def check_simple(ncfileobj):
'''Example fileobj tests '''
assert_equal(ncfileobj.history, b'Created for a test')
time = ncfileobj.variables['time']
assert_equal(time.units, b'days since 2008-01-01')
assert_equal(time.shape, (N_EG_ELS,))
assert_equal(time[-1], N_EG_ELS-1)
def assert_mask_matches(arr, expected_mask):
'''
Asserts that the mask of arr is effectively the same as expected_mask.
In contrast to numpy.ma.testutils.assert_mask_equal, this function allows
testing the 'mask' of a standard numpy array (the mask in this case is treated
as all False).
Parameters
----------
arr: ndarray or MaskedArray
Array to test.
expected_mask: array_like of booleans
A list giving the expected mask.
'''
mask = np.ma.getmaskarray(arr)
assert_equal(mask, expected_mask)
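# Hedged illustration (not one of the shipped test cases): assert_mask_matches
# accepts both plain ndarrays and MaskedArrays, so both calls below pass.
def _example_assert_mask_matches():
    plain = np.array([1, 2, 3])  # no mask, treated as all False
    masked = np.ma.masked_equal(np.array([1, 2, 3]), 2)
    assert_mask_matches(plain, [False, False, False])
    assert_mask_matches(masked, [False, True, False])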
def test_read_write_files():
# test round trip for example file
cwd = os.getcwd()
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
with make_simple('simple.nc', 'w') as f:
pass
# read the file we just created in 'a' mode
with netcdf_file('simple.nc', 'a') as f:
check_simple(f)
# add something
f._attributes['appendRan'] = 1
# To read the NetCDF file we just created::
with netcdf_file('simple.nc') as f:
# Using mmap is the default
assert_(f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Read it in append (and check mmap is off)
with netcdf_file('simple.nc', 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Now without mmap
with netcdf_file('simple.nc', mmap=False) as f:
# Using mmap is the default
assert_(not f.use_mmap)
check_simple(f)
# To read the NetCDF file we just created, as file object, no
# mmap. When n * n_bytes(var_type) is not divisible by 4, this
# raised an error in pupynere 1.0.12 and scipy rev 5893, because
# calculated vsize was rounding up in units of 4 - see
# http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj) as f:
# by default, don't use mmap for file-like
assert_(not f.use_mmap)
check_simple(f)
# Read file from fileobj, with mmap
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj, mmap=True) as f:
assert_(f.use_mmap)
check_simple(f)
# Again read it in append mode (adding another att)
with open('simple.nc', 'r+b') as fobj:
with netcdf_file(fobj, 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
f.createDimension('app_dim', 1)
var = f.createVariable('app_var', 'i', ('app_dim',))
var[:] = 42
# And... check that app_var made it in...
with netcdf_file('simple.nc') as f:
check_simple(f)
assert_equal(f.variables['app_var'][:], 42)
except:
os.chdir(cwd)
shutil.rmtree(tmpdir)
raise
os.chdir(cwd)
shutil.rmtree(tmpdir)
def test_read_write_sio():
eg_sio1 = BytesIO()
with make_simple(eg_sio1, 'w') as f1:
str_val = eg_sio1.getvalue()
eg_sio2 = BytesIO(str_val)
with netcdf_file(eg_sio2) as f2:
check_simple(f2)
# Test that error is raised if attempting mmap for sio
eg_sio3 = BytesIO(str_val)
assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
# Test 64-bit offset write / read
eg_sio_64 = BytesIO()
with make_simple(eg_sio_64, 'w', version=2) as f_64:
str_val = eg_sio_64.getvalue()
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
# also when version 2 explicitly specified
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64, version=2) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
def test_read_example_data():
# read any example data files
for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
with netcdf_file(fname, 'r') as f:
pass
with netcdf_file(fname, 'r', mmap=False) as f:
pass
def test_itemset_no_segfault_on_readonly():
# Regression test for ticket #1202.
# Open the test file in read-only mode.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with netcdf_file(filename, 'r') as f:
time_var = f.variables['time']
# time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
assert_raises(RuntimeError, time_var.assignValue, 42)
def test_write_invalid_dtype():
dtypes = ['int64', 'uint64']
if np.dtype('int').itemsize == 8: # 64-bit machines
dtypes.append('int')
if np.dtype('uint').itemsize == 8: # 64-bit machines
dtypes.append('uint')
with netcdf_file(BytesIO(), 'w') as f:
f.createDimension('time', N_EG_ELS)
for dt in dtypes:
assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))
def test_flush_rewind():
stream = BytesIO()
with make_simple(stream, mode='w') as f:
x = f.createDimension('x',4)
v = f.createVariable('v', 'i2', ['x'])
v[:] = 1
f.flush()
len_single = len(stream.getvalue())
f.flush()
len_double = len(stream.getvalue())
assert_(len_single == len_double)
def test_dtype_specifiers():
# Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.
# Specifying np.int16 or similar only works from the same commit as this
# comment was made.
with make_simple(BytesIO(), mode='w') as f:
f.createDimension('x',4)
f.createVariable('v1', 'i2', ['x'])
f.createVariable('v2', np.int16, ['x'])
f.createVariable('v3', np.dtype(np.int16), ['x'])
def test_ticket_1720():
io = BytesIO()
items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
with netcdf_file(io, 'w') as f:
f.history = 'Created for a test'
f.createDimension('float_var', 10)
float_var = f.createVariable('float_var', 'f', ('float_var',))
float_var[:] = items
float_var.units = 'metres'
f.flush()
contents = io.getvalue()
io = BytesIO(contents)
with netcdf_file(io, 'r') as f:
assert_equal(f.history, b'Created for a test')
float_var = f.variables['float_var']
assert_equal(float_var.units, b'metres')
assert_equal(float_var.shape, (10,))
assert_allclose(float_var[:], items)
def test_mmaps_segfault():
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with warnings.catch_warnings():
warnings.simplefilter("error")
with netcdf_file(filename, mmap=True) as f:
x = f.variables['lat'][:]
# should not raise warnings
del x
def doit():
with netcdf_file(filename, mmap=True) as f:
return f.variables['lat'][:]
# should not crash
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = doit()
x.sum()
def test_zero_dimensional_var():
io = BytesIO()
with make_simple(io, 'w') as f:
v = f.createVariable('zerodim', 'i2', [])
# This is checking that .isrec returns a boolean - don't simplify it
# to 'assert not ...'
assert v.isrec is False, v.isrec
f.flush()
def test_byte_gatts():
# Check that global "string" atts work like they did before py3k
# unicode and general bytes confusion
with in_tempdir():
filename = 'g_byte_atts.nc'
f = netcdf_file(filename, 'w')
f._attributes['holy'] = b'grail'
f._attributes['witch'] = 'floats'
f.close()
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['holy'], b'grail')
assert_equal(f._attributes['witch'], b'floats')
f.close()
def test_open_append():
# open 'w' put one attr
with in_tempdir():
filename = 'append_dat.nc'
f = netcdf_file(filename, 'w')
f._attributes['Kilroy'] = 'was here'
f.close()
# open again in 'a', read the att and and a new one
f = netcdf_file(filename, 'a')
assert_equal(f._attributes['Kilroy'], b'was here')
f._attributes['naughty'] = b'Zoot'
f.close()
# open yet again in 'r' and check both atts
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['Kilroy'], b'was here')
assert_equal(f._attributes['naughty'], b'Zoot')
f.close()
def test_append_recordDimension():
dataSize = 100
with in_tempdir():
# Create file with record time dimension
with netcdf_file('withRecordDimension.nc', 'w') as f:
f.createDimension('time', None)
f.createVariable('time', 'd', ('time',))
f.createDimension('x', dataSize)
x = f.createVariable('x', 'd', ('x',))
x[:] = np.array(range(dataSize))
f.createDimension('y', dataSize)
y = f.createVariable('y', 'd', ('y',))
y[:] = np.array(range(dataSize))
f.createVariable('testData', 'i', ('time', 'x', 'y'))
f.flush()
f.close()
for i in range(2):
# Open the file in append mode and add data
with netcdf_file('withRecordDimension.nc', 'a') as f:
f.variables['time'].data = np.append(f.variables["time"].data, i)
f.variables['testData'][i, :, :] = np.ones((dataSize, dataSize))*i
f.flush()
# Read the file and check that append worked
with netcdf_file('withRecordDimension.nc') as f:
assert_equal(f.variables['time'][-1], i)
assert_equal(f.variables['testData'][-1, :, :].copy(), np.ones((dataSize, dataSize))*i)
assert_equal(f.variables['time'].data.shape[0], i+1)
assert_equal(f.variables['testData'].data.shape[0], i+1)
# Read the file and check that 'data' was not saved as user defined
# attribute of testData variable during append operation
with netcdf_file('withRecordDimension.nc') as f:
with assert_raises(KeyError) as ar:
f.variables['testData']._attributes['data']
ex = ar.exception
assert_equal(ex.args[0], 'data')
def test_maskandscale():
t = np.linspace(20, 30, 15)
t[3] = 100
tm = np.ma.masked_greater(t, 99)
fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
with netcdf_file(fname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
found = Temp[:].compressed()
del Temp # Remove ref to mmap, so file can be closed.
expected = np.round(tm.compressed(), 2)
assert_allclose(found, expected)
with in_tempdir():
newfname = 'ms.nc'
f = netcdf_file(newfname, 'w', maskandscale=True)
f.createDimension('Temperature', len(tm))
temp = f.createVariable('Temperature', 'i', ('Temperature',))
temp.missing_value = 9999
temp.scale_factor = 0.01
temp.add_offset = 20
temp[:] = tm
f.close()
with netcdf_file(newfname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
expected = np.round(tm.compressed(), 2)
found = Temp[:].compressed()
del Temp
assert_allclose(found, expected)
# ------------------------------------------------------------------------
# Test reading with masked values (_FillValue / missing_value)
# ------------------------------------------------------------------------
def test_read_withValuesNearFillValue():
# Regression test for ticket #5626
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var1_fillval0'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withNoFillValue():
# For a variable with no fill value, reading data with maskandscale=True
# should return unmasked data
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var2_noFillval'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1,2,3])
def test_read_withFillValueAndMissingValue():
# For a variable with both _FillValue and missing_value, the _FillValue
# should be used
IRRELEVANT_VALUE = 9999
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [True, False, False])
assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3])
def test_read_withMissingValue():
# For a variable with missing_value but not _FillValue, the missing_value
# should be used
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var4_missingValue'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withFillValNaN():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var5_fillvalNaN'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withChar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var6_char'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_with2dVar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var7_2d'][:]
assert_mask_matches(vardata, [[True, False], [False, False], [False, True]])
def test_read_withMaskAndScaleFalse():
# If a variable has a _FillValue (or missing_value) attribute, but is read
# with maskandscale set to False, the result should be unmasked
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
# Open file with mmap=False to avoid problems with closing a mmap'ed file
# when arrays referring to its data still exist:
with netcdf_file(fname, maskandscale=False, mmap=False) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1, 2, 3])
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -1,933,642,024,777,391,600 | 34.354839 | 103 | 0.590998 | false |
neurodata/ndgrutedb | packages/utils/mask_nifti.py | 2 | 2199 | #!/usr/bin/env python
# Copyright 2015 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# mask_nifti.py
# Created by Greg Kiar on 2015-02-17.
# Email: [email protected]
# Copyright (c) 2015. All rights reserved.
import argparse
from nibabel import load, save, Nifti1Image
from numpy import where
def masking(data_img, mask, output_img):
print "Loading data..."
d_img = load(data_img)
d_data = d_img.get_data()
print "Loading mask..."
m_img = load(mask)
m_img.get_header().set_data_shape(m_img.get_header().get_data_shape()[0:3])
m_data = m_img.get_data()
print "Determining data dimensions..."
t = d_img.get_header()
t = t.get_data_shape()
dimension = len(t)
  # assumes that the mask is 3D
  i, j, k = where(m_data == 0)
if dimension == 3:
print "Masking 3D Image..."
d_data[i,j,k] = 0
elif dimension == 4:
print "Masking 4D Image..."
for q in range(t[3]):
print "Applying mask to layer", q+1, " of ", t[3]
d_data[i,j,k,q] = 0
print "Masking complete!"
print "Saving..."
out = Nifti1Image( data=d_data, affine=d_img.get_affine(), header=d_img.get_header() )
save(out, output_img)
print "Complete!"
def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument("data", action="store", help="The image we want to mask (.nii, .nii.gz)")
parser.add_argument("mask", action="store", help="The binary mask (.nii, .nii.gz)")
parser.add_argument("output", action="store", help="masked output image (.nii, .nii.gz)")
result = parser.parse_args()
masking(result.data, result.mask, result.output)
if __name__ == "__main__":
main()
| apache-2.0 | -759,839,102,581,999,400 | 29.541667 | 95 | 0.671669 | false |
williamdev/RatticWeb | cred/migrations/0015_auto__add_field_credicon_xoffset__add_field_credicon_yoffset.py | 7 | 6847 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CredIcon.xoffset'
db.add_column('cred_credicon', 'xoffset',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Adding field 'CredIcon.yoffset'
db.add_column('cred_credicon', 'yoffset',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CredIcon.xoffset'
db.delete_column('cred_credicon', 'xoffset')
# Deleting field 'CredIcon.yoffset'
db.delete_column('cred_credicon', 'yoffset')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cred.cred': {
'Meta': {'object_name': 'Cred'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'default': '58', 'to': "orm['cred.CredIcon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'child_creds'", 'default': 'None', 'to': "orm['cred.Tag']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'cred.credaudit': {
'Meta': {'ordering': "('-time',)", 'object_name': 'CredAudit'},
'audittype': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'cred': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['cred.Cred']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credlogs'", 'to': "orm['auth.User']"})
},
'cred.credchangeq': {
'Meta': {'object_name': 'CredChangeQ'},
'cred': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cred.Cred']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'cred.credicon': {
'Meta': {'object_name': 'CredIcon'},
'filename': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'xoffset': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'yoffset': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cred.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['cred'] | gpl-2.0 | -6,499,106,666,362,851,000 | 63 | 212 | 0.541113 | false |
VirtueSecurity/aws-extender | BappModules/boto3/resources/factory.py | 11 | 22401 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from functools import partial
from .action import ServiceAction
from .action import WaiterAction
from .base import ResourceMeta, ServiceResource
from .collection import CollectionFactory
from .model import ResourceModel
from .response import build_identifiers, ResourceHandler
from ..exceptions import ResourceLoadException
from ..docs import docstring
logger = logging.getLogger(__name__)
class ResourceFactory(object):
"""
A factory to create new :py:class:`~boto3.resources.base.ServiceResource`
classes from a :py:class:`~boto3.resources.model.ResourceModel`. There are
two types of lookups that can be done: one on the service itself (e.g. an
SQS resource) and another on models contained within the service (e.g. an
SQS Queue resource).
"""
def __init__(self, emitter):
self._collection_factory = CollectionFactory()
self._emitter = emitter
def load_from_definition(self, resource_name,
single_resource_json_definition, service_context):
"""
Loads a resource from a model, creating a new
:py:class:`~boto3.resources.base.ServiceResource` subclass
with the correct properties and methods, named based on the service
and resource name, e.g. EC2.Instance.
:type resource_name: string
:param resource_name: Name of the resource to look up. For services,
this should match the ``service_name``.
:type single_resource_json_definition: dict
:param single_resource_json_definition:
The loaded json of a single service resource or resource
definition.
:type service_context: :py:class:`~boto3.utils.ServiceContext`
:param service_context: Context about the AWS service
:rtype: Subclass of :py:class:`~boto3.resources.base.ServiceResource`
:return: The service or resource class.
"""
logger.debug('Loading %s:%s', service_context.service_name,
resource_name)
# Using the loaded JSON create a ResourceModel object.
resource_model = ResourceModel(
resource_name, single_resource_json_definition,
service_context.resource_json_definitions
)
# Do some renaming of the shape if there was a naming collision
# that needed to be accounted for.
shape = None
if resource_model.shape:
shape = service_context.service_model.shape_for(
resource_model.shape)
resource_model.load_rename_map(shape)
# Set some basic info
meta = ResourceMeta(
service_context.service_name, resource_model=resource_model)
attrs = {
'meta': meta,
}
# Create and load all of attributes of the resource class based
# on the models.
# Identifiers
self._load_identifiers(
attrs=attrs, meta=meta, resource_name=resource_name,
resource_model=resource_model
)
# Load/Reload actions
self._load_actions(
attrs=attrs, resource_name=resource_name,
resource_model=resource_model, service_context=service_context
)
# Attributes that get auto-loaded
self._load_attributes(
attrs=attrs, meta=meta, resource_name=resource_name,
resource_model=resource_model,
service_context=service_context)
# Collections and their corresponding methods
self._load_collections(
attrs=attrs, resource_model=resource_model,
service_context=service_context)
# References and Subresources
self._load_has_relations(
attrs=attrs, resource_name=resource_name,
resource_model=resource_model, service_context=service_context
)
# Waiter resource actions
self._load_waiters(
attrs=attrs, resource_name=resource_name,
resource_model=resource_model, service_context=service_context
)
# Create the name based on the requested service and resource
cls_name = resource_name
if service_context.service_name == resource_name:
cls_name = 'ServiceResource'
cls_name = service_context.service_name + '.' + cls_name
base_classes = [ServiceResource]
if self._emitter is not None:
self._emitter.emit(
'creating-resource-class.%s' % cls_name,
class_attributes=attrs, base_classes=base_classes,
service_context=service_context)
return type(str(cls_name), tuple(base_classes), attrs)
def _load_identifiers(self, attrs, meta, resource_model, resource_name):
"""
Populate required identifiers. These are arguments without which
the resource cannot be used. Identifiers become arguments for
operations on the resource.
"""
for identifier in resource_model.identifiers:
meta.identifiers.append(identifier.name)
attrs[identifier.name] = self._create_identifier(
identifier, resource_name)
def _load_actions(self, attrs, resource_name, resource_model,
service_context):
"""
Actions on the resource become methods, with the ``load`` method
being a special case which sets internal data for attributes, and
``reload`` is an alias for ``load``.
"""
if resource_model.load:
attrs['load'] = self._create_action(
action_model=resource_model.load, resource_name=resource_name,
service_context=service_context, is_load=True)
attrs['reload'] = attrs['load']
for action in resource_model.actions:
attrs[action.name] = self._create_action(
action_model=action, resource_name=resource_name,
service_context=service_context)
def _load_attributes(self, attrs, meta, resource_name, resource_model,
service_context):
"""
Load resource attributes based on the resource shape. The shape
name is referenced in the resource JSON, but the shape itself
is defined in the Botocore service JSON, hence the need for
access to the ``service_model``.
"""
if not resource_model.shape:
return
shape = service_context.service_model.shape_for(
resource_model.shape)
identifiers = dict(
(i.member_name, i)
for i in resource_model.identifiers if i.member_name)
attributes = resource_model.get_attributes(shape)
for name, (orig_name, member) in attributes.items():
if name in identifiers:
prop = self._create_identifier_alias(
resource_name=resource_name,
identifier=identifiers[name],
member_model=member,
service_context=service_context
)
else:
prop = self._create_autoload_property(
resource_name=resource_name,
name=orig_name, snake_cased=name,
member_model=member,
service_context=service_context
)
attrs[name] = prop
def _load_collections(self, attrs, resource_model, service_context):
"""
Load resource collections from the model. Each collection becomes
a :py:class:`~boto3.resources.collection.CollectionManager` instance
on the resource instance, which allows you to iterate and filter
through the collection's items.
"""
for collection_model in resource_model.collections:
attrs[collection_model.name] = self._create_collection(
resource_name=resource_model.name,
collection_model=collection_model,
service_context=service_context
)
def _load_has_relations(self, attrs, resource_name, resource_model,
service_context):
"""
Load related resources, which are defined via a ``has``
relationship but conceptually come in two forms:
1. A reference, which is a related resource instance and can be
``None``, such as an EC2 instance's ``vpc``.
2. A subresource, which is a resource constructor that will always
return a resource instance which shares identifiers/data with
this resource, such as ``s3.Bucket('name').Object('key')``.
"""
for reference in resource_model.references:
# This is a dangling reference, i.e. we have all
# the data we need to create the resource, so
# this instance becomes an attribute on the class.
attrs[reference.name] = self._create_reference(
reference_model=reference,
resource_name=resource_name,
service_context=service_context
)
for subresource in resource_model.subresources:
# This is a sub-resource class you can create
# by passing in an identifier, e.g. s3.Bucket(name).
attrs[subresource.name] = self._create_class_partial(
subresource_model=subresource,
resource_name=resource_name,
service_context=service_context
)
self._create_available_subresources_command(
attrs, resource_model.subresources)
def _create_available_subresources_command(self, attrs, subresources):
_subresources = [subresource.name for subresource in subresources]
_subresources = sorted(_subresources)
def get_available_subresources(factory_self):
"""
Returns a list of all the available sub-resources for this
Resource.
:returns: A list containing the name of each sub-resource for this
resource
:rtype: list of str
"""
return _subresources
attrs['get_available_subresources'] = get_available_subresources
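# Illustrative note (not part of the original module): the generated method
# simply reports the subresource names captured in the closure above, e.g.
#
#   s3.Bucket('my-bucket').get_available_subresources()
#   # -> a sorted list of names such as ['Acl', 'Cors', 'Lifecycle', ...]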
def _load_waiters(self, attrs, resource_name, resource_model,
service_context):
"""
Load resource waiters from the model. Each waiter allows you to
wait until a resource reaches a specific state by polling the state
of the resource.
"""
for waiter in resource_model.waiters:
attrs[waiter.name] = self._create_waiter(
resource_waiter_model=waiter,
resource_name=resource_name,
service_context=service_context
)
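# Illustrative note (not part of the original module): waiters defined on the
# resource model surface as ``wait_until_*`` methods that poll until the
# desired state is reached, e.g. ``bucket.wait_until_exists()`` on S3.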
def _create_identifier(factory_self, identifier, resource_name):
"""
Creates a read-only property for identifier attributes.
"""
def get_identifier(self):
# The default value is set to ``None`` instead of
# raising an AttributeError because when resources are
# instantiated a check is made such that none of the
# identifiers have a value ``None``. If any are ``None``,
# a more informative user error than a generic AttributeError
# is raised.
return getattr(self, '_' + identifier.name, None)
get_identifier.__name__ = str(identifier.name)
get_identifier.__doc__ = docstring.IdentifierDocstring(
resource_name=resource_name,
identifier_model=identifier,
include_signature=False
)
return property(get_identifier)
def _create_identifier_alias(factory_self, resource_name, identifier,
member_model, service_context):
"""
Creates a read-only property that aliases an identifier.
"""
def get_identifier(self):
return getattr(self, '_' + identifier.name, None)
get_identifier.__name__ = str(identifier.member_name)
get_identifier.__doc__ = docstring.AttributeDocstring(
service_name=service_context.service_name,
resource_name=resource_name,
attr_name=identifier.member_name,
event_emitter=factory_self._emitter,
attr_model=member_model,
include_signature=False
)
return property(get_identifier)
def _create_autoload_property(factory_self, resource_name, name,
snake_cased, member_model, service_context):
"""
Creates a new property on the resource to lazy-load its value
via the resource's ``load`` method (if it exists).
"""
# The property loader will check to see if this resource has already
# been loaded and return the cached value if possible. If not, then
# it first checks to see if it CAN be loaded (raise if not), then
# calls the load before returning the value.
def property_loader(self):
if self.meta.data is None:
if hasattr(self, 'load'):
self.load()
else:
raise ResourceLoadException(
'{0} has no load method'.format(
self.__class__.__name__))
return self.meta.data.get(name)
property_loader.__name__ = str(snake_cased)
property_loader.__doc__ = docstring.AttributeDocstring(
service_name=service_context.service_name,
resource_name=resource_name,
attr_name=snake_cased,
event_emitter=factory_self._emitter,
attr_model=member_model,
include_signature=False
)
return property(property_loader)
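# Illustrative sketch (not part of the original module) of the lazy-load
# behaviour wired up above, assuming an attribute backed by ``load``:
#
#   bucket = s3.Bucket('my-bucket')   # no API call yet, meta.data is None
#   bucket.creation_date              # triggers bucket.load(), then returns
#                                     # the value cached in meta.data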
def _create_waiter(factory_self, resource_waiter_model, resource_name,
service_context):
"""
Creates a new wait method for each resource where both a waiter and
resource model are defined.
"""
waiter = WaiterAction(resource_waiter_model,
waiter_resource_name=resource_waiter_model.name)
def do_waiter(self, *args, **kwargs):
waiter(self, *args, **kwargs)
do_waiter.__name__ = str(resource_waiter_model.name)
do_waiter.__doc__ = docstring.ResourceWaiterDocstring(
resource_name=resource_name,
event_emitter=factory_self._emitter,
service_model=service_context.service_model,
resource_waiter_model=resource_waiter_model,
service_waiter_model=service_context.service_waiter_model,
include_signature=False
)
return do_waiter
def _create_collection(factory_self, resource_name, collection_model,
service_context):
"""
Creates a new property on the resource to lazy-load a collection.
"""
cls = factory_self._collection_factory.load_from_definition(
resource_name=resource_name, collection_model=collection_model,
service_context=service_context,
event_emitter=factory_self._emitter)
def get_collection(self):
return cls(
collection_model=collection_model, parent=self,
factory=factory_self, service_context=service_context)
get_collection.__name__ = str(collection_model.name)
get_collection.__doc__ = docstring.CollectionDocstring(
collection_model=collection_model, include_signature=False)
return property(get_collection)
def _create_reference(factory_self, reference_model, resource_name,
service_context):
"""
Creates a new property on the resource to lazy-load a reference.
"""
# References are essentially an action with no request
# or response, so we can re-use the response handlers to
# build up resources from identifiers and data members.
handler = ResourceHandler(
search_path=reference_model.resource.path, factory=factory_self,
resource_model=reference_model.resource,
service_context=service_context
)
# Are there any identifiers that need access to data members?
# This is important when building the resource below since
# it requires the data to be loaded.
needs_data = any(i.source == 'data' for i in
reference_model.resource.identifiers)
def get_reference(self):
# We need to lazy-evaluate the reference to handle circular
# references between resources. We do this by loading the class
# when first accessed.
# This is using a *response handler* so we need to make sure
# our data is loaded (if possible) and pass that data into
# the handler as if it were a response. This allows references
# to have their data loaded properly.
if needs_data and self.meta.data is None and hasattr(self, 'load'):
self.load()
return handler(self, {}, self.meta.data)
get_reference.__name__ = str(reference_model.name)
get_reference.__doc__ = docstring.ReferenceDocstring(
reference_model=reference_model,
include_signature=False
)
return property(get_reference)
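# Illustrative note (not part of the original module): a reference reads like a
# plain attribute, e.g. ``instance.vpc`` on an EC2 instance; when any of its
# identifiers come from data members, the first access may call ``load()`` on
# the parent before the handler builds the related resource.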
def _create_class_partial(factory_self, subresource_model, resource_name,
service_context):
"""
Creates a new method which acts as a functools.partial, passing
along the instance's low-level `client` to the new resource
class' constructor.
"""
name = subresource_model.resource.type
def create_resource(self, *args, **kwargs):
# We need a new method here because we want access to the
# instance's client.
positional_args = []
# We lazy-load the class to handle circular references.
json_def = service_context.resource_json_definitions.get(name, {})
resource_cls = factory_self.load_from_definition(
resource_name=name,
single_resource_json_definition=json_def,
service_context=service_context
)
# Assumes that identifiers are in order, which lets you do
# e.g. ``sqs.Queue('foo').Message('bar')`` to create a new message
# linked with the ``foo`` queue and which has a ``bar`` receipt
# handle. If we did kwargs here then future positional arguments
# would lead to failure.
identifiers = subresource_model.resource.identifiers
if identifiers is not None:
for identifier, value in build_identifiers(identifiers, self):
positional_args.append(value)
return partial(resource_cls, *positional_args,
client=self.meta.client)(*args, **kwargs)
create_resource.__name__ = str(name)
create_resource.__doc__ = docstring.SubResourceDocstring(
resource_name=resource_name,
sub_resource_model=subresource_model,
service_model=service_context.service_model,
include_signature=False
)
return create_resource
def _create_action(factory_self, action_model, resource_name,
service_context, is_load=False):
"""
Creates a new method which makes a request to the underlying
AWS service.
"""
# Create the action in this closure but before the ``do_action``
# method below is invoked, which allows instances of the resource
# to share the ServiceAction instance.
action = ServiceAction(
action_model, factory=factory_self,
service_context=service_context
)
# A resource's ``load`` method is special because it sets
# values on the resource instead of returning the response.
if is_load:
# We need a new method here because we want access to the
# instance via ``self``.
def do_action(self, *args, **kwargs):
response = action(self, *args, **kwargs)
self.meta.data = response
# Create the docstring for the load/reload methods.
lazy_docstring = docstring.LoadReloadDocstring(
action_name=action_model.name,
resource_name=resource_name,
event_emitter=factory_self._emitter,
load_model=action_model,
service_model=service_context.service_model,
include_signature=False
)
else:
# We need a new method here because we want access to the
# instance via ``self``.
def do_action(self, *args, **kwargs):
response = action(self, *args, **kwargs)
if hasattr(self, 'load'):
# Clear cached data. It will be reloaded the next
# time that an attribute is accessed.
# TODO: Make this configurable in the future?
self.meta.data = None
return response
lazy_docstring = docstring.ActionDocstring(
resource_name=resource_name,
event_emitter=factory_self._emitter,
action_model=action_model,
service_model=service_context.service_model,
include_signature=False
)
do_action.__name__ = str(action_model.name)
do_action.__doc__ = lazy_docstring
return do_action
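# Illustrative note (not part of the original module): for a non-load action
# the generated method performs the request and clears any cached data (when
# the resource supports ``load``), so later attribute access re-fetches state:
#
#   queue.set_attributes(Attributes={'DelaySeconds': '10'})  # meta.data cleared
#   queue.attributes                                          # lazily reloaded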
| mit | -2,070,400,794,844,738,800 | 40.560297 | 79 | 0.604705 | false |
lavalamp-/ws-backend-community | rest/views/elasticsearch/mixin.py | 1 | 9340 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import copy
from django.conf import settings
from rest_framework import authentication
from rest_framework.exceptions import PermissionDenied, NotAcceptable
from rest_framework.response import Response
import rest.responses
from rest.views.exception import OperationFailed
class BaseElasticsearchAPIViewMixin(object):
"""
This is a mixin class for all Web Sight APIView classes that query data stored in Elasticsearch.
"""
# Class Members
authentication_classes = (authentication.TokenAuthentication,)
_query = None
_search_argument = None
_queried_fields = None
# Instantiation
# Static Methods
# Class Methods
@classmethod
def get_es_query_class(cls):
"""
Get the Elasticsearch query class that this APIView is meant to query.
:return: The Elasticsearch query class that this APIView is meant to query.
"""
raise NotImplementedError("Subclasses must implement this!")
# Public Methods
def check_ws_permissions(self):
"""
Check to see if the requesting user has sufficient permissions to be querying the contents of
this APIView.
:return: None
"""
if not self.request.user.is_superuser:
if not self._check_permissions():
raise PermissionDenied()
def get(self, *args, **kwargs):
"""
Handle the HTTP GET request to this APIView.
:param args: Positional arguments.
:param kwargs: Keyword arguments.
:return: A Django rest framework response object.
"""
if self.has_presentation_argument:
return rest.responses.WsPresentationResponse.from_es_api_view(self)
else:
self.check_ws_permissions()
self._validate_arguments()
response = self._query_elasticsearch()
response_body = self._extract_contents_from_response(response)
return Response(response_body)
# Protected Methods
def _apply_filters_to_query(self, query):
"""
Apply the necessary filters to the given Elasticsearch query and return it.
:param query: The Elasticsearch query to apply filters to.
:return: The query with filters applied.
"""
return query
def _check_permissions(self):
"""
Check to see if the requesting user has sufficient permissions to be querying the contents of
this APIView.
:return: True if the requesting user has sufficient permissions, False otherwise.
"""
raise NotImplementedError("Subclasses must implement this!")
def _extract_contents_from_response(self, response):
"""
Process the contents of the given response and return a list or dictionary that will then
be returned to the requesting user.
:param response: The Elasticsearch response to process.
:return: A list or dictionary to return to the requesting user.
"""
raise NotImplementedError("Subclasses must implement this!")
def _extract_objects_from_response(self, response):
"""
Process the contents of the given response and return a list of dictionaries reflecting the data
contained within the Elasticsearch response.
:param response: The Elasticsearch response to process.
:return: A list of dictionaries reflecting the contents of the given Elasticsearch response.
"""
return [self._get_object_from_result(result) for result in response.results]
def _get_elasticsearch_index(self):
"""
Get the Elasticsearch index that the resulting Elasticsearch query should be restricted to.
:return: The Elasticsearch index that the resulting Elasticsearch query should be restricted to.
"""
raise NotImplementedError("Subclasses must implement this!")
def _get_elasticsearch_query(self):
"""
Get the Elasticsearch query object that will be used to query Elasticsearch data. This can
be overridden to initialize the query in custom ways.
:return: The Elasticsearch query object that will be used to query Elasticsearch data.
"""
return self.es_query_class()
def _get_object_from_result(self, es_result):
"""
Get a dictionary reflecting the data contained within the given Elasticsearch result object.
:param es_result: An Elasticsearch result object.
:return: A dictionary reflecting the data contained within the given Elasticsearch result object.
"""
to_return = {}
for queried_field in self.queryable_model_fields:
if queried_field in es_result["_source"]:
to_return[queried_field] = es_result["_source"][queried_field]
to_return["type"] = es_result["_type"]
return to_return
def _query_elasticsearch(self):
"""
Submit a query to Elasticsearch and return the corresponding response.
:return: The response retrieved from Elasticsearch.
"""
es_index = self._get_elasticsearch_index()
self._query = self._get_elasticsearch_query()
self._query = self._apply_filters_to_query(self._query)
self._query.queried_fields = self.queried_fields
return self._query.search(index=es_index)
def _validate_arguments(self):
"""
Validate that the arguments supplied to this handler are valid for query execution, and raise a
ValidationError if they are not.
:return: None
"""
pass
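# Illustrative sketch (not part of the original module): a concrete APIView
# built on this mixin mainly fills in the abstract hooks above, roughly
#
#   class ExampleReportListAPIView(BaseElasticsearchAPIViewMixin, APIView):
#       @classmethod
#       def get_es_query_class(cls):
#           return ExampleReportQuery        # hypothetical ES query class
#       def _check_permissions(self):
#           return self.request.user.is_authenticated
#       def _get_elasticsearch_index(self):
#           return 'example-index'           # hypothetical index name
#       def _extract_contents_from_response(self, response):
#           return self._extract_objects_from_response(response)
#
# ExampleReportListAPIView, ExampleReportQuery and 'example-index' are
# placeholders, not names taken from this project.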
# Private Methods
def __get_queried_fields(self):
"""
Get a list of the fields meant to be queried by this APIView. This list will take into account
fields specified in inclusion and exclusion query string parameters.
:return: A list of the fields meant to be queried by this APIView.
"""
if not self.has_exclude_fields_argument and not self.has_include_fields_argument:
return self.queryable_model_fields
elif self.has_include_fields_argument:
fields = []
included_fields = self.request.query_params[settings.INCLUDE_FIELDS_PARAM]
for included_field in [x.strip() for x in included_fields.split(",")]:
if included_field in self.queryable_model_fields:
fields.append(included_field)
elif self.has_exclude_fields_argument:
fields = copy.copy(self.queryable_model_fields)
excluded_fields = self.request.query_params[settings.EXCLUDE_FIELDS_PARAM]
for excluded_field in [x.strip() for x in excluded_fields.split(",")]:
if excluded_field in fields:
fields.remove(excluded_field)
fields = list(set(fields))
if len(fields) == 0:
raise OperationFailed(detail="You must specify at least one valid field to query.")
return fields
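# Illustrative note (not part of the original module): assuming the configured
# parameter names are ``include_fields`` and ``exclude_fields``, a request such
# as ``GET /things/?include_fields=uuid,created`` narrows the queried fields to
# that subset (inclusion wins if both are supplied), ``?exclude_fields=raw_body``
# starts from all queryable fields and removes the listed ones, and a request
# that leaves no valid field raises the OperationFailed error above.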
# Properties
@property
def has_exclude_fields_argument(self):
"""
Get whether or not the request has the exclude fields argument.
:return: whether or not the request has the exclude fields argument.
"""
return settings.EXCLUDE_FIELDS_PARAM in self.request.query_params
@property
def has_include_fields_argument(self):
"""
Get whether or not the request has the include fields argument.
:return: whether or not the request has the include fields argument.
"""
return settings.INCLUDE_FIELDS_PARAM in self.request.query_params
@property
def has_presentation_argument(self):
"""
Get whether or not the request has the presentation argument.
:return: whether or not the request has the presentation argument.
"""
return settings.PRESENTATION_PARAM in self.request.query_params
@property
def es_query_class(self):
"""
Get the Elasticsearch query class that this APIView is meant to query.
:return: The Elasticsearch query class that this APIView is meant to query.
"""
return self.__class__.get_es_query_class()
@property
def queried_fields(self):
"""
Get a list containing the Elasticsearch model fields that should be queried by this APIView.
:return: a list containing the Elasticsearch model fields that should be queried by this APIView.
"""
if self._queried_fields is None:
self._queried_fields = self.__get_queried_fields()
return self._queried_fields
@property
def queryable_model_fields(self):
"""
Get a list of the strings on the queried model to return to the requesting user.
:return: a list of the strings on the queried model to return to the requesting user.
"""
return self.es_query_class.get_queryable_fields()
@property
def query(self):
"""
Get the Elasticsearch query that this class is configured to run.
:return: the Elasticsearch query that this class is configured to run.
"""
return self._query
# Representation and Comparison
def __repr__(self):
return "<%s>" % (self.__class__.__name__,)
| gpl-3.0 | 2,869,166,828,440,299,000 | 37.436214 | 105 | 0.65182 | false |
Pica4x6/megaradrp | megaradrp/core/tests/test_processing.py | 1 | 3104 | import shutil
from tempfile import mkdtemp
import os
import astropy.io.fits as fits
import numpy as np
import pytest
from megaradrp.core.processing import trim_and_o
from megaradrp.core.processing import apextract_weights
from megaradrp.simulation.detector import ReadParams, MegaraDetectorSat
from megaradrp.simulation.actions import simulate_flat
def generate_bias_file():
PSCAN = 50
DSHAPE = (2056 * 2, 2048 * 2)
OSCAN = 50
ron = 2.0
gain = 1.0
bias = 1000.0
qe = 0.8 * np.ones(DSHAPE)
qe[0:15, 0:170] = 0.0
readpars1 = ReadParams(gain=gain, ron=ron, bias=bias)
readpars2 = ReadParams(gain=gain, ron=ron, bias=bias)
detector = MegaraDetectorSat('megara_test_detector', DSHAPE, OSCAN, PSCAN, qe=qe,
dark=(3.0 / 3600.0),
readpars1=readpars1, readpars2=readpars2,
bins='11')
return simulate_flat(detector, exposure=1.0, source=5000.0)
@pytest.mark.parametrize("direction", ['normal', 'mirror'])
def test_trim_and_o(direction):
temporary_path = mkdtemp()
fs = generate_bias_file()
fits.writeto('%s/flat.fits' % (temporary_path), fs, clobber=True)
trim_and_o('%s/flat.fits' % (temporary_path), out='%s/result.fits' % (temporary_path), direction=direction)
with fits.open('%s/result.fits' % (temporary_path)) as hdul:
assert hdul[0].shape[0] + 100 == fs.shape[0]
assert hdul[0].shape[1] + 100 == fs.shape[1]
shutil.rmtree(temporary_path)
def test_trim_and_o_fail():
temporary_path = mkdtemp()
fs = generate_bias_file()
fits.writeto('%s/flat.fits' % (temporary_path), fs, clobber=True)
direction = 'fails'
with pytest.raises(ValueError) as excinfo:
trim_and_o('%s/flat.fits' % (temporary_path), out='%s/result.fits' % (temporary_path), direction=direction)
shutil.rmtree(temporary_path)
assert excinfo.value.args[0] == "%s must be either 'normal' or 'mirror'" % direction
def test_trim_and_o_fail2():
temporary_path = mkdtemp()
fs = generate_bias_file()
fits.writeto('%s/flat.fits' % (temporary_path), fs, clobber=True)
bins = 'fail'
with pytest.raises(ValueError) as excinfo:
trim_and_o('%s/flat.fits' % (temporary_path), out='%s/result.fits' % (temporary_path), bins=bins)
shutil.rmtree(temporary_path)
assert excinfo.value.args[0] == "%s must be one if '11', '12', '21, '22'" % bins
@pytest.mark.skipif(not os.path.exists('master_weights_LCB_10img_1exp.tar'),
reason="no way of currently testing this without tar file")
def test_apextract_weights():
import tarfile
file_name = 'master_weights_LCB_10img_1exp.tar'
data = fits.getdata('fiberflat_frame.fits')
rss = apextract_weights(data, tarfile.open(file_name, 'r'))
hdu_rss = fits.PrimaryHDU(rss)
final = fits.HDUList([hdu_rss])
final.writeto('rss.fits', clobber=True)
assert True
if __name__ == "__main__":
# test_trim_and_o()
# test_trim_and_o_fail()
# test_trim_and_o_fail2()
    test_apextract_weights()
| gpl-3.0 | 5,889,563,368,696,697,000 | 32.031915 | 115 | 0.643363 | false |
mith1979/ansible_automation | applied_python/applied_python/lib/python2.7/site-packages/ansible/playbook/__init__.py | 6 | 35747 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.inventory
import ansible.constants as C
import ansible.runner
from ansible.utils.template import template
from ansible import utils
from ansible import errors
from ansible.module_utils.splitter import split_args, unquote
import ansible.callbacks
import ansible.cache
import os
import shlex
import collections
from play import Play
import StringIO
import pipes
# the setup cache stores all variables about a host
# gathered during the setup step, while the vars cache
# holds all other variables about a host
SETUP_CACHE = ansible.cache.FactCache()
VARS_CACHE = collections.defaultdict(dict)
RESERVED_TAGS = ['all','tagged','untagged','always']
class PlayBook(object):
'''
runs an ansible playbook, given as a datastructure or YAML filename.
A playbook is a deployment, config management, or automation based
set of commands to run in series.
multiple plays/tasks do not execute simultaneously, but tasks in each
pattern do execute in parallel (according to the number of forks
requested) among the hosts they address
'''
# *****************************************************
def __init__(self,
playbook = None,
host_list = C.DEFAULT_HOST_LIST,
module_path = None,
forks = C.DEFAULT_FORKS,
timeout = C.DEFAULT_TIMEOUT,
remote_user = C.DEFAULT_REMOTE_USER,
remote_pass = C.DEFAULT_REMOTE_PASS,
remote_port = None,
transport = C.DEFAULT_TRANSPORT,
private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
callbacks = None,
runner_callbacks = None,
stats = None,
extra_vars = None,
only_tags = None,
skip_tags = None,
subset = C.DEFAULT_SUBSET,
inventory = None,
check = False,
diff = False,
any_errors_fatal = False,
vault_password = False,
force_handlers = False,
# privilege escalation
become = C.DEFAULT_BECOME,
become_method = C.DEFAULT_BECOME_METHOD,
become_user = C.DEFAULT_BECOME_USER,
become_pass = None,
):
"""
playbook: path to a playbook file
host_list: path to a file like /etc/ansible/hosts
module_path: path to ansible modules, like /usr/share/ansible/
forks: desired level of parallelism
timeout: connection timeout
remote_user: run as this user if not specified in a particular play
remote_pass: use this remote password (for all plays) vs using SSH keys
remote_port: default remote port to use if not specified with the host or play
transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
callbacks: output callbacks for the playbook
runner_callbacks: more callbacks, this time for the runner API
stats: holds aggregate data about events occurring to each host
inventory: can be specified instead of host_list to use a pre-existing inventory object
check: don't change anything, just try to detect some potential changes
any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
force_handlers: continue to notify and run handlers even if a task fails
"""
self.SETUP_CACHE = SETUP_CACHE
self.VARS_CACHE = VARS_CACHE
arguments = []
if playbook is None:
arguments.append('playbook')
if callbacks is None:
arguments.append('callbacks')
if runner_callbacks is None:
arguments.append('runner_callbacks')
if stats is None:
arguments.append('stats')
if arguments:
raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
if extra_vars is None:
extra_vars = {}
if only_tags is None:
only_tags = [ 'all' ]
if skip_tags is None:
skip_tags = []
self.check = check
self.diff = diff
self.module_path = module_path
self.forks = forks
self.timeout = timeout
self.remote_user = remote_user
self.remote_pass = remote_pass
self.remote_port = remote_port
self.transport = transport
self.callbacks = callbacks
self.runner_callbacks = runner_callbacks
self.stats = stats
self.extra_vars = extra_vars
self.global_vars = {}
self.private_key_file = private_key_file
self.only_tags = only_tags
self.skip_tags = skip_tags
self.any_errors_fatal = any_errors_fatal
self.vault_password = vault_password
self.force_handlers = force_handlers
self.become = become
self.become_method = become_method
self.become_user = become_user
self.become_pass = become_pass
self.callbacks.playbook = self
self.runner_callbacks.playbook = self
if inventory is None:
self.inventory = ansible.inventory.Inventory(host_list)
self.inventory.subset(subset)
else:
self.inventory = inventory
if self.module_path is not None:
utils.plugins.module_finder.add_directory(self.module_path)
self.basedir = os.path.dirname(playbook) or '.'
utils.plugins.push_basedir(self.basedir)
# let inventory know the playbook basedir so it can load more vars
self.inventory.set_playbook_basedir(self.basedir)
vars = extra_vars.copy()
vars['playbook_dir'] = os.path.abspath(self.basedir)
if self.inventory.basedir() is not None:
vars['inventory_dir'] = self.inventory.basedir()
if self.inventory.src() is not None:
vars['inventory_file'] = self.inventory.src()
self.filename = playbook
(self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
ansible.callbacks.load_callback_plugins()
ansible.callbacks.set_playbook(self.callbacks, self)
self._ansible_version = utils.version_info(gitinfo=True)
# *****************************************************
def _get_playbook_vars(self, play_ds, existing_vars):
'''
Gets the vars specified with the play and blends them
with any existing vars that have already been read in
'''
new_vars = existing_vars.copy()
if 'vars' in play_ds:
if isinstance(play_ds['vars'], dict):
new_vars.update(play_ds['vars'])
elif isinstance(play_ds['vars'], list):
for v in play_ds['vars']:
new_vars.update(v)
return new_vars
# *****************************************************
def _get_include_info(self, play_ds, basedir, existing_vars={}):
'''
Gets any key=value pairs specified with the included file
name and returns the merged vars along with the path
'''
new_vars = existing_vars.copy()
tokens = split_args(play_ds.get('include', ''))
for t in tokens[1:]:
try:
(k,v) = unquote(t).split("=", 1)
new_vars[k] = template(basedir, v, new_vars)
except ValueError, e:
raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
return (new_vars, unquote(tokens[0]))
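# Illustrative note (not part of the original file): this parses parameterized
# playbook includes of the classic form
#
#   - include: webservers.yml package=httpd port=80
#
# where each k=v pair is templated and merged into the included plays' vars.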
# *****************************************************
def _get_playbook_vars_files(self, play_ds, existing_vars_files):
new_vars_files = list(existing_vars_files)
if 'vars_files' in play_ds:
new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files'])
return new_vars_files
# *****************************************************
def _extend_play_vars(self, play, vars={}):
'''
Extends the given play's variables with the additional specified vars.
'''
if 'vars' not in play or not play['vars']:
# someone left out or put an empty "vars:" entry in their playbook
return vars.copy()
play_vars = None
if isinstance(play['vars'], dict):
play_vars = play['vars'].copy()
play_vars.update(vars)
elif isinstance(play['vars'], list):
# nobody should really do this, but handle vars: a=1 b=2
play_vars = play['vars'][:]
play_vars.extend([{k:v} for k,v in vars.iteritems()])
return play_vars
# *****************************************************
def _load_playbook_from_file(self, path, vars={}, vars_files=[]):
'''
run top level error checking on playbooks and allow them to include other playbooks.
'''
playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
accumulated_plays = []
play_basedirs = []
if type(playbook_data) != list:
raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))
basedir = os.path.dirname(path) or '.'
utils.plugins.push_basedir(basedir)
for play in playbook_data:
if type(play) != dict:
raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)
if 'include' in play:
# a playbook (list of plays) decided to include some other list of plays
# from another file. The result is a flat list of plays in the end.
play_vars = self._get_playbook_vars(play, vars)
play_vars_files = self._get_playbook_vars_files(play, vars_files)
inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
play_vars.update(inc_vars)
included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
(plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
for p in plays:
# support for parameterized play includes works by passing
# those variables along to the subservient play
p['vars'] = self._extend_play_vars(p, play_vars)
# now add in the vars_files
p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)
accumulated_plays.extend(plays)
play_basedirs.extend(basedirs)
else:
# this is a normal (non-included play)
accumulated_plays.append(play)
play_basedirs.append(basedir)
return (accumulated_plays, play_basedirs)
# *****************************************************
def run(self):
''' run all patterns in the playbook '''
plays = []
matched_tags_all = set()
unmatched_tags_all = set()
# loop through all patterns and run them
self.callbacks.on_start()
for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
assert play is not None
matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
matched_tags_all = matched_tags_all | matched_tags
unmatched_tags_all = unmatched_tags_all | unmatched_tags
# Remove tasks we wish to skip
matched_tags = matched_tags - set(self.skip_tags)
# if we have matched_tags, the play must be run.
# if the play contains no tasks, assume we just want to gather facts
# in this case there are actually 3 meta tasks (handler flushes) not 0
# tasks, so that's why there's a check against 3
if (len(matched_tags) > 0 or len(play.tasks()) == 3):
plays.append(play)
# if the playbook is invoked with --tags or --skip-tags that don't
# exist at all in the playbooks then we need to raise an error so that
# the user can correct the arguments.
unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) -
(matched_tags_all | unmatched_tags_all))
for t in RESERVED_TAGS:
unknown_tags.discard(t)
if len(unknown_tags) > 0:
for t in RESERVED_TAGS:
unmatched_tags_all.discard(t)
msg = 'tag(s) not found in playbook: %s. possible values: %s'
unknown = ','.join(sorted(unknown_tags))
unmatched = ','.join(sorted(unmatched_tags_all))
raise errors.AnsibleError(msg % (unknown, unmatched))
for play in plays:
ansible.callbacks.set_play(self.callbacks, play)
ansible.callbacks.set_play(self.runner_callbacks, play)
if not self._run_play(play):
break
ansible.callbacks.set_play(self.callbacks, None)
ansible.callbacks.set_play(self.runner_callbacks, None)
# summarize the results
results = {}
for host in self.stats.processed.keys():
results[host] = self.stats.summarize(host)
return results
# *****************************************************
def _async_poll(self, poller, async_seconds, async_poll_interval):
''' launch an async job, if poll_interval is set, wait for completion '''
results = poller.wait(async_seconds, async_poll_interval)
# mark any hosts that are still listed as started as failed
# since these likely got killed by async_wrapper
for host in poller.hosts_to_poll:
reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
results['contacted'][host] = reason
return results
# *****************************************************
def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
''' returns a list of hosts that haven't failed and aren't dark '''
return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
# *****************************************************
def _run_task_internal(self, task, include_failed=False):
''' run a particular module step in a playbook '''
hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
self.inventory.restrict_to(hosts)
runner = ansible.runner.Runner(
pattern=task.play.hosts,
inventory=self.inventory,
module_name=task.module_name,
module_args=task.module_args,
forks=self.forks,
remote_pass=self.remote_pass,
module_path=self.module_path,
timeout=self.timeout,
remote_user=task.remote_user,
remote_port=task.play.remote_port,
module_vars=task.module_vars,
play_vars=task.play_vars,
play_file_vars=task.play_file_vars,
role_vars=task.role_vars,
role_params=task.role_params,
default_vars=task.default_vars,
extra_vars=self.extra_vars,
private_key_file=self.private_key_file,
setup_cache=self.SETUP_CACHE,
vars_cache=self.VARS_CACHE,
basedir=task.play.basedir,
conditional=task.when,
callbacks=self.runner_callbacks,
transport=task.transport,
is_playbook=True,
check=self.check,
diff=self.diff,
environment=task.environment,
complex_args=task.args,
accelerate=task.play.accelerate,
accelerate_port=task.play.accelerate_port,
accelerate_ipv6=task.play.accelerate_ipv6,
error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
vault_pass = self.vault_password,
run_hosts=hosts,
no_log=task.no_log,
run_once=task.run_once,
become=task.become,
become_method=task.become_method,
become_user=task.become_user,
become_pass=task.become_pass,
)
runner.module_vars.update({'play_hosts': hosts})
runner.module_vars.update({'ansible_version': self._ansible_version})
if task.async_seconds == 0:
results = runner.run()
else:
results, poller = runner.run_async(task.async_seconds)
self.stats.compute(results)
if task.async_poll_interval > 0:
# if not polling, playbook requested fire and forget, so don't poll
results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
else:
for (host, res) in results.get('contacted', {}).iteritems():
self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])
contacted = results.get('contacted',{})
dark = results.get('dark', {})
self.inventory.lift_restriction()
if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
return None
return results
# *****************************************************
def _run_task(self, play, task, is_handler):
''' run a single task in the playbook and recursively run any subtasks. '''
ansible.callbacks.set_task(self.callbacks, task)
ansible.callbacks.set_task(self.runner_callbacks, task)
if task.role_name:
name = '%s | %s' % (task.role_name, task.name)
else:
name = task.name
try:
# v1 HACK: we don't have enough information to template many names
# at this point. Rather than making this work for all cases in
# v1, just make this degrade gracefully. Will fix in v2
name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False)
except:
pass
self.callbacks.on_task_start(name, is_handler)
if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task:
ansible.callbacks.set_task(self.callbacks, None)
ansible.callbacks.set_task(self.runner_callbacks, None)
return True
# template ignore_errors
# TODO: Is this needed here? cond is templated again in
# check_conditional after some more manipulations.
# TODO: we don't have enough information here to template cond either
# (see note on templating name above)
cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
# load up an appropriate ansible runner to run the task in parallel
include_failed = is_handler and play.force_handlers
results = self._run_task_internal(task, include_failed=include_failed)
# if no hosts are matched, carry on
hosts_remaining = True
if results is None:
hosts_remaining = False
results = {}
contacted = results.get('contacted', {})
self.stats.compute(results, ignore_errors=task.ignore_errors)
def _register_play_vars(host, result):
# when 'register' is used, persist the result in the vars cache
# rather than the setup cache - vars should be transient between
# playbook executions
if 'stdout' in result and 'stdout_lines' not in result:
result['stdout_lines'] = result['stdout'].splitlines()
utils.update_hash(self.VARS_CACHE, host, {task.register: result})
def _save_play_facts(host, facts):
# saves play facts in SETUP_CACHE, unless the module executed was
# set_fact, in which case we add them to the VARS_CACHE
if task.module_name in ('set_fact', 'include_vars'):
utils.update_hash(self.VARS_CACHE, host, facts)
else:
utils.update_hash(self.SETUP_CACHE, host, facts)
# add facts to the global setup cache
for host, result in contacted.iteritems():
if 'results' in result:
# task ran with_ lookup plugin, so facts are encapsulated in
# multiple list items in the results key
for res in result['results']:
if type(res) == dict:
facts = res.get('ansible_facts', {})
_save_play_facts(host, facts)
else:
# when facts are returned, persist them in the setup cache
facts = result.get('ansible_facts', {})
_save_play_facts(host, facts)
# if requested, save the result into the registered variable name
if task.register:
_register_play_vars(host, result)
# also have to register some failed, but ignored, tasks
if task.ignore_errors and task.register:
failed = results.get('failed', {})
for host, result in failed.iteritems():
_register_play_vars(host, result)
# flag which notify handlers need to be run
if task.notify and len(task.notify) > 0:
for host, results in results.get('contacted',{}).iteritems():
if results.get('changed', False):
for handler_name in task.notify:
self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host)
ansible.callbacks.set_task(self.callbacks, None)
ansible.callbacks.set_task(self.runner_callbacks, None)
return hosts_remaining
# *****************************************************
def _flag_handler(self, play, handler_name, host):
'''
if a task has any notify elements, flag handlers for run
at end of execution cycle for hosts that have indicated
changes have been made
'''
found = False
for x in play.handlers():
if handler_name == template(play.basedir, x.name, x.module_vars):
found = True
self.callbacks.on_notify(host, x.name)
x.notified_by.append(host)
if not found:
raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
# *****************************************************
def _do_setup_step(self, play):
''' get facts from the remote system '''
host_list = self._trim_unavailable_hosts(play._play_hosts)
if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart':
host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
if len(host_list) == 0:
return {}
elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'):
return {}
self.callbacks.on_setup()
self.inventory.restrict_to(host_list)
ansible.callbacks.set_task(self.callbacks, None)
ansible.callbacks.set_task(self.runner_callbacks, None)
# push any variables down to the system
setup_results = ansible.runner.Runner(
basedir=self.basedir,
pattern=play.hosts,
module_name='setup',
module_args={},
inventory=self.inventory,
forks=self.forks,
module_path=self.module_path,
timeout=self.timeout,
remote_user=play.remote_user,
remote_pass=self.remote_pass,
remote_port=play.remote_port,
private_key_file=self.private_key_file,
setup_cache=self.SETUP_CACHE,
vars_cache=self.VARS_CACHE,
callbacks=self.runner_callbacks,
become=play.become,
become_method=play.become_method,
become_user=play.become_user,
become_pass=self.become_pass,
vault_pass=self.vault_password,
transport=play.transport,
is_playbook=True,
module_vars=play.vars,
play_vars=play.vars,
play_file_vars=play.vars_file_vars,
role_vars=play.role_vars,
default_vars=play.default_vars,
check=self.check,
diff=self.diff,
accelerate=play.accelerate,
accelerate_port=play.accelerate_port,
).run()
self.stats.compute(setup_results, setup=True)
self.inventory.lift_restriction()
# now for each result, load into the setup cache so we can
# let runner template out future commands
setup_ok = setup_results.get('contacted', {})
for (host, result) in setup_ok.iteritems():
utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
return setup_results
# *****************************************************
def generate_retry_inventory(self, replay_hosts):
'''
called by /usr/bin/ansible-playbook when a playbook run fails. It generates an inventory
that allows re-running on ONLY the failed hosts. This may duplicate some
variable information in group_vars/host_vars but that is ok, and expected.
'''
buf = StringIO.StringIO()
for x in replay_hosts:
buf.write("%s\n" % x)
basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH)
filename = "%s.retry" % os.path.basename(self.filename)
filename = filename.replace(".yml","")
filename = os.path.join(basedir, filename)
try:
if not os.path.exists(basedir):
os.makedirs(basedir)
fd = open(filename, 'w')
fd.write(buf.getvalue())
fd.close()
except:
ansible.callbacks.display(
"\nERROR: could not create retry file. Check the value of \n"
+ "the configuration variable 'retry_files_save_path' or set \n"
+ "'retry_files_enabled' to False to avoid this message.\n",
color='red'
)
return None
return filename
# *****************************************************
def tasks_to_run_in_play(self, play):
tasks = []
for task in play.tasks():
# only run the task if the requested tags match or has 'always' tag
u = set(['untagged'])
task_set = set(task.tags)
if 'always' in task.tags:
should_run = True
else:
if 'all' in self.only_tags:
should_run = True
else:
should_run = False
if 'tagged' in self.only_tags:
if task_set != u:
should_run = True
elif 'untagged' in self.only_tags:
if task_set == u:
should_run = True
else:
if task_set.intersection(self.only_tags):
should_run = True
# Check for tags that we need to skip
if 'all' in self.skip_tags:
should_run = False
else:
if 'tagged' in self.skip_tags:
if task_set != u:
should_run = False
elif 'untagged' in self.skip_tags:
if task_set == u:
should_run = False
else:
if should_run:
if task_set.intersection(self.skip_tags):
should_run = False
if should_run:
tasks.append(task)
return tasks
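# Illustrative note (not part of the original file): given a task tagged
# ['deploy'], running with --tags deploy (or --tags all) selects it and
# --skip-tags deploy drops it; the special 'tagged'/'untagged' values select
# tasks by whether they carry any tag at all, and tasks tagged 'always' run
# unless they are explicitly skipped.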
# *****************************************************
def _run_play(self, play):
''' run a list of tasks for a given pattern, in order '''
self.callbacks.on_play_start(play.name)
# Get the hosts for this play
play._play_hosts = self.inventory.list_hosts(play.hosts)
# if no hosts matches this play, drop out
if not play._play_hosts:
self.callbacks.on_no_hosts_matched()
return True
# get facts from system
self._do_setup_step(play)
# now with that data, handle conditional variable file imports!
all_hosts = self._trim_unavailable_hosts(play._play_hosts)
play.update_vars_files(all_hosts, vault_password=self.vault_password)
hosts_count = len(all_hosts)
if play.serial.endswith("%"):
# This is a percentage, so calculate it based on the
# number of hosts
serial_pct = int(play.serial.replace("%",""))
serial = int((serial_pct/100.0) * len(all_hosts))
# Ensure that no matter how small the percentage, serial
# can never fall below 1, so that things actually happen
serial = max(serial, 1)
else:
serial = int(play.serial)
serialized_batch = []
if serial <= 0:
serialized_batch = [all_hosts]
else:
# do N forks all the way through before moving to next
while len(all_hosts) > 0:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batch.append(play_hosts)
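# Illustrative example (not part of the original file): with 10 matched hosts
# and serial: "30%" the batch size is max(1, int(0.30 * 10)) == 3, so the
# batches become [h1..h3], [h4..h6], [h7..h9], [h10] and each batch runs the
# full task list before the next one starts.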
task_errors = False
for on_hosts in serialized_batch:
# restrict the play to just the hosts we have in our on_hosts block that are
# available.
play._play_hosts = self._trim_unavailable_hosts(on_hosts)
self.inventory.also_restrict_to(on_hosts)
for task in self.tasks_to_run_in_play(play):
if task.meta is not None:
# meta tasks can force handlers to run mid-play
if task.meta == 'flush_handlers':
self.run_handlers(play)
# skip calling the handler till the play is finished
continue
if not self._run_task(play, task, False):
# whether no hosts matched is fatal or not depends if it was on the initial step.
# if we got exactly no hosts on the first step (setup!) then the host group
# just didn't match anything and that's ok
return False
# Get a new list of what hosts are left as available, the ones that
# did not go fail/dark during the task
host_list = self._trim_unavailable_hosts(play._play_hosts)
# Set max_fail_pct to 0, So if any hosts fails, bail out
if task.any_errors_fatal and len(host_list) < hosts_count:
play.max_fail_pct = 0
# If threshold for max nodes failed is exceeded, bail out.
if play.serial > 0:
# if serial is set, we need to shorten the size of host_count
play_count = len(play._play_hosts)
if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count):
host_list = None
else:
if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
host_list = None
# if no hosts remain, drop out
if not host_list:
if play.force_handlers:
task_errors = True
break
else:
self.callbacks.on_no_hosts_remaining()
return False
# lift restrictions after each play finishes
self.inventory.lift_also_restriction()
if task_errors and not play.force_handlers:
# if there were failed tasks and handler execution
# is not forced, quit the play with an error
return False
else:
# no errors, go ahead and execute all handlers
if not self.run_handlers(play):
return False
return True
def run_handlers(self, play):
on_hosts = play._play_hosts
hosts_count = len(on_hosts)
for task in play.tasks():
if task.meta is not None:
fired_names = {}
for handler in play.handlers():
if len(handler.notified_by) > 0:
self.inventory.restrict_to(handler.notified_by)
# Resolve the variables first
handler_name = template(play.basedir, handler.name, handler.module_vars)
if handler_name not in fired_names:
self._run_task(play, handler, True)
# prevent duplicate handler includes from running more than once
fired_names[handler_name] = 1
host_list = self._trim_unavailable_hosts(play._play_hosts)
if handler.any_errors_fatal and len(host_list) < hosts_count:
play.max_fail_pct = 0
if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
host_list = None
if not host_list and not play.force_handlers:
self.callbacks.on_no_hosts_remaining()
return False
self.inventory.lift_restriction()
new_list = handler.notified_by[:]
for host in handler.notified_by:
if host in on_hosts:
while host in new_list:
new_list.remove(host)
handler.notified_by = new_list
continue
return True
| apache-2.0 | 5,962,869,106,696,795,000 | 39.900458 | 143 | 0.554816 | false |
aferr/TemporalPartitioningMemCtl | src/mem/slicc/ast/ObjDeclAST.py | 16 | 3765 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.symbols import Var
class ObjDeclAST(DeclAST):
def __init__(self, slicc, type_ast, ident, pairs):
super(ObjDeclAST, self).__init__(slicc, pairs)
self.type_ast = type_ast
self.ident = ident
def __repr__(self):
return "[ObjDecl: %r]" % self.ident
def generate(self):
machineComponentSym = False
self["chip_object"] = "yes"
if "hack" in self:
warning("'hack=' is now deprecated")
if "network" in self and "virtual_network" not in self:
self.error("Network queues require a 'virtual_network' attribute")
type = self.type_ast.type
if type.isBuffer and "ordered" not in self:
self.error("Buffer object decls require an 'ordered' attribute")
if "ordered" in self:
value = self["ordered"]
if value not in ("true", "false"):
self.error("The 'ordered' attribute is '%s' " + \
"must be 'true' or 'false'.", value)
if "random" in self:
value = self["random"]
if value not in ("true", "false"):
self.error("The 'random' attribute is '%s' " + \
"must be 'true' or 'false'.", value)
machine = self.symtab.state_machine
# FIXME : should all use accessors here to avoid public member
# variables
if self.ident == "id":
c_code = "m_chip_ptr.getID()"
elif self.ident == "version":
c_code = "m_version"
elif self.ident == "machineID":
c_code = "m_machineID"
elif machine:
c_code = "(*m_%s_%s_ptr)" % (machine.ident, self.ident)
else:
c_code = "(*m_%s_ptr)" % (self.ident)
v = Var(self.symtab, self.ident, self.location, type, c_code,
self.pairs, machine)
if machine:
machine.addObject(v)
self.symtab.newSymbol(v)
# used to cheat-- that is, access components in other machines
if machineComponentSym:
self.symtab.newMachComponentSym(v)
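# Illustrative note (not part of the original file): the checks above apply to
# SLICC object declarations such as (syntax approximate)
#
#   MessageBuffer requestToDir, network="To", virtual_network="0",
#                 ordered="false";
#
# where buffer declarations must carry 'ordered' and networked queues must
# also name a 'virtual_network'.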
| bsd-3-clause | -370,105,406,688,727,900 | 39.053191 | 78 | 0.64834 | false |
scottpurdy/nupic | tests/integration/nupic/opf/opf_experiment_results_test.py | 10 | 15627 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file tests specific experiments to see if they are providing the
correct results. These are high level tests of the algorithms themselves.
"""
import os
import shutil
from subprocess import call
import time
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
class OPFExperimentResultsTest(unittest.TestCase):
def testExperimentResults(self):
"""Run specific experiments and verify that they are producing the correct
results.
opfDir is the examples/opf directory in the install path
and is used to find run_opf_experiment.py
The testdir is the directory that contains the experiments we will be
running. When running in the auto-build setup, this will be a temporary
directory that has had this script, as well as the specific experiments
we will be running, copied into it by the qa/autotest/prediction_results.py
script.
When running stand-alone from the command line, this will point to the
examples/prediction directory in the install tree (same as predictionDir)
"""
nupic_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..", "..", "..", "..")
opfDir = os.path.join(nupic_dir, "examples", "opf")
testDir = opfDir
# The testdir is the directory that contains the experiments we will be
# running. When running in the auto-build setup, this will be a temporary
# directory that has had this script, as well as the specific experiments
# we will be running, copied into it by the
# qa/autotest/prediction_results.py script.
# When running stand-alone from the command line, we can simply point to the
# examples/prediction directory in the install tree.
if not os.path.exists(os.path.join(testDir, "experiments/classification")):
testDir = opfDir
# Generate any dynamically generated datasets now
command = ['python', os.path.join(testDir, 'experiments', 'classification',
'makeDatasets.py')]
retval = call(command)
self.assertEqual(retval, 0)
# Generate any dynamically generated datasets now
command = ['python', os.path.join(testDir, 'experiments', 'multistep',
'make_datasets.py')]
retval = call(command)
self.assertEqual(retval, 0)
# Generate any dynamically generated datasets now
command = ['python', os.path.join(testDir, 'experiments',
'spatial_classification', 'make_datasets.py')]
retval = call(command)
self.assertEqual(retval, 0)
# Run from the test directory so that we can find our experiments
os.chdir(testDir)
runExperiment = os.path.join(nupic_dir, "scripts", "run_opf_experiment.py")
# A list of experiments to run. Valid attributes:
# experimentDir - Required, path to the experiment directory containing
# description.py
# args - optional. List of arguments for run_opf_experiment
# results - A dictionary of expected results. The keys are tuples
# containing (predictionLogFileName, columnName). The
# value is a (min, max) expected value from the last row
# in the prediction log.
multistepTests = [
# For this one, in theory the error for 1 step should be < 0.20
{ 'experimentDir': 'experiments/multistep/simple_0',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
(0.0, 0.20),
}
},
# For this one, in theory the error for 1 step should be < 0.50, but we
# get slightly higher because our sample size is smaller than ideal
{ 'experimentDir': 'experiments/multistep/simple_0_f2',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
(0.0, 0.66),
}
},
# For this one, in theory the error for 1 step should be < 0.20
{ 'experimentDir': 'experiments/multistep/simple_1',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
(0.0, 0.20),
}
},
# For this test, we haven't figured out the theoretical error, this
# error is determined empirically from actual results
{ 'experimentDir': 'experiments/multistep/simple_1_f2',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
(0.0, 3.76),
}
},
# For this one, in theory the error for 1 step should be < 0.20, but we
# get slightly higher because our sample size is smaller than ideal
{ 'experimentDir': 'experiments/multistep/simple_2',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
(0.0, 0.31),
}
},
# For this one, in theory the error for 1 step should be < 0.10 and for
# 3 step < 0.30, but our actual results are better.
{ 'experimentDir': 'experiments/multistep/simple_3',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
(0.0, 0.06),
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=3:window=200:field=field1"):
(0.0, 0.20),
}
},
      # For this test, we haven't figured out the theoretical error; this
      # error is determined empirically from actual results.
{ 'experimentDir': 'experiments/multistep/simple_3_f2',
'results': {
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
(0.0, 0.6),
('DefaultTask.TemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=3:window=200:field=field2"):
(0.0, 1.8),
}
},
# Test missing record support.
# Should have 0 error by the end of the dataset
{ 'experimentDir': 'experiments/missing_record/simple_0',
'results': {
('DefaultTask.NontemporalMultiStep.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=25:field=field1"):
(1.0, 1.0),
}
},
] # end of multistepTests
classificationTests = [
# ----------------------------------------------------------------------
# Classification Experiments
{ 'experimentDir': 'experiments/classification/category_hub_TP_0',
'results': {
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classification:avg_err:window=200'): (0.0, 0.020),
}
},
{ 'experimentDir': 'experiments/classification/category_TM_0',
'results': {
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classification:avg_err:window=200'): (0.0, 0.045),
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classConfidences:neg_auc:computeEvery=10:window=200'): (-1.0, -0.98),
}
},
{ 'experimentDir': 'experiments/classification/category_TM_1',
'results': {
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classification:avg_err:window=200'): (0.0, 0.005),
}
},
{ 'experimentDir': 'experiments/classification/scalar_TP_0',
'results': {
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classification:avg_err:window=200'): (0.0, 0.155),
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classConfidences:neg_auc:computeEvery=10:window=200'): (-1.0, -0.900),
}
},
{ 'experimentDir': 'experiments/classification/scalar_TP_1',
'results': {
('OnlineLearning.TemporalClassification.predictionLog.csv',
'classification:avg_err:window=200'): (0.0, 0.03),
}
},
] # End of classification tests
spatialClassificationTests = [
{ 'experimentDir': 'experiments/spatial_classification/category_0',
'results': {
('DefaultTask.NontemporalClassification.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=0:window=100:field=classification"):
(0.0, 0.05),
}
},
{ 'experimentDir': 'experiments/spatial_classification/category_1',
'results': {
('DefaultTask.NontemporalClassification.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=0:window=100:field=classification"):
(0.0, 0.0),
}
},
{ 'experimentDir': 'experiments/spatial_classification/scalar_0',
'results': {
('DefaultTask.NontemporalClassification.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=0:window=100:field=classification"):
(0.0, 0.025),
}
},
{ 'experimentDir': 'experiments/spatial_classification/scalar_1',
'results': {
('DefaultTask.NontemporalClassification.predictionLog.csv',
"multiStepBestPredictions:multiStep:errorMetric='aae':steps=0:window=100:field=classification"):
(-1e-10, 0.01),
}
},
]
anomalyTests = [
# ----------------------------------------------------------------------
      # Anomaly Experiments
{ 'experimentDir': 'experiments/anomaly/temporal/simple',
'results': {
('DefaultTask.TemporalAnomaly.predictionLog.csv',
'anomalyScore:passThruPrediction:window=1000:field=f'): (0.02,
0.04),
}
},
] # End of anomaly tests
tests = []
tests += multistepTests
tests += classificationTests
tests += spatialClassificationTests
tests += anomalyTests
# Uncomment this to only run a specific experiment(s)
#tests = tests[7:8]
# This contains a list of tuples: (expDir, key, results)
summaryOfResults = []
startTime = time.time()
testIdx = -1
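    # Run each experiment in turn and check the final values in its prediction
    # log against the expected ranges defined above.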
for test in tests:
testIdx += 1
expDirectory = test['experimentDir']
# -------------------------------------------------------------------
# Remove files/directories generated by previous tests:
toDelete = []
# Remove inference results
path = os.path.join(expDirectory, "inference")
toDelete.append(path)
path = os.path.join(expDirectory, "savedmodels")
toDelete.append(path)
for path in toDelete:
if not os.path.exists(path):
continue
print "Removing %s ..." % path
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
# ------------------------------------------------------------------------
# Run the test.
args = test.get('args', [])
print "Running experiment %s ..." % (expDirectory)
command = ['python', runExperiment, expDirectory] + args
retVal = call(command)
      # A non-zero return code means the OPF experiment failed to run;
      # report the details before failing the test.
if retVal:
print "Details of failed test: %s" % test
print("TestIdx %d, OPF experiment '%s' failed with return code %i." %
(testIdx, expDirectory, retVal))
self.assertFalse(retVal)
# -----------------------------------------------------------------------
# Check the results
for (key, expValues) in test['results'].items():
(logFilename, colName) = key
# Open the prediction log file
logFile = FileRecordStream(os.path.join(expDirectory, 'inference',
logFilename))
colNames = [x[0] for x in logFile.getFields()]
if not colName in colNames:
print "TestIdx %d: %s not one of the columns in " \
"prediction log file. Available column names are: %s" % (testIdx,
colName, colNames)
self.assertTrue(colName in colNames)
colIndex = colNames.index(colName)
# Read till we get to the last line
while True:
try:
row = logFile.next()
except StopIteration:
break
result = row[colIndex]
# Save summary of results
summaryOfResults.append((expDirectory, colName, result))
print "Actual result for %s, %s:" % (expDirectory, colName), result
print "Expected range:", expValues
failed = (expValues[0] is not None and result < expValues[0]) \
or (expValues[1] is not None and result > expValues[1])
if failed:
print ("TestIdx %d: Experiment %s failed. \nThe actual result"
" for %s (%s) was outside the allowed range of %s" % (testIdx,
expDirectory, colName, result, expValues))
else:
print " Within expected range."
self.assertFalse(failed)
# =======================================================================
# Print summary of results:
print
print "Summary of results in all experiments run:"
print "========================================="
prevExpDir = None
for (expDir, key, results) in summaryOfResults:
if expDir != prevExpDir:
print
print expDir
prevExpDir = expDir
print " %s: %s" % (key, results)
print "\nElapsed time: %.1f seconds" % (time.time() - startTime)
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | 7,133,146,218,512,597,000 | 37.301471 | 114 | 0.592628 | false |
leandrotoledo/python-telegram-bot | tests/test_conversationhandler.py | 2 | 63261 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import logging
from time import sleep
import pytest
from flaky import flaky
from telegram import (
CallbackQuery,
Chat,
ChosenInlineResult,
InlineQuery,
Message,
PreCheckoutQuery,
ShippingQuery,
Update,
User,
MessageEntity,
)
from telegram.ext import (
ConversationHandler,
CommandHandler,
CallbackQueryHandler,
MessageHandler,
Filters,
InlineQueryHandler,
CallbackContext,
DispatcherHandlerStop,
TypeHandler,
JobQueue,
)
@pytest.fixture(scope='class')
def user1():
return User(first_name='Misses Test', id=123, is_bot=False)
@pytest.fixture(scope='class')
def user2():
return User(first_name='Mister Test', id=124, is_bot=False)
@pytest.fixture(autouse=True)
def start_stop_job_queue(dp):
dp.job_queue = JobQueue()
dp.job_queue.set_dispatcher(dp)
dp.job_queue.start()
yield
dp.job_queue.stop()
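# Test helper decorator: when the test instance sets raise_dp_handler_stop,
# the wrapped callback re-raises its return value inside DispatcherHandlerStop
# so the dispatcher stops processing further handler groups.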
def raise_dphs(func):
def decorator(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if self.raise_dp_handler_stop:
raise DispatcherHandlerStop(result)
return result
return decorator
class TestConversationHandler:
# State definitions
# At first we're thirsty. Then we brew coffee, we drink it
# and then we can start coding!
END, THIRSTY, BREWING, DRINKING, CODING = range(-1, 4)
# Drinking state definitions (nested)
# At first we're holding the cup. Then we sip coffee, and last we swallow it
HOLDING, SIPPING, SWALLOWING, REPLENISHING, STOPPING = map(chr, range(ord('a'), ord('f')))
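    # Handler configuration shared across tests; populated by the reset fixture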
current_state, entry_points, states, fallbacks = None, None, None, None
group = Chat(0, Chat.GROUP)
second_group = Chat(1, Chat.GROUP)
raise_dp_handler_stop = False
test_flag = False
def test_slot_behaviour(self, recwarn, mro_slots):
handler = ConversationHandler(self.entry_points, self.states, self.fallbacks)
for attr in handler.__slots__:
assert getattr(handler, attr, 'err') != 'err', f"got extra slot '{attr}'"
assert not handler.__dict__, f"got missing slot(s): {handler.__dict__}"
assert len(mro_slots(handler)) == len(set(mro_slots(handler))), "duplicate slot"
handler.custom, handler._persistence = 'should give warning', handler._persistence
assert len(recwarn) == 1 and 'custom' in str(recwarn[0].message), [
w.message for w in recwarn.list
]
# Test related
@pytest.fixture(autouse=True)
def reset(self):
self.raise_dp_handler_stop = False
self.test_flag = False
self.current_state = {}
self.entry_points = [CommandHandler('start', self.start)]
self.states = {
self.THIRSTY: [CommandHandler('brew', self.brew), CommandHandler('wait', self.start)],
self.BREWING: [CommandHandler('pourCoffee', self.drink)],
self.DRINKING: [
CommandHandler('startCoding', self.code),
CommandHandler('drinkMore', self.drink),
CommandHandler('end', self.end),
],
self.CODING: [
CommandHandler('keepCoding', self.code),
CommandHandler('gettingThirsty', self.start),
CommandHandler('drinkMore', self.drink),
],
}
self.fallbacks = [CommandHandler('eat', self.start)]
self.is_timeout = False
# for nesting tests
self.nested_states = {
self.THIRSTY: [CommandHandler('brew', self.brew), CommandHandler('wait', self.start)],
self.BREWING: [CommandHandler('pourCoffee', self.drink)],
self.CODING: [
CommandHandler('keepCoding', self.code),
CommandHandler('gettingThirsty', self.start),
CommandHandler('drinkMore', self.drink),
],
}
self.drinking_entry_points = [CommandHandler('hold', self.hold)]
self.drinking_states = {
self.HOLDING: [CommandHandler('sip', self.sip)],
self.SIPPING: [CommandHandler('swallow', self.swallow)],
self.SWALLOWING: [CommandHandler('hold', self.hold)],
}
self.drinking_fallbacks = [
CommandHandler('replenish', self.replenish),
CommandHandler('stop', self.stop),
CommandHandler('end', self.end),
CommandHandler('startCoding', self.code),
CommandHandler('drinkMore', self.drink),
]
self.drinking_entry_points.extend(self.drinking_fallbacks)
# Map nested states to parent states:
self.drinking_map_to_parent = {
# Option 1 - Map a fictional internal state to an external parent state
self.REPLENISHING: self.BREWING,
# Option 2 - Map a fictional internal state to the END state on the parent
self.STOPPING: self.END,
# Option 3 - Map the internal END state to an external parent state
self.END: self.CODING,
# Option 4 - Map an external state to the same external parent state
self.CODING: self.CODING,
# Option 5 - Map an external state to the internal entry point
self.DRINKING: self.DRINKING,
}
# State handlers
def _set_state(self, update, state):
self.current_state[update.message.from_user.id] = state
return state
# Actions
@raise_dphs
def start(self, bot, update):
if isinstance(update, Update):
return self._set_state(update, self.THIRSTY)
return self._set_state(bot, self.THIRSTY)
@raise_dphs
def end(self, bot, update):
return self._set_state(update, self.END)
@raise_dphs
def start_end(self, bot, update):
return self._set_state(update, self.END)
@raise_dphs
def start_none(self, bot, update):
return self._set_state(update, None)
@raise_dphs
def brew(self, bot, update):
if isinstance(update, Update):
return self._set_state(update, self.BREWING)
return self._set_state(bot, self.BREWING)
@raise_dphs
def drink(self, bot, update):
return self._set_state(update, self.DRINKING)
@raise_dphs
def code(self, bot, update):
return self._set_state(update, self.CODING)
@raise_dphs
def passout(self, bot, update):
assert update.message.text == '/brew'
assert isinstance(update, Update)
self.is_timeout = True
@raise_dphs
def passout2(self, bot, update):
assert isinstance(update, Update)
self.is_timeout = True
@raise_dphs
def passout_context(self, update, context):
assert update.message.text == '/brew'
assert isinstance(context, CallbackContext)
self.is_timeout = True
@raise_dphs
def passout2_context(self, update, context):
assert isinstance(context, CallbackContext)
self.is_timeout = True
# Drinking actions (nested)
@raise_dphs
def hold(self, bot, update):
return self._set_state(update, self.HOLDING)
@raise_dphs
def sip(self, bot, update):
return self._set_state(update, self.SIPPING)
@raise_dphs
def swallow(self, bot, update):
return self._set_state(update, self.SWALLOWING)
@raise_dphs
def replenish(self, bot, update):
return self._set_state(update, self.REPLENISHING)
@raise_dphs
def stop(self, bot, update):
return self._set_state(update, self.STOPPING)
# Tests
@pytest.mark.parametrize(
'attr',
[
'entry_points',
'states',
'fallbacks',
'per_chat',
'name',
'per_user',
'allow_reentry',
'conversation_timeout',
'map_to_parent',
],
indirect=False,
)
def test_immutable(self, attr):
ch = ConversationHandler(
'entry_points',
{'states': ['states']},
'fallbacks',
per_chat='per_chat',
per_user='per_user',
per_message=False,
allow_reentry='allow_reentry',
conversation_timeout='conversation_timeout',
name='name',
map_to_parent='map_to_parent',
)
value = getattr(ch, attr)
if isinstance(value, list):
assert value[0] == attr
elif isinstance(value, dict):
assert list(value.keys())[0] == attr
else:
assert getattr(ch, attr) == attr
with pytest.raises(ValueError, match=f'You can not assign a new value to {attr}'):
setattr(ch, attr, True)
def test_immutable_per_message(self):
ch = ConversationHandler(
'entry_points',
{'states': ['states']},
'fallbacks',
per_chat='per_chat',
per_user='per_user',
per_message=False,
allow_reentry='allow_reentry',
conversation_timeout='conversation_timeout',
name='name',
map_to_parent='map_to_parent',
)
assert ch.per_message is False
with pytest.raises(ValueError, match='You can not assign a new value to per_message'):
ch.per_message = True
def test_per_all_false(self):
with pytest.raises(ValueError, match="can't all be 'False'"):
ConversationHandler(
self.entry_points,
self.states,
self.fallbacks,
per_chat=False,
per_user=False,
per_message=False,
)
def test_name_and_persistent(self, dp):
with pytest.raises(ValueError, match="when handler is unnamed"):
dp.add_handler(ConversationHandler([], {}, [], persistent=True))
c = ConversationHandler([], {}, [], name="handler", persistent=True)
assert c.name == "handler"
def test_conversation_handler(self, dp, bot, user1, user2):
handler = ConversationHandler(
entry_points=self.entry_points, states=self.states, fallbacks=self.fallbacks
)
dp.add_handler(handler)
# User one, starts the state machine.
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.THIRSTY
# The user is thirsty and wants to brew coffee.
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.BREWING
        # Let's make sure an invalid command does not change the state.
message.text = '/nothing'
message.entities[0].length = len('/nothing')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.BREWING
        # Let's see if the state machine still works by pouring coffee.
message.text = '/pourCoffee'
message.entities[0].length = len('/pourCoffee')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.DRINKING
# Let's now verify that for another user, who did not start yet,
# the state has not been changed.
message.from_user = user2
dp.process_update(Update(update_id=0, message=message))
with pytest.raises(KeyError):
self.current_state[user2.id]
def test_conversation_handler_end(self, caplog, dp, bot, user1):
handler = ConversationHandler(
entry_points=self.entry_points, states=self.states, fallbacks=self.fallbacks
)
dp.add_handler(handler)
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=0, message=message))
message.text = '/pourCoffee'
message.entities[0].length = len('/pourCoffee')
dp.process_update(Update(update_id=0, message=message))
message.text = '/end'
message.entities[0].length = len('/end')
caplog.clear()
with caplog.at_level(logging.ERROR):
dp.process_update(Update(update_id=0, message=message))
assert len(caplog.records) == 0
assert self.current_state[user1.id] == self.END
with pytest.raises(KeyError):
print(handler.conversations[(self.group.id, user1.id)])
def test_conversation_handler_fallback(self, dp, bot, user1, user2):
handler = ConversationHandler(
entry_points=self.entry_points, states=self.states, fallbacks=self.fallbacks
)
dp.add_handler(handler)
# first check if fallback will not trigger start when not started
message = Message(
0,
None,
self.group,
from_user=user1,
text='/eat',
entities=[MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/eat'))],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
with pytest.raises(KeyError):
self.current_state[user1.id]
# User starts the state machine.
message.text = '/start'
message.entities[0].length = len('/start')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.THIRSTY
# The user is thirsty and wants to brew coffee.
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.BREWING
# Now a fallback command is issued
message.text = '/eat'
message.entities[0].length = len('/eat')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.THIRSTY
def test_unknown_state_warning(self, dp, bot, user1, recwarn):
handler = ConversationHandler(
entry_points=[CommandHandler("start", lambda u, c: 1)],
states={
1: [TypeHandler(Update, lambda u, c: 69)],
2: [TypeHandler(Update, lambda u, c: -1)],
},
fallbacks=self.fallbacks,
name="xyz",
)
dp.add_handler(handler)
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
sleep(0.5)
dp.process_update(Update(update_id=1, message=message))
sleep(0.5)
assert len(recwarn) == 1
assert str(recwarn[0].message) == (
"Handler returned state 69 which is unknown to the ConversationHandler xyz."
)
def test_conversation_handler_per_chat(self, dp, bot, user1, user2):
handler = ConversationHandler(
entry_points=self.entry_points,
states=self.states,
fallbacks=self.fallbacks,
per_user=False,
)
dp.add_handler(handler)
# User one, starts the state machine.
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
# The user is thirsty and wants to brew coffee.
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=0, message=message))
# Let's now verify that for another user, who did not start yet,
# the state will be changed because they are in the same group.
message.from_user = user2
message.text = '/pourCoffee'
message.entities[0].length = len('/pourCoffee')
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations[(self.group.id,)] == self.DRINKING
def test_conversation_handler_per_user(self, dp, bot, user1):
handler = ConversationHandler(
entry_points=self.entry_points,
states=self.states,
fallbacks=self.fallbacks,
per_chat=False,
)
dp.add_handler(handler)
# User one, starts the state machine.
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
# The user is thirsty and wants to brew coffee.
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=0, message=message))
# Let's now verify that for the same user in a different group, the state will still be
# updated
message.chat = self.second_group
message.text = '/pourCoffee'
message.entities[0].length = len('/pourCoffee')
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations[(user1.id,)] == self.DRINKING
def test_conversation_handler_per_message(self, dp, bot, user1, user2):
def entry(bot, update):
return 1
def one(bot, update):
return 2
def two(bot, update):
return ConversationHandler.END
handler = ConversationHandler(
entry_points=[CallbackQueryHandler(entry)],
states={1: [CallbackQueryHandler(one)], 2: [CallbackQueryHandler(two)]},
fallbacks=[],
per_message=True,
)
dp.add_handler(handler)
# User one, starts the state machine.
message = Message(
0, None, self.group, from_user=user1, text='msg w/ inlinekeyboard', bot=bot
)
cbq = CallbackQuery(0, user1, None, message=message, data='data', bot=bot)
dp.process_update(Update(update_id=0, callback_query=cbq))
assert handler.conversations[(self.group.id, user1.id, message.message_id)] == 1
dp.process_update(Update(update_id=0, callback_query=cbq))
assert handler.conversations[(self.group.id, user1.id, message.message_id)] == 2
# Let's now verify that for a different user in the same group, the state will not be
# updated
cbq.from_user = user2
dp.process_update(Update(update_id=0, callback_query=cbq))
assert handler.conversations[(self.group.id, user1.id, message.message_id)] == 2
def test_end_on_first_message(self, dp, bot, user1):
handler = ConversationHandler(
entry_points=[CommandHandler('start', self.start_end)], states={}, fallbacks=[]
)
dp.add_handler(handler)
# User starts the state machine and immediately ends it.
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
assert len(handler.conversations) == 0
def test_end_on_first_message_async(self, dp, bot, user1):
handler = ConversationHandler(
entry_points=[
CommandHandler(
'start', lambda bot, update: dp.run_async(self.start_end, bot, update)
)
],
states={},
fallbacks=[],
)
dp.add_handler(handler)
# User starts the state machine with an async function that immediately ends the
        # conversation. Async results are resolved when the user's state is queried next time.
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.update_queue.put(Update(update_id=0, message=message))
sleep(0.1)
# Assert that the Promise has been accepted as the new state
assert len(handler.conversations) == 1
message.text = 'resolve promise pls'
message.entities[0].length = len('resolve promise pls')
dp.update_queue.put(Update(update_id=0, message=message))
sleep(0.1)
# Assert that the Promise has been resolved and the conversation ended.
assert len(handler.conversations) == 0
def test_end_on_first_message_async_handler(self, dp, bot, user1):
handler = ConversationHandler(
entry_points=[CommandHandler('start', self.start_end, run_async=True)],
states={},
fallbacks=[],
)
dp.add_handler(handler)
# User starts the state machine with an async function that immediately ends the
        # conversation. Async results are resolved when the user's state is queried next time.
message = Message(
0,
None,
self.group,
text='/start',
from_user=user1,
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.update_queue.put(Update(update_id=0, message=message))
sleep(0.1)
# Assert that the Promise has been accepted as the new state
assert len(handler.conversations) == 1
message.text = 'resolve promise pls'
message.entities[0].length = len('resolve promise pls')
dp.update_queue.put(Update(update_id=0, message=message))
sleep(0.1)
# Assert that the Promise has been resolved and the conversation ended.
assert len(handler.conversations) == 0
def test_none_on_first_message(self, dp, bot, user1):
handler = ConversationHandler(
entry_points=[CommandHandler('start', self.start_none)], states={}, fallbacks=[]
)
dp.add_handler(handler)
# User starts the state machine and a callback function returns None
message = Message(0, None, self.group, from_user=user1, text='/start', bot=bot)
dp.process_update(Update(update_id=0, message=message))
assert len(handler.conversations) == 0
def test_none_on_first_message_async(self, dp, bot, user1):
handler = ConversationHandler(
entry_points=[
CommandHandler(
'start', lambda bot, update: dp.run_async(self.start_none, bot, update)
)
],
states={},
fallbacks=[],
)
dp.add_handler(handler)
# User starts the state machine with an async function that returns None
        # Async results are resolved when the user's state is queried next time.
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.update_queue.put(Update(update_id=0, message=message))
sleep(0.1)
# Assert that the Promise has been accepted as the new state
assert len(handler.conversations) == 1
message.text = 'resolve promise pls'
dp.update_queue.put(Update(update_id=0, message=message))
sleep(0.1)
# Assert that the Promise has been resolved and the conversation ended.
assert len(handler.conversations) == 0
def test_none_on_first_message_async_handler(self, dp, bot, user1):
handler = ConversationHandler(
entry_points=[CommandHandler('start', self.start_none, run_async=True)],
states={},
fallbacks=[],
)
dp.add_handler(handler)
# User starts the state machine with an async function that returns None
        # Async results are resolved when the user's state is queried next time.
message = Message(
0,
None,
self.group,
text='/start',
from_user=user1,
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.update_queue.put(Update(update_id=0, message=message))
sleep(0.1)
# Assert that the Promise has been accepted as the new state
assert len(handler.conversations) == 1
message.text = 'resolve promise pls'
dp.update_queue.put(Update(update_id=0, message=message))
sleep(0.1)
# Assert that the Promise has been resolved and the conversation ended.
assert len(handler.conversations) == 0
def test_per_chat_message_without_chat(self, bot, user1):
handler = ConversationHandler(
entry_points=[CommandHandler('start', self.start_end)], states={}, fallbacks=[]
)
cbq = CallbackQuery(0, user1, None, None, bot=bot)
update = Update(0, callback_query=cbq)
assert not handler.check_update(update)
def test_channel_message_without_chat(self, bot):
handler = ConversationHandler(
entry_points=[MessageHandler(Filters.all, self.start_end)], states={}, fallbacks=[]
)
message = Message(0, date=None, chat=Chat(0, Chat.CHANNEL, 'Misses Test'), bot=bot)
update = Update(0, channel_post=message)
assert not handler.check_update(update)
update = Update(0, edited_channel_post=message)
assert not handler.check_update(update)
def test_all_update_types(self, dp, bot, user1):
handler = ConversationHandler(
entry_points=[CommandHandler('start', self.start_end)], states={}, fallbacks=[]
)
message = Message(0, None, self.group, from_user=user1, text='ignore', bot=bot)
callback_query = CallbackQuery(0, user1, None, message=message, data='data', bot=bot)
chosen_inline_result = ChosenInlineResult(0, user1, 'query', bot=bot)
inline_query = InlineQuery(0, user1, 'query', 0, bot=bot)
pre_checkout_query = PreCheckoutQuery(0, user1, 'USD', 100, [], bot=bot)
shipping_query = ShippingQuery(0, user1, [], None, bot=bot)
assert not handler.check_update(Update(0, callback_query=callback_query))
assert not handler.check_update(Update(0, chosen_inline_result=chosen_inline_result))
assert not handler.check_update(Update(0, inline_query=inline_query))
assert not handler.check_update(Update(0, message=message))
assert not handler.check_update(Update(0, pre_checkout_query=pre_checkout_query))
assert not handler.check_update(Update(0, shipping_query=shipping_query))
def test_no_jobqueue_warning(self, dp, bot, user1, caplog):
handler = ConversationHandler(
entry_points=self.entry_points,
states=self.states,
fallbacks=self.fallbacks,
conversation_timeout=0.5,
)
# save dp.job_queue in temp variable jqueue
# and then set dp.job_queue to None.
jqueue = dp.job_queue
dp.job_queue = None
dp.add_handler(handler)
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
with caplog.at_level(logging.WARNING):
dp.process_update(Update(update_id=0, message=message))
sleep(0.5)
assert len(caplog.records) == 1
assert (
caplog.records[0].message
== "Ignoring `conversation_timeout` because the Dispatcher has no JobQueue."
)
        # now set dp.job_queue back to its original value
dp.job_queue = jqueue
def test_schedule_job_exception(self, dp, bot, user1, monkeypatch, caplog):
def mocked_run_once(*a, **kw):
raise Exception("job error")
monkeypatch.setattr(dp.job_queue, "run_once", mocked_run_once)
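        # With run_once patched to raise, scheduling the timeout job must fail;
        # the handler should log the exception instead of propagating it.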
handler = ConversationHandler(
entry_points=self.entry_points,
states=self.states,
fallbacks=self.fallbacks,
conversation_timeout=100,
)
dp.add_handler(handler)
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
with caplog.at_level(logging.ERROR):
dp.process_update(Update(update_id=0, message=message))
sleep(0.5)
assert len(caplog.records) == 2
assert (
caplog.records[0].message
== "Failed to schedule timeout job due to the following exception:"
)
assert caplog.records[1].message == "job error"
def test_promise_exception(self, dp, bot, user1, caplog):
"""
        Here we make sure that when a run_async handler raises an
        exception, the state isn't changed.
"""
def conv_entry(*a, **kw):
return 1
def raise_error(*a, **kw):
raise Exception("promise exception")
handler = ConversationHandler(
entry_points=[CommandHandler("start", conv_entry)],
states={1: [MessageHandler(Filters.all, raise_error)]},
fallbacks=self.fallbacks,
run_async=True,
)
dp.add_handler(handler)
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
# start the conversation
dp.process_update(Update(update_id=0, message=message))
sleep(0.1)
message.text = "error"
dp.process_update(Update(update_id=0, message=message))
sleep(0.1)
message.text = "resolve promise pls"
caplog.clear()
with caplog.at_level(logging.ERROR):
dp.process_update(Update(update_id=0, message=message))
sleep(0.5)
assert len(caplog.records) == 3
assert caplog.records[0].message == "Promise function raised exception"
assert caplog.records[1].message == "promise exception"
# assert res is old state
assert handler.conversations.get((self.group.id, user1.id))[0] == 1
def test_conversation_timeout(self, dp, bot, user1):
handler = ConversationHandler(
entry_points=self.entry_points,
states=self.states,
fallbacks=self.fallbacks,
conversation_timeout=0.5,
)
dp.add_handler(handler)
# Start state machine, then reach timeout
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations.get((self.group.id, user1.id)) == self.THIRSTY
sleep(0.75)
assert handler.conversations.get((self.group.id, user1.id)) is None
# Start state machine, do something, then reach timeout
dp.process_update(Update(update_id=1, message=message))
assert handler.conversations.get((self.group.id, user1.id)) == self.THIRSTY
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=2, message=message))
assert handler.conversations.get((self.group.id, user1.id)) == self.BREWING
sleep(0.7)
assert handler.conversations.get((self.group.id, user1.id)) is None
def test_timeout_not_triggered_on_conv_end_async(self, bot, dp, user1):
def timeout(*a, **kw):
self.test_flag = True
self.states.update({ConversationHandler.TIMEOUT: [TypeHandler(Update, timeout)]})
handler = ConversationHandler(
entry_points=self.entry_points,
states=self.states,
fallbacks=self.fallbacks,
conversation_timeout=0.5,
run_async=True,
)
dp.add_handler(handler)
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
# start the conversation
dp.process_update(Update(update_id=0, message=message))
sleep(0.1)
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=1, message=message))
sleep(0.1)
message.text = '/pourCoffee'
message.entities[0].length = len('/pourCoffee')
dp.process_update(Update(update_id=2, message=message))
sleep(0.1)
message.text = '/end'
message.entities[0].length = len('/end')
dp.process_update(Update(update_id=3, message=message))
sleep(1)
        # assert the timeout handler didn't get called
assert self.test_flag is False
def test_conversation_timeout_dispatcher_handler_stop(self, dp, bot, user1, caplog):
handler = ConversationHandler(
entry_points=self.entry_points,
states=self.states,
fallbacks=self.fallbacks,
conversation_timeout=0.5,
)
def timeout(*args, **kwargs):
raise DispatcherHandlerStop()
self.states.update({ConversationHandler.TIMEOUT: [TypeHandler(Update, timeout)]})
dp.add_handler(handler)
# Start state machine, then reach timeout
message = Message(
0,
None,
self.group,
text='/start',
from_user=user1,
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
with caplog.at_level(logging.WARNING):
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations.get((self.group.id, user1.id)) == self.THIRSTY
sleep(0.9)
assert handler.conversations.get((self.group.id, user1.id)) is None
assert len(caplog.records) == 1
rec = caplog.records[-1]
assert rec.getMessage().startswith('DispatcherHandlerStop in TIMEOUT')
def test_conversation_handler_timeout_update_and_context(self, cdp, bot, user1):
context = None
def start_callback(u, c):
nonlocal context, self
context = c
return self.start(u, c)
states = self.states
timeout_handler = CommandHandler('start', None)
states.update({ConversationHandler.TIMEOUT: [timeout_handler]})
handler = ConversationHandler(
entry_points=[CommandHandler('start', start_callback)],
states=states,
fallbacks=self.fallbacks,
conversation_timeout=0.5,
)
cdp.add_handler(handler)
# Start state machine, then reach timeout
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
update = Update(update_id=0, message=message)
def timeout_callback(u, c):
nonlocal update, context, self
self.is_timeout = True
assert u is update
assert c is context
timeout_handler.callback = timeout_callback
cdp.process_update(update)
sleep(0.7)
assert handler.conversations.get((self.group.id, user1.id)) is None
assert self.is_timeout
@flaky(3, 1)
def test_conversation_timeout_keeps_extending(self, dp, bot, user1):
handler = ConversationHandler(
entry_points=self.entry_points,
states=self.states,
fallbacks=self.fallbacks,
conversation_timeout=0.5,
)
dp.add_handler(handler)
# Start state machine, wait, do something, verify the timeout is extended.
# t=0 /start (timeout=.5)
# t=.35 /brew (timeout=.85)
# t=.5 original timeout
# t=.6 /pourCoffee (timeout=1.1)
# t=.85 second timeout
# t=1.1 actual timeout
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations.get((self.group.id, user1.id)) == self.THIRSTY
sleep(0.35) # t=.35
assert handler.conversations.get((self.group.id, user1.id)) == self.THIRSTY
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations.get((self.group.id, user1.id)) == self.BREWING
sleep(0.25) # t=.6
assert handler.conversations.get((self.group.id, user1.id)) == self.BREWING
message.text = '/pourCoffee'
message.entities[0].length = len('/pourCoffee')
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations.get((self.group.id, user1.id)) == self.DRINKING
sleep(0.4) # t=1.0
assert handler.conversations.get((self.group.id, user1.id)) == self.DRINKING
sleep(0.3) # t=1.3
assert handler.conversations.get((self.group.id, user1.id)) is None
def test_conversation_timeout_two_users(self, dp, bot, user1, user2):
handler = ConversationHandler(
entry_points=self.entry_points,
states=self.states,
fallbacks=self.fallbacks,
conversation_timeout=0.5,
)
dp.add_handler(handler)
# Start state machine, do something as second user, then reach timeout
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations.get((self.group.id, user1.id)) == self.THIRSTY
message.text = '/brew'
        message.entities[0].length = len('/brew')
message.from_user = user2
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations.get((self.group.id, user2.id)) is None
message.text = '/start'
message.entities[0].length = len('/start')
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations.get((self.group.id, user2.id)) == self.THIRSTY
sleep(0.7)
assert handler.conversations.get((self.group.id, user1.id)) is None
assert handler.conversations.get((self.group.id, user2.id)) is None
def test_conversation_handler_timeout_state(self, dp, bot, user1):
states = self.states
states.update(
{
ConversationHandler.TIMEOUT: [
CommandHandler('brew', self.passout),
MessageHandler(~Filters.regex('oding'), self.passout2),
]
}
)
handler = ConversationHandler(
entry_points=self.entry_points,
states=states,
fallbacks=self.fallbacks,
conversation_timeout=0.5,
)
dp.add_handler(handler)
# CommandHandler timeout
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=0, message=message))
sleep(0.7)
assert handler.conversations.get((self.group.id, user1.id)) is None
assert self.is_timeout
# MessageHandler timeout
self.is_timeout = False
message.text = '/start'
message.entities[0].length = len('/start')
dp.process_update(Update(update_id=1, message=message))
sleep(0.7)
assert handler.conversations.get((self.group.id, user1.id)) is None
assert self.is_timeout
# Timeout but no valid handler
self.is_timeout = False
dp.process_update(Update(update_id=0, message=message))
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=0, message=message))
message.text = '/startCoding'
message.entities[0].length = len('/startCoding')
dp.process_update(Update(update_id=0, message=message))
sleep(0.7)
assert handler.conversations.get((self.group.id, user1.id)) is None
assert not self.is_timeout
def test_conversation_handler_timeout_state_context(self, cdp, bot, user1):
states = self.states
states.update(
{
ConversationHandler.TIMEOUT: [
CommandHandler('brew', self.passout_context),
MessageHandler(~Filters.regex('oding'), self.passout2_context),
]
}
)
handler = ConversationHandler(
entry_points=self.entry_points,
states=states,
fallbacks=self.fallbacks,
conversation_timeout=0.5,
)
cdp.add_handler(handler)
# CommandHandler timeout
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
cdp.process_update(Update(update_id=0, message=message))
message.text = '/brew'
message.entities[0].length = len('/brew')
cdp.process_update(Update(update_id=0, message=message))
sleep(0.7)
assert handler.conversations.get((self.group.id, user1.id)) is None
assert self.is_timeout
# MessageHandler timeout
self.is_timeout = False
message.text = '/start'
message.entities[0].length = len('/start')
cdp.process_update(Update(update_id=1, message=message))
sleep(0.7)
assert handler.conversations.get((self.group.id, user1.id)) is None
assert self.is_timeout
# Timeout but no valid handler
self.is_timeout = False
cdp.process_update(Update(update_id=0, message=message))
message.text = '/brew'
message.entities[0].length = len('/brew')
cdp.process_update(Update(update_id=0, message=message))
message.text = '/startCoding'
message.entities[0].length = len('/startCoding')
cdp.process_update(Update(update_id=0, message=message))
sleep(0.7)
assert handler.conversations.get((self.group.id, user1.id)) is None
assert not self.is_timeout
def test_conversation_timeout_cancel_conflict(self, dp, bot, user1):
# Start state machine, wait half the timeout,
# then call a callback that takes more than the timeout
# t=0 /start (timeout=.5)
# t=.25 /slowbrew (sleep .5)
# | t=.5 original timeout (should not execute)
# | t=.75 /slowbrew returns (timeout=1.25)
# t=1.25 timeout
def slowbrew(_bot, update):
sleep(0.25)
            # Let's give the original timeout a chance to execute
sleep(0.25)
# By returning None we do not override the conversation state so
# we can see if the timeout has been executed
states = self.states
states[self.THIRSTY].append(CommandHandler('slowbrew', slowbrew))
states.update({ConversationHandler.TIMEOUT: [MessageHandler(None, self.passout2)]})
handler = ConversationHandler(
entry_points=self.entry_points,
states=states,
fallbacks=self.fallbacks,
conversation_timeout=0.5,
)
dp.add_handler(handler)
# CommandHandler timeout
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
bot=bot,
)
dp.process_update(Update(update_id=0, message=message))
sleep(0.25)
message.text = '/slowbrew'
message.entities[0].length = len('/slowbrew')
dp.process_update(Update(update_id=0, message=message))
assert handler.conversations.get((self.group.id, user1.id)) is not None
assert not self.is_timeout
sleep(0.7)
assert handler.conversations.get((self.group.id, user1.id)) is None
assert self.is_timeout
def test_conversation_timeout_warning_only_shown_once(self, recwarn):
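        # Nesting ConversationHandlers while using conversation_timeout is not
        # supported and should emit exactly one warning.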
ConversationHandler(
entry_points=self.entry_points,
states={
self.THIRSTY: [
ConversationHandler(
entry_points=self.entry_points,
states={
self.BREWING: [CommandHandler('pourCoffee', self.drink)],
},
fallbacks=self.fallbacks,
)
],
self.DRINKING: [
ConversationHandler(
entry_points=self.entry_points,
states={
self.CODING: [CommandHandler('startCoding', self.code)],
},
fallbacks=self.fallbacks,
)
],
},
fallbacks=self.fallbacks,
conversation_timeout=100,
)
assert len(recwarn) == 1
assert str(recwarn[0].message) == (
"Using `conversation_timeout` with nested conversations is currently not "
"supported. You can still try to use it, but it will likely behave "
"differently from what you expect."
)
def test_per_message_warning_is_only_shown_once(self, recwarn):
ConversationHandler(
entry_points=self.entry_points,
states={
self.THIRSTY: [CommandHandler('pourCoffee', self.drink)],
self.BREWING: [CommandHandler('startCoding', self.code)],
},
fallbacks=self.fallbacks,
per_message=True,
)
assert len(recwarn) == 1
assert str(recwarn[0].message) == (
"If 'per_message=True', all entry points and state handlers"
" must be 'CallbackQueryHandler', since no other handlers"
" have a message context."
)
def test_per_message_false_warning_is_only_shown_once(self, recwarn):
ConversationHandler(
entry_points=self.entry_points,
states={
self.THIRSTY: [CallbackQueryHandler(self.drink)],
self.BREWING: [CallbackQueryHandler(self.code)],
},
fallbacks=self.fallbacks,
per_message=False,
)
assert len(recwarn) == 1
assert str(recwarn[0].message) == (
"If 'per_message=False', 'CallbackQueryHandler' will not be "
"tracked for every message."
)
def test_warnings_per_chat_is_only_shown_once(self, recwarn):
def hello(bot, update):
return self.BREWING
def bye(bot, update):
return ConversationHandler.END
ConversationHandler(
entry_points=self.entry_points,
states={
self.THIRSTY: [InlineQueryHandler(hello)],
self.BREWING: [InlineQueryHandler(bye)],
},
fallbacks=self.fallbacks,
per_chat=True,
)
assert len(recwarn) == 1
assert str(recwarn[0].message) == (
"If 'per_chat=True', 'InlineQueryHandler' can not be used,"
" since inline queries have no chat context."
)
def test_nested_conversation_handler(self, dp, bot, user1, user2):
self.nested_states[self.DRINKING] = [
ConversationHandler(
entry_points=self.drinking_entry_points,
states=self.drinking_states,
fallbacks=self.drinking_fallbacks,
map_to_parent=self.drinking_map_to_parent,
)
]
handler = ConversationHandler(
entry_points=self.entry_points, states=self.nested_states, fallbacks=self.fallbacks
)
dp.add_handler(handler)
# User one, starts the state machine.
message = Message(
0,
None,
self.group,
from_user=user1,
text='/start',
bot=bot,
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
)
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.THIRSTY
# The user is thirsty and wants to brew coffee.
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.BREWING
        # Let's pour some coffee.
message.text = '/pourCoffee'
message.entities[0].length = len('/pourCoffee')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.DRINKING
# The user is holding the cup
message.text = '/hold'
message.entities[0].length = len('/hold')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.HOLDING
# The user is sipping coffee
message.text = '/sip'
message.entities[0].length = len('/sip')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.SIPPING
# The user is swallowing
message.text = '/swallow'
message.entities[0].length = len('/swallow')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.SWALLOWING
# The user is holding the cup again
message.text = '/hold'
message.entities[0].length = len('/hold')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.HOLDING
# The user wants to replenish the coffee supply
message.text = '/replenish'
message.entities[0].length = len('/replenish')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.REPLENISHING
assert handler.conversations[(0, user1.id)] == self.BREWING
# The user wants to drink their coffee again
message.text = '/pourCoffee'
message.entities[0].length = len('/pourCoffee')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.DRINKING
# The user is now ready to start coding
message.text = '/startCoding'
message.entities[0].length = len('/startCoding')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.CODING
# The user decides it's time to drink again
message.text = '/drinkMore'
message.entities[0].length = len('/drinkMore')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.DRINKING
# The user is holding their cup
message.text = '/hold'
message.entities[0].length = len('/hold')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.HOLDING
# The user wants to end with the drinking and go back to coding
message.text = '/end'
message.entities[0].length = len('/end')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.END
assert handler.conversations[(0, user1.id)] == self.CODING
# The user wants to drink once more
message.text = '/drinkMore'
message.entities[0].length = len('/drinkMore')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.DRINKING
# The user wants to stop altogether
message.text = '/stop'
message.entities[0].length = len('/stop')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.STOPPING
assert handler.conversations.get((0, user1.id)) is None
def test_conversation_dispatcher_handler_stop(self, dp, bot, user1, user2):
self.nested_states[self.DRINKING] = [
ConversationHandler(
entry_points=self.drinking_entry_points,
states=self.drinking_states,
fallbacks=self.drinking_fallbacks,
map_to_parent=self.drinking_map_to_parent,
)
]
handler = ConversationHandler(
entry_points=self.entry_points, states=self.nested_states, fallbacks=self.fallbacks
)
def test_callback(u, c):
self.test_flag = True
dp.add_handler(handler)
dp.add_handler(TypeHandler(Update, test_callback), group=1)
self.raise_dp_handler_stop = True
# User one, starts the state machine.
message = Message(
0,
None,
self.group,
text='/start',
bot=bot,
from_user=user1,
entities=[
MessageEntity(type=MessageEntity.BOT_COMMAND, offset=0, length=len('/start'))
],
)
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.THIRSTY
assert not self.test_flag
# The user is thirsty and wants to brew coffee.
message.text = '/brew'
message.entities[0].length = len('/brew')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.BREWING
assert not self.test_flag
        # Let's pour some coffee.
message.text = '/pourCoffee'
message.entities[0].length = len('/pourCoffee')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.DRINKING
assert not self.test_flag
# The user is holding the cup
message.text = '/hold'
message.entities[0].length = len('/hold')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.HOLDING
assert not self.test_flag
# The user is sipping coffee
message.text = '/sip'
message.entities[0].length = len('/sip')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.SIPPING
assert not self.test_flag
# The user is swallowing
message.text = '/swallow'
message.entities[0].length = len('/swallow')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.SWALLOWING
assert not self.test_flag
# The user is holding the cup again
message.text = '/hold'
message.entities[0].length = len('/hold')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.HOLDING
assert not self.test_flag
# The user wants to replenish the coffee supply
message.text = '/replenish'
message.entities[0].length = len('/replenish')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.REPLENISHING
assert handler.conversations[(0, user1.id)] == self.BREWING
assert not self.test_flag
# The user wants to drink their coffee again
message.text = '/pourCoffee'
message.entities[0].length = len('/pourCoffee')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.DRINKING
assert not self.test_flag
# The user is now ready to start coding
message.text = '/startCoding'
message.entities[0].length = len('/startCoding')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.CODING
assert not self.test_flag
# The user decides it's time to drink again
message.text = '/drinkMore'
message.entities[0].length = len('/drinkMore')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.DRINKING
assert not self.test_flag
# The user is holding their cup
message.text = '/hold'
message.entities[0].length = len('/hold')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.HOLDING
assert not self.test_flag
# The user wants to end with the drinking and go back to coding
message.text = '/end'
message.entities[0].length = len('/end')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.END
assert handler.conversations[(0, user1.id)] == self.CODING
assert not self.test_flag
# The user wants to drink once more
message.text = '/drinkMore'
message.entities[0].length = len('/drinkMore')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.DRINKING
assert not self.test_flag
# The user wants to stop altogether
message.text = '/stop'
message.entities[0].length = len('/stop')
dp.process_update(Update(update_id=0, message=message))
assert self.current_state[user1.id] == self.STOPPING
assert handler.conversations.get((0, user1.id)) is None
assert not self.test_flag
def test_conversation_handler_run_async_true(self, dp):
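        # run_async=True on the ConversationHandler should propagate to every
        # entry point, state handler and fallback.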
conv_handler = ConversationHandler(
entry_points=self.entry_points,
states=self.states,
fallbacks=self.fallbacks,
run_async=True,
)
all_handlers = conv_handler.entry_points + conv_handler.fallbacks
for state_handlers in conv_handler.states.values():
all_handlers += state_handlers
for handler in all_handlers:
assert handler.run_async
def test_conversation_handler_run_async_false(self, dp):
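        # An explicit run_async=True on an entry point is kept even though the
        # ConversationHandler itself is created with run_async=False.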
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', self.start_end, run_async=True)],
states=self.states,
fallbacks=self.fallbacks,
run_async=False,
)
for handler in conv_handler.entry_points:
assert handler.run_async
all_handlers = conv_handler.fallbacks
for state_handlers in conv_handler.states.values():
all_handlers += state_handlers
for handler in all_handlers:
assert not handler.run_async.value
| lgpl-3.0 | -6,040,542,914,925,633,000 | 36.125 | 99 | 0.585969 | false |
CG-F16-4-Rutgers/steersuite-rutgers | steerstats/tools/GetRecordings.py | 8 | 1612 | #!/usr/bin/python
# Replace <USERNAME_DATABASE>, <USERNAME>, and <PASSWORD> below with your actual DB, user, and password.
import psycopg2
import psycopg2.extras
import sys
# Append parent directory to access steersuite api.
sys.path.append("../")
from steersuitedb.Recording import Recording
from steersuitedb.Util import getTime
from steersuitedb.Scenario import Scenario
from steersuitedb.Algorithm import Algorithm
from steersuitedb.Benchmark import Benchmark
from steersuitedb.ConnectionManager import ConnectionManager
con = None
recording_dir="recordings/"
try:
conM = ConnectionManager()
con = conM.getConnection()
test_id=4
_recording_ids = []
# gets a Dict like cursor so that columns can be referenced by name.
cur = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
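    # (illustration: rows can then be read as row['recording_id'] instead of row[0])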
cur.execute("select recording_id from recording")
recording_ids = cur.fetchall()
for _recording_id in recording_ids:
_recording_ids.append(int(_recording_id['recording_id']))
print _recording_ids
    # accessor used to fetch recording data from the database
    recording = Recording()
    # fetch each recording and write it out as a .rec file
for recording_id in _recording_ids:
result = recording.getRecording(cur, recording_id)
recording_file = open(recording_dir+str(recording_id)+".rec","w+b")
recording_file.write(result.getRecordingData())
recording_file.close()
except psycopg2.DatabaseError, e:
print 'Error pprAI-test: %s' % e
sys.exit(1)
finally:
if con:
con.close()
| gpl-3.0 | -2,643,345,603,503,362,000 | 25 | 104 | 0.693548 | false |
labase/activnce | main/test/test_evaluation.py | 1 | 15883 | # -*- coding: utf-8 -*-
"""
################################################
Plataforma ActivUFRJ
################################################
:Author: *Núcleo de Computação Eletrônica (NCE/UFRJ)*
:Contact: [email protected]
:Date: $Date: 2009-2010 $
:Status: This is a "work in progress"
:Revision: $Revision: 0.01 $
:Home: `LABASE <http://labase.nce.ufrj.br/>`__
:Copyright: ©2009, `GPL <http://is.gd/3Udt>`__.
"""
from main import WsgiApp
from webtest import TestApp
from webob import Request, Response
from test_core import REGISTRY_FORM, LOGIN_FORM, registry, login, \
MAGKEYS_DEFAULT
#import mocker
#from mocker import Mocker
from datetime import datetime
import evaluation.model
import core.model
import wiki.model
import log.model
import unittest
CREATE_COMMUNITY_FORM = { "name":"comunidade5",
"description": "Primeira Comunidade",
"participacao": "Mediante Convite",
"privacidade": "Pública" }
EVALUATION_DEFAULT = lambda: dict( {
"comunidade5/av1":
{
"_id":"comunidade5/av1",
"nome":"av1",
"descricao":"primeira avaliação",
"tipo":"participantes",
"avaliados":['teste'],
"owner": "teste",
"data_inicio": "2010-05-05 14:45:40.631966",
"data_encerramento": "2020-05-07 14:45:40.631966",
"pontuacao": [3,1],
"data_cri": "2010-05-05 14:45:40.631966"
}
})
EVALUATION_COMMUNITY_DEFAULT = lambda: dict( {
"comunidade5":
{
"avaliacoes":["comunidade5/av1"]
}
})
CREATE_EVALUATION_FORM = lambda: dict(
nome = "av2",
descricao = "descrição",
dia1 = "30",
mes1 = "04",
ano1 = "2010",
hora1 = "08",
min1 = "00",
dia2 = "30",
mes2 = "05",
ano2 = "2020",
hora2 = "08",
min2 = "00",
pontuacao = "2,1",
teste = "S"
)
PERFORM_EVALUATION_FORM = lambda: dict(
opcao1 = "teste",
opcao2 = "teste2",
)
create_evaluation = {}
create_community = {}
perform_evaluation = {}
class TestEvaluation(unittest.TestCase):
"""Testes unitários para o gerenciamento de avaliações"""
def setUp(self):
#self.mock = Mocker()
#MEMBER = self.mock.mock()
log.model.LOG = {}
log.model.NEWS = {}
evaluation.model.EVALUATION = EVALUATION_DEFAULT()
evaluation.model.EVALUATIONCOMMUNITY = EVALUATION_COMMUNITY_DEFAULT()
core.model.REGISTRY = {}
        core.model.MAGKEYS = MAGKEYS_DEFAULT() # recreate the magic key so another registration can be made
core.model.INVITES = {}
core.model.USERSCHOOL = {}
#core.model.MAGKEYTYPES = {}
log.model.NOTIFICATIONERROR = {}
log.model.NOTIFICATION = {}
self.app = TestApp(WsgiApp())
login.update(LOGIN_FORM)
#core.model.REGISTRY['teste']['papeis'].append('docente')
registry.update(REGISTRY_FORM)
self.app.post('/new/user', registry)
registry['user'] = 'teste2'
        core.model.MAGKEYS = MAGKEYS_DEFAULT() # recreate the magic key so another registration can be made
self.app.post('/new/user', registry)
registry['user'] = 'outrousuario'
        core.model.MAGKEYS = MAGKEYS_DEFAULT() # recreate the magic key so another registration can be made
self.app.post('/new/user', registry)
self.app.post('/login', login)
create_community.update(CREATE_COMMUNITY_FORM)
core.model.REGISTRY['teste']['papeis'].append('docente')
self.app.post('/new/community', create_community)
create_evaluation.update(CREATE_EVALUATION_FORM())
perform_evaluation.update(PERFORM_EVALUATION_FORM())
def tearDown(self):
#self.mock.restore()
#self.mock.verify()
#self.mock,MEMBER =None,None
pass
# -------------------------------------------------------------
# Create new evaluation form
def test_returns_newevaluation_form(self):
"Retorna formulário para criação de nova avaliação para a comunidade5"
response = self.app.get('/evaluation/member/new/comunidade5', headers={'Accept-Language':'pt-br'})
assert u'Primeira Comunidade (comunidade5)' in response, "Erro: Não exibiu formulário para criar avaliação!"
def test_reject_newevaluation_form_invalid_community(self):
"Return error message: Comunidade inexistente."
response = self.app.get('/evaluation/member/new/inexistente', headers={'Accept-Language':'pt-br'})
assert u'Você não pode criar avaliação nesta comunidade.' in response, "Erro: Não exibiu mensagem informando que o usuário não é membro!"
def test_reject_newevaluation_form_user_as_community(self):
"Return error message: Comunidade inexistente."
response = self.app.get('/evaluation/member/new/teste', headers={'Accept-Language':'pt-br'})
assert u'Você não pode criar avaliação nesta comunidade.' in response, "Erro: Não exibiu mensagem informando que o usuário não é membro!"
def test_reject_newevaluation_form_user_not_member(self):
"Return error message: Você não é membro desta comunidade."
login["user"] = "outrousuario"
self.app.post('/login', login)
response = self.app.get('/evaluation/member/new/comunidade5', headers={'Accept-Language':'pt-br'})
assert u'Você não pode criar avaliação nesta comunidade.' in response, "Erro: Não exibiu mensagem informando que o usuário não é membro!"
# -------------------------------------------------------------
# Create new evaluation
def test_accept_newevaluation_ok(self):
"Check evaluation in database after evaluation ok"
response = self.app.post('/evaluation/member/new/comunidade5', create_evaluation).follow()
assert "comunidade5/av2" in evaluation.model.EVALUATION, "Erro: Não incluiu a avaliação na comunidade."
assert "comunidade5/av2" in evaluation.model.EVALUATIONCOMMUNITY["comunidade5"]["avaliacoes"], "Erro: Não incluiu a avaliação em EVALUATIONCOMMUNITY."
assert "comunidade5/av2" in response, "Erro: Não exibiu a página das avaliações da comunidade."
def test_reject_newcommunity_double_evaluation_name(self):
"Return error message: Já existe uma avaliação com este nome"
self.app.post('/evaluation/member/new/comunidade5', create_evaluation).follow()
#print evaluation.model.EVALUATION
response = self.app.post('/evaluation/member/new/comunidade5', create_evaluation)
assert u'Já existe uma avaliação com este nome' in response, "Erro: Não exibiu a mensagem 'Já existe uma avaliação com este nome'"
def test_reject_evaluation_without_name(self):
"Return error message: Nome de avaliação inválido"
create_evaluation["nome"]=""
response = self.app.post('/evaluation/member/new/comunidade5', create_evaluation)
assert u"Nome da avaliação não informado.<br/>" in response, "Erro: Não exibiu a mensagem de nome inválido."
def test_accept_evaluation_with_special_chars(self):
"Criação e aceitação de avaliação cujo nome contem caracteres especiais e espaço em branco."
create_evaluation["nome"]="Avaliação %$#@!"
response = self.app.post('/evaluation/member/new/comunidade5', create_evaluation).follow()
assert u"Avaliacao_" in response, "Erro: Não exibiu a mensagem de nome inválido."
def test_reject_evaluation_without_pontuation(self):
"Return error message: O campo 'Pontuação' não foi preenchido"
create_evaluation["pontuacao"]=""
response = self.app.post('/evaluation/member/new/comunidade5', create_evaluation)
assert u"O campo 'Pontuação' não foi preenchido" in response, "Erro: Não exibiu a mensagem de pontuação não preenchida."
def test_reject_evaluation_without_first_date(self):
"Return error message: Data de início inválida."
create_evaluation["dia1"]=""
response = self.app.post('/evaluation/member/new/comunidade5', create_evaluation)
assert u"Data de início inválida." in response, "Erro: Não exibiu a mensagem de data de início inválida."
def test_reject_evaluation_without_second_date(self):
"Return error message: Data de encerramento inválida."
create_evaluation["dia2"]=""
response = self.app.post('/evaluation/member/new/comunidade5', create_evaluation)
assert u"Data de encerramento inválida." in response, "Erro: Não exibiu a mensagem de data de encerramento inválida."
# -------------------------------------------------------------
# List of Evaluations
def test_list_of_evaluations(self):
"Return list of evaluations"
response = self.app.get('/evaluation/comunidade5')
assert u'<a href="/evaluation/comunidade5/av1">av1</a>' in response, "Erro: Não exibiu tela com a comunidade criada!"
# -------------------------------------------------------------
# Perform the evaluation
def test_returns_evaluation_form(self):
"Retorna formulário para realização da avaliação"
response = self.app.get('/evaluation/comunidade5/av1', headers={'Accept-Language':'pt-br'})
assert u'<i>Avaliação de participantes<br/>' in response, "Erro: Não exibiu formulário para criar avaliação!"
assert u'<select name="opcao1">' in response, "Erro: Não exibiu formulário para criar avaliação!"
def test_evaluation_form_before_beginning(self):
"Retorna mensagem de avaliação não iniciada"
evaluation.model.EVALUATION["comunidade5/av1"]["data_inicio"] = "2019-05-05 14:45:40.631966"
response = self.app.get('/evaluation/comunidade5/av1', headers={'Accept-Language':'pt-br'})
assert u'<div class="tnmMSG">Fora do período de avaliação.</div>' in response, "Erro: Não exibiu mensagem de que a avaliação está fora do período de avaliação!"
def test_accept_evaluation_ok(self):
"Check evaluation in database after evaluation ok"
        # add another participant to the community so the evaluation can be performed
core.model.REGISTRY["comunidade5"]["participantes"].append("teste2")
core.model.REGISTRY["teste2"]["comunidades"].append("comunidade5")
        # get each user's point count before the evaluation
votos_em_teste = votos_em_teste2 = 0
if "teste" in evaluation.model.EVALUATION["comunidade5/av1"]:
votos_em_teste = int(evaluation.model.EVALUATION["comunidade5/av1"]["teste"]["votos_recebidos"])
if "teste2" in evaluation.model.EVALUATION["comunidade5/av1"]:
votos_em_teste2 = int(evaluation.model.EVALUATION["comunidade5/av1"]["teste2"]["votos_recebidos"])
        # perform the evaluation
response = self.app.post('/evaluation/comunidade5/av1', perform_evaluation)
        # check that records exist for users teste and teste2
assert "teste" in evaluation.model.EVALUATION["comunidade5/av1"], "Erro: Não incluiu teste no EVALUATION."
assert "teste2" in evaluation.model.EVALUATION["comunidade5/av1"], "Erro: Não incluiu teste2 no EVALUATION."
        # check that the evaluator's information was stored
assert evaluation.model.EVALUATION["comunidade5/av1"]["teste"]["votos_dados"]==["teste","teste2"], "Erro: Não incluiu a informação dos votos dados pelo avaliador no BD."
        # check that the evaluated users' points were incremented correctly
assert int(evaluation.model.EVALUATION["comunidade5/av1"]["teste"]["votos_recebidos"]) == votos_em_teste + 3, \
"Erro: Não incrementou o número de pontos do avaliado teste no BD."
assert int(evaluation.model.EVALUATION["comunidade5/av1"]["teste2"]["votos_recebidos"]) == votos_em_teste2 + 1, \
"Erro: Não incrementou o número de pontos do avaliado teste2 no BD."
        # check that the final message was displayed
assert "Avaliação realizada com sucesso." in response, "Erro: Não exibiu mensagem de avaliação realizada com sucesso."
def test_reject_user_selected_twice(self):
"Verifica se um usuário foi selecionado mais de uma vez"
perform_evaluation["opcao2"]="teste"
response = self.app.post('/evaluation/comunidade5/av1', perform_evaluation)
assert "Alguma opção foi selecionada mais de uma vez." in response, "Erro: Não exibiu mensagem de usuário selecionado mais de uma vez."
def test_reject_no_user_selected(self):
"Verifica se alguma opção deixou de ser selecionada"
perform_evaluation["opcao2"]=""
response = self.app.post('/evaluation/comunidade5/av1', perform_evaluation)
assert "Alguma opção não foi selecionada." in response, "Erro: Não exibiu mensagem de opção não selecionada."
def test_reject_double_evaluation(self):
"Verifica se um usuário pode avaliar mais de uma vez"
        # add another participant to the community so the evaluation can be performed
core.model.REGISTRY["comunidade5"]["participantes"].append("teste2")
core.model.REGISTRY["teste2"]["comunidades"].append("comunidade5")
self.app.post('/evaluation/comunidade5/av1', perform_evaluation)
response = self.app.post('/evaluation/comunidade5/av1', perform_evaluation)
assert "Você já realizou esta avaliação." in response, "Erro: Não exibiu mensagem de avaliação duplicada."
# -------------------------------------------------------------
# Show final results
def test_show_results_of_evaluation(self):
"Check evaluation in database after evaluation ok"
        # add another participant to the community so the evaluation can be performed
core.model.REGISTRY["comunidade5"]["participantes"].append("teste2")
core.model.REGISTRY["teste2"]["comunidades"].append("comunidade5")
        # perform two evaluations, one as teste and one as teste2
self.app.post('/evaluation/comunidade5/av1', perform_evaluation)
self.app.post('/login', login)
self.app.post('/evaluation/comunidade5/av1', perform_evaluation)
        # change the closing date
evaluation.model.EVALUATION["comunidade5/av1"]["data_encerramento"] = "2010-05-06 14:45:40.631966"
        # open the evaluation once more to see the result
response = self.app.get('/evaluation/comunidade5/av1', headers={'Accept-Language':'pt-br'})
        # check that the final message was displayed
assert "Resultado da avaliação comunidade5/av1:" in response, "Erro: Não exibiu resultado da avaliação."
assert "teste - 6 pontos <br/>" in response, "Erro: Não exibiu resultado da avaliação."
assert "teste2 - 2 pontos <br/>" in response, "Erro: Não exibiu resultado da avaliação."
def test_show_results_of_evaluation(self):
"Check evaluation in database after evaluation ok"
        # add another participant to the community so the evaluation can be performed
core.model.REGISTRY["comunidade5"]["participantes"].append("teste2")
core.model.REGISTRY["teste2"]["comunidades"].append("comunidade5")
        # perform two evaluations, one as teste and one as teste2
self.app.post('/evaluation/comunidade5/av1', perform_evaluation)
login["user"] = "teste2"
self.app.post('/login', login)
self.app.post('/evaluation/comunidade5/av1', perform_evaluation)
        # change the closing date
evaluation.model.EVALUATION["comunidade5/av1"]["data_encerramento"] = "2010-05-06 14:45:40.631966"
        # open the evaluation once more to see the result
response = self.app.get('/evaluation/comunidade5/av1', headers={'Accept-Language':'pt-br'})
        # check that the final message was displayed
assert "Fora do período de avaliação." in response, "Erro: Não exibiu mensagem de que está fora do período de avaliação."
| gpl-2.0 | -2,253,725,257,032,222,700 | 50.189542 | 173 | 0.667263 | false |
openspending/spendb | spendb/tests/views/test_home.py | 5 | 3357 | import re
import json
from flask import url_for
from spendb.core import db
from spendb.model.dataset import Dataset
from spendb.tests.base import ControllerTestCase
from spendb.tests.helpers import make_account, load_fixture
class TestHomeController(ControllerTestCase):
def setUp(self):
super(TestHomeController, self).setUp()
self.dataset = load_fixture('cra')
self.user = make_account('test')
def test_index(self):
response = self.client.get(url_for('home.index'))
assert 'SpenDB' in response.data
def test_locale(self):
set_l = url_for('home.set_locale')
data = json.dumps({'locale': 'en'})
self.client.post(set_l, data=data,
headers={'Content-Type': 'application/json'})
def test_feeds(self):
# Anonymous user with one public dataset
response = self.client.get(url_for('home.feed_rss'))
assert 'application/xml' in response.content_type
assert '<title>Recently Created Datasets</title>' in response.data
assert '<item><title>Country Regional Analysis v2009' in response.data, response.data
cra = Dataset.by_name('cra')
cra.private = True
db.session.add(cra)
db.session.commit()
# Anonymous user with one private dataset
response = self.client.get(url_for('home.feed_rss'))
assert 'application/xml' in response.content_type
assert '<title>Recently Created Datasets</title>' in response.data
assert '<item><title>Country Regional Analysis v2009' not in response.data
# Logged in user with one public dataset
cra.private = False
db.session.add(cra)
db.session.commit()
response = self.client.get(url_for('home.feed_rss'),
query_string={'api_key': self.user.api_key})
assert 'application/xml' in response.content_type
assert '<title>Recently Created Datasets</title>' in response.data
assert '<item><title>Country Regional Analysis v2009' in response.data
# Logged in user with one private dataset
cra.private = True
db.session.add(cra)
db.session.commit()
response = self.client.get(url_for('home.feed_rss'),
query_string={'api_key': self.user.api_key})
assert 'application/xml' in response.content_type
assert '<title>Recently Created Datasets</title>' in response.data
assert '<item><title>Country Regional Analysis v2009' not in response.data
# Logged in admin user with one private dataset
admin_user = make_account('admin')
admin_user.admin = True
db.session.add(admin_user)
db.session.commit()
response = self.client.get(url_for('home.feed_rss'),
query_string={'api_key': admin_user.api_key})
assert '<title>Recently Created Datasets</title>' in response.data
assert '<item><title>Country Regional Analysis v2009' in response.data
assert 'application/xml' in response.content_type
response = self.client.get('/')
norm = re.sub('\s+', ' ', response.data)
assert ('<link rel="alternate" type="application/rss+xml" title="'
'Latest Datasets on SpenDB"' in
norm)
| agpl-3.0 | -1,144,154,089,912,542,500 | 40.9625 | 93 | 0.629431 | false |
LittleLama/Sick-Beard-BoxCar2 | lib/hachoir_parser/image/jpeg.py | 90 | 14363 | """
JPEG picture parser.
Information:
- APP14 documents
http://partners.adobe.com/public/developer/en/ps/sdk/5116.DCT_Filter.pdf
http://java.sun.com/j2se/1.5.0/docs/api/javax/imageio/metadata/doc-files/jpeg_metadata.html#color
- APP12:
http://search.cpan.org/~exiftool/Image-ExifTool/lib/Image/ExifTool/TagNames.pod
Author: Victor Stinner
"""
from lib.hachoir_core.error import HachoirError
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, Enum,
Bit, Bits, NullBits, NullBytes,
String, RawBytes)
from lib.hachoir_parser.image.common import PaletteRGB
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_parser.image.exif import Exif
from lib.hachoir_parser.image.photoshop_metadata import PhotoshopMetadata
MAX_FILESIZE = 100 * 1024 * 1024
# The four tables (hash/sum for color/grayscale JPEG) come
# from the ImageMagick project
QUALITY_HASH_COLOR = (
1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645,
632, 623, 613, 607, 600, 594, 589, 585, 581, 571,
555, 542, 529, 514, 494, 474, 457, 439, 424, 410,
397, 386, 373, 364, 351, 341, 334, 324, 317, 309,
299, 294, 287, 279, 274, 267, 262, 257, 251, 247,
243, 237, 232, 227, 222, 217, 213, 207, 202, 198,
192, 188, 183, 177, 173, 168, 163, 157, 153, 148,
143, 139, 132, 128, 125, 119, 115, 108, 104, 99,
94, 90, 84, 79, 74, 70, 64, 59, 55, 49,
45, 40, 34, 30, 25, 20, 15, 11, 6, 4,
0)
QUALITY_SUM_COLOR = (
32640,32635,32266,31495,30665,29804,29146,28599,28104,27670,
27225,26725,26210,25716,25240,24789,24373,23946,23572,22846,
21801,20842,19949,19121,18386,17651,16998,16349,15800,15247,
14783,14321,13859,13535,13081,12702,12423,12056,11779,11513,
11135,10955,10676,10392,10208, 9928, 9747, 9564, 9369, 9193,
9017, 8822, 8639, 8458, 8270, 8084, 7896, 7710, 7527, 7347,
7156, 6977, 6788, 6607, 6422, 6236, 6054, 5867, 5684, 5495,
5305, 5128, 4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698,
3509, 3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846,
1666, 1483, 1297, 1109, 927, 735, 554, 375, 201, 128,
0)
QUALITY_HASH_GRAY = (
510, 505, 422, 380, 355, 338, 326, 318, 311, 305,
300, 297, 293, 291, 288, 286, 284, 283, 281, 280,
279, 278, 277, 273, 262, 251, 243, 233, 225, 218,
211, 205, 198, 193, 186, 181, 177, 172, 168, 164,
158, 156, 152, 148, 145, 142, 139, 136, 133, 131,
129, 126, 123, 120, 118, 115, 113, 110, 107, 105,
102, 100, 97, 94, 92, 89, 87, 83, 81, 79,
76, 74, 70, 68, 66, 63, 61, 57, 55, 52,
50, 48, 44, 42, 39, 37, 34, 31, 29, 26,
24, 21, 18, 16, 13, 11, 8, 6, 3, 2,
0)
QUALITY_SUM_GRAY = (
16320,16315,15946,15277,14655,14073,13623,13230,12859,12560,
12240,11861,11456,11081,10714,10360,10027, 9679, 9368, 9056,
8680, 8331, 7995, 7668, 7376, 7084, 6823, 6562, 6345, 6125,
5939, 5756, 5571, 5421, 5240, 5086, 4976, 4829, 4719, 4616,
4463, 4393, 4280, 4166, 4092, 3980, 3909, 3835, 3755, 3688,
3621, 3541, 3467, 3396, 3323, 3247, 3170, 3096, 3021, 2952,
2874, 2804, 2727, 2657, 2583, 2509, 2437, 2362, 2290, 2211,
2136, 2068, 1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477,
1398, 1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736,
667, 592, 518, 441, 369, 292, 221, 151, 86, 64,
0)
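# JPEG_NATURAL_ORDER below maps the zig-zag transmission order of the 64 DCT
# coefficients back to their natural (row-major) position in the 8x8 block;
# QuantizationTable uses it to name coefficients by their natural position.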
JPEG_NATURAL_ORDER = (
0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34,
27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36,
29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46,
53, 60, 61, 54, 47, 55, 62, 63)
class JpegChunkApp0(FieldSet):
UNIT_NAME = {
0: "pixels",
1: "dots per inch",
2: "dots per cm",
}
def createFields(self):
yield String(self, "jfif", 5, "JFIF string", charset="ASCII")
if self["jfif"].value != "JFIF\0":
raise ParserError(
"Stream doesn't look like JPEG chunk (wrong JFIF signature)")
yield UInt8(self, "ver_maj", "Major version")
yield UInt8(self, "ver_min", "Minor version")
yield Enum(UInt8(self, "units", "Units"), self.UNIT_NAME)
if self["units"].value == 0:
yield UInt16(self, "aspect_x", "Aspect ratio (X)")
yield UInt16(self, "aspect_y", "Aspect ratio (Y)")
else:
yield UInt16(self, "x_density", "X density")
yield UInt16(self, "y_density", "Y density")
yield UInt8(self, "thumb_w", "Thumbnail width")
yield UInt8(self, "thumb_h", "Thumbnail height")
thumb_size = self["thumb_w"].value * self["thumb_h"].value
if thumb_size != 0:
yield PaletteRGB(self, "thumb_palette", 256)
yield RawBytes(self, "thumb_data", thumb_size, "Thumbnail data")
class Ducky(FieldSet):
BLOCK_TYPE = {
0: "end",
1: "Quality",
2: "Comment",
3: "Copyright",
}
def createFields(self):
yield Enum(UInt16(self, "type"), self.BLOCK_TYPE)
if self["type"].value == 0:
return
yield UInt16(self, "size")
size = self["size"].value
if size:
yield RawBytes(self, "data", size)
class APP12(FieldSet):
"""
The JPEG APP12 "Picture Info" segment was used by some older cameras, and
contains ASCII-based meta information.
"""
def createFields(self):
yield String(self, "ducky", 5, '"Ducky" string', charset="ASCII")
while not self.eof:
yield Ducky(self, "item[]")
class StartOfFrame(FieldSet):
def createFields(self):
yield UInt8(self, "precision")
yield UInt16(self, "height")
yield UInt16(self, "width")
yield UInt8(self, "nr_components")
for index in range(self["nr_components"].value):
yield UInt8(self, "component_id[]")
yield UInt8(self, "high[]")
yield UInt8(self, "low[]")
class Comment(FieldSet):
def createFields(self):
yield String(self, "comment", self.size//8, strip="\0")
class AdobeChunk(FieldSet):
COLORSPACE_TRANSFORMATION = {
1: "YCbCr (converted from RGB)",
2: "YCCK (converted from CMYK)",
}
def createFields(self):
if self.stream.readBytes(self.absolute_address, 5) != "Adobe":
yield RawBytes(self, "raw", self.size//8, "Raw data")
return
yield String(self, "adobe", 5, "\"Adobe\" string", charset="ASCII")
yield UInt16(self, "version", "DCT encoder version")
yield Enum(Bit(self, "flag00"),
{False: "Chop down or subsampling", True: "Blend"})
yield NullBits(self, "flags0_reserved", 15)
yield NullBytes(self, "flags1", 2)
yield Enum(UInt8(self, "color_transform", "Colorspace transformation code"), self.COLORSPACE_TRANSFORMATION)
class StartOfScan(FieldSet):
def createFields(self):
yield UInt8(self, "nr_components")
for index in range(self["nr_components"].value):
comp_id = UInt8(self, "component_id[]")
yield comp_id
if not(1 <= comp_id.value <= self["nr_components"].value):
raise ParserError("JPEG error: Invalid component-id")
yield UInt8(self, "value[]")
yield RawBytes(self, "raw", 3) # TODO: What's this???
class RestartInterval(FieldSet):
def createFields(self):
yield UInt16(self, "interval", "Restart interval")
class QuantizationTable(FieldSet):
def createFields(self):
# Code based on function get_dqt() (jdmarker.c from libjpeg62)
yield Bits(self, "is_16bit", 4)
yield Bits(self, "index", 4)
if self["index"].value >= 4:
raise ParserError("Invalid quantification index (%s)" % self["index"].value)
if self["is_16bit"].value:
coeff_type = UInt16
else:
coeff_type = UInt8
for index in xrange(64):
natural = JPEG_NATURAL_ORDER[index]
yield coeff_type(self, "coeff[%u]" % natural)
def createDescription(self):
return "Quantification table #%u" % self["index"].value
class DefineQuantizationTable(FieldSet):
def createFields(self):
while self.current_size < self.size:
yield QuantizationTable(self, "qt[]")
class JpegChunk(FieldSet):
TAG_SOI = 0xD8
TAG_EOI = 0xD9
TAG_SOS = 0xDA
TAG_DQT = 0xDB
TAG_DRI = 0xDD
TAG_INFO = {
0xC4: ("huffman[]", "Define Huffman Table (DHT)", None),
0xD8: ("start_image", "Start of image (SOI)", None),
0xD9: ("end_image", "End of image (EOI)", None),
0xDA: ("start_scan", "Start Of Scan (SOS)", StartOfScan),
0xDB: ("quantization[]", "Define Quantization Table (DQT)", DefineQuantizationTable),
0xDC: ("nb_line", "Define number of Lines (DNL)", None),
0xDD: ("restart_interval", "Define Restart Interval (DRI)", RestartInterval),
0xE0: ("app0", "APP0", JpegChunkApp0),
0xE1: ("exif", "Exif metadata", Exif),
0xE2: ("icc", "ICC profile", None),
0xEC: ("app12", "APP12", APP12),
0xED: ("photoshop", "Photoshop", PhotoshopMetadata),
0xEE: ("adobe", "Image encoding information for DCT filters (Adobe)", AdobeChunk),
0xFE: ("comment[]", "Comment", Comment),
}
START_OF_FRAME = {
0xC0: u"Baseline",
0xC1: u"Extended sequential",
0xC2: u"Progressive",
0xC3: u"Lossless",
0xC5: u"Differential sequential",
0xC6: u"Differential progressive",
0xC7: u"Differential lossless",
0xC9: u"Extended sequential, arithmetic coding",
0xCA: u"Progressive, arithmetic coding",
0xCB: u"Lossless, arithmetic coding",
0xCD: u"Differential sequential, arithmetic coding",
0xCE: u"Differential progressive, arithmetic coding",
0xCF: u"Differential lossless, arithmetic coding",
}
for key, text in START_OF_FRAME.iteritems():
TAG_INFO[key] = ("start_frame", "Start of frame (%s)" % text.lower(), StartOfFrame)
def __init__(self, parent, name, description=None):
FieldSet.__init__(self, parent, name, description)
tag = self["type"].value
if tag == 0xE1:
# Hack for Adobe extension: XAP metadata (as XML)
bytes = self.stream.readBytes(self.absolute_address + 32, 6)
if bytes == "Exif\0\0":
self._name = "exif"
self._description = "EXIF"
self._parser = Exif
else:
self._parser = None
elif tag in self.TAG_INFO:
self._name, self._description, self._parser = self.TAG_INFO[tag]
else:
self._parser = None
def createFields(self):
yield textHandler(UInt8(self, "header", "Header"), hexadecimal)
if self["header"].value != 0xFF:
raise ParserError("JPEG: Invalid chunk header!")
yield textHandler(UInt8(self, "type", "Type"), hexadecimal)
tag = self["type"].value
if tag in (self.TAG_SOI, self.TAG_EOI):
return
yield UInt16(self, "size", "Size")
size = (self["size"].value - 2)
if 0 < size:
if self._parser:
yield self._parser(self, "content", "Chunk content", size=size*8)
else:
yield RawBytes(self, "data", size, "Data")
def createDescription(self):
return "Chunk: %s" % self["type"].display
class JpegFile(Parser):
endian = BIG_ENDIAN
PARSER_TAGS = {
"id": "jpeg",
"category": "image",
"file_ext": ("jpg", "jpeg"),
"mime": (u"image/jpeg",),
"magic": (
("\xFF\xD8\xFF\xE0", 0), # (Start Of Image, APP0)
("\xFF\xD8\xFF\xE1", 0), # (Start Of Image, EXIF)
("\xFF\xD8\xFF\xEE", 0), # (Start Of Image, Adobe)
),
"min_size": 22*8,
"description": "JPEG picture",
"subfile": "skip",
}
def validate(self):
if self.stream.readBytes(0, 2) != "\xFF\xD8":
return "Invalid file signature"
try:
for index, field in enumerate(self):
chunk_type = field["type"].value
if chunk_type not in JpegChunk.TAG_INFO:
return "Unknown chunk type: 0x%02X (chunk #%s)" % (chunk_type, index)
if index == 2:
# Only check 3 fields
break
except HachoirError:
return "Unable to parse at least three chunks"
return True
def createFields(self):
while not self.eof:
chunk = JpegChunk(self, "chunk[]")
yield chunk
if chunk["type"].value == JpegChunk.TAG_SOS:
# TODO: Read JPEG image data...
break
# TODO: is it possible to handle piped input?
if self._size is None:
raise NotImplementedError
has_end = False
size = (self._size - self.current_size) // 8
if size:
if 2 < size \
and self.stream.readBytes(self._size - 16, 2) == "\xff\xd9":
has_end = True
size -= 2
yield RawBytes(self, "data", size, "JPEG data")
if has_end:
yield JpegChunk(self, "chunk[]")
def createDescription(self):
desc = "JPEG picture"
if "sof/content" in self:
header = self["sof/content"]
desc += ": %ux%u pixels" % (header["width"].value, header["height"].value)
return desc
def createContentSize(self):
if "end" in self:
return self["end"].absolute_address + self["end"].size
if "data" not in self:
return None
start = self["data"].absolute_address
end = self.stream.searchBytes("\xff\xd9", start, MAX_FILESIZE*8)
if end is not None:
return end + 16
return None
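# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original hachoir module: it assumes the
# standard hachoir createParser() entry point is importable under the same
# "lib." prefix as the imports above; adjust the prefix (or drop it) to match
# how this copy of hachoir is packaged.
if __name__ == '__main__':
    import sys
    from lib.hachoir_parser import createParser
    parser = createParser(unicode(sys.argv[1]))
    if not isinstance(parser, JpegFile):
        print "Not recognized as a JPEG picture"
    else:
        # walking the parser triggers parsing of each chunk lazily
        for field in parser:
            print "%s: %s" % (field.name, field.description)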
| gpl-3.0 | 1,495,576,732,180,115,500 | 38.029891 | 116 | 0.566177 | false |
ossxp-com/repo | main.py | 3 | 10274 | #!/bin/sh
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
magic='--calling-python-from-/bin/sh--'
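# The next line is read by /bin/sh as `exec python -E "$0" "$@" "#$magic"`,
# re-running this file under Python with a sentinel last argument; Python in
# turn parses the same line as a harmless string expression followed by a
# comment, and the sentinel argument is stripped again in the __main__ block
# just below.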
"""exec" python -E "$0" "$@" """#$magic"
if __name__ == '__main__':
import sys
if sys.argv[-1] == '#%s' % magic:
del sys.argv[-1]
del magic
import netrc
import optparse
import os
import re
import sys
import time
import urllib2
from trace import SetTrace
from git_command import git, GitCommand
from git_config import init_ssh, close_ssh
from command import InteractiveCommand
from command import MirrorSafeCommand
from command import PagedCommand
from editor import Editor
from error import DownloadError
from error import ManifestInvalidRevisionError
from error import NoSuchProjectError
from error import RepoChangedException
from manifest_xml import XmlManifest
from pager import RunPager
from subcmds import all as all_commands
global_options = optparse.OptionParser(
usage="repo [-p|--paginate|--no-pager] COMMAND [ARGS]"
)
global_options.add_option('-p', '--paginate',
dest='pager', action='store_true',
help='display command output in the pager')
global_options.add_option('--no-pager',
dest='no_pager', action='store_true',
help='disable the pager')
global_options.add_option('--trace',
dest='trace', action='store_true',
help='trace git command execution')
global_options.add_option('--time',
dest='time', action='store_true',
help='time repo command execution')
global_options.add_option('--version',
dest='show_version', action='store_true',
help='display this version of repo')
class _Repo(object):
def __init__(self, repodir):
self.repodir = repodir
self.commands = all_commands
# add 'branch' as an alias for 'branches'
all_commands['branch'] = all_commands['branches']
def _Run(self, argv):
name = None
glob = []
for i in xrange(0, len(argv)):
if not argv[i].startswith('-'):
name = argv[i]
if i > 0:
glob = argv[:i]
argv = argv[i + 1:]
break
if not name:
glob = argv
name = 'help'
argv = []
gopts, gargs = global_options.parse_args(glob)
if gopts.trace:
SetTrace()
if gopts.show_version:
if name == 'help':
name = 'version'
else:
print >>sys.stderr, 'fatal: invalid usage of --version'
sys.exit(1)
try:
cmd = self.commands[name]
except KeyError:
print >>sys.stderr,\
"repo: '%s' is not a repo command. See 'repo help'."\
% name
sys.exit(1)
cmd.repodir = self.repodir
cmd.manifest = XmlManifest(cmd.repodir)
Editor.globalConfig = cmd.manifest.globalConfig
if not isinstance(cmd, MirrorSafeCommand) and cmd.manifest.IsMirror:
print >>sys.stderr, \
"fatal: '%s' requires a working directory"\
% name
sys.exit(1)
copts, cargs = cmd.OptionParser.parse_args(argv)
if not gopts.no_pager and not isinstance(cmd, InteractiveCommand):
config = cmd.manifest.globalConfig
if gopts.pager:
use_pager = True
else:
use_pager = config.GetBoolean('pager.%s' % name)
if use_pager is None:
use_pager = cmd.WantPager(copts)
if use_pager:
RunPager(config)
try:
start = time.time()
try:
cmd.Execute(copts, cargs)
finally:
elapsed = time.time() - start
hours, remainder = divmod(elapsed, 3600)
minutes, seconds = divmod(remainder, 60)
if gopts.time:
if hours == 0:
print >>sys.stderr, 'real\t%dm%.3fs' \
% (minutes, seconds)
else:
print >>sys.stderr, 'real\t%dh%dm%.3fs' \
% (hours, minutes, seconds)
except DownloadError, e:
print >>sys.stderr, 'error: %s' % str(e)
sys.exit(1)
except ManifestInvalidRevisionError, e:
print >>sys.stderr, 'error: %s' % str(e)
sys.exit(1)
except NoSuchProjectError, e:
if e.name:
print >>sys.stderr, 'error: project %s not found' % e.name
else:
print >>sys.stderr, 'error: no project in current directory'
sys.exit(1)
def _MyRepoPath():
return os.path.dirname(__file__)
def _MyWrapperPath():
return os.path.join(os.path.dirname(__file__), 'repo')
def _CurrentWrapperVersion():
VERSION = None
pat = re.compile(r'^VERSION *=')
fd = open(_MyWrapperPath())
for line in fd:
if pat.match(line):
fd.close()
exec line
return VERSION
raise NameError, 'No VERSION in repo script'
def _CheckWrapperVersion(ver, repo_path):
if not repo_path:
repo_path = '~/bin/repo'
if not ver:
print >>sys.stderr, 'no --wrapper-version argument'
sys.exit(1)
exp = _CurrentWrapperVersion()
ver = tuple(map(lambda x: int(x), ver.split('.')))
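  # e.g. a wrapper reporting "1.21" becomes (1, 21); a bare "4" is normalized
  # to (0, 4) by the check below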
if len(ver) == 1:
ver = (0, ver[0])
if exp[0] > ver[0] or ver < (0, 4):
exp_str = '.'.join(map(lambda x: str(x), exp))
print >>sys.stderr, """
!!! A new repo command (%5s) is available. !!!
!!! You must upgrade before you can continue: !!!
cp %s %s
""" % (exp_str, _MyWrapperPath(), repo_path)
sys.exit(1)
if exp > ver:
exp_str = '.'.join(map(lambda x: str(x), exp))
print >>sys.stderr, """
... A new repo command (%5s) is available.
... You should upgrade soon:
cp %s %s
""" % (exp_str, _MyWrapperPath(), repo_path)
def _CheckRepoDir(dir):
if not dir:
print >>sys.stderr, 'no --repo-dir argument'
sys.exit(1)
def _PruneOptions(argv, opt):
i = 0
while i < len(argv):
a = argv[i]
if a == '--':
break
if a.startswith('--'):
eq = a.find('=')
if eq > 0:
a = a[0:eq]
if not opt.has_option(a):
del argv[i]
continue
i += 1
_user_agent = None
def _UserAgent():
global _user_agent
if _user_agent is None:
py_version = sys.version_info
os_name = sys.platform
if os_name == 'linux2':
os_name = 'Linux'
elif os_name == 'win32':
os_name = 'Win32'
elif os_name == 'cygwin':
os_name = 'Cygwin'
elif os_name == 'darwin':
os_name = 'Darwin'
p = GitCommand(
None, ['describe', 'HEAD'],
cwd = _MyRepoPath(),
capture_stdout = True)
if p.Wait() == 0:
repo_version = p.stdout
if len(repo_version) > 0 and repo_version[-1] == '\n':
repo_version = repo_version[0:-1]
if len(repo_version) > 0 and repo_version[0] == 'v':
repo_version = repo_version[1:]
else:
repo_version = 'unknown'
_user_agent = 'git-repo/%s (%s) git/%s Python/%d.%d.%d' % (
repo_version,
os_name,
'.'.join(map(lambda d: str(d), git.version_tuple())),
py_version[0], py_version[1], py_version[2])
return _user_agent
class _UserAgentHandler(urllib2.BaseHandler):
def http_request(self, req):
req.add_header('User-Agent', _UserAgent())
return req
def https_request(self, req):
req.add_header('User-Agent', _UserAgent())
return req
class _BasicAuthHandler(urllib2.HTTPBasicAuthHandler):
def http_error_auth_reqed(self, authreq, host, req, headers):
try:
old_add_header = req.add_header
def _add_header(name, val):
val = val.replace('\n', '')
old_add_header(name, val)
req.add_header = _add_header
return urllib2.AbstractBasicAuthHandler.http_error_auth_reqed(
self, authreq, host, req, headers)
except:
reset = getattr(self, 'reset_retry_count', None)
if reset is not None:
reset()
elif getattr(self, 'retried', None):
self.retried = 0
raise
def init_http():
handlers = [_UserAgentHandler()]
mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
try:
n = netrc.netrc()
for host in n.hosts:
p = n.hosts[host]
mgr.add_password(None, 'http://%s/' % host, p[0], p[2])
mgr.add_password(None, 'https://%s/' % host, p[0], p[2])
except netrc.NetrcParseError:
pass
except IOError:
pass
handlers.append(_BasicAuthHandler(mgr))
if 'http_proxy' in os.environ:
url = os.environ['http_proxy']
handlers.append(urllib2.ProxyHandler({'http': url, 'https': url}))
if 'REPO_CURL_VERBOSE' in os.environ:
handlers.append(urllib2.HTTPHandler(debuglevel=1))
handlers.append(urllib2.HTTPSHandler(debuglevel=1))
urllib2.install_opener(urllib2.build_opener(*handlers))
def _Main(argv):
opt = optparse.OptionParser(usage="repo wrapperinfo -- ...")
opt.add_option("--repo-dir", dest="repodir",
help="path to .repo/")
opt.add_option("--wrapper-version", dest="wrapper_version",
help="version of the wrapper script")
opt.add_option("--wrapper-path", dest="wrapper_path",
help="location of the wrapper script")
_PruneOptions(argv, opt)
opt, argv = opt.parse_args(argv)
_CheckWrapperVersion(opt.wrapper_version, opt.wrapper_path)
_CheckRepoDir(opt.repodir)
repo = _Repo(opt.repodir)
try:
try:
init_ssh()
init_http()
repo._Run(argv)
finally:
close_ssh()
except KeyboardInterrupt:
sys.exit(1)
except RepoChangedException, rce:
# If repo changed, re-exec ourselves.
#
argv = list(sys.argv)
argv.extend(rce.extra_args)
try:
os.execv(__file__, argv)
except OSError, e:
print >>sys.stderr, 'fatal: cannot restart repo after upgrade'
print >>sys.stderr, 'fatal: %s' % e
sys.exit(128)
if __name__ == '__main__':
_Main(sys.argv[1:])
| apache-2.0 | -3,193,361,342,326,652,000 | 27.940845 | 74 | 0.601226 | false |
bbbenja/SickRage | sickbeard/sab.py | 11 | 8550 | # Author: Nic Wolfe <[email protected]>
# URL: https://sickrage.tv
# Git: https://github.com/SiCKRAGETV/SickRage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import urllib, httplib
import sickbeard
import MultipartPostHandler
import urllib2, cookielib
try:
import json
except ImportError:
import simplejson as json
from sickbeard.common import USER_AGENT
from sickbeard import logger
from sickrage.helper.exceptions import ex
def sendNZB(nzb):
"""
Sends an NZB to SABnzbd via the API.
:param nzb: The NZBSearchResult object to send to SAB
"""
# set up a dict with the URL params in it
params = {}
if sickbeard.SAB_USERNAME != None:
params['ma_username'] = sickbeard.SAB_USERNAME
if sickbeard.SAB_PASSWORD != None:
params['ma_password'] = sickbeard.SAB_PASSWORD
if sickbeard.SAB_APIKEY != None:
params['apikey'] = sickbeard.SAB_APIKEY
category = sickbeard.SAB_CATEGORY
if nzb.show.is_anime:
category = sickbeard.SAB_CATEGORY_ANIME
if category != None:
params['cat'] = category
# use high priority if specified (recently aired episode)
if nzb.priority == 1:
if sickbeard.SAB_FORCED == 1:
params['priority'] = 2
else:
params['priority'] = 1
# if it's a normal result we just pass SAB the URL
if nzb.resultType == "nzb":
# for newzbin results send the ID to sab specifically
if nzb.provider.getID() == 'newzbin':
id = nzb.provider.getIDFromURL(nzb.url)
if not id:
logger.log("Unable to send NZB to sab, can't find ID in URL " + str(nzb.url), logger.ERROR)
return False
params['mode'] = 'addid'
params['name'] = id
else:
params['mode'] = 'addurl'
params['name'] = nzb.url
# if we get a raw data result we want to upload it to SAB
elif nzb.resultType == "nzbdata":
params['mode'] = 'addfile'
multiPartParams = {"nzbfile": (nzb.name + ".nzb", nzb.extraInfo[0])}
url = sickbeard.SAB_HOST + "api?" + urllib.urlencode(params)
logger.log(u"Sending NZB to SABnzbd")
logger.log(u"URL: " + url, logger.DEBUG)
try:
# if we have the URL to an NZB then we've built up the SAB API URL already so just call it
if nzb.resultType == "nzb":
f = urllib.urlopen(url)
# if we are uploading the NZB data to SAB then we need to build a little POST form and send it
elif nzb.resultType == "nzbdata":
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),
MultipartPostHandler.MultipartPostHandler)
req = urllib2.Request(url,
multiPartParams,
headers={'User-Agent': USER_AGENT})
f = opener.open(req)
except (EOFError, IOError), e:
logger.log(u"Unable to connect to SAB: " + ex(e), logger.ERROR)
return False
except httplib.InvalidURL, e:
logger.log(u"Invalid SAB host, check your config: " + ex(e), logger.ERROR)
return False
# this means we couldn't open the connection or something just as bad
if f == None:
logger.log(u"No data returned from SABnzbd, NZB not sent", logger.ERROR)
return False
# if we opened the URL connection then read the result from SAB
try:
result = f.readlines()
except Exception, e:
logger.log(u"Error trying to get result from SAB, NZB not sent: " + ex(e), logger.ERROR)
return False
    # SAB shouldn't return a blank result; this most likely (but not always) means that it timed out and didn't receive the NZB
if len(result) == 0:
logger.log(u"No data returned from SABnzbd, NZB not sent", logger.ERROR)
return False
# massage the result a little bit
sabText = result[0].strip()
logger.log(u"Result text from SAB: " + sabText, logger.DEBUG)
# do some crude parsing of the result text to determine what SAB said
if sabText == "ok":
logger.log(u"NZB sent to SAB successfully", logger.DEBUG)
return True
elif sabText == "Missing authentication":
logger.log(u"Incorrect username/password sent to SAB, NZB not sent", logger.ERROR)
return False
else:
logger.log(u"Unknown failure sending NZB to sab. Return text is: " + sabText, logger.ERROR)
return False
def _checkSabResponse(f):
"""
Check response from SAB
    :param f: Response from SAB
    :return: a (Boolean, string) tuple which is True if SAB is not reporting an error
"""
try:
result = f.readlines()
except Exception, e:
logger.log(u"Error trying to get result from SAB" + ex(e), logger.ERROR)
return False, "Error from SAB"
if len(result) == 0:
logger.log(u"No data returned from SABnzbd, NZB not sent", logger.ERROR)
return False, "No data from SAB"
sabText = result[0].strip()
sabJson = {}
try:
sabJson = json.loads(sabText)
except ValueError, e:
pass
if sabText == "Missing authentication":
logger.log(u"Incorrect username/password sent to SAB", logger.ERROR)
return False, "Incorrect username/password sent to SAB"
elif 'error' in sabJson:
logger.log(sabJson['error'], logger.ERROR)
return False, sabJson['error']
else:
return True, sabText
def _sabURLOpenSimple(url):
"""
Open a connection to SAB
:param url: URL where SAB is at
:return: (boolean, string) list, True if connection can be made
"""
try:
f = urllib.urlopen(url)
except (EOFError, IOError), e:
logger.log(u"Unable to connect to SAB: " + ex(e), logger.ERROR)
return False, "Unable to connect"
except httplib.InvalidURL, e:
logger.log(u"Invalid SAB host, check your config: " + ex(e), logger.ERROR)
return False, "Invalid SAB host"
if f == None:
logger.log(u"No data returned from SABnzbd", logger.ERROR)
return False, "No data returned from SABnzbd"
else:
return True, f
def getSabAccesMethod(host=None, username=None, password=None, apikey=None):
"""
Find out how we should connect to SAB
:param host: hostname where SAB lives
:param username: username to use
:param password: password to use
:param apikey: apikey to use
:return: (boolean, string) with True if method was successful
"""
url = host + "api?mode=auth"
result, f = _sabURLOpenSimple(url)
if not result:
return False, f
result, sabText = _checkSabResponse(f)
if not result:
return False, sabText
return True, sabText
def testAuthentication(host=None, username=None, password=None, apikey=None):
"""
Sends a simple API request to SAB to determine if the given connection information is connect
:param host: The host where SAB is running (incl port)
:param username: The username to use for the HTTP request
:param password: The password to use for the HTTP request
:param apikey: The API key to provide to SAB
:return: A tuple containing the success boolean and a message
"""
# build up the URL parameters
params = {}
params['mode'] = 'queue'
params['output'] = 'json'
params['ma_username'] = username
params['ma_password'] = password
params['apikey'] = apikey
url = host + "api?" + urllib.urlencode(params)
# send the test request
logger.log(u"SABnzbd test URL: " + url, logger.DEBUG)
result, f = _sabURLOpenSimple(url)
if not result:
return False, f
# check the result and determine if it's good or not
result, sabText = _checkSabResponse(f)
if not result:
return False, sabText
return True, "Success"
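# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module; the host, credentials
# and API key below are invented placeholder values:
#
#     ok, msg = testAuthentication(host='http://localhost:8080/sabnzbd/',
#                                  username='admin', password='secret',
#                                  apikey='0123456789abcdef0123456789abcdef')
#     if not ok:
#         logger.log(u"SABnzbd connection test failed: " + msg, logger.WARNING)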
| gpl-3.0 | 7,463,108,557,253,176,000 | 32.139535 | 127 | 0.63883 | false |
pyhys/minimalmodbus | setup.py | 1 | 2703 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Do not import non-standard modules here, as it will mess up the installation in clients.
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read().replace(".. :changelog:", "")
# Read version number etc from other file
# http://stackoverflow.com/questions/2058802/how-can-i-get-the-version-defined
# -in-setup-py-setuptools-in-my-package
with open("minimalmodbus.py") as mainfile:
main_py = mainfile.read()
metadata = dict(re.findall('__([a-z]+)__ *= *"([^"]+)"', main_py))
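# metadata now maps the module's dunder names to their values; given the keys
# used below it will contain at least "version", "license", "author" and "url"
# as defined at the top of minimalmodbus.py.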
setup(
name="minimalmodbus",
version=metadata["version"],
license=metadata["license"],
author=metadata["author"],
url=metadata["url"],
project_urls={
"Documentation": "https://minimalmodbus.readthedocs.io",
"Source Code": metadata["url"],
"Bug Tracker": "https://github.com/pyhys/minimalmodbus/issues",
},
description="Easy-to-use Modbus RTU and Modbus ASCII implementation for Python",
keywords="minimalmodbus modbus modbus-serial modbus-RTU modbus-ASCII",
long_description=readme + "\n\n" + history,
long_description_content_type="text/x-rst",
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
install_requires=["pyserial>=3.0"],
py_modules=["minimalmodbus"],
test_suite="tests", # Discover all testcases in all files in this subdir
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Intended Audience :: Manufacturing",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Communications",
"Topic :: Home Automation",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Hardware :: Hardware Drivers",
"Topic :: Terminals :: Serial",
],
)
| apache-2.0 | -5,849,960,773,327,087,000 | 37.614286 | 90 | 0.631521 | false |
drakuna/odoo | openerp/osv/fields.py | 8 | 75655 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" Fields:
- simple
- relations (one2many, many2one, many2many)
- function
Fields Attributes:
* _classic_read: is a classic sql fields
* _type : field type
* _auto_join: for one2many and many2one fields, tells whether select
queries will join the relational table instead of replacing the
field condition by an equivalent-one based on a search.
* readonly
* required
* size
"""
import base64
import datetime as DT
import functools
import logging
import pytz
import re
import xmlrpclib
from operator import itemgetter
from contextlib import contextmanager
from psycopg2 import Binary
import openerp
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import float_repr, float_round, frozendict, html_sanitize
import json
from openerp import SUPERUSER_ID, registry
@contextmanager
def _get_cursor():
# yield a valid cursor from any environment or create a new one if none found
with registry().cursor() as cr:
yield cr
EMPTY_DICT = frozendict()
_logger = logging.getLogger(__name__)
def _symbol_set(symb):
if symb is None or symb == False:
return None
elif isinstance(symb, unicode):
return symb.encode('utf-8')
return str(symb)
class _column(object):
""" Base of all fields, a database column
An instance of this object is a *description* of a database column. It will
not hold any data, but only provide the methods to manipulate data of an
ORM record or even prepare/update the database to hold such a field of data.
"""
_classic_read = True
_classic_write = True
_auto_join = False
_properties = False
_type = 'unknown'
_obj = None
_multi = False
_symbol_c = '%s'
_symbol_f = _symbol_set
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = None
_deprecated = False
__slots__ = [
'copy', # whether value is copied by BaseModel.copy()
'string',
'help',
'required',
'readonly',
'_domain',
'_context',
'states',
'priority',
'change_default',
'size',
'ondelete',
'translate',
'select',
'manual',
'write',
'read',
'selectable',
'group_operator',
'groups', # CSV list of ext IDs of groups
'deprecated', # Optional deprecation warning
'_args',
'_prefetch',
]
def __init__(self, string='unknown', required=False, readonly=False, domain=[], context={}, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
"""
The 'manual' keyword argument specifies if the field is a custom one.
It corresponds to the 'state' column in ir_model_fields.
"""
# add parameters and default values
args['copy'] = args.get('copy', True)
args['string'] = string
args['help'] = args.get('help', '')
args['required'] = required
args['readonly'] = readonly
args['_domain'] = domain
args['_context'] = context
args['states'] = states
args['priority'] = priority
args['change_default'] = change_default
args['size'] = size
args['ondelete'] = ondelete.lower() if ondelete else None
args['translate'] = translate
args['select'] = select
args['manual'] = manual
args['write'] = args.get('write', False)
args['read'] = args.get('read', False)
args['selectable'] = args.get('selectable', True)
args['group_operator'] = args.get('group_operator', None)
args['groups'] = args.get('groups', None)
args['deprecated'] = args.get('deprecated', None)
args['_prefetch'] = args.get('_prefetch', True)
self._args = EMPTY_DICT
for key, val in args.iteritems():
setattr(self, key, val)
# prefetch only if _classic_write, not deprecated and not manual
if not self._classic_write or self.deprecated or self.manual:
self._prefetch = False
def __getattr__(self, name):
""" Access a non-slot attribute. """
if name == '_args':
raise AttributeError(name)
try:
return self._args[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
""" Set a slot or non-slot attribute. """
try:
object.__setattr__(self, name, value)
except AttributeError:
if self._args:
self._args[name] = value
else:
self._args = {name: value} # replace EMPTY_DICT
def __delattr__(self, name):
""" Remove a non-slot attribute. """
try:
del self._args[name]
except KeyError:
raise AttributeError(name)
def new(self, _computed_field=False, **args):
""" Return a column like `self` with the given parameters; the parameter
`_computed_field` tells whether the corresponding field is computed.
"""
# memory optimization: reuse self whenever possible; you can reduce the
# average memory usage per registry by 10 megabytes!
column = type(self)(**args)
return self if self.to_field_args() == column.to_field_args() else column
def to_field(self):
""" convert column `self` to a new-style field """
from openerp.fields import Field
return Field.by_type[self._type](origin=self, **self.to_field_args())
def to_field_args(self):
""" return a dictionary with all the arguments to pass to the field """
base_items = [
('copy', self.copy),
('index', self.select),
('manual', self.manual),
('string', self.string),
('help', self.help),
('readonly', self.readonly),
('required', self.required),
('states', self.states),
('groups', self.groups),
('change_default', self.change_default),
('deprecated', self.deprecated),
]
truthy_items = filter(itemgetter(1), [
('group_operator', self.group_operator),
('size', self.size),
('ondelete', self.ondelete),
('translate', self.translate),
('domain', self._domain),
('context', self._context),
])
return dict(base_items + truthy_items + self._args.items())
def restart(self):
pass
def set(self, cr, obj, id, name, value, user=None, context=None):
cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%s', (self._symbol_set[1](value), id))
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
raise Exception(_('undefined get method !'))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
ids = obj.search(cr, uid, args+self._domain+[(name, 'ilike', value)], offset, limit, context=context)
res = obj.read(cr, uid, ids, [name], context=context)
return [x[name] for x in res]
def as_display_name(self, cr, uid, obj, value, context=None):
"""Converts a field value to a suitable string representation for a record,
e.g. when this field is used as ``rec_name``.
:param obj: the ``BaseModel`` instance this column belongs to
:param value: a proper value as returned by :py:meth:`~openerp.orm.osv.BaseModel.read`
for this column
"""
# delegated to class method, so a column type A can delegate
# to a column type B.
return self._as_display_name(self, cr, uid, obj, value, context=None)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
# This needs to be a class method, in case a column type A as to delegate
# to a column type B.
return tools.ustr(value)
# ---------------------------------------------------------
# Simple fields
# ---------------------------------------------------------
class boolean(_column):
_type = 'boolean'
_symbol_c = '%s'
_symbol_f = bool
_symbol_set = (_symbol_c, _symbol_f)
__slots__ = []
def __init__(self, string='unknown', required=False, **args):
super(boolean, self).__init__(string=string, required=required, **args)
if required:
_logger.debug(
"required=True is deprecated: making a boolean field"
" `required` has no effect, as NULL values are "
"automatically turned into False. args: %r",args)
class integer(_column):
_type = 'integer'
_symbol_c = '%s'
_symbol_f = lambda x: int(x or 0)
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self,x: x or 0
__slots__ = []
def __init__(self, string='unknown', required=False, **args):
super(integer, self).__init__(string=string, required=required, **args)
class reference(_column):
_type = 'reference'
_classic_read = False # post-process to handle missing target
__slots__ = ['selection']
def __init__(self, string, selection, size=None, **args):
if callable(selection):
from openerp import api
selection = api.expected(api.cr_uid_context, selection)
_column.__init__(self, string=string, size=size, selection=selection, **args)
def to_field_args(self):
args = super(reference, self).to_field_args()
args['selection'] = self.selection
return args
def get(self, cr, obj, ids, name, uid=None, context=None, values=None):
result = {}
# copy initial values fetched previously.
for value in values:
result[value['id']] = value[name]
if value[name]:
model, res_id = value[name].split(',')
if not obj.pool[model].exists(cr, uid, [int(res_id)], context=context):
result[value['id']] = False
return result
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
if value:
# reference fields have a 'model,id'-like value, that we need to convert
# to a real name
model_name, res_id = value.split(',')
if model_name in obj.pool and res_id:
model = obj.pool[model_name]
names = model.name_get(cr, uid, [int(res_id)], context=context)
return names[0][1] if names else False
return tools.ustr(value)
# takes a string (encoded in utf8) and returns a string (encoded in utf8)
def _symbol_set_char(self, symb):
#TODO:
# * we need to remove the "symb==False" from the next line BUT
# for now too many things rely on this broken behavior
# * the symb==None test should be common to all data types
if symb is None or symb == False:
return None
# we need to convert the string to a unicode object to be able
# to evaluate its length (and possibly truncate it) reliably
u_symb = tools.ustr(symb)
return u_symb[:self.size].encode('utf8')
class char(_column):
_type = 'char'
__slots__ = ['_symbol_f', '_symbol_set', '_symbol_set_char']
def __init__(self, string="unknown", size=None, **args):
_column.__init__(self, string=string, size=size or None, **args)
# self._symbol_set_char is defined to keep backward compatibility
self._symbol_f = self._symbol_set_char = lambda x: _symbol_set_char(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
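# Usage sketch for the char column above (illustrative only; the field name is
# hypothetical). `size` truncates stored values:
#   _columns = {'name': fields.char('Name', size=64, required=True)}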
class text(_column):
_type = 'text'
__slots__ = []
class html(text):
_type = 'html'
_symbol_c = '%s'
__slots__ = ['_sanitize', '_strip_style', '_symbol_f', '_symbol_set']
def _symbol_set_html(self, value):
if value is None or value is False:
return None
if not self._sanitize:
return value
return html_sanitize(value, strip_style=self._strip_style)
def __init__(self, string='unknown', sanitize=True, strip_style=False, **args):
super(html, self).__init__(string=string, **args)
self._sanitize = sanitize
self._strip_style = strip_style
# symbol_set redefinition because of sanitize specific behavior
self._symbol_f = self._symbol_set_html
self._symbol_set = (self._symbol_c, self._symbol_f)
def to_field_args(self):
args = super(html, self).to_field_args()
args['sanitize'] = self._sanitize
args['strip_style'] = self._strip_style
return args
import __builtin__
def _symbol_set_float(self, x):
result = __builtin__.float(x or 0.0)
digits = self.digits
if digits:
precision, scale = digits
result = float_repr(float_round(result, precision_digits=scale), precision_digits=scale)
return result
class float(_column):
_type = 'float'
_symbol_c = '%s'
_symbol_get = lambda self,x: x or 0.0
__slots__ = ['_digits', '_digits_compute', '_symbol_f', '_symbol_set']
@property
def digits(self):
if self._digits_compute:
with _get_cursor() as cr:
return self._digits_compute(cr)
else:
return self._digits
def __init__(self, string='unknown', digits=None, digits_compute=None, required=False, **args):
_column.__init__(self, string=string, required=required, **args)
# synopsis: digits_compute(cr) -> (precision, scale)
self._digits = digits
self._digits_compute = digits_compute
self._symbol_f = lambda x: _symbol_set_float(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
def to_field_args(self):
args = super(float, self).to_field_args()
args['digits'] = self._digits_compute or self._digits
return args
def digits_change(self, cr):
pass
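# Usage sketch for the float column above (illustrative only; the field name is
# hypothetical). digits=(precision, scale) rounds stored values to `scale`
# decimal places; digits_compute may be passed instead to look the precision up
# at runtime:
#   _columns = {'amount': fields.float('Amount', digits=(16, 2))}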
class monetary(_column):
_type = 'monetary'
_symbol_set = ('%s', lambda x: __builtin__.float(x or 0.0))
_symbol_get = lambda self,x: x or 0.0
def to_field_args(self):
raise NotImplementedError("fields.monetary is only supported in the new API, "
"but you can use widget='monetary' in client-side views")
class date(_column):
_type = 'date'
__slots__ = []
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
@staticmethod
def today(*args):
""" Returns the current date in a format fit for being a
default value to a ``date`` field.
This method should be provided as-is to the _defaults dict; it
should not be called.
"""
return DT.date.today().strftime(
tools.DEFAULT_SERVER_DATE_FORMAT)
@staticmethod
def context_today(model, cr, uid, context=None, timestamp=None):
"""Returns the current date as seen in the client's timezone
in a format fit for date fields.
This method may be passed as value to initialize _defaults.
:param Model model: model (osv) for which the date value is being
computed - automatically passed when used in
_defaults.
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a
datetime, regular dates can't be converted
between timezones.)
:param dict context: the 'tz' key in the context should give the
name of the User/Client timezone (otherwise
UTC is used)
:rtype: str
"""
today = timestamp or DT.datetime.now()
context_today = None
if context and context.get('tz'):
tz_name = context['tz']
else:
user = model.pool['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
if tz_name:
try:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
utc_today = utc.localize(today, is_dst=False) # UTC = no DST
context_today = utc_today.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific today date, "
"using the UTC value for `today`",
exc_info=True)
return (context_today or today).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
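# Usage sketch for the two helpers above (illustrative only; field names are
# hypothetical). The callables themselves go into _defaults, not their result:
#   _defaults = {
#       'date_order': fields.date.context_today,  # "today" in the client timezone
#       'date_created': fields.date.today,        # "today" on the server
#   }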
@staticmethod
def date_to_datetime(model, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
time of 12:00 PM (noon), because a time is needed.
:param str userdate: date string in the user's time zone
:return: UTC datetime string for server-side use
"""
user_date = DT.datetime.strptime(userdate, tools.DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = model.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + DT.timedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
class datetime(_column):
_type = 'datetime'
__slots__ = []
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
@staticmethod
def now(*args):
""" Returns the current datetime in a format fit for being a
default value to a ``datetime`` field.
This method should be provided as-is to the _defaults dict; it
should not be called.
"""
return DT.datetime.now().strftime(
tools.DEFAULT_SERVER_DATETIME_FORMAT)
@staticmethod
def context_timestamp(cr, uid, timestamp, context=None):
"""Returns the given timestamp converted to the client's timezone.
This method is *not* meant for use as a _defaults initializer,
because datetime fields are automatically converted upon
display on the client side. For _defaults, :meth:`fields.datetime.now`
should be used instead.
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone
:param dict context: the 'tz' key in the context should give the
name of the User/Client timezone (otherwise
UTC is used)
:rtype: datetime
:return: timestamp converted to timezone-aware datetime in context
timezone
"""
assert isinstance(timestamp, DT.datetime), 'Datetime instance expected'
if context and context.get('tz'):
tz_name = context['tz']
else:
registry = openerp.modules.registry.RegistryManager.get(cr.dbname)
user = registry['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
if tz_name:
try:
context_tz = pytz.timezone(tz_name)
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return utc_timestamp
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
value = datetime.context_timestamp(cr, uid, DT.datetime.strptime(value, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
return tools.ustr(value.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT))
class binary(_column):
_type = 'binary'
_classic_read = False
_classic_write = property(lambda self: not self.attachment)
# Binary values may be byte strings (python 2.6 byte array), but
# the legacy OpenERP convention is to transfer and store binaries
# as base64-encoded strings. The base64 string may be provided as a
# unicode in some circumstances, hence the str() cast in symbol_f.
# This str coercion will only work for pure ASCII unicode strings,
# on purpose - non-base64 data must be passed as 8-bit byte strings.
_symbol_c = '%s'
_symbol_f = lambda symb: symb and Binary(str(symb)) or None
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self, x: x and str(x)
__slots__ = ['attachment', 'filters']
def __init__(self, string='unknown', filters=None, **args):
args['_prefetch'] = args.get('_prefetch', False)
args['attachment'] = args.get('attachment', False)
_column.__init__(self, string=string, filters=filters, **args)
def to_field_args(self):
args = super(binary, self).to_field_args()
args['attachment'] = self.attachment
return args
def get(self, cr, obj, ids, name, user=None, context=None, values=None):
result = dict.fromkeys(ids, False)
if self.attachment:
# values are stored in attachments, retrieve them
atts = obj.pool['ir.attachment'].browse(cr, SUPERUSER_ID, [], context)
domain = [
('res_model', '=', obj._name),
('res_field', '=', name),
('res_id', 'in', ids),
]
for att in atts.search(domain):
# the 'bin_size' flag is handled by the field 'datas' itself
result[att.res_id] = att.datas
else:
# If client is requesting only the size of the field, we return it
# instead of the content. Presumably a separate request will be done
# to read the actual content if it's needed at some point.
context = context or {}
if context.get('bin_size') or context.get('bin_size_%s' % name):
postprocess = lambda val: tools.human_size(long(val))
else:
postprocess = lambda val: val
for val in (values or []):
result[val['id']] = postprocess(val[name])
return result
def set(self, cr, obj, id, name, value, user=None, context=None):
assert self.attachment
# retrieve the attachment that stores the value, and adapt it
att = obj.pool['ir.attachment'].browse(cr, SUPERUSER_ID, [], context).search([
('res_model', '=', obj._name),
('res_field', '=', name),
('res_id', '=', id),
])
if value:
if att:
att.write({'datas': value})
else:
att.create({
'name': name,
'res_model': obj._name,
'res_field': name,
'res_id': id,
'type': 'binary',
'datas': value,
})
else:
att.unlink()
return []
class selection(_column):
_type = 'selection'
__slots__ = ['selection']
def __init__(self, selection, string='unknown', **args):
if callable(selection):
from openerp import api
selection = api.expected(api.cr_uid_context, selection)
_column.__init__(self, string=string, selection=selection, **args)
def to_field_args(self):
args = super(selection, self).to_field_args()
args['selection'] = self.selection
return args
@classmethod
def reify(cls, cr, uid, model, field, context=None):
""" Munges the field's ``selection`` attribute as necessary to get
something usable out of it: calls it if it's a function, applies
translations to labels if it's not.
A callable ``selection`` is considered translated on its own.
:param orm.Model model:
:param _column field:
"""
if callable(field.selection):
return field.selection(model, cr, uid, context)
if not (context and 'lang' in context):
return field.selection
# field_to_dict isn't given a field name, only a field object, we
# need to get the name back in order to perform the translation lookup
field_name = next(
name for name, column in model._columns.iteritems()
if column == field)
translation_filter = "%s,%s" % (model._name, field_name)
translate = functools.partial(
model.pool['ir.translation']._get_source,
cr, uid, translation_filter, 'selection', context['lang'])
return [
(value, translate(label))
for value, label in field.selection
]
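# Usage sketch for the selection column above (illustrative only; the field name
# and values are hypothetical):
#   _columns = {
#       'state': fields.selection([('draft', 'Draft'), ('done', 'Done')],
#                                 'Status', required=True),
#   }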
# ---------------------------------------------------------
# Relational fields
# ---------------------------------------------------------
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update
# (2, ID) remove (delete)
# (3, ID) unlink one (target id or target of relation)
# (4, ID) link
# (5) unlink all (only valid for one2many)
#
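# For instance, writing such command tuples on a one2many column could look like
# the following sketch (model, field and values are hypothetical):
#   model.write(cr, uid, [record_id], {
#       'line_ids': [
#           (0, 0, {'name': 'new line'}),  # create a new linked record
#           (1, 7, {'name': 'renamed'}),   # update the linked record with id 7
#           (2, 8),                        # delete the linked record with id 8
#           (4, 9),                        # link the existing record with id 9
#       ],
#   }, context=context)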
class many2one(_column):
_classic_read = False
_classic_write = True
_type = 'many2one'
_symbol_c = '%s'
_symbol_f = lambda x: x or None
_symbol_set = (_symbol_c, _symbol_f)
__slots__ = ['_obj', '_auto_join']
def __init__(self, obj, string='unknown', auto_join=False, **args):
args['ondelete'] = args.get('ondelete', 'set null')
_column.__init__(self, string=string, **args)
self._obj = obj
self._auto_join = auto_join
def to_field_args(self):
args = super(many2one, self).to_field_args()
args['comodel_name'] = self._obj
args['auto_join'] = self._auto_join
return args
def set(self, cr, obj_src, id, field, values, user=None, context=None):
if not context:
context = {}
obj = obj_src.pool[self._obj]
self._table = obj._table
if type(values) == type([]):
for act in values:
if act[0] == 0:
id_new = obj.create(cr, act[2])
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (id_new, id))
elif act[0] == 1:
obj.write(cr, [act[1]], act[2], context=context)
elif act[0] == 2:
cr.execute('delete from '+self._table+' where id=%s', (act[1],))
elif act[0] == 3 or act[0] == 5:
cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
elif act[0] == 4:
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (act[1], id))
else:
if values:
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (values, id))
else:
cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
return value[1] if isinstance(value, tuple) else tools.ustr(value)
class one2many(_column):
_classic_read = False
_classic_write = False
_type = 'one2many'
__slots__ = ['_obj', '_fields_id', '_limit', '_auto_join']
def __init__(self, obj, fields_id, string='unknown', limit=None, auto_join=False, **args):
# one2many columns are not copied by default
args['copy'] = args.get('copy', False)
args['_prefetch'] = args.get('_prefetch', False)
_column.__init__(self, string=string, **args)
self._obj = obj
self._fields_id = fields_id
self._limit = limit
self._auto_join = auto_join
#one2many can't be used as condition for defaults
assert(self.change_default != True)
def to_field_args(self):
args = super(one2many, self).to_field_args()
args['comodel_name'] = self._obj
args['inverse_name'] = self._fields_id
args['auto_join'] = self._auto_join
args['limit'] = self._limit
return args
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if self._context:
context = dict(context or {})
context.update(self._context)
# retrieve the records in the comodel
comodel = obj.pool[self._obj].browse(cr, user, [], context)
inverse = self._fields_id
domain = self._domain(obj) if callable(self._domain) else self._domain
domain = domain + [(inverse, 'in', ids)]
records = comodel.search(domain, limit=self._limit)
result = {id: [] for id in ids}
# read the inverse of records without prefetching other fields on them
for record in records.with_context(prefetch_fields=False):
# record[inverse] may be a record or an integer
result[int(record[inverse])].append(record.id)
return result
def set(self, cr, obj, id, field, values, user=None, context=None):
result = []
context = dict(context or {})
context.update(self._context)
if not values:
return
obj = obj.pool[self._obj]
rec = obj.browse(cr, user, [], context=context)
with rec.env.norecompute():
_table = obj._table
for act in values:
if act[0] == 0:
act[2][self._fields_id] = id
id_new = obj.create(cr, user, act[2], context=context)
result += obj._store_get_values(cr, user, [id_new], act[2].keys(), context)
elif act[0] == 1:
obj.write(cr, user, [act[1]], act[2], context=context)
elif act[0] == 2:
obj.unlink(cr, user, [act[1]], context=context)
elif act[0] == 3:
inverse_field = obj._fields.get(self._fields_id)
assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
# if the model has on delete cascade, just delete the row
if inverse_field.ondelete == "cascade":
obj.unlink(cr, user, [act[1]], context=context)
else:
cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%s', (act[1],))
elif act[0] == 4:
# check whether the given record is already linked
rec = obj.browse(cr, SUPERUSER_ID, act[1], {'prefetch_fields': False})
if int(rec[self._fields_id]) != id:
# Must use write() to recompute parent_store structure if needed and check access rules
obj.write(cr, user, [act[1]], {self._fields_id:id}, context=context or {})
elif act[0] == 5:
inverse_field = obj._fields.get(self._fields_id)
assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
# if the o2m has a static domain we must respect it when unlinking
domain = self._domain(obj) if callable(self._domain) else self._domain
extra_domain = domain or []
ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
# If the model has cascade deletion, we delete the rows because it is the intended behavior,
# otherwise we only nullify the reverse foreign key column.
if inverse_field.ondelete == "cascade":
obj.unlink(cr, user, ids_to_unlink, context=context)
else:
obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
elif act[0] == 6:
# Must use write() to recompute parent_store structure if needed
obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
ids2 = act[2] or [0]
cr.execute('select id from '+_table+' where '+self._fields_id+'=%s and id <> ALL (%s)', (id,ids2))
ids3 = map(lambda x:x[0], cr.fetchall())
obj.write(cr, user, ids3, {self._fields_id:False}, context=context or {})
return result
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
domain = self._domain(obj) if callable(self._domain) else self._domain
return obj.pool[self._obj].name_search(cr, uid, value, domain, operator, context=context,limit=limit)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
raise NotImplementedError('One2Many columns should not be used as record name (_rec_name)')
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update (write fields to ID)
# (2, ID) remove (calls unlink on ID, that will also delete the relationship because of the ondelete)
# (3, ID) unlink (delete the relationship between the two objects but does not delete ID)
# (4, ID) link (add a relationship)
# (5, ID) unlink all
# (6, ?, ids) set a list of links
#
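# For a many2many column, the most common write is the "replace all links"
# command; a sketch with a hypothetical field name and ids:
#   model.write(cr, uid, [record_id], {'tag_ids': [(6, 0, [1, 2, 3])]})
# which removes the existing relation rows for the record and re-links ids 1, 2, 3.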
class many2many(_column):
"""Encapsulates the logic of a many-to-many bidirectional relationship, handling the
low-level details of the intermediary relationship table transparently.
A many-to-many relationship is always symmetrical, and can be declared and accessed
from either endpoint model.
If ``rel`` (relationship table name), ``id1`` (source foreign key column name)
or ``id2`` (destination foreign key column name) are not specified, the system will
provide default values. This will by default only allow one single symmetrical
many-to-many relationship between the source and destination model.
For multiple many-to-many relationship between the same models and for
relationships where source and destination models are the same, ``rel``, ``id1``
and ``id2`` should be specified explicitly.
:param str obj: destination model
:param str rel: optional name of the intermediary relationship table. If not specified,
a canonical name will be derived based on the alphabetically-ordered
model names of the source and destination (in the form: ``amodel_bmodel_rel``).
Automatic naming is not possible when the source and destination are
the same, for obvious ambiguity reasons.
:param str id1: optional name for the column holding the foreign key to the current
model in the relationship table. If not specified, a canonical name
will be derived based on the model name (in the form: `src_model_id`).
:param str id2: optional name for the column holding the foreign key to the destination
model in the relationship table. If not specified, a canonical name
will be derived based on the model name (in the form: `dest_model_id`)
:param str string: field label
"""
_classic_read = False
_classic_write = False
_type = 'many2many'
__slots__ = ['_obj', '_rel', '_id1', '_id2', '_limit', '_auto_join']
def __init__(self, obj, rel=None, id1=None, id2=None, string='unknown', limit=None, **args):
"""
"""
args['_prefetch'] = args.get('_prefetch', False)
_column.__init__(self, string=string, **args)
self._obj = obj
if rel and '.' in rel:
raise Exception(_('The second argument of the many2many field %s must be a SQL table! '
'You used %s, which is not a valid SQL table name.') % (string, rel))
self._rel = rel
self._id1 = id1
self._id2 = id2
self._limit = limit
self._auto_join = False
def to_field_args(self):
args = super(many2many, self).to_field_args()
args['comodel_name'] = self._obj
args['relation'] = self._rel
args['column1'] = self._id1
args['column2'] = self._id2
args['limit'] = self._limit
return args
def _sql_names(self, source_model):
"""Return the SQL names defining the structure of the m2m relationship table
:return: (m2m_table, local_col, dest_col) where m2m_table is the table name,
local_col is the name of the column holding the current model's FK, and
dest_col is the name of the column holding the destination model's FK.
"""
tbl, col1, col2 = self._rel, self._id1, self._id2
if not all((tbl, col1, col2)):
# the default table name is based on the stable alphabetical order of tables
dest_model = source_model.pool[self._obj]
tables = tuple(sorted([source_model._table, dest_model._table]))
if not tbl:
assert tables[0] != tables[1], 'Implicit/Canonical naming of m2m relationship table '\
'is not possible when source and destination models are '\
'the same'
tbl = '%s_%s_rel' % tables
openerp.models.check_pg_name(tbl)
if not col1:
col1 = '%s_id' % source_model._table
if not col2:
col2 = '%s_id' % dest_model._table
return tbl, col1, col2
def _get_query_and_where_params(self, cr, model, ids, values, where_params):
""" Extracted from ``get`` to facilitate fine-tuning of the generated
query. """
query = """SELECT %(rel)s.%(id2)s, %(rel)s.%(id1)s
FROM %(rel)s, %(from_c)s
WHERE %(where_c)s
AND %(rel)s.%(id1)s IN %%s
AND %(rel)s.%(id2)s = %(tbl)s.id
%(order_by)s
%(limit)s
OFFSET %(offset)d
""" % values
return query, where_params + [tuple(ids)]
def get(self, cr, model, ids, name, user=None, offset=0, context=None, values=None):
if not context:
context = {}
if not values:
values = {}
res = {}
if not ids:
return res
for id in ids:
res[id] = []
if offset:
_logger.warning(
"Specifying offset at a many2many.get() is deprecated and may"
" produce unpredictable results.")
obj = model.pool[self._obj]
rel, id1, id2 = self._sql_names(model)
# static domains are lists, and are evaluated both here and on client-side, while string
# domains are supposed to be dynamic and evaluated on client-side only (thus ignored here)
# FIXME: make this distinction explicit in API!
domain = isinstance(self._domain, list) and self._domain or []
wquery = obj._where_calc(cr, user, domain, context=context)
obj._apply_ir_rules(cr, user, wquery, 'read', context=context)
order_by = obj._generate_order_by(cr, user, None, wquery, context=context)
from_c, where_c, where_params = wquery.get_sql()
if not where_c:
where_c = '1=1'
limit_str = ''
if self._limit is not None:
limit_str = ' LIMIT %d' % self._limit
query_parts = {
'rel': rel,
'from_c': from_c,
'tbl': obj._table,
'id1': id1,
'id2': id2,
'where_c': where_c,
'limit': limit_str,
'order_by': order_by,
'offset': offset,
}
query, where_params = self._get_query_and_where_params(cr, model, ids,
query_parts,
where_params)
cr.execute(query, where_params)
for r in cr.fetchall():
res[r[1]].append(r[0])
return res
def set(self, cr, model, id, name, values, user=None, context=None):
if not context:
context = {}
if not values:
return
rel, id1, id2 = self._sql_names(model)
obj = model.pool[self._obj]
for act in values:
if not (isinstance(act, list) or isinstance(act, tuple)) or not act:
continue
if act[0] == 0:
idnew = obj.create(cr, user, act[2], context=context)
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, idnew))
elif act[0] == 1:
obj.write(cr, user, [act[1]], act[2], context=context)
elif act[0] == 2:
obj.unlink(cr, user, [act[1]], context=context)
elif act[0] == 3:
cr.execute('delete from '+rel+' where ' + id1 + '=%s and '+ id2 + '=%s', (id, act[1]))
elif act[0] == 4:
# following queries are in the same transaction - so should be relatively safe
cr.execute('SELECT 1 FROM '+rel+' WHERE '+id1+' = %s and '+id2+' = %s', (id, act[1]))
if not cr.fetchone():
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, act[1]))
elif act[0] == 5:
cr.execute('delete from '+rel+' where ' + id1 + ' = %s', (id,))
elif act[0] == 6:
d1, d2,tables = obj.pool.get('ir.rule').domain_get(cr, user, obj._name, context=context)
if d1:
d1 = ' and ' + ' and '.join(d1)
else:
d1 = ''
cr.execute('delete from '+rel+' where '+id1+'=%s AND '+id2+' IN (SELECT '+rel+'.'+id2+' FROM '+rel+', '+','.join(tables)+' WHERE '+rel+'.'+id1+'=%s AND '+rel+'.'+id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2)
for act_nbr in act[2]:
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s, %s)', (id, act_nbr))
#
# TODO: use a name_search
#
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', operator, value)], offset, limit, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
raise NotImplementedError('Many2Many columns should not be used as record name (_rec_name)')
def get_nice_size(value):
size = 0
if isinstance(value, (int,long)):
size = value
elif value: # this is supposed to be a string
size = len(value)
if size < 12: # assume the value is already a human-readable size
return value
return tools.human_size(size)
# See http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# and http://bugs.python.org/issue10066
invalid_xml_low_bytes = re.compile(r'[\x00-\x08\x0b-\x0c\x0e-\x1f]')
def sanitize_binary_value(value):
# binary fields should be 7-bit ASCII base64-encoded data,
# but we do additional sanity checks to make sure the values
# are not something else that won't pass via XML-RPC
if isinstance(value, (xmlrpclib.Binary, tuple, list, dict)):
# these builtin types are meant to pass untouched
return value
# Handle invalid bytes values that will cause problems
# for XML-RPC. See for more info:
# - http://bugs.python.org/issue10066
# - http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# Coercing to unicode would normally allow it to properly pass via
# XML-RPC, transparently encoded as UTF-8 by xmlrpclib.
# (this works for _any_ byte values, thanks to the fallback
# to latin-1 passthrough encoding when decoding to unicode)
value = tools.ustr(value)
# Due to Python bug #10066 this could still yield invalid XML
# bytes, specifically in the low byte range, that will crash
# the decoding side: [\x00-\x08\x0b-\x0c\x0e-\x1f]
# So check for low bytes values, and if any, perform
# base64 encoding - not very smart or useful, but this is
# our last resort to avoid crashing the request.
if invalid_xml_low_bytes.search(value):
# b64-encode after restoring the pure bytes with latin-1
# passthrough encoding
value = base64.b64encode(value.encode('latin-1'))
return value
# ---------------------------------------------------------
# Function fields
# ---------------------------------------------------------
class function(_column):
"""
A field whose value is computed by a function (rather
than being read from the database).
:param fnct: the callable that will compute the field value.
:param arg: arbitrary value to be passed to ``fnct`` when computing the value.
:param fnct_inv: the callable that will allow writing values in that field
(if not provided, the field is read-only).
:param fnct_inv_arg: arbitrary value to be passed to ``fnct_inv`` when
writing a value.
:param str type: type of the field simulated by the function field
:param fnct_search: the callable that allows searching on the field
(if not provided, search will not return any result).
:param store: store computed value in database
(see :ref:`The *store* parameter <field-function-store>`).
:type store: True or dict specifying triggers for field computation
:param multi: name of batch for batch computation of function fields.
All fields with the same batch name will be computed by
a single function call. This changes the signature of the
``fnct`` callable.
.. _field-function-fnct: The ``fnct`` parameter
.. rubric:: The ``fnct`` parameter
The callable implementing the function field must have the following signature:
.. function:: fnct(model, cr, uid, ids, field_name(s), arg, context)
Implements the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param field_name(s): name of the field to compute, or if ``multi`` is provided,
list of field names to compute.
:type field_name(s): str | [str]
:param arg: arbitrary value passed when declaring the function field
:rtype: dict
:return: mapping of ``ids`` to computed values, or if multi is provided,
to a map of field_names to computed values
The values in the returned dictionary must be of the type specified by the type
argument in the field declaration.
Here is an example with a simple ``char`` function field::
# declarations
def compute(self, cr, uid, ids, field_name, arg, context):
result = {}
# ...
return result
_columns['my_char'] = fields.function(compute, type='char', size=50)
# when called with ``ids=[1,2,3]``, ``compute`` could return:
{
1: 'foo',
2: 'bar',
3: False # null values should be returned explicitly too
}
If ``multi`` is set, then ``field_name`` is replaced by ``field_names``: a list
of the field names that should be computed. Each value in the returned
dictionary must then be a dictionary mapping field names to values.
Here is an example where two function fields (``name`` and ``age``)
are both computed by a single function field::
# declarations
def compute_person_data(self, cr, uid, ids, field_names, arg, context):
result = {}
# ...
return result
_columns['name'] = fields.function(compute_person_data, type='char',\
size=50, multi='person_data')
_columns['age'] = fields.function(compute_person_data, type='integer',\
multi='person_data')
# when called with ``ids=[1,2,3]``, ``compute_person_data`` could return:
{
1: {'name': 'Bob', 'age': 23},
2: {'name': 'Sally', 'age': 19},
3: {'name': 'unknown', 'age': False}
}
.. _field-function-fnct-inv:
.. rubric:: The ``fnct_inv`` parameter
This callable implements the write operation for the function field
and must have the following signature:
.. function:: fnct_inv(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context)
Callable that implements the ``write`` operation for the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param int id: the identifier of the object to write on
:param str field_name: name of the field to set
:param fnct_inv_arg: arbitrary value passed when declaring the function field
:return: True
When writing values for a function field, the ``multi`` parameter is ignored.
.. _field-function-fnct-search:
.. rubric:: The ``fnct_search`` parameter
This callable implements the search operation for the function field
and must have the following signature:
.. function:: fnct_search(model, cr, uid, model_again, field_name, criterion, context)
Callable that implements the ``search`` operation for the function field by expanding
a search criterion based on the function field into a new domain based only on
columns that are stored in the database.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param orm model_again: same value as ``model`` (seriously! this is for backwards
compatibility)
:param str field_name: name of the field to search on
:param list criterion: domain component specifying the search criterion on the field.
:rtype: list
:return: domain to use instead of ``criterion`` when performing the search.
This new domain must be based only on columns stored in the database, as it
will be used directly without any translation.
The returned value must be a domain, that is, a list of the form [(field_name, operator, operand)].
The most generic way to implement ``fnct_search`` is to directly search for the records that
match the given ``criterion``, and return their ``ids`` wrapped in a domain, such as
``[('id','in',[1,3,5])]``.
.. _field-function-store:
.. rubric:: The ``store`` parameter
The ``store`` parameter allows caching the result of the field computation in the
database, and defining the triggers that will invalidate that cache and force a
recomputation of the function field.
When not provided, the field is computed every time its value is read.
The value of ``store`` may be either ``True`` (to recompute the field value whenever
any field in the same record is modified), or a dictionary specifying a more
flexible set of recomputation triggers.
A trigger specification is a dictionary that maps the names of the models that
will trigger the computation, to a tuple describing the trigger rule, in the
following form::
store = {
'trigger_model': (mapping_function,
['trigger_field1', 'trigger_field2'],
priority),
}
A trigger rule is defined by a 3-item tuple where:
* The ``mapping_function`` is defined as follows:
.. function:: mapping_function(trigger_model, cr, uid, trigger_ids, context)
Callable that maps record ids of a trigger model to ids of the
corresponding records in the source model (whose field values
need to be recomputed).
:param orm model: trigger_model
:param list trigger_ids: ids of the records of trigger_model that were
modified
:rtype: list
:return: list of ids of the source model whose function field values
need to be recomputed
* The second item is a list of the fields that should act as triggers for
the computation. If an empty list is given, all fields will act as triggers.
* The last item is the priority, used to order the triggers when processing them
after any write operation on a model that has function field triggers. The
default priority is 10.
In fact, setting store = True is the same as using the following trigger dict::
store = {
'model_itself': (lambda self, cr, uid, ids, context: ids,
[],
10)
}
"""
_properties = True
__slots__ = [
'_type',
'_classic_read',
'_classic_write',
'_symbol_c',
'_symbol_f',
'_symbol_set',
'_symbol_get',
'_fnct',
'_arg',
'_fnct_inv',
'_fnct_inv_arg',
'_fnct_search',
'_multi',
'store',
'_digits',
'_digits_compute',
'selection',
'_obj',
]
@property
def digits(self):
if self._digits_compute:
with _get_cursor() as cr:
return self._digits_compute(cr)
else:
return self._digits
#
# multi: compute several fields in one call
#
def __init__(self, fnct, arg=None, fnct_inv=None, fnct_inv_arg=None, type='float', fnct_search=None, obj=None, store=False, multi=False, **args):
self._classic_read = False
self._classic_write = False
self._prefetch = False
self._symbol_c = '%s'
self._symbol_f = _symbol_set
self._symbol_set = (self._symbol_c, self._symbol_f)
self._symbol_get = None
# pop attributes that should not be assigned to self
self._digits = args.pop('digits', (16,2))
self._digits_compute = args.pop('digits_compute', None)
self._obj = args.pop('relation', obj)
# function fields are not copied by default
args['copy'] = args.get('copy', False)
_column.__init__(self, **args)
self._type = type
self._fnct = fnct
self._arg = arg
self._fnct_inv = fnct_inv
self._fnct_inv_arg = fnct_inv_arg
self._fnct_search = fnct_search
self.store = store
self._multi = multi
if not fnct_inv:
self.readonly = 1
if not fnct_search and not store:
self.selectable = False
if callable(args.get('selection')):
from openerp import api
self.selection = api.expected(api.cr_uid_context, args['selection'])
if store:
if self._type != 'many2one':
# m2o fields need to return tuples with name_get, not just foreign keys
self._classic_read = True
self._classic_write = True
if type=='binary':
self._symbol_get=lambda x:x and str(x)
else:
self._prefetch = True
if type == 'char':
self._symbol_c = char._symbol_c
self._symbol_f = lambda x: _symbol_set_char(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
elif type == 'float':
self._symbol_c = float._symbol_c
self._symbol_f = lambda x: _symbol_set_float(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
else:
type_class = globals().get(type)
if type_class is not None:
self._symbol_c = type_class._symbol_c
self._symbol_f = type_class._symbol_f
self._symbol_set = type_class._symbol_set
def new(self, _computed_field=False, **args):
if _computed_field:
# field is computed, we need an instance of a non-function column
type_class = globals()[self._type]
return type_class(**args)
else:
# HACK: function fields are tricky to recreate, simply return a copy
import copy
return copy.copy(self)
def to_field_args(self):
args = super(function, self).to_field_args()
args['store'] = bool(self.store)
if self._type in ('float',):
args['digits'] = self._digits_compute or self._digits
elif self._type in ('binary',):
# limitation: binary function fields cannot be stored in attachments
args['attachment'] = False
elif self._type in ('selection', 'reference'):
args['selection'] = self.selection
elif self._type in ('many2one', 'one2many', 'many2many'):
args['comodel_name'] = self._obj
return args
def digits_change(self, cr):
pass
def search(self, cr, uid, obj, name, args, context=None):
if not self._fnct_search:
#CHECKME: should raise an exception
return []
return self._fnct_search(obj, cr, uid, obj, name, args, context=context)
def postprocess(self, cr, uid, obj, field, value=None, context=None):
return self._postprocess_batch(cr, uid, obj, field, {0: value}, context=context)[0]
def _postprocess_batch(self, cr, uid, obj, field, values, context=None):
if not values:
return values
if context is None:
context = {}
field_type = obj._columns[field]._type
new_values = dict(values)
if field_type == 'binary':
if context.get('bin_size'):
# client requests only the size of binary fields
for rid, value in values.iteritems():
if value:
new_values[rid] = get_nice_size(value)
elif not context.get('bin_raw'):
for rid, value in values.iteritems():
if value:
new_values[rid] = sanitize_binary_value(value)
return new_values
def get(self, cr, obj, ids, name, uid=False, context=None, values=None):
multi = self._multi
# if we already have a value, don't recompute it.
# This happens in the case of stored many2one fields
if values and not multi and name in values[0]:
result = dict((v['id'], v[name]) for v in values)
elif values and multi and all(n in values[0] for n in name):
result = dict((v['id'], dict((n, v[n]) for n in name)) for v in values)
else:
result = self._fnct(obj, cr, uid, ids, name, self._arg, context)
if multi:
swap = {}
for rid, values in result.iteritems():
for f, v in values.iteritems():
if f not in name:
continue
swap.setdefault(f, {})[rid] = v
for field, values in swap.iteritems():
new_values = self._postprocess_batch(cr, uid, obj, field, values, context)
for rid, value in new_values.iteritems():
result[rid][field] = value
else:
result = self._postprocess_batch(cr, uid, obj, name, result, context)
return result
def set(self, cr, obj, id, name, value, user=None, context=None):
if not context:
context = {}
if self._fnct_inv:
self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
# Function fields are supposed to emulate a basic field type,
# so they can delegate to the basic type for record name rendering
return globals()[field._type]._as_display_name(field, cr, uid, obj, value, context=context)
# ---------------------------------------------------------
# Related fields
# ---------------------------------------------------------
class related(function):
"""Field that points to some data inside another field of the current record.
Example::
_columns = {
'foo_id': fields.many2one('my.foo', 'Foo'),
'bar': fields.related('foo_id', 'frol', type='char', string='Frol of Foo'),
}
"""
__slots__ = ['arg', '_relations']
def _related_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
# assume self._arg = ('foo', 'bar', 'baz')
# domain = [(name, op, val)] => search [('foo.bar.baz', op, val)]
field = '.'.join(self._arg)
return map(lambda x: (field, x[1], x[2]), domain)
def _related_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for instance in obj.browse(cr, uid, ids, context=context):
# traverse all fields except the last one
for field in self.arg[:-1]:
instance = instance[field][:1]
if instance:
# write on the last field of the target record
instance.write({self.arg[-1]: values})
def _related_read(self, obj, cr, uid, ids, field_name, args, context=None):
res = {}
for record in obj.browse(cr, SUPERUSER_ID, ids, context=context):
value = record
# traverse all fields except the last one
for field in self.arg[:-1]:
value = value[field][:1]
# read the last field on the target record
res[record.id] = value[self.arg[-1]]
if self._type == 'many2one':
# res[id] is a recordset; convert it to (id, name) or False.
# Perform name_get as root, as seeing the name of a related object depends on
# access rights on the source document, not the target, so the user may not have access.
value_ids = list(set(value.id for value in res.itervalues() if value))
value_name = dict(obj.pool[self._obj].name_get(cr, SUPERUSER_ID, value_ids, context=context))
res = dict((id, bool(value) and (value.id, value_name[value.id])) for id, value in res.iteritems())
elif self._type in ('one2many', 'many2many'):
# res[id] is a recordset; convert it to a list of ids
res = dict((id, value.ids) for id, value in res.iteritems())
return res
def __init__(self, *arg, **args):
self.arg = arg
self._relations = []
super(related, self).__init__(self._related_read, arg, self._related_write, fnct_inv_arg=arg, fnct_search=self._related_search, **args)
if self.store is True:
# TODO: improve here to change self.store = {...} according to related objects
pass
class sparse(function):
__slots__ = ['serialization_field']
def convert_value(self, obj, cr, uid, record, value, read_value, context=None):
"""
+ For a many2many field, a list of tuples is expected.
Here is the list of tuples that are accepted, with the corresponding semantics::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
(3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
(4, ID) link to existing record with id = ID (adds a relationship)
(5) unlink all (like using (3,ID) for all linked records)
(6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
Example:
[(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
+ For a one2many field, a list of tuples is expected.
Here is the list of tuples that are accepted, with the corresponding semantics::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
Example:
[(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
"""
if self._type == 'many2many':
if not value:
return []
assert value[0][0] == 6, 'Unsupported m2m value for sparse field: %s' % value
return value[0][2]
elif self._type == 'one2many':
if not read_value:
read_value = []
relation_obj = obj.pool[self.relation]
for vals in value:
assert vals[0] in (0,1,2), 'Unsupported o2m value for sparse field: %s' % vals
if vals[0] == 0:
read_value.append(relation_obj.create(cr, uid, vals[2], context=context))
elif vals[0] == 1:
relation_obj.write(cr, uid, vals[1], vals[2], context=context)
elif vals[0] == 2:
relation_obj.unlink(cr, uid, vals[1], context=context)
read_value.remove(vals[1])
return read_value
return value
def _sparse_write(self,obj,cr, uid, ids, field_name, value, args, context=None):
if not type(ids) == list:
ids = [ids]
records = obj.browse(cr, uid, ids, context=context)
for record in records:
# grab serialized value as object - already deserialized
serialized = getattr(record, self.serialization_field)
if value is None:
# simply delete the key to unset it.
serialized.pop(field_name, None)
else:
serialized[field_name] = self.convert_value(obj, cr, uid, record, value, serialized.get(field_name), context=context)
obj.write(cr, uid, ids, {self.serialization_field: serialized}, context=context)
return True
def _sparse_read(self, obj, cr, uid, ids, field_names, args, context=None):
results = {}
records = obj.browse(cr, uid, ids, context=context)
for record in records:
# grab serialized value as object - already deserialized
serialized = getattr(record, self.serialization_field)
results[record.id] = {}
for field_name in field_names:
field_type = obj._columns[field_name]._type
value = serialized.get(field_name, False)
if field_type in ('one2many','many2many'):
value = value or []
if value:
# filter out deleted records as superuser
relation_obj = obj.pool[obj._columns[field_name].relation]
value = relation_obj.exists(cr, openerp.SUPERUSER_ID, value)
if type(value) in (int,long) and field_type == 'many2one':
relation_obj = obj.pool[obj._columns[field_name].relation]
# check for deleted record as superuser
if not relation_obj.exists(cr, openerp.SUPERUSER_ID, [value]):
value = False
results[record.id][field_name] = value
return results
def __init__(self, serialization_field, **kwargs):
self.serialization_field = serialization_field
super(sparse, self).__init__(self._sparse_read, fnct_inv=self._sparse_write, multi='__sparse_multi', **kwargs)
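# Usage sketch for the sparse column above (illustrative only; field names are
# hypothetical). The value lives under a key of the serialized column instead of
# in its own database column:
#   _columns = {
#       'x_extra': fields.serialized('Extra data'),
#       'x_note': fields.sparse(type='char', string='Note',
#                               serialization_field='x_extra'),
#   }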
# ---------------------------------------------------------
# Dummy fields
# ---------------------------------------------------------
class dummy(function):
__slots__ = ['arg', '_relations']
def _dummy_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
return []
def _dummy_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
return False
def _dummy_read(self, obj, cr, uid, ids, field_name, args, context=None):
return {}
def __init__(self, *arg, **args):
self.arg = arg
self._relations = []
super(dummy, self).__init__(self._dummy_read, arg, self._dummy_write, fnct_inv_arg=arg, fnct_search=self._dummy_search, **args)
# ---------------------------------------------------------
# Serialized fields
# ---------------------------------------------------------
class serialized(_column):
""" A field able to store an arbitrary python data structure.
Note: only plain, JSON-serializable values are allowed.
"""
_type = 'serialized'
__slots__ = []
def _symbol_set_struct(val):
return json.dumps(val)
def _symbol_get_struct(self, val):
return json.loads(val or '{}')
_symbol_c = '%s'
_symbol_f = _symbol_set_struct
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = _symbol_get_struct
def __init__(self, *args, **kwargs):
kwargs['_prefetch'] = kwargs.get('_prefetch', False)
super(serialized, self).__init__(*args, **kwargs)
# TODO: review this class completely for speed improvements
class property(function):
__slots__ = []
def to_field_args(self):
args = super(property, self).to_field_args()
args['company_dependent'] = True
return args
def _property_search(self, tobj, cr, uid, obj, name, domain, context=None):
ir_property = obj.pool['ir.property']
result = []
for field, operator, value in domain:
result += ir_property.search_multi(cr, uid, name, tobj._name, operator, value, context=context)
return result
def _property_write(self, obj, cr, uid, id, prop_name, value, obj_dest, context=None):
ir_property = obj.pool['ir.property']
ir_property.set_multi(cr, uid, prop_name, obj._name, {id: value}, context=context)
return True
def _property_read(self, obj, cr, uid, ids, prop_names, obj_dest, context=None):
ir_property = obj.pool['ir.property']
res = {id: {} for id in ids}
for prop_name in prop_names:
field = obj._fields[prop_name]
values = ir_property.get_multi(cr, uid, prop_name, obj._name, ids, context=context)
if field.type == 'many2one':
# name_get the non-null values as SUPERUSER_ID
vals = sum(set(filter(None, values.itervalues())),
obj.pool[field.comodel_name].browse(cr, uid, [], context=context))
vals_name = dict(vals.sudo().name_get()) if vals else {}
for id, value in values.iteritems():
ng = False
if value and value.id in vals_name:
ng = value.id, vals_name[value.id]
res[id][prop_name] = ng
else:
for id, value in values.iteritems():
res[id][prop_name] = value
return res
def __init__(self, **args):
if 'view_load' in args:
_logger.warning("view_load attribute is deprecated on ir.fields. Args: %r", args)
args = dict(args)
args['obj'] = args.pop('relation', '') or args.get('obj', '')
super(property, self).__init__(
fnct=self._property_read,
fnct_inv=self._property_write,
fnct_search=self._property_search,
multi='properties',
**args
)
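# Usage sketch for the property column above (illustrative only; names are
# hypothetical). A property field stores one value per company in ir.property
# instead of in a regular column:
#   _columns = {
#       'default_account_id': fields.property(type='many2one',
#                                              relation='my.account',
#                                              string='Default Account'),
#   }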
class column_info(object):
""" Struct containing details about an osv column, either one local to
its model, or one inherited via _inherits.
.. attribute:: name
name of the column
.. attribute:: column
column instance, subclass of :class:`_column`
.. attribute:: parent_model
if the column is inherited, name of the model that contains it,
``None`` for local columns.
.. attribute:: parent_column
the name of the column containing the m2o relationship to the
parent model that contains this column, ``None`` for local columns.
.. attribute:: original_parent
if the column is inherited, name of the original parent model that
contains it (i.e. in case of multilevel inheritance), ``None`` for
local columns.
"""
__slots__ = ['name', 'column', 'parent_model', 'parent_column', 'original_parent']
def __init__(self, name, column, parent_model=None, parent_column=None, original_parent=None):
self.name = name
self.column = column
self.parent_model = parent_model
self.parent_column = parent_column
self.original_parent = original_parent
def __str__(self):
return '%s(%s, %s, %s, %s, %s)' % (
self.__class__.__name__, self.name, self.column,
self.parent_model, self.parent_column, self.original_parent)
| gpl-3.0 | -2,648,863,562,472,183,000 | 40.409414 | 228 | 0.55991 | false |
madprog/PyPanel | test.py | 1 | 1206 | import Xlib
import Xlib.display
display = Xlib.display.Display()
screen = display.screen()
root = screen.root
window = root.create_window(
0, 0, 800, 600,
0,
screen.root_depth,
window_class=Xlib.X.InputOutput,
visual=Xlib.X.CopyFromParent,
colormap=Xlib.X.CopyFromParent,
event_mask=(
Xlib.X.ExposureMask
|Xlib.X.ButtonPressMask
|Xlib.X.ButtonReleaseMask
|Xlib.X.EnterWindowMask
)
)
_WIN_STATE = display.intern_atom("_WIN_STATE")
_MOTIF_WM_HINTS = display.intern_atom("_MOTIF_WM_HINTS")
window.set_wm_name("PyPanel")
window.set_wm_class("pypanel","PyPanel")
window.set_wm_hints(flags=(Xlib.Xutil.InputHint|Xlib.Xutil.StateHint), input=0, initial_state=1)
window.set_wm_normal_hints(flags=(Xlib.Xutil.PPosition|Xlib.Xutil.PMaxSize|Xlib.Xutil.PMinSize),
min_width=800, min_height=600,
max_width=800, max_height=600)
window.change_property(_WIN_STATE,Xlib.Xatom.CARDINAL,32,[1])
window.change_property(_MOTIF_WM_HINTS, _MOTIF_WM_HINTS, 32, [0x2, 0x0, 0x0, 0x0, 0x0])
#window.change_property(self._DESKTOP, Xatom.CARDINAL, 32, [0xffffffffL])
#window.change_property(dsp.intern_atom("_NET_WM_WINDOW_TYPE"),
#Xatom.ATOM, 32, [dsp.intern_atom("_NET_WM_WINDOW_TYPE_DOCK")])
| gpl-2.0 | 3,472,756,760,394,274,300 | 33.457143 | 96 | 0.731343 | false |
CMPUT410W15T02/CMPUT410W15-project | testenv/lib/python2.7/site-packages/django/contrib/gis/geoip/tests.py | 57 | 4734 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.geoip import HAS_GEOIP
from django.utils import six
if HAS_GEOIP:
from . import GeoIP, GeoIPException
if HAS_GEOS:
from ..geos import GEOSGeometry
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat').
@skipUnless(HAS_GEOIP and getattr(settings, "GEOIP_PATH", None),
"GeoIP is required along with the GEOIP_PATH setting.")
class GeoIPTest(unittest.TestCase):
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertEqual(True, bool(g._country))
self.assertEqual(True, bool(g._city))
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertEqual(None, g4._country)
g5 = GeoIP(cntry, city='')
self.assertEqual(None, g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIPException, GeoIP, cache=bad)
if isinstance(bad, six.string_types):
e = GeoIPException
else:
e = TypeError
self.assertRaises(e, GeoIP, bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
fqdn = 'www.google.com'
addr = '12.215.42.19'
for query in (fqdn, addr):
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
@skipUnless(HAS_GEOS, "Geos is required")
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
addr = '128.249.1.1'
fqdn = 'tmc.edu'
for query in (fqdn, addr):
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(713, d['area_code'])
geom = g.geos(query)
self.assertTrue(isinstance(geom, GEOSGeometry))
lon, lat = (-95.4010, 29.7079)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def test05_unicode_response(self):
"Testing that GeoIP strings are properly encoded, see #16553."
g = GeoIP()
d = g.city("www.osnabrueck.de")
self.assertEqual('Osnabrück', d['city'])
d = g.country('200.7.49.81')
self.assertEqual('Curaçao', d['country_name'])
| gpl-2.0 | 3,687,879,909,385,123,000 | 37.471545 | 89 | 0.60038 | false |
Williams224/davinci-scripts | ksteta3pi/Consideredbkg/MC_12_11104101_MagUp.py | 3 | 9326 | #-- GAUDI jobOptions generated on Fri Jul 17 16:37:45 2015
#-- Contains event types :
#-- 11104101 - 106 files - 2116085 events - 602.40 GBytes
#-- Extra information about the data processing phases:
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000001_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000002_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000003_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000004_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000005_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000006_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000007_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000008_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000009_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000010_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000011_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000012_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000013_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000014_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000015_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000016_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000017_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000019_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000020_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000021_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000022_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000023_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000024_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000025_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000055_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000056_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000057_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000058_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000059_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000060_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000061_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000062_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000063_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000065_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000066_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000068_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000070_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000073_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000075_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000077_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000079_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000082_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000086_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000090_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000092_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000093_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000096_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000099_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000100_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000101_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000102_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000103_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000104_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000105_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000106_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000107_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000108_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000109_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000110_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000111_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000112_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000113_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000114_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000115_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000116_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000117_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000118_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000119_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000120_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000121_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000122_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000123_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000124_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000125_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000126_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000127_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000128_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000129_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000130_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000131_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000132_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000133_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000134_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000135_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000136_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000137_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000138_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000139_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000140_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000141_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000142_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000143_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000144_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000145_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000146_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000147_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000148_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000149_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000150_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000151_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000152_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000153_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000154_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000155_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000156_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00041907/0000/00041907_00000157_2.AllStreams.dst'
], clear=True)
| mit | -2,458,781,069,985,709,600 | 79.396552 | 113 | 0.79777 | false |
softak/webfaction_demo | vendor-local/lib/python/rdflib/compare.py | 2 | 7859 | # -*- coding: utf-8 -*-
"""
A collection of utilities for canonicalizing and inspecting graphs.
Among other things, they solve the problem of deterministic bnode
comparisons.
Warning: the time to canonicalize bnodes may increase exponentially on larger
graphs. Use with care!
Example of comparing two graphs::
>>> g1 = Graph().parse(format='n3', data='''
... @prefix : <http://example.org/ns#> .
... <http://example.org> :rel
... <http://example.org/same>,
... [ :label "Same" ],
... <http://example.org/a>,
... [ :label "A" ] .
... ''')
>>> g2 = Graph().parse(format='n3', data='''
... @prefix : <http://example.org/ns#> .
... <http://example.org> :rel
... <http://example.org/same>,
... [ :label "Same" ],
... <http://example.org/b>,
... [ :label "B" ] .
... ''')
>>>
>>> iso1 = to_isomorphic(g1)
>>> iso2 = to_isomorphic(g2)
These are not isomorphic::
>>> iso1 == iso2
False
Diff the two graphs::
>>> in_both, in_first, in_second = graph_diff(iso1, iso2)
Present in both::
>>> def dump_nt_sorted(g):
... for l in sorted(g.serialize(format='nt').splitlines()):
... if l: print l
>>> dump_nt_sorted(in_both)
<http://example.org> <http://example.org/ns#rel> <http://example.org/same> .
<http://example.org> <http://example.org/ns#rel> _:cb1373e1895e37293a13204e8048bdcdc7 .
_:cb1373e1895e37293a13204e8048bdcdc7 <http://example.org/ns#label> "Same" .
Only in first::
>>> dump_nt_sorted(in_first)
<http://example.org> <http://example.org/ns#rel> <http://example.org/a> .
<http://example.org> <http://example.org/ns#rel> _:cb12f880a18a57364752aaeb157f2e66bb .
_:cb12f880a18a57364752aaeb157f2e66bb <http://example.org/ns#label> "A" .
Only in second::
>>> dump_nt_sorted(in_second)
<http://example.org> <http://example.org/ns#rel> <http://example.org/b> .
<http://example.org> <http://example.org/ns#rel> _:cb0a343fb77929ad37cf00a0317f06b801 .
_:cb0a343fb77929ad37cf00a0317f06b801 <http://example.org/ns#label> "B" .
"""
# TODO:
# - Doesn't handle quads.
# - Add warning and/or safety mechanism before working on large graphs?
# - use this in existing Graph.isomorphic?
from rdflib.graph import Graph, ConjunctiveGraph, ReadOnlyGraphAggregate
from rdflib.term import BNode
import hashlib
class IsomorphicGraph(ConjunctiveGraph):
"""
Ported from <http://www.w3.org/2001/sw/DataAccess/proto-tests/tools/rdfdiff.py>
(Sean B Palmer's RDF Graph Isomorphism Tester).
"""
def __init__(self, **kwargs):
super(IsomorphicGraph, self).__init__(**kwargs)
def __eq__(self, other):
"""Graph isomorphism testing."""
if not isinstance(other, IsomorphicGraph):
return False
elif len(self) != len(other):
return False
elif list(self) == list(other):
return True # TODO: really generally cheaper?
return self.internal_hash() == other.internal_hash()
def __ne__(self, other):
"""Negative graph isomorphism testing."""
return not self.__eq__(other)
def internal_hash(self):
"""
This is defined instead of __hash__ to avoid a circular recursion
scenario with the Memory store for rdflib which requires a hash lookup
in order to return a generator of triples.
"""
return _TripleCanonicalizer(self).to_hash()
class _TripleCanonicalizer(object):
def __init__(self, graph, hashfunc=hash):
self.graph = graph
self.hashfunc = hashfunc
def to_hash(self):
return self.hashfunc(tuple(sorted(
map(self.hashfunc, self.canonical_triples()) )))
def canonical_triples(self):
for triple in self.graph:
yield tuple(self._canonicalize_bnodes(triple))
def _canonicalize_bnodes(self, triple):
for term in triple:
if isinstance(term, BNode):
yield BNode(value="cb%s"%self._canonicalize(term))
else:
yield term
def _canonicalize(self, term, done=False):
return self.hashfunc(tuple(sorted(self._vhashtriples(term, done))))
def _vhashtriples(self, term, done):
for triple in self.graph:
if term in triple:
yield tuple(self._vhashtriple(triple, term, done))
def _vhashtriple(self, triple, target_term, done):
for i, term in enumerate(triple):
if not isinstance(term, BNode):
yield term
elif done or (term == target_term):
yield i
else:
yield self._canonicalize(term, done=True)
def to_isomorphic(graph):
if isinstance(graph, IsomorphicGraph):
return graph
return IsomorphicGraph(store=graph.store)
def isomorphic(graph1, graph2):
"""
    Compare graphs for equality. Uses an algorithm to compute unique hashes
which takes bnodes into account.
Examples::
>>> g1 = Graph().parse(format='n3', data='''
... @prefix : <http://example.org/ns#> .
... <http://example.org> :rel <http://example.org/a> .
... <http://example.org> :rel <http://example.org/b> .
... <http://example.org> :rel [ :label "A bnode." ] .
... ''')
>>> g2 = Graph().parse(format='n3', data='''
... @prefix ns: <http://example.org/ns#> .
... <http://example.org> ns:rel [ ns:label "A bnode." ] .
... <http://example.org> ns:rel <http://example.org/b>,
... <http://example.org/a> .
... ''')
>>> isomorphic(g1, g2)
True
>>> g3 = Graph().parse(format='n3', data='''
... @prefix : <http://example.org/ns#> .
... <http://example.org> :rel <http://example.org/a> .
... <http://example.org> :rel <http://example.org/b> .
... <http://example.org> :rel <http://example.org/c> .
... ''')
>>> isomorphic(g1, g3)
False
"""
return _TripleCanonicalizer(graph1).to_hash() == _TripleCanonicalizer(graph2).to_hash()
def to_canonical_graph(g1):
"""
    Creates a canonical, read-only graph where all bnode ids are based on
    deterministic MD5 checksums, correlated with the graph contents.
"""
graph = Graph()
graph += _TripleCanonicalizer(g1, _md5_hash).canonical_triples()
return ReadOnlyGraphAggregate([graph])
def graph_diff(g1, g2):
"""
Returns three sets of triples: "in both", "in first" and "in second".
"""
# bnodes have deterministic values in canonical graphs:
cg1 = to_canonical_graph(g1)
cg2 = to_canonical_graph(g2)
in_both = cg1*cg2
in_first = cg1-cg2
in_second = cg2-cg1
return (in_both, in_first, in_second)
def _md5_hash(t):
h = hashlib.md5()
for i in t:
if isinstance(i, tuple):
h.update(_md5_hash(i))
else:
h.update(unicode(i).encode("utf8"))
return h.hexdigest()
_MOCK_BNODE = BNode()
def similar(g1, g2):
"""
Checks if the two graphs are "similar", by comparing sorted triples where
all bnodes have been replaced by a singular mock bnode (the
``_MOCK_BNODE``).
This is a much cheaper, but less reliable, alternative to the comparison
algorithm in ``isomorphic``.
"""
return all(t1 == t2 for (t1, t2) in _squashed_graphs_triples(g1, g2))
def _squashed_graphs_triples(g1, g2):
for (t1, t2) in zip(sorted(_squash_graph(g1)), sorted(_squash_graph(g2))):
yield t1, t2
def _squash_graph(graph):
return (_squash_bnodes(triple) for triple in graph)
def _squash_bnodes(triple):
return tuple((isinstance(t, BNode) and _MOCK_BNODE) or t for t in triple)
| bsd-3-clause | -1,866,312,328,738,348,000 | 30.947154 | 91 | 0.589133 | false |
damianam/easybuild-framework | easybuild/toolchains/gompic.py | 3 | 1465 | ##
# Copyright 2012-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for gompic compiler toolchain (includes GCC and OpenMPI and CUDA).
:author: Kenneth Hoste (Ghent University)
:author: Fotis Georgatos (Uni.Lu, NTUA)
"""
from easybuild.toolchains.gcccuda import GccCUDA
from easybuild.toolchains.mpi.openmpi import OpenMPI
class Gompic(GccCUDA, OpenMPI):
"""Compiler toolchain with GCC+CUDA and OpenMPI."""
NAME = 'gompic'
SUBTOOLCHAIN = GccCUDA.NAME
| gpl-2.0 | -6,589,957,370,738,518,000 | 36.564103 | 96 | 0.750853 | false |
ragnarula/python-osc | pythonosc/osc_bundle_builder.py | 5 | 1732 | """Build OSC bundles for client applications."""
from pythonosc import osc_bundle
from pythonosc import osc_message
from pythonosc.parsing import osc_types
# Shortcut to specify an immediate execution of messages in the bundle.
IMMEDIATELY = osc_types.IMMEDIATELY
class BuildError(Exception):
"""Error raised when an error occurs building the bundle."""
class OscBundleBuilder(object):
"""Builds arbitrary OscBundle instances."""
def __init__(self, timestamp):
"""Build a new bundle with the associated timestamp.
Args:
- timestamp: system time represented as a floating point number of
seconds since the epoch in UTC or IMMEDIATELY.
"""
self._timestamp = timestamp
self._contents = []
def add_content(self, content):
"""Add a new content to this bundle.
Args:
- content: Either an OscBundle or an OscMessage
"""
self._contents.append(content)
def build(self):
"""Build an OscBundle with the current state of this builder.
Raises:
- BuildError: if we could not build the bundle.
"""
dgram = b'#bundle\x00'
try:
dgram += osc_types.write_date(self._timestamp)
for content in self._contents:
if (type(content) == osc_message.OscMessage
or type(content) == osc_bundle.OscBundle):
size = content.size
dgram += osc_types.write_int(size)
dgram += content.dgram
else:
raise BuildError(
"Content must be either OscBundle or OscMessage"
"found {}".format(type(content)))
return osc_bundle.OscBundle(dgram)
except osc_types.BuildError as be:
raise BuildError('Could not build the bundle {}'.format(be))
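# --- Illustrative usage sketch; not part of the original module. ---
# Shows how a bundle is typically assembled. It assumes python-osc's companion
# OscMessageBuilder (pythonosc.osc_message_builder) is available, as it is in
# the same package as this builder.
if __name__ == "__main__":
  from pythonosc import osc_message_builder
  # Bundle a single message for immediate execution.
  bundle_builder = OscBundleBuilder(IMMEDIATELY)
  msg_builder = osc_message_builder.OscMessageBuilder(address="/filter")
  msg_builder.add_arg(0.5)  # one float argument
  bundle_builder.add_content(msg_builder.build())
  datagram = bundle_builder.build().dgram  # raw bytes ready to send over UDP
  print("bundle datagram size:", len(datagram))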
| unlicense | 5,738,941,090,748,354,000 | 28.862069 | 72 | 0.655312 | false |
ivanamihalek/tcga | tcga/01_somatic_mutations/032_gene_vs_TP53_background.py | 1 | 19181 | #!/usr/bin/python -u
#
#
# This source code is part of tcga, a TCGA processing pipeline, written by Ivana Mihalek.
# Copyright (C) 2014-2016 Ivana Mihalek.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see<http://www.gnu.org/licenses/>.
#
# Contact: [email protected]
#
#
import os
from time import time
import commands
from tcga_utils.mysql import *
from random import randrange, sample
from scipy import stats
#########################################
def simulation (M, Nr, Nb, l, number_of_iterations):
avg_number_of_double_labeled = 0
pval_le = 0.0 # probabilty of being less-or_equal-to
pval_ge = 0.0 # probabilty of being greater-or-equal-to
if not number_of_iterations > 0:
return [avg_number_of_double_labeled, pval_le, pval_ge]
for i in range(number_of_iterations):
#####
slots = []
for s in range(M):
slots.append({"r":0, "b":0})
number_of_double_labeled = 0
for j in range(Nr):
random_slot = randrange(M)
slots[random_slot]["r"] += 1
for j in range(Nb):
random_slot = randrange(M)
slots[random_slot]["b"] += 1
for s in range(M):
if slots[s]["r"]>0 and slots[s]["b"]>0:
#print " %3d %2d %2d " % (s, slots[s]["r"] , slots[s]["b"])
number_of_double_labeled += 1
#####
avg_number_of_double_labeled += number_of_double_labeled
if ( number_of_double_labeled <= l ): pval_le += 1.0
if ( number_of_double_labeled >= l ): pval_ge += 1.0
##################################
avg_number_of_double_labeled /= float(number_of_iterations)
pval_le /= float(number_of_iterations)
pval_ge /= float(number_of_iterations)
return [avg_number_of_double_labeled, pval_le, pval_ge]
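#########################################
# Illustrative note (not part of the original script): simulation() is a
# Monte Carlo estimate of the same independence model that expected() below
# computes in closed form, so for matching parameters the two should agree,
# e.g. (hypothetical call):
#
#   avg, p_le, p_ge = simulation(100, 10, 20, 3, 1000)
#   # avg is ~1.7 double-labeled slots; p_ge estimates P(overlap >= 3)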
#########################################
def read_cancer_names ():
full_name= {}
inf = open ("/home/ivana/pypeworks/tcga/cancer_names.txt", "r")
for line in inf:
line = line.rstrip()
field = line.split ("\t")
if field[0] == 'READ':
field[0] = 'REA'
full_name[field[0]] = field[1]
inf.close()
return full_name
#########################################
def expected (a, b, n):
expected = 0
# probability that there are no mutations of type a
p_a = 1.0
for i in range(a):
p = (1-1.0/n)
p_a *= p
# probability that there are no mutations of type b
p_b = 1.0
for i in range(b):
p = (1-1.0/n)
p_b *= p
# expected number of co-ocurrences of a and b
expected = (1-p_a)*(1-p_b)*n
#if a > 0 and b > 0 :
# print ">>>>>> %3d %3d %3d %5.2f %5.2f %5.2f " % ( a, b, n, 1-p_a, 1-p_b, expected)
return expected
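#########################################
# Illustrative worked example (not part of the original script) for expected()
# just above: with n = 100 samples, a = 10 mutations of one type and b = 20 of
# another dropped uniformly at random, a given sample carries at least one
# type-a mutation with probability 1 - (1 - 1/100)**10 ~ 0.096 and at least
# one type-b mutation with probability 1 - (1 - 1/100)**20 ~ 0.182, so
# expected(10, 20, 100) ~ 100 * 0.096 * 0.182 ~ 1.7 co-occurrences under
# independence.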
#########################################
def mkey (gene1, gene2):
mut_key = ""
if gene1 < gene2: # alphabetical
mut_key = gene1 + "_" + gene2
else:
mut_key = gene2 + "_" + gene1
return mut_key
#########################################
def main():
if len(sys.argv) == 1:
tp53_mode = True
else:
tp53_mode = False
full_name = read_cancer_names ()
db = connect_to_mysql()
cursor = db.cursor()
db_names = ["ACC", "BLCA", "BRCA", "CESC", "CHOL", "COAD","ESCA", "GBM", "HNSC", "KICH" ,"KIRC",
"KIRP","LAML", "LGG", "LIHC", "LUAD", "LUSC", "OV", "PAAD", "PCPG", "PRAD", "REA",
"SARC", "SKCM", "STAD", "TGCT", "THCA", "THYM", "UCEC", "UCS", "UVM"]
tables = ["somatic_mutations", "metastatic_mutations"]
pancan_samples = 0
pancan_ct_gene1 = {}
pancan_ct = {}
pancan_pt_gene1 = {}
pancan_pt = {}
pancan_coappearance = {}
number_of_samples_in_dbs_in_which_both_appear = {}
if tp53_mode:
gene_list = ['RPL11', 'RPL5', 'MDM2']
#print "number of different genes:"
#switch_to_db(cursor, 'baseline')
#qry = "select distinct approved_symbol from hgnc_id_translation where locus_type = 'gene with protein product' "
#rows = search_db(cursor, qry)
#gene_list = [row[0] for row in rows if row[0] != "TP53"]
#print "total genes:", len(gene_list)
#gene_list = gene_list[:30]
#gene_list = sample(gene_list, 500)
gene1 = 'TP53'
pancan_ct[gene1] = 0
pancan_pt[gene1] = 0
for j in range (len(gene_list)):
gene2 = gene_list[j]
pancan_ct_gene1[gene2] = 0
pancan_pt_gene1[gene2] = 0
pancan_ct[gene2] = 0
pancan_pt[gene2] = 0
mut_key = mkey(gene1, gene2)
pancan_coappearance[mut_key] = 0
number_of_samples_in_dbs_in_which_both_appear[gene2] = 0
else:
gene_list = [ x.upper() for x in sys.argv[1:] ]
#print gene_list
for i in range (len(gene_list)):
gene1 = gene_list[i]
pancan_ct[gene1] = 0
pancan_pt[gene1] = 0
for j in range (i+1,len(gene_list)):
gene2 = gene_list[j]
mut_key = mkey(gene1, gene2)
pancan_coappearance[mut_key] = 0
for db_name in db_names:
header = "\n"
header += "######################################" + "\n"
header += " %s %s " % (db_name, full_name[db_name])
header += "\n"
start = time()
#outf = sys.stdout
if tp53_mode:
outf = open ("coapp_tables/%s_tp53_coapps.table" % db_name, "w")
else:
outf = open ("coapp_tables/%s_coapps.table" % db_name, "w")
switch_to_db (cursor, db_name)
############################
db_entries = 0
for table in tables:
qry = "select count(1) from " + table
rows = search_db(cursor, qry)
db_entries += rows[0][0]
if db_entries==0: continue
############################
short_barcodes = []
rows = []
for table in tables:
qry = "select distinct sample_barcode_short from %s " % table
ret = search_db(cursor, qry)
if ret: rows += ret
number_of_patients = len(rows)
for row in rows:
short_barcodes.append(row[0])
co_appearance = {}
mut_ct = {}
patients_per_gene = {}
for gene in gene_list:
mut_ct[gene] = 0
patients_per_gene[gene] = 0
if tp53_mode:
mut_ct['TP53'] = 0
patients_per_gene['TP53'] = 0
if tp53_mode:
gene1 = 'TP53'
for j in range (len(gene_list)):
gene2 = gene_list[j]
mut_key = mkey(gene1, gene2)
co_appearance[mut_key] = 0
else:
for i in range (len(gene_list)):
gene1 = gene_list[i]
for j in range (i+1, len(gene_list)):
gene2 = gene_list[j]
mut_key = mkey (gene1, gene2)
co_appearance[mut_key] = 0
total_muts = 0
############################
for sample_barcode_short in short_barcodes:
############################
rows = []
for table in tables:
qry = "select hugo_symbol, variant_classification, aa_change "
qry += " from %s " % table
qry += " where sample_barcode_short = '%s' " % sample_barcode_short
qry += " and not variant_classification like '%s' " % "silent"
qry += " and not variant_classification like '%s' " % "RNA"
ret = search_db (cursor, qry)
if ret: rows += ret
if not rows: continue
mutations_found = {}
for row in rows:
[ hugo_symbol, variant_classification, aa_change] = row
if hugo_symbol in gene_list + ['TP53']:
# find genes that are mutated, once or twice, doesn't matter
mutations_found[hugo_symbol] = True
# here keep track of the actual number of mutations
mut_ct[hugo_symbol] += 1
############################
if mutations_found:
total_muts += len(rows)
for hugo_symbol in mutations_found.keys():
patients_per_gene[hugo_symbol] += 1
# make sure the key is always in the same order
if tp53_mode:
if mutations_found.has_key('TP53'):
for gene2 in mutations_found.keys():
if gene2=='TP53': continue
mut_key = mkey (gene1, gene2)
co_appearance[mut_key] += 1
else:
all_mutated_genes_from_the_list = mutations_found.keys();
for i in range (len(all_mutated_genes_from_the_list)):
gene1 = all_mutated_genes_from_the_list[i]
for j in range (i+1, len(all_mutated_genes_from_the_list)):
gene2 = all_mutated_genes_from_the_list[j]
mut_key = mkey (gene1, gene2)
co_appearance[mut_key] += 1
pancan_samples += number_of_patients
header += "number of different patients: " + str(number_of_patients)+ "\n"
header += "total number of entries: " + str(db_entries)+ "\n"
header += "number of functional mutations (not silent and not 'RNA') " + str(total_muts)+ "\n"
header += " %8s %4s %4s %8s %4s %4s %15s %10s %10s %10s " % ("gene1", "#pts1", "#muts1", "gene2",
"#pts2", "#muts2", "co-appearance",
"expct_co-app", "pval <=", "pval >=")
#header += "\n"
outstr = ""
if tp53_mode:
gene1 = 'TP53'
ct1 = mut_ct [gene1]
pt1 = patients_per_gene[gene1]
if not pt1: continue
if float(pt1)/number_of_patients < 0.001: continue
for j in range (len(gene_list)):
gene2 = gene_list[j]
mut_key = mkey (gene1, gene2)
pancan_coappearance[mut_key] += co_appearance[mut_key]
appears_together = co_appearance[mut_key]
ct2 = mut_ct [gene2]
if not ct2:
print "mutation ct 0 for ", gene2
continue
pt2 = patients_per_gene[gene2]
if float(pt2)/number_of_patients < 0.00001:
print "patients per ", gene2, " number of patients:", number_of_patients
continue
# the number of times gene1 appears in tumors in which both gene1 and gene2 appear
pancan_ct_gene1[gene2] += ct1
pancan_pt_gene1[gene2] += pt1
pancan_ct[gene2] += ct2
pancan_pt[gene2] += pt2
number_of_samples_in_dbs_in_which_both_appear [gene2] += number_of_patients
expctd = float(pt1)/pancan_samples*pt2
if abs((expctd - appears_together)/expctd) < 0.1: continue
a = pt2 - appears_together # rpl5 mutated and p53 wt
b = pancan_samples - pt1 - pt2 + appears_together # rpl5 wt and p53 wt (in pt1 andp2 we subtracted the overlap twice
c = appears_together # rpl5 mutated and p53 mutated
d = pt1 - appears_together # rpl5 wt and p53 mutated
if expctd > appears_together:
# pval to have smaller overlap than expected - that is greater overalp with wt p53 type
[odds, pval] = stats.fisher_exact([[a, b], [c, d]], "greater")
pval_lt = pval
pval_gt = 1.0
else:
[odds, pval] = stats.fisher_exact([[a, b], [c, d]], "less")
pval_lt = 1.0
pval_gt = pval
outstr += "%8s %4d %4d %8s %4d %4d " % (gene1, pt1, ct1, gene2, pt2, ct2)
outstr += "%15d %10.2f %10.4f %10.4f" % (co_appearance[mut_key], expctd, pval_lt, pval_gt)
outstr += "\n"
else:
for i in range (len(gene_list)):
gene1 = gene_list[i]
ct1 = mut_ct [gene1]
pt1 = patients_per_gene[gene1]
pancan_ct[gene1] += ct1
pancan_pt[gene1] += pt1
for j in range (i+1,len(gene_list)):
gene2 = gene_list[j]
mut_key = mkey (gene1, gene2)
pancan_coappearance[mut_key] += co_appearance[mut_key]
ovlp = co_appearance[mut_key]
number_of_iterations = 2*number_of_patients
ct2 = mut_ct [gene2]
if not ct2: continue
pt2 = patients_per_gene[gene2]
# cmd = "coapp_sim %d %d %d %d %d " % (number_of_patients, pt1, pt2, ovlp, number_of_iterations)
# [avg, pval_le, pval_ge] = [float(x) for x in commands.getoutput(cmd).split()]#
a = pt2 - ovlp # rpl5 mutated and p53 wt
b = number_of_patients - pt1 - pt2 + ovlp # rpl5 wt and p53 wt (in pt1 andp2 we subtracted the overlap twice
c = ovlp # rpl5 mutated and p53 mutated
d = pt1 - ovlp # rpl5 wt and p53 mutated
[odds,pval] = stats.fisher_exact([[a, b], [c, d]], "greater")
outstr += "%8s %4d %4d %8s %4d %4d" % (gene1, pt1, ct1, gene2, pt2, ct2)
outstr += "%15d %10.2f %10.2f" % ( co_appearance[mut_key], float(pt1)/number_of_patients*pt2, pval)
outstr += "\n"
if outstr:
print >> outf, header
print >> outf, outstr
outf.close()
print db_name, "done in %8.2f min" % ( (time() - start)/60 )
#outf = sys.stdout
if tp53_mode:
outf = open ("coapp_tables/pancan_tp53_coapps.table", "w")
else:
outf = open ("coapp_tables/pancan_coapps.table", "w")
print >> outf, "######################################"
print >> outf, "pan-cancer"
print >> outf, " %8s %4s %4s %8s %4s %4s %15s %15s %10s %10s " % ("gene1", "#pts1", "#muts1", "gene2",
"#pts2", "#muts2", "co-appearance",
"expct_co-app", "pval <=", "pval >=")
if tp53_mode:
        gene1 = 'TP53'
ct1 = pancan_ct[gene1]
pt1 = pancan_pt[gene1]
start = time()
for j in range (len(gene_list)):
gene2 = gene_list[j]
print ">>>>> ", gene2
mut_key = mkey (gene1, gene2)
appears_together = pancan_coappearance[mut_key]
number_of_patients = number_of_samples_in_dbs_in_which_both_appear[gene2]
ct1 = pancan_ct_gene1[gene2]
pt1 = pancan_pt_gene1[gene2]
if not pt1 or float(pt1)/number_of_patients < 0.00001: continue
ct2 = pancan_ct[gene2]
pt2 = pancan_pt[gene2]
if not pt2 or float(pt2)/number_of_patients < 0.00001: continue
expctd = float(pt1)/number_of_patients*pt2
if abs((expctd - appears_together)/expctd) < 0.0001: continue
a = pt2 - appears_together # rpl5 mutated and p53 wt
b = number_of_patients - pt1 - pt2 + appears_together # rpl5 wt and p53 wt (in pt1 andp2 we subtracted the overlap twice
c = appears_together # rpl5 mutated and p53 mutated
d = pt1 - appears_together # rpl5 wt and p53 mutated
if expctd > appears_together:
[odds,pval] = stats.fisher_exact([[a, b], [c, d]], "greater")
pval_lt = pval
pval_gt = 1.0
else:
[odds,pval] = stats.fisher_exact([[a, b], [c, d]], "less")
pval_lt = 1.0
pval_gt = pval
print >> outf, "%8s %4d %4d %8s %4d %4d " % (gene1, pt1, ct1, gene2, pt2, ct2),
print >> outf, "%15d %10.2f %10.4f %10.4f" % ( appears_together, expctd, pval_lt, pval_gt)
#if not j%10:
# print " %4d time: %8.2f min" % (j, (time()-start)/60 )
# start = time()
else:
for i in range (len(gene_list)):
gene1 = gene_list[i]
ct1 = pancan_ct[gene1]
pt1 = pancan_pt[gene1]
for j in range (i+1,len(gene_list)):
gene2 = gene_list[j]
mut_key = mkey (gene1, gene2)
appears_together = pancan_coappearance[mut_key]
ct2 = pancan_ct[gene2]
pt2 = pancan_pt[gene2]
number_of_iterations = 4*pancan_samples
#[avg, pval_le, pval_ge] = simulation (pancan_samples, ct1, ct2, appears_together, number_of_iterations)
#cmd = "coapp_sim %d %d %d %d %d " % (pancan_samples, ct1, ct2, appears_together, number_of_iterations)
#[avg, pval_le, pval_ge] = [float(x) for x in commands.getoutput(cmd).split()]
a = pt2 - appears_together # rpl5 mutated and p53 wt
b = pancan_samples - pt1 - pt2 + appears_together # rpl5 wt and p53 wt (in pt1 andp2 we subtracted the overlap twice
c = appears_together # rpl5 mutated and p53 mutated
d = pt1 - appears_together # rpl5 wt and p53 mutated
[odds,pval] = stats.fisher_exact([[a, b], [c, d]], "greater")
print >> outf, "%8s %4d %4d %8s %4d %4d " % (gene1, pt1, ct1, gene2, pt2, ct2),
print >> outf, "%15d %10.2f %10.2f" % (appears_together, float(pt1)/pancan_samples*pt2, pval)
cursor.close()
db.close()
#########################################
if __name__ == '__main__':
main()
| gpl-3.0 | -9,180,114,208,996,389,000 | 40.697826 | 132 | 0.478286 | false |
sajuptpm/neutron-ipam | neutron/plugins/vmware/api_client/request.py | 1 | 12202 | # Copyright 2012 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from abc import ABCMeta
from abc import abstractmethod
import copy
import eventlet
import httplib
import time
import six
import six.moves.urllib.parse as urlparse
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import ctrl_conn_to_str
LOG = logging.getLogger(__name__)
DEFAULT_REQUEST_TIMEOUT = 30
DEFAULT_HTTP_TIMEOUT = 10
DEFAULT_RETRIES = 2
DEFAULT_REDIRECTS = 2
DEFAULT_API_REQUEST_POOL_SIZE = 1000
DEFAULT_MAXIMUM_REQUEST_ID = 4294967295
DOWNLOAD_TIMEOUT = 180
@six.add_metaclass(ABCMeta)
class ApiRequest(object):
'''An abstract baseclass for all ApiRequest implementations.
This defines the interface and property structure for both eventlet and
gevent-based ApiRequest classes.
'''
# List of allowed status codes.
ALLOWED_STATUS_CODES = [
httplib.OK,
httplib.CREATED,
httplib.NO_CONTENT,
httplib.MOVED_PERMANENTLY,
httplib.TEMPORARY_REDIRECT,
httplib.BAD_REQUEST,
httplib.UNAUTHORIZED,
httplib.FORBIDDEN,
httplib.NOT_FOUND,
httplib.CONFLICT,
httplib.INTERNAL_SERVER_ERROR,
httplib.SERVICE_UNAVAILABLE
]
@abstractmethod
def start(self):
pass
@abstractmethod
def join(self):
pass
@abstractmethod
def copy(self):
pass
def _issue_request(self):
'''Issue a request to a provider.'''
conn = (self._client_conn or
self._api_client.acquire_connection(True,
copy.copy(self._headers),
rid=self._rid()))
if conn is None:
error = Exception(_("No API connections available"))
self._request_error = error
return error
url = self._url
LOG.debug(_("[%(rid)d] Issuing - request url: %(conn)s "
"body: %(body)s"),
{'rid': self._rid(), 'conn': self._request_str(conn, url),
'body': self._body})
issued_time = time.time()
is_conn_error = False
is_conn_service_unavail = False
response = None
try:
redirects = 0
while (redirects <= self._redirects):
# Update connection with user specified request timeout,
# the connect timeout is usually smaller so we only set
# the request timeout after a connection is established
if conn.sock is None:
conn.connect()
conn.sock.settimeout(self._http_timeout)
elif conn.sock.gettimeout() != self._http_timeout:
conn.sock.settimeout(self._http_timeout)
headers = copy.copy(self._headers)
cookie = self._api_client.auth_cookie(conn)
if cookie:
headers["Cookie"] = cookie
gen = self._api_client.config_gen
if gen:
headers["X-Nvp-Wait-For-Config-Generation"] = gen
LOG.debug(_("Setting X-Nvp-Wait-For-Config-Generation "
"request header: '%s'"), gen)
try:
conn.request(self._method, url, self._body, headers)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.warn(_("[%(rid)d] Exception issuing request: "
"%(e)s"),
{'rid': self._rid(), 'e': e})
response = conn.getresponse()
response.body = response.read()
response.headers = response.getheaders()
elapsed_time = time.time() - issued_time
LOG.debug(_("[%(rid)d] Completed request '%(conn)s': "
"%(status)s (%(elapsed)s seconds)"),
{'rid': self._rid(),
'conn': self._request_str(conn, url),
'status': response.status,
'elapsed': elapsed_time})
new_gen = response.getheader('X-Nvp-Config-Generation', None)
if new_gen:
LOG.debug(_("Reading X-Nvp-config-Generation response "
"header: '%s'"), new_gen)
if (self._api_client.config_gen is None or
self._api_client.config_gen < int(new_gen)):
self._api_client.config_gen = int(new_gen)
if response.status == httplib.UNAUTHORIZED:
if cookie is None and self._url != "/ws.v1/login":
# The connection still has no valid cookie despite
                        # attempts to authenticate and the request has failed
                        # with unauthorized status code. If this isn't a
                        # request to authenticate, we should abort the
# request since there is no point in retrying.
self._abort = True
else:
# If request is unauthorized, clear the session cookie
# for the current provider so that subsequent requests
                        # to the same provider trigger re-authentication.
                        self._api_client.set_auth_cookie(conn, None)
elif response.status == httplib.SERVICE_UNAVAILABLE:
is_conn_service_unavail = True
if response.status not in [httplib.MOVED_PERMANENTLY,
httplib.TEMPORARY_REDIRECT]:
break
elif redirects >= self._redirects:
LOG.info(_("[%d] Maximum redirects exceeded, aborting "
"request"), self._rid())
break
redirects += 1
conn, url = self._redirect_params(conn, response.headers,
self._client_conn is None)
if url is None:
response.status = httplib.INTERNAL_SERVER_ERROR
break
LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"),
{'rid': self._rid(),
'conn': self._request_str(conn, url)})
# yield here, just in case we are not out of the loop yet
eventlet.greenthread.sleep(0)
# If we receive any of these responses, then
# our server did not process our request and may be in an
# errored state. Raise an exception, which will cause the
# the conn to be released with is_conn_error == True
# which puts the conn on the back of the client's priority
# queue.
if (response.status == httplib.INTERNAL_SERVER_ERROR and
response.status > httplib.NOT_IMPLEMENTED):
LOG.warn(_("[%(rid)d] Request '%(method)s %(url)s' "
"received: %(status)s"),
{'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': response.status})
                raise Exception(_('Server error return: %s') % response.status)
return response
except Exception as e:
if isinstance(e, httplib.BadStatusLine):
msg = (_("Invalid server response"))
else:
msg = unicode(e)
if response is None:
elapsed_time = time.time() - issued_time
LOG.warn(_("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
"(%(elapsed)s seconds)"),
{'rid': self._rid(), 'conn': self._request_str(conn, url),
'msg': msg, 'elapsed': elapsed_time})
self._request_error = e
is_conn_error = True
return e
finally:
# Make sure we release the original connection provided by the
# acquire_connection() call above.
if self._client_conn is None:
self._api_client.release_connection(conn, is_conn_error,
is_conn_service_unavail,
rid=self._rid())
def _redirect_params(self, conn, headers, allow_release_conn=False):
"""Process redirect response, create new connection if necessary.
Args:
conn: connection that returned the redirect response
headers: response headers of the redirect response
allow_release_conn: if redirecting to a different server,
release existing connection back to connection pool.
Returns: Return tuple(conn, url) where conn is a connection object
to the redirect target and url is the path of the API request
"""
url = None
for name, value in headers:
if name.lower() == "location":
url = value
break
if not url:
LOG.warn(_("[%d] Received redirect status without location header"
" field"), self._rid())
return (conn, None)
# Accept location with the following format:
# 1. /path, redirect to same node
# 2. scheme://hostname:[port]/path where scheme is https or http
# Reject others
# 3. e.g. relative paths, unsupported scheme, unspecified host
result = urlparse.urlparse(url)
if not result.scheme and not result.hostname and result.path:
if result.path[0] == "/":
if result.query:
url = "%s?%s" % (result.path, result.query)
else:
url = result.path
return (conn, url) # case 1
else:
LOG.warn(_("[%(rid)d] Received invalid redirect location: "
"'%(url)s'"), {'rid': self._rid(), 'url': url})
return (conn, None) # case 3
elif result.scheme not in ["http", "https"] or not result.hostname:
LOG.warn(_("[%(rid)d] Received malformed redirect "
"location: %(url)s"), {'rid': self._rid(), 'url': url})
return (conn, None) # case 3
# case 2, redirect location includes a scheme
# so setup a new connection and authenticate
if allow_release_conn:
self._api_client.release_connection(conn)
conn_params = (result.hostname, result.port, result.scheme == "https")
conn = self._api_client.acquire_redirect_connection(conn_params, True,
self._headers)
if result.query:
url = "%s?%s" % (result.path, result.query)
else:
url = result.path
return (conn, url)
def _rid(self):
'''Return current request id.'''
return self._request_id
@property
def request_error(self):
'''Return any errors associated with this instance.'''
return self._request_error
def _request_str(self, conn, url):
'''Return string representation of connection.'''
return "%s %s/%s" % (self._method, ctrl_conn_to_str(conn), url)
| apache-2.0 | -4,152,591,981,171,219,000 | 41.221453 | 79 | 0.527864 | false |
azizk/dil | scripts/path.py | 1 | 13790 | # -*- coding: utf-8 -*-
# Author: Aziz Köksal
# License: zlib/libpng
from __future__ import unicode_literals
import os, shutil
from re import compile as re_compile
from sys import version_info as vi
from glob import glob
from codecs import open
__all__ = ["Path", "Paths"]
op = os.path
def isiterable(x):
""" Returns True for iterable objects, strings not included. """
return hasattr(x, '__iter__')
class Path(unicode):
""" Models a path in an object oriented way. """
sep = os.sep # File system path separator: '/' or '\'.
pathsep = os.pathsep # Separator in the PATH environment variable.
def __new__(cls, *parts):
if len(parts) == 1 and isiterable(parts[0]):
parts = parts[0]
return unicode.__new__(cls, op.join(*parts) if len(parts) else '')
def __div__(self, path_s):
""" Joins this path with another path.
Path('/home/a') / 'bc.d' == '/home/a/bc.d'
or:
Returns a list of paths prefixed with 'self'.
Path('/home/a') / ['bc.d', 'ef.g'] == \
['/home/a/bc.d', '/home/a/ef.g'] """
if isiterable(path_s):
return Paths(Path(self, p) for p in path_s)
else:
return Path(self, path_s)
def __rdiv__(self, path_s):
""" Joins this path with another path.
'/home/a' / Path('bc.d') == '/home/a/bc.d'
or:
Returns a list of paths postfixed with 'self'.
['/var', '/'] / Path('tmp') == ['/var/tmp', '/tmp'] """
if isiterable(path_s):
return Paths(Path(p, self) for p in path_s)
else:
return Path(path_s, self)
def __add__(self, path):
""" Path('/home/a') + 'bc.d' == '/home/abc.d'
or:
Path('/home/a') + ['b', 'c'] == \
['/home/ab', '/home/ac'] """
if isiterable(path):
return Paths(self + p for p in path)
else:
return Path(unicode.__add__(self, path))
def __radd__(self, path):
""" '/home/a' + Path('bc.d') == '/home/abc.d'
or:
['/home/a', '/home/b'] + Path('c') == \
['/home/ac', '/home/bc'] """
if isiterable(path):
return Paths(p + self for p in path)
else:
return Path(unicode.__add__(unicode(path), self))
def __mod__(self, args):
""" Path('/bin%d') % 32 == '/bin32' """
return Path(unicode.__mod__(self, args))
def format(self, *args, **kwargs):
""" Path('/{x}/lib').format(x='usr') == '/usr/lib' """
return Path(unicode.format(self, *args, **kwargs))
def __repr__(self):
return "Path(%s)" % unicode.__repr__(self)
@property
def uni(self):
""" Returns itself as a Unicode string. """
return unicode(self)
@property
def name(self):
""" Path('/home/a/bc.d').name == 'bc.d'
Path('/home/a/.').name == '.'
Path('/home/a/').name == ''
Path('/home/a').name == 'a' """
return Path(op.basename(self))
@property
def namebase(self):
""" Path('/home/a/bc.d').namebase == 'bc'
Path('/home/a/bc.').namebase == 'bc'
Path('/home/a/bc').namebase == 'bc'
Path('/home/a/.d').namebase == '.d'
Path('/home/a/.').namebase == '.'
Path('/home/a/').namebase == ''
"""
return self.name.noext
@property
def ext(self):
""" Path('/home/a/bc.d').ext == 'd' """
return Path(self.dext[1:])
@property
def dext(self):
""" Path('/home/a/bc.d').dext == '.d' """
return Path(op.splitext(self)[1])
@property
def noext(self):
""" Path('/home/a/bc.d').noext == '/home/a/bc' """
return Path(op.splitext(self)[0])
@property
def abspath(self):
""" Path('./a/bc.d').abspath == '/home/a/bc.d' """
return Path(op.abspath(self))
@property
def realpath(self):
""" Resolves symbolic links. """
return Path(op.realpath(self))
@property
def normpath(self):
""" Path('/home/x/.././a//bc.d').normpath == '/home/a/bc.d' """
return Path(op.normpath(self))
@property
def folder(self):
""" Returns the folder of this path.
Path('/home/a/bc.d').folder == '/home/a'
Path('/home/a/').folder == '/home/a'
Path('/home/a').folder == '/home'
Path('/').folder == '/' """
return Path(op.dirname(self))
def up(self, n=1):
""" Returns a new Path going n levels back in the directory hierarchy. """
while n:
n -= 1
self = self.folder
return self
@property
def exists(self):
""" Returns True if the path exists, but False for broken symlinks."""
return op.exists(self)
@property
def lexists(self):
""" Returns True if the path exists. Also True for broken symlinks. """
return op.lexists(self)
@property
def atime(self):
""" Returns last accessed timestamp. """
return op.getatime(self)
@property
def mtime(self):
""" Returns last modified timestamp. """
return op.getmtime(self)
@property
def ctime(self):
""" Returns last changed timestamp. """
return op.getctime(self)
@property
def size(self):
""" Returns the byte size of a file. """
return op.getsize(self)
@property
def isabs(self):
""" Returns True if the path is absolute. """
return op.isabs(self)
@property
def isfile(self):
""" Returns True if the path is a file. """
return op.isfile(self)
@property
def isdir(self):
""" Returns True if the path is a directory. """
return op.isdir(self)
@property
def islink(self):
""" Returns True if the path is a symlink. """
return op.islink(self)
@property
def ismount(self):
""" Returns True if the path is a mount point. """
return op.ismount(self)
@classmethod
def supports_unicode(cls):
""" Returns True if the system can handle Unicode file names. """
    return op.supports_unicode_filenames
@classmethod
def cwd(cls):
""" Return the current working directory. """
return Path(os.getcwd())
def chdir(self):
""" Changes the current working directory to 'self'. """
os.chdir(self)
return self
def mkdir(self, mode=0777):
""" Creates a directory (and its parents), if it doesn't already exist. """
if not self.exists:
os.makedirs(self, mode)
return self
mk = mkdir
def remove(self):
""" Removes a file, symlink or directory tree. """
if self.lexists:
if self.isfile:
os.remove(self)
else:
shutil.rmtree(self, ignore_errors=True)
return self
rm = remove
def copy(self, to):
""" Copies a file or a directory tree to another path. """
if self.isfile:
shutil.copy(self, to)
else:
shutil.copytree(self, to)
return self
cp = copy
def move(self, to):
""" Moves a file or directory to another path.
Deletes the destination first, if existent. """
to = Path(to)
if to.lexists:
if to.islink:
if not to.exists:
to.remove() # Broken symlink.
else:
return self.move(to.realpath)
elif to.isfile:
to.remove()
else: # Delete file or dir with the same name in the destination folder.
to = (to/self.normpath.name).remove()
shutil.move(self, to)
return self
mv = move
def rename(self, to):
""" Renames a file or directory. May throw an OSError. """
os.rename(self, to)
return self
def renames(self, to):
""" Recursively renames a file or directory. """
os.renames(self, to)
return self
def walk(self, **kwargs):
""" Returns a generator that walks through a directory tree. """
if vi[:2] < (2,6): # Only Python 2.6 or newer supports followlinks.
kwargs.pop("followlinks", None)
return os.walk(self, **kwargs)
def glob(self, pattern):
""" Matches the file name pattern, taking 'self' as the folder.
Returns a Paths object containing the matches. """
return Paths(glob(unicode(self/pattern)))
def rxglob(self, byname=None, bypath=None, prunedir=None):
""" Walks through a dir tree using regular expressions.
Also accepts callback functions.
Returns a Paths object containing the matches. """
def check(rx):
return rx if callable(rx) else rx.search if hasattr(rx, "search") else \
re_compile(rx).search if rx else lambda x: False
byname, bypath, prunedir = map(check, (byname, bypath, prunedir))
found = Paths()
for root, dirs, files in self.walk(followlinks=True):
dirs[:] = [dir for dir in dirs if not prunedir(Path(root, dir))]
for filename in files:
fullpath = Path(root, filename)
if byname(Path(filename)) or bypath(fullpath):
found.append(fullpath)
return found
def open(self, mode='rb', encoding='utf-8'):
""" Opens a file with an encoding (default=UTF-8.) """
return open(self, mode=mode, encoding=encoding)
def write(self, content, mode='w', encoding='utf-8'):
""" Writes content to a file. """
f = self.open(mode, encoding)
f.write(content)
f.close()
return self
def read(self, mode='rb', encoding='utf-8'):
""" Reads the contents of a file. """
f = self.open(mode, encoding)
content = f.read()
f.close()
return content
class Paths(list):
""" A list of Path objects with convenience functions. """
def __init__(self, *paths):
if len(paths) == 1 and isiterable(paths[0]):
paths = paths[0]
list.__init__(self, (p if isinstance(p, Path) else Path(p) for p in paths))
def __div__(self, path_s):
""" Paths('/a', '/b') / 'c.d' == Paths('/a/c.d', '/b/c.d')
or:
Paths('a/b', 'c/d') / ['w.x', 'y.z'] == \
Paths('a/b/w.x', 'a/b/y.z', 'c/d/w.x', 'c/d/y.z') """
if isiterable(path_s):
return Paths(Path(p1, p2) for p1 in self for p2 in path_s)
else:
return Paths(Path(p, path_s) for p in self)
def __rdiv__(self, path_s):
""" '/home' / Paths('a', 'b') == Paths('/home/a', '/home/b')
or:
['a/b', 'c/d'] / Paths('w.x', 'y.z') == \
Paths('a/b/w.x', 'a/b/y.z', 'c/d/w.x', 'c/d/y.z') """
if isiterable(path_s):
return Paths(Path(p1, p2) for p1 in path_s for p2 in self)
else:
return Paths(Path(path_s, p) for p in self)
# NB: adding any iterable object always results in a Paths object
# which may be unwanted or unexpected.
def __add__(self, path_s):
""" Paths('/a', '/b') + '.c' == Paths('/a.c', 'b.c')
Paths('a', 'b') + ['c'] == Paths('a', 'b', 'c') """
if isiterable(path_s):
return Paths(list.__add__(self, path_s))
else:
return Paths(p + unicode(path_s) for p in self)
def __radd__(self, path_s):
""" '/home/' + Paths('a', 'b') == Paths('/home/a', '/home/b')
['a'] + Paths('b', 'c') == Paths('a', 'b', 'c') """
if isiterable(path_s):
return Paths(list.__add__(path_s, self))
else:
return Paths(unicode(path_s) + p for p in self)
def __mod__(self, args):
return Paths(p.__mod__(args) for p in self)
def format(self, *args, **kwargs):
return Paths(p.format(*args, **kwargs) for p in self)
def __repr__(self):
return "Paths(%s)" % list.__repr__(self)[1:-1]
def common(self):
return Path(op.commonprefix(self))
@property
def names(self):
return Paths(p.name for p in self)
@property
def bases(self):
return Paths(p.namebase for p in self)
@property
def exts(self):
return Paths(p.ext for p in self)
@property
def dexts(self):
return Paths(p.dext for p in self)
@property
def noexts(self):
return Paths(p.noext for p in self)
@property
def abspaths(self):
return Paths(p.abspath for p in self)
@property
def realpaths(self):
return Paths(p.realpath for p in self)
@property
def normpaths(self):
return Paths(p.normpath for p in self)
@property
def folders(self):
return Paths(p.folder for p in self)
def up(self, n=1):
for p in self:
p.up(n)
return self
@property
def exist(self):
return [p.exists for p in self]
@property
def lexist(self):
return [p.lexists for p in self]
@property
def atimes(self):
return [p.atime for p in self]
@property
def mtimes(self):
return [p.mtime for p in self]
@property
def ctimes(self):
return [p.ctime for p in self]
@property
def sizes(self):
return [p.size for p in self]
@property
def isabs(self):
return [p.isabs for p in self]
@property
def isfile(self):
return [p.isfile for p in self]
@property
def isdir(self):
return [p.isdir for p in self]
@property
def islink(self):
return [p.islink for p in self]
@property
def ismount(self):
return [p.ismount for p in self]
def mkdir(self, mode=0777):
for p in self: p.mkdir(mode)
return self
mk = mkdirs = mkdir
def remove(self):
for p in self: p.rm()
return self
rm = remove
def copy(self, to):
if isiterable(to):
map(Path.cp, self, to)
else:
for p in self:
p.cp(to)
return self
cp = copy
def move(self, to):
if isiterable(to):
map(Path.mv, self, to)
else:
for p in self:
p.mv(to)
return self
mv = move
def rename(self, to):
map(Path.rename, self, to)
return self
def renames(self, to):
map(Path.renames, self, to)
return self
def walk(self, **kwargs):
from itertools import chain
return chain(*(p.walk(**kwargs) for p in self))
def glob(self, pattern):
return Paths(q for p in self for q in glob(unicode(p/pattern)))
def rxglob(self, *args, **kwargs):
return Paths(q for p in self for q in p.rxglob(*args, **kwargs))
def open(self, **kwargs):
return [p.open(**kwargs) for p in self]
def write(self, content, **kwargs):
for p in self:
p.write(content, **kwargs)
return self
def read(self, **kwargs):
return [p.read(**kwargs) for p in self]
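# --- Illustrative usage sketch; not part of the original module. ---
# Exercises a few of the conveniences defined above; uses /tmp as a scratch
# location purely for illustration.
if __name__ == '__main__':
  demo = Path('/tmp') / 'path_demo'    # '/tmp/path_demo'
  demo.mkdir()                         # create it (and parents) if missing
  (demo / 'hello.txt').write('hi')     # UTF-8 write via codecs.open
  print demo.glob('*.txt').names       # -> Paths(Path(u'hello.txt'))
  demo.remove()                        # clean up the scratch directory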
| gpl-3.0 | -7,201,101,554,351,409,000 | 25.931641 | 79 | 0.581623 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/models/boot_diagnostics_instance_view_py3.py | 5 | 1413 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BootDiagnosticsInstanceView(Model):
"""The instance view of a virtual machine boot diagnostics.
:param console_screenshot_blob_uri: The console screenshot blob URI.
:type console_screenshot_blob_uri: str
:param serial_console_log_blob_uri: The Linux serial console log blob Uri.
:type serial_console_log_blob_uri: str
"""
_attribute_map = {
'console_screenshot_blob_uri': {'key': 'consoleScreenshotBlobUri', 'type': 'str'},
'serial_console_log_blob_uri': {'key': 'serialConsoleLogBlobUri', 'type': 'str'},
}
def __init__(self, *, console_screenshot_blob_uri: str=None, serial_console_log_blob_uri: str=None, **kwargs) -> None:
super(BootDiagnosticsInstanceView, self).__init__(**kwargs)
self.console_screenshot_blob_uri = console_screenshot_blob_uri
self.serial_console_log_blob_uri = serial_console_log_blob_uri
| mit | -1,333,872,485,965,438,700 | 43.15625 | 122 | 0.634112 | false |
nerdvegas/rez | src/rez/vendor/pygraph/algorithms/heuristics/chow.py | 11 | 2760 | # Copyright (c) 2008-2009 Pedro Matiello <[email protected]>
# Salim Fadhley <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Edmond Chow's heuristic for A*.
"""
# Imports
from rez.vendor.pygraph.algorithms.minmax import shortest_path
class chow(object):
"""
An implementation of the graph searching heuristic proposed by Edmond Chow.
Remember to call the C{optimize()} method before the heuristic search.
For details, check: U{http://www.edmondchow.com/pubs/levdiff-aaai.pdf}.
"""
def __init__(self, *centers):
"""
Initialize a Chow heuristic object.
"""
self.centers = centers
self.nodes = {}
def optimize(self, graph):
"""
Build a dictionary mapping each pair of nodes to a number (the distance between them).
@type graph: graph
@param graph: Graph.
"""
for center in self.centers:
shortest_routes = shortest_path(graph, center)[1]
for node, weight in list(shortest_routes.items()):
self.nodes.setdefault(node, []).append(weight)
def __call__(self, start, end):
"""
Estimate how far start is from end.
@type start: node
@param start: Start node.
@type end: node
@param end: End node.
"""
assert len( list(self.nodes.keys()) ) > 0, "You need to optimize this heuristic for your graph before it can be used to estimate."
cmp_sequence = list(zip( self.nodes[start], self.nodes[end] ))
chow_number = max( abs( a-b ) for a,b in cmp_sequence )
return chow_number
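# Illustrative usage sketch of the pattern the class docstring describes:
# optimize() first, then hand the instance to an A*-style search. The graph
# construction calls and the heuristic_search import are assumptions about the
# surrounding pygraph API rather than verified usage.
if __name__ == '__main__':
    from rez.vendor.pygraph.classes.graph import graph
    from rez.vendor.pygraph.algorithms.minmax import heuristic_search
    gr = graph()
    gr.add_nodes(['a', 'b', 'c', 'd'])
    for edge in [('a', 'b'), ('b', 'c'), ('c', 'd'), ('a', 'd')]:
        gr.add_edge(edge)
    heuristic = chow('a', 'd')  # use a couple of well-connected nodes as centers
    heuristic.optimize(gr)      # precompute shortest paths from each center
    print(heuristic_search(gr, 'a', 'c', heuristic))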
| lgpl-3.0 | 8,333,658,013,238,419,000 | 34.844156 | 138 | 0.650362 | false |
netanelravid/screener | screener/utils/decorators.py | 1 | 3900 | from functools import wraps
from os import makedirs
from os.path import isdir
from future.utils import raise_with_traceback
from os import path
from requests import (
get as http_get_request,
HTTPError,
)
from requests.exceptions import (
ConnectionError,
ConnectTimeout
)
from screener.exceptions import (
InvalidTargetException,
BadStatusCode,
BAD_TARGET_ERRORS,
UnknownError,
ConnectionTimeout,
DuplicateFile,
CrawlerError)
from screener.settings import (
SUCCESS_PRINT,
FAILURE_PRINT)
logger = None
LOGGER_NAME = __name__
CRAWLER_EXCEPTION_MESSAGE = {
BadStatusCode: 'bad status code',
InvalidTargetException: 'invalid target',
ConnectionTimeout: 'connection timeout',
UnknownError: 'Unknown error, enable -v for more info'
}
INVALID_TARGET_MESSAGE = "Failed to establish a new connection"
def validate_path(*dec_args, **dec_kwargs):
def outer(wrapped):
        @wraps(wrapped)
        def inner(*args, **kwargs):
filename = kwargs['filename']
if not filename:
raise IOError('Invalid filename')
file_ext = dec_kwargs['ext']
if not file_ext:
raise IOError('Invalid file extension')
folder = kwargs['folder']
if not folder:
raise IOError('Invalid folder')
            filename_with_ext = '{name}.{ext}'.format(
                name=filename,
                ext=file_ext
            )
            file_path = path.join(folder, filename_with_ext)
if not isdir(folder):
logger.warning('folder {dir} does not exist, creating it.'.format( # noqa
dir=folder
))
makedirs(folder)
elif path.isfile(file_path):
raise DuplicateFile('File already exist')
return wrapped(*args, **kwargs)
return inner
return outer
def validate_target(wrapped):
def _check_bad_status_code(response, error_message):
try:
status_code = response.status_code
except AttributeError:
return
if status_code and (400 <= status_code <= 600):
raise BadStatusCode(msg=error_message)
def _check_bad_target(exception, error_message):
if (isinstance(exception, BAD_TARGET_ERRORS) or
(isinstance(exception, ConnectionError) and (INVALID_TARGET_MESSAGE in error_message))): # noqa
raise InvalidTargetException(msg=error_message)
def _validate_target(url):
response = None
try:
response = http_get_request(url=url)
response.raise_for_status()
except HTTPError as exc:
error_msg = str(exc.message)
_check_bad_status_code(response=response, error_message=error_msg)
raise UnknownError(msg=error_msg)
except ConnectTimeout:
raise ConnectionTimeout(msg='Connection timeout')
except (BAD_TARGET_ERRORS, ConnectionError) as exc:
error_msg = str(exc.message)
_check_bad_target(exception=exc, error_message=error_msg)
raise UnknownError(msg=error_msg)
except Exception as exc:
raise_with_traceback(UnknownError(msg=str(exc.message)))
    @wraps(wrapped)
    def inner(*args, **kwargs):
url = kwargs['url']
msg = 'Validate URL {url}\t'.format(url=url)
logger.info(msg)
print(msg),
try:
_validate_target(url=url)
except CrawlerError as e:
print('{failed} ({error})'.format(
failed=FAILURE_PRINT,
error=CRAWLER_EXCEPTION_MESSAGE[e.__class__]
))
raise e
print(SUCCESS_PRINT)
logger.info('URL has been validated successfully.')
return wrapped(*args, **kwargs)
return inner
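# Illustrative usage sketch: how a crawler action might be wired through the
# decorators above. The function name and body are hypothetical; the checks
# only run once the decorated function is called with url, filename and folder
# keyword arguments, and they expect the module logger to be configured first.
if __name__ == '__main__':
    @validate_path(ext='png')
    @validate_target
    def take_screenshot(url=None, filename=None, folder=None):
        return path.join(folder, '{0}.png'.format(filename))
    # e.g. take_screenshot(url='http://example.com', filename='home', folder='shots')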
| apache-2.0 | -4,352,944,230,025,209,000 | 30.707317 | 108 | 0.603077 | false |
UTNkar/moore | src/moore/urls.py | 1 | 1606 | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.urls import path
from search import views as search_views
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from .api import api_router
from members.views import member_check_api
urlpatterns = [
# Needs to be imported before wagtail urls
url(r'^api/', api_router.urls),
# Needs to be imported before wagtail admin
url(r'', include('involvement.urls')),
path('member_check_api/', member_check_api, name='member_check_api'),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^search/$', search_views.search, name='search'),
url(r'^accounts/', include('members.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'', include('google.urls')),
path('instagram/', include('instagram.urls')),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's page serving mechanism. This should be the last pattern in
# the list:
url(r'', include(wagtail_urls)),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(
settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT
)
| agpl-3.0 | 8,173,072,607,114,854,000 | 29.884615 | 74 | 0.709215 | false |
hkariti/ansible | lib/ansible/modules/cloud/amazon/lightsail.py | 16 | 15841 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lightsail
short_description: Create or delete a virtual machine instance in AWS Lightsail
description:
    - Creates or deletes instances in AWS Lightsail and optionally waits for them to be 'running'.
version_added: "2.4"
author: "Nick Ball (@nickball)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
name:
description:
- Name of the instance
required: true
default : null
zone:
description:
- AWS availability zone in which to launch the instance. Required when state='present'
required: false
default: null
blueprint_id:
description:
- ID of the instance blueprint image. Required when state='present'
required: false
default: null
bundle_id:
description:
- Bundle of specification info for the instance. Required when state='present'
required: false
default: null
user_data:
description:
- Launch script that can configure the instance with additional data
required: false
default: null
key_pair_name:
description:
- Name of the key pair to use with the instance
required: false
default: null
wait:
description:
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
requirements:
- "python >= 2.6"
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a new Lightsail instance, register the instance details
- lightsail:
state: present
name: myinstance
region: us-east-1
zone: us-east-1a
blueprint_id: ubuntu_16_04
bundle_id: nano_1_0
key_pair_name: id_rsa
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
wait_timeout: 500
register: my_instance
- debug:
msg: "Name is {{ my_instance.instance.name }}"
- debug:
msg: "IP is {{ my_instance.instance.publicIpAddress }}"
# Delete an instance if present
- lightsail:
state: absent
region: us-east-1
name: myinstance
'''
RETURN = '''
changed:
    description: if the instance has been modified/created
returned: always
type: bool
sample:
changed: true
instance:
description: instance data
returned: always
type: dict
sample:
arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
blueprint_id: "ubuntu_16_04"
blueprint_name: "Ubuntu"
bundle_id: "nano_1_0"
created_at: "2017-03-27T08:38:59.714000-04:00"
hardware:
cpu_count: 1
ram_size_in_gb: 0.5
is_static_ip: false
location:
availability_zone: "us-east-1a"
region_name: "us-east-1"
name: "my_instance"
networking:
monthly_transfer:
gb_per_month_allocated: 1024
ports:
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 80
protocol: tcp
to_port: 80
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 22
protocol: tcp
to_port: 22
private_ip_address: "172.26.8.14"
public_ip_address: "34.207.152.202"
resource_type: "Instance"
ssh_key_name: "keypair"
state:
code: 16
name: running
support_code: "588307843083/i-0997c97831ee21e33"
username: "ubuntu"
'''
import time
import traceback
try:
import botocore
HAS_BOTOCORE = True
except ImportError:
HAS_BOTOCORE = False
try:
import boto3
except ImportError:
# will be caught by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
HAS_BOTO3, camel_dict_to_snake_dict)
def create_instance(module, client, instance_name):
"""
Create an instance
module: Ansible module object
client: authenticated lightsail connection object
    instance_name: name of instance to create
Returns a dictionary of instance information
about the new instance.
"""
changed = False
# Check if instance already exists
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
zone = module.params.get('zone')
blueprint_id = module.params.get('blueprint_id')
bundle_id = module.params.get('bundle_id')
key_pair_name = module.params.get('key_pair_name')
user_data = module.params.get('user_data')
user_data = '' if user_data is None else user_data
resp = None
if inst is None:
try:
resp = client.create_instances(
instanceNames=[
instance_name
],
availabilityZone=zone,
blueprintId=blueprint_id,
bundleId=bundle_id,
userData=user_data,
keyPairName=key_pair_name,
)
resp = resp['operations'][0]
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
changed = True
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def delete_instance(module, client, instance_name):
"""
Terminates an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the instance deleted (pre-deletion).
If the instance to be deleted is running
"changed" will be set to False.
"""
# It looks like deleting removes the instance immediately, nothing to wait for
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before deleting
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
# sleep and retry
time.sleep(10)
# Attempt to delete
if inst is not None:
while not changed and ((wait and wait_max > time.time()) or (not wait)):
try:
client.delete_instance(instanceName=instance_name)
changed = True
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))
# Timed out
if wait and not changed and wait_max <= time.time():
module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())
return (changed, inst)
def restart_instance(module, client, instance_name):
"""
Reboot an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to reboot
Returns a dictionary of instance information
about the restarted instance
If the instance was not able to reboot,
"changed" will be set to False.
Wait will not apply here as this is an OS-level operation
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(3)
# send reboot
if inst is not None:
try:
client.reboot_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
changed = True
return (changed, inst)
def startstop_instance(module, client, instance_name, state):
"""
Starts or stops an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to start/stop
state: Target state ("running" or "stopped")
Returns a dictionary of instance information
about the instance started/stopped
If the instance was not able to state change,
"changed" will be set to False.
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(1)
# Try state change
if inst is not None and inst['state']['name'] != state:
try:
if state == 'running':
client.start_instance(instanceName=instance_name)
else:
client.stop_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
changed = True
# Grab current instance info
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def core(module):
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
client = None
try:
client = boto3_conn(module, conn_type='client', resource='lightsail',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())
changed = False
state = module.params['state']
name = module.params['name']
if state == 'absent':
changed, instance_dict = delete_instance(module, client, name)
elif state in ('running', 'stopped'):
changed, instance_dict = startstop_instance(module, client, name, state)
elif state == 'restarted':
changed, instance_dict = restart_instance(module, client, name)
elif state == 'present':
changed, instance_dict = create_instance(module, client, name)
module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
def _find_instance_info(client, instance_name):
''' handle exceptions where this function is called '''
inst = None
try:
inst = client.get_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
raise
return inst['instance']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']),
zone=dict(type='str'),
blueprint_id=dict(type='str'),
bundle_id=dict(type='str'),
key_pair_name=dict(type='str'),
user_data=dict(type='str'),
wait=dict(type='bool', default=True),
wait_timeout=dict(default=300),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='Python module "boto3" is missing, please install it')
if not HAS_BOTOCORE:
module.fail_json(msg='Python module "botocore" is missing, please install it')
try:
core(module)
except (botocore.exceptions.ClientError, Exception) as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 | 5,025,600,697,318,624,000 | 32.349474 | 157 | 0.631021 | false |
klonage/nlt-gcs | Lib/distutils/fancy_getopt.py | 59 | 18432 | """distutils.fancy_getopt
Wrapper around the standard getopt module that provides the following
additional features:
* short and long options are tied together
* options have help strings, so fancy_getopt could potentially
create a complete usage summary
* options set attributes of a passed-in object
"""
__revision__ = "$Id$"
import sys
import string
import re
import getopt
from distutils.errors import DistutilsGetoptError, DistutilsArgError
# Much like command_re in distutils.core, this is close to but not quite
# the same as a Python NAME -- except, in the spirit of most GNU
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
# The similarities to NAME are again not a coincidence...
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
longopt_re = re.compile(r'^%s$' % longopt_pat)
# For recognizing "negative alias" options, eg. "quiet=!verbose"
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
# This is used to translate long options to legitimate Python identifiers
# (for use as attributes of some object).
longopt_xlate = string.maketrans('-', '_')
class FancyGetopt:
"""Wrapper around the standard 'getopt()' module that provides some
handy extra functionality:
* short and long options are tied together
* options have help strings, and help text can be assembled
from them
* options set attributes of a passed-in object
* boolean options can have "negative aliases" -- eg. if
--quiet is the "negative alias" of --verbose, then "--quiet"
on the command line sets 'verbose' to false
"""
def __init__ (self, option_table=None):
# The option table is (currently) a list of tuples. The
# tuples may have 3 or four values:
# (long_option, short_option, help_string [, repeatable])
# if an option takes an argument, its long_option should have '='
# appended; short_option should just be a single character, no ':'
# in any case. If a long_option doesn't have a corresponding
# short_option, short_option should be None. All option tuples
# must have long options.
self.option_table = option_table
# 'option_index' maps long option names to entries in the option
# table (ie. those 3-tuples).
self.option_index = {}
if self.option_table:
self._build_index()
# 'alias' records (duh) alias options; {'foo': 'bar'} means
# --foo is an alias for --bar
self.alias = {}
# 'negative_alias' keeps track of options that are the boolean
# opposite of some other option
self.negative_alias = {}
# These keep track of the information in the option table. We
# don't actually populate these structures until we're ready to
# parse the command-line, since the 'option_table' passed in here
# isn't necessarily the final word.
self.short_opts = []
self.long_opts = []
self.short2long = {}
self.attr_name = {}
self.takes_arg = {}
# And 'option_order' is filled up in 'getopt()'; it records the
# original order of options (and their values) on the command-line,
# but expands short options, converts aliases, etc.
self.option_order = []
# __init__ ()
def _build_index (self):
self.option_index.clear()
for option in self.option_table:
self.option_index[option[0]] = option
def set_option_table (self, option_table):
self.option_table = option_table
self._build_index()
def add_option (self, long_option, short_option=None, help_string=None):
if long_option in self.option_index:
raise DistutilsGetoptError, \
"option conflict: already an option '%s'" % long_option
else:
option = (long_option, short_option, help_string)
self.option_table.append(option)
self.option_index[long_option] = option
def has_option (self, long_option):
"""Return true if the option table for this parser has an
option with long name 'long_option'."""
return long_option in self.option_index
def get_attr_name (self, long_option):
"""Translate long option name 'long_option' to the form it
has as an attribute of some object: ie., translate hyphens
to underscores."""
return string.translate(long_option, longopt_xlate)
def _check_alias_dict (self, aliases, what):
assert isinstance(aliases, dict)
for (alias, opt) in aliases.items():
if alias not in self.option_index:
raise DistutilsGetoptError, \
("invalid %s '%s': "
"option '%s' not defined") % (what, alias, alias)
if opt not in self.option_index:
raise DistutilsGetoptError, \
("invalid %s '%s': "
"aliased option '%s' not defined") % (what, alias, opt)
def set_aliases (self, alias):
"""Set the aliases for this option parser."""
self._check_alias_dict(alias, "alias")
self.alias = alias
def set_negative_aliases (self, negative_alias):
"""Set the negative aliases for this option parser.
'negative_alias' should be a dictionary mapping option names to
option names, both the key and value must already be defined
in the option table."""
self._check_alias_dict(negative_alias, "negative alias")
self.negative_alias = negative_alias
def _grok_option_table (self):
"""Populate the various data structures that keep tabs on the
option table. Called by 'getopt()' before it can do anything
worthwhile.
"""
self.long_opts = []
self.short_opts = []
self.short2long.clear()
self.repeat = {}
for option in self.option_table:
if len(option) == 3:
long, short, help = option
repeat = 0
elif len(option) == 4:
long, short, help, repeat = option
else:
# the option table is part of the code, so simply
# assert that it is correct
raise ValueError, "invalid option tuple: %r" % (option,)
# Type- and value-check the option names
if not isinstance(long, str) or len(long) < 2:
raise DistutilsGetoptError, \
("invalid long option '%s': "
"must be a string of length >= 2") % long
if (not ((short is None) or
(isinstance(short, str) and len(short) == 1))):
raise DistutilsGetoptError, \
("invalid short option '%s': "
"must a single character or None") % short
self.repeat[long] = repeat
self.long_opts.append(long)
if long[-1] == '=': # option takes an argument?
if short: short = short + ':'
long = long[0:-1]
self.takes_arg[long] = 1
else:
# Is option is a "negative alias" for some other option (eg.
# "quiet" == "!verbose")?
alias_to = self.negative_alias.get(long)
if alias_to is not None:
if self.takes_arg[alias_to]:
raise DistutilsGetoptError, \
("invalid negative alias '%s': "
"aliased option '%s' takes a value") % \
(long, alias_to)
self.long_opts[-1] = long # XXX redundant?!
self.takes_arg[long] = 0
else:
self.takes_arg[long] = 0
# If this is an alias option, make sure its "takes arg" flag is
# the same as the option it's aliased to.
alias_to = self.alias.get(long)
if alias_to is not None:
if self.takes_arg[long] != self.takes_arg[alias_to]:
raise DistutilsGetoptError, \
("invalid alias '%s': inconsistent with "
"aliased option '%s' (one of them takes a value, "
"the other doesn't") % (long, alias_to)
# Now enforce some bondage on the long option name, so we can
# later translate it to an attribute name on some object. Have
# to do this a bit late to make sure we've removed any trailing
# '='.
if not longopt_re.match(long):
raise DistutilsGetoptError, \
("invalid long option name '%s' " +
"(must be letters, numbers, hyphens only") % long
self.attr_name[long] = self.get_attr_name(long)
if short:
self.short_opts.append(short)
self.short2long[short[0]] = long
# for option_table
# _grok_option_table()
def getopt (self, args=None, object=None):
"""Parse command-line options in args. Store as attributes on object.
If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
'object' is None or not supplied, creates a new OptionDummy
object, stores option values there, and returns a tuple (args,
object). If 'object' is supplied, it is modified in place and
'getopt()' just returns 'args'; in both cases, the returned
'args' is a modified copy of the passed-in 'args' list, which
is left untouched.
"""
if args is None:
args = sys.argv[1:]
if object is None:
object = OptionDummy()
created_object = 1
else:
created_object = 0
self._grok_option_table()
short_opts = string.join(self.short_opts)
try:
opts, args = getopt.getopt(args, short_opts, self.long_opts)
except getopt.error, msg:
raise DistutilsArgError, msg
for opt, val in opts:
if len(opt) == 2 and opt[0] == '-': # it's a short option
opt = self.short2long[opt[1]]
else:
assert len(opt) > 2 and opt[:2] == '--'
opt = opt[2:]
alias = self.alias.get(opt)
if alias:
opt = alias
if not self.takes_arg[opt]: # boolean option?
assert val == '', "boolean option can't have value"
alias = self.negative_alias.get(opt)
if alias:
opt = alias
val = 0
else:
val = 1
attr = self.attr_name[opt]
# The only repeating option at the moment is 'verbose'.
# It has a negative option -q quiet, which should set verbose = 0.
if val and self.repeat.get(attr) is not None:
val = getattr(object, attr, 0) + 1
setattr(object, attr, val)
self.option_order.append((opt, val))
# for opts
if created_object:
return args, object
else:
return args
# getopt()
def get_option_order (self):
"""Returns the list of (option, value) tuples processed by the
previous run of 'getopt()'. Raises RuntimeError if
'getopt()' hasn't been called yet.
"""
if self.option_order is None:
raise RuntimeError, "'getopt()' hasn't been called yet"
else:
return self.option_order
def generate_help (self, header=None):
"""Generate help text (a list of strings, one per suggested line of
output) from the option table for this FancyGetopt object.
"""
# Blithely assume the option table is good: probably wouldn't call
# 'generate_help()' unless you've already called 'getopt()'.
# First pass: determine maximum length of long option names
max_opt = 0
for option in self.option_table:
long = option[0]
short = option[1]
l = len(long)
if long[-1] == '=':
l = l - 1
if short is not None:
l = l + 5 # " (-x)" where short == 'x'
if l > max_opt:
max_opt = l
opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
# Typical help block looks like this:
# --foo controls foonabulation
# Help block for longest option looks like this:
# --flimflam set the flim-flam level
# and with wrapped text:
# --flimflam set the flim-flam level (must be between
# 0 and 100, except on Tuesdays)
# Options with short names will have the short name shown (but
# it doesn't contribute to max_opt):
# --foo (-f) controls foonabulation
# If adding the short option would make the left column too wide,
# we push the explanation off to the next line
# --flimflam (-l)
# set the flim-flam level
# Important parameters:
# - 2 spaces before option block start lines
# - 2 dashes for each long option name
# - min. 2 spaces between option and explanation (gutter)
# - 5 characters (incl. space) for short option name
# Now generate lines of help text. (If 80 columns were good enough
# for Jesus, then 78 columns are good enough for me!)
line_width = 78
text_width = line_width - opt_width
big_indent = ' ' * opt_width
if header:
lines = [header]
else:
lines = ['Option summary:']
for option in self.option_table:
long, short, help = option[:3]
text = wrap_text(help, text_width)
if long[-1] == '=':
long = long[0:-1]
# Case 1: no short option at all (makes life easy)
if short is None:
if text:
lines.append(" --%-*s %s" % (max_opt, long, text[0]))
else:
lines.append(" --%-*s " % (max_opt, long))
# Case 2: we have a short option, so we have to include it
# just after the long option
else:
opt_names = "%s (-%s)" % (long, short)
if text:
lines.append(" --%-*s %s" %
(max_opt, opt_names, text[0]))
else:
lines.append(" --%-*s" % opt_names)
for l in text[1:]:
lines.append(big_indent + l)
# for self.option_table
return lines
# generate_help ()
def print_help (self, header=None, file=None):
if file is None:
file = sys.stdout
for line in self.generate_help(header):
file.write(line + "\n")
# class FancyGetopt
def fancy_getopt (options, negative_opt, object, args):
parser = FancyGetopt(options)
parser.set_negative_aliases(negative_opt)
return parser.getopt(args, object)
WS_TRANS = string.maketrans(string.whitespace, ' ' * len(string.whitespace))
def wrap_text (text, width):
"""wrap_text(text : string, width : int) -> [string]
Split 'text' into multiple lines of no more than 'width' characters
each, and return the list of strings that results.
"""
if text is None:
return []
if len(text) <= width:
return [text]
text = string.expandtabs(text)
text = string.translate(text, WS_TRANS)
chunks = re.split(r'( +|-+)', text)
chunks = filter(None, chunks) # ' - ' results in empty strings
lines = []
while chunks:
cur_line = [] # list of chunks (to-be-joined)
cur_len = 0 # length of current line
while chunks:
l = len(chunks[0])
if cur_len + l <= width: # can squeeze (at least) this chunk in
cur_line.append(chunks[0])
del chunks[0]
cur_len = cur_len + l
else: # this line is full
# drop last chunk if all space
if cur_line and cur_line[-1][0] == ' ':
del cur_line[-1]
break
if chunks: # any chunks left to process?
# if the current line is still empty, then we had a single
# chunk that's too big too fit on a line -- so we break
# down and break it up at the line width
if cur_len == 0:
cur_line.append(chunks[0][0:width])
chunks[0] = chunks[0][width:]
# all-whitespace chunks at the end of a line can be discarded
# (and we know from the re.split above that if a chunk has
# *any* whitespace, it is *all* whitespace)
if chunks[0][0] == ' ':
del chunks[0]
# and store this line in the list-of-all-lines -- as a single
# string, of course!
lines.append(string.join(cur_line, ''))
# while chunks
return lines
def translate_longopt(opt):
"""Convert a long option name to a valid Python identifier by
changing "-" to "_".
"""
return string.translate(opt, longopt_xlate)
class OptionDummy:
"""Dummy class just used as a place to hold command-line option
values as instance attributes."""
def __init__ (self, options=[]):
"""Create a new OptionDummy instance. The attributes listed in
'options' will be initialized to None."""
for opt in options:
setattr(self, opt, None)
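if __name__ == "__main__":
    # Small self-test sketch: parse a throwaway argv with a two-entry option
    # table, then exercise the help generator and the text wrapper. Option
    # names and values here are illustrative only.
    table = [('output=', 'o', "write results to FILE"),
             ('verbose', 'v', "run verbosely")]
    parser = FancyGetopt(table)
    args, opts = parser.getopt(['-v', '--output', 'results.txt', 'leftover'])
    print "remaining args:", args
    print "verbose =", opts.verbose, "output =", opts.output
    parser.print_help("Demo options:")
    print "\n".join(wrap_text("Split this sentence into short lines", 12))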
| gpl-3.0 | 3,998,547,862,423,171,600 | 36.082645 | 79 | 0.531955 | false |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/classes/graph.py | 1 | 65308 | """Base class for undirected graphs.
The Graph class allows any hashable object as a node
and can associate key/value attribute pairs with each undirected edge.
Self-loops are allowed but multiple edges are not (see MultiGraph).
For directed graphs see DiGraph and MultiDiGraph.
"""
from copy import deepcopy
import networkx as nx
from networkx.classes.coreviews import AdjacencyView
from networkx.classes.reportviews import NodeView, EdgeView, DegreeView
from networkx.exception import NetworkXError
import networkx.convert as convert
class Graph:
"""
Base class for undirected graphs.
A Graph stores nodes and edges with optional data, or attributes.
Graphs hold undirected edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes. By convention `None` is not used as a node.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
incoming_graph_data : input graph (optional, default: None)
Data to initialize graph. If None (default) an empty
graph is created. The data can be any format that is supported
by the to_networkx_graph() function, currently including edge list,
dict of dicts, dict of lists, NetworkX graph, NumPy matrix
or 2d ndarray, SciPy sparse matrix, or PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
DiGraph
MultiGraph
MultiDiGraph
OrderedGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.Graph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2, 3])
>>> G.add_nodes_from(range(100, 110))
>>> H = nx.path_graph(10)
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1, 2), (1, 3)])
or a collection of edges,
>>> G.add_edges_from(H.edges)
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.Graph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.nodes
>>> G.add_node(1, time='5pm')
>>> G.add_nodes_from([3], time='2pm')
>>> G.nodes[1]
{'time': '5pm'}
>>> G.nodes[1]['room'] = 714 # node must exist already to use G.nodes
>>> del G.nodes[1]['room'] # remove attribute
>>> list(G.nodes(data=True))
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edges.
>>> G.add_edge(1, 2, weight=4.7 )
>>> G.add_edges_from([(3, 4), (4, 5)], color='red')
>>> G.add_edges_from([(1, 2, {'color': 'blue'}), (2, 3, {'weight': 8})])
>>> G[1][2]['weight'] = 4.7
>>> G.edges[1, 2]['weight'] = 4
Warning: we protect the graph data structure by making `G.edges` a
read-only dict-like structure. However, you can assign to attributes
in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change
data attributes: `G.edges[1, 2]['weight'] = 4`
(For multigraphs: `MG.edges[u, v, key][name] = value`).
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n < 3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
Often the best way to traverse all edges of a graph is via the neighbors.
The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()`
>>> for n, nbrsdict in G.adjacency():
... for nbr, eattr in nbrsdict.items():
... if 'weight' in eattr:
... # Do something useful with the edges
... pass
But the edges() method is often more convenient:
>>> for u, v, weight in G.edges.data('weight'):
... if weight is not None:
... # Do something useful with the edges
... pass
**Reporting:**
Simple graph information is obtained using object-attributes and methods.
Reporting typically provides views instead of containers to reduce memory
usage. The views update as the graph is updated similarly to dict-views.
The objects `nodes`, `edges` and `adj` provide access to data attributes
via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration
(e.g. `nodes.items()`, `nodes.data('color')`,
`nodes.data('color', default='blue')` and similarly for `edges`)
Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`.
For details on these and other miscellaneous methods, see below.
**Subclasses (Advanced):**
The Graph class uses a dict-of-dict-of-dict data structure.
The outer dict (node_dict) holds adjacency information keyed by node.
The next dict (adjlist_dict) represents the adjacency information and holds
edge data keyed by neighbor. The inner dict (edge_attr_dict) represents
the edge data and holds edge attribute values keyed by attribute names.
Each of these three dicts can be replaced in a subclass by a user defined
dict-like object. In general, the dict-like features should be
maintained but extra features can be added. To replace one of the
dicts create a new graph class by changing the class(!) variable
holding the factory for that dict-like structure. The variable names are
node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory,
adjlist_outer_dict_factory, edge_attr_dict_factory and graph_attr_dict_factory.
node_dict_factory : function, (default: dict)
Factory function to be used to create the dict containing node
attributes, keyed by node id.
It should require no arguments and return a dict-like object
node_attr_dict_factory: function, (default: dict)
Factory function to be used to create the node attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object
adjlist_outer_dict_factory : function, (default: dict)
Factory function to be used to create the outer-most dict
in the data structure that holds adjacency info keyed by node.
It should require no arguments and return a dict-like object.
adjlist_inner_dict_factory : function, (default: dict)
Factory function to be used to create the adjacency list
dict which holds edge data keyed by neighbor.
It should require no arguments and return a dict-like object
edge_attr_dict_factory : function, (default: dict)
Factory function to be used to create the edge attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
graph_attr_dict_factory : function, (default: dict)
Factory function to be used to create the graph attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
Typically, if your extension doesn't impact the data structure all
methods will inherit without issue except: `to_directed/to_undirected`.
By default these methods create a DiGraph/Graph class and you probably
want them to create your extension of a DiGraph/Graph. To facilitate
this we define two class variables that you can set in your subclass.
to_directed_class : callable, (default: DiGraph or MultiDiGraph)
Class to create a new graph structure in the `to_directed` method.
If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
to_undirected_class : callable, (default: Graph or MultiGraph)
Class to create a new graph structure in the `to_undirected` method.
If `None`, a NetworkX class (Graph or MultiGraph) is used.
Examples
--------
Create a low memory graph class that effectively disallows edge
attributes by using a single attribute dict for all edges.
This reduces the memory used, but you lose edge attributes.
>>> class ThinGraph(nx.Graph):
... all_edge_dict = {'weight': 1}
... def single_edge_dict(self):
... return self.all_edge_dict
... edge_attr_dict_factory = single_edge_dict
>>> G = ThinGraph()
>>> G.add_edge(2, 1)
>>> G[2][1]
{'weight': 1}
>>> G.add_edge(2, 2)
>>> G[2][1] is G[2][2]
True
Please see :mod:`~networkx.classes.ordered` for more examples of
creating graph subclasses by overwriting the base class `dict` with
a dictionary-like object.
"""
node_dict_factory = dict
node_attr_dict_factory = dict
adjlist_outer_dict_factory = dict
adjlist_inner_dict_factory = dict
edge_attr_dict_factory = dict
graph_attr_dict_factory = dict
def to_directed_class(self):
"""Returns the class to use for empty directed copies.
If you subclass the base classes, use this to designate
what directed class to use for `to_directed()` copies.
"""
return nx.DiGraph
def to_undirected_class(self):
"""Returns the class to use for empty undirected copies.
If you subclass the base classes, use this to designate
        what undirected class to use for `to_undirected()` copies.
"""
return Graph
def __init__(self, incoming_graph_data=None, **attr):
"""Initialize a graph with edges, name, or graph attributes.
Parameters
----------
incoming_graph_data : input graph (optional, default: None)
Data to initialize graph. If None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
convert
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name='my graph')
>>> e = [(1, 2), (2, 3), (3, 4)] # list of edges
>>> G = nx.Graph(e)
Arbitrary graph attribute pairs (key=value) may be assigned
>>> G = nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
"""
self.graph_attr_dict_factory = self.graph_attr_dict_factory
self.node_dict_factory = self.node_dict_factory
self.node_attr_dict_factory = self.node_attr_dict_factory
self.adjlist_outer_dict_factory = self.adjlist_outer_dict_factory
self.adjlist_inner_dict_factory = self.adjlist_inner_dict_factory
self.edge_attr_dict_factory = self.edge_attr_dict_factory
self.graph = self.graph_attr_dict_factory() # dictionary for graph attributes
self._node = self.node_dict_factory() # empty node attribute dict
self._adj = self.adjlist_outer_dict_factory() # empty adjacency dict
# attempt to load graph with data
if incoming_graph_data is not None:
convert.to_networkx_graph(incoming_graph_data, create_using=self)
# load graph attributes (must be after convert)
self.graph.update(attr)
@property
def adj(self):
"""Graph adjacency object holding the neighbors of each node.
This object is a read-only dict-like structure with node keys
and neighbor-dict values. The neighbor-dict is keyed by neighbor
to the edge-data-dict. So `G.adj[3][2]['color'] = 'blue'` sets
the color of the edge `(3, 2)` to `"blue"`.
Iterating over G.adj behaves like a dict. Useful idioms include
`for nbr, datadict in G.adj[n].items():`.
The neighbor information is also provided by subscripting the graph.
So `for nbr, foovalue in G[node].data('foo', default=1):` works.
For directed graphs, `G.adj` holds outgoing (successor) info.
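        For example, with the default dict-of-dict-of-dict backend:
        >>> G = nx.path_graph(3)
        >>> sorted(G.adj[1])
        [0, 2]
        >>> G.adj[1][2]['color'] = 'blue'   # edge data dicts can be edited in place
        >>> G[1][2]['color']
        'blue'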
"""
return AdjacencyView(self._adj)
@property
def name(self):
"""String identifier of the graph.
This graph attribute appears in the attribute dict G.graph
keyed by the string `"name"`. as well as an attribute (technically
a property) `G.name`. This is entirely user controlled.
"""
return self.graph.get("name", "")
@name.setter
def name(self, s):
self.graph["name"] = s
def __str__(self):
"""Returns the graph name.
Returns
-------
name : string
The name of the graph.
Examples
--------
>>> G = nx.Graph(name='foo')
>>> str(G)
'foo'
"""
return self.name
def __iter__(self):
"""Iterate over the nodes. Use: 'for n in G'.
Returns
-------
niter : iterator
An iterator over all nodes in the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> [n for n in G]
[0, 1, 2, 3]
>>> list(G)
[0, 1, 2, 3]
"""
return iter(self._node)
def __contains__(self, n):
"""Returns True if n is a node, False otherwise. Use: 'n in G'.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> 1 in G
True
"""
try:
return n in self._node
except TypeError:
return False
def __len__(self):
"""Returns the number of nodes in the graph. Use: 'len(G)'.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
number_of_nodes, order which are identical
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> len(G)
4
"""
return len(self._node)
def __getitem__(self, n):
"""Returns a dict of neighbors of node n. Use: 'G[n]'.
Parameters
----------
n : node
A node in the graph.
Returns
-------
adj_dict : dictionary
The adjacency dictionary for nodes connected to n.
Notes
-----
G[n] is the same as G.adj[n] and similar to G.neighbors(n)
(which is an iterator over G.adj[n])
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G[0]
AtlasView({1: {}})
"""
return self.adj[n]
def add_node(self, node_for_adding, **attr):
"""Add a single node `node_for_adding` and update node attributes.
Parameters
----------
node_for_adding : node
A node can be any hashable Python object except None.
attr : keyword arguments, optional
Set or change node attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node('Hello')
>>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1, size=10)
>>> G.add_node(3, weight=0.4, UTM=('13S', 382871, 3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
if node_for_adding not in self._node:
self._adj[node_for_adding] = self.adjlist_inner_dict_factory()
attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory()
attr_dict.update(attr)
else: # update attr even if node already exists
self._node[node_for_adding].update(attr)
def add_nodes_from(self, nodes_for_adding, **attr):
"""Add multiple nodes.
Parameters
----------
nodes_for_adding : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple take
precedence over attributes specified via keyword arguments.
See Also
--------
add_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_nodes_from('Hello')
>>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])
>>> G.add_nodes_from(K3)
>>> sorted(G.nodes(), key=str)
[0, 1, 2, 'H', 'e', 'l', 'o']
Use keywords to update specific node attributes for every node.
>>> G.add_nodes_from([1, 2], size=10)
>>> G.add_nodes_from([3, 4], weight=0.4)
Use (node, attrdict) tuples to update attributes for specific nodes.
>>> G.add_nodes_from([(1, dict(size=11)), (2, {'color':'blue'})])
>>> G.nodes[1]['size']
11
>>> H = nx.Graph()
>>> H.add_nodes_from(G.nodes(data=True))
>>> H.nodes[1]['size']
11
"""
for n in nodes_for_adding:
# keep all this inside try/except because
# CPython throws TypeError on n not in self._node,
# while pre-2.7.5 ironpython throws on self._adj[n]
try:
if n not in self._node:
self._adj[n] = self.adjlist_inner_dict_factory()
attr_dict = self._node[n] = self.node_attr_dict_factory()
attr_dict.update(attr)
else:
self._node[n].update(attr)
except TypeError:
nn, ndict = n
if nn not in self._node:
self._adj[nn] = self.adjlist_inner_dict_factory()
newdict = attr.copy()
newdict.update(ndict)
attr_dict = self._node[nn] = self.node_attr_dict_factory()
attr_dict.update(newdict)
else:
olddict = self._node[nn]
olddict.update(attr)
olddict.update(ndict)
def remove_node(self, n):
"""Remove node n.
Removes the node n and all adjacent edges.
Attempting to remove a non-existent node will raise an exception.
Parameters
----------
n : node
A node in the graph
Raises
-------
NetworkXError
If n is not in the graph.
See Also
--------
remove_nodes_from
Examples
--------
>>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> list(G.edges)
[(0, 1), (1, 2)]
>>> G.remove_node(1)
>>> list(G.edges)
[]
"""
adj = self._adj
try:
nbrs = list(adj[n]) # list handles self-loops (allows mutation)
del self._node[n]
except KeyError as e: # NetworkXError if n not in self
raise NetworkXError(f"The node {n} is not in the graph.") from e
for u in nbrs:
del adj[u][n] # remove all edges n-u in graph
del adj[n] # now remove node
def remove_nodes_from(self, nodes):
"""Remove multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.). If a node
in the container is not in the graph it is silently
ignored.
See Also
--------
remove_node
Examples
--------
>>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = list(G.nodes)
>>> e
[0, 1, 2]
>>> G.remove_nodes_from(e)
>>> list(G.nodes)
[]
"""
adj = self._adj
for n in nodes:
try:
del self._node[n]
for u in list(adj[n]): # list handles self-loops
del adj[u][n] # (allows mutation of dict in loop)
del adj[n]
except KeyError:
pass
@property
def nodes(self):
"""A NodeView of the Graph as G.nodes or G.nodes().
Can be used as `G.nodes` for data lookup and for set-like operations.
Can also be used as `G.nodes(data='color', default=None)` to return a
NodeDataView which reports specific node data but no set operations.
It presents a dict-like interface as well with `G.nodes.items()`
iterating over `(node, nodedata)` 2-tuples and `G.nodes[3]['foo']`
providing the value of the `foo` attribute for node `3`. In addition,
a view `G.nodes.data('foo')` provides a dict-like interface to the
`foo` attribute of each node. `G.nodes.data('foo', default=1)`
provides a default for nodes that do not have attribute `foo`.
Parameters
----------
data : string or bool, optional (default=False)
The node attribute returned in 2-tuple (n, ddict[data]).
If True, return entire node attribute dict as (n, ddict).
If False, return just the nodes n.
default : value, optional (default=None)
Value used for nodes that don't have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
NodeView
Allows set-like operations over the nodes as well as node
attribute dict lookup and calling to get a NodeDataView.
A NodeDataView iterates over `(n, data)` and has no set operations.
A NodeView iterates over `n` and includes set operations.
When called, if data is False, an iterator over nodes.
Otherwise an iterator of 2-tuples (node, attribute value)
where the attribute is specified in `data`.
If data is True then the attribute becomes the
entire data dictionary.
Notes
-----
If your node data is not needed, it is simpler and equivalent
to use the expression ``for n in G``, or ``list(G)``.
Examples
--------
There are two simple ways of getting a list of all nodes in the graph:
>>> G = nx.path_graph(3)
>>> list(G.nodes)
[0, 1, 2]
>>> list(G)
[0, 1, 2]
To get the node data along with the nodes:
>>> G.add_node(1, time='5pm')
>>> G.nodes[0]['foo'] = 'bar'
>>> list(G.nodes(data=True))
[(0, {'foo': 'bar'}), (1, {'time': '5pm'}), (2, {})]
>>> list(G.nodes.data())
[(0, {'foo': 'bar'}), (1, {'time': '5pm'}), (2, {})]
>>> list(G.nodes(data='foo'))
[(0, 'bar'), (1, None), (2, None)]
>>> list(G.nodes.data('foo'))
[(0, 'bar'), (1, None), (2, None)]
>>> list(G.nodes(data='time'))
[(0, None), (1, '5pm'), (2, None)]
>>> list(G.nodes.data('time'))
[(0, None), (1, '5pm'), (2, None)]
>>> list(G.nodes(data='time', default='Not Available'))
[(0, 'Not Available'), (1, '5pm'), (2, 'Not Available')]
>>> list(G.nodes.data('time', default='Not Available'))
[(0, 'Not Available'), (1, '5pm'), (2, 'Not Available')]
If some of your nodes have an attribute and the rest are assumed
to have a default attribute value you can create a dictionary
from node/attribute pairs using the `default` keyword argument
to guarantee the value is never None::
>>> G = nx.Graph()
>>> G.add_node(0)
>>> G.add_node(1, weight=2)
>>> G.add_node(2, weight=3)
>>> dict(G.nodes(data='weight', default=1))
{0: 1, 1: 2, 2: 3}
"""
nodes = NodeView(self)
# Lazy View creation: overload the (class) property on the instance
# Then future G.nodes use the existing View
# setattr doesn't work because attribute already exists
self.__dict__["nodes"] = nodes
return nodes
def number_of_nodes(self):
"""Returns the number of nodes in the graph.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
order, __len__ which are identical
Examples
--------
>>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.number_of_nodes()
3
"""
return len(self._node)
def order(self):
"""Returns the number of nodes in the graph.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
number_of_nodes, __len__ which are identical
Examples
--------
>>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.order()
3
"""
return len(self._node)
def has_node(self, n):
"""Returns True if the graph contains the node n.
Identical to `n in G`
Parameters
----------
n : node
Examples
--------
>>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.has_node(0)
True
It is more readable and simpler to use
>>> 0 in G
True
"""
try:
return n in self._node
except TypeError:
return False
def add_edge(self, u_of_edge, v_of_edge, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by directly
accessing the edge's attribute dictionary. See examples below.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
Adding an edge that already exists updates the edge data.
Many NetworkX algorithms designed for weighted graphs use
an edge attribute (by default `weight`) to hold a numerical value.
Examples
--------
The following all add the edge e=(1, 2) to graph G:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = (1, 2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from([(1, 2)]) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
For non-string attribute keys, use subscript notation.
>>> G.add_edge(1, 2)
>>> G[1][2].update({0: 5})
>>> G.edges[1, 2].update({0: 5})
"""
u, v = u_of_edge, v_of_edge
# add nodes
if u not in self._node:
self._adj[u] = self.adjlist_inner_dict_factory()
self._node[u] = self.node_attr_dict_factory()
if v not in self._node:
self._adj[v] = self.adjlist_inner_dict_factory()
self._node[v] = self.node_attr_dict_factory()
# add the edge
datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
datadict.update(attr)
self._adj[u][v] = datadict
self._adj[v][u] = datadict
def add_edges_from(self, ebunch_to_add, **attr):
"""Add all the edges in ebunch_to_add.
Parameters
----------
ebunch_to_add : container of edges
Each edge given in the container will be added to the
            graph. The edges must be given as 2-tuples (u, v) or
3-tuples (u, v, d) where d is a dictionary containing edge data.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edge : add a single edge
add_weighted_edges_from : convenient way to add weighted edges
Notes
-----
Adding the same edge twice has no effect but any edge data
will be updated when each duplicate edge is added.
Edge attributes specified in an ebunch take precedence over
attributes specified via keyword arguments.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples
>>> e = zip(range(0, 3), range(1, 4))
>>> G.add_edges_from(e) # Add the path graph 0-1-2-3
Associate data to edges
>>> G.add_edges_from([(1, 2), (2, 3)], weight=3)
>>> G.add_edges_from([(3, 4), (1, 4)], label='WN2898')
"""
for e in ebunch_to_add:
ne = len(e)
if ne == 3:
u, v, dd = e
elif ne == 2:
u, v = e
dd = {} # doesn't need edge_attr_dict_factory
else:
raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.")
if u not in self._node:
self._adj[u] = self.adjlist_inner_dict_factory()
self._node[u] = self.node_attr_dict_factory()
if v not in self._node:
self._adj[v] = self.adjlist_inner_dict_factory()
self._node[v] = self.node_attr_dict_factory()
datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
datadict.update(attr)
datadict.update(dd)
self._adj[u][v] = datadict
self._adj[v][u] = datadict
def add_weighted_edges_from(self, ebunch_to_add, weight="weight", **attr):
"""Add weighted edges in `ebunch_to_add` with specified weight attr
Parameters
----------
ebunch_to_add : container of edges
Each edge given in the list or container will be added
to the graph. The edges must be given as 3-tuples (u, v, w)
where w is a number.
weight : string, optional (default= 'weight')
The attribute name for the edge weights to be added.
attr : keyword arguments, optional (default= no attributes)
Edge attributes to add/update for all edges.
See Also
--------
add_edge : add a single edge
add_edges_from : add multiple edges
Notes
-----
Adding the same edge twice for Graph/DiGraph simply updates
the edge data. For MultiGraph/MultiDiGraph, duplicate edges
are stored.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_weighted_edges_from([(0, 1, 3.0), (1, 2, 7.5)])
"""
self.add_edges_from(((u, v, {weight: d}) for u, v, d in ebunch_to_add), **attr)
def remove_edge(self, u, v):
"""Remove the edge between u and v.
Parameters
----------
u, v : nodes
Remove the edge between nodes u and v.
Raises
------
NetworkXError
If there is not an edge between u and v.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, etc
>>> G.remove_edge(0, 1)
>>> e = (1, 2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
>>> e = (2, 3, {'weight':7}) # an edge with attribute data
>>> G.remove_edge(*e[:2]) # select first part of edge tuple
"""
try:
del self._adj[u][v]
if u != v: # self-loop needs only one entry removed
del self._adj[v][u]
except KeyError as e:
raise NetworkXError(f"The edge {u}-{v} is not in the graph") from e
def remove_edges_from(self, ebunch):
"""Remove all edges specified in ebunch.
Parameters
----------
ebunch: list or container of edge tuples
Each edge given in the list or container will be removed
from the graph. The edges can be:
- 2-tuples (u, v) edge between u and v.
- 3-tuples (u, v, k) where k is ignored.
See Also
--------
remove_edge : remove a single edge
Notes
-----
Will fail silently if an edge in ebunch is not in the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> ebunch=[(1, 2), (2, 3)]
>>> G.remove_edges_from(ebunch)
"""
adj = self._adj
for e in ebunch:
u, v = e[:2] # ignore edge data if present
if u in adj and v in adj[u]:
del adj[u][v]
if u != v: # self loop needs only one entry removed
del adj[v][u]
def update(self, edges=None, nodes=None):
"""Update the graph using nodes/edges/graphs as input.
Like dict.update, this method takes a graph as input, adding the
graph's nodes and edges to this graph. It can also take two inputs:
edges and nodes. Finally it can take either edges or nodes.
To specify only nodes the keyword `nodes` must be used.
The collections of edges and nodes are treated similarly to
the add_edges_from/add_nodes_from methods. When iterated, they
should yield 2-tuples (u, v) or 3-tuples (u, v, datadict).
Parameters
----------
edges : Graph object, collection of edges, or None
The first parameter can be a graph or some edges. If it has
attributes `nodes` and `edges`, then it is taken to be a
Graph-like object and those attributes are used as collections
of nodes and edges to be added to the graph.
If the first parameter does not have those attributes, it is
treated as a collection of edges and added to the graph.
If the first argument is None, no edges are added.
nodes : collection of nodes, or None
The second parameter is treated as a collection of nodes
to be added to the graph unless it is None.
If `edges is None` and `nodes is None` an exception is raised.
If the first parameter is a Graph, then `nodes` is ignored.
Examples
--------
>>> G = nx.path_graph(5)
>>> G.update(nx.complete_graph(range(4,10)))
>>> from itertools import combinations
>>> edges = ((u, v, {'power': u * v})
... for u, v in combinations(range(10, 20), 2)
... if u * v < 225)
>>> nodes = [1000] # for singleton, use a container
>>> G.update(edges, nodes)
Notes
-----
        If you want to update the graph using an adjacency structure,
        it is straightforward to obtain the edges/nodes from adjacency.
        The following examples provide common cases; your adjacency may
        be slightly different and require tweaks of these examples.
>>> # dict-of-set/list/tuple
>>> adj = {1: {2, 3}, 2: {1, 3}, 3: {1, 2}}
>>> e = [(u, v) for u, nbrs in adj.items() for v in nbrs]
>>> G.update(edges=e, nodes=adj)
>>> DG = nx.DiGraph()
>>> # dict-of-dict-of-attribute
>>> adj = {1: {2: 1.3, 3: 0.7}, 2: {1: 1.4}, 3: {1: 0.7}}
>>> e = [(u, v, {'weight': d}) for u, nbrs in adj.items()
... for v, d in nbrs.items()]
>>> DG.update(edges=e, nodes=adj)
>>> # dict-of-dict-of-dict
>>> adj = {1: {2: {'weight': 1.3}, 3: {'color': 0.7, 'weight':1.2}}}
>>> e = [(u, v, {'weight': d}) for u, nbrs in adj.items()
... for v, d in nbrs.items()]
>>> DG.update(edges=e, nodes=adj)
>>> # predecessor adjacency (dict-of-set)
>>> pred = {1: {2, 3}, 2: {3}, 3: {3}}
>>> e = [(v, u) for u, nbrs in pred.items() for v in nbrs]
>>> # MultiGraph dict-of-dict-of-dict-of-attribute
>>> MDG = nx.MultiDiGraph()
>>> adj = {1: {2: {0: {'weight': 1.3}, 1: {'weight': 1.2}}},
... 3: {2: {0: {'weight': 0.7}}}}
>>> e = [(u, v, ekey, d) for u, nbrs in adj.items()
... for v, keydict in nbrs.items()
... for ekey, d in keydict.items()]
>>> MDG.update(edges=e)
See Also
--------
add_edges_from: add multiple edges to a graph
add_nodes_from: add multiple nodes to a graph
"""
if edges is not None:
if nodes is not None:
self.add_nodes_from(nodes)
self.add_edges_from(edges)
else:
# check if edges is a Graph object
try:
graph_nodes = edges.nodes
graph_edges = edges.edges
except AttributeError:
                    # edges is not Graph-like
self.add_edges_from(edges)
else: # edges is Graph-like
self.add_nodes_from(graph_nodes.data())
self.add_edges_from(graph_edges.data())
self.graph.update(edges.graph)
elif nodes is not None:
self.add_nodes_from(nodes)
else:
raise NetworkXError("update needs nodes or edges input")
def has_edge(self, u, v):
"""Returns True if the edge (u, v) is in the graph.
This is the same as `v in G[u]` without KeyError exceptions.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
Returns
-------
edge_ind : bool
True if edge is in the graph, False otherwise.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.has_edge(0, 1) # using two nodes
True
>>> e = (0, 1)
>>> G.has_edge(*e) # e is a 2-tuple (u, v)
True
>>> e = (0, 1, {'weight':7})
>>> G.has_edge(*e[:2]) # e is a 3-tuple (u, v, data_dictionary)
True
        The following syntaxes are equivalent:
>>> G.has_edge(0, 1)
True
>>> 1 in G[0] # though this gives KeyError if 0 not in G
True
"""
try:
return v in self._adj[u]
except KeyError:
return False
def neighbors(self, n):
"""Returns an iterator over all neighbors of node n.
This is identical to `iter(G[n])`
Parameters
----------
n : node
A node in the graph
Returns
-------
neighbors : iterator
An iterator over all neighbors of node n
Raises
------
NetworkXError
If the node n is not in the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> [n for n in G.neighbors(0)]
[1]
Notes
-----
Alternate ways to access the neighbors are ``G.adj[n]`` or ``G[n]``:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge('a', 'b', weight=7)
>>> G['a']
AtlasView({'b': {'weight': 7}})
>>> G = nx.path_graph(4)
>>> [n for n in G[0]]
[1]
"""
try:
return iter(self._adj[n])
except KeyError as e:
raise NetworkXError(f"The node {n} is not in the graph.") from e
@property
def edges(self):
"""An EdgeView of the Graph as G.edges or G.edges().
edges(self, nbunch=None, data=False, default=None)
The EdgeView provides set-like operations on the edge-tuples
as well as edge attribute lookup. When called, it also provides
an EdgeDataView object which allows control of access to edge
attributes (but does not provide set-like operations).
Hence, `G.edges[u, v]['color']` provides the value of the color
attribute for edge `(u, v)` while
`for (u, v, c) in G.edges.data('color', default='red'):`
iterates through all the edges yielding the color attribute
with default `'red'` if no color attribute exists.
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges incident to these nodes.
data : string or bool, optional (default=False)
The edge attribute returned in 3-tuple (u, v, ddict[data]).
If True, return edge attribute dict in 3-tuple (u, v, ddict).
If False, return 2-tuple (u, v).
default : value, optional (default=None)
Value used for edges that don't have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
edges : EdgeView
            A view of edge attributes; usually it iterates over (u, v)
or (u, v, d) tuples of edges, but can also be used for
attribute lookup as `edges[u, v]['foo']`.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.path_graph(3) # or MultiGraph, etc
>>> G.add_edge(2, 3, weight=5)
>>> [e for e in G.edges]
[(0, 1), (1, 2), (2, 3)]
>>> G.edges.data() # default data is {} (empty dict)
EdgeDataView([(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})])
>>> G.edges.data('weight', default=1)
EdgeDataView([(0, 1, 1), (1, 2, 1), (2, 3, 5)])
>>> G.edges([0, 3]) # only edges incident to these nodes
EdgeDataView([(0, 1), (3, 2)])
>>> G.edges(0) # only edges incident to a single node (use G.adj[0]?)
EdgeDataView([(0, 1)])
"""
return EdgeView(self)
def get_edge_data(self, u, v, default=None):
"""Returns the attribute dictionary associated with edge (u, v).
This is identical to `G[u][v]` except the default is returned
instead of an exception if the edge doesn't exist.
Parameters
----------
u, v : nodes
default: any Python object (default=None)
Value to return if the edge (u, v) is not found.
Returns
-------
edge_dict : dictionary
The edge attribute dictionary.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G[0][1]
{}
Warning: Assigning to `G[u][v]` is not permitted.
But it is safe to assign attributes `G[u][v]['foo']`
>>> G[0][1]['weight'] = 7
>>> G[0][1]['weight']
7
>>> G[1][0]['weight']
7
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.get_edge_data(0, 1) # default edge data is {}
{}
>>> e = (0, 1)
>>> G.get_edge_data(*e) # tuple form
{}
>>> G.get_edge_data('a', 'b', default=0) # edge not in graph, return 0
0
"""
try:
return self._adj[u][v]
except KeyError:
return default
def adjacency(self):
"""Returns an iterator over (node, adjacency dict) tuples for all nodes.
For directed graphs, only outgoing neighbors/adjacencies are included.
Returns
-------
adj_iter : iterator
An iterator over (node, adjacency dictionary) for all nodes in
the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> [(n, nbrdict) for n, nbrdict in G.adjacency()]
[(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})]
"""
return iter(self._adj.items())
@property
def degree(self):
"""A DegreeView for the Graph as G.degree or G.degree().
The node degree is the number of edges adjacent to the node.
The weighted node degree is the sum of the edge weights for
edges incident to that node.
This object provides an iterator for (node, degree) as well as
lookup for the degree for a single node.
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges incident to these nodes.
weight : string or None, optional (default=None)
The name of an edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
If a single node is requested
deg : int
Degree of the node
OR if multiple nodes are requested
nd_view : A DegreeView object capable of iterating (node, degree) pairs
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.degree[0] # node 0 has degree 1
1
>>> list(G.degree([0, 1, 2]))
[(0, 1), (1, 2), (2, 2)]
"""
return DegreeView(self)
def clear(self):
"""Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.clear()
>>> list(G.nodes)
[]
>>> list(G.edges)
[]
"""
self._adj.clear()
self._node.clear()
self.graph.clear()
def clear_edges(self):
"""Remove all edges from the graph without altering nodes.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.clear_edges()
>>> list(G.nodes)
[0, 1, 2, 3]
>>> list(G.edges)
[]
"""
for neighbours_dict in self._adj.values():
neighbours_dict.clear()
def is_multigraph(self):
"""Returns True if graph is a multigraph, False otherwise."""
return False
def is_directed(self):
"""Returns True if graph is directed, False otherwise."""
return False
def copy(self, as_view=False):
"""Returns a copy of the graph.
The copy method by default returns an independent shallow copy
of the graph and attributes. That is, if an attribute is a
        container, that container is shared by the original and the copy.
Use Python's `copy.deepcopy` for new containers.
If `as_view` is True then a view is returned instead of a copy.
Notes
-----
All copies reproduce the graph structure, but data attributes
may be handled in different ways. There are four types of copies
of a graph that people might want.
Deepcopy -- A "deepcopy" copies the graph structure as well as
all data attributes and any objects they might contain.
The entire graph object is new so that changes in the copy
do not affect the original object. (see Python's copy.deepcopy)
Data Reference (Shallow) -- For a shallow copy the graph structure
is copied but the edge, node and graph attribute dicts are
references to those in the original graph. This saves
time and memory but could cause confusion if you change an attribute
in one graph and it changes the attribute in the other.
NetworkX does not provide this level of shallow copy.
Independent Shallow -- This copy creates new independent attribute
dicts and then does a shallow copy of the attributes. That is, any
attributes that are containers are shared between the new graph
and the original. This is exactly what `dict.copy()` provides.
You can obtain this style copy using:
>>> G = nx.path_graph(5)
>>> H = G.copy()
>>> H = G.copy(as_view=False)
>>> H = nx.Graph(G)
>>> H = G.__class__(G)
Fresh Data -- For fresh data, the graph structure is copied while
new empty data attribute dicts are created. The resulting graph
is independent of the original and it has no edge, node or graph
attributes. Fresh copies are not enabled. Instead use:
>>> H = G.__class__()
>>> H.add_nodes_from(G)
>>> H.add_edges_from(G.edges)
View -- Inspired by dict-views, graph-views act like read-only
versions of the original graph, providing a copy of the original
structure without requiring any memory for copying the information.
See the Python copy module for more information on shallow
and deep copies, https://docs.python.org/3/library/copy.html.
Parameters
----------
as_view : bool, optional (default=False)
If True, the returned graph-view provides a read-only view
of the original graph without actually copying any data.
Returns
-------
G : Graph
A copy of the graph.
See Also
--------
to_directed: return a directed copy of the graph.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> H = G.copy()
"""
if as_view is True:
return nx.graphviews.generic_graph_view(self)
G = self.__class__()
G.graph.update(self.graph)
G.add_nodes_from((n, d.copy()) for n, d in self._node.items())
G.add_edges_from(
(u, v, datadict.copy())
for u, nbrs in self._adj.items()
for v, datadict in nbrs.items()
)
return G
def to_directed(self, as_view=False):
"""Returns a directed representation of the graph.
Returns
-------
G : DiGraph
A directed graph with the same name, same nodes, and with
each edge (u, v, data) replaced by two directed edges
(u, v, data) and (v, u, data).
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, https://docs.python.org/3/library/copy.html.
Warning: If you have subclassed Graph to use dict-like objects
in the data structure, those changes do not transfer to the
DiGraph created by this method.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_edge(0, 1)
>>> H = G.to_directed()
>>> list(H.edges)
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> G.add_edge(0, 1)
>>> H = G.to_directed()
>>> list(H.edges)
[(0, 1)]
"""
graph_class = self.to_directed_class()
if as_view is True:
return nx.graphviews.generic_graph_view(self, graph_class)
# deepcopy when not a view
G = graph_class()
G.graph.update(deepcopy(self.graph))
G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
G.add_edges_from(
(u, v, deepcopy(data))
for u, nbrs in self._adj.items()
for v, data in nbrs.items()
)
return G
def to_undirected(self, as_view=False):
"""Returns an undirected copy of the graph.
Parameters
----------
as_view : bool (optional, default=False)
If True return a view of the original undirected graph.
Returns
-------
G : Graph/MultiGraph
A deepcopy of the graph.
See Also
--------
Graph, copy, add_edge, add_edges_from
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar `G = nx.DiGraph(D)` which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, https://docs.python.org/3/library/copy.html.
Warning: If you have subclassed DiGraph to use dict-like objects
in the data structure, those changes do not transfer to the
Graph created by this method.
Examples
--------
>>> G = nx.path_graph(2) # or MultiGraph, etc
>>> H = G.to_directed()
>>> list(H.edges)
[(0, 1), (1, 0)]
>>> G2 = H.to_undirected()
>>> list(G2.edges)
[(0, 1)]
"""
graph_class = self.to_undirected_class()
if as_view is True:
return nx.graphviews.generic_graph_view(self, graph_class)
# deepcopy when not a view
G = graph_class()
G.graph.update(deepcopy(self.graph))
G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
G.add_edges_from(
(u, v, deepcopy(d))
for u, nbrs in self._adj.items()
for v, d in nbrs.items()
)
return G
def subgraph(self, nodes):
"""Returns a SubGraph view of the subgraph induced on `nodes`.
The induced subgraph of the graph contains the nodes in `nodes`
and the edges between those nodes.
Parameters
----------
nodes : list, iterable
A container of nodes which will be iterated through once.
Returns
-------
G : SubGraph View
A subgraph view of the graph. The graph structure cannot be
changed but node/edge attributes can and are shared with the
original graph.
Notes
-----
The graph, edge and node attributes are shared with the original graph.
        Changes to the graph structure are ruled out by the view, but changes
to attributes are reflected in the original graph.
To create a subgraph with its own copy of the edge/node attributes use:
G.subgraph(nodes).copy()
For an inplace reduction of a graph to a subgraph you can remove nodes:
G.remove_nodes_from([n for n in G if n not in set(nodes)])
Subgraph views are sometimes NOT what you want. In most cases where
you want to do more than simply look at the induced edges, it makes
more sense to just create the subgraph as its own graph with code like:
::
# Create a subgraph SG based on a (possibly multigraph) G
SG = G.__class__()
SG.add_nodes_from((n, G.nodes[n]) for n in largest_wcc)
if SG.is_multigraph():
SG.add_edges_from((n, nbr, key, d)
for n, nbrs in G.adj.items() if n in largest_wcc
for nbr, keydict in nbrs.items() if nbr in largest_wcc
for key, d in keydict.items())
else:
SG.add_edges_from((n, nbr, d)
for n, nbrs in G.adj.items() if n in largest_wcc
for nbr, d in nbrs.items() if nbr in largest_wcc)
SG.graph.update(G.graph)
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> H = G.subgraph([0, 1, 2])
>>> list(H.edges)
[(0, 1), (1, 2)]
"""
induced_nodes = nx.filters.show_nodes(self.nbunch_iter(nodes))
# if already a subgraph, don't make a chain
subgraph = nx.graphviews.subgraph_view
if hasattr(self, "_NODE_OK"):
return subgraph(self._graph, induced_nodes, self._EDGE_OK)
return subgraph(self, induced_nodes)
def edge_subgraph(self, edges):
"""Returns the subgraph induced by the specified edges.
The induced subgraph contains each edge in `edges` and each
node incident to any one of those edges.
Parameters
----------
edges : iterable
An iterable of edges in this graph.
Returns
-------
G : Graph
An edge-induced subgraph of this graph with the same edge
attributes.
Notes
-----
The graph, edge, and node attributes in the returned subgraph
view are references to the corresponding attributes in the original
graph. The view is read-only.
To create a full graph version of the subgraph with its own copy
of the edge or node attributes, use::
>>> G.edge_subgraph(edges).copy() # doctest: +SKIP
Examples
--------
>>> G = nx.path_graph(5)
>>> H = G.edge_subgraph([(0, 1), (3, 4)])
>>> list(H.nodes)
[0, 1, 3, 4]
>>> list(H.edges)
[(0, 1), (3, 4)]
"""
return nx.edge_subgraph(self, edges)
def size(self, weight=None):
"""Returns the number of edges or total of all edge weights.
Parameters
----------
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
Returns
-------
size : numeric
The number of edges or
(if weight keyword is provided) the total weight sum.
If weight is None, returns an int. Otherwise a float
(or more general numeric if the weights are more general).
See Also
--------
number_of_edges
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.size()
3
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge('a', 'b', weight=2)
>>> G.add_edge('b', 'c', weight=4)
>>> G.size()
2
>>> G.size(weight='weight')
6.0
"""
s = sum(d for v, d in self.degree(weight=weight))
# If `weight` is None, the sum of the degrees is guaranteed to be
# even, so we can perform integer division and hence return an
# integer. Otherwise, the sum of the weighted degrees is not
# guaranteed to be an integer, so we perform "real" division.
return s // 2 if weight is None else s / 2
def number_of_edges(self, u=None, v=None):
"""Returns the number of edges between two nodes.
Parameters
----------
u, v : nodes, optional (default=all edges)
If u and v are specified, return the number of edges between
u and v. Otherwise return the total number of all edges.
Returns
-------
nedges : int
The number of edges in the graph. If nodes `u` and `v` are
specified return the number of edges between those nodes. If
the graph is directed, this only returns the number of edges
from `u` to `v`.
See Also
--------
size
Examples
--------
For undirected graphs, this method counts the total number of
edges in the graph:
>>> G = nx.path_graph(4)
>>> G.number_of_edges()
3
If you specify two nodes, this counts the total number of edges
joining the two nodes:
>>> G.number_of_edges(0, 1)
1
For directed graphs, this method can count the total number of
directed edges from `u` to `v`:
>>> G = nx.DiGraph()
>>> G.add_edge(0, 1)
>>> G.add_edge(1, 0)
>>> G.number_of_edges(0, 1)
1
"""
if u is None:
return int(self.size())
if v in self._adj[u]:
return 1
return 0
def nbunch_iter(self, nbunch=None):
"""Returns an iterator over nodes contained in nbunch that are
also in the graph.
The nodes in nbunch are checked for membership in the graph
and if not are silently ignored.
Parameters
----------
nbunch : single node, container, or all nodes (default= all nodes)
The view will only report edges incident to these nodes.
Returns
-------
niter : iterator
An iterator over nodes in nbunch that are also in the graph.
If nbunch is None, iterate over all nodes in the graph.
Raises
------
NetworkXError
            If nbunch is not a node or a sequence of nodes.
If a node in nbunch is not hashable.
See Also
--------
Graph.__iter__
Notes
-----
When nbunch is an iterator, the returned iterator yields values
directly from nbunch, becoming exhausted when nbunch is exhausted.
To test whether nbunch is a single node, one can use
"if nbunch in self:", even after processing with this routine.
If nbunch is not a node or a (possibly empty) sequence/iterator
or None, a :exc:`NetworkXError` is raised. Also, if any object in
nbunch is not hashable, a :exc:`NetworkXError` is raised.
"""
if nbunch is None: # include all nodes via iterator
bunch = iter(self._adj)
elif nbunch in self: # if nbunch is a single node
bunch = iter([nbunch])
else: # if nbunch is a sequence of nodes
def bunch_iter(nlist, adj):
try:
for n in nlist:
if n in adj:
yield n
except TypeError as e:
message = e.args[0]
# capture error for non-sequence/iterator nbunch.
if "iter" in message:
msg = "nbunch is not a node or a sequence of nodes."
raise NetworkXError(msg) from e
# capture error for unhashable node.
elif "hashable" in message:
msg = f"Node {n} in sequence nbunch is not a valid node."
raise NetworkXError(msg) from e
else:
raise
bunch = bunch_iter(nbunch, self._adj)
return bunch
| gpl-3.0 | -3,307,363,310,976,054,300 | 33.408851 | 87 | 0.554297 | false |
davidsminor/cortex | test/IECore/ImageTests.py | 12 | 1928 | ##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from EXRReader import *
from TIFFReader import *
from ImagePrimitive import *
from JPEGReader import *
from CINReader import *
| bsd-3-clause | 5,486,261,741,784,589,000 | 46.02439 | 76 | 0.700726 | false |
XiaodunServerGroup/medicalmooc | common/lib/xmodule/xmodule/tests/test_lti_unit.py | 6 | 17140 | # -*- coding: utf-8 -*-
"""Test for LTI Xmodule functional logic."""
from mock import Mock, patch, PropertyMock
import mock
import textwrap
import json
from lxml import etree
from webob.request import Request
from copy import copy
from collections import OrderedDict
import urllib
import oauthlib
import hashlib
import base64
from xmodule.lti_module import LTIDescriptor, LTIError
from . import LogicTest
class LTIModuleTest(LogicTest):
"""Logic tests for LTI module."""
descriptor_class = LTIDescriptor
def setUp(self):
super(LTIModuleTest, self).setUp()
self.environ = {'wsgi.url_scheme': 'http', 'REQUEST_METHOD': 'POST'}
self.request_body_xml_template = textwrap.dedent("""
<?xml version = "1.0" encoding = "UTF-8"?>
<imsx_POXEnvelopeRequest xmlns = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXRequestHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>{messageIdentifier}</imsx_messageIdentifier>
</imsx_POXRequestHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<{action}>
<resultRecord>
<sourcedGUID>
<sourcedId>{sourcedId}</sourcedId>
</sourcedGUID>
<result>
<resultScore>
<language>en-us</language>
<textString>{grade}</textString>
</resultScore>
</result>
</resultRecord>
</{action}>
</imsx_POXBody>
</imsx_POXEnvelopeRequest>
""")
self.system.get_real_user = Mock()
self.system.publish = Mock()
self.user_id = self.xmodule.runtime.anonymous_student_id
self.lti_id = self.xmodule.lti_id
self.module_id = '//MITx/999/lti/'
sourcedId = u':'.join(urllib.quote(i) for i in (self.lti_id, self.module_id, self.user_id))
self.DEFAULTS = {
'sourcedId': sourcedId,
'action': 'replaceResultRequest',
'grade': '0.5',
'messageIdentifier': '528243ba5241b',
}
def get_request_body(self, params={}):
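        # Merge any overrides into the default replaceResultRequest parameters and
        # interpolate them into the POX XML template above.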
data = copy(self.DEFAULTS)
data.update(params)
return self.request_body_xml_template.format(**data)
def get_response_values(self, response):
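        # Parse the XML body of a grade_handler response and extract the code major,
        # description, message identifier and action so tests can assert on them.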
parser = etree.XMLParser(ns_clean=True, recover=True, encoding='utf-8')
root = etree.fromstring(response.body.strip(), parser=parser)
lti_spec_namespace = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0"
namespaces = {'def': lti_spec_namespace}
code_major = root.xpath("//def:imsx_codeMajor", namespaces=namespaces)[0].text
description = root.xpath("//def:imsx_description", namespaces=namespaces)[0].text
messageIdentifier = root.xpath("//def:imsx_messageIdentifier", namespaces=namespaces)[0].text
imsx_POXBody = root.xpath("//def:imsx_POXBody", namespaces=namespaces)[0]
try:
action = imsx_POXBody.getchildren()[0].tag.replace('{'+lti_spec_namespace+'}', '')
except Exception:
action = None
return {
'code_major': code_major,
'description': description,
'messageIdentifier': messageIdentifier,
'action': action
}
@patch('xmodule.lti_module.LTIModule.get_client_key_secret', return_value=('test_client_key', u'test_client_secret'))
def test_authorization_header_not_present(self, get_key_secret):
"""
Request has no Authorization header.
This is an unknown service request, i.e., it is not a part of the original service specification.
"""
request = Request(self.environ)
request.body = self.get_request_body()
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'OAuth verification error: Malformed authorization header',
'messageIdentifier': self.DEFAULTS['messageIdentifier'],
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
@patch('xmodule.lti_module.LTIModule.get_client_key_secret', return_value=('test_client_key', u'test_client_secret'))
def test_authorization_header_empty(self, get_key_secret):
"""
Request Authorization header has no value.
This is an unknown service request, i.e., it is not a part of the original service specification.
"""
request = Request(self.environ)
request.authorization = "bad authorization header"
request.body = self.get_request_body()
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'OAuth verification error: Malformed authorization header',
'messageIdentifier': self.DEFAULTS['messageIdentifier'],
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_real_user_is_none(self):
"""
If we have no real user, we should send back failure response.
"""
self.xmodule.verify_oauth_body_sign = Mock()
self.xmodule.has_score = True
self.system.get_real_user = Mock(return_value=None)
request = Request(self.environ)
request.body = self.get_request_body()
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'User not found.',
'messageIdentifier': self.DEFAULTS['messageIdentifier'],
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_grade_not_in_range(self):
"""
Grade returned from Tool Provider is outside the range 0.0-1.0.
"""
self.xmodule.verify_oauth_body_sign = Mock()
request = Request(self.environ)
request.body = self.get_request_body(params={'grade': '10'})
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'Request body XML parsing error: score value outside the permitted range of 0-1.',
'messageIdentifier': 'unknown',
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_bad_grade_decimal(self):
"""
Grade returned from Tool Provider doesn't use a period as the decimal point.
"""
self.xmodule.verify_oauth_body_sign = Mock()
request = Request(self.environ)
request.body = self.get_request_body(params={'grade': '0,5'})
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'Request body XML parsing error: invalid literal for float(): 0,5',
'messageIdentifier': 'unknown',
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_unsupported_action(self):
"""
Action returned from Tool Provider isn't supported.
`replaceResultRequest` is supported only.
"""
self.xmodule.verify_oauth_body_sign = Mock()
request = Request(self.environ)
request.body = self.get_request_body({'action': 'wrongAction'})
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'unsupported',
'description': 'Target does not support the requested operation.',
'messageIdentifier': self.DEFAULTS['messageIdentifier'],
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_good_request(self):
"""
Response from Tool Provider is correct.
"""
self.xmodule.verify_oauth_body_sign = Mock()
self.xmodule.has_score = True
request = Request(self.environ)
request.body = self.get_request_body()
response = self.xmodule.grade_handler(request, '')
description_expected = 'Score for {sourcedId} is now {score}'.format(
sourcedId=self.DEFAULTS['sourcedId'],
score=self.DEFAULTS['grade'],
)
real_response = self.get_response_values(response)
expected_response = {
'action': 'replaceResultResponse',
'code_major': 'success',
'description': description_expected,
'messageIdentifier': self.DEFAULTS['messageIdentifier'],
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_user_id(self):
expected_user_id = unicode(urllib.quote(self.xmodule.runtime.anonymous_student_id))
real_user_id = self.xmodule.get_user_id()
self.assertEqual(real_user_id, expected_user_id)
def test_outcome_service_url(self):
expected_outcome_service_url = '{scheme}://{host}{path}'.format(
scheme='http' if self.xmodule.runtime.debug else 'https',
host=self.xmodule.runtime.hostname,
path=self.xmodule.runtime.handler_url(self.xmodule, 'grade_handler', thirdparty=True).rstrip('/?')
)
real_outcome_service_url = self.xmodule.get_outcome_service_url()
self.assertEqual(real_outcome_service_url, expected_outcome_service_url)
def test_resource_link_id(self):
with patch('xmodule.lti_module.LTIModule.id', new_callable=PropertyMock) as mock_id:
mock_id.return_value = self.module_id
expected_resource_link_id = unicode(urllib.quote(self.module_id))
real_resource_link_id = self.xmodule.get_resource_link_id()
self.assertEqual(real_resource_link_id, expected_resource_link_id)
def test_lis_result_sourcedid(self):
with patch('xmodule.lti_module.LTIModule.id', new_callable=PropertyMock) as mock_id:
mock_id.return_value = self.module_id
expected_sourcedId = u':'.join(urllib.quote(i) for i in (self.lti_id, self.module_id, self.user_id))
real_lis_result_sourcedid = self.xmodule.get_lis_result_sourcedid()
self.assertEqual(real_lis_result_sourcedid, expected_sourcedId)
@patch('xmodule.course_module.CourseDescriptor.id_to_location')
def test_client_key_secret(self, test):
"""
LTI module gets client key and secret provided.
"""
#this adds lti passports to system
mocked_course = Mock(lti_passports = ['lti_id:test_client:test_secret'])
modulestore = Mock()
modulestore.get_item.return_value = mocked_course
runtime = Mock(modulestore=modulestore)
self.xmodule.descriptor.runtime = runtime
self.xmodule.lti_id = "lti_id"
key, secret = self.xmodule.get_client_key_secret()
expected = ('test_client', 'test_secret')
self.assertEqual(expected, (key, secret))
@patch('xmodule.course_module.CourseDescriptor.id_to_location')
def test_client_key_secret_not_provided(self, test):
"""
LTI module attempts to get client key and secret provided in cms.
There are key and secret but not for specific LTI.
"""
#this adds lti passports to system
mocked_course = Mock(lti_passports = ['test_id:test_client:test_secret'])
modulestore = Mock()
modulestore.get_item.return_value = mocked_course
runtime = Mock(modulestore=modulestore)
self.xmodule.descriptor.runtime = runtime
#set another lti_id
self.xmodule.lti_id = "another_lti_id"
key_secret = self.xmodule.get_client_key_secret()
expected = ('','')
self.assertEqual(expected, key_secret)
@patch('xmodule.course_module.CourseDescriptor.id_to_location')
def test_bad_client_key_secret(self, test):
"""
LTI module attempts to get client key and secret provided in cms.
There are key and secret provided in wrong format.
"""
#this adds lti passports to system
mocked_course = Mock(lti_passports = ['test_id_test_client_test_secret'])
modulestore = Mock()
modulestore.get_item.return_value = mocked_course
runtime = Mock(modulestore=modulestore)
self.xmodule.descriptor.runtime = runtime
self.xmodule.lti_id = 'lti_id'
with self.assertRaises(LTIError):
self.xmodule.get_client_key_secret()
@patch('xmodule.lti_module.signature.verify_hmac_sha1', return_value=True)
@patch('xmodule.lti_module.LTIModule.get_client_key_secret', return_value=('test_client_key', u'test_client_secret'))
def test_successful_verify_oauth_body_sign(self, get_key_secret, mocked_verify):
"""
Test if OAuth signing was successful.
"""
try:
self.xmodule.verify_oauth_body_sign(self.get_signed_grade_mock_request())
except LTIError:
self.fail("verify_oauth_body_sign() raised LTIError unexpectedly!")
@patch('xmodule.lti_module.signature.verify_hmac_sha1', return_value=False)
@patch('xmodule.lti_module.LTIModule.get_client_key_secret', return_value=('test_client_key', u'test_client_secret'))
def test_failed_verify_oauth_body_sign(self, get_key_secret, mocked_verify):
"""
Oauth signing verify fail.
"""
with self.assertRaises(LTIError):
req = self.get_signed_grade_mock_request()
self.xmodule.verify_oauth_body_sign(req)
def get_signed_grade_mock_request(self):
"""
Example of signed request from LTI Provider.
"""
mock_request = Mock()
mock_request.headers = {
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/xml',
'Authorization': u'OAuth oauth_nonce="135685044251684026041377608307", \
oauth_timestamp="1234567890", oauth_version="1.0", \
oauth_signature_method="HMAC-SHA1", \
oauth_consumer_key="test_client_key", \
oauth_signature="my_signature%3D", \
oauth_body_hash="gz+PeJZuF2//n9hNUnDj2v5kN70="'
}
mock_request.url = u'http://testurl'
mock_request.http_method = u'POST'
mock_request.body = textwrap.dedent("""
<?xml version = "1.0" encoding = "UTF-8"?>
<imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
</imsx_POXEnvelopeRequest>
""")
return mock_request
def test_good_custom_params(self):
"""
Custom parameters are presented in right format.
"""
self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']
self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))
self.xmodule.oauth_params = Mock()
self.xmodule.get_input_fields()
self.xmodule.oauth_params.assert_called_with(
{u'custom_test_custom_params': u'test_custom_param_value'},
'test_client_key', 'test_client_secret'
)
def test_bad_custom_params(self):
"""
Custom parameters are presented in wrong format.
"""
bad_custom_params = ['test_custom_params: test_custom_param_value']
self.xmodule.custom_parameters = bad_custom_params
self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))
self.xmodule.oauth_params = Mock()
with self.assertRaises(LTIError):
self.xmodule.get_input_fields()
def test_max_score(self):
self.xmodule.weight = 100.0
self.xmodule.graded = True
self.assertEqual(self.xmodule.max_score(), None)
self.xmodule.has_score = True
self.assertEqual(self.xmodule.max_score(), 100.0)
self.xmodule.graded = False
self.assertEqual(self.xmodule.max_score(), 100.0)
| agpl-3.0 | -6,699,141,150,232,263,000 | 41.216749 | 121 | 0.614644 | false |
awmartin/spatialpixel-code | mapping/slippymapper/slippymapper.py | 2 | 10805 | import math
from ...util import lazyimages
from marker import *
from tile_servers import tile_servers
import sys
# TODO Extract the processing-specific code.
# Fundamental transformations. Reference: http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
def lonToTile(lon, zoom):
"""Given a longitude and zoom value, return the X map tile index."""
n = 2.0 ** zoom
return ((lon + 180.0) / 360.0) * n
def latToTile(lat, zoom):
"""Given a latitude and zoom value, return the Y map tile index."""
n = 2.0 ** zoom
lat_rad = math.radians(lat)
return (1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n
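# For example, at zoom 12 the point lon=-74.006, lat=40.7128 (New York City) falls at
# roughly tile x = 1206.0 and y = 1539.8, i.e. inside tile 1206/1539 after flooring.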
def tileToLon(tile, zoom):
"""Given a tile and zoom, give the longitude."""
n = 2.0 ** zoom
return tile / n * 360.0 - 180.0
def tileToLat(tile, zoom):
"""Given a tile and zoom, give the latitude."""
n = 2.0 ** zoom
lat_rad = math.atan(math.sinh(math.pi * (1.0 - 2.0 * tile / n)))
return math.degrees(lat_rad)
class SlippyMapper(object):
"""SlippyMap will draw a map given a location, zoom, and public tile server."""
tile_size = 256.0
def __init__(self, lat, lon, zoom=12, server='toner', width=512, height=512):
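        # Store the canvas size, tile server and view parameters, then precompute the
        # fractional tile coordinates of the center and the pixel offset into that tile.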
self._width = width
self._height = height
self._basemap = None
self.set_server(server)
self.lat = lat
self.lon = lon
self.set_zoom(zoom)
self.centerX = lonToTile(self.lon, self.zoom)
self.centerY = latToTile(self.lat, self.zoom)
self.offsetX = floor((floor(self.centerX) - self.centerX) * self.tile_size)
self.offsetY = floor((floor(self.centerY) - self.centerY) * self.tile_size)
self.lazyImageManager = lazyimages.LazyImageManager()
self.layers = []
self.markers = []
def set_server_url(self, zxyurl):
"""Allows you to set a custom Z/X/Y server URL instead of picking an included one.
If you look at tile_servers.py, you'll see what these URLs typically look like.
Currently, slippymapper assumes you're targeting a Z/X/Y server. For example:
mymap.setServerUrl("https://tile.server.org/%s/%s/%s.png")
The "%s" interpolation is automatically filled out with the Z, X, and Y values,
respectively.
"""
self.url = zxyurl
self.server = 'custom'
setServerUrl = set_server_url
def set_server(self, server):
"""Set the current render server given the name of a predefined public server.
See the tile_servers.py file for possible tile servers. All you need to do is provide
the name of the server, like "carto-dark". This defaults to Stamen's "toner" server.
Setting this after the map is rendered requires re-rendering the map by calling render().
"""
if server in tile_servers:
self.server = server
self.url = tile_servers[server]
elif server is None:
# Don't render a map at all.
self.server = None
self.url = None
else:
sys.stderr.write("""Got %s as a tile server but that didn't exist.
Available servers are %s. Falling back to 'toner'.
You can also specify a custom ZXY URL with the setServerUrl() method.""" % \
(server, ", ".join(tile_servers.keys())))
self.server = 'toner'
self.url = tile_servers['toner']
setServer = set_server
def set_zoom(self, zoom):
self.zoom = max(min(zoom, 18), 0)
self.centerX = lonToTile(self.lon, self.zoom)
self.centerY = latToTile(self.lat, self.zoom)
setZoom = set_zoom
def set_center(self, lat, lon):
self.lat = lat
self.lon = lon
self.centerX = lonToTile(self.lon, self.zoom)
self.centerY = latToTile(self.lat, self.zoom)
setCenter = set_center
@property
def has_rendered(self):
return self._basemap is not None
hasRendered = has_rendered
@property
def width(self):
return self._width
@property
def height(self):
return self._height
@property
def bounding_box(self):
lonwest = self.xToLon(0)
loneast = self.xToLon(self.width)
latnorth = self.yToLat(0)
latsouth = self.yToLat(self.height)
return (latsouth, lonwest, latnorth, loneast)
boundingBox = bounding_box
bbox = bounding_box
def set_size(self, width, height):
self._width = width
self._height = height
        # The basemap is None until the first render, so only recreate it (at the new
        # size) when it already exists. Either way, changing the size means the map
        # must be re-rendered by calling render().
if self.has_rendered:
self._basemap = createGraphics(floor(self._width), floor(self._height))
setSize = set_size
def clear(self):
if self.has_rendered:
self._basemap.beginDraw()
self._basemap.background(255, 0)
self._basemap.endDraw()
@property
def has_tile_server(self):
return self.url is not None
def get_tile_url(self, x, y):
# Interpolate the URL for this particular tile.
# e.g. .../12/1208/1541.png
return self.url % (self.zoom, x, y)
# Inspired by math contained in https://github.com/dfacts/staticmaplite/
def render(self):
"""Create the map by requesting tiles from the specified tile server."""
if not self.has_rendered:
self._basemap = createGraphics(floor(self._width), floor(self._height))
self.clear()
if self.has_tile_server:
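            # Work out the range of tile columns and rows needed to cover the canvas
            # around the center tile, and the pixel offset of the first tile.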
numColumns = self.width / self.tile_size
numRows = self.height / self.tile_size
tiles_start_x = floor(self.centerX - numColumns / 2.0)
tiles_start_y = floor(self.centerY - numRows / 2.0)
tiles_end_x = ceil(self.centerX + numColumns / 2.0)
tiles_end_y = ceil(self.centerY + numRows / 2.0)
self.offsetX = -floor((self.centerX - floor(self.centerX)) * self.tile_size) + \
floor(self.width / 2.0) + \
floor(tiles_start_x - floor(self.centerX)) * self.tile_size
self.offsetY = -floor((self.centerY - floor(self.centerY)) * self.tile_size) + \
floor(self.height / 2.0) + \
floor(tiles_start_y - floor(self.centerY)) * self.tile_size
def onTileLoaded(tile, meta):
self._basemap.beginDraw()
x = meta['destX']
y = meta['destY']
self._basemap.image(tile, x, y)
self._basemap.endDraw()
for x in xrange(tiles_start_x, tiles_end_x):
for y in xrange(tiles_start_y, tiles_end_y):
tile_url = self.get_tile_url(x, y)
# Compute the x and y coordinates for where this tile will go on the map.
destX = (x - tiles_start_x) * self.tile_size + self.offsetX
destY = (y - tiles_start_y) * self.tile_size + self.offsetY
# Attempts to load all the images lazily.
meta = {
'url' : tile_url,
'destX' : destX,
'destY' : destY,
'x' : x,
'y' : y,
}
self.lazyImageManager.addLazyImage(tile_url, onTileLoaded, meta)
# Kick off all the layer rendering.
for layer in self.layers:
layer.render()
for marker in self.markers:
marker.draw()
# TODO Revisit map filters.
# def makeGrayscale(self):
# self._basemap.loadPixels()
# for i in xrange(0, self._basemap.width * self._basemap.height):
# b = self._basemap.brightness(self._basemap.pixels[i])
# self._basemap.pixels[i] = self._basemap.color(b, b, b)
# self._basemap.updatePixels()
# def makeFaded(self):
# self._basemap.noStroke()
# self._basemap.fill(255, 255, 255, 128)
# self._basemap.rect(0, 0, width, height)
def draw(self):
"""Draws the base map on the Processing sketch canvas."""
self.updateLazyImageLoading()
if self.has_tile_server and self.has_rendered:
image(self._basemap, 0, 0)
for layer in self.layers:
layer.draw()
for marker in self.markers:
marker.draw()
def updateLazyImageLoading(self):
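        # Ask the lazy image manager to keep fetching queued tiles; this becomes a
        # no-op once every requested tile has loaded.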
if self.lazyImageManager.allLazyImagesLoaded:
return
self.lazyImageManager.request()
def add_marker(self, latitude, longitude, marker=None):
if marker is None:
m = CircleMarker(6)
elif callable(marker):
# The case that marker is a function of: x, y, pgraphics.
m = SimpleMarker(marker)
elif isinstance(marker, str):
m = TextMarker(marker)
elif isinstance(marker, unicode):
m = TextMarker(str(marker))
elif isinstance(marker, int) or isinstance(marker, float):
m = CircleMarker(marker)
elif isinstance(marker, PImage):
m = ImageMarker(marker)
else:
m = marker
m.setUnderlayMap(self)
m.setLocation(latitude, longitude)
self.markers.append(m)
return m
addMarker = add_marker
def add_layer(self, layer):
self.layers.append(layer)
layer.setUnderlayMap(self)
addLayer = add_layer
def save(self, filename):
self.flattened().save(filename)
def flattened(self):
export = createGraphics(self.width, self.height)
export.beginDraw()
if self.has_rendered and self.has_tile_server:
export.image(self._basemap, 0, 0)
for layer in self.layers:
export.image(layer.layer, 0, 0)
for marker in self.markers:
marker.render(export)
export.endDraw()
return export
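    # Projection helpers: convert between geographic coordinates and pixel positions
    # on the canvas, relative to the current center and zoom.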
def lonToX(self, lon):
return (self.width / 2.0) - self.tile_size * (self.centerX - lonToTile(lon, self.zoom))
def latToY(self, lat):
return (self.height / 2.0) - self.tile_size * (self.centerY - latToTile(lat, self.zoom))
def xToLon(self, x):
tile = (x - (self.width / 2.0)) / self.tile_size + self.centerX
return tileToLon(tile, self.zoom)
def yToLat(self, y):
tile = (y - (self.height / 2.0)) / self.tile_size + self.centerY
return tileToLat(tile, self.zoom)
def latlonToPixel(self, loc):
return (self.lonToX(loc[0]), self.latToY(loc[1]))
def pixelToLatLon(self, pixel):
return (self.yToLat(pixel[1]), self.xToLon(pixel[0]))
| mit | -8,290,212,591,541,616,000 | 32.246154 | 103 | 0.580565 | false |
qgis/QGIS | tests/src/python/test_qgsprojectionselectionwidgets.py | 15 | 11638 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for various projection selection widgets.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '12/11/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.PyQt.QtTest import QSignalSpy
from qgis.gui import (QgsProjectionSelectionWidget,
QgsProjectionSelectionTreeWidget,
QgsProjectionSelectionDialog)
from qgis.core import QgsCoordinateReferenceSystem, QgsProject, QgsProjUtils
from qgis.testing import start_app, unittest
start_app()
class TestQgsProjectionSelectionWidgets(unittest.TestCase):
def testShowingHiding(self):
""" test showing and hiding options """
w = QgsProjectionSelectionWidget()
# layer crs
w.setOptionVisible(QgsProjectionSelectionWidget.LayerCrs, False)
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.LayerCrs))
w.setOptionVisible(QgsProjectionSelectionWidget.LayerCrs, True)
# should still be hidden, because layer crs not set
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.LayerCrs))
w.setLayerCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.LayerCrs))
w.setOptionVisible(QgsProjectionSelectionWidget.LayerCrs, False)
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.LayerCrs))
# project crs
w.setOptionVisible(QgsProjectionSelectionWidget.ProjectCrs, False)
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.ProjectCrs))
w.setOptionVisible(QgsProjectionSelectionWidget.ProjectCrs, True)
# should still be hidden, because project crs was not set
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.ProjectCrs))
QgsProject.instance().setCrs(QgsCoordinateReferenceSystem('EPSG:3113'))
w = QgsProjectionSelectionWidget()
w.setOptionVisible(QgsProjectionSelectionWidget.ProjectCrs, True)
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.ProjectCrs))
w.setOptionVisible(QgsProjectionSelectionWidget.ProjectCrs, False)
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.ProjectCrs))
# default crs
w.setOptionVisible(QgsProjectionSelectionWidget.DefaultCrs, False)
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.DefaultCrs))
w.setOptionVisible(QgsProjectionSelectionWidget.DefaultCrs, True)
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.DefaultCrs))
# current crs
w = QgsProjectionSelectionWidget()
w.setOptionVisible(QgsProjectionSelectionWidget.CurrentCrs, False)
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.CurrentCrs))
w.setOptionVisible(QgsProjectionSelectionWidget.CurrentCrs, True)
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.CurrentCrs))
w = QgsProjectionSelectionWidget()
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
w.setOptionVisible(QgsProjectionSelectionWidget.CurrentCrs, False)
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.CurrentCrs))
w.setOptionVisible(QgsProjectionSelectionWidget.CurrentCrs, True)
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.CurrentCrs))
# not set
w = QgsProjectionSelectionWidget()
w.setOptionVisible(QgsProjectionSelectionWidget.CrsNotSet, False)
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.CrsNotSet))
w.setOptionVisible(QgsProjectionSelectionWidget.CrsNotSet, True)
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.CrsNotSet))
w.setOptionVisible(QgsProjectionSelectionWidget.CrsNotSet, False)
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.CrsNotSet))
def testShowingNotSetOption(self):
""" test showing the not set option """
w = QgsProjectionSelectionWidget()
# start with an invalid CRS
w.setCrs(QgsCoordinateReferenceSystem())
# add the not-set option
w.setOptionVisible(QgsProjectionSelectionWidget.CrsNotSet, True)
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.CrsNotSet))
# current crs (which would show "invalid") should be hidden
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.CurrentCrs))
# hide not-set option
w.setOptionVisible(QgsProjectionSelectionWidget.CrsNotSet, False)
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.CrsNotSet))
# and now current crs option ('invalid') should be reshown
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.CurrentCrs))
# repeat with a slightly different workflow
w = QgsProjectionSelectionWidget()
# start with an invalid CRS
w.setCrs(QgsCoordinateReferenceSystem())
# add the not-set option
w.setOptionVisible(QgsProjectionSelectionWidget.CrsNotSet, True)
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.CrsNotSet))
# current crs (which would show "invalid") should be hidden
self.assertFalse(w.optionVisible(QgsProjectionSelectionWidget.CurrentCrs))
# now set a current crs
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
# both current and not set options should be shown
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.CurrentCrs))
self.assertTrue(w.optionVisible(QgsProjectionSelectionWidget.CrsNotSet))
def testSignal(self):
w = QgsProjectionSelectionWidget()
w.show()
spy = QSignalSpy(w.crsChanged)
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(w.crs().authid(), 'EPSG:3111')
self.assertEqual(len(spy), 1)
# setting the same crs doesn't emit the signal
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(len(spy), 1)
def testTreeWidgetGettersSetters(self):
""" basic tests for QgsProjectionSelectionTreeWidget """
w = QgsProjectionSelectionTreeWidget()
w.show()
self.assertFalse(w.hasValidSelection())
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(w.crs().authid(), 'EPSG:3111')
self.assertTrue(w.hasValidSelection())
def testTreeWidgetUnknownCrs(self):
w = QgsProjectionSelectionTreeWidget()
w.show()
self.assertFalse(w.hasValidSelection())
w.setCrs(QgsCoordinateReferenceSystem.fromWkt('GEOGCS["WGS 84",DATUM["unknown",SPHEROID["WGS84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]]'))
self.assertTrue(w.crs().isValid())
self.assertFalse(w.crs().authid())
self.assertTrue(w.hasValidSelection())
self.assertEqual(w.crs().toWkt(QgsCoordinateReferenceSystem.WKT2_2018), 'GEOGCRS["WGS 84",DATUM["unknown",ELLIPSOID["WGS84",6378137,298.257223563,LENGTHUNIT["metre",1,ID["EPSG",9001]]]],PRIMEM["Greenwich",0,ANGLEUNIT["degree",0.0174532925199433]],CS[ellipsoidal,2],AXIS["longitude",east,ORDER[1],ANGLEUNIT["degree",0.0174532925199433]],AXIS["latitude",north,ORDER[2],ANGLEUNIT["degree",0.0174532925199433]]]')
def testTreeWidgetNotSetOption(self):
""" test allowing no projection option for QgsProjectionSelectionTreeWidget """
w = QgsProjectionSelectionTreeWidget()
w.show()
w.setShowNoProjection(True)
self.assertTrue(w.showNoProjection())
w.setShowNoProjection(False)
self.assertFalse(w.showNoProjection())
w.setShowNoProjection(True)
# no projection should be a valid selection
w.setCrs(QgsCoordinateReferenceSystem())
self.assertTrue(w.hasValidSelection())
self.assertFalse(w.crs().isValid())
def testDialogGettersSetters(self):
""" basic tests for QgsProjectionSelectionTreeWidget """
w = QgsProjectionSelectionDialog()
w.show()
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(w.crs().authid(), 'EPSG:3111')
def testDialogNotSetOption(self):
""" test allowing no projection option for QgsProjectionSelectionTreeWidget """
w = QgsProjectionSelectionDialog()
w.show()
w.setShowNoProjection(True)
self.assertTrue(w.showNoProjection())
w.setShowNoProjection(False)
self.assertFalse(w.showNoProjection())
w.setShowNoProjection(True)
w.setCrs(QgsCoordinateReferenceSystem())
self.assertFalse(w.crs().isValid())
def testTreeWidgetDeferredLoad(self):
"""
Test that crs setting made before widget is initialized is respected
"""
w = QgsProjectionSelectionTreeWidget()
spy = QSignalSpy(w.crsSelected)
self.assertFalse(w.hasValidSelection())
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(len(spy), 1)
self.assertTrue(w.hasValidSelection())
self.assertEqual(w.crs().authid(), 'EPSG:3111')
self.assertEqual(len(spy), 1)
w = QgsProjectionSelectionTreeWidget()
spy = QSignalSpy(w.crsSelected)
self.assertFalse(w.hasValidSelection())
w.setCrs(QgsCoordinateReferenceSystem())
self.assertEqual(len(spy), 1)
self.assertTrue(w.hasValidSelection())
self.assertFalse(w.crs().isValid())
self.assertEqual(len(spy), 1)
w.setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
self.assertEqual(len(spy), 2)
# expect same behavior if we show
w = QgsProjectionSelectionTreeWidget()
spy = QSignalSpy(w.crsSelected)
self.assertFalse(w.hasValidSelection())
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(len(spy), 1)
w.show()
self.assertTrue(w.hasValidSelection())
self.assertEqual(w.crs().authid(), 'EPSG:3111')
self.assertEqual(len(spy), 1)
w = QgsProjectionSelectionTreeWidget()
spy = QSignalSpy(w.crsSelected)
self.assertFalse(w.hasValidSelection())
w.setCrs(QgsCoordinateReferenceSystem())
self.assertEqual(len(spy), 1)
w.show()
self.assertTrue(w.hasValidSelection())
self.assertFalse(w.crs().isValid())
self.assertEqual(len(spy), 1)
# no double signals if same crs set
w = QgsProjectionSelectionTreeWidget()
spy = QSignalSpy(w.crsSelected)
self.assertFalse(w.hasValidSelection())
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(len(spy), 1)
# no double signals if same crs set
w = QgsProjectionSelectionTreeWidget()
spy = QSignalSpy(w.crsSelected)
self.assertFalse(w.hasValidSelection())
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
w.show()
w.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(len(spy), 1)
w.setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
self.assertEqual(len(spy), 2)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -2,600,888,642,415,344,600 | 45.738956 | 417 | 0.707682 | false |
lucas8/MPSI | ipt/int/code.py | 1 | 5446 | #!/usr/bin/python3
import numpy as np
import random as rd
from math import *
import matplotlib.pyplot as plt
########################################
####### Rectangle method ###############
########################################
tests = False
### Exercice 1-a ###
def Irg(f, a, b, n):
ints = np.linspace(a, b, n + 1)
summ = 0
for i in range(len(ints) - 1):
summ += f(ints[i]) * (ints[i+1] - ints[i])
return summ
### Exercice 1-b ###
# sin x is increasing on [0, pi/2], so Irg (left endpoints) gives a
# lower bound of the integral.
sing = Irg(sin, 0, pi/2, 10)
### Exercice 1-c ###
def Ird(f, a, b, n):
ints = np.linspace(a, b, n + 1)
summ = 0
for i in range(len(ints) - 1):
summ += f(ints[i+1]) * (ints[i+1] - ints[i])
return summ
### Exercice 1-d ###
# sin x is increasing on [0, pi/2], so Ird (right endpoints) gives an
# upper bound of the integral.
sind = Ird(sin, 0, pi/2, 10)
### Exercice 1-e ###
# This can be taken as an average of the lower and
# upper bounds, so it is probably more accurate.
sina = .5 * (sind + sing)
### Exercice 2 ###
def Er(meth, f, a, b, n, intex, m1):
intc = meth(f, a, b, n)
err = (b-a) * (b-a) / (2*n) * m1
return abs(intex - intc) <= err
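# Er checks the classical rectangle-rule bound:
# |I - I_n| <= M1 * (b - a)^2 / (2 * n), where M1 bounds |f'| on [a, b].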
if tests:
countg = 0
countd = 0
for n in range(1, 1001):
if Er(Irg, sin, 0, pi/2, n, 1, 1):
countg += 1
if Er(Ird, sin, 0, pi/2, n, 1, 1):
countd += 1
print("Validg : ", countg)
print("Validd : ", countd)
### Exercice 3-a ###
def Irm(f, a, b, n):
ints = np.linspace(a, b, n + 1)
summ = 0
for i in range(len(ints) - 1):
summ += f((ints[i] + ints[i+1])/2) * (ints[i+1] - ints[i])
return summ
### Exercice 3-b ###
def Er2(meth, f, a, b, n, intex, m2):
intc = meth(f, a, b, n)
err = (b-a) * (b-a) * (b-a) / (24*n*n) * m2
return abs(intex - intc) <= err
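# Er2 checks the midpoint-rule bound:
# |I - I_n| <= M2 * (b - a)^3 / (24 * n^2), where M2 bounds |f''| on [a, b].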
if tests:
countm = 0
for n in range(1, 1001):
if Er2(Irm, sin, 0, pi/2, n, 1, 1):
countm += 1
print("Validm : ", countm)
### Exercice 3-c ###
def f1(x): return x + 2
def f2(x): return x*x
if tests:
print(Irm(f1, 0, 1, 10))
print(Irm(f2, 0, 1, 10))
########################################
######## Trapezoid method ##############
########################################
tests = False
### Exercice 1 ###
def It(f, a, b, n):
ints = np.linspace(a, b, n+1)
summ = 0
for i in range(len(ints) - 1):
summ += (ints[i+1] - ints[i]) * (f(ints[i]) + f(ints[i+1])) / 2
return summ
### Exercice 2 ###
def Er3(meth, f, a, b, n, intex, m2):
intc = meth(f, a, b, n)
err = (b-a) * (b-a) * (b-a) / (12*n*n) * m2
return abs(intex - intc) <= err
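# Er3 checks the trapezoid-rule bound:
# |I - I_n| <= M2 * (b - a)^3 / (12 * n^2), where M2 bounds |f''| on [a, b].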
if tests:
count = 0
for n in range(1, 1001):
if Er3(It, sin, 0, pi/2, n, 1, 1):
count += 1
print("Valid trap : ", count)
### Exercice 3 ###
if tests:
print(It(f1, 0, 1, 10))
print(It(f2, 0, 1, 10))
########################################
######### Simpson's method #############
########################################
tests = False
### Exercice 1 ###
def Isimp(f, a, b, n):
ints = np.linspace(a, b, n+1)
summ = 0
for i in range(len(ints) - 1):
summ += (ints[i+1] - ints[i]) * (f(ints[i])/6 + 2*f((ints[i]+ints[i+1])/2)/3 + f(ints[i+1])/6)
return summ
### Exercice 2 ###
def Er4(meth, f, a, b, n, intex, m4):
intc = meth(f, a, b, n)
err = (b-a) * (b-a) * (b-a) * (b-a) * (b-a) / (2880*n*n*n*n) * m4
return abs(intex - intc) <= err
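# Er4 checks the composite Simpson bound:
# |I - I_n| <= M4 * (b - a)^5 / (2880 * n^4), where M4 bounds the fourth derivative on [a, b].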
if tests:
count = 0
for n in range(1, 1001):
if Er4(Isimp, sin, 0, pi/2, n, 1, 1):
count += 1
print("Valid simp : ", count)
### Exercice 3 ###
def f3(x): return x*x*x
def f4(x): return x*x*x*x
if tests:
print(Isimp(f3, 0, 1, 10))
print(Isimp(f4, 0, 1, 10))
########################################
####### Monte Carlo method #############
########################################
tests = False
def Imont(f, a, b, m, n):
count = 0
for i in range(n):
x = rd.uniform(a, b)
y = rd.uniform(0, m)
if y < f(x):
count += 1
return m * (b-a) * count / n
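# Hit-or-miss Monte Carlo: the fraction of random points that fall under the curve,
# times the bounding-box area m * (b - a), estimates the integral. With
# f(x) = sqrt(1 - x^2) on [0, 1] the integral is pi/4, hence the factor 4 below.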
def f(x): return sqrt(1 - x*x)
if tests:
mpi = 4 * Imont(f, 0, 1, 1, 1000000)
print(mpi)
########################################
########## Error graphs ################
########################################
tests = True
def plotfn(f, a, b, n, cl = 'blue', lb = ''):
xs = np.linspace(a, b, n)
ys = [f(x) for x in xs]
plt.plot(xs, ys, linewidth = 1, color = cl)
plt.text(xs[-1], ys[-1], lb)
def createfn(m, p):
return lambda x: m*x + p
def calcerr(meth, f, a, b, n, intex):
return log(abs(meth(f, a, b, n) - intex))
def errgraph(mts, f, a, b, n, intex, l):
N = 100
for m in l:
plotfn(createfn(-m, 0), 0, log(n), N, 'red', str(-m))
i = 0
cls = ['blue', 'green', 'orange', 'salmon', 'cyan']
my = 0
for m in mts:
meth = m[0]
ys = [calcerr(meth, f, a, b, i, intex) for i in range(1, n+1)]
xs = [log(i) for i in range(1, n+1)]
plt.plot(xs, ys, linewidth = 2, color = cls[i])
plt.text(xs[-1], ys[-1], m[1])
i = divmod(i+1, len(cls))[1]
my = max(my, ys[0])
plt.title("Error graph")
plt.axis([0, log(n), ys[-1] - 1, my])
plt.show()
if tests:
errgraph([[Ird,"ird"], [Irm,"irm"], [It,"trap"], [Isimp, "simp"]],
sin, 0, pi/2, 1000, 1, [.5, 1, 2, 4])
| mit | -8,215,199,403,479,353,000 | 25.014354 | 102 | 0.459077 | false |
mwmuni/LIGGGHTS_GUI | OpenGL/GL/ARB/texture_cube_map_array.py | 9 | 2171 | '''OpenGL extension ARB.texture_cube_map_array
This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_cube_map_array to provide a more
Python-friendly API
Overview (from the spec)
The GL_EXT_texture_array extension, and subsequently OpenGL 3.0 have
introduced the concept of one- and two-dimensional array textures.
An array texture is an ordered set of images with the same size and
format. Each image in an array texture has a unique level. This
extension expands texture array support to include cube map
textures.
A cube map array texture is a 2-dimensional array texture that may
contain many cube map layers. Each cube map layer is a unique cube
map image set. Images in a cube map array have the same size and
format limitations as one- and two-dimensional array textures. A
cube map array texture is specified using TexImage3D in a similar
manner to two-dimensional arrays. Cube map array textures can be
bound to a render targets of a frame buffer object as
two-dimensional arrays are using FramebufferTextureLayer.
When accessed by a programmable shader, a cube map array texture
acts as a single unit. The "s", "t", "r" texture coordinates are
treated as a regular cube map texture fetch. The "q" texture is
treated as an unnormalized floating-point value identifying the
layer of the cube map array texture. Cube map array texture lookups
do not filter between layers.
This extension does not provide for the use of cube map array
textures with fixed-function fragment processing.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_cube_map_array.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_cube_map_array import *
from OpenGL.raw.GL.ARB.texture_cube_map_array import _EXTENSION_NAME
def glInitTextureCubeMapArrayARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | gpl-3.0 | -3,980,971,157,940,666,400 | 41.588235 | 71 | 0.788577 | false |
tdgoodrich/inddgo_pure | scripts/inddgo-graph-parser.py | 1 | 2752 | #!/usr/bin/env python2
#This file is part of INDDGO.
#
#Copyright (C) 2012, Oak Ridge National Laboratory
#
#This product includes software produced by UT-Battelle, LLC under Contract No.
#DE-AC05-00OR22725 with the Department of Energy.
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the New BSD 3-clause software license (LICENSE).
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#LICENSE for more details.
#
#For more information please contact the INDDGO developers at:
#[email protected]
from optparse import OptionParser
from sys import exit
class graph_properties:
#add aditional properties here
properties = ("edges","diameter","eigenspectrum","vertices")
def __init__(self,graph_path):
self.items = dict.fromkeys(self.properties)
self.process_graph(graph_path)
def process_graph(self, graph_path):
raw_file = []
with open(graph_path, 'r') as graph_file:
for line in graph_file:
processed_line = line.partition("#")[0].strip()
if len(processed_line) > 0:
raw_file.append(processed_line)
for line in raw_file:
split_up_line = line.partition(" ")
if split_up_line[0] in self.items:
self.items[split_up_line[0]] = split_up_line[2]
else:
print "Tried to add unknown element, line in file was: ", line
def to_string(self):
print_string = []
for element in self.properties:
if self.items[element] is not None:
print_string.append(str(self.items[element]))
else:
print_string.append("")
return ",".join(print_string)
def properties_string(self):
return ",".join(self.properties)
usage_string = "usage: %prog [-h] [-o OUTPUT] input [input ...]"
parser = OptionParser(usage=usage_string)
parser.add_option("-o", "--output", action="store", type="string", dest="output", help="output file name")
(options, args) = parser.parse_args()
if(len(args) < 1):
parser.error("too few arguments")
exit()
graphs = []
for graph_name in args:
graphs.append(graph_properties(graph_name))
if options.output is None:
print graphs[0].properties_string()
for graph in graphs:
print graph.to_string()
else:
with open(options.output,"w") as out_file:
out_file.write(graphs[0].properties_string())
out_file.write("\n")
for graph in graphs:
out_file.write(graph.to_string())
out_file.write("\n")
| bsd-3-clause | 6,501,833,771,813,772,000 | 32.975309 | 106 | 0.640625 | false |
PeterPetrik/QGIS | python/plugins/processing/gui/ExtentSelectionPanel.py | 30 | 7535 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExtentSelectionPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
import warnings
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import (
QMenu,
QAction,
QDialog,
QVBoxLayout,
QDialogButtonBox,
QLabel
)
from qgis.PyQt.QtGui import QCursor
from qgis.PyQt.QtCore import QCoreApplication, pyqtSignal
from qgis.gui import QgsMapLayerComboBox
from qgis.utils import iface
from qgis.core import (QgsProcessingParameterDefinition,
QgsProcessingParameters,
QgsProject,
QgsReferencedRectangle,
QgsMapLayerProxyModel)
from processing.gui.RectangleMapTool import RectangleMapTool
from processing.core.ProcessingConfig import ProcessingConfig
from processing.tools.dataobjects import createContext
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetBaseSelector.ui'))
class LayerSelectionDialog(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.setWindowTitle(self.tr('Select Extent'))
vl = QVBoxLayout()
vl.addWidget(QLabel(self.tr('Use extent from')))
self.combo = QgsMapLayerComboBox()
self.combo.setFilters(
QgsMapLayerProxyModel.HasGeometry | QgsMapLayerProxyModel.RasterLayer | QgsMapLayerProxyModel.MeshLayer)
self.combo.setShowCrs(ProcessingConfig.getSetting(ProcessingConfig.SHOW_CRS_DEF))
vl.addWidget(self.combo)
self.button_box = QDialogButtonBox()
self.button_box.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
self.button_box.accepted.connect(self.accept)
self.button_box.rejected.connect(self.reject)
vl.addWidget(self.button_box)
self.setLayout(vl)
def selected_layer(self):
return self.combo.currentLayer()
class ExtentSelectionPanel(BASE, WIDGET):
hasChanged = pyqtSignal()
def __init__(self, dialog, param):
super(ExtentSelectionPanel, self).__init__(None)
self.setupUi(self)
self.leText.textChanged.connect(lambda: self.hasChanged.emit())
self.dialog = dialog
self.param = param
self.crs = QgsProject.instance().crs()
if self.param.flags() & QgsProcessingParameterDefinition.FlagOptional:
if hasattr(self.leText, 'setPlaceholderText'):
self.leText.setPlaceholderText(
self.tr('[Leave blank to use min covering extent]'))
self.btnSelect.clicked.connect(self.selectExtent)
if iface is not None:
canvas = iface.mapCanvas()
self.prevMapTool = canvas.mapTool()
self.tool = RectangleMapTool(canvas)
self.tool.rectangleCreated.connect(self.updateExtent)
else:
self.prevMapTool = None
self.tool = None
if param.defaultValue() is not None:
context = createContext()
rect = QgsProcessingParameters.parameterAsExtent(param, {param.name(): param.defaultValue()}, context)
crs = QgsProcessingParameters.parameterAsExtentCrs(param, {param.name(): param.defaultValue()}, context)
if not rect.isNull():
try:
s = '{},{},{},{}'.format(
rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum())
if crs.isValid():
s += ' [' + crs.authid() + ']'
self.crs = crs
self.leText.setText(s)
except:
pass
def selectExtent(self):
popupmenu = QMenu()
useCanvasExtentAction = QAction(
QCoreApplication.translate("ExtentSelectionPanel", 'Use Canvas Extent'),
self.btnSelect)
useLayerExtentAction = QAction(
QCoreApplication.translate("ExtentSelectionPanel", 'Use Layer Extent…'),
self.btnSelect)
selectOnCanvasAction = QAction(
self.tr('Select Extent on Canvas'), self.btnSelect)
popupmenu.addAction(useCanvasExtentAction)
popupmenu.addAction(selectOnCanvasAction)
popupmenu.addSeparator()
popupmenu.addAction(useLayerExtentAction)
selectOnCanvasAction.triggered.connect(self.selectOnCanvas)
useLayerExtentAction.triggered.connect(self.useLayerExtent)
useCanvasExtentAction.triggered.connect(self.useCanvasExtent)
if self.param.flags() & QgsProcessingParameterDefinition.FlagOptional:
useMincoveringExtentAction = QAction(
self.tr('Use Min Covering Extent from Input Layers'),
self.btnSelect)
useMincoveringExtentAction.triggered.connect(
self.useMinCoveringExtent)
popupmenu.addAction(useMincoveringExtentAction)
popupmenu.exec_(QCursor.pos())
def useMinCoveringExtent(self):
self.leText.setText('')
def useLayerExtent(self):
dlg = LayerSelectionDialog(self)
if dlg.exec_():
layer = dlg.selected_layer()
self.setValueFromRect(QgsReferencedRectangle(layer.extent(), layer.crs()))
def useCanvasExtent(self):
self.setValueFromRect(QgsReferencedRectangle(iface.mapCanvas().extent(),
iface.mapCanvas().mapSettings().destinationCrs()))
def selectOnCanvas(self):
canvas = iface.mapCanvas()
canvas.setMapTool(self.tool)
self.dialog.showMinimized()
def updateExtent(self):
r = self.tool.rectangle()
self.setValueFromRect(r)
def setValueFromRect(self, r):
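        # Extent strings follow the Processing convention "xmin,xmax,ymin,ymax [authid]".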
s = '{},{},{},{}'.format(
r.xMinimum(), r.xMaximum(), r.yMinimum(), r.yMaximum())
try:
self.crs = r.crs()
except:
self.crs = QgsProject.instance().crs()
if self.crs.isValid():
s += ' [' + self.crs.authid() + ']'
self.leText.setText(s)
self.tool.reset()
canvas = iface.mapCanvas()
canvas.setMapTool(self.prevMapTool)
self.dialog.showNormal()
self.dialog.raise_()
self.dialog.activateWindow()
def getValue(self):
if str(self.leText.text()).strip() != '':
return str(self.leText.text())
else:
return None
def setExtentFromString(self, s):
self.leText.setText(s)
| gpl-2.0 | 3,178,467,637,961,001,500 | 35.567961 | 116 | 0.588079 | false |
joopert/home-assistant | homeassistant/components/opensky/sensor.py | 3 | 5668 | """Sensor for the Open Sky Network."""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
LENGTH_KILOMETERS,
LENGTH_METERS,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import distance as util_distance
from homeassistant.util import location as util_location
_LOGGER = logging.getLogger(__name__)
CONF_ALTITUDE = "altitude"
ATTR_CALLSIGN = "callsign"
ATTR_ALTITUDE = "altitude"
ATTR_ON_GROUND = "on_ground"
ATTR_SENSOR = "sensor"
ATTR_STATES = "states"
DOMAIN = "opensky"
DEFAULT_ALTITUDE = 0
EVENT_OPENSKY_ENTRY = f"{DOMAIN}_entry"
EVENT_OPENSKY_EXIT = f"{DOMAIN}_exit"
SCAN_INTERVAL = timedelta(seconds=12) # opensky public limit is 10 seconds
OPENSKY_ATTRIBUTION = (
"Information provided by the OpenSky Network " "(https://opensky-network.org)"
)
OPENSKY_API_URL = "https://opensky-network.org/api/states/all"
OPENSKY_API_FIELDS = [
"icao24",
ATTR_CALLSIGN,
"origin_country",
"time_position",
"time_velocity",
ATTR_LONGITUDE,
ATTR_LATITUDE,
ATTR_ALTITUDE,
ATTR_ON_GROUND,
"velocity",
"heading",
"vertical_rate",
"sensors",
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RADIUS): vol.Coerce(float),
vol.Optional(CONF_NAME): cv.string,
vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude,
vol.Optional(CONF_ALTITUDE, default=DEFAULT_ALTITUDE): vol.Coerce(float),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Open Sky platform."""
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
add_entities(
[
OpenSkySensor(
hass,
config.get(CONF_NAME, DOMAIN),
latitude,
longitude,
config.get(CONF_RADIUS),
config.get(CONF_ALTITUDE),
)
],
True,
)
class OpenSkySensor(Entity):
"""Open Sky Network Sensor."""
def __init__(self, hass, name, latitude, longitude, radius, altitude):
"""Initialize the sensor."""
self._session = requests.Session()
self._latitude = latitude
self._longitude = longitude
self._radius = util_distance.convert(radius, LENGTH_KILOMETERS, LENGTH_METERS)
self._altitude = altitude
self._state = 0
self._hass = hass
self._name = name
self._previously_tracked = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def _handle_boundary(self, flights, event, metadata):
"""Handle flights crossing region boundary."""
for flight in flights:
if flight in metadata:
altitude = metadata[flight].get(ATTR_ALTITUDE)
else:
                # Assume the flight has landed if its metadata is missing.
altitude = 0
data = {
ATTR_CALLSIGN: flight,
ATTR_ALTITUDE: altitude,
ATTR_SENSOR: self._name,
}
self._hass.bus.fire(event, data)
def update(self):
"""Update device state."""
currently_tracked = set()
flight_metadata = {}
states = self._session.get(OPENSKY_API_URL).json().get(ATTR_STATES)
for state in states:
flight = dict(zip(OPENSKY_API_FIELDS, state))
callsign = flight[ATTR_CALLSIGN].strip()
if callsign != "":
flight_metadata[callsign] = flight
else:
continue
missing_location = (
flight.get(ATTR_LONGITUDE) is None or flight.get(ATTR_LATITUDE) is None
)
if missing_location:
continue
if flight.get(ATTR_ON_GROUND):
continue
distance = util_location.distance(
self._latitude,
self._longitude,
flight.get(ATTR_LATITUDE),
flight.get(ATTR_LONGITUDE),
)
if distance is None or distance > self._radius:
continue
altitude = flight.get(ATTR_ALTITUDE)
if altitude > self._altitude and self._altitude != 0:
continue
currently_tracked.add(callsign)
if self._previously_tracked is not None:
entries = currently_tracked - self._previously_tracked
exits = self._previously_tracked - currently_tracked
self._handle_boundary(entries, EVENT_OPENSKY_ENTRY, flight_metadata)
self._handle_boundary(exits, EVENT_OPENSKY_EXIT, flight_metadata)
self._state = len(currently_tracked)
self._previously_tracked = currently_tracked
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: OPENSKY_ATTRIBUTION}
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "flights"
@property
def icon(self):
"""Return the icon."""
return "mdi:airplane"
| apache-2.0 | 8,739,224,805,317,396,000 | 29.31016 | 87 | 0.602329 | false |
inquisite/Inquisite-Core | lib/managers/ListManager.py | 1 | 15412 | from lib.utils.Db import db
import re
from lib.exceptions.FindError import FindError
from lib.exceptions.DbError import DbError
from lib.exceptions.ValidationError import ValidationError
from lib.exceptions.SettingsValidationError import SettingsValidationError
from lib.decorators.Memoize import memoized
class ListManager:
def __init__(self):
pass
@staticmethod
def addList(repo_id, name, code, merge_setting, description='', items={}):
try:
repo_id = int(repo_id)
except TypeError:
raise DbError(message="Invalid repo_id provided", context="List.addList",
dberror="")
ret = {"exists": False}
try:
result = db.run("MATCH (l:List{code: {code}})--(r:Repository) WHERE ID(r) = {repo_id} RETURN ID(l) as id, l.name as name, l.code as code, l.description as description", {"code": code, "repo_id": int(repo_id)}).peek()
if result:
print result
ret = {
"exists": True,
"id": result['id'],
"code": result['code'],
"name": result['name'],
'description': result['description']
}
return ret
else:
result = db.run("MATCH (r:Repository) WHERE ID(r) = {repo_id} CREATE (l:List {name: {name}, code: {code}, description: {description}, merge_allowed: {merge}, storage: 'Graph'})-[:PART_OF]->(r) return ID(l) as id", {"repo_id": repo_id, "name": name, "code": code, "description": description, "merge": merge_setting}).peek()
except Exception as e:
raise DbError(message="Could not add list: " + e.message, context="List.addList",
dberror=e.message)
#add/edit List Items
item_status = {}
for item in items:
item_res = ListManager.addListItem(repo_id, code, item['display'], item['code'], item['description'])
if 'item_id' in item_res:
item_status[item['code']] = {'status_code': 200, 'item_id': item_res['item_id'], 'msg': 'Created new list item'}
else:
item_status[item['code']] = {'status_code': 200, 'item_id': None, 'msg': 'Could not create list item'}
if result:
ret = {
'id': result['id'],
'name': name,
'code': code,
'description': description,
'list_status': item_status
}
else:
raise DbError(message="Could not add list", context="List.addList",
dberror="")
return ret
@staticmethod
def editList(repo_id, list_id, name, code, description, items, delete_items, merge_setting):
try:
repo_id = int(repo_id)
list_id = int(list_id)
except TypeError:
raise DbError(message="Invalid repo_id or list_id provided", context="List.addListItem",
dberror="")
result = db.run(
"MATCH (r:Repository)--(l:List) WHERE ID(r) = {repo_id} AND ID(l) = {list_id} SET l.name = {name}, l.code = {code}, l.description = {description}, l.merge_allowed = {merge} RETURN ID(l) AS id",
{"repo_id": int(repo_id), "list_id": int(list_id), "name": name, "code": code, "description": description, "merge": merge_setting})
# add/edit fields
item_status = {}
for k in items:
if 'id' in items[k]:
# edit existing field
li_ret = ListManager.editListItem(repo_id, code, items[k].get('id', ''), items[k].get('display', ''), items[k].get('code', ''), items[k]['description'])
if 'item_id' in li_ret:
item_status[items[k]['code']] = {'status_code': 200, 'item_id': li_ret['item_id'],
'msg': 'Edited list item'}
else:
item_status[items[k]['code']] = {'status_code': 200, 'item_id': None,
'msg': 'Could not edit list item'}
else:
# add field
li_ret = ListManager.addListItem(repo_id, code, items[k].get('display', ''), items[k].get('code', ''), items[k].get('description', ''))
if 'item_id' in li_ret:
item_status[items[k]['code']] = {'status_code': 200, 'item_id': li_ret['item_id'], 'msg': 'Created new list item'}
else:
item_status[items[k]['code']] = {'status_code': 200, 'item_id': None, 'msg': 'Could not create new list item'}
# delete fields
if delete_items:
for item_id in delete_items:
ListManager.deleteListItem(repo_id, code, item_id)
if result:
ret = {}
for r in result:
ret['type'] = {
"id": r['id'],
"name": name,
"code": code,
"description": description,
"item_status": item_status
}
return ret
else:
raise DbError(message="Could not edit list", context="List.editList",
dberror="")
@staticmethod
def deleteList(repo_id, list_id):
try:
result = db.run("MATCH (l:List)-[x]-(r:Repository) WHERE ID(r) = {repo_id} AND ID(l) = {list_id} OPTIONAL MATCH (i:ListItem)-[y]-(l) DELETE x,y,l,i",
{"list_id": int(list_id), "repo_id": int(repo_id)})
if result is not None:
return {"list_id": list_id}
else:
raise FindError(message="Could not find list", context="Schema.deleteList", dberror="")
except Exception as e:
raise DbError(message="Could not delete list", context="Schema.deleteList", dberror=e.message)
@staticmethod
def addListItem(repo_id, code, display, item_code, description=None):
try:
repo_id = int(repo_id)
except TypeError:
raise DbError(message="Invalid repo_id provided", context="List.addListItem",
dberror="")
ret = {}
try:
if code is None or len(code) == 0:
raise ValidationError(message="List code is required", context="List.addListItem")
list_info = ListManager.getInfoForList(repo_id, code)
if list_info is None:
raise ValidationError(message="List code is invalid", context="List.addListItem")
if display is None:
raise ValidationError(message="Display value is required", context="List.addListItem")
if item_code is None:
raise ValidationError(message="List Item Code is required", context="List.addListItem")
if isinstance(code, int):
item_result = db.run("MATCH (i:ListItem {display: {display}})--(l:List {code: {code}})--(r:Repository) WHERE ID(r) = {repo_id} RETURN ID(i) as id, i.display as display", {"display": display, "code": code, "repo_id": repo_id}).peek()
else:
item_result = db.run("MATCH (i:ListItem {display: {display}})--(l:List {code: {code}})--(r:Repository) WHERE ID(r) = {repo_id} RETURN ID(i) as id, i.display as display", {"display": display, "code": code, "repo_id": repo_id}).peek()
if item_result is not None:
ret['exists'] = True
ret['item_id'] = item_result['id']
ret['display'] = item_result['display']
return ret
else:
item_flds = ["display: {display}", "code: {item_code}", "description: {description}"]
item_params = {"list_code": code, "repo_id": repo_id, "display": display, "item_code": item_code, "description": description}
add_result = db.run("MATCH (r:Repository)--(l:List {code: {list_code}}) WHERE ID(r) = {repo_id} CREATE (i:ListItem {" + ", ".join(item_flds) + "})-[:PART_OF]->(l) RETURN ID(i) as id, i.display as display, i.code as code", item_params)
r = add_result.peek()
if r:
ret['exists'] = False
ret['item_id'] = r['id']
ret['display'] = r['display']
ret['code'] = r['code']
return ret
else:
raise DbError(message="Could not add List Item", context="List.addListItem", dberror="")
except Exception as e:
raise DbError(message="Could not add List Item", context="List.addListItem", dberror=e.message)
@staticmethod
def editListItem(repo_id, code, item_id, display, item_code, description=None):
try:
repo_id = int(repo_id)
except TypeError:
raise DbError(message="Invalid repo_id provided", context="List.editListItem",
dberror="")
if code is None or len(code) == 0:
raise ValidationError(message="List code is required", context="List.editListItem")
if item_code is None or len(item_code) == 0:
raise ValidationError(message="List item code is required", context="List.editListItem")
if display is None or len(display) == 0:
raise ValidationError(message="List Item display is required", context="List.editListItem")
ret = {}
result = db.run(
"MATCH (i:ListItem {code: {item_code}})--(l:List {code: {code}})--(r:Repository) WHERE ID(r) = {repo_id} AND ID(i) <> {item_id} RETURN ID(i) as id, i.display as display",
{"item_code": item_code, "code": code, "repo_id": int(repo_id), "item_id": int(item_id)}).peek()
if result is not None:
ret['msg'] = "List Item already exists"
ret['item_id'] = result['id']
ret['display'] = result['display']
return ret
else:
flds = ["i.display = {display}", "i.code = {item_code}", "i.description = {description}"]
params = {"code": code, "repo_id": int(repo_id), "display": display, "item_code": item_code, "description": description, "item_id": int(item_id)}
result = db.run(
"MATCH (r:Repository)--(l:List {code: {code}})--(i:ListItem) WHERE ID(r) = {repo_id} AND ID(i) = {item_id} SET " + ", ".join(flds) + " RETURN ID(i) as id, i.display as display",
params)
r = result.peek()
# TODO: check query result
if r:
ret['item_id'] = r['id']
ret['display'] = r['display']
return ret
else:
raise DbError(message="Could not edit list item", context="List.editListItem", dberror="")
@staticmethod
def deleteListItem(repo_id, code, item_id):
try:
repo_id = int(repo_id)
except TypeError:
raise DbError(message="Invalid repo_id provided", context="List.deleteListItem",
dberror="")
try:
result = db.run(
"MATCH (r:Repository)--(l:List {code: {code}})-[x]-(i:ListItem) WHERE ID(r) = {repo_id} AND ID(i) = {item_id} DELETE i,x",
{"repo_id": int(repo_id), "item_id": int(item_id), "code": code})
if result is not None:
return True
else:
raise FindError(message="Could not find list item", context="List.deleteListItem", dberror="")
except Exception as e:
raise DbError(message="Could not delete list item", context="List.deleteListItem", dberror=e.message)
@staticmethod
def getListsForRepo(repo_id):
repo_id = int(repo_id)
ret = {'lists': []}
try:
lists_res = db.run("MATCH (r:Repository)--(l:List) WHERE ID(r) = {repo_id} RETURN ID(l) as id, l.name as name, l.code as code, l.description as description, l.merge_allowed as merge_allowed", {"repo_id": repo_id})
if lists_res:
for i_list in lists_res:
list_ret = {
'id': i_list['id'],
'name': i_list['name'],
'code': i_list['code'],
'description': i_list['description'],
'merge_allowed': i_list['merge_allowed']
}
ret['lists'].append(list_ret)
return ret
else:
return None
except Exception as e:
raise DbError(message="Could not get lists for repo", context="List.getListsForRepo", dberror=e.message)
@staticmethod
def getInfoForList(repo_id, code):
repo_id = int(repo_id)
try:
code = int(code)
except ValueError:
pass
try:
if isinstance(code, int):
list_res = db.run("MATCH (r:Repository)--(l:List) WHERE ID(l) = {code} AND ID(r) = {repo_id} RETURN ID(l) as id, l.name as name, l.code as code, l.description as description, l.merge_allowed as merge_allowed", {"code" :code, "repo_id": repo_id}).peek()
if list_res is None:
return None
items_res = db.run("MATCH (i:ListItem)--(l:List)--(r:Repository) WHERE ID(l) = {code} AND ID(r) = {repo_id} RETURN ID(i) as id, i.display as display, i.code as code, i.description as description", {"code": code, "repo_id": repo_id})
else:
list_res = db.run("MATCH (r:Repository)--(l:List) WHERE l.code = {code} AND ID(r) = {repo_id} RETURN ID(l) as id, l.name as name, l.code as code, l.description as description, l.merge_allowed as merge_allowed", {"code" :code, "repo_id": repo_id}).peek()
if list_res is None:
return None
items_res = db.run("MATCH (i:ListItem)--(l:List)--(r:Repository) WHERE l.code = {code} AND ID(r) = {repo_id} RETURN ID(i) as id, i.display as display, i.code as code, i.description as description", {"code": code, "repo_id": repo_id})
info = {'list_id': list_res['id'], 'name': list_res['name'], 'code': list_res['code'], 'description': list_res['description'], 'merge_allowed': list_res['merge_allowed']}
item_list = []
if items_res:
for r in items_res:
li = {'id': r['id'], 'display': r['display'], 'code': r['code'], 'description': r['description']}
item_list.append(li)
info['items'] = item_list
return info
except Exception as e:
raise DbError(message="Could not get list items for list", context="List.getInfoForList", dberror=e.message)
#
# Get number of unique values in a list
#
@staticmethod
def uniqueValueCount(valueSet):
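        # Splits every string value on ',', ';', '|' or '/' (non-strings are skipped)
        # and returns the number of distinct tokens, or False if there are none.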
uValues = set()
if isinstance(valueSet, list):
valueSet = set(valueSet)
for value in valueSet:
if not isinstance(value, basestring):
continue
split_regex = r'[,;|\/]{1}'
value_array = re.split(split_regex, value)
for va in value_array:
uValues.add(va)
if len(uValues) > 0:
return len(uValues)
return False
| gpl-3.0 | -2,056,531,698,664,674,300 | 47.772152 | 338 | 0.528355 | false |
kotobukki/MobSF-Translate | StaticAnalyzer/tools/enjarify/enjarify/jvm/mathops.py | 35 | 5689 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Autogenerated by genmathops.py - do not edit
from . import jvmops
from . import scalartypes as scalars
UNARY = {
0x7B: (jvmops.INEG, scalars.INT, scalars.INT),
0x7C: (jvmops.IXOR, scalars.INT, scalars.INT),
0x7D: (jvmops.LNEG, scalars.LONG, scalars.LONG),
0x7E: (jvmops.LXOR, scalars.LONG, scalars.LONG),
0x7F: (jvmops.FNEG, scalars.FLOAT, scalars.FLOAT),
0x80: (jvmops.DNEG, scalars.DOUBLE, scalars.DOUBLE),
0x81: (jvmops.I2L, scalars.INT, scalars.LONG),
0x82: (jvmops.I2F, scalars.INT, scalars.FLOAT),
0x83: (jvmops.I2D, scalars.INT, scalars.DOUBLE),
0x84: (jvmops.L2I, scalars.LONG, scalars.INT),
0x85: (jvmops.L2F, scalars.LONG, scalars.FLOAT),
0x86: (jvmops.L2D, scalars.LONG, scalars.DOUBLE),
0x87: (jvmops.F2I, scalars.FLOAT, scalars.INT),
0x88: (jvmops.F2L, scalars.FLOAT, scalars.LONG),
0x89: (jvmops.F2D, scalars.FLOAT, scalars.DOUBLE),
0x8A: (jvmops.D2I, scalars.DOUBLE, scalars.INT),
0x8B: (jvmops.D2L, scalars.DOUBLE, scalars.LONG),
0x8C: (jvmops.D2F, scalars.DOUBLE, scalars.FLOAT),
0x8D: (jvmops.I2B, scalars.INT, scalars.INT),
0x8E: (jvmops.I2C, scalars.INT, scalars.INT),
0x8F: (jvmops.I2S, scalars.INT, scalars.INT),
}
BINARY = {
0x90: (jvmops.IADD, scalars.INT, scalars.INT),
0x91: (jvmops.ISUB, scalars.INT, scalars.INT),
0x92: (jvmops.IMUL, scalars.INT, scalars.INT),
0x93: (jvmops.IDIV, scalars.INT, scalars.INT),
0x94: (jvmops.IREM, scalars.INT, scalars.INT),
0x95: (jvmops.IAND, scalars.INT, scalars.INT),
0x96: (jvmops.IOR, scalars.INT, scalars.INT),
0x97: (jvmops.IXOR, scalars.INT, scalars.INT),
0x98: (jvmops.ISHL, scalars.INT, scalars.INT),
0x99: (jvmops.ISHR, scalars.INT, scalars.INT),
0x9A: (jvmops.IUSHR, scalars.INT, scalars.INT),
0x9B: (jvmops.LADD, scalars.LONG, scalars.LONG),
0x9C: (jvmops.LSUB, scalars.LONG, scalars.LONG),
0x9D: (jvmops.LMUL, scalars.LONG, scalars.LONG),
0x9E: (jvmops.LDIV, scalars.LONG, scalars.LONG),
0x9F: (jvmops.LREM, scalars.LONG, scalars.LONG),
0xA0: (jvmops.LAND, scalars.LONG, scalars.LONG),
0xA1: (jvmops.LOR, scalars.LONG, scalars.LONG),
0xA2: (jvmops.LXOR, scalars.LONG, scalars.LONG),
0xA3: (jvmops.LSHL, scalars.LONG, scalars.INT),
0xA4: (jvmops.LSHR, scalars.LONG, scalars.INT),
0xA5: (jvmops.LUSHR, scalars.LONG, scalars.INT),
0xA6: (jvmops.FADD, scalars.FLOAT, scalars.FLOAT),
0xA7: (jvmops.FSUB, scalars.FLOAT, scalars.FLOAT),
0xA8: (jvmops.FMUL, scalars.FLOAT, scalars.FLOAT),
0xA9: (jvmops.FDIV, scalars.FLOAT, scalars.FLOAT),
0xAA: (jvmops.FREM, scalars.FLOAT, scalars.FLOAT),
0xAB: (jvmops.DADD, scalars.DOUBLE, scalars.DOUBLE),
0xAC: (jvmops.DSUB, scalars.DOUBLE, scalars.DOUBLE),
0xAD: (jvmops.DMUL, scalars.DOUBLE, scalars.DOUBLE),
0xAE: (jvmops.DDIV, scalars.DOUBLE, scalars.DOUBLE),
0xAF: (jvmops.DREM, scalars.DOUBLE, scalars.DOUBLE),
0xB0: (jvmops.IADD, scalars.INT, scalars.INT),
0xB1: (jvmops.ISUB, scalars.INT, scalars.INT),
0xB2: (jvmops.IMUL, scalars.INT, scalars.INT),
0xB3: (jvmops.IDIV, scalars.INT, scalars.INT),
0xB4: (jvmops.IREM, scalars.INT, scalars.INT),
0xB5: (jvmops.IAND, scalars.INT, scalars.INT),
0xB6: (jvmops.IOR, scalars.INT, scalars.INT),
0xB7: (jvmops.IXOR, scalars.INT, scalars.INT),
0xB8: (jvmops.ISHL, scalars.INT, scalars.INT),
0xB9: (jvmops.ISHR, scalars.INT, scalars.INT),
0xBA: (jvmops.IUSHR, scalars.INT, scalars.INT),
0xBB: (jvmops.LADD, scalars.LONG, scalars.LONG),
0xBC: (jvmops.LSUB, scalars.LONG, scalars.LONG),
0xBD: (jvmops.LMUL, scalars.LONG, scalars.LONG),
0xBE: (jvmops.LDIV, scalars.LONG, scalars.LONG),
0xBF: (jvmops.LREM, scalars.LONG, scalars.LONG),
0xC0: (jvmops.LAND, scalars.LONG, scalars.LONG),
0xC1: (jvmops.LOR, scalars.LONG, scalars.LONG),
0xC2: (jvmops.LXOR, scalars.LONG, scalars.LONG),
0xC3: (jvmops.LSHL, scalars.LONG, scalars.INT),
0xC4: (jvmops.LSHR, scalars.LONG, scalars.INT),
0xC5: (jvmops.LUSHR, scalars.LONG, scalars.INT),
0xC6: (jvmops.FADD, scalars.FLOAT, scalars.FLOAT),
0xC7: (jvmops.FSUB, scalars.FLOAT, scalars.FLOAT),
0xC8: (jvmops.FMUL, scalars.FLOAT, scalars.FLOAT),
0xC9: (jvmops.FDIV, scalars.FLOAT, scalars.FLOAT),
0xCA: (jvmops.FREM, scalars.FLOAT, scalars.FLOAT),
0xCB: (jvmops.DADD, scalars.DOUBLE, scalars.DOUBLE),
0xCC: (jvmops.DSUB, scalars.DOUBLE, scalars.DOUBLE),
0xCD: (jvmops.DMUL, scalars.DOUBLE, scalars.DOUBLE),
0xCE: (jvmops.DDIV, scalars.DOUBLE, scalars.DOUBLE),
0xCF: (jvmops.DREM, scalars.DOUBLE, scalars.DOUBLE),
}
BINARY_LIT = {
0xD0: jvmops.IADD,
0xD1: jvmops.ISUB,
0xD2: jvmops.IMUL,
0xD3: jvmops.IDIV,
0xD4: jvmops.IREM,
0xD5: jvmops.IAND,
0xD6: jvmops.IOR,
0xD7: jvmops.IXOR,
0xD8: jvmops.IADD,
0xD9: jvmops.ISUB,
0xDA: jvmops.IMUL,
0xDB: jvmops.IDIV,
0xDC: jvmops.IREM,
0xDD: jvmops.IAND,
0xDE: jvmops.IOR,
0xDF: jvmops.IXOR,
0xE0: jvmops.ISHL,
0xE1: jvmops.ISHR,
0xE2: jvmops.IUSHR,
}
| gpl-3.0 | 5,167,459,214,392,227,000 | 43.445313 | 74 | 0.682897 | false |
tejpochiraju/tejpochiraju.github.io | tag_pages.py | 1 | 3013 | #!/bin/python
import csv
data_file = "_data/projects.csv"
def add_to_set(input_list,output_set):
for el in input_list:
el = el.strip()
if el:
output_set.add(el.strip())
def main():
#Initialise the main dictionary
data = {}
tags = set()
contributions = set()
arenas = set()
domains = set()
statuses = set()
clients = set()
patents = set()
trls = set()
file_string = "---\nlayout: portfolio\npermalink: /tag/tag_url/\ntags: tag_name\n---"
nav = "nav:\n"
nav_section = " - title: category_name\n items:\n"
nav_item = " - page: \"item_name\"\n url: /tag/item_url/\n"
with open(data_file) as csvfile:
#Read the csv file as a list of dictionaries
reader = csv.DictReader(csvfile)
for row in reader:
row_tags = row["Tags"].split(",")
row_contributions = row["Contribution"].split(",")
row_arenas = row["Arena"].split(",")
row_domains = row["Domains"].split(",")
row_statuses = row["Status"].split(",")
row_clients = row["Client"].split(",")
row_patents = row["Patent"].split(",")
row_trls = row["TRL"].split(",")
add_to_set(row_tags,tags)
add_to_set(row_contributions,contributions)
add_to_set(row_arenas,arenas)
add_to_set(row_domains,domains)
add_to_set(row_statuses,statuses)
add_to_set(row_clients,clients)
add_to_set(row_patents,patents)
add_to_set(row_trls,trls)
nav += nav_section.replace("category_name","Contribution")
for item in contributions:
nav += nav_item.replace("item_name",item).replace("item_url",item.lower())
nav += nav_section.replace("category_name","Domain")
for item in domains:
nav += nav_item.replace("item_name",item).replace("item_url",item.lower())
nav += nav_section.replace("category_name","Status")
for item in statuses:
nav += nav_item.replace("item_name",item).replace("item_url",item.lower())
nav += nav_section.replace("category_name","Arena")
for item in arenas:
nav += nav_item.replace("item_name",item).replace("item_url",item.lower())
# nav += nav_section.replace("category_name","Patent")
# for item in patents:
# nav += nav_item.replace("item_name",item).replace("item_url",item.lower())
nav += nav_section.replace("category_name","TRL")
for item in trls:
nav += nav_item.replace("item_name",item).replace("item_url","trl{}".format(item).lower())
with open ("_data/navigation.yml", "w") as f:
f.write(nav)
for tag in tags:
tag_file_string = file_string.replace("tag_url",tag.lower()).replace("tag_name",tag)
with open ("_pages/tag/{}.md".format(tag.lower()), "w") as f:
f.write(tag_file_string)
#Execute the main loop
if __name__ == "__main__":
main()
| mit | -1,236,609,783,463,576,800 | 31.397849 | 98 | 0.567541 | false |
mcking49/apache-flask | Python/Lib/site-packages/gevent/_tblib.py | 9 | 9490 | # -*- coding: utf-8 -*-
# A vendored version of part of https://github.com/ionelmc/python-tblib
####
# Copyright (c) 2013-2014, Ionel Cristian Mărieș
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
####
# cpython.py
"""
Taken verbatim from Jinja2.
https://github.com/mitsuhiko/jinja2/blob/master/jinja2/debug.py#L267
"""
#import platform # XXX: gevent cannot import platform at the top level; interferes with monkey patching
import sys
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
    any Python traceback object. Do not attempt to use this on non-CPython
    interpreters.
"""
import ctypes
from types import TracebackType
    # figure out size of _Py_ssize_t
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
# regular python
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
# python with trace
if hasattr(sys, 'getobjects'):
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('_ob_next', ctypes.POINTER(_PyObject)),
('_ob_prev', ctypes.POINTER(_PyObject)),
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
class _Traceback(_PyObject):
pass
_Traceback._fields_ = [
('tb_next', ctypes.POINTER(_Traceback)),
('tb_frame', ctypes.POINTER(_PyObject)),
('tb_lasti', ctypes.c_int),
('tb_lineno', ctypes.c_int)
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
tb_set_next = None
# try:
# if platform.python_implementation() == 'CPython':
# #tb_set_next = _init_ugly_crap()
# tb_set_next = None
# except Exception as exc:
# sys.stderr.write("Failed to initialize cpython support: {!r}".format(exc))
# del _init_ugly_crap
# __init__.py
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
#if not tb_set_next and not tproxy:
# raise ImportError("Cannot use tblib. Runtime not supported.")
from types import CodeType
from types import TracebackType
PY3 = sys.version_info[0] == 3
class _AttrDict(dict):
def __getattr__(self, attr):
return self[attr]
class __traceback_maker(Exception):
pass
class Code(object):
def __init__(self, code):
self.co_filename = code.co_filename
self.co_name = code.co_name
self.co_nlocals = code.co_nlocals
self.co_stacksize = code.co_stacksize
self.co_flags = code.co_flags
self.co_firstlineno = code.co_firstlineno
class Frame(object):
def __init__(self, frame):
# gevent: python 2.6 syntax fix
self.f_globals = {'__file__': frame.f_globals.get('__file__'),
'__name__': frame.f_globals.get('__name__')}
self.f_code = Code(frame.f_code)
class Traceback(object):
def __init__(self, tb):
self.tb_frame = Frame(tb.tb_frame)
self.tb_lineno = tb.tb_lineno
if tb.tb_next is None:
self.tb_next = None
else:
self.tb_next = Traceback(tb.tb_next)
def as_traceback(self):
if tproxy:
return tproxy(TracebackType, self.__tproxy_handler)
elif tb_set_next:
f_code = self.tb_frame.f_code
code = compile('\n' * (self.tb_lineno - 1) + 'raise __traceback_maker', self.tb_frame.f_code.co_filename, 'exec')
if PY3:
code = CodeType(
0, 0,
f_code.co_nlocals, f_code.co_stacksize, f_code.co_flags,
code.co_code, code.co_consts, code.co_names, code.co_varnames,
f_code.co_filename, f_code.co_name,
code.co_firstlineno, b"",
(), ()
)
else:
code = CodeType(
0,
f_code.co_nlocals, f_code.co_stacksize, f_code.co_flags,
code.co_code, code.co_consts, code.co_names, code.co_varnames,
f_code.co_filename.encode(), f_code.co_name.encode(),
code.co_firstlineno, b"",
(), ()
)
try:
exec(code, self.tb_frame.f_globals, {})
except:
tb = sys.exc_info()[2].tb_next
tb_set_next(tb, self.tb_next and self.tb_next.as_traceback())
try:
return tb
finally:
del tb
else:
raise RuntimeError("Cannot re-create traceback !")
def __tproxy_handler(self, operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
if args[0] == 'tb_next':
return self.tb_next and self.tb_next.as_traceback()
else:
return getattr(self, args[0])
else:
return getattr(self, operation)(*args, **kwargs)
# pickling_support.py
def unpickle_traceback(tb_frame, tb_lineno, tb_next):
ret = object.__new__(Traceback)
ret.tb_frame = tb_frame
ret.tb_lineno = tb_lineno
ret.tb_next = tb_next
return ret.as_traceback()
def pickle_traceback(tb):
return unpickle_traceback, (Frame(tb.tb_frame), tb.tb_lineno, tb.tb_next and Traceback(tb.tb_next))
def install():
try:
import copy_reg
except ImportError:
import copyreg as copy_reg
copy_reg.pickle(TracebackType, pickle_traceback)
# Added by gevent
# We have to defer the initialization, and especially the import of platform,
# until runtime. If we're monkey patched, we need to be sure to use
# the original __import__ to avoid switching through the hub due to
# import locks on Python 2. See also builtins.py for details.
def _unlocked_imports(f):
def g(a):
gb = None
if 'gevent.builtins' in sys.modules:
gb = sys.modules['gevent.builtins']
gb._unlock_imports()
try:
return f(a)
finally:
if gb is not None:
gb._lock_imports()
g.__name__ = f.__name__
g.__module__ = f.__module__
return g
def _import_dump_load():
global dumps
global loads
try:
import cPickle as pickle
except ImportError:
import pickle
dumps = pickle.dumps
loads = pickle.loads
dumps = loads = None
_installed = False
def _init():
global _installed
global tb_set_next
if _installed:
return
_installed = True
import platform
try:
if platform.python_implementation() == 'CPython':
tb_set_next = _init_ugly_crap()
except Exception as exc:
sys.stderr.write("Failed to initialize cpython support: {!r}".format(exc))
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
if not tb_set_next and not tproxy:
raise ImportError("Cannot use tblib. Runtime not supported.")
_import_dump_load()
install()
@_unlocked_imports
def dump_traceback(tb):
# Both _init and dump/load have to be unlocked, because
# copy_reg and pickle can do imports to resolve class names; those
# class names are in this module and greenlet safe though
_init()
return dumps(tb)
@_unlocked_imports
def load_traceback(s):
_init()
return loads(s)
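# Illustrative sketch (commented out, not executed): round-tripping a traceback
# through dump_traceback/load_traceback; ``risky_call`` is a placeholder name.
#
#     try:
#         risky_call()
#     except Exception:
#         payload = dump_traceback(sys.exc_info()[2])  # pickled bytes
#         tb = load_traceback(payload)                 # usable traceback again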
| mit | 8,241,235,972,606,930,000 | 30.210526 | 125 | 0.606134 | false |
PytLab/gaft | gaft/operators/selection/linear_ranking_selection.py | 1 | 2063 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Linear Ranking Selection implementation. '''
from random import random
from itertools import accumulate
from bisect import bisect_right
from ...plugin_interfaces.operators.selection import Selection
class LinearRankingSelection(Selection):
''' Selection operator using Linear Ranking selection method.
Reference: Baker J E. Adaptive selection methods for genetic
algorithms[C]//Proceedings of an International Conference on Genetic
Algorithms and their applications. 1985: 101-111.
'''
def __init__(self, pmin=0.1, pmax=0.9):
# Selection probabilities for the worst and best individuals.
self.pmin, self.pmax = pmin, pmax
def select(self, population, fitness):
''' Select a pair of parent individuals using linear ranking method.
:param population: Population where the selection operation occurs.
        :type population: :obj:`gaft.components.Population`
        :param fitness: Fitness function used to rank the individuals.
        :return: Selected parents (a father and a mother)
:rtype: list of :obj:`gaft.components.IndividualBase`
'''
# Individual number.
NP = len(population)
# Add rank to all individuals in population.
all_fits = population.all_fits(fitness)
indvs = population.individuals
sorted_indvs = sorted(indvs,
key=lambda indv: all_fits[indvs.index(indv)])
# Assign selection probabilities linearly.
# NOTE: Here the rank i belongs to {1, ..., N}
p = lambda i: (self.pmin + (self.pmax - self.pmin)*(i-1)/(NP-1))
probabilities = [self.pmin] + [p(i) for i in range(2, NP)] + [self.pmax]
# Normalize probabilities.
psum = sum(probabilities)
wheel = list(accumulate([p/psum for p in probabilities]))
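        # Worked example (illustrative only): with NP = 5, pmin = 0.1, pmax = 0.9
        # the ranked probabilities are [0.1, 0.3, 0.5, 0.7, 0.9]; dividing by
        # their sum (2.5) gives the cumulative wheel [0.04, 0.16, 0.36, 0.64, 1.0].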
# Select parents.
father_idx = bisect_right(wheel, random())
father = sorted_indvs[father_idx]
mother_idx = (father_idx + 1) % len(wheel)
mother = sorted_indvs[mother_idx]
return father, mother
| gpl-3.0 | -8,291,782,990,477,367,000 | 34.568966 | 80 | 0.647601 | false |
jessrosenfield/pants | src/python/pants/reporting/plaintext_reporter.py | 13 | 9618 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import namedtuple
import six
from colors import cyan, green, red, yellow
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.reporting.plaintext_reporter_base import PlainTextReporterBase
from pants.reporting.report import Report
from pants.reporting.reporter import Reporter
from pants.util.memo import memoized_method
class ToolOutputFormat(object):
"""Configuration item for displaying Tool Output to the console."""
SUPPRESS = 'SUPPRESS' # Do not display output from the workunit unless its outcome != SUCCESS
INDENT = 'INDENT' # Indent the output to line up with the indentation of the other log output
UNINDENTED = 'UNINDENTED' # Display the output raw, with no leading indentation
@classmethod
@memoized_method
def keys(cls):
return [key for key in dir(cls) if not key.startswith('_') and key.isupper()]
class LabelFormat(object):
"""Configuration item for displaying a workunit label to the console."""
SUPPRESS = 'SUPPRESS' # Don't show the label at all
DOT = 'DOT' # Just output a single '.' with no newline
FULL = 'FULL' # Show the timestamp and label
CHILD_SUPPRESS = 'CHILD_SUPPRESS' # Suppress labels for all children of this node
CHILD_DOT = 'CHILD_DOT' # Display a dot for all children of this node
@classmethod
@memoized_method
def keys(cls):
return [key for key in dir(cls) if not key.startswith('_') and key.isupper()]
class PlainTextReporter(PlainTextReporterBase):
"""Plain-text reporting to stdout.
We only report progress for things under the default work root. It gets too
confusing to try and show progress for background work too.
"""
# Console reporting settings.
# outfile: Write to this file-like object.
# color: use ANSI colors in output.
# indent: Whether to indent the reporting to reflect the nesting of workunits.
# timing: Show timing report at the end of the run.
# cache_stats: Show artifact cache report at the end of the run.
Settings = namedtuple('Settings',
Reporter.Settings._fields + ('outfile', 'color', 'indent', 'timing',
'cache_stats', 'label_format',
'tool_output_format'))
_COLOR_BY_LEVEL = {
Report.FATAL: red,
Report.ERROR: red,
Report.WARN: yellow,
Report.INFO: green,
Report.DEBUG: cyan
}
# Format the std output from these workunit types as specified. If no format is specified, the
# default is ToolOutputFormat.SUPPRESS
TOOL_OUTPUT_FORMATTING = {
WorkUnitLabel.MULTITOOL: ToolOutputFormat.SUPPRESS,
WorkUnitLabel.BOOTSTRAP: ToolOutputFormat.SUPPRESS,
WorkUnitLabel.COMPILER : ToolOutputFormat.INDENT,
WorkUnitLabel.TEST : ToolOutputFormat.INDENT,
WorkUnitLabel.REPL : ToolOutputFormat.UNINDENTED,
WorkUnitLabel.RUN : ToolOutputFormat.UNINDENTED
}
# Format the labels from these workunit types as specified. If no format is specified, the
# default is LabelFormat.FULL
LABEL_FORMATTING = {
WorkUnitLabel.MULTITOOL: LabelFormat.CHILD_DOT,
WorkUnitLabel.BOOTSTRAP: LabelFormat.CHILD_SUPPRESS,
}
def __init__(self, run_tracker, settings):
super(PlainTextReporter, self).__init__(run_tracker, settings)
for key, value in settings.label_format.items():
if key not in WorkUnitLabel.keys():
self.emit('*** Got invalid key {} for --reporting-console-label-format. Expected one of {}\n'
.format(key, WorkUnitLabel.keys()))
if value not in LabelFormat.keys():
self.emit('*** Got invalid value {} for --reporting-console-label-format. Expected one of {}\n'
.format(value, LabelFormat.keys()))
for key, value in settings.tool_output_format.items():
if key not in WorkUnitLabel.keys():
self.emit('*** Got invalid key {} for --reporting-console-tool-output-format. Expected one of {}\n'
.format(key, WorkUnitLabel.keys()))
if value not in ToolOutputFormat.keys():
self.emit('*** Got invalid value {} for --reporting-console-tool-output-format. Expected one of {}\n'
.format(value, ToolOutputFormat.keys()))
# Mix in the new settings with the defaults.
self.LABEL_FORMATTING.update(settings.label_format.items())
self.TOOL_OUTPUT_FORMATTING.update(settings.tool_output_format.items())
def open(self):
"""Implementation of Reporter callback."""
pass
def close(self):
"""Implementation of Reporter callback."""
self.emit(self.generate_epilog(self.settings))
def start_workunit(self, workunit):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return
label_format = self._get_label_format(workunit)
if label_format == LabelFormat.FULL:
self._emit_indented_workunit_label(workunit)
# Start output on a new line.
tool_output_format = self._get_tool_output_format(workunit)
if tool_output_format == ToolOutputFormat.INDENT:
self.emit(self._prefix(workunit, b'\n'))
elif tool_output_format == ToolOutputFormat.UNINDENTED:
self.emit(b'\n')
elif label_format == LabelFormat.DOT:
self.emit(b'.')
self.flush()
def end_workunit(self, workunit):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return
if workunit.outcome() != WorkUnit.SUCCESS and not self._show_output(workunit):
# Emit the suppressed workunit output, if any, to aid in debugging the problem.
if self._get_label_format(workunit) != LabelFormat.FULL:
self._emit_indented_workunit_label(workunit)
for name, outbuf in workunit.outputs().items():
self.emit(self._prefix(workunit, b'\n==== {} ====\n'.format(name)))
self.emit(self._prefix(workunit, outbuf.read_from(0)))
self.flush()
def do_handle_log(self, workunit, level, *msg_elements):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return
# If the element is a (msg, detail) pair, we ignore the detail. There's no
# useful way to display it on the console.
elements = [e if isinstance(e, six.string_types) else e[0] for e in msg_elements]
msg = b'\n' + b''.join(elements)
if self.use_color_for_workunit(workunit, self.settings.color):
msg = self._COLOR_BY_LEVEL.get(level, lambda x: x)(msg)
self.emit(self._prefix(workunit, msg))
self.flush()
def handle_output(self, workunit, label, s):
"""Implementation of Reporter callback."""
if not self.is_under_main_root(workunit):
return
tool_output_format = self._get_tool_output_format(workunit)
if tool_output_format == ToolOutputFormat.INDENT:
self.emit(self._prefix(workunit, s))
elif tool_output_format == ToolOutputFormat.UNINDENTED:
self.emit(s)
self.flush()
def emit(self, s):
self.settings.outfile.write(s)
def flush(self):
self.settings.outfile.flush()
def _get_label_format(self, workunit):
for label, label_format in self.LABEL_FORMATTING.items():
if workunit.has_label(label):
return label_format
# Recursively look for a setting to suppress child label formatting.
if workunit.parent:
label_format = self._get_label_format(workunit.parent)
if label_format == LabelFormat.CHILD_DOT:
return LabelFormat.DOT
if label_format == LabelFormat.CHILD_SUPPRESS:
return LabelFormat.SUPPRESS
return LabelFormat.FULL
def _get_tool_output_format(self, workunit):
for label, tool_output_format in self.TOOL_OUTPUT_FORMATTING.items():
if workunit.has_label(label):
return tool_output_format
return ToolOutputFormat.SUPPRESS
def _emit_indented_workunit_label(self, workunit):
self.emit(b'\n{} {} {}[{}]'.format(
workunit.start_time_string,
workunit.start_delta_string,
self._indent(workunit),
workunit.name if self.settings.indent else workunit.path()))
# Emit output from some tools and not others.
# This is an arbitrary choice, but one that turns out to be useful to users in practice.
def _show_output(self, workunit):
tool_output_format = self._get_tool_output_format(workunit)
return not tool_output_format == ToolOutputFormat.SUPPRESS
def _format_aggregated_timings(self, aggregated_timings):
return b'\n'.join([b'{timing:.3f} {label}'.format(**x) for x in aggregated_timings.get_all()])
def _format_artifact_cache_stats(self, artifact_cache_stats):
stats = artifact_cache_stats.get_all()
return b'No artifact cache reads.' if not stats else \
b'\n'.join([b'{cache_name} - Hits: {num_hits} Misses: {num_misses}'.format(**x)
for x in stats])
def _indent(self, workunit):
return b' ' * (len(workunit.ancestors()) - 1)
_time_string_filler = b' ' * len('HH:MM:SS mm:ss ')
def _prefix(self, workunit, s):
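    # Pad everything after a '\r' or '\n' with filler the width of the
    # timestamp columns (plus the workunit indent) so tool output stays aligned
    # under the labels emitted by _emit_indented_workunit_label.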
if self.settings.indent:
def replace(x, c):
return x.replace(c, c + PlainTextReporter._time_string_filler + self._indent(workunit))
return replace(replace(s, b'\r'), b'\n')
else:
return PlainTextReporter._time_string_filler + s
| apache-2.0 | -6,395,237,733,280,197,000 | 38.743802 | 109 | 0.674049 | false |
daniaki/pyPPI | pyppi/tests/test_uniprot.py | 1 | 9938 | import os
import time
from unittest import TestCase
from Bio import SwissProt
from ..database import create_session, delete_database, cleanup_database
from ..database.models import Protein
from ..data_mining.uniprot import (
parallel_download, download_record,
parse_record_into_protein,
go_terms, interpro_terms, pfam_terms,
keywords, gene_name, recent_accession, taxonid,
review_status, batch_map, function
)
base_path = os.path.dirname(__file__)
class TestUniProtMethods(TestCase):
def setUp(self):
self.records = open(os.path.normpath(
"{}/test_data/test_sprot_records.dat".format(base_path)
), 'rt')
self.db_path = '{}/databases/test.db'.format(base_path)
self.session, self.engine = create_session(self.db_path)
def tearDown(self):
self.records.close()
delete_database(self.session)
cleanup_database(self.session, self.engine)
def test_parses_gomf_correctly(self):
for record in SwissProt.parse(self.records):
result = go_terms(record, ont="mf")
break
expected = [
'GO:0045296',
'GO:0019899',
'GO:0042826',
'GO:0042802',
'GO:0051219',
'GO:0050815',
'GO:0008022',
'GO:0032403',
'GO:0019904',
'GO:0003714',
]
self.assertEqual(result, expected)
def test_parses_gobp_correctly(self):
for record in SwissProt.parse(self.records):
result = go_terms(record, ont="bp")
break
expected = [
'GO:0051220',
'GO:0035329',
'GO:0000165',
'GO:0061024',
'GO:0045744',
'GO:0035308',
'GO:0045892',
'GO:0043085',
'GO:1900740',
'GO:0051291',
'GO:0006605',
'GO:0043488',
'GO:0016032',
]
self.assertEqual(result, expected)
def test_parses_gocc_correctly(self):
for record in SwissProt.parse(self.records):
result = go_terms(record, ont="cc")
break
expected = [
'GO:0005737',
'GO:0030659',
'GO:0005829',
'GO:0070062',
'GO:0005925',
'GO:0042470',
'GO:0016020',
'GO:0005739',
'GO:0005634',
'GO:0048471',
'GO:0043234',
'GO:0017053',
]
self.assertEqual(result, expected)
def test_parses_interpro_correctly(self):
for record in SwissProt.parse(self.records):
result = interpro_terms(record)
break
expected = [
'IPR000308',
'IPR023409',
'IPR036815',
'IPR023410',
]
self.assertEqual(result, expected)
def test_parses_pfam_correctly(self):
for record in SwissProt.parse(self.records):
result = pfam_terms(record)
break
expected = ['PF00244']
self.assertEqual(result, expected)
def test_parses_keywords_correctly(self):
for record in SwissProt.parse(self.records):
result = keywords(record)
break
expected = [
'3D-structure', 'Acetylation', 'Alternative initiation',
'Complete proteome', 'Cytoplasm', 'Direct protein sequencing',
'Host-virus interaction', 'Isopeptide bond', 'Nitration',
'Phosphoprotein', 'Polymorphism', 'Reference proteome',
'Ubl conjugation'
]
self.assertEqual(result, expected)
def test_parses_review_status_correctly(self):
for record in SwissProt.parse(self.records):
result = review_status(record)
break
expected = 'Reviewed'
self.assertEqual(result, expected)
def test_parses_gene_name_correctly(self):
for record in SwissProt.parse(self.records):
result = gene_name(record)
break
expected = 'YWHAB'
self.assertEqual(result, expected)
def test_parses_taxonid_correctly(self):
for record in SwissProt.parse(self.records):
result = taxonid(record)
break
expected = 9606
self.assertEqual(result, expected)
def test_parses_recent_accession_correctly(self):
for record in SwissProt.parse(self.records):
result = recent_accession(record)
break
expected = 'P31946'
self.assertEqual(result, expected)
def test_parses_function_correctly(self):
for record in SwissProt.parse(self.records):
result = function(record)
break
self.assertIn("Adapter protein implicated in the regulation", result)
def test_parses_function_as_None_for_entry_with_no_comment(self):
for record in SwissProt.parse(self.records):
r = record
break
r.comments = [x for x in r.comments if "FUNCTION: " not in x]
result = function(r)
expected = None
self.assertEqual(result, expected)
def test_can_parse_record_into_protein_objects(self):
for record in SwissProt.parse(self.records):
obj = parse_record_into_protein(record)
break
self.assertEqual(obj.uniprot_id, "P31946")
self.assertEqual(obj.gene_id, "YWHAB")
self.assertEqual(obj.reviewed, True)
def test_returns_none_when_parsing_None_record(self):
self.assertIsNone(parse_record_into_protein(None))
def test_download_returns_None_if_taxids_not_matching(self):
record = download_record('P48193', wait=1, retries=0) # Mouse
self.assertEqual(record, None)
def test_download_returns_None_if_record_not_found(self):
record = download_record('abc', wait=1, retries=0) # Invalid Protein
self.assertEqual(record, None)
def test_can_parallel_download(self):
accessions = ['P30443', 'O75581', 'P51828']
records = parallel_download(accessions, n_jobs=3, wait=1, retries=0)
entries = [parse_record_into_protein(r) for r in records]
self.assertEqual(entries[0].uniprot_id, accessions[0])
self.assertEqual(entries[1].uniprot_id, accessions[1])
self.assertEqual(entries[2].uniprot_id, accessions[2])
def test_batch_map_keeps_unreviewed(self):
protein1 = Protein(
uniprot_id='P50224', taxon_id=9606, reviewed=False)
protein2 = Protein(
uniprot_id='P0DMN0', taxon_id=9606, reviewed=True)
protein3 = Protein(
uniprot_id='P0DMM9', taxon_id=9606, reviewed=False)
protein1.save(self.session, commit=True)
protein2.save(self.session, commit=True)
protein3.save(self.session, commit=True)
mapping = batch_map(
session=self.session, accessions=['P50224'], keep_unreviewed=True,
match_taxon_id=None
)
self.assertEqual(mapping, {"P50224": ['P0DMM9', 'P0DMN0']})
def test_batch_map_filters_unreviewed(self):
protein1 = Protein(
uniprot_id='P50224', taxon_id=9606, reviewed=False)
protein2 = Protein(
uniprot_id='P0DMN0', taxon_id=9606, reviewed=True)
protein3 = Protein(
uniprot_id='P0DMM9', taxon_id=9606, reviewed=False)
protein1.save(self.session, commit=True)
protein2.save(self.session, commit=True)
protein3.save(self.session, commit=True)
mapping = batch_map(
session=self.session, accessions=['P50224'], keep_unreviewed=False,
match_taxon_id=None
)
self.assertEqual(mapping, {"P50224": ["P0DMN0"]})
def test_batch_map_filters_non_matching_taxon_ids(self):
protein1 = Protein(
uniprot_id='P50224', taxon_id=9606, reviewed=False)
protein2 = Protein(
uniprot_id='P0DMN0', taxon_id=9606, reviewed=True)
protein3 = Protein(
uniprot_id='P0DMM9', taxon_id=9606, reviewed=False)
protein1.save(self.session, commit=True)
protein2.save(self.session, commit=True)
protein3.save(self.session, commit=True)
mapping = batch_map(
session=self.session, accessions=['P50224'], keep_unreviewed=False,
match_taxon_id=0
)
self.assertEqual(mapping, {"P50224": []})
def test_batch_map_filters_keeps_matching_taxon_ids(self):
protein1 = Protein(
uniprot_id='P50224', taxon_id=9606, reviewed=False)
protein2 = Protein(
uniprot_id='P0DMN0', taxon_id=9606, reviewed=True)
protein3 = Protein(
uniprot_id='P0DMM9', taxon_id=9606, reviewed=False)
protein1.save(self.session, commit=True)
protein2.save(self.session, commit=True)
protein3.save(self.session, commit=True)
mapping = batch_map(
session=self.session, accessions=['P50224'], keep_unreviewed=True,
match_taxon_id=9606
)
self.assertEqual(mapping, {"P50224": ['P0DMM9', 'P0DMN0']})
def test_batch_map_downloads_missing_records(self):
mapping = batch_map(
session=self.session, accessions=['P50224'], keep_unreviewed=True,
match_taxon_id=9606, allow_download=True
)
self.assertEqual(mapping, {"P50224": ['P0DMM9', 'P0DMN0']})
def test_batch_map_doesnt_save_invalid_record(self):
mapping = batch_map(
session=self.session, accessions=['P50224'], match_taxon_id=0,
allow_download=True
)
self.assertEqual(mapping, {"P50224": []})
def test_batch_return_empty_list_if_accession_maps_to_invalid_record(self):
mapping = batch_map(
session=self.session, accessions=['Q02248'], match_taxon_id=9606
)
self.assertEqual(mapping, {"Q02248": []})
| mit | -1,787,943,295,839,534,300 | 34.366548 | 79 | 0.59187 | false |
centricular/cerbero | cerbero/bootstrap/windows.py | 1 | 8745 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import tempfile
import shutil
from pathlib import Path
from cerbero.bootstrap import BootstrapperBase
from cerbero.bootstrap.bootstrapper import register_bootstrapper
from cerbero.config import Architecture, Distro, Platform
from cerbero.errors import ConfigurationError
from cerbero.utils import shell, _, fix_winpath, to_unixpath, git
from cerbero.utils import messages as m
# Toolchain
GCC_VERSION = '4.7.3'
MINGW_DOWNLOAD_SOURCE = 'http://gstreamer.freedesktop.org/data/cerbero/toolchain/windows'
MINGW_TARBALL_TPL = "mingw-%s-gcc-%s-%s-%s.tar.xz"
# MinGW Perl
PERL_VERSION = '5.24.0'
MINGW_PERL_TPL = 'https://sourceforge.net/projects/perl-mingw/files/{0}/perl-{0}-mingw32.zip'
# Extra dependencies
MINGWGET_DEPS = ['msys-wget', 'msys-flex', 'msys-bison', 'msys-perl']
GNOME_FTP = 'http://ftp.gnome.org/pub/gnome/binaries/win32/'
WINDOWS_BIN_DEPS = ['intltool/0.40/intltool_0.40.4-1_win32.zip']
class WindowsBootstrapper(BootstrapperBase):
'''
    Bootstrapper for Windows builds.
    Installs the mingw-w64 compiler toolchain and headers for DirectX.
'''
def start(self):
if not git.check_line_endings(self.config.platform):
raise ConfigurationError("git is configured to use automatic line "
"endings conversion. You can fix it running:\n"
"$git config core.autocrlf false")
self.prefix = self.config.toolchain_prefix
self.perl_prefix = self.config.mingw_perl_prefix
self.platform = self.config.target_platform
self.arch = self.config.target_arch
if self.arch == Architecture.X86:
self.version = 'w32'
else:
self.version = 'w64'
self.platform = self.config.platform
self.check_dirs()
if self.platform == Platform.WINDOWS:
self.msys_mingw_bindir = Path(shutil.which('mingw-get')).parent
self.install_mingwget_deps()
self.install_mingw()
if self.platform == Platform.WINDOWS:
self.remove_mingw_cpp()
self.add_non_prefixed_strings()
if self.platform == Platform.WINDOWS:
# After mingw has been installed
self.install_bin_deps()
self.install_gl_headers()
if self.platform == Platform.WINDOWS:
self.install_openssl_mingw_perl()
def check_dirs(self):
if not os.path.exists(self.perl_prefix):
os.makedirs(self.perl_prefix)
if not os.path.exists(self.prefix):
os.makedirs(self.prefix)
etc_path = os.path.join(self.config.prefix, 'etc')
if not os.path.exists(etc_path):
os.makedirs(etc_path)
def install_mingw(self):
tarball = MINGW_TARBALL_TPL % (self.version, GCC_VERSION,
self.platform, self.arch)
tarfile = os.path.join(self.prefix, tarball)
tarfile = os.path.abspath(tarfile)
shell.download("%s/%s" % (MINGW_DOWNLOAD_SOURCE, tarball), tarfile, check_cert=False)
try:
shell.unpack(tarfile, self.prefix)
except Exception:
pass
self.fix_lib_paths()
if self.arch == Architecture.X86:
try:
shutil.rmtree('/mingw/lib')
except Exception:
pass
def install_openssl_mingw_perl(self):
'''
This perl is only used by openssl; we can't use it everywhere else
        because it can't find msys tools, and so perl scripts like autom4te
        fail to run external tools such as m4. Luckily, openssl doesn't use those.
'''
url = MINGW_PERL_TPL.format(PERL_VERSION)
tarfile = os.path.join(self.perl_prefix, os.path.basename(url))
tarfile = os.path.abspath(tarfile)
shell.download(url, tarfile, check_cert=False)
try:
shell.unpack(tarfile, self.perl_prefix)
except Exception:
pass
# Move perl installation from perl-5.xx.y to perl
perldir = os.path.join(self.perl_prefix, 'perl-' + PERL_VERSION)
for d in os.listdir(perldir):
dest = os.path.join(self.perl_prefix, d)
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.move(os.path.join(perldir, d), self.perl_prefix)
def install_mingwget_deps(self):
for dep in MINGWGET_DEPS:
shell.call('mingw-get install %s' % dep)
def install_gl_headers(self):
m.action("Installing wglext.h")
if self.arch == Architecture.X86:
inst_path = os.path.join(self.prefix, 'i686-w64-mingw32/include/GL/wglext.h')
else:
inst_path = os.path.join(self.prefix, 'x86_64-w64-mingw32/include/GL/wglext.h')
gl_header = 'http://www.opengl.org/registry/api/GL/wglext.h'
shell.download(gl_header, inst_path, False, check_cert=False)
def install_bin_deps(self):
# FIXME: build intltool as part of the build tools bootstrap
for url in WINDOWS_BIN_DEPS:
temp = fix_winpath(tempfile.mkdtemp())
path = os.path.join(temp, 'download.zip')
shell.download(GNOME_FTP + url, path)
shell.unpack(path, self.config.toolchain_prefix)
# replace /opt/perl/bin/perl in intltool
files = shell.ls_files(['bin/intltool*'], self.config.toolchain_prefix)
for f in files:
shell.replace(os.path.join(self.config.toolchain_prefix, f),
{'/opt/perl/bin/perl': '/bin/perl'})
return
def fix_lib_paths(self):
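        # The prebuilt toolchain's libtool .la files embed the absolute libdir
        # of the machine they were built on; rewrite that path to our prefix.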
orig_sysroot = self.find_mingw_sys_root()
if self.config.platform != Platform.WINDOWS:
new_sysroot = os.path.join(self.prefix, 'mingw', 'lib')
else:
new_sysroot = os.path.join(self.prefix, 'lib')
lib_path = new_sysroot
# Replace the old sysroot in all .la files
for path in [f for f in os.listdir(lib_path) if f.endswith('la')]:
path = os.path.abspath(os.path.join(lib_path, path))
shell.replace(path, {orig_sysroot: new_sysroot})
def find_mingw_sys_root(self):
if self.config.platform != Platform.WINDOWS:
f = os.path.join(self.prefix, 'mingw', 'lib', 'libstdc++.la')
else:
f = os.path.join(self.prefix, 'lib', 'libstdc++.la')
with open(f, 'r') as f:
# get the "libdir=/path" line
libdir = [x for x in f.readlines() if x.startswith('libdir=')][0]
# get the path
libdir = libdir.split('=')[1]
# strip the surrounding quotes
            print("Replacing old libdir: ", libdir)
return libdir.strip()[1:-1]
def remove_mingw_cpp(self):
# Fixes glib's checks in configure, where cpp -v is called
# to get some include dirs (which doesn't looks like a good idea).
# If we only have the host-prefixed cpp, this problem is gone.
if (self.msys_mingw_bindir / 'cpp.exe').is_file():
shutil.move(self.msys_mingw_bindir / 'cpp.exe',
self.msys_mingw_bindir / 'cpp.exe.bck')
def add_non_prefixed_strings(self):
# libtool m4 macros uses non-prefixed 'strings' command. We need to
# create a copy here
if self.config.platform == Platform.WINDOWS:
ext = '.exe'
else:
ext = ''
if self.config.target_arch == Architecture.X86:
host = 'i686-w64-mingw32'
else:
host = 'x86_64-w64-mingw32'
bindir = os.path.join(self.config.toolchain_prefix, 'bin')
p_strings = os.path.join(bindir, '%s-strings%s' % (host, ext))
strings = os.path.join(bindir, 'strings%s' % ext)
if os.path.exists(strings):
os.remove(strings)
shutil.copy(p_strings, strings)
def register_all():
register_bootstrapper(Distro.WINDOWS, WindowsBootstrapper)
| lgpl-2.1 | 5,058,039,451,606,990,000 | 40.056338 | 93 | 0.6247 | false |
hainm/Torsions | torsions/TorsionScanSet.py | 1 | 10496 | # -*- coding: utf-8 -*-
"""
Created on Thu May 7 19:32:22 2015
@author: sternc1
"""
import pandas as pd
import numpy as np
import simtk.openmm as mm
import simtk.unit as u
import mdtraj as md
from copy import copy, deepcopy
import re
from cclib.parser import Gaussian
from cclib.parser.utils import convertor
from mdtraj import Trajectory
from simtk.unit import Quantity, nanometers, kilojoules_per_mole
from chemistry.charmm import CharmmPsfFile
def to_optimize(param, stream, penalty = 10):
""" returns a list of dihedrals to optimize and updates CharmmParameterSet
with stream files
Parameters
----------
param : CharmmParameterSet
stream: list of stream files
penalty: int for CGenFF penalty cutoff (Default = 10)
Returns list of tuples containing dihedrals to optimize
"""
if type(stream) != list:
stream = [stream]
keys = [i for i in param.dihedral_types.keys()]
for j in stream:
param.read_stream_file(j)
return [k for k in param.dihedral_types.keys()
if k not in keys and param.dihedral_types[k].penalty >= penalty]
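# Illustrative sketch (commented out; file names and the CharmmParameterSet
# import are placeholders for whatever the project actually provides):
#
#     param = CharmmParameterSet('top_all36_cgenff.rtf', 'par_all36_cgenff.prm')
#     dihedrals_to_fit = to_optimize(param, ['mol.str'], penalty=10)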
def read_scan_logfile(logfiles, structure):
    """ parses Gaussian 09 torsion-scan log file
    parameters
    ----------
    logfiles: str or list of str
        Name of Gaussian 09 torsion scan log file
structure: charmm psf file
returns
-------
TorsionScanSet
"""
topology = md.load_psf(structure)
structure = CharmmPsfFile(structure)
positions = np.ndarray((0, topology.n_atoms, 3))
qm_energies = np.ndarray(0)
torsions = np.ndarray((0, 4), dtype=int)
directions = np.ndarray(0, dtype=int)
steps = np.ndarray((0, 3), dtype=int)
if type(logfiles) != list:
logfiles = [logfiles]
for file in logfiles:
print("loading %s" % file)
direction = np.ndarray(1)
torsion = np.ndarray((1,4), dtype=int)
step = []
index = (2, 12, -1)
f = file.split('/')[-1].split('.')
if f[2] == 'pos':
direction[0] = 1
else:
direction[0] = 0
fi = open(file, 'r')
for line in fi:
if re.search(' Scan ', line):
t = line.split()[2].split(',')
t[0] = t[0][-1]
t[-1] = t[-1][0]
for i in range(len(t)):
torsion[0][i] = (int(t[i]) - 1)
if re.search('Step', line):
try:
step = np.array(([int(line.rsplit()[j]) for j in index]))
step = step[np.newaxis,:]
steps = np.append(steps, step, axis=0)
except:
pass
fi.close()
log = Gaussian(file)
data = log.parse()
# convert angstroms to nanometers
positions = np.append(positions, data.atomcoords*0.1, axis=0)
qm_energies = np.append(qm_energies, (convertor(data.scfenergies, "eV", "kJmol-1") -
min(convertor(data.scfenergies, "eV", "kJmol-1"))), axis=0)
for i in range(len(data.scfenergies)):
torsions = np.append(torsions, torsion, axis=0)
directions = np.append(directions, direction, axis=0)
return TorsionScanSet(positions, topology, structure, torsions, directions, steps, qm_energies)
class TorsionScanSet(Trajectory):
"""container object for torsion scan
A TorsionScanSet should be constructed by loading Gaussian 09 torsion scan log files from disk
with an mdtraj.Topology object
Examples
--------
    >>> torsion_set = read_scan_logfile('FRG.scanN.dir.log', 'FRG.psf')
>>> print torsion_set
<torsions.TorsionScanSet with 346 frames, 22 atoms, 1 residues, 4 unique torsions without MM Energy at 0x10b099b10>
Attributes
----------
structure: chemistry.Structure
qm_energy: simtk.unit.Quantity((n_frames), unit=kilojoule/mole)
mm_energy: simtk.unit.Quantity((n_frames), unit=kilojoule/mole)
delta_energy: simtk.unit.Quantity((n_frames), unit=kilojoule/mole)
torsion_index: {np.ndarray, shape(n_frames, 4)}
step: {np.ndarray, shape(n_frame, 3)}
direction: {np.ndarray, shape(n_frame)}. 0 = negative, 1 = positive
"""
def __init__(self, positions, topology, structure, torsions, directions, steps, qm_energies):
"""Create new TorsionScanSet object"""
assert isinstance(topology, object)
super(TorsionScanSet, self).__init__(positions, topology)
self.structure = structure
self.qm_energy = Quantity(value=qm_energies, unit=kilojoules_per_mole)
self.mm_energy = Quantity()
self.delta_energy = Quantity()
self.torsion_index = torsions
self.direction = directions
self.steps = steps
def to_dataframe(self):
""" convert TorsionScanSet to pandas dataframe """
data = []
for i in range(self.n_frames):
if len(self.mm_energy) == self.n_frames and len(self.delta_energy) == self.n_frames:
data.append((self.torsion_index[i], self.direction[i], self.steps[i], self.qm_energy[i], self.mm_energy[i],
self.delta_energy[i]))
else:
data.append((self.torsion_index[i], self.direction[i], self.steps[i], self.qm_energy[i], float('nan'), float('nan')))
torsion_set = pd.DataFrame(data, columns=[ "torsion", "scan_direction", "step_point_total", "QM_energy KJ/mol",
"MM_energy KJ/mole", "delta KJ/mole"])
return torsion_set
def _string_summary_basic(self):
"""Basic summary of TorsionScanSet in string form."""
energy_str = 'with MM Energy' if self._have_mm_energy else 'without MM Energy'
value = "torsions.TorsionScanSet with %d frames, %d atoms, %d residues, %s" % (
self.n_frames, self.n_atoms, self.n_residues, energy_str)
return value
def extract_geom_opt(self):
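        """Return a new TorsionScanSet keeping only the last geometry recorded
        for each scan point, i.e. the geometry-optimized structure at each
        torsion value."""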
key = []
for i, step in enumerate(self.steps):
try:
if step[1] != self.steps[i+1][1]:
key.append(i)
except IndexError:
key.append(i)
new_torsionScanSet = self.slice(key)
return new_torsionScanSet
def compute_energy(self, param, offset, platform=None,):
""" Computes energy for a given structure with a given parameter set
Parameters
----------
param: chemistry.charmm.CharmmParameterSet
platform: simtk.openmm.Platform to evaluate energy on (if None, will select automatically)
"""
# Create Context.
integrator = mm.VerletIntegrator(0.004*u.picoseconds)
system = self.structure.createSystem(param)
if platform != None:
context = mm.Context(system, integrator, platform)
else:
context = mm.Context(system, integrator)
# Compute potential energies for all snapshots.
self.mm_energy = Quantity(value=np.zeros([self.n_frames], np.float64), unit=kilojoules_per_mole)
for i in range(self.n_frames):
context.setPositions(self.openmm_positions(i))
state = context.getState(getEnergy=True)
self.mm_energy[i] = state.getPotentialEnergy()
# Subtract off minimum of mm_energy
self.mm_energy -= self.mm_energy.min() + Quantity(value=float(offset.value), unit=kilojoules_per_mole)
self.delta_energy = (self.qm_energy - self.mm_energy)
# Compute deviation between MM and QM energies with offset
#self.delta_energy = mm_energy - self.qm_energy + Quantity(value=offset, unit=kilojoule_per_mole)
# Clean up.
del context
del system
del integrator
# print('Heap at end of compute_energy'), hp.heeap()
@property
def _have_mm_energy(self):
        return len(self.mm_energy) != 0
# @property
# def _unique_torsions(self):
# Not returning the right amount. debug
# torsions = []
# for i in range(len(self.torsion_index)):
# try:
# if (self.torsion_index[i] != self.torsion_index[i+1]).all():
# torsions.append(self.torsion_index[i]), torsions.append(self.torsion_index[i+1])
# except:
# pass
# return len(torsions), torsions
def __getitem__(self, key):
"Get a slice of this trajectory"
return self.slice(key)
def slice(self, key, copy=True):
"""Slice trajectory, by extracting one or more frames into a separate object
This method can also be called using index bracket notation, i.e
`traj[1] == traj.slice(1)`
Parameters
----------
key : {int, np.ndarray, slice}
The slice to take. Can be either an int, a list of ints, or a slice
object.
copy : bool, default=True
Copy the arrays after slicing. If you set this to false, then if
you modify a slice, you'll modify the original array since they
point to the same data.
"""
xyz = self.xyz[key]
time = self.time[key]
torsions = self.torsion_index[key]
direction = self.direction[key]
steps = self.steps[key]
qm_energy = self.qm_energy[key]
unitcell_lengths, unitcell_angles = None, None
if self.unitcell_angles is not None:
unitcell_angles = self.unitcell_angles[key]
if self.unitcell_lengths is not None:
unitcell_lengths = self.unitcell_lengths[key]
if copy:
xyz = xyz.copy()
time = time.copy()
topology = deepcopy(self._topology)
structure = deepcopy(self.structure)
torsions = torsions.copy()
direction = direction.copy()
steps = steps.copy()
qm_energy = qm_energy.copy()
if self.unitcell_angles is not None:
unitcell_angles = unitcell_angles.copy()
if self.unitcell_lengths is not None:
unitcell_lengths = unitcell_lengths.copy()
newtraj = self.__class__(
xyz, topology, structure, torsions, direction, steps, qm_energy)
if self._rmsd_traces is not None:
newtraj._rmsd_traces = np.array(self._rmsd_traces[key],
ndmin=1, copy=True)
return newtraj
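# Illustrative end-to-end sketch (commented out; file names, ``param`` and
# ``offset`` are placeholders):
#
#     scan = read_scan_logfile(['FRG.scan1.pos.log'], 'FRG.psf')
#     optimized = scan.extract_geom_opt()
#     optimized.compute_energy(param, offset)
#     print(optimized.to_dataframe())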
| gpl-2.0 | -6,031,330,545,912,923,000 | 35.068729 | 133 | 0.588034 | false |
nijel/stardicter | stardicter/czechenglish.py | 1 | 1575 | # -*- coding: utf-8 -*-
#
# Copyright © 2006 - 2017 Michal Čihař <[email protected]>
#
# This file is part of Stardicter <https://cihar.com/software/slovnik/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Converter for GNU/FDL Anglicko-Český slovník"""
from __future__ import unicode_literals
from stardicter.base import StardictWriter
URL = 'https://www.svobodneslovniky.cz/data/en-cs.txt.gz'
class CzechEnglishWriter(StardictWriter):
url = 'https://www.svobodneslovniky.cz/'
name = 'GNU/FDL Anglicko-Český slovník'
source = 'english'
target = 'czech'
license = 'GFDL-1.1'
download_url = URL
download_gzip = True
def is_data_line(self, line):
'''
        Checks whether this is a data line, i.e. not the '# Date:' timestamp header.
'''
return not line.startswith('# Date:')
def get_name(self, forward=True):
if forward:
return 'GNU/FDL Anglicko-Český slovník'
return 'GNU/FDL Česko-Anglický slovník'
| gpl-3.0 | -5,843,448,257,105,317,000 | 32.913043 | 71 | 0.695513 | false |
lahosken/pants | src/python/pants/java/nailgun_client.py | 5 | 7580 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import logging
import os
import signal
import socket
import sys
from pants.java.nailgun_io import NailgunStreamReader
from pants.java.nailgun_protocol import ChunkType, NailgunProtocol
from pants.util.socket import RecvBufferedSocket
logger = logging.getLogger(__name__)
class NailgunClientSession(NailgunProtocol):
"""Handles a single nailgun client session."""
def __init__(self, sock, in_fd, out_fd, err_fd, exit_on_broken_pipe=False):
self._sock = sock
self._input_reader = NailgunStreamReader(in_fd, self._sock) if in_fd else None
self._stdout = out_fd
self._stderr = err_fd
self._exit_on_broken_pipe = exit_on_broken_pipe
self.remote_pid = None
def _maybe_start_input_reader(self):
if self._input_reader:
self._input_reader.start()
def _maybe_stop_input_reader(self):
if self._input_reader:
self._input_reader.stop()
def _write_flush(self, fd, payload=None):
"""Write a payload to a given fd (if provided) and flush the fd."""
try:
if payload:
fd.write(payload)
fd.flush()
except (IOError, OSError) as e:
# If a `Broken Pipe` is encountered during a stdio fd write, we're headless - bail.
if e.errno == errno.EPIPE and self._exit_on_broken_pipe:
sys.exit()
# Otherwise, re-raise.
raise
def _process_session(self):
"""Process the outputs of the nailgun session."""
try:
for chunk_type, payload in self.iter_chunks(self._sock, return_bytes=True):
if chunk_type == ChunkType.STDOUT:
self._write_flush(self._stdout, payload)
elif chunk_type == ChunkType.STDERR:
self._write_flush(self._stderr, payload)
elif chunk_type == ChunkType.EXIT:
self._write_flush(self._stdout)
self._write_flush(self._stderr)
return int(payload)
elif chunk_type == ChunkType.PID:
self.remote_pid = int(payload)
elif chunk_type == ChunkType.START_READING_INPUT:
self._maybe_start_input_reader()
else:
raise self.ProtocolError('received unexpected chunk {} -> {}'.format(chunk_type, payload))
finally:
# Bad chunk types received from the server can throw NailgunProtocol.ProtocolError in
# NailgunProtocol.iter_chunks(). This ensures the NailgunStreamReader is always stopped.
self._maybe_stop_input_reader()
def execute(self, working_dir, main_class, *arguments, **environment):
# Send the nailgun request.
self.send_request(self._sock, working_dir, main_class, *arguments, **environment)
# Process the remainder of the nailgun session.
return self._process_session()
class NailgunClient(object):
"""A python nailgun client (see http://martiansoftware.com/nailgun for more info)."""
class NailgunError(Exception):
"""Indicates an error interacting with a nailgun server."""
class NailgunConnectionError(NailgunError):
"""Indicates an error upon initial connect to the nailgun server."""
# For backwards compatibility with nails expecting the ng c client special env vars.
ENV_DEFAULTS = dict(NAILGUN_FILESEPARATOR=os.sep, NAILGUN_PATHSEPARATOR=os.pathsep)
DEFAULT_NG_HOST = '127.0.0.1'
DEFAULT_NG_PORT = 2113
def __init__(self, host=DEFAULT_NG_HOST, port=DEFAULT_NG_PORT, ins=sys.stdin, out=None, err=None,
workdir=None, exit_on_broken_pipe=False):
"""Creates a nailgun client that can be used to issue zero or more nailgun commands.
:param string host: the nailgun server to contact (defaults to '127.0.0.1')
:param int port: the port the nailgun server is listening on (defaults to the default nailgun
port: 2113)
:param file ins: a file to read command standard input from (defaults to stdin) - can be None
in which case no input is read
:param file out: a stream to write command standard output to (defaults to stdout)
:param file err: a stream to write command standard error to (defaults to stderr)
:param string workdir: the default working directory for all nailgun commands (defaults to CWD)
:param bool exit_on_broken_pipe: whether or not to exit when `Broken Pipe` errors are encountered.
"""
self._host = host
self._port = port
self._stdin = ins
self._stdout = out or sys.stdout
self._stderr = err or sys.stderr
self._workdir = workdir or os.path.abspath(os.path.curdir)
self._exit_on_broken_pipe = exit_on_broken_pipe
self._session = None
def try_connect(self):
"""Creates a socket, connects it to the nailgun and returns the connected socket.
:returns: a connected `socket.socket`.
:raises: `NailgunClient.NailgunConnectionError` on failure to connect.
"""
sock = RecvBufferedSocket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
try:
sock.connect((self._host, self._port))
except (socket.error, socket.gaierror) as e:
logger.debug('Encountered socket exception {!r} when attempting connect to nailgun'.format(e))
sock.close()
raise self.NailgunConnectionError(
'Problem connecting to nailgun server at {}:{}: {!r}'.format(self._host, self._port, e))
else:
return sock
def send_control_c(self):
"""Sends SIGINT to a nailgun server using pid information from the active session."""
if self._session and self._session.remote_pid is not None:
os.kill(self._session.remote_pid, signal.SIGINT)
def execute(self, main_class, cwd=None, *args, **environment):
"""Executes the given main_class with any supplied args in the given environment.
:param string main_class: the fully qualified class name of the main entrypoint
:param string cwd: Set the working directory for this command
:param list args: any arguments to pass to the main entrypoint
:param dict environment: an env mapping made available to native nails via the nail context
:returns: the exit code of the main_class.
"""
environment = dict(self.ENV_DEFAULTS.items() + environment.items())
cwd = cwd or self._workdir
# N.B. This can throw NailgunConnectionError (catchable via NailgunError).
sock = self.try_connect()
self._session = NailgunClientSession(sock,
self._stdin,
self._stdout,
self._stderr,
self._exit_on_broken_pipe)
try:
return self._session.execute(cwd, main_class, *args, **environment)
except socket.error as e:
raise self.NailgunError('Problem communicating with nailgun server at {}:{}: {!r}'
.format(self._host, self._port, e))
except NailgunProtocol.ProtocolError as e:
raise self.NailgunError('Problem in nailgun protocol with nailgun server at {}:{}: {!r}'
.format(self._host, self._port, e))
finally:
sock.close()
self._session = None
def __repr__(self):
return 'NailgunClient(host={!r}, port={!r}, workdir={!r})'.format(self._host,
self._port,
self._workdir)
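# Illustrative sketch (commented out; the main class and arguments are
# placeholders):
#
#   client = NailgunClient(port=2113)
#   exit_code = client.execute('com.example.Main', os.getcwd(), '-verbose')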
| apache-2.0 | 7,815,629,937,154,507,000 | 41.111111 | 102 | 0.653034 | false |
zbqf109/goodo | openerp/addons/payment_sips/models/sips.py | 3 | 9106 | # -*- coding: utf-'8' "-*-"
try:
import simplejson as json
except ImportError:
import json
import logging
from hashlib import sha256
import urlparse
import unicodedata
from openerp import models, fields, api
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_sips.controllers.main import SipsController
_logger = logging.getLogger(__name__)
CURRENCY_CODES = {
'EUR': '978',
'USD': '840',
'CHF': '756',
'GBP': '826',
'CAD': '124',
'JPY': '392',
'MXN': '484',
'TRY': '949',
'AUD': '036',
'NZD': '554',
'NOK': '578',
'BRL': '986',
'ARS': '032',
'KHR': '116',
'TWD': '901',
}
class AcquirerSips(models.Model):
_inherit = 'payment.acquirer'
# Fields
    sips_merchant_id = fields.Char('SIPS Merchant ID',
                                   required_if_provider='sips')
sips_secret = fields.Char('SIPS Secret', size=64, required_if_provider='sips')
# Methods
def _get_sips_urls(self, environment):
""" Worldline SIPS URLS """
url = {
'prod': 'https://payment-webinit.sips-atos.com/paymentInit',
'test': 'https://payment-webinit.simu.sips-atos.com/paymentInit', }
return {'sips_form_url': url.get(environment, url['test']), }
@api.model
def _get_providers(self):
providers = super(AcquirerSips, self)._get_providers()
providers.append(['sips', 'Sips'])
return providers
def _sips_generate_shasign(self, values):
""" Generate the shasign for incoming or outgoing communications.
:param dict values: transaction values
:return string: shasign
"""
if self.provider != 'sips':
raise ValidationError(_('Incorrect payment acquirer provider'))
data = values['Data']
        # Test key provided by Worldline
key = u'002001000000001_KEY1'
if self.environment == 'prod':
key = getattr(self, 'sips_secret')
shasign = sha256(data + key)
return shasign.hexdigest()
@api.multi
def sips_form_generate_values(self, values):
self.ensure_one()
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
currency = self.env['res.currency'].sudo().browse(values['currency_id'])
currency_code = CURRENCY_CODES.get(currency.name, False)
if not currency_code:
            raise ValidationError(_('Currency not supported by Worldline'))
amount = int(values['amount'] * 100)
if self.environment == 'prod':
# For production environment, key version 2 is required
merchant_id = getattr(self, 'sips_merchant_id')
key_version = '2'
else:
            # Test key provided by Atos Worldline works only with version 1
merchant_id = '002001000000001'
key_version = '1'
sips_tx_values = dict(values)
sips_tx_values.update({
'Data': u'amount=%s|' % amount +
u'currencyCode=%s|' % currency_code +
u'merchantId=%s|' % merchant_id +
u'normalReturnUrl=%s|' % urlparse.urljoin(base_url, SipsController._return_url) +
u'automaticResponseUrl=%s|' % urlparse.urljoin(base_url, SipsController._return_url) +
u'transactionReference=%s|' % values['reference'] +
u'statementReference=%s|' % values['reference'] +
u'keyVersion=%s' % key_version,
'InterfaceVersion': 'HP_2.3',
})
return_context = {}
if sips_tx_values.get('return_url'):
return_context[u'return_url'] = u'%s' % sips_tx_values.pop('return_url')
return_context[u'reference'] = u'%s' % sips_tx_values['reference']
sips_tx_values['Data'] += u'|returnContext=%s' % (json.dumps(return_context))
shasign = self._sips_generate_shasign(sips_tx_values)
sips_tx_values['Seal'] = shasign
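        # For illustration only (reference and URLs are placeholders): 'Data'
        # ends up as a pipe-delimited string along the lines of
        #   amount=1000|currencyCode=978|merchantId=002001000000001|...|
        #   transactionReference=SO001|statementReference=SO001|keyVersion=1|returnContext={...}
        # and 'Seal' is the SHA-256 shasign computed over it plus the secret key.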
return sips_tx_values
@api.multi
def sips_get_form_action_url(self):
self.ensure_one()
return self._get_sips_urls(self.environment)['sips_form_url']
class TxSips(models.Model):
_inherit = 'payment.transaction'
# sips status
_sips_valid_tx_status = ['00']
_sips_wait_tx_status = ['90', '99']
_sips_refused_tx_status = ['05', '14', '34', '54', '75', '97']
_sips_error_tx_status = ['03', '12', '24', '25', '30', '40', '51', '63', '94']
_sips_pending_tx_status = ['60']
_sips_cancel_tx_status = ['17']
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _sips_data_to_object(self, data):
res = {}
for element in data.split('|'):
element_split = element.split('=')
res[element_split[0]] = element_split[1]
return res
@api.model
def _sips_form_get_tx_from_data(self, data):
""" Given a data dict coming from sips, verify it and find the related
transaction record. """
data = self._sips_data_to_object(data.get('Data'))
reference = data.get('transactionReference')
if not reference:
custom = json.loads(data.pop('returnContext', False) or '{}')
reference = custom.get('reference')
payment_tx = self.search([('reference', '=', reference)])
if not payment_tx or len(payment_tx) > 1:
error_msg = _('Sips: received data for reference %s') % reference
if not payment_tx:
error_msg += _('; no order found')
else:
error_msg += _('; multiple order found')
_logger.error(error_msg)
raise ValidationError(error_msg)
return payment_tx
@api.model
def _sips_form_get_invalid_parameters(self, tx, data):
invalid_parameters = []
data = self._sips_data_to_object(data.get('Data'))
# TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
if tx.acquirer_reference and data.get('transactionReference') != tx.acquirer_reference:
invalid_parameters.append(('transactionReference', data.get('transactionReference'), tx.acquirer_reference))
# check what is bought
if float_compare(float(data.get('amount', '0.0')) / 100, tx.amount, 2) != 0:
invalid_parameters.append(('amount', data.get('amount'), '%.2f' % tx.amount))
if tx.partner_reference and data.get('customerId') != tx.partner_reference:
invalid_parameters.append(('customerId', data.get('customerId'), tx.partner_reference))
return invalid_parameters
@api.model
def _sips_form_validate(self, tx, data):
data = self._sips_data_to_object(data.get('Data'))
status = data.get('responseCode')
data = {
'acquirer_reference': data.get('transactionReference'),
'partner_reference': data.get('customerId'),
'date_validate': data.get('transactionDateTime',
fields.Datetime.now())
}
res = False
if status in self._sips_valid_tx_status:
msg = 'Payment for tx ref: %s, got response [%s], set as done.' % \
(tx.reference, status)
_logger.info(msg)
data.update(state='done', state_message=msg)
res = True
elif status in self._sips_error_tx_status:
msg = 'Payment for tx ref: %s, got response [%s], set as ' \
'error.' % (tx.reference, status)
data.update(state='error', state_message=msg)
elif status in self._sips_wait_tx_status:
msg = 'Received wait status for payment ref: %s, got response ' \
'[%s], set as error.' % (tx.reference, status)
data.update(state='error', state_message=msg)
elif status in self._sips_refused_tx_status:
msg = 'Received refused status for payment ref: %s, got response' \
' [%s], set as error.' % (tx.reference, status)
data.update(state='error', state_message=msg)
elif status in self._sips_pending_tx_status:
msg = 'Payment ref: %s, got response [%s] set as pending.' \
% (tx.reference, status)
data.update(state='pending', state_message=msg)
elif status in self._sips_cancel_tx_status:
msg = 'Received notification for payment ref: %s, got response ' \
'[%s], set as cancel.' % (tx.reference, status)
data.update(state='cancel', state_message=msg)
else:
msg = 'Received unrecognized status for payment ref: %s, got ' \
'response [%s], set as error.' % (tx.reference, status)
data.update(state='error', state_message=msg)
_logger.info(msg)
tx.write(data)
return res
| gpl-3.0 | 5,179,069,987,020,952,000 | 38.25 | 120 | 0.570174 | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/keyword_plan_service/transports/__init__.py | 3 | 1043 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import KeywordPlanServiceTransport
from .grpc import KeywordPlanServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[KeywordPlanServiceTransport]]
_transport_registry['grpc'] = KeywordPlanServiceGrpcTransport
__all__ = (
'KeywordPlanServiceTransport',
'KeywordPlanServiceGrpcTransport',
)
| apache-2.0 | 7,617,299,715,411,346,000 | 32.645161 | 89 | 0.767977 | false |
tntnatbry/tensorflow | tensorflow/python/ops/nn_test.py | 19 | 33278 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for miscellaneous functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.ops.nn_impl import _compute_sampled_logits
from tensorflow.python.platform import test as test_lib
class ZeroFractionTest(test_lib.TestCase):
def _ZeroFraction(self, x):
assert x.shape
total_elements = np.prod(x.shape)
nonzeros = np.count_nonzero(x.flatten())
return 1.0 - nonzeros / total_elements
def testZeroFraction(self):
x_shape = [5, 17]
x_np = np.random.randint(0, 2, size=x_shape).astype(np.float32)
y_np = self._ZeroFraction(x_np)
with self.test_session():
x_tf = constant_op.constant(x_np)
x_tf.set_shape(x_shape)
y_tf = nn_impl.zero_fraction(x_tf)
y_tf_np = y_tf.eval()
eps = 1e-8
self.assertAllClose(y_tf_np, y_np, eps)
def testZeroFractionEmpty(self):
with self.test_session():
x = np.zeros(0)
y = nn_impl.zero_fraction(x).eval()
self.assertTrue(np.isnan(y))
class SoftmaxTest(test_lib.TestCase):
def _softmax(self, x):
assert len(x.shape) == 2
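    # Subtracting the per-row max before exponentiating keeps exp() from
    # overflowing; the result is unchanged because softmax is shift-invariant.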
m = x.max(1)[:, np.newaxis]
u = np.exp(x - m)
z = u.sum(1)[:, np.newaxis]
return u / z
def testSoftmax(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
y_np = self._softmax(x_np)
with self.test_session():
x_tf = constant_op.constant(x_np)
y_tf = nn_ops.softmax(x_tf)
y_tf_last_dim = nn_ops.softmax(x_tf, 1)
y_tf_np = y_tf.eval()
y_tf_last_dim_np = y_tf_last_dim.eval()
eps = 1e-3
self.assertAllClose(y_tf_np, y_np, eps)
self.assertAllClose(y_tf_last_dim_np, y_np, eps)
def testGradient(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float64)
with self.test_session():
x_tf = constant_op.constant(x_np)
y_tf = nn_ops.softmax(x_tf)
err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
x_shape)
eps = 1e-8
self.assertLess(err, eps)
class LogPoissonLossTest(test_lib.TestCase):
def _log_poisson_loss(self, x, z, compute_full_loss=False):
lpl = np.exp(x) - z * x
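    # The "full" loss below adds Stirling's approximation of log(z!):
    # log(z!) ~= z*log(z) - z + 0.5*log(2*pi*z), applied only where z > 1.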
if compute_full_loss:
stirling_approx = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)
lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.)
return lpl
def testLogPoissonLoss(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
z_np = np.random.randint(0, 5, size=x_shape).astype(np.float32)
y_np = self._log_poisson_loss(x_np, z_np, compute_full_loss=False)
y_np_stirling = self._log_poisson_loss(x_np, z_np, compute_full_loss=True)
with self.test_session():
y_tf = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=False)
y_tf_stirling = nn_impl.log_poisson_loss(
z_np, x_np, compute_full_loss=True)
y_tf_np = y_tf.eval()
y_tf_np_stirling = y_tf_stirling.eval()
eps = 1e-3
self.assertAllClose(y_tf_np, y_np, eps)
self.assertAllClose(y_tf_np_stirling, y_np_stirling, eps)
def testGradient(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float64)
z_np = np.random.randint(0, 5, size=x_shape).astype(np.float64)
with self.test_session():
x_tf = constant_op.constant(x_np)
y_tf = nn_impl.log_poisson_loss(z_np, x_tf, compute_full_loss=False)
y_tf_stirling = nn_impl.log_poisson_loss(
z_np, x_tf, compute_full_loss=True)
err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
x_shape)
err_stirling = gradient_checker.compute_gradient_error(x_tf, x_shape,
y_tf_stirling,
x_shape)
eps = 1e-6
self.assertLess(err, eps)
self.assertLess(err_stirling, eps)
class LogSoftmaxTest(test_lib.TestCase):
def _log_softmax(self, x):
assert len(x.shape) == 2
m = x.max(1)[:, np.newaxis]
u = x - m
return u - np.log(np.sum(np.exp(u), 1, keepdims=True))
def testLogSoftmax(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
y_np = self._log_softmax(x_np)
with self.test_session():
x_tf = constant_op.constant(x_np)
y_tf = nn_ops.log_softmax(x_tf)
y_tf_np = y_tf.eval()
eps = 1e-3
self.assertAllClose(y_tf_np, y_np, eps)
def testGradient(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float64)
with self.test_session():
x_tf = constant_op.constant(x_np)
y_tf = nn_ops.log_softmax(x_tf)
err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
x_shape)
eps = 1e-7
self.assertLess(err, eps)
class L2LossTest(test_lib.TestCase):
def testL2Loss(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.test_session():
x = constant_op.constant(
[1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype)
l2loss = nn_ops.l2_loss(x)
value = l2loss.eval()
self.assertAllClose(7.0, value)
def testGradient(self):
x_shape = [20, 7, 3]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
with self.test_session():
x = constant_op.constant(x_val, name="x")
output = nn_ops.l2_loss(x)
err = gradient_checker.compute_gradient_error(x, x_shape, output, [1])
print("L2Loss gradient err = %g " % err)
err_tolerance = 1e-11
self.assertLess(err, err_tolerance)
class L2NormalizeTest(test_lib.TestCase):
def _l2Normalize(self, x, dim):
if isinstance(dim, list):
norm = np.linalg.norm(x, axis=tuple(dim))
for d in dim:
norm = np.expand_dims(norm, d)
return x / norm
else:
norm = np.apply_along_axis(np.linalg.norm, dim, x)
return x / np.expand_dims(norm, dim)
def testL2Normalize(self):
x_shape = [20, 7, 3]
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float32)
for dim in range(len(x_shape)):
y_np = self._l2Normalize(x_np, dim)
with self.test_session():
x_tf = constant_op.constant(x_np, name="x")
y_tf = nn_impl.l2_normalize(x_tf, dim)
self.assertAllClose(y_np, y_tf.eval())
def testL2NormalizeDimArray(self):
x_shape = [20, 7, 3]
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float32)
dim = [1, 2]
y_np = self._l2Normalize(x_np, dim)
with self.test_session():
x_tf = constant_op.constant(x_np, name="x")
y_tf = nn_impl.l2_normalize(x_tf, dim)
self.assertAllClose(y_np, y_tf.eval())
def testL2NormalizeGradient(self):
x_shape = [20, 7, 3]
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float64)
for dim in range(len(x_shape)):
with self.test_session():
x_tf = constant_op.constant(x_np, name="x")
y_tf = nn_impl.l2_normalize(x_tf, dim)
err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
x_shape)
print("L2Normalize gradient err = %g " % err)
self.assertLess(err, 1e-4)
class DropoutTest(test_lib.TestCase):
def testDropout(self):
    # Runs dropout with a 0-1 tensor 10 times, sums the number of ones, and
    # validates that it produces approximately the right number of ones over
    # a large number of samples, based on the keep probability.
x_dim = 40
y_dim = 30
num_iter = 10
for keep_prob in [0.1, 0.5, 0.8]:
with self.test_session():
t = constant_op.constant(
1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
dropout = nn_ops.dropout(t, keep_prob)
final_count = 0
self.assertEqual([x_dim, y_dim], dropout.get_shape())
for _ in xrange(0, num_iter):
value = dropout.eval()
final_count += np.count_nonzero(value)
# Verifies that there are only two values: 0 and 1/keep_prob.
sorted_value = np.unique(np.sort(value))
self.assertEqual(0, sorted_value[0])
self.assertAllClose(1 / keep_prob, sorted_value[1])
# Check that we are in the 15% error range
expected_count = x_dim * y_dim * keep_prob * num_iter
rel_error = math.fabs(final_count - expected_count) / expected_count
print(rel_error)
self.assertTrue(rel_error < 0.15)
def testShapedDropout(self):
    # Runs dropout with a 0-1 tensor 10 times, sums the number of ones, and
    # validates that it produces approximately the right number of ones over
    # a large number of samples, based on the keep probability. This time with
    # shaped noise.
x_dim = 40 * 30
y_dim = 3
num_iter = 10
for keep_prob in [0.1, 0.5, 0.8]:
with self.test_session():
t = constant_op.constant(
1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
self.assertEqual([x_dim, y_dim], dropout.get_shape())
final_count = 0
for _ in xrange(0, num_iter):
value = dropout.eval()
final_count += np.count_nonzero(value)
# Verifies that there are only two values: 0 and 1/keep_prob.
sorted_value = np.unique(np.sort(value))
self.assertEqual(0, sorted_value[0])
self.assertAllClose(1 / keep_prob, sorted_value[1])
# Check that we are in the 15% error range
expected_count = x_dim * y_dim * keep_prob * num_iter
rel_error = math.fabs(final_count - expected_count) / expected_count
print(rel_error)
self.assertTrue(rel_error < 0.15)
def testShapedDropoutCorrelation(self):
# Runs a shaped dropout and tests that the correlations are correct.
x_dim = 40
y_dim = 30
num_iter = 10
for keep_prob in [0.1, 0.5, 0.8]:
with self.test_session():
t = constant_op.constant(
1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
self.assertEqual([x_dim, y_dim], dropout.get_shape())
for _ in xrange(0, num_iter):
value = dropout.eval()
          # Verifies that each row has only one type of activation.
for i in xrange(x_dim):
sorted_value = np.unique(np.sort(value[i, :]))
self.assertEqual(sorted_value.size, 1)
def testDropoutPlaceholderKeepProb(self):
    # Runs dropout with a 0-1 tensor 10 times, sums the number of ones, and
    # validates that it produces approximately the right number of ones over
    # a large number of samples, based on the keep probability.
x_dim = 40
y_dim = 30
num_iter = 10
for keep_prob in [0.1, 0.5, 0.8]:
with self.test_session():
t = constant_op.constant(
1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
keep_prob_placeholder = array_ops.placeholder(dtypes.float32)
dropout = nn_ops.dropout(t, keep_prob_placeholder)
final_count = 0
self.assertEqual([x_dim, y_dim], dropout.get_shape())
for _ in xrange(0, num_iter):
value = dropout.eval(feed_dict={keep_prob_placeholder: keep_prob})
final_count += np.count_nonzero(value)
# Verifies that there are only two values: 0 and 1/keep_prob.
sorted_value = np.unique(np.sort(value))
self.assertEqual(0, sorted_value[0])
self.assertAllClose(1 / keep_prob, sorted_value[1])
# Check that we are in the 15% error range
expected_count = x_dim * y_dim * keep_prob * num_iter
rel_error = math.fabs(final_count - expected_count) / expected_count
print(rel_error)
self.assertTrue(rel_error < 0.15)
def testShapedDropoutUnknownShape(self):
x_dim = 40
y_dim = 30
keep_prob = 0.5
x = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
dropout_x = nn_ops.dropout(
x, keep_prob, noise_shape=array_ops.placeholder(dtypes.int32))
self.assertEqual(x.get_shape(), dropout_x.get_shape())
def testInvalidKeepProb(self):
x_dim = 40
y_dim = 30
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
with self.assertRaises(ValueError):
nn_ops.dropout(t, -1.0)
with self.assertRaises(ValueError):
nn_ops.dropout(t, 1.1)
with self.assertRaises(ValueError):
nn_ops.dropout(t, [0.0, 1.0])
with self.assertRaises(ValueError):
nn_ops.dropout(t, array_ops.placeholder(dtypes.float64))
with self.assertRaises(ValueError):
nn_ops.dropout(t, array_ops.placeholder(dtypes.float32, shape=[2]))
def testShapedDropoutShapeError(self):
# Runs shaped dropout and verifies an error is thrown on misshapen noise.
x_dim = 40
y_dim = 30
keep_prob = 0.5
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
with self.assertRaises(ValueError):
_ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])
with self.assertRaises(ValueError):
_ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])
with self.assertRaises(ValueError):
_ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim + 3])
with self.assertRaises(ValueError):
_ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim])
# test that broadcasting proceeds
_ = nn_ops.dropout(t, keep_prob, noise_shape=[y_dim])
_ = nn_ops.dropout(t, keep_prob, noise_shape=[1, y_dim])
_ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
_ = nn_ops.dropout(t, keep_prob, noise_shape=[1, 1])
def testNoDropoutFast(self):
x = array_ops.zeros((5,))
for p in 1, constant_op.constant(1.0):
y = nn_ops.dropout(x, keep_prob=p)
self.assertTrue(x is y)
class ComputeSampledLogitsTest(test_lib.TestCase):
def setUp(self):
self._num_classes = 5
self._dim = 10
self._batch_size = 3
self._num_shards = 3
def _GenerateTestInputs(self):
np.random.seed(0)
weights = np.random.randn(self._num_classes, self._dim).astype(np.float32)
biases = np.random.randn(self._num_classes).astype(np.float32)
hidden_acts = np.random.randn(self._batch_size,
self._dim).astype(np.float32)
sharded_weights = [
weights[[
row for row in range(self._num_classes)
if row % self._num_shards == shard
]] for shard in range(self._num_shards)
]
return weights, biases, hidden_acts, sharded_weights
def _ComputeSampledLogitsNP(self,
true_w,
true_b,
sampled_w,
sampled_b,
hidden_acts,
num_true=1,
true_expected=None,
sampled_expected=None):
batch_size, dim = hidden_acts.shape
true_logits = np.sum(hidden_acts.reshape(
(batch_size, 1, dim)) * true_w.reshape((batch_size, num_true, dim)),
axis=2)
true_b = true_b.reshape((batch_size, num_true))
true_logits += true_b
sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b
if true_expected is not None:
true_logits -= np.log(true_expected)
if sampled_expected is not None:
sampled_logits -= np.log(sampled_expected[np.newaxis, :])
out_logits = np.concatenate([true_logits, sampled_logits], axis=1)
out_labels = np.hstack((np.ones_like(true_logits) / num_true,
np.zeros_like(sampled_logits)))
return out_logits, out_labels
def _ComputeSampledLogitsTF(self,
weights,
biases,
hidden_acts,
labels,
num_sampled,
num_classes,
num_true,
sampled_vals,
subtract_log_q,
remove_accidental_hits,
name="sampled_loss_TF"):
# Should be called from within a `with test_session():` block
if isinstance(weights, list):
weights_tf = [constant_op.constant(shard) for shard in weights]
else:
weights_tf = constant_op.constant(weights)
biases_tf = constant_op.constant(biases)
hidden_acts_tf = constant_op.constant(
hidden_acts, shape=(self._batch_size, self._dim))
labels_tf = constant_op.constant(
labels, dtype=dtypes.int64, shape=(self._batch_size, num_true))
pred_logits_tf, pred_labels_tf = _compute_sampled_logits(
weights_tf,
biases_tf,
labels_tf,
hidden_acts_tf,
num_sampled,
num_classes,
num_true,
sampled_vals,
subtract_log_q=subtract_log_q,
remove_accidental_hits=remove_accidental_hits,
name=name)
return pred_logits_tf, pred_labels_tf
def testComputeSampledLogitsShapes(self):
# We just check that the shapes of the returned values are correct.
weights, biases, hidden_acts, _ = self._GenerateTestInputs()
sampled = [1, 0, 2, 3]
num_sampled = len(sampled)
true_exp = sampled_exp = [1., 1., 1., 1.]
test_sampled_vals = (sampled, true_exp, sampled_exp)
sampled_w, sampled_b = weights[sampled], biases[sampled]
with self.test_session() as sess:
for num_true_test in range(1, 5):
labels = np.random.randint(
low=0,
high=self._num_classes,
size=self._batch_size * num_true_test)
true_w, true_b = weights[labels], biases[labels]
logits_np, labels_np = self._ComputeSampledLogitsNP(
true_w,
true_b,
sampled_w,
sampled_b,
hidden_acts,
num_true=num_true_test)
logits_tf, labels_tf = self._ComputeSampledLogitsTF(
weights,
biases,
hidden_acts,
labels,
num_sampled,
self._num_classes,
num_true=num_true_test,
sampled_vals=test_sampled_vals,
remove_accidental_hits=True,
subtract_log_q=False)
logits_tf_val, labels_tf_val = sess.run([logits_tf, labels_tf])
self.assertEqual(logits_np.shape, logits_tf_val.shape)
self.assertEqual(labels_np.shape, labels_tf_val.shape)
def testComputeSampledLogitsValues(self):
# Here we check the actual numerics.
weights, biases, hidden_acts, sharded_weights = self._GenerateTestInputs()
eps = 1e-3
sampled = [1, 0, 2, 3]
num_sampled = len(sampled)
true_exp = np.empty([self._batch_size, 1], dtype=np.float32)
true_exp.fill(0.5)
sampled_exp = np.empty([num_sampled], dtype=np.float32)
sampled_exp.fill(0.5)
sampled_w, sampled_b = weights[sampled], biases[sampled]
test_sampled_vals = (sampled, true_exp, sampled_exp)
with self.test_session() as sess:
for num_true_test in range(1, 5):
# Generate test data for this run
labels = np.random.randint(
low=0,
high=self._num_classes,
size=self._batch_size * num_true_test)
true_w, true_b = weights[labels], biases[labels]
# Test 1: Without accidental hit removal or subtract_log_q
logits_np, labels_np = self._ComputeSampledLogitsNP(
true_w,
true_b,
sampled_w,
sampled_b,
hidden_acts,
num_true=num_true_test)
logits_tf, labels_tf = self._ComputeSampledLogitsTF(
weights,
biases,
hidden_acts,
labels,
num_sampled,
self._num_classes,
num_true=num_true_test,
sampled_vals=test_sampled_vals,
subtract_log_q=False,
remove_accidental_hits=False,
name="sampled_loss_test1_num_true%d" % num_true_test)
logits_tf_val, labels_tf_val = sess.run([logits_tf, labels_tf])
self.assertAllClose(logits_np, logits_tf_val, eps)
self.assertAllClose(labels_np, labels_tf_val, eps)
# Test 2: With accidental hit removal, no subtract_log_q
logits_tf, labels_tf = self._ComputeSampledLogitsTF(
weights,
biases,
hidden_acts,
labels,
num_sampled,
self._num_classes,
num_true=num_true_test,
sampled_vals=test_sampled_vals,
subtract_log_q=False,
remove_accidental_hits=True,
name="sampled_loss_test2_num_true%d" % num_true_test)
# Test that the exponentiated logits of accidental hits are near 0.
# First we need to find the hits in this random test run:
labels_reshape = labels.reshape((self._batch_size, num_true_test))
logits_tf_np = logits_tf.eval()
for row in xrange(self._batch_size):
row_labels = labels_reshape[row, :]
for col in xrange(num_sampled):
if sampled[col] in row_labels:
# We need to add the num_true_test offset into logits_*
self.assertNear(
np.exp(logits_tf_np[row, col + num_true_test]), 0., eps)
# Test 3: With subtract_log_q, no accidental hit removal
logits_np, labels_np = self._ComputeSampledLogitsNP(
true_w,
true_b,
sampled_w,
sampled_b,
hidden_acts,
num_true=num_true_test,
true_expected=true_exp,
sampled_expected=sampled_exp)
logits_tf, labels_tf = self._ComputeSampledLogitsTF(
weights,
biases,
hidden_acts,
labels,
num_sampled,
self._num_classes,
num_true=num_true_test,
sampled_vals=test_sampled_vals,
subtract_log_q=True,
remove_accidental_hits=False,
name="sampled_loss_test3_num_true%d" % num_true_test)
logits_tf_val, labels_tf_val = sess.run([logits_tf, labels_tf])
self.assertAllClose(logits_np, logits_tf_val, eps)
self.assertAllClose(labels_np, labels_tf_val, eps)
# Test 4: Test 1, with sharded weights
logits_np, labels_np = self._ComputeSampledLogitsNP(
true_w,
true_b,
sampled_w,
sampled_b,
hidden_acts,
num_true=num_true_test)
logits_tf, labels_tf = self._ComputeSampledLogitsTF(
sharded_weights,
biases,
hidden_acts,
labels,
num_sampled,
self._num_classes,
num_true=num_true_test,
sampled_vals=test_sampled_vals,
subtract_log_q=False,
remove_accidental_hits=False,
name="sampled_loss_test1_num_true%d" % num_true_test)
logits_tf_val, labels_tf_val = sess.run([logits_tf, labels_tf])
self.assertAllClose(logits_np, logits_tf_val, eps)
self.assertAllClose(labels_np, labels_tf_val, eps)
def testNCELoss(self):
# A simple test to verify the numerics.
def _SigmoidCrossEntropyWithLogits(logits, targets):
# logits, targets: float arrays of the same shape.
assert logits.shape == targets.shape
pred = 1. / (1. + np.exp(-logits))
eps = 0.0001
pred = np.minimum(np.maximum(pred, eps), 1 - eps)
return -targets * np.log(pred) - (1. - targets) * np.log(1. - pred)
weights, biases, hidden_acts, sharded_weights = self._GenerateTestInputs()
labels = [0, 1, 2]
true_w, true_b = weights[labels], biases[labels]
sampled = [1, 0, 2, 3]
num_sampled = len(sampled)
true_exp = np.empty([self._batch_size, 1], dtype=np.float32)
true_exp.fill(0.5)
sampled_exp = np.empty([num_sampled], dtype=np.float32)
sampled_exp.fill(0.5)
sampled_w, sampled_b = weights[sampled], biases[sampled]
test_sampled_vals = (sampled, true_exp, sampled_exp)
with self.test_session():
logits_np, labels_np = self._ComputeSampledLogitsNP(
true_w,
true_b,
sampled_w,
sampled_b,
hidden_acts,
true_expected=true_exp,
sampled_expected=sampled_exp)
nce_loss_np = np.sum(
_SigmoidCrossEntropyWithLogits(logits_np, labels_np), 1)
labels_tf = constant_op.constant(labels, shape=(self._batch_size, 1))
weights_tf = constant_op.constant(weights)
biases_tf = constant_op.constant(biases)
inputs_tf = constant_op.constant(hidden_acts)
nce_loss_tf = nn_impl.nce_loss(
weights_tf,
biases_tf,
labels_tf,
inputs_tf,
num_sampled=1,
num_classes=self._num_classes,
num_true=1,
sampled_values=test_sampled_vals)
self.assertAllClose(nce_loss_np, nce_loss_tf.eval(), 1e-4)
# Test with sharded weights
nce_loss_tf = nn_impl.nce_loss(
[constant_op.constant(shard) for shard in sharded_weights],
biases_tf,
labels_tf,
inputs_tf,
num_sampled=1,
num_classes=self._num_classes,
num_true=1,
sampled_values=test_sampled_vals)
self.assertAllClose(nce_loss_np, nce_loss_tf.eval(), 1e-4)
def testSampledSoftmaxLoss(self):
# A simple test to verify the numerics.
def _SoftmaxCrossEntropyWithLogits(logits, targets):
# logits, targets: float arrays of the same shape.
assert logits.shape == targets.shape
stable_exp_logits = np.exp(logits - np.amax(
logits, axis=1, keepdims=True))
pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)
weights, biases, hidden_acts, sharded_weights = self._GenerateTestInputs()
labels = [0, 1, 2]
true_w, true_b = weights[labels], biases[labels]
sampled = [1, 0, 2, 3]
num_sampled = len(sampled)
true_exp = np.full([self._batch_size, 1], fill_value=0.5, dtype=np.float32)
sampled_exp = np.full([num_sampled], fill_value=0.5, dtype=np.float32)
sampled_w, sampled_b = weights[sampled], biases[sampled]
test_sampled_vals = (sampled, true_exp, sampled_exp)
with self.test_session():
logits_np, labels_np = self._ComputeSampledLogitsNP(
true_w,
true_b,
sampled_w,
sampled_b,
hidden_acts,
true_expected=true_exp,
sampled_expected=sampled_exp)
sampled_softmax_loss_np = _SoftmaxCrossEntropyWithLogits(logits_np,
labels_np)
labels_tf = constant_op.constant(labels, shape=(self._batch_size, 1))
weights_tf = constant_op.constant(weights)
biases_tf = constant_op.constant(biases)
inputs_tf = constant_op.constant(hidden_acts)
sampled_softmax_loss_tf = nn_impl.sampled_softmax_loss(
weights=weights_tf,
biases=biases_tf,
labels=labels_tf,
inputs=inputs_tf,
num_sampled=1,
num_classes=self._num_classes,
num_true=1,
sampled_values=test_sampled_vals,
remove_accidental_hits=False)
self.assertAllClose(sampled_softmax_loss_np,
sampled_softmax_loss_tf.eval(), 1e-4)
# Test with sharded weights
sampled_softmax_loss_tf = nn_impl.sampled_softmax_loss(
weights=[constant_op.constant(shard) for shard in sharded_weights],
biases=biases_tf,
labels=labels_tf,
inputs=inputs_tf,
num_sampled=1,
num_classes=self._num_classes,
num_true=1,
sampled_values=test_sampled_vals,
remove_accidental_hits=False)
self.assertAllClose(sampled_softmax_loss_np,
sampled_softmax_loss_tf.eval(), 1e-4)
class CReluTest(test_lib.TestCase):
def test(self):
np.random.seed(1) # Make it reproducible.
x = np.random.randn(3, 4).astype(np.float32)
y = np.concatenate([x * (x > 0), -x * (x < 0)], axis=1)
with self.test_session():
z = nn_ops.crelu(constant_op.constant(x)).eval()
self.assertAllClose(y, z, 1e-4)
class ReluTest(test_lib.TestCase):
def test(self):
np.random.seed(1) # Make it reproducible.
x = np.random.randn(3, 4).astype(np.float32)
y = np.maximum(x, 0.0)
with self.test_session():
z = nn_ops.relu(constant_op.constant(x)).eval()
self.assertAllEqual(y, z)
def testNaNs(self):
# Test that relu(nan) = nan for various sizes.
for i in range(18):
x = np.zeros(i) + np.nan
with self.test_session():
z = nn_ops.relu(constant_op.constant(x)).eval()
self.assertTrue(np.isnan(z).all())
class MomentsTest(test_lib.TestCase):
def doOutputTest(self, input_shape, moments_axes, tol=1e-4):
for mu in [0.0, 1.0, 1e3]:
for sigma in [1.0, 0.1]:
for keep_dims in [True, False]:
input_values = np.random.rand(*input_shape) * sigma + mu
expected_mean = np.mean(input_values, axis=moments_axes,
keepdims=keep_dims)
expected_var = np.var(input_values, axis=moments_axes,
keepdims=keep_dims)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
inputs = constant_op.constant(input_values,
shape=input_shape,
dtype=dtypes.float32)
mean, variance = nn_impl.moments(inputs,
moments_axes,
keep_dims=keep_dims)
[mean, variance] = sess.run([mean, variance])
# Make sure that there are no NaNs
self.assertFalse(np.isnan(mean).any())
self.assertFalse(np.isnan(variance).any())
self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)
self.assertAllClose(variance, expected_var, rtol=tol, atol=tol)
def testOutput2DInput0(self):
self.doOutputTest((10, 300), (0,))
def testOutput2DInput1(self):
self.doOutputTest((10, 300), (1,))
def testOutput2DInput01(self):
self.doOutputTest((10, 300), (0, 1))
def testOutput4DInput0(self):
self.doOutputTest((10, 10, 10, 30), (0,))
def testOutput4DInput1(self):
self.doOutputTest((10, 10, 10, 30), (1,))
def testOutput4DInput3(self):
self.doOutputTest((10, 10, 10, 30), (3,))
def testOutput4DInput012(self):
self.doOutputTest((10, 10, 10, 30), (0, 1, 2))
def testOutput4DInput123(self):
self.doOutputTest((10, 10, 10, 30), (1, 2, 3))
def testUnstableOutputShiftNone(self):
input_shape = (10, 300)
moments_axes = (0, 1)
mu, sigma = 1e3, 0.1
tol = 1e-3
input_values = np.random.rand(*input_shape) * sigma + mu
expected_mean = np.mean(input_values, axis=moments_axes)
expected_var = np.var(input_values, axis=moments_axes)
with self.test_session() as sess:
inputs = constant_op.constant(input_values, shape=input_shape,
dtype=dtypes.float32)
mean, variance = nn_impl.moments(inputs, moments_axes, shift=0.0)
[mean, variance] = sess.run([mean, variance])
# Make sure that there are no NaNs
self.assertFalse(np.isnan(mean).any())
self.assertFalse(np.isnan(variance).any())
self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)
# The variance is unstable
self.assertGreater(np.abs(variance - expected_var), 0.1)
if __name__ == "__main__":
test_lib.main()
| apache-2.0 | 2,544,545,218,948,381,700 | 36.307175 | 80 | 0.594026 | false |
pyjobs/web | pyjobs_web/pyjobsweb/model/elasticsearch_model/company.py | 1 | 3244 | # -*- coding: utf-8 -*-
import elasticsearch_dsl as es
from pyjobsweb.lib.elasticsearch_ import compute_index_name
class Company(es.DocType):
class Meta:
index = 'companies'
doc_type = 'company'
french_elision = es.token_filter(
'french_elision',
type='elision',
articles_case=True,
articles=[
'l', 'm', 't', 'qu', 'n', 's',
'j', 'd', 'c', 'jusqu', 'quoiqu',
'lorsqu', 'puisqu'
]
)
french_stopwords = es.token_filter(
'french_stopwords',
type='stop',
stopwords='_french_'
)
# Do not include this filter if keywords is empty
french_keywords = es.token_filter(
'french_keywords',
type='keyword_marker',
keywords=[]
)
french_stemmer = es.token_filter(
'french_stemmer',
type='stemmer',
language='light_french'
)
french_analyzer = es.analyzer(
'french_analyzer',
tokenizer='standard',
filter=[
'lowercase',
'asciifolding',
french_elision,
french_stopwords,
# french_keywords,
french_stemmer
],
char_filter=['html_strip']
)
technologies_tokenizer = es.tokenizer(
'comma_tokenizer',
type='pattern',
pattern=' |,|, '
)
technologies_synonyms_filter = es.token_filter(
'technologies_synonyms',
type='synonym',
synonyms=[
'c => c_language',
'c++, cpp => cpp_language',
'c/c++, c/cpp => c_language',
'c/c++, c/cpp => cpp_language',
'c#, c♯, csharp => csharp_language',
'f#, f♯, fsharp => fsharp_language',
'c#, c♯, csharp => dotnet',
'f#, f♯, fsharp => dotnet',
'.net => dotnet'
]
)
technologies_analyzer = es.analyzer(
'technologies_analyzer',
tokenizer=technologies_tokenizer,
filter=[
'lowercase',
'asciifolding',
technologies_synonyms_filter
]
)
company_name_analyzer = es.analyzer(
'company_name_analyzer',
tokenizer='standard',
filter=[
'lowercase',
'asciifolding'
]
)
id = es.String(index='no')
name = es.String(analyzer=french_analyzer)
description = es.String(
analyzer=french_analyzer,
fields={
'technologies': es.String(analyzer=technologies_analyzer)
}
)
technologies = es.String(analyzer=technologies_analyzer)
url = es.String(index='no')
logo_url = es.String(index='no')
address = es.String(analyzer=french_analyzer)
address_is_valid = es.Boolean()
email = es.String(index='no')
phone = es.String(index='no')
geolocation = es.GeoPoint()
geolocation_is_valid = es.Boolean()
def __init__(self, meta=None, **kwargs):
super(Company, self).__init__(meta, **kwargs)
self._doc_type.index = compute_index_name(self.index)
@property
def index(self):
return self._doc_type.index
@property
def doc_type(self):
return self._doc_type.name
| mit | -8,846,263,115,708,567,000 | 23.515152 | 69 | 0.530284 | false |
tansey/gfl | pygfl/logistic_solver.py | 1 | 2240 | import numpy as np
from numpy.ctypeslib import ndpointer
from ctypes import *
from pygfl.utils import *
from pygfl.solver import TrailSolver
# Load the graph fused lasso library
try:
logistic_graphfl_lib = cdll.LoadLibrary("libgraphfl.so")
except OSError:
# If the installation isn't available on LD_LIBRARY_PATH, it's probably
# been built with setuptools and is in the project root.
_libgraphfl = get_libgraphfl()
logistic_graphfl_lib = cdll.LoadLibrary(_libgraphfl)
logistic_graphfl = logistic_graphfl_lib.graph_fused_lasso_logit_warm
logistic_graphfl.restype = c_int
logistic_graphfl.argtypes = [c_int, ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_int, flags='C_CONTIGUOUS'),
c_int, ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_int, flags='C_CONTIGUOUS'),
c_double, c_double, c_double,
c_int, c_double,
ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'),
ndpointer(c_double, flags='C_CONTIGUOUS')]
class LogisticTrailSolver(TrailSolver):
def __init__(self, alpha=2., inflate=2., maxsteps=100000, converge=1e-6, penalty='gfl', max_dp_steps=5000, gamma=1.):
TrailSolver.__init__(self, alpha, inflate, maxsteps, converge, penalty, max_dp_steps, gamma)
if penalty != 'gfl':
raise NotImplementedError('Only regular fused lasso supported for logistic loss.')
def solve_gfl(self, lam):
if hasattr(lam, '__len__'):
raise NotImplementedError('Only uniform edge weighting implemented for logistic loss.')
# Run the graph-fused lasso algorithm
s = logistic_graphfl(self.nnodes, np.ones(self.nnodes, dtype='int32'), self.y.astype('int32'),
self.ntrails, self.trails, self.breakpoints,
lam, self.alpha, self.inflate,
self.maxsteps, self.converge,
self.beta, self.z, self.u)
self.steps.append(s)
return self.beta
def log_likelihood(self, beta):
signs = -(self.y * 2 - 1)
return -np.log(1 + np.exp(signs * beta)).sum()
| lgpl-3.0 | -8,542,383,052,167,412,000 | 42.076923 | 121 | 0.623214 | false |
vinodkc/spark | examples/src/main/python/wordcount.py | 27 | 1418 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
from operator import add
from pyspark.sql import SparkSession
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: wordcount <file>", file=sys.stderr)
sys.exit(-1)
spark = SparkSession\
.builder\
.appName("PythonWordCount")\
.getOrCreate()
lines = spark.read.text(sys.argv[1]).rdd.map(lambda r: r[0])
counts = lines.flatMap(lambda x: x.split(' ')) \
.map(lambda x: (x, 1)) \
.reduceByKey(add)
output = counts.collect()
for (word, count) in output:
print("%s: %i" % (word, count))
spark.stop()
| apache-2.0 | 5,795,214,979,285,744,000 | 32.761905 | 74 | 0.673484 | false |
gimite/personfinder | app/flag_note.py | 1 | 3165 | #!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import db
from recaptcha.client import captcha
import model
import reveal
import utils
class Handler(utils.BaseHandler):
"""Marks a specified note as hidden (spam)."""
def get(self):
note = model.Note.get(self.repo, self.params.id)
if not note:
return self.error(400, 'No note with ID: %r' % self.params.id)
note.status_text = utils.get_note_status_text(note)
note.source_date_local = self.to_local_time(note.source_date)
captcha_html = note.hidden and self.get_captcha_html() or ''
# Check if private info should be revealed.
content_id = 'view:' + note.person_record_id
reveal_url = reveal.make_reveal_url(self, content_id)
show_private_info = reveal.verify(content_id, self.params.signature)
self.render('flag_note.html',
note=note,
captcha_html=captcha_html,
reveal_url=reveal_url,
flag_note_page=True,
show_private_info=show_private_info,
signature=self.params.signature)
def post(self):
note = model.Note.get(self.repo, self.params.id)
if not note:
return self.error(400, 'No note with ID: %r' % self.params.id)
captcha_response = note.hidden and self.get_captcha_response()
if not note.hidden or captcha_response.is_valid:
note.hidden = not note.hidden
# When "hidden" changes, update source_date and entry_date (melwitt)
# https://web.archive.org/web/20111228161607/http://code.google.com/p/googlepersonfinder/issues/detail?id=58
now = utils.get_utcnow()
note.source_date = now
note.entry_date = now
db.put(note)
model.UserActionLog.put_new(
(note.hidden and 'hide') or 'unhide',
note, self.request.get('reason_for_report', ''))
person = model.Person.get(self.repo, note.person_record_id)
if person:
person.update_latest_status(note)
self.redirect(self.get_url('/view', id=note.person_record_id,
signature=self.params.signature))
elif not captcha_response.is_valid:
captcha_html = self.get_captcha_html(captcha_response.error_code)
self.render('flag_note.html',
note=note,
captcha_html=captcha_html,
signature=self.params.signature)
| apache-2.0 | -8,312,194,342,809,161,000 | 40.644737 | 120 | 0.61643 | false |
jessamynsmith/eggtimer-server | eggtimer/settings.py | 1 | 6290 | # Django settings for eggtimer project.
import os
from django.utils.dateparse import parse_datetime
import dj_database_url
from email.utils import formataddr
HOME_DIR = os.path.expanduser("~")
BASE_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir))
ADMINS = (
(os.environ.get('ADMIN_NAME', 'admin'), os.environ.get('ADMIN_EMAIL', '[email protected]')),
)
# Export a secret value in production; for local development, the default is good enough
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY',
'psu&83=i(4wgd@9*go=nps9=1rw#9b_w6psy4mp6yoxqv1i5g')
# Use env setting if available, otherwise make debug false
DEBUG = bool(int(os.environ.get('DJANGO_DEBUG', '0')))
ALLOWED_HOSTS = ['eggtimer.herokuapp.com', 'localhost', '127.0.0.1']
CORS_ORIGIN_ALLOW_ALL = True
SECURE_SSL_REDIRECT = bool(int(os.environ.get('DJANGO_ENABLE_SSL', '1')))
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'custom_user',
'settings_context_processor',
'gunicorn',
'corsheaders',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.github',
'rest_framework',
'rest_framework.authtoken',
'floppyforms',
'bootstrapform',
'timezone_field',
'periods',
]
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'periods.middleware.AddAuthTokenMiddleware',
)
ROOT_URLCONF = 'eggtimer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'eggtimer', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
"django.contrib.auth.context_processors.auth",
'django.template.context_processors.debug',
'django.template.context_processors.request',
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"settings_context_processor.context_processors.settings",
],
'debug': DEBUG,
},
},
]
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'eggtimer.wsgi.application'
# Parse database configuration from DATABASE_URL environment variable
DATABASES = {
'default': dj_database_url.config(
default="sqlite:///%s" % os.path.join(HOME_DIR, 'eggtimer', 'eggtimer.sqlite')
)
}
SITE_ID = 1
# https://docs.djangoproject.com/en/1.8/topics/i18n/
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'bower_components'),
os.path.join(BASE_DIR, 'eggtimer', 'static'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend"
)
# auth and allauth
AUTH_USER_MODEL = 'periods.User'
LOGIN_REDIRECT_URL = '/calendar/'
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_LOGOUT_ON_GET = True
SOCIALACCOUNT_QUERY_EMAIL = True
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email'],
'METHOD': 'oauth2',
}
}
ACCOUNT_ACTIVATION_DAYS = 14
DEFAULT_FROM_EMAIL = formataddr(ADMINS[0])
REPLY_TO = (
os.environ.get('REPLY_TO_EMAIL', '[email protected]'),
)
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_PORT = 587
EMAIL_HOST_USER = "apikey"
EMAIL_HOST_PASSWORD = os.environ.get('SENDGRID_API_KEY')
EMAIL_USE_TLS = True
if not EMAIL_HOST_PASSWORD:
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = os.path.join(HOME_DIR, 'eggtimer', 'emails')
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
}
# US Navy API is used for moon phases
# http://aa.usno.navy.mil/data/docs/api.php#phase
MOON_PHASE_URL = 'http://api.usno.navy.mil'
API_DATE_FORMAT = '%Y-%m-%d'
US_DATE_FORMAT = '%-m/%-d/%Y'
# TODO maybe this could be a django plugin?
DEPLOY_DATE = parse_datetime(os.environ.get('DEPLOY_DATE', ''))
VERSION = '0.6'
TEMPLATE_VISIBLE_SETTINGS = ['DEPLOY_DATE', 'VERSION', 'ADMINS']
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# if DEBUG:
# INSTALLED_APPS.extend([
# 'django_extensions',
# ])
| mit | -4,398,288,394,248,932,000 | 29.682927 | 98 | 0.679968 | false |
kustodian/ansible | lib/ansible/modules/system/service_facts.py | 16 | 10379 | #!/usr/bin/python
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# originally copied from AWX's scan_services module to bring this functionality
# into Core
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: service_facts
short_description: Return service state information as fact data
description:
- Return service state information as fact data for various service management utilities
version_added: "2.5"
requirements: ["Any of the following supported init systems: systemd, sysv, upstart"]
notes:
- When accessing the C(ansible_facts.services) facts collected by this module,
it is recommended to not use "dot notation" because services can have a C(-)
character in their name which would result in invalid "dot notation", such as
C(ansible_facts.services.zuul-gateway). It is instead recommended to
      use the string value of the service name as the key in order to obtain
the fact data value like C(ansible_facts.services['zuul-gateway'])
author:
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: populate service facts
service_facts:
- debug:
var: ansible_facts.services
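# Illustrative only (not part of the module's original docs): access a single
# service by name with bracket notation; 'sshd.service' is a hypothetical key.
- debug:
    var: ansible_facts.services['sshd.service']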
'''
RETURN = '''
ansible_facts:
description: Facts to add to ansible_facts about the services on the system
returned: always
type: complex
contains:
services:
description: States of the services with service name as key.
returned: always
type: complex
contains:
source:
description: Init system of the service. One of C(systemd), C(sysv), C(upstart).
returned: always
type: str
sample: sysv
state:
description: State of the service. Either C(running), C(stopped), or C(unknown).
returned: always
type: str
sample: running
status:
                  description: Status of the service. Either C(enabled), C(disabled), or C(unknown).
returned: systemd systems or RedHat/SUSE flavored sysvinit/upstart
type: str
sample: enabled
name:
description: Name of the service.
returned: always
type: str
sample: arp-ethers.service
'''
import re
from ansible.module_utils.basic import AnsibleModule
class BaseService(object):
def __init__(self, module):
self.module = module
self.incomplete_warning = False
class ServiceScanService(BaseService):
def gather_services(self):
services = {}
service_path = self.module.get_bin_path("service")
if service_path is None:
return None
initctl_path = self.module.get_bin_path("initctl")
chkconfig_path = self.module.get_bin_path("chkconfig")
# sysvinit
if service_path is not None and chkconfig_path is None:
rc, stdout, stderr = self.module.run_command("%s --status-all 2>&1 | grep -E \"\\[ (\\+|\\-) \\]\"" % service_path, use_unsafe_shell=True)
for line in stdout.split("\n"):
line_data = line.split()
if len(line_data) < 4:
continue # Skipping because we expected more data
service_name = " ".join(line_data[3:])
if line_data[1] == "+":
service_state = "running"
else:
service_state = "stopped"
services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
# Upstart
if initctl_path is not None and chkconfig_path is None:
p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
real_stdout = stdout.replace("\r", "")
for line in real_stdout.split("\n"):
m = p.match(line)
if not m:
continue
service_name = m.group('name')
service_goal = m.group('goal')
service_state = m.group('state')
if m.group('pid'):
pid = m.group('pid')
else:
pid = None # NOQA
payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
services[service_name] = payload
# RH sysvinit
elif chkconfig_path is not None:
# print '%s --status-all | grep -E "is (running|stopped)"' % service_path
p = re.compile(
r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
# Check for special cases where stdout does not fit pattern
match_any = False
for line in stdout.split('\n'):
if p.match(line):
match_any = True
if not match_any:
p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
match_any = False
for line in stdout.split('\n'):
if p_simple.match(line):
match_any = True
if match_any:
# Try extra flags " -l --allservices" needed for SLES11
rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)
elif '--list' in stderr:
# Extra flag needed for RHEL5
rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)
for line in stdout.split('\n'):
m = p.match(line)
if m:
service_name = m.group('service')
service_state = 'stopped'
service_status = "disabled"
if m.group('rl3') == 'on':
service_status = "enabled"
rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)
service_state = rc
if rc in (0,):
service_state = 'running'
# elif rc in (1,3):
else:
if 'root' in stderr or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():
self.incomplete_warning = True
continue
else:
service_state = 'stopped'
service_data = {"name": service_name, "state": service_state, "status": service_status, "source": "sysv"}
services[service_name] = service_data
return services
class SystemctlScanService(BaseService):
def systemd_enabled(self):
# Check if init is the systemd command, using comm as cmdline could be symlink
try:
f = open('/proc/1/comm', 'r')
except IOError:
# If comm doesn't exist, old kernel, no systemd
return False
for line in f:
if 'systemd' in line:
return True
return False
def gather_services(self):
services = {}
if not self.systemd_enabled():
return None
systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
if systemctl_path is None:
return None
rc, stdout, stderr = self.module.run_command("%s list-units --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line and 'not-found' not in svc_line]:
service_name = line.split()[0]
if "running" in line:
state_val = "running"
else:
if 'failed' in line:
service_name = line.split()[1]
state_val = "stopped"
services[service_name] = {"name": service_name, "state": state_val, "status": "unknown", "source": "systemd"}
rc, stdout, stderr = self.module.run_command("%s list-unit-files --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line and 'not-found' not in svc_line]:
try:
service_name, status_val = line.split()
except ValueError:
self.module.fail_json(msg="Malformed output discovered from systemd list-unit-files: {0}".format(line))
if service_name not in services:
services[service_name] = {"name": service_name, "state": "unknown", "status": status_val, "source": "systemd"}
else:
services[service_name]["status"] = status_val
return services
def main():
module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
module.run_command_environ_update = dict(LANG="C", LC_ALL="C")
service_modules = (ServiceScanService, SystemctlScanService)
all_services = {}
incomplete_warning = False
for svc_module in service_modules:
svcmod = svc_module(module)
svc = svcmod.gather_services()
if svc is not None:
all_services.update(svc)
if svcmod.incomplete_warning:
incomplete_warning = True
if len(all_services) == 0:
results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
else:
results = dict(ansible_facts=dict(services=all_services))
if incomplete_warning:
results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,744,175,460,716,717,300 | 40.682731 | 150 | 0.561615 | false |
nrudenko/anarcho | anarchoApp/api_tests/test_team.py | 1 | 3847 | from api_tests import AnarchoTestCase
from anarcho.models.user import User
from anarcho.models.user_app import UserApp
test_team_user_email = '[email protected]'
test_team_user_name = 'test_name2'
class TeamTest(AnarchoTestCase):
def setUp(self):
AnarchoTestCase.setUp(self)
self.register()
self.login()
self.register(email=test_team_user_email, name=test_team_user_name)
self.create_app()
self.app_key = self.created_app.app_key
self.add_to_team(email=test_team_user_email, app_key=self.app_key, permission='r')
def get_user_app(self):
"""
:rtype: UserApp
"""
user = User.query.filter_by(email=test_team_user_email).first()
if user:
return UserApp.query.filter_by(user_id=user.id).first()
def test_permissions_update(self):
r = self.update_permission(email=test_team_user_email, app_key=self.app_key, permission='w')
self.assert_status_code(r)
user_app = self.get_user_app()
self.assertIsNotNone(user_app, msg='UserApp for {0} not found'.format('[email protected]'))
self.assertTrue(user_app.permission == 'w', msg='Wrong permission after update')
def test_can_not_found_app(self):
r = self.update_permission()
self.assert_status_code(r, 404)
self.assert_error_message(r, 'app_not_found')
def test_permissions_remove(self):
r = self.remove_permission(email=test_team_user_email, app_key=self.app_key)
self.assert_status_code(r)
user_app = self.get_user_app()
self.assertIsNone(user_app, msg='UserApp for {0} not deleted'.format('[email protected]'))
def test_user_can_not_remove_his_permissions(self):
r = self.remove_permission(email=self.test_user_email, app_key=self.app_key)
self.assert_status_code(r, 403)
def test_user_can_not_update_his_permissions(self):
r = self.remove_permission(email=self.test_user_email, app_key=self.app_key)
self.assert_status_code(r, 403)
def test_user_can_not_add_to_app_existing_user(self):
r = self.add_to_team(email=test_team_user_email, app_key=self.app_key, permission='r')
self.assert_status_code(r, 409)
self.assert_error_message(r, 'user_with_current_email_already_exist')
def test_email_format_validation(self):
r = self.add_to_team(email='test3mail.com', app_key=self.app_key, permission='r')
self.assert_status_code(r, 403)
def test_empty_email_validation(self):
r = self.add_to_team(email=' ', app_key=self.app_key, permission='r')
self.assert_status_code(r, 403)
def test_email_length_validation(self):
r = self.add_to_team(email='[email protected]', app_key=self.app_key, permission='r')
self.assert_status_code(r, 403)
def test_add_existing_user_to_team(self):
self.register('[email protected]', 'test_name3')
self.create_app(app_name='test_app2')
self.login()
r = self.add_to_team(email='[email protected]', app_key=self.app_key, permission='r')
self.assert_status_code(r)
def test_add_user_with_insensitive_email_to_team(self):
email = '[email protected]'
self.register(email=email, name='test_name3')
self.create_app(app_name='test_app2')
self.login()
r = self.add_to_team(email=email.lower(), app_key=self.app_key, permission='r')
self.assert_status_code(r)
def test_register_user_after_adding_to_team(self):
email = '[email protected]'
self.login()
r = self.add_to_team(email=email, app_key=self.app_key, permission='r')
self.assert_status_code(r)
r = self.register(email=email, name='test_name4')
self.assert_status_code(r)
r = self.login()
self.assert_status_code(r) | mit | 9,175,762,444,461,935,000 | 39.505263 | 104 | 0.644918 | false |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/tensorflow/python/training/training.py | 6 | 11555 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""This library provides a set of classes and functions that helps train models.
## Optimizers
The Optimizer base class provides methods to compute gradients for a loss and
apply gradients to variables. A collection of subclasses implement classic
optimization algorithms such as GradientDescent and Adagrad.
You never instantiate the Optimizer class itself, but instead instantiate one
of the subclasses.
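A minimal usage sketch (illustrative only; `loss` and the learning rate are
assumptions, not part of this documentation):
```python
# Create an optimizer with the desired parameters.
opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# Add an op that minimizes `loss` by updating the trainable variables.
train_op = opt.minimize(loss)
# Running `train_op` in a session performs one training step.
```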
@@Optimizer
@@GradientDescentOptimizer
@@AdadeltaOptimizer
@@AdagradOptimizer
@@AdagradDAOptimizer
@@MomentumOptimizer
@@AdamOptimizer
@@FtrlOptimizer
@@ProximalGradientDescentOptimizer
@@ProximalAdagradOptimizer
@@RMSPropOptimizer
## Gradient Computation
TensorFlow provides functions to compute the derivatives for a given
TensorFlow computation graph, adding operations to the graph. The
optimizer classes automatically compute derivatives on your graph, but
creators of new Optimizers or expert users can call the lower-level
functions below.
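A hedged sketch of calling the lower-level gradient function directly
(`loss`, `w` and `b` are assumed to already exist in the graph):
```python
# Returns the symbolic partial derivatives d(loss)/dw and d(loss)/db.
grad_w, grad_b = tf.gradients(loss, [w, b])
```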
@@gradients
@@AggregationMethod
@@stop_gradient
@@hessians
## Gradient Clipping
TensorFlow provides several operations that you can use to add clipping
functions to your graph. You can use these functions to perform general data
clipping, but they're particularly useful for handling exploding or vanishing
gradients.
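A short sketch of global-norm clipping (the gradient list `grads` and the
clip value are assumptions for illustration):
```python
# Rescale all gradients together so that their global norm is at most 5.0.
clipped_grads, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)
```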
@@clip_by_value
@@clip_by_norm
@@clip_by_average_norm
@@clip_by_global_norm
@@global_norm
## Decaying the learning rate
@@exponential_decay
@@inverse_time_decay
@@natural_exp_decay
@@piecewise_constant
@@polynomial_decay
## Moving Averages
Some training algorithms, such as GradientDescent and Momentum often benefit
from maintaining a moving average of variables during optimization. Using the
moving averages for evaluations often improve results significantly.
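A minimal sketch of maintaining averaged copies of two variables (`var0` and
`var1` are assumed to be existing variables):
```python
ema = tf.train.ExponentialMovingAverage(decay=0.999)
# Creates shadow variables and an op that updates their moving averages.
maintain_averages_op = ema.apply([var0, var1])
# Typically run after the optimizer's update op; the averaged value can later
# be read back through ema.average(var0).
```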
@@ExponentialMovingAverage
## Coordinator and QueueRunner
See [Threading and Queues](../../how_tos/threading_and_queues/index.md)
for how to use threads and queues. For documentation on the Queue API,
see [Queues](../../api_docs/python/io_ops.md#queues).
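A small sketch of the usual coordination pattern (the session `sess` is an
assumption; queue runners must already be registered in the graph):
```python
coord = tf.train.Coordinator()
# Start threads for all queue runners collected in the graph.
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# ... run training ...
coord.request_stop()   # ask the threads to stop
coord.join(threads)    # wait for them to finish
```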
@@Coordinator
@@QueueRunner
@@add_queue_runner
@@start_queue_runners
## Distributed execution
See [Distributed TensorFlow](../../how_tos/distributed/index.md) for
more information about how to configure a distributed TensorFlow program.
@@Server
@@Supervisor
@@SessionManager
@@ClusterSpec
@@replica_device_setter
@@Scaffold
@@MonitoredTrainingSession
@@SessionCreator
@@ChiefSessionCreator
@@WorkerSessionCreator
@@MonitoredSession
## Summary Operations
The following ops output
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffers as serialized string tensors.
You can fetch the output of a summary op in a session, and pass it to
a [SummaryWriter](../../api_docs/python/train.md#SummaryWriter) to append it
to an event file. Event files contain
[`Event`](https://www.tensorflow.org/code/tensorflow/core/util/event.proto)
protos that can contain `Summary` protos along with the timestamp and
step. You can then use TensorBoard to visualize the contents of the
event files. See [TensorBoard and
Summaries](../../how_tos/summaries_and_tensorboard/index.md) for more
details.
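A hedged sketch of fetching and writing summaries (`sess`, `logdir` and
`step` are assumptions; the exact namespace of these symbols depends on the
TensorFlow release):
```python
merged = merge_all_summaries()
writer = SummaryWriter(logdir, sess.graph)
# Evaluate the merged summary op and append the result to the event file.
summary_str = sess.run(merged)
writer.add_summary(summary_str, global_step=step)
```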
@@scalar_summary
@@image_summary
@@audio_summary
@@histogram_summary
@@zero_fraction
@@merge_summary
@@merge_all_summaries
## Adding Summaries to Event Files
See [Summaries and
TensorBoard](../../how_tos/summaries_and_tensorboard/index.md) for an
overview of summaries, event files, and visualization in TensorBoard.
@@SummaryWriter
@@SummaryWriterCache
@@summary_iterator
## Training utilities
@@global_step
@@basic_train_loop
@@get_global_step
@@assert_global_step
@@write_graph
@@SessionRunHook
@@LoggingTensorHook
@@StopAtStepHook
@@CheckpointSaverHook
@@NewCheckpointReader
@@StepCounterHook
@@NanLossDuringTrainingError
@@NanTensorHook
@@SummarySaverHook
@@SessionRunArgs
@@SessionRunContext
@@SessionRunValues
@@LooperThread
"""
# pylint: enable=line-too-long
# Optimizers.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
from tensorflow.python.ops import io_ops as _io_ops
from tensorflow.python.ops import state_ops as _state_ops
from tensorflow.python.util.all_util import remove_undocumented
# pylint: disable=g-bad-import-order,unused-import
from tensorflow.python.training.adadelta import AdadeltaOptimizer
from tensorflow.python.training.adagrad import AdagradOptimizer
from tensorflow.python.training.adagrad_da import AdagradDAOptimizer
from tensorflow.python.training.proximal_adagrad import ProximalAdagradOptimizer
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.ftrl import FtrlOptimizer
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.moving_averages import ExponentialMovingAverage
from tensorflow.python.training.optimizer import Optimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.proximal_gradient_descent import ProximalGradientDescentOptimizer
from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizer
from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizerV2
# Utility classes for training.
from tensorflow.python.training.coordinator import Coordinator
from tensorflow.python.training.coordinator import LooperThread
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.queue_runner import *
# For the module level doc.
from tensorflow.python.training import input as _input
from tensorflow.python.training.input import *
# pylint: enable=wildcard-import
from tensorflow.python.training.basic_session_run_hooks import LoggingTensorHook
from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook
from tensorflow.python.training.basic_session_run_hooks import CheckpointSaverHook
from tensorflow.python.training.basic_session_run_hooks import StepCounterHook
from tensorflow.python.training.basic_session_run_hooks import NanLossDuringTrainingError
from tensorflow.python.training.basic_session_run_hooks import NanTensorHook
from tensorflow.python.training.basic_session_run_hooks import SummarySaverHook
from tensorflow.python.training.basic_loops import basic_train_loop
from tensorflow.python.training.device_setter import replica_device_setter
from tensorflow.python.training.monitored_session import Scaffold
from tensorflow.python.training.monitored_session import MonitoredTrainingSession
from tensorflow.python.training.monitored_session import SessionCreator
from tensorflow.python.training.monitored_session import ChiefSessionCreator
from tensorflow.python.training.monitored_session import WorkerSessionCreator
from tensorflow.python.training.monitored_session import MonitoredSession
from tensorflow.python.training.saver import Saver
from tensorflow.python.training.saver import checkpoint_exists
from tensorflow.python.training.saver import generate_checkpoint_state_proto
from tensorflow.python.training.saver import get_checkpoint_mtimes
from tensorflow.python.training.saver import get_checkpoint_state
from tensorflow.python.training.saver import latest_checkpoint
from tensorflow.python.training.saver import update_checkpoint_state
from tensorflow.python.training.saver import export_meta_graph
from tensorflow.python.training.saver import import_meta_graph
from tensorflow.python.training.session_run_hook import SessionRunHook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.session_run_hook import SessionRunContext
from tensorflow.python.training.session_run_hook import SessionRunValues
from tensorflow.python.training.session_manager import SessionManager
from tensorflow.python.training.summary_io import summary_iterator
from tensorflow.python.training.summary_io import SummaryWriter
from tensorflow.python.training.summary_io import SummaryWriterCache
from tensorflow.python.training.supervisor import Supervisor
from tensorflow.python.training.training_util import write_graph
from tensorflow.python.training.training_util import global_step
from tensorflow.python.training.training_util import get_global_step
from tensorflow.python.training.training_util import assert_global_step
from tensorflow.python.pywrap_tensorflow import do_quantize_training_on_graphdef
from tensorflow.python.pywrap_tensorflow import NewCheckpointReader
# pylint: disable=wildcard-import
# Training data protos.
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
from tensorflow.core.protobuf.saver_pb2 import *
# Utility op. Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import *
# pylint: enable=wildcard-import
# Distributed computing support.
from tensorflow.core.protobuf.tensorflow_server_pb2 import ClusterDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import JobDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.training.server_lib import Server
# Symbols whitelisted for export without documentation.
_allowed_symbols = [
# TODO(cwhipkey): review these and move to contrib or expose through
# documentation.
"generate_checkpoint_state_proto", # Used internally by saver.
"checkpoint_exists", # Only used in test?
"get_checkpoint_mtimes", # Only used in test?
# Legacy: remove.
"do_quantize_training_on_graphdef", # At least use grah_def, not graphdef.
# No uses within tensorflow.
"queue_runner", # Use tf.train.start_queue_runner etc directly.
# This is also imported internally.
# TODO(drpng): document these. The reference in howtos/distributed does
# not link.
"SyncReplicasOptimizer",
"SyncReplicasOptimizerV2",
# Protobufs:
"BytesList", # from example_pb2.
"ClusterDef",
"Example", # from example_pb2
"Feature", # from example_pb2
"Features", # from example_pb2
"FeatureList", # from example_pb2
"FeatureLists", # from example_pb2
"FloatList", # from example_pb2.
"Int64List", # from example_pb2.
"JobDef",
"SaverDef", # From saver_pb2.
"SequenceExample", # from example_pb2.
"ServerDef",
]
# Include extra modules for docstrings because:
# * Input methods in tf.train are documented in io_ops.
# * Saver methods in tf.train are documented in state_ops.
remove_undocumented(__name__, _allowed_symbols,
[_sys.modules[__name__], _io_ops, _state_ops])
| agpl-3.0 | 4,014,976,931,703,586,300 | 36.761438 | 97 | 0.792557 | false |
jiangzhw/SPF | spf/modules/theharvester.py | 8 | 1193 | import re
import os
import subprocess
import tempfile
from modules.dataCollector import dataCollector
class theHarvester(dataCollector):
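    """Thin dataCollector wrapper that shells out to the theHarvester tool
    (bing backend, 500 results) and returns the raw XML it writes to a
    temporary output file."""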
def __init__(self, domain, path, display):
dataCollector.__init__(self, domain, path, "theHarvester", display)
        (fd, self.outfile) = tempfile.mkstemp(suffix='.xml', prefix='theHarvester_', dir=None, text=True)
        os.close(fd)  # only the path is needed; release the open descriptor
def run_command(self):
self.display.verbose("python " + self.path + " -d " + self.domain + " -b bing -l 500 -f")
return subprocess.call(["python", self.path, "-d", self.domain, "-b", "bing", "-l", "500", "-f", self.outfile], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def load_results(self):
        # read the full XML results file written by theHarvester
        with open(self.outfile, 'rb') as outfile:
            data = outfile.read()
        return data
# extract necessary data
# grep -oh "[^>]*@[^<]*" self.outfile
# emails = re.findall("<email>([^<]*)</email>", data)
# return emails
def cleanup(self):
os.remove(self.outfile)
return
if __name__ == "__main__":
th = theHarvester("example.com", "/TOOLS/theHarvester/theHarvester.py")
th.run()
print th.emails()
print th.hosts()
| bsd-3-clause | 3,164,024,515,377,885,000 | 33.085714 | 169 | 0.610226 | false |
supermari0/ironic | ironic/common/config.py | 17 | 1141 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ironic.common import rpc
from ironic import version
def parse_args(argv, default_config_files=None):
rpc.set_defaults(control_exchange='ironic')
cfg.CONF(argv[1:],
project='ironic',
version=version.version_info.release_string(),
default_config_files=default_config_files)
rpc.init(cfg.CONF)
| apache-2.0 | -3,654,655,260,568,023,000 | 37.033333 | 78 | 0.722174 | false |
venkat0708/icecream | docs/conf.py | 1 | 7891 | # -*- coding: utf-8 -*-
#
# icecream_project documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'icecream_project'
copyright = u"2015, venkateswara reddy kasana"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'icecreamdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'icecream.tex',
u'icecream_project Documentation',
u"venkateswara reddy kasana", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'icecream', u'icecream_project Documentation',
[u"venkateswara reddy kasana"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'icecream', u'icecream_project Documentation',
u"venkateswara reddy kasana", 'icecream_project',
'helps in maintaining a shop', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| bsd-3-clause | 6,676,328,331,413,885,000 | 31.208163 | 80 | 0.698897 | false |
Murali-group/GraphSpace | docs/conf.py | 1 | 10096 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# graphspace-manual documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 21 01:03:05 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'graphspace-manual'
copyright = '2017, Murali`s Research Group'
author = 'Aditya Bharadwaj'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'graphspace-manual v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'graphspace-manualdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'graphspace-manual.tex', 'graphspace-manual Documentation',
'Aditya Bharadwaj', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'graphspace-manual', 'graphspace-manual Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'graphspace-manual', 'graphspace-manual Documentation',
author, 'graphspace-manual', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
from recommonmark.parser import CommonMarkParser
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': CommonMarkParser,
}
| gpl-2.0 | -1,141,918,303,103,142,500 | 27.681818 | 80 | 0.694334 | false |
crs4/pyEHR | test/pyehr/ehr/services/dbmanager/dbservices/test_version_manager.py | 2 | 10965 | import unittest, sys, os, uuid, random, copy
from pyehr.ehr.services.dbmanager.dbservices import DBServices
from pyehr.ehr.services.dbmanager.dbservices.wrappers import PatientRecord,\
ClinicalRecord, ClinicalRecordRevision, ArchetypeInstance
from pyehr.ehr.services.dbmanager.errors import OptimisticLockError,\
RedundantUpdateError, MissingRevisionError, RecordRestoreUnnecessaryError,\
OperationNotAllowedError
from pyehr.utils.services import get_service_configuration
CONF_FILE = os.getenv('SERVICE_CONFIG_FILE')
class TestVersionManager(unittest.TestCase):
def __init__(self, label):
super(TestVersionManager, self).__init__(label)
self.dbs = None
self.patient = None
def _create_random_patient(self):
self.patient = PatientRecord(record_id=uuid.uuid4().hex)
def _create_random_clinical_record(self):
arch = ArchetypeInstance('openEHR-EHR-OBSERVATION.dummy-observation.v1',
{'data': {
'at0001': random.randint(1, 99),
'at0002': 'just a text field'
}})
return ClinicalRecord(arch)
def _create_random_complex_archetype(self):
arch1 = ArchetypeInstance('openEHR-EHR-OBSERVATION.dummy-observation.v1',
{
'data': {
'at0001': random.randint(1, 99),
'at0002': 'just a text field'
}
})
arch2 = ArchetypeInstance('openEHR-EHR-COMPOSITION.dummy-composition.v1',
{
'data': {
'at1001': random.randint(1, 100),
'at1002': arch1
}
})
return arch2
def build_dataset(self):
self._create_random_patient()
crec = self._create_random_clinical_record()
self.patient = self.dbs.save_patient(self.patient)
crec, self.patient = self.dbs.save_ehr_record(crec, self.patient)
return crec
def setUp(self):
if CONF_FILE is None:
sys.exit('ERROR: no configuration file provided')
sconf = get_service_configuration(CONF_FILE)
self.dbs = DBServices(**sconf.get_db_configuration())
self.dbs.set_index_service(**sconf.get_index_configuration())
def tearDown(self):
if self.patient:
self.dbs.delete_patient(self.patient, cascade_delete=True)
self.dbs = None
def test_record_update(self):
crec = self.build_dataset()
crec.ehr_data.archetype_details['data']['at0001'] = random.randint(100, 200)
crec = self.dbs.update_ehr_record(crec)
self.assertEqual(crec.version, 2)
self.assertGreater(crec.last_update, crec.creation_time)
def test_record_restore(self):
crec = self.build_dataset()
for x in xrange(0, 10):
crec.ehr_data.archetype_details['data']['at0001'] = random.randint(100*x, 200*x)
crec = self.dbs.update_ehr_record(crec)
if x == 4:
v6_value = crec.ehr_data.archetype_details['data']['at0001']
v6_last_update = crec.last_update
self.assertEqual(crec.version, 11)
crec, deleted_revisions = self.dbs.restore_ehr_version(crec, 6)
self.assertEqual(deleted_revisions, 5)
self.assertEqual(crec.version, 6)
self.assertEqual(crec.ehr_data.archetype_details['data']['at0001'],
v6_value)
self.assertEqual(crec.last_update, v6_last_update)
def test_record_restore_original(self):
crec = self.build_dataset()
original_last_update = crec.last_update
original_value = crec.ehr_data.archetype_details['data']['at0001']
for x in xrange(0, 10):
crec.ehr_data.archetype_details['data']['at0001'] = random.randint(100*x, 200*x)
crec = self.dbs.update_ehr_record(crec)
self.assertEqual(crec.version, 11)
crec, deleted_revisions = self.dbs.restore_original_ehr(crec)
self.assertEqual(deleted_revisions, 10)
self.assertEqual(crec.version, 1)
self.assertEqual(crec.last_update, original_last_update)
self.assertEqual(crec.ehr_data.archetype_details['data']['at0001'],
original_value)
def test_record_restore_previous_revision(self):
crec = self.build_dataset()
crec_rev_1 = crec.to_json()
crec.ehr_data.archetype_details['data']['at0001'] = random.randint(100, 200)
crec = self.dbs.update_ehr_record(crec)
crec_rev_2 = crec.to_json()
crec.ehr_data.archetype_details['data']['at0002'] = 'updated text message'
crec = self.dbs.update_ehr_record(crec)
self.assertEqual(crec.version, 3)
crec = self.dbs.restore_previous_ehr_version(crec)
self.assertEqual(crec.to_json(), crec_rev_2)
crec = self.dbs.restore_previous_ehr_version(crec)
self.assertEqual(crec.to_json(), crec_rev_1)
def test_record_reindex(self):
crec = self.build_dataset()
crec_struct_id = crec.structure_id
crec.ehr_data = self._create_random_complex_archetype()
crec = self.dbs.update_ehr_record(crec)
self.assertNotEqual(crec_struct_id, crec.structure_id)
self.assertEqual(crec.version, 2)
self.assertGreater(crec.last_update, crec.creation_time)
self.assertEqual(crec.ehr_data.archetype_class, 'openEHR-EHR-COMPOSITION.dummy-composition.v1')
crec_struct_id = crec.structure_id
crec, deleted_revisions = self.dbs.restore_original_ehr(crec)
self.assertNotEqual(crec.structure_id, crec_struct_id)
self.assertEqual(crec.version, 1)
self.assertEqual(crec.ehr_data.archetype_class, 'openEHR-EHR-OBSERVATION.dummy-observation.v1')
def test_get_revision(self):
crec = self.build_dataset()
for x in xrange(0, 10):
crec.ehr_data.archetype_details['data']['at0001'] = random.randint(100*x, 200*x)
crec = self.dbs.update_ehr_record(crec)
if x == 4:
v6_value = crec.ehr_data.archetype_details['data']['at0001']
v6_last_update = crec.last_update
crec_v6 = self.dbs.get_revision(crec, 6)
self.assertIsInstance(crec_v6, ClinicalRecordRevision)
self.assertEqual(crec_v6.last_update, v6_last_update)
self.assertEqual(crec_v6.ehr_data.archetype_details['data']['at0001'],
v6_value)
def test_get_revisions(self):
crec = self.build_dataset()
for x in xrange(0, 10):
crec.ehr_data.archetype_details['data']['at0001'] = random.randint(100*x, 200*x)
crec = self.dbs.update_ehr_record(crec)
revisions = self.dbs.get_revisions(crec)
self.assertEqual(len(revisions), 10)
for rev in revisions:
self.assertIsInstance(rev, ClinicalRecordRevision)
self.assertEqual(revisions[0].version, 1)
self.assertEqual(revisions[-1].version, 10)
revisions = self.dbs.get_revisions(crec, reverse_ordering=True)
self.assertEqual(len(revisions), 10)
for rev in revisions:
self.assertIsInstance(rev, ClinicalRecordRevision)
self.assertEqual(revisions[0].version, 10)
self.assertEqual(revisions[-1].version, 1)
def test_optimistic_lock_error(self):
# first user creates a clinical record
crec1 = self.build_dataset()
# second user retrieve the same record (using copy to make things fast)
crec2 = copy.copy(crec1)
# first user update the record
crec1.ehr_data.archetype_details['data']['at0001'] = random.randint(100, 200)
self.dbs.update_ehr_record(crec1)
# second user try to update the record, an OptimisticLockError is raised
with self.assertRaises(OptimisticLockError) as ctx:
crec2.ehr_data.archetype_details['data']['at0002'] = 'updated text message'
self.dbs.update_ehr_record(crec2)
def test_redundant_update_error(self):
crec = self.build_dataset()
# record unchanged, try to update anyway
with self.assertRaises(RedundantUpdateError) as ctx:
self.dbs.update_ehr_record(crec)
def test_missing_revision_error(self):
# first user creates a clinical record and updates it several times
crec1 = self.build_dataset()
for x in xrange(0, 10):
crec1.ehr_data.archetype_details['data']['at0001'] = random.randint(100*x, 200*x)
crec1 = self.dbs.update_ehr_record(crec1)
# second user get the last version of the same record (using copy as shortcut)
crec2 = copy.copy(crec1)
# first user restore the original version of the record
self.dbs.restore_original_ehr(crec1)
# second user restores one previous version of the record but this will fail
# because used version no longer exists
with self.assertRaises(MissingRevisionError) as ctx:
self.dbs.restore_ehr_version(crec2, 5)
def test_record_restore_unnecessary_error(self):
crec = self.build_dataset()
with self.assertRaises(RecordRestoreUnnecessaryError) as ctx:
self.dbs.restore_previous_ehr_version(crec)
self.dbs.restore_original_ehr(crec)
def test_operation_not_allowed_error(self):
crec = self._create_random_clinical_record()
with self.assertRaises(OperationNotAllowedError):
crec.ehr_data.archetype_details['data']['at0002'] = 'updated text message'
self.dbs.update_ehr_record(crec)
self.dbs.restore_original_ehr(crec)
def suite():
suite = unittest.TestSuite()
suite.addTest(TestVersionManager('test_record_update'))
suite.addTest(TestVersionManager('test_record_restore'))
suite.addTest(TestVersionManager('test_record_restore_original'))
suite.addTest(TestVersionManager('test_record_restore_previous_revision'))
suite.addTest(TestVersionManager('test_record_reindex'))
suite.addTest(TestVersionManager('test_get_revision'))
suite.addTest(TestVersionManager('test_get_revisions'))
suite.addTest(TestVersionManager('test_optimistic_lock_error'))
suite.addTest(TestVersionManager('test_redundant_update_error'))
suite.addTest(TestVersionManager('test_missing_revision_error'))
suite.addTest(TestVersionManager('test_record_restore_unnecessary_error'))
suite.addTest(TestVersionManager('test_operation_not_allowed_error'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite()) | mit | 1,788,116,691,555,050,800 | 46.267241 | 103 | 0.627086 | false |
ales-erjavec/anyqt | AnyQt/QtWidgets.py | 1 | 11055 | from warnings import warn
from . import _api
# Names imported from Qt4's QtGui module
__Qt4_QtGui = [
'QAbstractButton',
'QAbstractGraphicsShapeItem',
'QAbstractItemDelegate',
'QAbstractItemView',
'QAbstractScrollArea',
'QAbstractSlider',
'QAbstractSpinBox',
'QAction',
'QActionGroup',
'QApplication',
'QBoxLayout',
'QButtonGroup',
'QCalendarWidget',
'QCheckBox',
'QColorDialog',
'QColumnView',
'QComboBox',
'QCommandLinkButton',
'QCommonStyle',
'QCompleter',
'QDataWidgetMapper',
'QDateEdit',
'QDateTimeEdit',
'QDesktopWidget',
'QDial',
'QDialog',
'QDialogButtonBox',
'QDirModel',
'QDockWidget',
'QDoubleSpinBox',
'QErrorMessage',
'QFileDialog',
'QFileIconProvider',
'QFileSystemModel',
'QFocusFrame',
'QFontComboBox',
'QFontDialog',
'QFormLayout',
'QFrame',
'QGesture',
'QGestureEvent',
'QGestureRecognizer',
'QGraphicsAnchor',
'QGraphicsAnchorLayout',
'QGraphicsBlurEffect',
'QGraphicsColorizeEffect',
'QGraphicsDropShadowEffect',
'QGraphicsEffect',
'QGraphicsEllipseItem',
'QGraphicsGridLayout',
'QGraphicsItem',
'QGraphicsItemGroup',
'QGraphicsLayout',
'QGraphicsLayoutItem',
'QGraphicsLineItem',
'QGraphicsLinearLayout',
'QGraphicsObject',
'QGraphicsOpacityEffect',
'QGraphicsPathItem',
'QGraphicsPixmapItem',
'QGraphicsPolygonItem',
'QGraphicsProxyWidget',
'QGraphicsRectItem',
'QGraphicsRotation',
'QGraphicsScale',
'QGraphicsScene',
'QGraphicsSceneContextMenuEvent',
'QGraphicsSceneDragDropEvent',
'QGraphicsSceneEvent',
'QGraphicsSceneHelpEvent',
'QGraphicsSceneHoverEvent',
'QGraphicsSceneMouseEvent',
'QGraphicsSceneMoveEvent',
'QGraphicsSceneResizeEvent',
'QGraphicsSceneWheelEvent',
'QGraphicsSimpleTextItem',
'QGraphicsTextItem',
'QGraphicsTransform',
'QGraphicsView',
'QGraphicsWidget',
'QGridLayout',
'QGroupBox',
'QHBoxLayout',
'QHeaderView',
'QInputDialog',
'QItemDelegate',
'QItemEditorCreatorBase',
'QItemEditorFactory',
'QKeyEventTransition',
# 'QKeySequenceEdit',
'QLCDNumber',
'QLabel',
'QLayout',
'QLayoutItem',
'QLineEdit',
'QListView',
'QListWidget',
'QListWidgetItem',
'QMacCocoaViewContainer',
'QMainWindow',
'QMdiArea',
'QMdiSubWindow',
'QMenu',
'QMenuBar',
'QMessageBox',
'QMouseEventTransition',
# 'QOpenGLWidget',
'QPanGesture',
'QPinchGesture',
'QPlainTextDocumentLayout',
'QPlainTextEdit',
'QProgressBar',
'QProgressDialog',
# 'QProxyStyle',
'QPushButton',
'QRadioButton',
'QRubberBand',
'QScrollArea',
'QScrollBar',
# 'QScroller',
# 'QScrollerProperties',
'QShortcut',
'QSizeGrip',
'QSizePolicy',
'QSlider',
'QSpacerItem',
'QSpinBox',
'QSplashScreen',
'QSplitter',
'QSplitterHandle',
'QStackedLayout',
'QStackedWidget',
'QStatusBar',
'QStyle',
'QStyleFactory',
'QStyleHintReturn',
'QStyleHintReturnMask',
'QStyleHintReturnVariant',
'QStyleOption',
'QStyleOptionButton',
'QStyleOptionComboBox',
'QStyleOptionComplex',
'QStyleOptionDockWidget',
'QStyleOptionFocusRect',
'QStyleOptionFrame',
'QStyleOptionGraphicsItem',
'QStyleOptionGroupBox',
'QStyleOptionHeader',
'QStyleOptionMenuItem',
'QStyleOptionProgressBar',
'QStyleOptionRubberBand',
'QStyleOptionSizeGrip',
'QStyleOptionSlider',
'QStyleOptionSpinBox',
'QStyleOptionTab',
'QStyleOptionTabBarBase',
'QStyleOptionTabWidgetFrame',
'QStyleOptionTitleBar',
'QStyleOptionToolBar',
'QStyleOptionToolBox',
'QStyleOptionToolButton',
'QStyleOptionViewItem',
'QStylePainter',
'QStyledItemDelegate',
'QSwipeGesture',
'QSystemTrayIcon',
'QTabBar',
'QTabWidget',
'QTableView',
'QTableWidget',
'QTableWidgetItem',
'QTableWidgetSelectionRange',
'QTapAndHoldGesture',
'QTapGesture',
'QTextBrowser',
'QTextEdit',
'QTimeEdit',
'QToolBar',
'QToolBox',
'QToolButton',
'QToolTip',
'QTreeView',
'QTreeWidget',
'QTreeWidgetItem',
'QTreeWidgetItemIterator',
'QUndoCommand',
'QUndoGroup',
'QUndoStack',
'QUndoView',
'QVBoxLayout',
'QWIDGETSIZE_MAX',
'QWhatsThis',
'QWidget',
'QWidgetAction',
'QWidgetItem',
'QWizard',
'QWizardPage',
'qApp',
'qDrawBorderPixmap',
'qDrawPlainRect',
'qDrawShadeLine',
'qDrawShadePanel',
'qDrawShadeRect',
'qDrawWinButton',
'qDrawWinPanel'
]
if _api.USED_API == _api.QT_API_PYQT5:
from PyQt5.QtWidgets import *
from PyQt5.QtCore import PYQT_VERSION as _PYQT_VERSION
if _PYQT_VERSION < 0x50502: # ?
from . import _fixes
_fixes.fix_pyqt5_QGraphicsItem_itemChange()
del _fixes
elif _api.USED_API == _api.QT_API_PYQT4:
from PyQt4 import QtGui as _QtGui
globals().update(
{name: getattr(_QtGui, name)
for name in __Qt4_QtGui if hasattr(_QtGui, name)}
)
# Alias the QStyleOption version classes
QStyleOptionViewItem = _QtGui.QStyleOptionViewItemV4
QStyleOptionViewItem_ = _QtGui.QStyleOptionViewItem
QStyleOptionToolBox = _QtGui.QStyleOptionToolBoxV2
QStyleOptionToolBox_ = _QtGui.QStyleOptionToolBox
QStyleOptionDockWidget = _QtGui.QStyleOptionDockWidgetV2
QStyleOptionDockWidget_ = _QtGui.QStyleOptionDockWidget
QStyleOptionFrame = _QtGui.QStyleOptionFrameV3
QStyleOptionFrame_ = _QtGui.QStyleOptionFrame
QStyleOptionProgressBar = _QtGui.QStyleOptionProgressBarV2
QStyleOptionProgressBar_ = _QtGui.QStyleOptionProgressBar
QStyleOptionTabWidgetFrame = _QtGui.QStyleOptionTabWidgetFrameV2
QStyleOptionTabWidgetFrame_ = _QtGui.QStyleOptionTabWidgetFrame
QStyleOptionTabBarBase = _QtGui.QStyleOptionTabBarBaseV2
QStyleOptionTabBarBase_ = _QtGui.QStyleOptionTabBarBase
QStyleOptionTab = _QtGui.QStyleOptionTabV3
QStyleOptionTab_ = _QtGui.QStyleOptionTab
# PyQt5's version of QFileDialog's static methods
class QFileDialog(_QtGui.QFileDialog):
getOpenFileName = _QtGui.QFileDialog.getOpenFileNameAndFilter
getOpenFileNames = _QtGui.QFileDialog.getOpenFileNamesAndFilter
getSaveFileName = _QtGui.QFileDialog.getSaveFileNameAndFilter
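    # Illustrative: with the aliases above, QFileDialog.getOpenFileName() also
    # returns a (filename, selected_filter) tuple under PyQt4, mirroring the
    # PyQt5 signature that this module emulates.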
# Some extra forward compatibility
QHeaderView.setSectionResizeMode = lambda self, *args: self.setResizeMode(*args)
QHeaderView.sectionResizeMode = lambda self: self.resizeMode()
QHeaderView.sectionsClickable = lambda self: self.isClickable()
QHeaderView.setSectionsClickable = \
lambda self, clickable: self.setClickable(clickable)
QHeaderView.sectionsMovable = lambda self: self.isMovable()
QHeaderView.setSectionsMovable = \
lambda self, movable: self.setMovable(movable)
from PyQt4 import QtCore as __QtCore
QWidget = _QtGui.QWidget
__QPixmap = _QtGui.QPixmap
def _QWidget_grab(self, rect=__QtCore.QRect(0, 0, -1, -1)):
if not rect.isValid():
return __QPixmap.grabWidget(self)
else:
return __QPixmap.grabWidget(self, rect)
QWidget.grab = _QWidget_grab
del _QtGui, __QtCore
elif _api.USED_API == _api.QT_API_PYSIDE:
from PySide import QtGui as _QtGui
globals().update(
{name: getattr(_QtGui, name)
for name in __Qt4_QtGui if hasattr(_QtGui, name)}
)
# Alias the QStyleOption version classes
QStyleOptionViewItem = _QtGui.QStyleOptionViewItemV4
QStyleOptionViewItem_ = _QtGui.QStyleOptionViewItem
QStyleOptionToolBox = _QtGui.QStyleOptionToolBoxV2
QStyleOptionToolBox_ = _QtGui.QStyleOptionToolBox
QStyleOptionDockWidget = _QtGui.QStyleOptionDockWidgetV2
QStyleOptionDockWidget_ = _QtGui.QStyleOptionDockWidget
QStyleOptionFrame = _QtGui.QStyleOptionFrameV3
QStyleOptionFrame_ = _QtGui.QStyleOptionFrame
QStyleOptionProgressBar = _QtGui.QStyleOptionProgressBarV2
QStyleOptionProgressBar_ = _QtGui.QStyleOptionProgressBar
if hasattr(_QtGui, "QStyleOptionTabWidgetFrameV2"):
QStyleOptionTabWidgetFrame = _QtGui.QStyleOptionTabWidgetFrameV2
QStyleOptionTabWidgetFrame_ = _QtGui.QStyleOptionTabWidgetFrame
else:
QStyleOptionTabWidgetFrame = _QtGui.QStyleOptionTabWidgetFrame
QStyleOptionTabWidgetFrame_ = _QtGui.QStyleOptionTabWidgetFrame
QStyleOptionTabBarBase = _QtGui.QStyleOptionTabBarBaseV2
QStyleOptionTabBarBase_ = _QtGui.QStyleOptionTabBarBase
QStyleOptionTab = _QtGui.QStyleOptionTabV3
QStyleOptionTab_ = _QtGui.QStyleOptionTab
# Some extra forward compatibility
QHeaderView.setSectionResizeMode = lambda self, *args: self.setResizeMode(*args)
QHeaderView.sectionResizeMode = lambda self: self.resizeMode()
QHeaderView.sectionsClickable = lambda self: self.isClickable()
QHeaderView.setSectionsClickable = \
lambda self, clickable: self.setClickable(clickable)
QHeaderView.sectionsMovable = lambda self: self.isMovable()
QHeaderView.setSectionsMovable = \
lambda self, movable: self.setMovable(movable)
from PySide import QtCore as __QtCore
QWidget = _QtGui.QWidget
__QPixmap = _QtGui.QPixmap
def _QWidget_grab(self, rect=__QtCore.QRect(0, 0, -1, -1)):
if not rect.isValid():
return __QPixmap.grabWidget(self)
else:
return __QPixmap.grabWidget(self, rect)
QWidget.grab = _QWidget_grab
del _QtGui, __QtCore
elif _api.USED_API == _api.QT_API_PYSIDE2:
from PySide2.QtWidgets import *
try:
QWIDGETSIZE_MAX # Missing in older PyQt5, PyQt4
except NameError:
QWIDGETSIZE_MAX = (1 << 24) - 1
if not hasattr(QWidget, "screen"):
def QWidget_screen(self):
screens = QApplication.screens()
desktop = __QApplication_desktop() # avoid deprecation warning
screen_num = desktop.screenNumber(self)
if 0 <= screen_num < len(screens):
return screens[screen_num]
else:
return QApplication.primaryScreen()
QWidget.screen = QWidget_screen
del QWidget_screen
if hasattr(QWidget, "getContentsMargins"):
def QWidget_getContentsMargins(self):
warn("QWidget.getContentsMargins is obsolete and is removed in Qt6",
DeprecationWarning, stacklevel=2)
return __QWidget_getContentsMargins(self)
__QWidget_getContentsMargins = QWidget.getContentsMargins
QWidget.getContentsMargins = QWidget_getContentsMargins
if hasattr(QApplication, "desktop"):
def QApplication_desktop():
warn("QApplication.desktop is obsolete and is removed in Qt6",
DeprecationWarning, stacklevel=2)
return __QApplication_desktop()
__QApplication_desktop = QApplication.desktop
QApplication.desktop = staticmethod(QApplication_desktop)
del QApplication_desktop
| gpl-3.0 | -3,937,240,286,949,035,500 | 29.122616 | 84 | 0.691 | false |
nvoron23/owasp-pysec | pysec/utils.py | 4 | 4467 | # Python Security Project (PySec) and its related class files.
#
# PySec is a set of tools for secure application development under Linux
#
# Copyright 2014 PySec development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: ascii -*-
"""A lot of utilies for:
- Operations on paths
- Counting
"""
from itertools import islice
import operator
import heapq
import datetime
import calendar
import re
from pysec import lang
def xrange(*args):
"""xrange([start,] stop[, step]) -> xrange object
This xrange use python's integers and has not limits of
machine integers."""
len_args = len(args)
if len_args == 1:
stop = int(args[0])
start = 0
step = 1
elif len_args == 2:
start = int(args[0])
stop = int(args[1])
step = 1
elif len_args == 3:
start = int(args[0])
stop = int(args[1])
step = int(args[2])
else:
raise TypeError("xrange() requires 1-3 int arguments")
if step < 0:
bcmp = operator.gt
elif step > 0:
bcmp = operator.lt
else:
raise StopIteration
act = int(start)
while bcmp(act, stop):
yield act
act += step
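# Illustrative: unlike the builtin Python 2 xrange, this generator accepts
# arbitrarily large bounds, e.g. list(xrange(2**70, 2**70 + 3)) yields three
# consecutive big integers without raising OverflowError.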
def range(*args):
return list(xrange(*args))
def top_n(values, first_n=10):
"""Returns the *n* greatest objects in values"""
values = iter(values)
top = [val for val in islice(values, first_n)]
if len(top) < first_n:
return top
heapq.heapify(top)
for val in values:
heapq.heappushpop(top, val)
return top
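# Illustrative: top_n([4, 9, 1, 7, 3], first_n=2) keeps only the two largest
# values and returns them in heap order, e.g. [7, 9] (not sorted).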
def clamp(val, min_val, max_val):
return min_val if val < min_val else (max_val if val > max_val else val)
def eq(*values):
if not values:
return 1
cval = values[0]
return all(cval == val for val in values[1:])
def secs_to_iso_utc(timestamp, suffix='Z'):
    # suffix must be a string (e.g. 'Z') so it can be appended to the ISO text
    return datetime.datetime.utcfromtimestamp(int(timestamp)).isoformat(' ') + suffix
ISO_UTC_FORMAT = re.compile(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})[T_ ](?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(?P<msecond>\.\d+)?Z?")
def iso_utc_to_secs(time):
m = ISO_UTC_FORMAT.match(time)
if not m:
raise ValueError(lang.TIME_INVALID_TIME_FORMAT % time)
year = int(m.group('year'))
month = int(m.group('month'))
day = int(m.group('day'))
hour = int(m.group('hour'))
minute = int(m.group('minute'))
second = int(m.group('second'))
msec = m.group('msecond')
if msec:
msec = float(msec)
else:
msec = 0.
return float(calendar.timegm((year, month, day, hour, minute, second, 0, 1, 0))) + msec
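# Illustrative: iso_utc_to_secs("2014-01-01 00:00:00") == 1388534400.0, and a
# fractional part such as "2014-01-01 00:00:00.500Z" adds 0.5 to the result.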
DAY = 24 * 60 * 60
MONTH = 31 * DAY
YEAR = 365 * DAY
def parse_duration(duration):
secs = 0
for field in duration.split():
field = field.strip()
if field.endswith('sec'):
field = field[:-3]
unit = 1
elif field.endswith('day'):
unit = DAY
field = field[:-3]
elif field.endswith('month'):
unit = MONTH
field = field[:-5]
elif field.endswith('year'):
unit = YEAR
field = field[:-4]
else:
raise ValueError(lang.TIME_UNKNOWN_TIME_UNIT % field)
field = field.strip()
if not field.isdigit():
raise ValueError(lang.TIME_NOT_NUMERIC_VALUE % field)
secs += int(field) * unit
return secs
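# Illustrative: parse_duration("1day 30sec") == 1 * DAY + 30 == 86430, while a
# field with an unrecognised unit suffix such as "5min" raises ValueError.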
def ilen(gen, max=None):
"""Iterate a generator and return the number of iterations.
    If max is not None, the iteration terminates after at most *max* iterations."""
l = 0
if max is None:
for _ in gen:
l += 1
return l
else:
max = int(max)
if max < 0:
raise ValueError("invalid negative max: %d" % max)
for i in xrange(0, max):
try:
gen.next()
except StopIteration:
break
else:
return max
return i + 1
| apache-2.0 | 6,365,216,507,317,321,000 | 25.748503 | 156 | 0.586971 | false |
venicegeo/eventkit-cloud | eventkit_cloud/api/tests/test_notifications.py | 1 | 1945 | # -*- coding: utf-8 -*-
import os
from django.contrib.auth.models import Group, User
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from eventkit_cloud.core.helpers import sendnotification, NotificationVerb, NotificationLevel
# from django.test import TestCase as APITestCase
class TestNotifications(APITestCase):
fixtures = ('osm_provider.json', 'datamodel_presets.json',)
def __init__(self, *args, **kwargs):
super(TestNotifications, self).__init__(*args, **kwargs)
def setUp(self, ):
self.path = os.path.dirname(os.path.realpath(__file__))
self.group, created = Group.objects.get_or_create(name='TestDefaultExportExtentGroup')
self.user1 = User.objects.create_user(username='user_1', email='[email protected]', password='demo')
self.user2 = User.objects.create_user(username='user_2', email='[email protected]', password='demo')
token = Token.objects.create(user=self.user1)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
HTTP_ACCEPT='application/json; version=1.0',
HTTP_ACCEPT_LANGUAGE='en',
HTTP_HOST='testserver')
def test_send(self, ):
memo = "Note to myself"
level = NotificationLevel.SUCCESS.value
verb = NotificationVerb.REMOVED_FROM_GROUP.value
sendnotification(self.user1, self.user1, verb, None, None, level, memo)
url = '/api/notifications'
response = self.client.get(url, content_type='application/json; version=1.0')
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(len(response.data),1)
self.assertEqual(response.data[0]["description"],memo)
self.assertEqual(response.data[0]["level"],level)
self.assertEqual(response.data[0]["verb"],verb)
| bsd-3-clause | -25,658,885,582,931,536 | 40.382979 | 104 | 0.662725 | false |
Bachmann1234/pytest | _pytest/runner.py | 3 | 16984 | """ basic collect and runtest protocol implementations """
import bdb
import sys
from time import time
import py
import pytest
from _pytest._code.code import TerminalRepr, ExceptionInfo
def pytest_namespace():
return {
'fail' : fail,
'skip' : skip,
'importorskip' : importorskip,
'exit' : exit,
}
#
# pytest plugin hooks
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group.addoption('--durations',
action="store", type=int, default=None, metavar="N",
help="show N slowest setup/test durations (N=0 for all)."),
def pytest_terminal_summary(terminalreporter):
durations = terminalreporter.config.option.durations
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, 'duration'):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration)
dlist.reverse()
if not durations:
tr.write_sep("=", "slowest test durations")
else:
tr.write_sep("=", "slowest %s test durations" % durations)
dlist = dlist[:durations]
for rep in dlist:
nodeid = rep.nodeid.replace("::()::", "::")
tr.write_line("%02.2fs %-8s %s" %
(rep.duration, rep.when, nodeid))
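# Illustrative: running "pytest --durations=3" makes this hook print the three
# slowest setup/call/teardown phases of the run; "--durations=0" shows them all.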
def pytest_sessionstart(session):
session._setupstate = SetupState()
def pytest_sessionfinish(session):
session._setupstate.teardown_all()
class NodeInfo:
def __init__(self, location):
self.location = location
def pytest_runtest_protocol(item, nextitem):
item.ihook.pytest_runtest_logstart(
nodeid=item.nodeid, location=item.location,
)
runtestprotocol(item, nextitem=nextitem)
return True
def runtestprotocol(item, log=True, nextitem=None):
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request:
item._initrequest()
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log,
nextitem=nextitem))
# after all teardown hooks have been called
# want funcargs and request info to go away
if hasrequest:
item._request = False
item.funcargs = None
return reports
def pytest_runtest_setup(item):
item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
try:
item.runtest()
except Exception:
# Store trace info to allow postmortem debugging
type, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
del tb # Get rid of it in this namespace
raise
def pytest_runtest_teardown(item, nextitem):
item.session._setupstate.teardown_exact(item, nextitem)
def pytest_report_teststatus(report):
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call, report):
return call.excinfo and not (
hasattr(report, "wasxfail") or
call.excinfo.errisinstance(skip.Exception) or
call.excinfo.errisinstance(bdb.BdbQuit))
def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when
ihook = getattr(item.ihook, hookname)
return CallInfo(lambda: ihook(item=item, **kwds), when=when)
class CallInfo:
""" Result/Exception info a function invocation. """
#: None or ExceptionInfo object.
excinfo = None
def __init__(self, func, when):
#: context of invocation: one of "setup", "call",
#: "teardown", "memocollect"
self.when = when
self.start = time()
try:
self.result = func()
except KeyboardInterrupt:
self.stop = time()
raise
except:
self.excinfo = ExceptionInfo()
self.stop = time()
def __repr__(self):
if self.excinfo:
status = "exception: %s" % str(self.excinfo.value)
else:
status = "result: %r" % (self.result,)
return "<CallInfo when=%r %s>" % (self.when, status)
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d['version_info'][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d['id'], d['sysplatform'], ver, d['executable'])
return s
class BaseReport(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
if hasattr(self, 'node'):
out.line(getslaveinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, 'toterminal'):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.split("::")[0]
def pytest_runtest_makereport(item, call):
when = call.when
duration = call.stop-call.start
keywords = dict([(x,1) for x in item.keywords])
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(pytest.skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(excinfo,
style=item.config.option.tbstyle)
for rwhen, key, content in item._report_sections:
sections.append(("Captured %s %s" %(key, rwhen), content))
return TestReport(item.nodeid, item.location,
keywords, outcome, longrepr, when,
sections, duration)
class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
def __init__(self, nodeid, location, keywords, outcome,
longrepr, when, sections=(), duration=0, **extra):
#: normalized collection node id
self.nodeid = nodeid
#: a (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location
#: a name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
#: list of (secname, data) extra information which needs to
        #: be marshallable
self.sections = list(sections)
#: time it took to run just the test
self.duration = duration
self.__dict__.update(extra)
def __repr__(self):
return "<TestReport %r when=%r outcome=%r>" % (
self.nodeid, self.when, self.outcome)
class TeardownErrorReport(BaseReport):
outcome = "failed"
when = "teardown"
def __init__(self, longrepr, **extra):
self.longrepr = longrepr
self.sections = []
self.__dict__.update(extra)
def pytest_make_collect_report(collector):
call = CallInfo(collector._memocollect, "memocollect")
longrepr = None
if not call.excinfo:
outcome = "passed"
else:
from _pytest import nose
skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
if call.excinfo.errisinstance(skip_exceptions):
outcome = "skipped"
r = collector._repr_failure_py(call.excinfo, "line").reprcrash
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
rep = CollectReport(collector.nodeid, outcome, longrepr,
getattr(call, 'result', None))
rep.call = call # see collect_one_node
return rep
class CollectReport(BaseReport):
def __init__(self, nodeid, outcome, longrepr, result,
sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr
self.result = result or []
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self):
return "<CollectReport %r lenresult=%s outcome=%r>" % (
self.nodeid, len(self.result), self.outcome)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg):
self.longrepr = msg
def toterminal(self, out):
out.line(self.longrepr, red=True)
class SetupState(object):
""" shared state for setting up/tearing down test items or collectors. """
def __init__(self):
self.stack = []
self._finalizers = {}
def addfinalizer(self, finalizer, colitem):
""" attach a finalizer to the given colitem.
if colitem is None, this will add a finalizer that
is called at the end of teardown_all().
"""
assert colitem and not isinstance(colitem, tuple)
assert py.builtin.callable(finalizer)
#assert colitem in self.stack # some unit tests don't setup stack :/
self._finalizers.setdefault(colitem, []).append(finalizer)
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem):
finalizers = self._finalizers.pop(colitem, None)
exc = None
while finalizers:
fin = finalizers.pop()
try:
fin()
except Exception:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = sys.exc_info()
if exc:
py.builtin._reraise(*exc)
def _teardown_with_finalization(self, colitem):
self._callfinalizers(colitem)
if hasattr(colitem, "teardown"):
colitem.teardown()
for colitem in self._finalizers:
assert colitem is None or colitem in self.stack \
or isinstance(colitem, tuple)
def teardown_all(self):
while self.stack:
self._pop_and_teardown()
for key in list(self._finalizers):
self._teardown_with_finalization(key)
assert not self._finalizers
def teardown_exact(self, item, nextitem):
needed_collectors = nextitem and nextitem.listchain() or []
self._teardown_towards(needed_collectors)
def _teardown_towards(self, needed_collectors):
while self.stack:
if self.stack == needed_collectors[:len(self.stack)]:
break
self._pop_and_teardown()
def prepare(self, colitem):
""" setup objects along the collector chain to the test-method
and teardown previously setup objects."""
needed_collectors = colitem.listchain()
self._teardown_towards(needed_collectors)
# check if the last collection node has raised an error
for col in self.stack:
if hasattr(col, '_prepare_exc'):
py.builtin._reraise(*col._prepare_exc)
for col in needed_collectors[len(self.stack):]:
self.stack.append(col)
try:
col.setup()
except Exception:
col._prepare_exc = sys.exc_info()
raise
def collect_one_node(collector):
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
rep = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
return rep
# =============================================================
# Test OutcomeExceptions and helpers for creating them.
class OutcomeException(Exception):
""" OutcomeException and its subclass instances indicate and
contain info about test and collection outcomes.
"""
def __init__(self, msg=None, pytrace=True):
Exception.__init__(self, msg)
self.msg = msg
self.pytrace = pytrace
def __repr__(self):
if self.msg:
return str(self.msg)
return "<%s instance>" %(self.__class__.__name__,)
__str__ = __repr__
class Skipped(OutcomeException):
# XXX hackish: on 3k we fake to live in the builtins
# in order to have Skipped exception printing shorter/nicer
__module__ = 'builtins'
class Failed(OutcomeException):
""" raised from an explicit call to pytest.fail() """
__module__ = 'builtins'
class Exit(KeyboardInterrupt):
""" raised for immediate program exits (no tracebacks/summaries)"""
def __init__(self, msg="unknown reason"):
self.msg = msg
KeyboardInterrupt.__init__(self, msg)
# exposed helper methods
def exit(msg):
""" exit testing process as if KeyboardInterrupt was triggered. """
__tracebackhide__ = True
raise Exit(msg)
exit.Exception = Exit
def skip(msg=""):
""" skip an executing test with the given message. Note: it's usually
better to use the pytest.mark.skipif marker to declare a test to be
skipped under certain conditions like mismatching platforms or
dependencies. See the pytest_skipping plugin for details.
"""
__tracebackhide__ = True
raise Skipped(msg=msg)
skip.Exception = Skipped
def fail(msg="", pytrace=True):
""" explicitly fail an currently-executing test with the given Message.
:arg pytrace: if false the msg represents the full failure information
and no python traceback will be reported.
"""
__tracebackhide__ = True
raise Failed(msg=msg, pytrace=pytrace)
fail.Exception = Failed
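# Usage sketch (illustrative, not part of the original module): tests reach
# these helpers through the ``pytest`` namespace, which raises the outcome
# exceptions defined above so the runner can report them, e.g.
#
#     import pytest
#
#     def test_feature():
#         if not platform_supported():          # hypothetical predicate
#             pytest.skip("platform not supported")
#         if config_is_broken():                # hypothetical predicate
#             pytest.fail("broken configuration", pytrace=False)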
def importorskip(modname, minversion=None):
""" return imported module if it has at least "minversion" as its
    __version__ attribute. If no minversion is specified then a skip
    is only triggered if the module cannot be imported.
"""
__tracebackhide__ = True
compile(modname, '', 'eval') # to catch syntaxerrors
try:
__import__(modname)
except ImportError:
skip("could not import %r" %(modname,))
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, '__version__', None)
if minversion is not None:
try:
from pkg_resources import parse_version as pv
except ImportError:
skip("we have a required version for %r but can not import "
"no pkg_resources to parse version strings." %(modname,))
if verattr is None or pv(verattr) < pv(minversion):
skip("module %r has __version__ %r, required is: %r" %(
modname, verattr, minversion))
return mod
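# Example (illustrative): skip the calling test unless docutils of at least
# the given version can be imported; the returned module object is then
# usable directly.
#
#     docutils = pytest.importorskip("docutils", minversion="0.8")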
| mit | 1,096,846,667,274,738,700 | 32.171875 | 79 | 0.604451 | false |
jebpublic/pybvc | samples/samplenetconf/demos/vr_demo6.py | 4 | 6989 | #!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pybvc.controller.controller import Controller
from pybvc.netconfdev.vrouter.vrouter5600 import VRouter5600
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
def vr_demo_6():
f = "cfg4.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
nodeIpAddr = d['nodeIpAddr']
nodePortNum = d['nodePortNum']
nodeUname = d['nodeUname']
nodePswd = d['nodePswd']
ifName = d['loopback']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
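    # Expected layout of cfg4.yml (illustrative values only; real addresses,
    # credentials and interface names are deployment specific):
    #
    #     ctrlIpAddr: 172.22.18.70
    #     ctrlPortNum: 8181
    #     ctrlUname: admin
    #     ctrlPswd: admin
    #     nodeName: vRouter
    #     nodeIpAddr: 172.22.17.107
    #     nodePortNum: 830
    #     nodeUname: vyatta
    #     nodePswd: vyatta
    #     loopback: lo10
    #     rundelay: 5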
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("\n")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum,
nodeUname, nodePswd)
print ("<<< 'Controller': %s, '%s': %s"
% (ctrlIpAddr, nodeName, nodeIpAddr))
print ("\n")
time.sleep(rundelay)
node_configured = False
result = ctrl.check_node_config_status(nodeName)
status = result.get_status()
if(status.eq(STATUS.NODE_CONFIGURED)):
node_configured = True
print ("<<< '%s' is configured on the Controller" % nodeName)
elif(status.eq(STATUS.DATA_NOT_FOUND)):
node_configured = False
else:
print ("\n")
print "Failed to get configuration status for the '%s'" % nodeName
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
if node_configured is False:
result = ctrl.add_netconf_node(vrouter)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< '%s' added to the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
print ("\n")
time.sleep(rundelay)
result = ctrl.check_node_conn_status(nodeName)
status = result.get_status()
if(status.eq(STATUS.NODE_CONNECTED)):
print ("<<< '%s' is connected to the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print("\n")
print ("<<< Show list of loopback interfaces on the '%s'" % nodeName)
time.sleep(rundelay)
result = vrouter.get_loopback_interfaces_list()
status = result.get_status()
if(status.eq(STATUS.OK)):
print "Loopback interfaces:"
dpIfList = result.get_data()
print json.dumps(dpIfList, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print("\n")
print ("<<< Show '%s' loopback interface configuration on the '%s'"
% (ifName, nodeName))
time.sleep(rundelay)
result = vrouter.get_loopback_interface_cfg(ifName)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("Loopback interface '%s' config:" % ifName)
response = result.get_data()
content = response.content
data = json.loads(content)
print json.dumps(data, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print("\n")
print ("<<< Show configuration of loopback interfaces on the '%s'"
% nodeName)
time.sleep(rundelay)
result = vrouter.get_loopback_interfaces_cfg()
status = result.get_status()
if(status.eq(STATUS.OK)):
print "Loopback interfaces config:"
lbIfCfg = result.get_data()
print json.dumps(lbIfCfg, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print("\n")
print ("<<< Show interfaces configuration on the '%s'" % nodeName)
time.sleep(rundelay)
result = vrouter.get_interfaces_cfg()
status = result.get_status()
if(status.eq(STATUS.OK)):
print "Interfaces config:"
cfg = result.get_data()
data = json.loads(cfg)
print json.dumps(data, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print "\n"
print (">>> Remove '%s' NETCONF node from the Controller" % nodeName)
time.sleep(rundelay)
result = ctrl.delete_netconf_node(vrouter)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("'%s' NETCONF node was successfully removed "
"from the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
vr_demo_6()
| bsd-3-clause | 8,553,185,380,699,407,000 | 33.259804 | 78 | 0.60724 | false |
filipenf/ansible | lib/ansible/plugins/action/__init__.py | 3 | 39271 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import pipes
import random
import re
import stat
import tempfile
import time
from abc import ABCMeta, abstractmethod
from ansible.compat.six import binary_type, text_type, iteritems, with_metaclass
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.module_common import modify_module
from ansible.release import __version__
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes, to_unicode
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionBase(with_metaclass(ABCMeta, object)):
'''
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
'''
def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
self._task = task
self._connection = connection
self._play_context = play_context
self._loader = loader
self._templar = templar
self._shared_loader_obj = shared_loader_obj
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
self._cleanup_remote_tmp = False
self._supports_check_mode = True
@abstractmethod
def run(self, tmp=None, task_vars=None):
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
:kwarg tmp: Temporary directory. Sometimes an action plugin sets up
a temporary directory and then calls another module. This parameter
allows us to reuse the same directory for both.
:kwarg task_vars: The variables (host vars, group vars, config vars,
etc) associated with this task.
:returns: dictionary of results from the module
Implementors of action modules may find the following variables especially useful:
* Module parameters. These are stored in self._task.args
"""
# store the module invocation details into the results
results = {}
if self._task.async == 0:
results['invocation'] = dict(
module_name = self._task.action,
module_args = self._task.args,
)
return results
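    # A minimal subclass sketch (illustrative, not part of this file):
    # concrete action plugins ship as ``ActionModule`` classes that extend
    # this base, typically delegating to ``_execute_module`` after doing
    # their local work.
    #
    #     class ActionModule(ActionBase):
    #
    #         def run(self, tmp=None, task_vars=None):
    #             result = super(ActionModule, self).run(tmp, task_vars)
    #             result.update(self._execute_module(module_name='ping',
    #                                                module_args=self._task.args,
    #                                                task_vars=task_vars,
    #                                                tmp=tmp))
    #             return result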
def _remote_file_exists(self, path):
cmd = self._connection._shell.exists(path)
result = self._low_level_execute_command(cmd=cmd, sudoable=True)
if result['rc'] == 0:
return True
return False
def _configure_module(self, module_name, module_args, task_vars=None):
'''
Handles the loading and templating of the module code through the
modify_module() function.
'''
if task_vars is None:
task_vars = dict()
# Search module path(s) for named module.
for mod_type in self._connection.module_implementation_preferences:
# Check to determine if PowerShell modules are supported, and apply
# some fixes (hacks) to module name + args.
if mod_type == '.ps1':
# win_stat, win_file, and win_copy are not just like their
# python counterparts but they are compatible enough for our
# internal usage
if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:
module_name = 'win_%s' % module_name
# Remove extra quotes surrounding path parameters before sending to module.
if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):
for key in ('src', 'dest', 'path'):
if key in module_args:
module_args[key] = self._connection._shell._unquote(module_args[key])
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)
if module_path:
break
else: # This is a for-else: http://bit.ly/1ElPkyg
# Use Windows version of ping module to check module paths when
# using a connection that supports .ps1 suffixes. We check specifically
# for win_ping here, otherwise the code would look for ping.ps1
if '.ps1' in self._connection.module_implementation_preferences:
ping_module = 'win_ping'
else:
ping_module = 'ping'
module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, self._connection.module_implementation_preferences)
if module_path2 is not None:
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
else:
raise AnsibleError("The module %s was not found in configured module paths. "
"Additionally, core modules are missing. If this is a checkout, "
"run 'git submodule update --init --recursive' to correct this problem." % (module_name))
# insert shared code and arguments into the module
(module_data, module_style, module_shebang) = modify_module(module_name, module_path, module_args, task_vars=task_vars, module_compression=self._play_context.module_compression)
return (module_style, module_shebang, module_data, module_path)
def _compute_environment_string(self):
'''
Builds the environment string to be used when executing the remote task.
'''
final_environment = dict()
if self._task.environment is not None:
environments = self._task.environment
if not isinstance(environments, list):
environments = [ environments ]
# the environments as inherited need to be reversed, to make
# sure we merge in the parent's values first so those in the
# block then task 'win' in precedence
environments.reverse()
for environment in environments:
if environment is None:
continue
temp_environment = self._templar.template(environment)
if not isinstance(temp_environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(temp_environment)
final_environment = self._templar.template(final_environment)
return self._connection._shell.env_prefix(**final_environment)
def _early_needs_tmp_path(self):
'''
Determines if a temp path should be created before the action is executed.
'''
return getattr(self, 'TRANSFERS_FILES', False)
def _late_needs_tmp_path(self, tmp, module_style):
'''
Determines if a temp path is required after some early actions have already taken place.
'''
if tmp and "tmp" in tmp:
# tmp has already been created
return False
if not self._connection.has_pipelining or not self._play_context.pipelining or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become_method == 'su':
# tmp is necessary to store the module source code
# or we want to keep the files on the target system
return True
if module_style != "new":
# even when conn has pipelining, old style modules need tmp to store arguments
return True
return False
def _make_tmp_path(self, remote_user):
'''
Create and return a temporary path on a remote box.
'''
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
use_system_tmp = False
if self._play_context.become and self._play_context.become_user not in ('root', remote_user):
use_system_tmp = True
tmp_mode = 0o700
cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
result = self._low_level_execute_command(cmd, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self._connection.transport in ('ssh',):
if self._play_context.verbosity > 3:
output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr'])
else:
output = (u'SSH encountered an unknown error during the connection.'
' We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')
elif u'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = ('Authentication or permission failure.'
' In some cases, you may have been able to authenticate and did not have permissions on the remote directory.'
' Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp".'
' Failed command was: %s, exited with result %d' % (cmd, result['rc']))
if 'stdout' in result and result['stdout'] != u'':
output = output + u": %s" % result['stdout']
raise AnsibleConnectionFailure(output)
try:
stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1)
rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1]
except IndexError:
# stdout was empty or just space, set to / to trigger error in next if
rc = '/'
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
return rc
def _remove_tmp_path(self, tmp_path):
'''Remove a temporary path we created. '''
if tmp_path and self._cleanup_remote_tmp and not C.DEFAULT_KEEP_REMOTE_FILES and "-tmp-" in tmp_path:
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
self._low_level_execute_command(cmd, sudoable=False)
def _transfer_file(self, local_path, remote_path):
self._connection.put_file(local_path, remote_path)
return remote_path
def _transfer_data(self, remote_path, data):
'''
Copies the module data out to the temporary module path.
'''
if isinstance(data, dict):
data = jsonify(data)
afd, afile = tempfile.mkstemp()
afo = os.fdopen(afd, 'wb')
try:
data = to_bytes(data, errors='strict')
afo.write(data)
except Exception as e:
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e))
afo.flush()
afo.close()
try:
self._transfer_file(afile, remote_path)
finally:
os.unlink(afile)
return remote_path
def _fixup_perms(self, remote_path, remote_user, execute=True, recursive=True):
"""
We need the files we upload to be readable (and sometimes executable)
by the user being sudo'd to but we want to limit other people's access
(because the files could contain passwords or other private
information. We achieve this in one of these ways:
* If no sudo is performed or the remote_user is sudo'ing to
themselves, we don't have to change permissions.
* If the remote_user sudo's to a privileged user (for instance, root),
we don't have to change permissions
* If the remote_user sudo's to an unprivileged user then we attempt to
grant the unprivileged user access via file system acls.
* If granting file system acls fails we try to change the owner of the
file with chown which only works in case the remote_user is
privileged or the remote systems allows chown calls by unprivileged
users (e.g. HP-UX)
* If the chown fails we can set the file to be world readable so that
the second unprivileged user can read the file.
Since this could allow other users to get access to private
        information, we only do this if ansible is configured with
"allow_world_readable_tmpfiles" in the ansible.cfg
"""
if self._connection._shell.SHELL_FAMILY == 'powershell':
# This won't work on Powershell as-is, so we'll just completely skip until
# we have a need for it, at which point we'll have to do something different.
return remote_path
if remote_path is None:
# Sometimes code calls us naively -- it has a var which could
# contain a path to a tmp dir but doesn't know if it needs to
# exist or not. If there's no path, then there's no need for us
# to do work
display.debug('_fixup_perms called with remote_path==None. Sure this is correct?')
return remote_path
if self._play_context.become and self._play_context.become_user not in ('root', remote_user):
# Unprivileged user that's different than the ssh user. Let's get
# to work!
# Try to use file system acls to make the files readable for sudo'd
# user
if execute:
mode = 'rx'
else:
mode = 'rX'
res = self._remote_set_user_facl(remote_path, self._play_context.become_user, mode, recursive=recursive, sudoable=False)
if res['rc'] != 0:
# File system acls failed; let's try to use chown next
# Set executable bit first as on some systems an
# unprivileged user can use chown
if execute:
res = self._remote_chmod('u+x', remote_path, recursive=recursive)
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))
res = self._remote_chown(remote_path, self._play_context.become_user, recursive=recursive)
if res['rc'] != 0 and remote_user == 'root':
                    # chown failed even if remote_user is root
raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as root. Unprivileged become user would be unable to read the file.')
elif res['rc'] != 0:
if C.ALLOW_WORLD_READABLE_TMPFILES:
# chown and fs acls failed -- do things this insecure
# way only if the user opted in in the config file
display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user which may be insecure. For information on securing this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
res = self._remote_chmod('a+%s' % mode, remote_path, recursive=recursive)
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))
else:
raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user (rc: {0}, err: {1}). For information on working around this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'.format(res['rc'], res['stderr']))
elif execute:
# Can't depend on the file being transferred with execute
# permissions. Only need user perms because no become was
# used here
res = self._remote_chmod('u+x', remote_path, recursive=recursive)
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], res['stderr']))
return remote_path
def _remote_chmod(self, mode, path, recursive=True, sudoable=False):
'''
Issue a remote chmod command
'''
cmd = self._connection._shell.chmod(mode, path, recursive=recursive)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_chown(self, path, user, group=None, recursive=True, sudoable=False):
'''
Issue a remote chown command
'''
cmd = self._connection._shell.chown(path, user, group, recursive=recursive)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_set_user_facl(self, path, user, mode, recursive=True, sudoable=False):
'''
Issue a remote call to setfacl
'''
cmd = self._connection._shell.set_user_facl(path, user, mode, recursive=recursive)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _execute_remote_stat(self, path, all_vars, follow, tmp=None):
'''
Get information from remote file.
'''
module_args=dict(
path=path,
follow=follow,
get_md5=False,
get_checksum=True,
checksum_algo='sha1',
)
mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars, tmp=tmp, delete_remote_tmp=(tmp is None))
if 'failed' in mystat and mystat['failed']:
raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, mystat['msg']))
if not mystat['stat']['exists']:
# empty might be matched, 1 should never match, also backwards compatible
mystat['stat']['checksum'] = '1'
# happens sometimes when it is a dir and not on bsd
if not 'checksum' in mystat['stat']:
mystat['stat']['checksum'] = ''
return mystat['stat']
def _remote_checksum(self, path, all_vars, follow=False):
'''
        Produces a remote checksum given a path.
        Returns a number 0-4 (as a string) for specific errors instead of a checksum; these values can never collide with a real checksum
0 = unknown error
1 = file does not exist, this might not be an error
2 = permissions issue
3 = its a directory, not a file
4 = stat module failed, likely due to not finding python
'''
x = "0" # unknown error has occured
try:
remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
if remote_stat['exists'] and remote_stat['isdir']:
x = "3" # its a directory not a file
else:
x = remote_stat['checksum'] # if 1, file is missing
except AnsibleError as e:
errormsg = to_unicode(e)
if errormsg.endswith('Permission denied'):
x = "2" # cannot read file
elif errormsg.endswith('MODULE FAILURE'):
x = "4" # python not found or module uncaught exception
finally:
return x
def _remote_expand_user(self, path):
''' takes a remote path and performs tilde expansion on the remote host '''
if not path.startswith('~'): # FIXME: Windows paths may start with "~ instead of just ~
return path
# FIXME: Can't use os.path.sep for Windows paths.
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
if self._play_context.become and self._play_context.become_user:
expand_path = '~%s' % self._play_context.become_user
cmd = self._connection._shell.expand_user(expand_path)
data = self._low_level_execute_command(cmd, sudoable=False)
#initial_fragment = utils.last_non_blank_line(data['stdout'])
initial_fragment = data['stdout'].strip().splitlines()[-1]
if not initial_fragment:
# Something went wrong trying to expand the path remotely. Return
# the original string
return path
if len(split_path) > 1:
return self._connection._shell.join_path(initial_fragment, *split_path[1:])
else:
return initial_fragment
@staticmethod
def _filter_non_json_lines(data):
'''
Used to avoid random output from SSH at the top of JSON output, like messages from
        tcgetattr, or where dropbear spews MOTD on every single command (which is nuts).
need to filter anything which does not start with '{', '[', or is an empty line.
Have to be careful how we filter trailing junk as multiline JSON is valid.
'''
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
display.debug('No start of json char found')
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
lines.reverse()
for end, line in enumerate(lines):
if line.strip().endswith(endchar):
break
else:
display.debug('No end of json char found')
raise ValueError('No end of json char found')
if end < len(lines) - 1:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[:end]
trailing_junk.reverse()
display.warning('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
lines = lines[end:]
lines.reverse()
return '\n'.join(lines)
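    # For example (illustrative): given output polluted by the remote shell,
    #
    #     Last login: Mon Jan  4 10:00:00 2016
    #     {"changed": false,
    #      "ping": "pong"}
    #     Shared connection to host closed.
    #
    # only the two JSON lines survive; the trailing line triggers the
    # junk warning above.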
def _strip_success_message(self, data):
'''
Removes the BECOME-SUCCESS message from the data.
'''
if data.strip().startswith('BECOME-SUCCESS-'):
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
return data
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
'''
Transfer and run a module along with its arguments.
'''
if task_vars is None:
task_vars = dict()
# if a module name was not specified for this execution, use
# the action from the task
if module_name is None:
module_name = self._task.action
if module_args is None:
module_args = self._task.args
# set check mode in the module arguments, if required
if self._play_context.check_mode:
if not self._supports_check_mode:
raise AnsibleError("check mode is not supported for this operation")
module_args['_ansible_check_mode'] = True
else:
module_args['_ansible_check_mode'] = False
# Get the connection user for permission checks
remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
# set no log in the module arguments, if required
module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG
# set debug in the module arguments, if required
module_args['_ansible_debug'] = C.DEFAULT_DEBUG
# let module know we are in diff mode
module_args['_ansible_diff'] = self._play_context.diff
# let module know our verbosity
module_args['_ansible_verbosity'] = display.verbosity
# give the module information about the ansible version
module_args['_ansible_version'] = __version__
# give the module information about its name
module_args['_ansible_module_name'] = module_name
# set the syslog facility to be used in the module
module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)
# let module know about filesystems that selinux treats specially
module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
(module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
display.vvv("Using module file %s" % module_path)
if not shebang and module_style != 'binary':
raise AnsibleError("module (%s) is missing interpreter line" % module_name)
# a remote tmp path may be necessary and not already created
remote_module_path = None
args_file_path = None
if not tmp and self._late_needs_tmp_path(tmp, module_style):
tmp = self._make_tmp_path(remote_user)
if tmp:
remote_module_filename = self._connection._shell.get_remote_filename(module_path)
remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
if module_style in ('old', 'non_native_want_json', 'binary'):
# we'll also need a temp file to hold our module arguments
args_file_path = self._connection._shell.join_path(tmp, 'args')
if remote_module_path or module_style != 'new':
display.debug("transferring module to remote %s" % remote_module_path)
if module_style == 'binary':
self._transfer_file(module_path, remote_module_path)
else:
self._transfer_data(remote_module_path, module_data)
if module_style == 'old':
# we need to dump the module args to a k=v string in a file on
# the remote system, which can be read and parsed by the module
args_data = ""
for k,v in iteritems(module_args):
args_data += '%s=%s ' % (k, pipes.quote(text_type(v)))
self._transfer_data(args_file_path, args_data)
elif module_style in ('non_native_want_json', 'binary'):
self._transfer_data(args_file_path, json.dumps(module_args))
display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()
# Fix permissions of the tmp path and tmp files. This should be
# called after all files have been transferred.
self._fixup_perms(tmp, remote_user, recursive=True)
cmd = ""
in_data = None
if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
in_data = module_data
else:
if remote_module_path:
cmd = remote_module_path
rm_tmp = None
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if not self._play_context.become or self._play_context.become_user == 'root':
# not sudoing or sudoing to root, so can cleanup files in the same step
rm_tmp = tmp
cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)
cmd = cmd.strip()
sudoable = True
if module_name == "accelerate":
# always run the accelerate module as the user
# specified in the play, not the sudo_user
sudoable = False
res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if self._play_context.become and self._play_context.become_user != 'root':
# not sudoing to root, so maybe can't delete files as that other user
# have to clean up temp files as original user in a second step
tmp_rm_cmd = self._connection._shell.remove(tmp, recurse=True)
tmp_rm_res = self._low_level_execute_command(tmp_rm_cmd, sudoable=False)
tmp_rm_data = self._parse_returned_data(tmp_rm_res)
if tmp_rm_data.get('rc', 0) != 0:
display.warning('Error deleting remote temporary files (rc: {0}, stderr: {1})'.format(tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))
# parse the main result
data = self._parse_returned_data(res)
# pre-split stdout into lines, if stdout is in the data and there
# isn't already a stdout_lines value there
if 'stdout' in data and 'stdout_lines' not in data:
data['stdout_lines'] = data.get('stdout', u'').splitlines()
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
def _parse_returned_data(self, res):
try:
data = json.loads(self._filter_non_json_lines(res.get('stdout', u'')))
except ValueError:
# not valid json, lets try to capture error
data = dict(failed=True, parsed=False)
data['msg'] = "MODULE FAILURE"
data['module_stdout'] = res.get('stdout', u'')
if 'stderr' in res:
data['module_stderr'] = res['stderr']
if res['stderr'].startswith(u'Traceback'):
data['exception'] = res['stderr']
return data
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='replace'):
'''
This is the function which executes the low level shell command, which
may be commands to create/remove directories for temporary files, or to
run the module code or python directly when pipelining.
:kwarg encoding_errors: If the value returned by the command isn't
utf-8 then we have to figure out how to transform it to unicode.
If the value is just going to be displayed to the user (or
discarded) then the default of 'replace' is fine. If the data is
used as a key or is going to be written back out to a file
verbatim, then this won't work. May have to use some sort of
replacement strategy (python3 could use surrogateescape)
'''
display.debug("_low_level_execute_command(): starting")
if not cmd:
# this can happen with powershell modules when there is no analog to a Windows command (like chmod)
display.debug("_low_level_execute_command(): no command, exiting")
return dict(stdout='', stderr='', rc=254)
allow_same_user = C.BECOME_ALLOW_SAME_USER
same_user = self._play_context.become_user == self._play_context.remote_user
if sudoable and self._play_context.become and (allow_same_user or not same_user):
display.debug("_low_level_execute_command(): using become for this command")
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
if self._connection.allow_executable:
if executable is None:
executable = self._play_context.executable
# mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
# only applied for the default executable to avoid interfering with the raw action
cmd = self._connection._shell.append_command(cmd, 'sleep 0')
if executable:
cmd = executable + ' -c ' + pipes.quote(cmd)
display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
# Change directory to basedir of task for command execution
cwd = os.getcwd()
os.chdir(self._loader.get_basedir())
try:
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
finally:
os.chdir(cwd)
# stdout and stderr may be either a file-like or a bytes object.
# Convert either one to a text type
if isinstance(stdout, binary_type):
out = to_unicode(stdout, errors=encoding_errors)
elif not isinstance(stdout, text_type):
out = to_unicode(b''.join(stdout.readlines()), errors=encoding_errors)
else:
out = stdout
if isinstance(stderr, binary_type):
err = to_unicode(stderr, errors=encoding_errors)
elif not isinstance(stderr, text_type):
err = to_unicode(b''.join(stderr.readlines()), errors=encoding_errors)
else:
err = stderr
if rc is None:
rc = 0
# be sure to remove the BECOME-SUCCESS message now
out = self._strip_success_message(out)
display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr))
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)
def _get_first_available_file(self, faf, of=None, searchdir='files'):
display.deprecated("first_available_file, use with_first_found or lookup('first_found',...) instead")
for fn in faf:
fnt = self._templar.template(fn)
if self._task._role is not None:
lead = self._task._role._role_path
else:
lead = fnt
fnd = self._loader.path_dwim_relative(lead, searchdir, fnt)
if not os.path.exists(fnd) and of is not None:
if self._task._role is not None:
lead = self._task._role._role_path
else:
lead = of
fnd = self._loader.path_dwim_relative(lead, searchdir, of)
if os.path.exists(fnd):
return fnd
return None
def _get_diff_data(self, destination, source, task_vars, source_file=True):
diff = {}
display.debug("Going to peek to see if file has changed permissions")
peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), task_vars=task_vars, persist_files=True)
if not('failed' in peek_result and peek_result['failed']) or peek_result.get('rc', 0) == 0:
if peek_result['state'] == 'absent':
diff['before'] = ''
elif peek_result['appears_binary']:
diff['dst_binary'] = 1
elif C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Slurping the file %s" % source)
dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True)
if 'content' in dest_result:
dest_contents = dest_result['content']
if dest_result['encoding'] == 'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise AnsibleError("unknown encoding in content option, failed: %s" % dest_result)
diff['before_header'] = destination
diff['before'] = dest_contents
if source_file:
st = os.stat(source)
if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Reading local copy of the file %s" % source)
try:
src = open(source)
src_contents = src.read()
except Exception as e:
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
if "\x00" in src_contents:
diff['src_binary'] = 1
else:
diff['after_header'] = source
diff['after'] = src_contents
else:
display.debug("source of file passed in")
diff['after_header'] = 'dynamically generated'
diff['after'] = source
if self._play_context.no_log:
if 'before' in diff:
diff["before"] = ""
if 'after' in diff:
diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]"
return diff
def _find_needle(self, dirname, needle):
'''
        find a needle in a haystack of paths, optionally using 'dirname' as a subdir.
This will build the ordered list of paths to search and pass them to dwim
to get back the first existing file found.
'''
path_stack = self._task.get_search_path()
result = self._loader.path_dwim_relative_stack(path_stack, dirname, needle)
if result is None:
raise AnsibleError("Unable to find '%s' in expected paths." % needle)
return result
| gpl-3.0 | 2,241,425,962,940,953,900 | 45.255595 | 326 | 0.599654 | false |
lizardsystem/lizard-damage | lizard_damage/templatetags/formatting.py | 1 | 1321 | from django import template
register = template.Library()
def split_len(seq, length):
"""splits a string into length sized strings, beginning at the end"""
result = [seq[max(i - length, 0):i] for i in range(len(seq), 0, -length)]
result.reverse()
return result
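# For example (illustrative): split_len('1234567', 3) returns
# ['1', '234', '567'], so euroformat(1234567) below renders as
# '€ 1.234.567,-'.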
@register.filter()
def euroformat(value):
value_str = '%0.0f' % value
return '€ %s,-' % ('.'.join(split_len(value_str, 3)))
@register.filter
def haformat(value):
"""
Hectare format, do not mix up with the "Jack Ha" Format.
"""
if value == 0.0:
return '0 ha'
return '%0.1f ha' % value
@register.filter
def hoursformat(value):
if value:
return '%0.f uur' % (value / 3600.0)
else:
return '-'
@register.filter
def daysformat(value):
if not value:
return '-'
if value < 1 * 3600 * 24:
return '%0.f uur' % (value / 3600.0)
else:
return '%0.f dag(en)' % (value / 24.0 / 3600.0)
@register.filter
def monthformat(value):
month_dict = {
1: 'januari',
2: 'februari',
3: 'maart',
4: 'april',
5: 'mei',
6: 'juni',
7: 'juli',
8: 'augustus',
9: 'september',
10: 'oktober',
11: 'november',
12: 'december'}
return month_dict.get(value, value)
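# Template usage sketch (illustrative variable names):
#
#     {% load formatting %}
#     {{ result.total_damage|euroformat }} over {{ result.total_area|haformat }},
#     repair time {{ event.repairtime|daysformat }}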
| gpl-3.0 | -3,460,470,603,933,927,000 | 20.306452 | 77 | 0.545042 | false |
tectronics/chimerascan | chimerascan/deprecated/gene_to_genome2.py | 12 | 2125 | '''
Created on Jan 31, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
# local imports
from feature import GeneFeature
def build_gene_to_genome_map(line_iter, rname_prefix=None):
# create arrays to map genes in bed file to genome
rname_prefix = '' if rname_prefix is None else rname_prefix
gene_genome_map = {}
for g in GeneFeature.parse(line_iter):
rname = rname_prefix + g.tx_name
strand = 1 if g.strand == '-' else 0
exon_vectors = [(start, end) for start, end in g.exons]
if strand:
exon_vectors.reverse()
if rname in gene_genome_map:
logging.error("Duplicate references %s found in bed file" % (rname))
gene_genome_map[rname] = (g.chrom, strand, exon_vectors)
return gene_genome_map
def gene_to_genome_pos(rname, pos, gene_genome_map):
'''
    translate position 'pos' on gene 'rname' to genomic
    coordinates. Returns a 3-tuple of (chrom, strand, pos).
'''
chrom, strand, intervals = gene_genome_map[rname]
offset = 0
for start, end, in intervals:
exon_size = end - start
if pos < offset + exon_size:
if strand:
return chrom, strand, start + exon_size - (pos - offset) - 1
else:
return chrom, strand, start + (pos - offset)
#print start, end, offset, pos
offset += exon_size
return None
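# Usage sketch (illustrative file and transcript names): build the lookup
# table once from a gene feature file, then translate transcript-relative
# positions to genomic coordinates.
#
#     with open("gene_features.txt") as fileh:
#         gene_genome_map = build_gene_to_genome_map(fileh)
#     chrom, strand, gpos = gene_to_genome_pos("uc002gig.1", 250,
#                                              gene_genome_map)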
| gpl-3.0 | 8,387,176,535,562,690,000 | 35.016949 | 80 | 0.664471 | false |