| repo_name (string, len 6-100) | path (string, len 4-294) | copies (string, len 1-5) | size (string, len 4-6) | content (string, len 606-896k) | license (15 classes) | var_hash (int64, ~±9.22e18) | doc_hash (int64, ~±9.22e18) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|---|
HBehrens/feedsanitizer | django/contrib/gis/utils/wkt.py | 419 | 1846 | """
Utilities for manipulating Geometry WKT.
"""
def precision_wkt(geom, prec):
"""
Returns the WKT representation of the geometry according to the given precision
(an integer or a string). If the precision is an integer, the coordinates in the
output WKT will be truncated to that number of decimal places:
>>> pnt = Point(5, 23)
>>> pnt.wkt
'POINT (5.0000000000000000 23.0000000000000000)'
>>> precision_wkt(pnt, 1)
'POINT (5.0 23.0)'
If the precision is a string, it must be a valid Python format string
(e.g., '%20.7f') -- thus, you should know what you're doing.
"""
if isinstance(prec, int):
num_fmt = '%%.%df' % prec
elif isinstance(prec, basestring):
num_fmt = prec
else:
raise TypeError
# TODO: Support 3D geometries.
coord_fmt = ' '.join([num_fmt, num_fmt])
def formatted_coords(coords):
return ','.join([coord_fmt % c[:2] for c in coords])
def formatted_poly(poly):
return ','.join(['(%s)' % formatted_coords(r) for r in poly])
def formatted_geom(g):
gtype = str(g.geom_type).upper()
yield '%s(' % gtype
if gtype == 'POINT':
yield formatted_coords((g.coords,))
elif gtype in ('LINESTRING', 'LINEARRING'):
yield formatted_coords(g.coords)
elif gtype in ('POLYGON', 'MULTILINESTRING'):
yield formatted_poly(g)
elif gtype == 'MULTIPOINT':
yield formatted_coords(g.coords)
elif gtype == 'MULTIPOLYGON':
yield ','.join(['(%s)' % formatted_poly(p) for p in g])
elif gtype == 'GEOMETRYCOLLECTION':
yield ','.join([''.join([wkt for wkt in formatted_geom(child)]) for child in g])
else:
raise TypeError
yield ')'
return ''.join([wkt for wkt in formatted_geom(geom)])
| mit | -1,700,979,453,453,220,000 | -8,525,120,367,114,004,000 | 32.563636 | 92 | 0.582882 | false |
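A minimal usage sketch for the precision_wkt helper above (not part of the original file; it assumes GEOS and django.contrib.gis are installed so that Point is available):

from django.contrib.gis.geos import Point
from django.contrib.gis.utils.wkt import precision_wkt

pnt = Point(5, 23)
# An integer precision truncates each coordinate to that many decimal places.
print(precision_wkt(pnt, 1))       # POINT (5.0 23.0)
# A format-string precision is applied verbatim to every coordinate.
print(precision_wkt(pnt, '%.3f'))  # POINT (5.000 23.000)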
manumathewthomas/Chat-with-Joey | chatbot/chatbot.py | 1 | 31681 | # Copyright 2015 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Main script. See README.md for more information
Use python 3
"""
import argparse # Command line parsing
import configparser # Saving the models parameters
import datetime # Chronometer
import os # Files management
import tensorflow as tf
import numpy as np
import math
from tqdm import tqdm # Progress bar
from tensorflow.python import debug as tf_debug
from chatbot.textdata import TextData
from chatbot.model import Model
class Chatbot:
"""
Main class which launches the training or testing mode
"""
class TestMode:
""" Simple structure representing the different testing modes
"""
ALL = 'all'
INTERACTIVE = 'interactive' # The user can write his own questions
DAEMON = 'daemon' # The chatbot runs in the background and can regularly be called to predict something
def __init__(self):
"""
"""
# Model/dataset parameters
self.args = None
# Task specific object
self.textData = None # Dataset
self.model = None # Sequence to sequence model
# Tensorflow utilities for convenience saving/logging
self.writer = None
self.saver = None
self.modelDir = '' # Where the model is saved
self.globStep = 0 # Represents the number of iterations for the current model
# TensorFlow main session (we keep track for the daemon)
self.sess = None
# Filename and directories constants
self.MODEL_DIR_BASE = 'save/model'
self.MODEL_NAME_BASE = 'model'
self.MODEL_EXT = '.ckpt'
self.CONFIG_FILENAME = 'params.ini'
self.CONFIG_VERSION = '0.4'
self.TEST_IN_NAME = 'data/test/samples.txt'
self.TEST_OUT_SUFFIX = '_predictions.txt'
self.SENTENCES_PREFIX = ['Q: ', 'A: ']
@staticmethod
def parseArgs(args):
"""
Parse the arguments from the given command line
Args:
args (list<str>): List of arguments to parse. If None, the default sys.argv will be parsed
"""
parser = argparse.ArgumentParser()
# Global options
globalArgs = parser.add_argument_group('Global options')
globalArgs.add_argument('--test',
nargs='?',
choices=[Chatbot.TestMode.ALL, Chatbot.TestMode.INTERACTIVE, Chatbot.TestMode.DAEMON],
const=Chatbot.TestMode.ALL, default=None,
help='if present, the program will try to answer all sentences from data/test/ with'
' the defined model(s); in interactive mode, the user can write his own sentences;'
' use daemon mode to integrate the chatbot in another program')
globalArgs.add_argument('--createDataset', action='store_true', help='if present, the program will only generate the dataset from the corpus (no training/testing)')
globalArgs.add_argument('--playDataset', type=int, nargs='?', const=10, default=None, help='if set, the program will randomly play some samples(can be use conjointly with createDataset if this is the only action you want to perform)')
globalArgs.add_argument('--reset', action='store_true', help='use this if you want to ignore the previous model present on the model directory (Warning: the model will be destroyed with all the folder content)')
globalArgs.add_argument('--verbose', action='store_true', help='When testing, will plot the outputs at the same time they are computed')
globalArgs.add_argument('--debug', action='store_true', help='run DeepQA with Tensorflow debug mode. Read TF documentation for more details on this.')
globalArgs.add_argument('--keepAll', action='store_true', help='If this option is set, all saved model will be kept (Warning: make sure you have enough free disk space or increase saveEvery)') # TODO: Add an option to delimit the max size
globalArgs.add_argument('--modelTag', type=str, default=None, help='tag to differentiate which model to store/load')
globalArgs.add_argument('--rootDir', type=str, default=None, help='folder where to look for the models and data')
globalArgs.add_argument('--watsonMode', action='store_true', help='Inverse the questions and answer when training (the network try to guess the question)')
globalArgs.add_argument('--autoEncode', action='store_true', help='Randomly pick the question or the answer and use it both as input and output')
globalArgs.add_argument('--device', type=str, default=None, help='\'gpu\' or \'cpu\' (Warning: make sure you have enough free RAM), allow to choose on which hardware run the model')
globalArgs.add_argument('--seed', type=int, default=None, help='random seed for replication')
# Dataset options
datasetArgs = parser.add_argument_group('Dataset options')
datasetArgs.add_argument('--corpus', choices=TextData.corpusChoices(), default=TextData.corpusChoices()[0], help='corpus on which extract the dataset.')
datasetArgs.add_argument('--datasetTag', type=str, default='', help='add a tag to the dataset (file where to load the vocabulary and the precomputed samples, not the original corpus). Useful to manage multiple versions. Also used to define the file used for the lightweight format.') # The samples are computed from the corpus if it does not exist already. There are saved in \'data/samples/\'
datasetArgs.add_argument('--ratioDataset', type=float, default=1.0, help='ratio of dataset used to avoid using the whole dataset') # Not implemented, useless ?
datasetArgs.add_argument('--maxLength', type=int, default=10, help='maximum length of the sentence (for input and output), define number of maximum step of the RNN')
datasetArgs.add_argument('--lightweightFile', type=str, default=None, help='file containing our lightweight-formatted corpus')
# Network options (Warning: if modifying something here, also make the change on save/loadParams() )
nnArgs = parser.add_argument_group('Network options', 'architecture related option')
nnArgs.add_argument('--hiddenSize', type=int, default=256, help='number of hidden units in each RNN cell')
nnArgs.add_argument('--numLayers', type=int, default=2, help='number of rnn layers')
nnArgs.add_argument('--embeddingSize', type=int, default=32, help='embedding size of the word representation')
nnArgs.add_argument('--initEmbeddings', action='store_true', help='if present, the program will initialize the embeddings with pre-trained word2vec vectors')
nnArgs.add_argument('--softmaxSamples', type=int, default=0, help='Number of samples in the sampled softmax loss function. A value of 0 deactivates sampled softmax')
# Training options
trainingArgs = parser.add_argument_group('Training options')
trainingArgs.add_argument('--numEpochs', type=int, default=30, help='maximum number of epochs to run')
trainingArgs.add_argument('--saveEvery', type=int, default=1000, help='nb of mini-batch step before creating a model checkpoint')
trainingArgs.add_argument('--batchSize', type=int, default=10, help='mini-batch size')
trainingArgs.add_argument('--learningRate', type=float, default=0.001, help='Learning rate')
return parser.parse_args(args)
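# Illustration (not part of the original file): with an entry script that simply calls
# Chatbot().main(), the options above would typically be combined like this
# (the script name main.py is a hypothetical placeholder):
#   python main.py --modelTag movies --numEpochs 30 --batchSize 256
#   python main.py --modelTag movies --test interactive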
def main(self, args=None):
"""
Launch the training and/or the interactive mode
"""
print('Welcome to DeepQA v0.1 !')
print()
print('TensorFlow detected: v{}'.format(tf.__version__))
# General initialisation
self.args = self.parseArgs(args)
if not self.args.rootDir:
self.args.rootDir = os.getcwd() # Use the current working directory
#tf.logging.set_verbosity(tf.logging.INFO) # DEBUG, INFO, WARN (default), ERROR, or FATAL
self.loadModelParams() # Update the self.modelDir and self.globStep, for now, not used when loading Model (but need to be called before _getSummaryName)
self.textData = TextData(self.args)
# TODO: Add a mode where we can force the input of the decoder // Try to visualize the predictions for
# each word of the vocabulary / decoder input
# TODO: For now, the models are trained for a specific dataset (because of the maxLength which defines the
# vocabulary). Add a compatibility mode which allows launching a model trained on a different vocabulary (
# remap the word2id/id2word variables).
if self.args.createDataset:
print('Dataset created! Thanks for using this program')
return # No need to go further
# Prepare the model
with tf.device(self.getDevice()):
self.model = Model(self.args, self.textData)
# Saver/summaries
self.writer = tf.summary.FileWriter(self._getSummaryName())
self.saver = tf.train.Saver(max_to_keep=200, write_version=tf.train.SaverDef.V1) # TODO: See GitHub for format name issue (when restoring the model)
# TODO: Fixed seed (WARNING: If dataset shuffling, make sure to do that after saving the
# dataset, otherwise, everything that comes after the shuffling won't be replicable when
# reloading the dataset). How to restore the seed after loading ??
# Also fix the seed for random.shuffle (does it work globally for all files ?)
# Running session
self.sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True, # Allows backup device for non GPU-available operations (when forcing GPU)
log_device_placement=False) # Too verbose ?
) # TODO: Replace all sess by self.sess (not necessary a good idea) ?
if self.args.debug:
self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)
self.sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
print('Initialize variables...')
self.sess.run(tf.global_variables_initializer())
# Reload the model if it exists; in testing mode, the models are not loaded here (but in predictTestset)
if self.args.test != Chatbot.TestMode.ALL:
self.managePreviousModel(self.sess)
# Initialize embeddings with pre-trained word2vec vectors
if self.args.initEmbeddings:
print("Loading pre-trained embeddings from GoogleNews-vectors-negative300.bin")
self.loadEmbedding(self.sess)
if self.args.test:
if self.args.test == Chatbot.TestMode.INTERACTIVE:
self.mainTestInteractive(self.sess)
elif self.args.test == Chatbot.TestMode.ALL:
print('Start predicting...')
self.predictTestset(self.sess)
print('All predictions done')
elif self.args.test == Chatbot.TestMode.DAEMON:
print('Daemon mode, running in background...')
else:
raise RuntimeError('Unknown test mode: {}'.format(self.args.test)) # Should never happen
else:
self.mainTrain(self.sess)
if self.args.test != Chatbot.TestMode.DAEMON:
self.sess.close()
print("The End! Thanks for using this program")
def mainTrain(self, sess):
""" Training loop
Args:
sess: The current running session
"""
# Specific training dependent loading
self.textData.makeLighter(self.args.ratioDataset) # Limit the number of training samples
mergedSummaries = tf.summary.merge_all() # Define the summary operator (Warning: Won't appear on the tensorboard graph)
if self.globStep == 0: # Not restoring from previous run
self.writer.add_graph(sess.graph) # First time only
# If restoring a model, restore the progression bar ? and current batch ?
print('Start training (press Ctrl+C to save and exit)...')
try: # If the user exit while training, we still try to save the model
for e in range(self.args.numEpochs):
print()
print("----- Epoch {}/{} ; (lr={}) -----".format(e+1, self.args.numEpochs, self.args.learningRate))
batches = self.textData.getBatches()
# TODO: Also update learning parameters eventually
tic = datetime.datetime.now()
for nextBatch in tqdm(batches, desc="Training"):
# Training pass
ops, feedDict = self.model.step(nextBatch)
assert len(ops) == 2 # training, loss
_, loss, summary = sess.run(ops + (mergedSummaries,), feedDict)
self.writer.add_summary(summary, self.globStep)
self.globStep += 1
# Output training status
if self.globStep % 100 == 0:
perplexity = math.exp(float(loss)) if loss < 300 else float("inf")
tqdm.write("----- Step %d -- Loss %.2f -- Perplexity %.2f" % (self.globStep, loss, perplexity))
# Checkpoint
if self.globStep % self.args.saveEvery == 0:
self._saveSession(sess)
toc = datetime.datetime.now()
print("Epoch finished in {}".format(toc-tic)) # Warning: Will overflow if an epoch takes more than 24 hours, and the output isn't really nicer
except (KeyboardInterrupt, SystemExit): # If the user presses Ctrl+C while training is in progress
print('Interruption detected, exiting the program...')
self._saveSession(sess) # Ultimate saving before complete exit
def predictTestset(self, sess):
""" Try predicting the sentences from the samples.txt file.
The sentences are saved on the modelDir under the same name
Args:
sess: The current running session
"""
# Loading the file to predict
with open(os.path.join(self.args.rootDir, self.TEST_IN_NAME), 'r') as f:
lines = f.readlines()
modelList = self._getModelList()
if not modelList:
print('Warning: No model found in \'{}\'. Please train a model before trying to predict'.format(self.modelDir))
return
# Predicting for each model present in modelDir
for modelName in sorted(modelList): # TODO: Natural sorting
print('Restoring previous model from {}'.format(modelName))
self.saver.restore(sess, modelName)
print('Testing...')
saveName = modelName[:-len(self.MODEL_EXT)] + self.TEST_OUT_SUFFIX # We remove the model extension and add the prediction suffix
with open(saveName, 'w') as f:
nbIgnored = 0
for line in tqdm(lines, desc='Sentences'):
question = line[:-1] # Remove the endl character
answer = self.singlePredict(question)
if not answer:
nbIgnored += 1
continue # Back to the beginning, try again
predString = '{x[0]}{0}\n{x[1]}{1}\n\n'.format(question, self.textData.sequence2str(answer, clean=True), x=self.SENTENCES_PREFIX)
if self.args.verbose:
tqdm.write(predString)
f.write(predString)
print('Prediction finished, {}/{} sentences ignored (too long)'.format(nbIgnored, len(lines)))
def mainTestInteractive(self, sess):
""" Try predicting the sentences that the user will enter in the console
Args:
sess: The current running session
"""
# TODO: If verbose mode, also show similar sentences from the training set with the same words (include in mainTest also)
# TODO: Also show the top 10 most likely predictions for each predicted output (when verbose mode)
# TODO: Log the questions asked for latter re-use (merge with test/samples.txt)
print('Testing: Launch interactive mode:')
print('')
print('Welcome to the interactive mode, here you can ask Deep Q&A any sentence you want. Don\'t have high '
'expectations. Type \'exit\' or just press ENTER to quit the program. Have fun.')
while True:
question = input(self.SENTENCES_PREFIX[0])
if question == '' or question == 'exit':
break
questionSeq = [] # Will contain the question as seen by the encoder
answer = self.singlePredict(question, questionSeq)
if not answer:
print('Warning: sentence too long, sorry. Maybe try a simpler sentence.')
continue # Back to the beginning, try again
print('{}{}'.format(self.SENTENCES_PREFIX[1], self.textData.sequence2str(answer, clean=True)))
if self.args.verbose:
print(self.textData.batchSeq2str(questionSeq, clean=True, reverse=True))
print(self.textData.sequence2str(answer))
print()
def singlePredict(self, question, questionSeq=None):
""" Predict the sentence
Args:
question (str): the raw input sentence
questionSeq (List<int>): output argument. If given will contain the input batch sequence
Return:
list <int>: the word ids corresponding to the answer
"""
# Create the input batch
batch = self.textData.sentence2enco(question)
if not batch:
return None
if questionSeq is not None: # If the caller wants to have the real input
questionSeq.extend(batch.encoderSeqs)
# Run the model
ops, feedDict = self.model.step(batch)
output = self.sess.run(ops[0], feedDict) # TODO: Summarize the output too (histogram, ...)
answer = self.textData.deco2sentence(output)
return answer
def daemonPredict(self, sentence):
""" Return the answer to a given sentence (same as singlePredict() but with additional cleaning)
Args:
sentence (str): the raw input sentence
Return:
str: the human readable sentence
"""
return self.textData.sequence2str(
self.singlePredict(sentence),
clean=True
)
def daemonClose(self):
""" A utility function to close the daemon when finish
"""
print('Exiting the daemon mode...')
self.sess.close()
print('Daemon closed.')
def loadEmbedding(self, sess):
""" Initialize embeddings with pre-trained word2vec vectors
Will modify the embedding weights of the current loaded model
Uses the GoogleNews pre-trained values (path hardcoded)
"""
# Fetch embedding variables from model
with tf.variable_scope("embedding_rnn_seq2seq/RNN/EmbeddingWrapper", reuse=True):
em_in = tf.get_variable("embedding")
with tf.variable_scope("embedding_rnn_seq2seq/embedding_rnn_decoder", reuse=True):
em_out = tf.get_variable("embedding")
# Disable training for embeddings
variables = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)
variables.remove(em_in)
variables.remove(em_out)
# If restoring a model, we can leave here
if self.globStep != 0:
return
# New model, we load the pre-trained word2vec data and initialize embeddings
with open(os.path.join(self.args.rootDir, 'data/word2vec/GoogleNews-vectors-negative300.bin'), "rb", 0) as f:
header = f.readline()
vocab_size, vector_size = map(int, header.split())
binary_len = np.dtype('float32').itemsize * vector_size
initW = np.random.uniform(-0.25,0.25,(len(self.textData.word2id), vector_size))
for line in tqdm(range(vocab_size)):
word = []
while True:
ch = f.read(1)
if ch == b' ':
word = b''.join(word).decode('utf-8')
break
if ch != b'\n':
word.append(ch)
if word in self.textData.word2id:
initW[self.textData.word2id[word]] = np.fromstring(f.read(binary_len), dtype='float32')
else:
f.read(binary_len)
# PCA Decomposition to reduce word2vec dimensionality
if self.args.embeddingSize < vector_size:
U, s, Vt = np.linalg.svd(initW, full_matrices=False)
S = np.zeros((vector_size, vector_size), dtype=complex)
S[:vector_size, :vector_size] = np.diag(s)
initW = np.dot(U[:, :self.args.embeddingSize], S[:self.args.embeddingSize, :self.args.embeddingSize])
# Initialize input and output embeddings
sess.run(em_in.assign(initW))
sess.run(em_out.assign(initW))
def managePreviousModel(self, sess):
""" Restore or reset the model, depending of the parameters
If the destination directory already contains some file, it will handle the conflict as following:
* If --reset is set, all present files will be removed (warning: no confirmation is asked) and the training
restart from scratch (globStep & cie reinitialized)
* Otherwise, it will depend of the directory content. If the directory contains:
* No model files (only summary logs): works as a reset (restart from scratch)
* Other model files, but modelName not found (surely keepAll option changed): raise error, the user should
decide by himself what to do
* The right model file (eventually some other): no problem, simply resume the training
In any case, the directory will exist as it has been created by the summary writer
Args:
sess: The current running session
"""
print('WARNING: ', end='')
modelName = self._getModelName()
if os.listdir(self.modelDir):
if self.args.reset:
print('Reset: Destroying previous model at {}'.format(self.modelDir))
# Analysing directory content
elif os.path.exists(modelName): # Restore the model
print('Restoring previous model from {}'.format(modelName))
self.saver.restore(sess, modelName) # Will crash when --reset is not activated and the model has not been saved yet
elif self._getModelList():
print('Conflict with previous models.')
raise RuntimeError('Some models are already present in \'{}\'. You should check them first (or re-try with the keepAll flag)'.format(self.modelDir))
else: # No other model to conflict with (probably summary files)
print('No previous model found, but some files found at {}. Cleaning...'.format(self.modelDir)) # Warning: No confirmation asked
self.args.reset = True
if self.args.reset:
fileList = [os.path.join(self.modelDir, f) for f in os.listdir(self.modelDir)]
for f in fileList:
print('Removing {}'.format(f))
os.remove(f)
else:
print('No previous model found, starting from clean directory: {}'.format(self.modelDir))
def _saveSession(self, sess):
""" Save the model parameters and the variables
Args:
sess: the current session
"""
tqdm.write('Checkpoint reached: saving model (don\'t stop the run)...')
self.saveModelParams()
self.saver.save(sess, self._getModelName()) # TODO: Put a limit size (ex: 3GB for the modelDir)
tqdm.write('Model saved.')
def _getModelList(self):
""" Return the list of the model files inside the model directory
"""
return [os.path.join(self.modelDir, f) for f in os.listdir(self.modelDir) if f.endswith(self.MODEL_EXT)]
def loadModelParams(self):
""" Load the some values associated with the current model, like the current globStep value
For now, this function does not need to be called before loading the model (no parameters restored). However,
the modelDir name will be initialized here so it is required to call this function before managePreviousModel(),
_getModelName() or _getSummaryName()
Warning: if you modify this function, make sure the changes mirror saveModelParams, also check if the parameters
should be reset in managePreviousModel
"""
# Compute the current model path
self.modelDir = os.path.join(self.args.rootDir, self.MODEL_DIR_BASE)
if self.args.modelTag:
self.modelDir += '-' + self.args.modelTag
# If there is a previous model, restore some parameters
configName = os.path.join(self.modelDir, self.CONFIG_FILENAME)
if not self.args.reset and not self.args.createDataset and os.path.exists(configName):
# Loading
config = configparser.ConfigParser()
config.read(configName)
# Check the version
currentVersion = config['General'].get('version')
if currentVersion != self.CONFIG_VERSION:
raise UserWarning('Present configuration version {0} does not match {1}. You can try manual changes on \'{2}\''.format(currentVersion, self.CONFIG_VERSION, configName))
# Restoring the parameters
self.globStep = config['General'].getint('globStep')
self.args.maxLength = config['General'].getint('maxLength') # We need to restore the model length because of the textData associated and the vocabulary size (TODO: Compatibility mode between different maxLength)
self.args.watsonMode = config['General'].getboolean('watsonMode')
self.args.autoEncode = config['General'].getboolean('autoEncode')
self.args.corpus = config['General'].get('corpus')
self.args.datasetTag = config['General'].get('datasetTag', '')
self.args.hiddenSize = config['Network'].getint('hiddenSize')
self.args.numLayers = config['Network'].getint('numLayers')
self.args.embeddingSize = config['Network'].getint('embeddingSize')
self.args.initEmbeddings = config['Network'].getboolean('initEmbeddings')
self.args.softmaxSamples = config['Network'].getint('softmaxSamples')
# No restoring for training params, batch size or other non model dependent parameters
# Show the restored params
print()
print('Warning: Restoring parameters:')
print('globStep: {}'.format(self.globStep))
print('maxLength: {}'.format(self.args.maxLength))
print('watsonMode: {}'.format(self.args.watsonMode))
print('autoEncode: {}'.format(self.args.autoEncode))
print('corpus: {}'.format(self.args.corpus))
print('datasetTag: {}'.format(self.args.datasetTag))
print('hiddenSize: {}'.format(self.args.hiddenSize))
print('numLayers: {}'.format(self.args.numLayers))
print('embeddingSize: {}'.format(self.args.embeddingSize))
print('initEmbeddings: {}'.format(self.args.initEmbeddings))
print('softmaxSamples: {}'.format(self.args.softmaxSamples))
print()
# For now, not arbitrary independent maxLength between encoder and decoder
self.args.maxLengthEnco = self.args.maxLength
self.args.maxLengthDeco = self.args.maxLength + 2
if self.args.watsonMode:
self.SENTENCES_PREFIX.reverse()
def saveModelParams(self):
""" Save the params of the model, like the current globStep value
Warning: if you modify this function, make sure the changes mirror loadModelParams
"""
config = configparser.ConfigParser()
config['General'] = {}
config['General']['version'] = self.CONFIG_VERSION
config['General']['globStep'] = str(self.globStep)
config['General']['maxLength'] = str(self.args.maxLength)
config['General']['watsonMode'] = str(self.args.watsonMode)
config['General']['autoEncode'] = str(self.args.autoEncode)
config['General']['corpus'] = str(self.args.corpus)
config['General']['datasetTag'] = str(self.args.datasetTag)
config['Network'] = {}
config['Network']['hiddenSize'] = str(self.args.hiddenSize)
config['Network']['numLayers'] = str(self.args.numLayers)
config['Network']['embeddingSize'] = str(self.args.embeddingSize)
config['Network']['initEmbeddings'] = str(self.args.initEmbeddings)
config['Network']['softmaxSamples'] = str(self.args.softmaxSamples)
# Keep track of the learning params (but without restoring them)
config['Training (won\'t be restored)'] = {}
config['Training (won\'t be restored)']['learningRate'] = str(self.args.learningRate)
config['Training (won\'t be restored)']['batchSize'] = str(self.args.batchSize)
with open(os.path.join(self.modelDir, self.CONFIG_FILENAME), 'w') as configFile:
config.write(configFile)
def _getSummaryName(self):
""" Parse the argument to decide were to save the summary, at the same place that the model
The folder could already contain logs if we restore the training, those will be merged
Return:
str: The path and name of the summary
"""
return self.modelDir
def _getModelName(self):
""" Parse the argument to decide were to save/load the model
This function is called at each checkpoint and the first time the model is load. If keepAll option is set, the
globStep value will be included in the name.
Return:
str: The path and name where the model needs to be saved
"""
modelName = os.path.join(self.modelDir, self.MODEL_NAME_BASE)
if self.args.keepAll: # We do not erase the previously saved model by including the current step on the name
modelName += '-' + str(self.globStep)
return modelName + self.MODEL_EXT
def getDevice(self):
""" Parse the argument to decide on which device run the model
Return:
str: The name of the device on which to run the program
"""
if self.args.device == 'cpu':
return '/cpu:0'
elif self.args.device == 'gpu':
return '/gpu:0'
elif self.args.device is None: # No specified device (default)
return None
else:
print('Warning: Error in the device name: {}, use the default device'.format(self.args.device))
return None
| apache-2.0 | -8,090,488,278,616,554,000 | 5,708,982,348,771,971,000 | 50.016103 | 402 | 0.631009 | false |
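A hedged sketch of how the Chatbot class above can be driven programmatically; main() accepts an argument list that is fed to parseArgs(), so the flags below mirror the options defined there (it assumes TensorFlow and the corpus data are already set up):

from chatbot.chatbot import Chatbot

# Train a model tagged 'demo'; checkpoints land in save/model-demo/.
bot = Chatbot()
bot.main(['--modelTag', 'demo', '--numEpochs', '1'])

# Daemon mode keeps the TensorFlow session open so predictions can be requested repeatedly.
daemon = Chatbot()
daemon.main(['--modelTag', 'demo', '--test', 'daemon'])
print(daemon.daemonPredict('Hello, how are you?'))
daemon.daemonClose()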
codemeow5/PyPack | pypack/protocol.py | 1 | 2657 | """ Classes and functions related to protocol operations
"""
import datetime
import struct
MSG_TYPE_SEND = 0x1
MSG_TYPE_ACK = 0x2
MSG_TYPE_RECEIVED = 0x3
MSG_TYPE_RELEASE = 0x4
MSG_TYPE_COMPLETED = 0x5
QOS0 = 0
QOS1 = 1
QOS2 = 2
# MAX_DATETIME = int((datetime.datetime(2500, 1, 1) - datetime.datetime(1970, 1, 1)).total_seconds())
class Packet(object):
""" This is a class that describe an incoming or outgoing message
Members:
msg_type : Enum. message type
qos : Enum. quality of service level
dup : Bool. whether the message is resent
msg_id : Number. message id
remaining_length : Number. payload length
total_length : Number. buffer length
payload : String. message body
buff : String. full message
confirm : whether the message is answered
retry_times : resent times
timestamp : next send time
"""
def __init__(self, msg_type=MSG_TYPE_SEND, qos=QOS0, dup=False, msg_id=0, payload=None):
self.msg_type = msg_type
self.qos = qos
self.dup = dup
self.msg_id = msg_id
if payload is not None and not isinstance(payload, str):
raise TypeError("parameter payload must be str, not %s" % type(payload).__name__)
self.payload = payload
if payload is None:
self.remaining_length = 0
else:
self.remaining_length = len(payload)
self.total_length = 5 + self.remaining_length
self.confirm = False
self.retry_times = 0
self.timestamp = 0
self.buff = None
@staticmethod
def encode(packet):
""" Encode packet object and fill buff field
"""
buff = bytearray()
fixed_header = (packet.msg_type << 4) | (packet.qos << 2) | (packet.dup << 1)
buff.extend(struct.pack("!B", fixed_header))
buff.extend(struct.pack("!H", packet.msg_id))
buff.extend(struct.pack("!H", packet.remaining_length))
if packet.payload is not None:
buff.extend(packet.payload)
packet.buff = str(buff)
@staticmethod
def decode(buff):
""" Convert buff string to packet object
"""
(fixed_header, msg_id, remaining_length) = struct.unpack("!BHH", buff[:5])
msg_type = fixed_header >> 4
qos = (fixed_header & 0xf) >> 2
dup = (fixed_header & 0x3) >> 1
if len(buff) >= 5 + remaining_length:
(_, payload) = struct.unpack("!5s%ss" % remaining_length, buff[:5 + remaining_length])
packet = Packet(msg_type, qos, dup, msg_id, payload)
packet.buff = buff
return packet
else:
return None
| mit | 4,588,043,538,149,391,400 | 3,851,246,842,759,226,000 | 31.802469 | 101 | 0.601807 | false |
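A small round-trip sketch for the Packet class above (no dependencies beyond the module itself; the import path follows the repository layout shown in this row):

from pypack.protocol import Packet, MSG_TYPE_SEND, QOS1

# Build an outgoing message and serialize it into packet.buff.
packet = Packet(msg_type=MSG_TYPE_SEND, qos=QOS1, msg_id=42, payload="hello")
Packet.encode(packet)

# Decoding the wire buffer yields an equivalent Packet object.
decoded = Packet.decode(packet.buff)
print("%d %d %s" % (decoded.msg_id, decoded.qos, decoded.payload))  # 42 1 hello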
ioanaantoche/muhaha | ioana/examples/feet.py | 1 | 1624 | import sys
from naoqi import ALProxy
import time
def main(robotIP):
PORT = 9559
try:
motionProxy = ALProxy("ALMotion", robotIP, PORT)
except Exception,e:
print "Could not create proxy to ALMotion"
print "Error was: ",e
sys.exit(1)
try:
postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)
except Exception, e:
print "Could not create proxy to ALRobotPosture"
print "Error was: ", e
# Send NAO to Pose Init
postureProxy.goToPosture("StandInit", 0.5)
motionProxy.wbEnable(True)
# Example showing how to fix the feet.
#print "Feet fixed."
#stateName = "Fixed"
#supportLeg = "Legs"
#motionProxy.wbFootState(stateName, supportLeg)
# Example showing how to fix the left leg and constrained in a plane the right leg.
#print "Left leg fixed, right leg in a plane."
#motionProxy.wbFootState("Fixed", "LLeg")
#motionProxy.wbFootState("Plane", "RLeg")
# Example showing how to fix the left leg and keep free the right leg.
print "Left leg fixed, right leg free"
motionProxy.wbFootState("Fixed", "LLeg")
motionProxy.wbFootState("Free", "RLeg")
time.sleep(10.0)
print "motionProxy.wbEnable(False)"
motionProxy.wbEnable(False)
time.sleep(5.0)
print "postureProxy.goToPosture(Sit, 0.5)"
postureProxy.goToPosture("SitRelax", 0.5)
if __name__ == "__main__":
robotIp = "127.0.0.1"
if len(sys.argv) <= 1:
print "Usage python almotion_wbfootstate.py robotIP (optional default: 127.0.0.1)"
else:
robotIp = sys.argv[1]
main(robotIp) | gpl-2.0 | 616,225,764,516,062,000 | 4,435,537,970,648,450,600 | 26.083333 | 90 | 0.649631 | false |
seomoz/gevent-soup | bs4/element.py | 438 | 61538 | import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self, value):
return setattr(self, attr, value)
return alias
class NamespacedAttribute(unicode):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = unicode.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = unicode.__new__(cls, name)
else:
obj = unicode.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(unicode):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return unicode.__new__(unicode, original_value)
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
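# Illustration (not part of the original file): when a document is re-encoded,
# a ContentMetaAttributeValue rewrites the charset declaration inside the
# attribute value, e.g.
#   ContentMetaAttributeValue('text/html; charset=utf8').encode('utf-8')
#   returns 'text/html; charset=utf-8'
# whereas CharsetMetaAttributeValue.encode() simply returns the new encoding name.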
class HTMLAwareEntitySubstitution(EntitySubstitution):
"""Entity substitution rules that are aware of some HTML quirks.
Specifically, the contents of <script> and <style> tags should not
undergo entity substitution.
Incoming NavigableString objects are checked to see if they're the
direct children of a <script> or <style> tag.
"""
cdata_containing_tags = set(["script", "style"])
preformatted_tags = set(["pre"])
@classmethod
def _substitute_if_appropriate(cls, ns, f):
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in cls.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return f(ns)
@classmethod
def substitute_html(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_html)
@classmethod
def substitute_xml(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self, is_initialized=True, accept_self=True):
"Finds the last element beneath this object to be parsed."
if is_initialized and self.next_sibling:
last_child = self.next_sibling.previous_element
else:
last_child = self
while isinstance(last_child, Tag) and last_child.contents:
last_child = last_child.contents[-1]
if not accept_self and last_child == self:
last_child = None
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, basestring)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant(False)
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant(False)
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
if text is None and not limit and not attrs and not kwargs:
if name is True or name is None:
# Optimization to find all tags.
result = (element for element in generator
if isinstance(element, Tag))
return ResultSet(strainer, result)
elif isinstance(name, basestring):
# Optimization to find all tags with a given name.
result = (element for element in generator
if isinstance(element, Tag)
and element.name == name)
return ResultSet(strainer, result)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
# string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(unicode, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __copy__(self):
return self
def __getnewargs__(self):
return (unicode(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
@property
def name(self):
return None
@name.setter
def name(self, name):
raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = u'<![CDATA['
SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = u'<?'
SUFFIX = u'?>'
class Comment(PreformattedString):
PREFIX = u'<!--'
SUFFIX = u'-->'
class Declaration(PreformattedString):
PREFIX = u'<!'
SUFFIX = u'!>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name or ''
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = u'<!DOCTYPE '
SUFFIX = u'>\n'
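# Illustration (not part of the original file): the PREFIX/SUFFIX pairs above are
# what output_ready() wraps around each string when a parsed tree is serialized,
# e.g. a parsed comment renders as '<!--a comment-->' and a doctype built via
# Doctype.for_name_and_ids('html', None, None) renders as '<!DOCTYPE html>\n'.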
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif attrs and builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False, types=(NavigableString, CData)):
"""Yield all strings of certain classes, possibly stripping them.
By default, yields only NavigableString and CData objects. So
no comments, processing instructions, etc.
"""
for descendant in self.descendants:
if (
(types is None and not isinstance(descendant, NavigableString))
or
(types is not None and type(descendant) not in types)):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator=u"", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
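    # Illustrative sketch (assumes a soup built with bs4's BeautifulSoup):
    #
    #   >>> soup = BeautifulSoup("<p> One <b> Two </b></p>")
    #   >>> soup.p.get_text(",", strip=True)
    #   u'One,Two'
    #   >>> list(soup.p.stripped_strings)
    #   [u'One', u'Two']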
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i.contents = []
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
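    # Illustrative sketch (assumes a soup built with bs4's BeautifulSoup):
    # clear() detaches the children but leaves them usable, while
    # clear(decompose=True) also destroys the extracted subtrees.
    #
    #   >>> soup = BeautifulSoup("<div><p>a</p><p>b</p></div>")
    #   >>> soup.div.clear()
    #   >>> soup.div.contents
    #   []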
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
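    # Illustrative sketch (assumes a soup built with bs4's BeautifulSoup):
    #
    #   >>> soup = BeautifulSoup('<div id="x"><a>1</a><a>2</a></div>')
    #   >>> soup.div['id']          # __getitem__ reads an attribute
    #   u'x'
    #   >>> len(soup.div('a'))      # calling a tag == tag.find_all('a')
    #   2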
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?"""
return (
indent_level is not None and
(self.name not in HTMLAwareEntitySubstitution.preformatted_tags
or self._is_xml))
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, basestring):
val = unicode(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
unicode(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = self._should_pretty_print(indent_level)
space = ''
indent_space = ''
if indent_level is not None:
indent_space = (' ' * (indent_level - 1))
if pretty_print:
space = indent_space
indent_contents = indent_level + 1
else:
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if indent_level is not None:
# Even if this particular tag is not pretty-printed,
# we should indent up to the start of the tag.
s.append(indent_space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if indent_level is not None and closeTag and self.next_sibling:
# Even if this particular tag is not pretty-printed,
# we're now done with the tag, and we should add a
# newline if appropriate.
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level and not self.name == 'pre':
text = text.strip()
if text:
if pretty_print and not self.name == 'pre':
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print and not self.name == 'pre':
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring."""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
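    # Illustrative sketch (assumes a soup built with bs4's BeautifulSoup):
    #
    #   >>> soup = BeautifulSoup('<body><a class="x">1</a><div><a>2</a></div></body>')
    #   >>> len(soup.body.find_all('a'))                  # recursive by default
    #   2
    #   >>> len(soup.body.find_all('a', recursive=False)) # direct children only
    #   1
    #   >>> soup.body.find('a', class_='x').string
    #   u'1'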
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# CSS selector code
_selector_combinators = ['>', '+', '~']
_select_debug = False
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token in enumerate(tokens):
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
except:
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
                    if tag_name:
                        check = tag_name
                    else:
                        check = "[any]"
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
new_context = []
new_context_ids = set([])
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
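    # Illustrative sketch (assumes a soup built with bs4's BeautifulSoup),
    # exercising a few of the selector branches handled above:
    #
    #   >>> soup = BeautifulSoup('<div id="main"><p class="a">1</p><p>2</p></div>')
    #   >>> [t.string for t in soup.select("div#main p.a")]   # descendant + id + class
    #   [u'1']
    #   >>> [t.string for t in soup.select("div > p")]        # '>' combinator
    #   [u'1', u'2']
    #   >>> [t.string for t in soup.select("p:nth-of-type(2)")]
    #   [u'2']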
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
def has_key(self, key):
"""This was kind of misleading because has_key() (attributes)
was different from __in__ (contents). has_key() is gone in
Python 3, anyway."""
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
key))
return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in attrs.items():
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
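    # Illustrative sketch (a SoupStrainer is normally passed to BeautifulSoup
    # as parse_only, but it can be exercised directly):
    #
    #   >>> only_links = SoupStrainer("a", class_="external")
    #   >>> only_links.search_tag("a", {"class": ["external"]}) is not None
    #   True
    #   >>> only_links.search_tag("div", {}) is None
    #   True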
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, unicode)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, unicode)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, unicode):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source, result=()):
super(ResultSet, self).__init__(result)
self.source = source
| mit | 677,899,126,455,505,200 | 3,977,941,708,721,918,500 | 37.198634 | 113 | 0.558939 | false |
BreakawayConsulting/pyxmlerrors | pyxmlerrors.py | 1 | 3091 | """
Copyright (c) 2013 Breakaway Consulting Pty. Ltd.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.dom.minidom
import xml.dom.expatbuilder
from xml.parsers.expat import ExpatError
def monkey_start_element_handler(self, name, attributes):
"""This function is monkey-patched over the standard start_element_handle method.
It adds the _line and _col attributes to the element node so that later error-checking can produce useful,
targeted error messages.
"""
real_start_element_handler(self, name, attributes)
node = self.curNode
node._line = self.getParser().CurrentLineNumber
node._col = self.getParser().CurrentColumnNumber
real_start_element_handler = xml.dom.expatbuilder.ExpatBuilderNS.start_element_handler
xml.dom.expatbuilder.ExpatBuilderNS.start_element_handler = monkey_start_element_handler
def xml_error_str(el, msg):
"""Return an error string in the form:
filename:lineno.colno msg
"""
return "{}:{}.{} {}".format(el.ownerDocument._path, el.ownerDocument._start_line + el._line, el._col, msg)
def xml_parse_file(filename):
"""Parse XML file `filename` and return the documentElement.
    This is a thin wrapper around the standard file-parsing routine that adds extra attributes to the
    DOM to enable better diagnostics via the xml_error_str function.
"""
try:
dom = xml.dom.minidom.parse(filename)
except ExpatError as e:
e._path = filename
raise e
dom._path = filename
dom._start_line = 0
return dom
def xml_parse_string(string, name='<string>', start_line=0):
"""Parse an XML string.
    Optionally, a name can be provided that will be used when reporting diagnostics.
In the case where the string has been extracted from another file the start_line parameter can be used to adjust
the line number diagnostics.
"""
try:
dom = xml.dom.minidom.parseString(string)
except ExpatError as e:
e._path = name
e.lineno += start_line
raise e
dom._path = name
dom._start_line = start_line
return dom
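# Illustrative usage sketch (not part of the original module; guarded so that
# importing it stays side-effect free). 'config.xml' is a hypothetical file.
if __name__ == '__main__':
    try:
        dom = xml_parse_file('config.xml')
        root = dom.documentElement
        print(xml_error_str(root, 'root element is <%s>' % root.tagName))
    except ExpatError as exc:
        print('%s:%d parse error: %s' % (exc._path, exc.lineno, exc))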
| mit | 3,792,106,465,720,870,000 | -2,753,760,030,429,735,400 | 34.528736 | 116 | 0.733096 | false |
blakfeld/ansible | v1/ansible/runner/action_plugins/synchronize.py | 86 | 8449 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012-2013, Timothy Appnel <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os.path
from ansible import utils
from ansible import constants
from ansible.runner.return_data import ReturnData
import ansible.utils.template as template
class ActionModule(object):
def __init__(self, runner):
self.runner = runner
self.inject = None
def _get_absolute_path(self, path=None):
if 'vars' in self.inject:
if '_original_file' in self.inject['vars']:
# roles
original_path = path
path = utils.path_dwim_relative(self.inject['_original_file'], 'files', path, self.runner.basedir)
if original_path and original_path[-1] == '/' and path[-1] != '/':
# make sure the dwim'd path ends in a trailing "/"
# if the original path did
path += '/'
return path
def _process_origin(self, host, path, user):
if not host in ['127.0.0.1', 'localhost']:
if user:
return '%s@%s:%s' % (user, host, path)
else:
return '%s:%s' % (host, path)
else:
if not ':' in path:
if not path.startswith('/'):
path = self._get_absolute_path(path=path)
return path
def _process_remote(self, host, path, user):
transport = self.runner.transport
return_data = None
if not host in ['127.0.0.1', 'localhost'] or transport != "local":
if user:
return_data = '%s@%s:%s' % (user, host, path)
else:
return_data = '%s:%s' % (host, path)
else:
return_data = path
if not ':' in return_data:
if not return_data.startswith('/'):
return_data = self._get_absolute_path(path=return_data)
return return_data
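    # Illustrative sketch (hypothetical values): both helpers build rsync-style
    # origins, e.g. _process_origin('10.0.0.5', '/tmp/src/', 'deploy') yields
    # 'deploy@10.0.0.5:/tmp/src/', while a path on the local side is returned
    # as a plain (absolute) path so rsync treats it as the local end.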
def setup(self, module_name, inject):
''' Always default to localhost as delegate if None defined '''
self.inject = inject
# Store original transport and sudo values.
self.original_transport = inject.get('ansible_connection', self.runner.transport)
self.original_become = self.runner.become
self.transport_overridden = False
if inject.get('delegate_to') is None:
inject['delegate_to'] = '127.0.0.1'
# IF original transport is not local, override transport and disable sudo.
if self.original_transport != 'local':
inject['ansible_connection'] = 'local'
self.transport_overridden = True
self.runner.become = False
def run(self, conn, tmp, module_name, module_args,
inject, complex_args=None, **kwargs):
''' generates params and passes them on to the rsync module '''
self.inject = inject
# load up options
options = {}
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
src = options.get('src', None)
dest = options.get('dest', None)
use_ssh_args = options.pop('use_ssh_args', None)
src = template.template(self.runner.basedir, src, inject)
dest = template.template(self.runner.basedir, dest, inject)
use_ssh_args = template.template(self.runner.basedir, use_ssh_args, inject)
try:
options['local_rsync_path'] = inject['ansible_rsync_path']
except KeyError:
pass
# from the perspective of the rsync call the delegate is the localhost
src_host = '127.0.0.1'
dest_host = inject.get('ansible_ssh_host', inject['inventory_hostname'])
# allow ansible_ssh_host to be templated
dest_host = template.template(self.runner.basedir, dest_host, inject, fail_on_undefined=True)
dest_is_local = dest_host in ['127.0.0.1', 'localhost']
# CHECK FOR NON-DEFAULT SSH PORT
dest_port = options.get('dest_port')
inv_port = inject.get('ansible_ssh_port', inject['inventory_hostname'])
if inv_port != dest_port and inv_port != inject['inventory_hostname']:
options['dest_port'] = inv_port
# edge case: explicit delegate and dest_host are the same
if dest_host == inject['delegate_to']:
dest_host = '127.0.0.1'
# SWITCH SRC AND DEST PER MODE
if options.get('mode', 'push') == 'pull':
(dest_host, src_host) = (src_host, dest_host)
# CHECK DELEGATE HOST INFO
use_delegate = False
if conn.delegate != conn.host:
if 'hostvars' in inject:
if conn.delegate in inject['hostvars'] and self.original_transport != 'local':
# use a delegate host instead of localhost
use_delegate = True
# COMPARE DELEGATE, HOST AND TRANSPORT
process_args = False
if not dest_host is src_host and self.original_transport != 'local':
# interpret and inject remote host info into src or dest
process_args = True
# MUNGE SRC AND DEST PER REMOTE_HOST INFO
if process_args or use_delegate:
user = None
if utils.boolean(options.get('set_remote_user', 'yes')):
if use_delegate:
user = inject['hostvars'][conn.delegate].get('ansible_ssh_user')
if not use_delegate or not user:
user = inject.get('ansible_ssh_user',
self.runner.remote_user)
if use_delegate:
# FIXME
private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
else:
private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
private_key = template.template(self.runner.basedir, private_key, inject, fail_on_undefined=True)
if not private_key is None:
private_key = os.path.expanduser(private_key)
options['private_key'] = private_key
# use the mode to define src and dest's url
if options.get('mode', 'push') == 'pull':
# src is a remote path: <user>@<host>, dest is a local path
src = self._process_remote(src_host, src, user)
dest = self._process_origin(dest_host, dest, user)
else:
# src is a local path, dest is a remote path: <user>@<host>
src = self._process_origin(src_host, src, user)
dest = self._process_remote(dest_host, dest, user)
options['src'] = src
options['dest'] = dest
if 'mode' in options:
del options['mode']
if use_ssh_args:
options['ssh_args'] = constants.ANSIBLE_SSH_ARGS
# Allow custom rsync path argument.
rsync_path = options.get('rsync_path', None)
# If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument.
if not rsync_path and self.transport_overridden and self.original_become and not dest_is_local and self.runner.become_method == 'sudo':
rsync_path = 'sudo rsync'
# make sure rsync path is quoted.
if rsync_path:
options['rsync_path'] = '"' + rsync_path + '"'
module_args = ""
if self.runner.noop_on_check(inject):
module_args = "CHECKMODE=True"
# run the module and store the result
result = self.runner._execute_module(conn, tmp, 'synchronize', module_args, complex_args=options, inject=inject)
# reset the sudo property
self.runner.become = self.original_become
return result
| gpl-3.0 | 655,345,670,744,316,700 | -6,064,226,323,490,407,000 | 37.756881 | 143 | 0.582554 | false |
LaurentClaessens/phystricks | manual/phystricksIllusionNHwEtp.py | 1 | 1130 | # -*- coding: utf8 -*-
from phystricks import *
def IllusionNHwEtp():
pspict,fig = SinglePicture("IllusionNHwEtp")
pspict.dilatation(0.7)
perspective=ObliqueProjection(45,sqrt(2)/2)
l=2
P=(0,0)
cubesP=[]
cubesL=[]
cubesH=[]
profondeur=7
longueur=4
hauteur=4
for i in range(0,profondeur):
cubesP.append(perspective.cuboid( P,l,l,l ))
Q=cubesP[-1].c2[3]
P=(Q.x,Q.y)
P=(0,0)
for i in range(0,longueur):
cubesL.append(perspective.cuboid(P,l,l,l))
Q=cubesL[-1].c1[2]
P=(Q.x,Q.y)
for i in range(0,hauteur):
cubesH.append(perspective.cuboid(P,l,l,l))
Q=cubesH[-1].c1[0]
P=(Q.x,Q.y)
    cubesP.reverse() # So that the most distant cubes are drawn first.
for i,cub in enumerate(cubesP):
cub.make_opaque()
pspict.DrawGraphs(cub)
for i,cub in enumerate(cubesL):
cub.make_opaque()
pspict.DrawGraphs(cub)
for i,cub in enumerate(cubesH):
cub.make_opaque()
pspict.DrawGraphs(cub)
fig.no_figure()
fig.conclude()
fig.write_the_file()
| gpl-3.0 | 8,860,010,446,284,391,000 | 8,542,029,223,594,591,000 | 25.209302 | 73 | 0.582076 | false |
czgu/metaHack | env/lib/python2.7/site-packages/django/core/checks/compatibility/django_1_7_0.py | 91 | 1368 | from __future__ import unicode_literals
from .. import Warning, register, Tags
@register(Tags.compatibility)
def check_1_7_compatibility(**kwargs):
errors = []
errors.extend(_check_middleware_classes(**kwargs))
return errors
def _check_middleware_classes(app_configs=None, **kwargs):
"""
Checks if the user has *not* overridden the ``MIDDLEWARE_CLASSES`` setting &
warns them about the global default changes.
"""
from django.conf import settings
# MIDDLEWARE_CLASSES is overridden by default by startproject. If users
# have removed this override then we'll warn them about the default changes.
if not settings.is_overridden('MIDDLEWARE_CLASSES'):
return [
Warning(
"MIDDLEWARE_CLASSES is not set.",
hint=("Django 1.7 changed the global defaults for the MIDDLEWARE_CLASSES. "
"django.contrib.sessions.middleware.SessionMiddleware, "
"django.contrib.auth.middleware.AuthenticationMiddleware, and "
"django.contrib.messages.middleware.MessageMiddleware were removed from the defaults. "
"If your project needs these middleware then you should configure this setting."),
obj=None,
id='1_7.W001',
)
]
else:
return []
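# Illustrative sketch: a project whose settings.py does not define
# MIDDLEWARE_CLASSES relies on the changed 1.7 defaults and will see warning
# 1_7.W001 when the checks run (e.g. via `manage.py check`); declaring the
# setting explicitly, even with the old default list, silences it.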
| apache-2.0 | -7,697,147,320,529,392,000 | -4,967,731,485,623,296,000 | 37 | 109 | 0.633041 | false |
tempbottle/gunicorn | gunicorn/app/django_wsgi.py | 87 | 4363 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
""" module used to build the django wsgi application """
from __future__ import print_function
import os
import re
import sys
import time
try:
from StringIO import StringIO
except:
from io import StringIO
from imp import reload
from django.conf import settings
from django.core.management.validation import get_validation_errors
from django.utils import translation
try:
from django.core.servers.basehttp import get_internal_wsgi_application
django14 = True
except ImportError:
from django.core.handlers.wsgi import WSGIHandler
django14 = False
from gunicorn import util
def make_wsgi_application():
# validate models
s = StringIO()
if get_validation_errors(s):
s.seek(0)
error = s.read()
msg = "One or more models did not validate:\n%s" % error
print(msg, file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
translation.activate(settings.LANGUAGE_CODE)
if django14:
return get_internal_wsgi_application()
return WSGIHandler()
def reload_django_settings():
mod = util.import_module(os.environ['DJANGO_SETTINGS_MODULE'])
# Reload module.
reload(mod)
# Reload settings.
# Use code from django.settings.Settings module.
# Settings that should be converted into tuples if they're mistakenly entered
# as strings.
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and type(setting_value) == str:
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(settings, setting, setting_value)
# Expand entries in INSTALLED_APPS like "django.contrib.*" to a list
# of all those apps.
new_installed_apps = []
for app in settings.INSTALLED_APPS:
if app.endswith('.*'):
app_mod = util.import_module(app[:-2])
appdir = os.path.dirname(app_mod.__file__)
app_subdirs = os.listdir(appdir)
name_pattern = re.compile(r'[a-zA-Z]\w*')
for d in sorted(app_subdirs):
if (name_pattern.match(d) and
os.path.isdir(os.path.join(appdir, d))):
new_installed_apps.append('%s.%s' % (app[:-2], d))
else:
new_installed_apps.append(app)
setattr(settings, "INSTALLED_APPS", new_installed_apps)
if hasattr(time, 'tzset') and settings.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root,
*(settings.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" %
settings.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = settings.TIME_ZONE
time.tzset()
# Settings are configured, so we can set up the logger if required
if getattr(settings, 'LOGGING_CONFIG', False):
# First find the logging configuration function ...
logging_config_path, logging_config_func_name = settings.LOGGING_CONFIG.rsplit('.', 1)
logging_config_module = util.import_module(logging_config_path)
logging_config_func = getattr(logging_config_module, logging_config_func_name)
# ... then invoke it with the logging settings
logging_config_func(settings.LOGGING)
def make_command_wsgi_application(admin_mediapath):
reload_django_settings()
try:
from django.core.servers.basehttp import AdminMediaHandler
return AdminMediaHandler(make_wsgi_application(), admin_mediapath)
except ImportError:
return make_wsgi_application()
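# Illustrative sketch (hypothetical project layout): with
# DJANGO_SETTINGS_MODULE=myproject.settings exported, gunicorn's Django
# integration calls make_wsgi_application() (or the command variant above)
# to obtain the WSGI callable that its workers then serve.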
| mit | 3,823,621,984,780,430,000 | -3,491,016,903,333,509,600 | 35.358333 | 98 | 0.617007 | false |
nttdata-osscloud/ceilometer | ceilometer/tests/api/v1/test_list_sources_scenarios.py | 2 | 1184 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 Julien Danjou
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test listing users.
"""
from ceilometer.tests import api as tests_api
from ceilometer.tests import db as tests_db
class TestListSource(tests_api.TestBase,
tests_db.MixinTestsWithBackendScenarios):
def test_source(self):
ydata = self.get('/sources/test_source')
self.assertIn("somekey", ydata)
self.assertEqual(666, ydata["somekey"])
def test_unknownsource(self):
ydata = self.get('/sources/test_source_that_does_not_exist')
self.assertEqual({}, ydata)
| apache-2.0 | -8,317,890,900,220,599,000 | -1,095,110,378,844,540,000 | 33.794118 | 75 | 0.708369 | false |
thilbern/scikit-learn | sklearn/neighbors/base.py | 7 | 25049 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
    Parameters
    ----------
    dist : ndarray
        The input distances
    weights : {'uniform', 'distance' or a callable}
        The kind of weighting used
    Returns
    -------
    weights_arr : array of the same shape as ``dist``
        if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
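# Illustrative sketch (hypothetical distances): with
# dist = np.asarray([[1., 2., 4.]]), _get_weights(dist, 'uniform') returns
# None (equal votes), while _get_weights(dist, 'distance') returns
# [[1., 0.5, 0.25]], i.e. closer neighbors get larger weights (1 / distance).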
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
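    # Illustrative sketch of the dispatch above (hypothetical data): a dense
    # float X with metric='minkowski' and a small n_neighbors selects
    # 'kd_tree'; a callable or exotic metric falls back to 'ball_tree';
    # sparse input, or n_neighbors >= n_samples / 2, ends up on the 'brute'
    # path.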
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
        Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to point, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = check_array(X, accept_sparse='csr')
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
j = np.arange(neigh_ind.shape[0])[:, None]
neigh_ind = neigh_ind[j, np.argsort(dist[j, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
return result
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point or points
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the euclidean distances to each point,
only present if return_distance=True.
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = check_array(X, accept_sparse='csr')
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = [np.where(d < radius)[0] for d in dist]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.effective_metric_ == 'euclidean':
dist = np.array([np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]]
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
ind, dist = results
return dist, ind
else:
return results
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_neighbors = np.array([len(a) for a in A_ind])
n_nonzero = np.sum(n_neighbors)
if A_data is None:
A_data = np.ones(n_nonzero)
A_ind = np.concatenate(list(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
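# Illustrative note (not in the upstream file): the np.unique(..., return_inverse=True)
# call above maps raw labels to contiguous integer codes per output column, e.g.
# >>> classes, codes = np.unique(np.array([3, 7, 3, 9]), return_inverse=True)
# >>> classes
# array([3, 7, 9])
# >>> codes
# array([0, 1, 0, 2])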
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause | 2,410,599,780,268,017,000 | 9,035,050,016,156,020,000 | 36.442451 | 79 | 0.532197 | false |
mjgrav2001/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 142 | 5990 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
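# Shape sanity check for the helper above (illustrative only, not an upstream test):
# >>> Y, U, V = generate_toy_data(3, 10, (8, 8), random_state=0)
# >>> Y.shape, U.shape, V.shape
# ((10, 64), (10, 3), (3, 64))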
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
    # Test that SparsePCA won't return NaN when a feature is zero in all
    # samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause | -52,713,720,362,413,400 | -3,937,411,249,919,530,500 | 35.975309 | 79 | 0.631386 | false |
Baekalfen/Helmholtz-Coil-Simulator | main_grid_treaded.py | 1 | 7586 | # -*- coding: utf-8 -*-
##from multiprocessing import Process
##from multiprocessing import Pool
from visual import *
from math import *
from sys import platform
import time
#######################################
# #
# Author: Mads Ynddal #
# All Rights Reserved 2012 #
# #
#######################################
#######################################
#
# Settings - Start
#
#Window setup:
Width=800 #Width of window
Height=750 #Height of window
### Technical specifications
FPS=60
cpu_threads=2 #Windows can only handle 1 thread
defi=1. #points per dm
grid_sizex=30
grid_sizey=1
grid_sizez=30
OP_ref_spacing=2
coil_1=1
coil_2=1
max_blen=20
hue_multiplier=2./6
strength_as_color=1
vector_pointers=1
debug_vectors3=0
debug_vectors3_multiplier=10**6
debug_offset=0
###
### Physics and mathematics
Vp=4*pi*10**-7
I=1
constant=Vp*I/(4*pi)
coiloffset=-15./2 #Distance from each coil divided by two
dm=15*8*2*pi #Domain of the coil function (rounded up)
###
#
# Settings - End
#
#######################################
#######################################
#
# INITIALIZING - Start
#
scene = display(title='Helmholtz Coil',width=Width, height=Height,autoscale = False,scale=(0.03,0.03,0.03))
vectors_threaded=[]
coil1=curve(pos=[])
coil2=curve(pos=[])
vectors=[]
col=0
P=[]
if platform=="win32":
print "WARNING! Windows can't run multiple threads!\nForcing cpu_threads to 1"
cpu_threads=1
if cpu_threads>1:
from multiprocessing import Process
from multiprocessing import Pool
#
# INITIALIZING - End
#
#######################################
#######################################
# Draw the coils from the parametric function
for tt in range(int(dm*defi)+1):
t=tt/defi
x=t*0.0005+15./2+debug_offset
y=31.5/2*sin(t)
z=31.5/2*cos(t)
if coil_1==1:
coil1.append((x,y,z))
else:
coil1.append((0,0,0))
x=t*0.0005-15./2-debug_offset
y=31.5/2*sin(t)
z=31.5/2*cos(t)
if coil_2:
coil2.append((x,y,z))
else:
coil2.append((0,0,0))
#
#######################################
#######################################
# Vector arithmetic helpers
def vlen(a):
return sqrt(a[0]**2+a[1]**2+a[2]**2)
#Vector length
def vsub(a,b):
return [a[0]-b[0],a[1]-b[1],a[2]-b[2]]
    #Subtract vectors a,b
def vadd(a,b):
return [a[0]+b[0],a[1]+b[1],a[2]+b[2]]
#Add vectors a,b
def vdiv(a,b):
return [a[0]/float(b),a[1]/float(b),a[2]/float(b)]
#Divide vector by scalar b
def cprod(a,b):
return [a[1]*b[2]-a[2]*b[1],
a[2]*b[0]-a[0]*b[2],
a[0]*b[1]-a[1]*b[0]]
#Cross product
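# Quick sanity checks for the helpers above (illustrative, not executed here):
# >>> cprod([1,0,0],[0,1,0])
# [0, 0, 1]
# >>> vlen([3,0,4])
# 5.0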
#
#######################################
#######################################
# Biot-Savart's law
def dlxrr3(dl,r):
return vdiv(cprod(dl,r),vlen(r)**3)
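# Note added for clarity (not in the original script): dlxrr3 returns the
# Biot-Savart integrand dl x r / |r|^3, with r pointing from the segment
# midpoint to the observation point, so each coil's field is approximated as
#   B = (mu0*I/(4*pi)) * sum_i (dl_i x r_i) / |r_i|^3
# The mu0*I/(4*pi) prefactor ("constant") is applied later in Apply_contant.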
def Apply_contant(Bsum1,Bsum2):
Bsum=vdiv(vadd(Bsum1,Bsum2),1/constant)
#Bsum=vdiv(vsub(Bsum1,Bsum2),1/constant)
Bsum1=vdiv(Bsum1,1/constant)
Bsum2=vdiv(Bsum2,1/constant)
return Bsum
def inte(OP):
global coiloffset,col
Bsum1=[0,0,0]
Bsum2=[0,0,0]
    #First coil
coiloffset*=-1
for tt in range(int(dm*defi)):
t=tt/float(defi)
s1,s2=s1s2(t)
dl=vsub(s2,s1)
m=vdiv(vadd(s1,s2),2)
r=vsub(OP,m)
Bsum1=vadd(Bsum1,dlxrr3(dl,r))
if not coil_1:
Bsum1=[0,0,0]
    #Second coil
coiloffset*=-1
for tt in range(int(dm*defi)):
t=tt/float(defi)
s1,s2=s1s2(t)
dl=vsub(s2,s1)
m=vdiv(vadd(s1,s2),2)
r=vsub(OP,m)
Bsum2=vadd(Bsum2,dlxrr3(dl,r))
if not coil_2:
Bsum2=[0,0,0]
return Bsum1,Bsum2
#
#######################################
#######################################
# Evaluate the coil function at parameter 't'
def s1s2(t1):
s1=[t1*0.0005+coiloffset,31.5/2*sin(t1),31.5/2*cos(t1)]
t2=t1+1/float(defi)
s2=[t2*0.0005+coiloffset,31.5/2*sin(t2),31.5/2*cos(t2)]
return s1,s2
#
#######################################
#######################################
# Compute field vector magnitude and direction
def cal_vectors(xx,yy,zz):
global vectors_threaded
P=[xx*OP_ref_spacing-((grid_sizex-1)*OP_ref_spacing)/2,yy*OP_ref_spacing-((grid_sizey-1)*OP_ref_spacing)/2,zz*OP_ref_spacing-((grid_sizez-1)*OP_ref_spacing)/2]
n=xx+yy+zz
Bsum=vdiv(Apply_contant(*inte(P)),1./debug_vectors3_multiplier)
Blen=vlen(Bsum)
return (P,Bsum,Blen)
#
#######################################
#######################################
# Distribute tasks to the CPU cores
if cpu_threads>1:
    pool = Pool(processes=cpu_threads)# start cpu_threads worker processes
result=[]
P=[]
Bsum=[]
time_stamp=time.time()
for xx in range(grid_sizex):
for yy in range(grid_sizey):
for zz in range(grid_sizez):
if cpu_threads>1:
result.append(pool.apply_async(cal_vectors, [xx,yy,zz]))
else:
vectors_threaded.append(cal_vectors(xx,yy,zz))
### Collect results from the CPU cores
if cpu_threads>1:
for n in range(grid_sizex*grid_sizey*grid_sizez):
vectors_threaded.append(result[n].get())
#
#######################################
#######################################
# Render the vector field
for n in range(len(vectors_threaded)):
P,Bsum,Blen=vectors_threaded[n]
if strength_as_color==1:
Blen=vlen(Bsum)
vcolor=color.hsv_to_rgb((1./4-(Blen*1./(max_blen/2)*hue_multiplier),1,1))
Bsum=vdiv(Bsum,Blen)
else:
vcolor=color.red
if Blen<max_blen:
curve(color=vcolor,pos=[(P[0],P[1],P[2]),(P[0]+Bsum[0],P[1]+Bsum[1],P[2]+Bsum[2])])
if vector_pointers==1:
sphere(pos=(P[0]+Bsum[0],P[1]+Bsum[1],P[2]+Bsum[2]), radius=0.1, color=color.white, opacity=1)
#
#######################################
print "Processing lasted: "+str(time.time()-time_stamp)[0:5],"sec\nUtilizing",cpu_threads,"processor threads, to animate:",grid_sizez*grid_sizey*grid_sizex,"vectors"
#######################################
# Insert the test particle and
# set up the camera
center_point = sphere (pos=(0,0,0), radius=1, color=color.red, opacity=0.5)
particle = sphere (pos=(0,0,-12), radius=1, color=color.green, opacity=0.4)
speed = label()
i=0.
auto_rotate=1
while(1):
rate(FPS)
if auto_rotate==1:
i+=1
scene.forward=(-1*sin(i/FPS/5),-1,-1*cos(i/FPS/5))
#Particle
Bsum=vdiv(Apply_contant(*inte(particle.pos)),1./debug_vectors3_multiplier)
particle.pos.x+=Bsum[0]/30.
particle.pos.y+=Bsum[1]/30.
particle.pos.z+=Bsum[2]/30.
speed.pos=particle.pos
speed.pos.x+=4
speed.text=str(vlen(Bsum))[0:3]
#Particle
    ### User input
if scene.kb.keys: # is there an event waiting to be processed?
c = scene.kb.getkey() # obtain keyboard information
if c=="r":
particle.pos=(20,0,-12)
if c=="t":
particle.pos=(0,0,-12)
if c=="y":
particle.pos=(5,0,-13)
if c=="u":
particle.pos=(14,0,-15)
if c=="w":
auto_rotate=0
scene.forward=(0,-1,0)
if c=="s":
auto_rotate=0
scene.forward=(-1,0,0)
if c=="a":
auto_rotate=0
scene.forward=(0,0,-1)
if c=="d":
if auto_rotate==0:
auto_rotate=1
else:
auto_rotate=0
#
####################################### | gpl-2.0 | -6,284,761,584,430,866,000 | 5,344,798,198,060,094,000 | 24.27 | 165 | 0.513984 | false |
gemini-testing/selenium | py/selenium/webdriver/opera/webdriver.py | 5 | 3372 | #!/usr/bin/python
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import os
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.chrome.webdriver import WebDriver as ChromiumDriver
from .options import Options
class OperaDriver(ChromiumDriver):
"""Controls the new OperaDriver and allows you
to drive the Opera browser based on Chromium."""
def __init__(self, executable_path=None, port=0,
opera_options=None, service_args=None,
desired_capabilities=None, service_log_path=None):
"""
Creates a new instance of the operadriver.
Starts the service and then creates new instance of operadriver.
:Args:
- executable_path - path to the executable. If the default is used
it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0,
a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
         - opera_options: this takes an instance of the Opera Options class
"""
executable_path = (executable_path if executable_path is not None
else "operadriver")
ChromiumDriver.__init__(self,
executable_path=executable_path,
port=port,
chrome_options=opera_options,
service_args=service_args,
desired_capabilities=desired_capabilities,
service_log_path=service_log_path)
def create_options(self):
return Options()
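# Minimal usage sketch (illustrative only, not part of this module); the Opera
# binary path below is an assumption about the local install:
#
#     from selenium.webdriver.opera.options import Options
#     opts = Options()
#     opts.binary_location = '/usr/bin/opera'
#     driver = OperaDriver(executable_path='operadriver', opera_options=opts)
#     driver.get('https://example.com')
#     driver.quit()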
class WebDriver(OperaDriver):
class ServiceType:
CHROMIUM = 2
def __init__(self,
desired_capabilities=None,
executable_path=None,
port=0,
service_log_path=None,
service_args=None,
opera_options=None):
OperaDriver.__init__(self, executable_path=executable_path,
port=port, opera_options=opera_options,
service_args=service_args,
desired_capabilities=desired_capabilities,
service_log_path=service_log_path)
| apache-2.0 | 3,725,477,175,228,219,400 | 954,162,755,879,260,500 | 39.142857 | 78 | 0.6293 | false |
kpurusho/mbed | workspace_tools/host_tests/host_tests_plugins/host_test_plugins.py | 92 | 4881 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os import access, F_OK
from sys import stdout
from time import sleep
from subprocess import call
class HostTestPluginBase:
""" Base class for all plug-ins used with host tests.
"""
###########################################################################
# Interface:
###########################################################################
###########################################################################
# Interface attributes defining plugin name, type etc.
###########################################################################
name = "HostTestPluginBase" # Plugin name, can be plugin class name
type = "BasePlugin" # Plugin type: ResetMethod, Copymethod etc.
capabilities = [] # Capabilities names: what plugin can achieve
# (e.g. reset using some external command line tool)
stable = False # Determine if plugin is stable and can be used
###########################################################################
# Interface methods
###########################################################################
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return False
def execute(self, capabilitity, *args, **kwargs):
""" Executes capability by name.
            Each capability may, for example, directly call some command line
            program or execute a built-in Python function.
"""
return False
###########################################################################
# Interface helper methods - overload only if you need to have custom behaviour
###########################################################################
def print_plugin_error(self, text):
""" Function prints error in console and exits always with False
"""
print "Plugin error: %s::%s: %s"% (self.name, self.type, text)
return False
def print_plugin_info(self, text, NL=True):
""" Function prints notification in console and exits always with True
"""
if NL:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text)
else:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text),
return True
def print_plugin_char(self, char):
""" Function prints char on stdout
"""
stdout.write(char)
stdout.flush()
return True
def check_mount_point_ready(self, destination_disk, init_delay=0.2, loop_delay=0.25):
""" Checks if destination_disk is ready and can be accessed by e.g. copy commands
@init_delay - Initial delay time before first access check
            @loop_delay - polling delay between access checks
"""
if not access(destination_disk, F_OK):
self.print_plugin_info("Waiting for mount point '%s' to be ready..."% destination_disk, NL=False)
sleep(init_delay)
while not access(destination_disk, F_OK):
sleep(loop_delay)
self.print_plugin_char('.')
def check_parameters(self, capabilitity, *args, **kwargs):
""" This function should be ran each time we call execute()
to check if none of the required parameters is missing.
"""
missing_parameters = []
for parameter in self.required_parameters:
if parameter not in kwargs:
missing_parameters.append(parameter)
if len(missing_parameters) > 0:
self.print_plugin_error("execute parameter(s) '%s' missing!"% (', '.join(parameter)))
return False
return True
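    # Illustrative subclass sketch (not part of this module); the plugin name,
    # capability and required parameters below are assumptions:
    #
    #     class HostTestPluginCopyMethod(HostTestPluginBase):
    #         name = 'CopyMethod'
    #         type = 'CopyMethod'
    #         capabilities = ['cp']
    #         required_parameters = ['image_path', 'destination_disk']
    #
    #         def execute(self, capability, *args, **kwargs):
    #             if not self.check_parameters(capability, *args, **kwargs):
    #                 return False
    #             self.check_mount_point_ready(kwargs['destination_disk'])
    #             cmd = 'cp %s %s' % (kwargs['image_path'],
    #                                 kwargs['destination_disk'])
    #             return self.run_command(cmd)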
def run_command(self, cmd, shell=True):
""" Runs command from command line.
"""
result = True
ret = 0
try:
ret = call(cmd, shell=shell)
if ret:
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
return False
except Exception as e:
result = False
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
self.print_plugin_error(str(e))
return result
| apache-2.0 | 8,811,467,396,183,610,000 | -1,647,910,175,727,146,000 | 40.016807 | 109 | 0.535751 | false |
kmonsoor/python-for-android | python-modules/twisted/twisted/words/im/ircsupport.py | 49 | 9263 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
IRC support for Instance Messenger.
"""
import string
from twisted.words.protocols import irc
from twisted.words.im.locals import ONLINE
from twisted.internet import defer, reactor, protocol
from twisted.internet.defer import succeed
from twisted.words.im import basesupport, interfaces, locals
from zope.interface import implements
class IRCPerson(basesupport.AbstractPerson):
def imperson_whois(self):
if self.account.client is None:
raise locals.OfflineError
self.account.client.sendLine("WHOIS %s" % self.name)
### interface impl
def isOnline(self):
return ONLINE
def getStatus(self):
return ONLINE
def setStatus(self,status):
self.status=status
self.chat.getContactsList().setContactStatus(self)
def sendMessage(self, text, meta=None):
if self.account.client is None:
raise locals.OfflineError
for line in string.split(text, '\n'):
if meta and meta.get("style", None) == "emote":
self.account.client.ctcpMakeQuery(self.name,[('ACTION', line)])
else:
self.account.client.msg(self.name, line)
return succeed(text)
class IRCGroup(basesupport.AbstractGroup):
implements(interfaces.IGroup)
def imgroup_testAction(self):
pass
def imtarget_kick(self, target):
if self.account.client is None:
raise locals.OfflineError
reason = "for great justice!"
self.account.client.sendLine("KICK #%s %s :%s" % (
self.name, target.name, reason))
### Interface Implementation
def setTopic(self, topic):
if self.account.client is None:
raise locals.OfflineError
self.account.client.topic(self.name, topic)
def sendGroupMessage(self, text, meta={}):
if self.account.client is None:
raise locals.OfflineError
if meta and meta.get("style", None) == "emote":
self.account.client.me(self.name,text)
return succeed(text)
#standard shmandard, clients don't support plain escaped newlines!
for line in string.split(text, '\n'):
self.account.client.say(self.name, line)
return succeed(text)
def leave(self):
if self.account.client is None:
raise locals.OfflineError
self.account.client.leave(self.name)
self.account.client.getGroupConversation(self.name,1)
class IRCProto(basesupport.AbstractClientMixin, irc.IRCClient):
def __init__(self, account, chatui, logonDeferred=None):
basesupport.AbstractClientMixin.__init__(self, account, chatui,
logonDeferred)
self._namreplies={}
self._ingroups={}
self._groups={}
self._topics={}
def getGroupConversation(self, name, hide=0):
name=string.lower(name)
return self.chat.getGroupConversation(self.chat.getGroup(name, self),
stayHidden=hide)
def getPerson(self,name):
return self.chat.getPerson(name, self)
def connectionMade(self):
# XXX: Why do I duplicate code in IRCClient.register?
try:
if self.account.password:
self.sendLine("PASS :%s" % self.account.password)
self.setNick(self.account.username)
self.sendLine("USER %s foo bar :Twisted-IM user" % (
self.account.username,))
for channel in self.account.channels:
self.joinGroup(channel)
self.account._isOnline=1
if self._logonDeferred is not None:
self._logonDeferred.callback(self)
self.chat.getContactsList()
except:
import traceback
traceback.print_exc()
def setNick(self,nick):
self.name=nick
self.accountName="%s (IRC)"%nick
irc.IRCClient.setNick(self,nick)
def kickedFrom(self, channel, kicker, message):
"""
Called when I am kicked from a channel.
"""
return self.chat.getGroupConversation(
self.chat.getGroup(channel[1:], self), 1)
def userKicked(self, kickee, channel, kicker, message):
pass
def noticed(self, username, channel, message):
self.privmsg(username, channel, message, {"dontAutoRespond": 1})
def privmsg(self, username, channel, message, metadata=None):
if metadata is None:
metadata = {}
username=string.split(username,'!',1)[0]
if username==self.name: return
if channel[0]=='#':
group=channel[1:]
self.getGroupConversation(group).showGroupMessage(username, message, metadata)
return
self.chat.getConversation(self.getPerson(username)).showMessage(message, metadata)
def action(self,username,channel,emote):
username=string.split(username,'!',1)[0]
if username==self.name: return
meta={'style':'emote'}
if channel[0]=='#':
group=channel[1:]
self.getGroupConversation(group).showGroupMessage(username, emote, meta)
return
self.chat.getConversation(self.getPerson(username)).showMessage(emote,meta)
def irc_RPL_NAMREPLY(self,prefix,params):
"""
RPL_NAMREPLY
>> NAMES #bnl
<< :Arlington.VA.US.Undernet.Org 353 z3p = #bnl :pSwede Dan-- SkOyg AG
"""
group=string.lower(params[2][1:])
users=string.split(params[3])
for ui in range(len(users)):
while users[ui][0] in ["@","+"]: # channel modes
users[ui]=users[ui][1:]
if not self._namreplies.has_key(group):
self._namreplies[group]=[]
self._namreplies[group].extend(users)
for nickname in users:
try:
self._ingroups[nickname].append(group)
except:
self._ingroups[nickname]=[group]
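    # Worked example for the handler above (illustrative, not in the original):
    # for the reply ':server 353 z3p = #bnl :pSwede Dan-- SkOyg AG', Twisted
    # hands this method params == ['z3p', '=', '#bnl', 'pSwede Dan-- SkOyg AG'],
    # so group becomes 'bnl' and users becomes ['pSwede', 'Dan--', 'SkOyg', 'AG'].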
def irc_RPL_ENDOFNAMES(self,prefix,params):
group=params[1][1:]
self.getGroupConversation(group).setGroupMembers(self._namreplies[string.lower(group)])
del self._namreplies[string.lower(group)]
def irc_RPL_TOPIC(self,prefix,params):
self._topics[params[1][1:]]=params[2]
def irc_333(self,prefix,params):
group=params[1][1:]
self.getGroupConversation(group).setTopic(self._topics[group],params[2])
del self._topics[group]
def irc_TOPIC(self,prefix,params):
nickname = string.split(prefix,"!")[0]
group = params[0][1:]
topic = params[1]
self.getGroupConversation(group).setTopic(topic,nickname)
def irc_JOIN(self,prefix,params):
nickname=string.split(prefix,"!")[0]
group=string.lower(params[0][1:])
if nickname!=self.nickname:
try:
self._ingroups[nickname].append(group)
except:
self._ingroups[nickname]=[group]
self.getGroupConversation(group).memberJoined(nickname)
def irc_PART(self,prefix,params):
nickname=string.split(prefix,"!")[0]
group=string.lower(params[0][1:])
if nickname!=self.nickname:
if group in self._ingroups[nickname]:
self._ingroups[nickname].remove(group)
self.getGroupConversation(group).memberLeft(nickname)
def irc_QUIT(self,prefix,params):
nickname=string.split(prefix,"!")[0]
if self._ingroups.has_key(nickname):
for group in self._ingroups[nickname]:
self.getGroupConversation(group).memberLeft(nickname)
self._ingroups[nickname]=[]
def irc_NICK(self, prefix, params):
fromNick = string.split(prefix, "!")[0]
toNick = params[0]
if not self._ingroups.has_key(fromNick):
return
for group in self._ingroups[fromNick]:
self.getGroupConversation(group).memberChangedNick(fromNick, toNick)
self._ingroups[toNick] = self._ingroups[fromNick]
del self._ingroups[fromNick]
def irc_unknown(self, prefix, command, params):
pass
# GTKIM calls
def joinGroup(self,name):
self.join(name)
self.getGroupConversation(name)
class IRCAccount(basesupport.AbstractAccount):
implements(interfaces.IAccount)
gatewayType = "IRC"
_groupFactory = IRCGroup
_personFactory = IRCPerson
def __init__(self, accountName, autoLogin, username, password, host, port,
channels=''):
basesupport.AbstractAccount.__init__(self, accountName, autoLogin,
username, password, host, port)
self.channels = map(string.strip,string.split(channels,','))
if self.channels == ['']:
self.channels = []
def _startLogOn(self, chatui):
logonDeferred = defer.Deferred()
cc = protocol.ClientCreator(reactor, IRCProto, self, chatui,
logonDeferred)
d = cc.connectTCP(self.host, self.port)
d.addErrback(logonDeferred.errback)
return logonDeferred
| apache-2.0 | 8,593,951,383,689,100,000 | 1,453,562,520,052,827,400 | 34.220532 | 95 | 0.60855 | false |
karlbright/beets | beets/mediafile.py | 1 | 36961 | # This file is part of beets.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Handles low-level interfacing for files' tags. Wraps Mutagen to
automatically detect file types and provide a unified interface for a
useful subset of music files' tags.
Usage:
>>> f = MediaFile('Lucy.mp3')
>>> f.title
u'Lucy in the Sky with Diamonds'
>>> f.artist = 'The Beatles'
>>> f.save()
A field will always return a reasonable value of the correct type, even
if no tag is present. If no value is available, the value will be false
(e.g., zero or the empty string).
"""
import mutagen
import mutagen.mp3
import mutagen.oggvorbis
import mutagen.mp4
import mutagen.flac
import mutagen.monkeysaudio
import datetime
import re
import base64
import imghdr
import os
import logging
import traceback
from beets.util.enumeration import enum
__all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile']
# Logger.
log = logging.getLogger('beets')
# Exceptions.
# Raised for any file MediaFile can't read.
class UnreadableFileError(IOError):
pass
# Raised for files that don't seem to have a type MediaFile supports.
class FileTypeError(UnreadableFileError):
pass
# Constants.
# Human-readable type names.
TYPES = {
'mp3': 'MP3',
'mp4': 'AAC',
'ogg': 'OGG',
'flac': 'FLAC',
'ape': 'APE',
'wv': 'WavPack',
'mpc': 'Musepack',
}
# Utility.
def _safe_cast(out_type, val):
"""Tries to covert val to out_type but will never raise an
exception. If the value can't be converted, then a sensible
default value is returned. out_type should be bool, int, or
unicode; otherwise, the value is just passed through.
"""
if out_type == int:
if val is None:
return 0
elif isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if not isinstance(val, basestring):
val = unicode(val)
# Get a number from the front of the string.
val = re.match(r'[0-9]*', val.strip()).group(0)
if not val:
return 0
else:
return int(val)
elif out_type == bool:
if val is None:
return False
else:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == unicode:
if val is None:
return u''
else:
if isinstance(val, str):
return val.decode('utf8', 'ignore')
elif isinstance(val, unicode):
return val
else:
return unicode(val)
elif out_type == float:
if val is None:
return 0.0
elif isinstance(val, int) or isinstance(val, float):
return float(val)
else:
if not isinstance(val, basestring):
val = unicode(val)
val = re.match(r'[\+-]?[0-9\.]*', val.strip()).group(0)
if not val:
return 0.0
else:
return float(val)
else:
return val
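# A few illustrative conversions (not in the original file) showing the
# "never raise" behaviour of _safe_cast:
# >>> _safe_cast(int, '12/14')     # keeps only the leading digits
# 12
# >>> _safe_cast(int, None)
# 0
# >>> _safe_cast(bool, u'1')
# True
# >>> _safe_cast(float, '-3.5 dB')
# -3.5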
# Flags for encoding field behavior.
# Determine style of packing, if any.
packing = enum('SLASHED', # pair delimited by /
'TUPLE', # a python tuple of 2 items
'DATE', # YYYY-MM-DD
name='packing')
class StorageStyle(object):
"""Parameterizes the storage behavior of a single field for a
certain tag format.
- key: The Mutagen key used to access the field's data.
- list_elem: Store item as a single object or as first element
of a list.
- as_type: Which type the value is stored as (unicode, int,
bool, or str).
- packing: If this value is packed in a multiple-value storage
unit, which type of packing (in the packing enum). Otherwise,
None. (Makes as_type irrelevant).
- pack_pos: If the value is packed, in which position it is
stored.
- ID3 storage only: match against this 'desc' field as well
as the key.
"""
def __init__(self, key, list_elem = True, as_type = unicode,
packing = None, pack_pos = 0, id3_desc = None,
id3_frame_field = 'text'):
self.key = key
self.list_elem = list_elem
self.as_type = as_type
self.packing = packing
self.pack_pos = pack_pos
self.id3_desc = id3_desc
self.id3_frame_field = id3_frame_field
# Dealing with packings.
class Packed(object):
"""Makes a packed list of values subscriptable. To access the packed
output after making changes, use packed_thing.items.
"""
def __init__(self, items, packstyle, none_val=0, out_type=int):
"""Create a Packed object for subscripting the packed values in
items. The items are packed using packstyle, which is a value
from the packing enum. none_val is returned from a request when
no suitable value is found in the items. Vales are converted to
out_type before they are returned.
"""
self.items = items
self.packstyle = packstyle
self.none_val = none_val
self.out_type = out_type
def __getitem__(self, index):
if not isinstance(index, int):
raise TypeError('index must be an integer')
if self.items is None:
return self.none_val
items = self.items
if self.packstyle == packing.DATE:
# Remove time information from dates. Usually delimited by
# a "T" or a space.
items = re.sub(r'[Tt ].*$', '', unicode(items))
# transform from a string packing into a list we can index into
if self.packstyle == packing.SLASHED:
seq = unicode(items).split('/')
elif self.packstyle == packing.DATE:
seq = unicode(items).split('-')
elif self.packstyle == packing.TUPLE:
seq = items # tuple: items is already indexable
try:
out = seq[index]
except:
out = None
if out is None or out == self.none_val or out == '':
return _safe_cast(self.out_type, self.none_val)
else:
return _safe_cast(self.out_type, out)
def __setitem__(self, index, value):
if self.packstyle in (packing.SLASHED, packing.TUPLE):
# SLASHED and TUPLE are always two-item packings
length = 2
else:
# DATE can have up to three fields
length = 3
# make a list of the items we'll pack
new_items = []
for i in range(length):
if i == index:
next_item = value
else:
next_item = self[i]
new_items.append(next_item)
if self.packstyle == packing.DATE:
# Truncate the items wherever we reach an invalid (none)
# entry. This prevents dates like 2008-00-05.
for i, item in enumerate(new_items):
if item == self.none_val or item is None:
del(new_items[i:]) # truncate
break
if self.packstyle == packing.SLASHED:
self.items = '/'.join(map(unicode, new_items))
elif self.packstyle == packing.DATE:
field_lengths = [4, 2, 2] # YYYY-MM-DD
elems = []
for i, item in enumerate(new_items):
elems.append( ('%0' + str(field_lengths[i]) + 'i') % item )
self.items = '-'.join(elems)
elif self.packstyle == packing.TUPLE:
self.items = new_items
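# Illustrative behaviour of Packed (added for clarity, not in the original):
# >>> p = Packed(u'3/12', packing.SLASHED)
# >>> p[0], p[1]
# (3, 12)
# >>> p[1] = 14
# >>> p.items
# u'3/14'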
# The field itself.
class MediaField(object):
"""A descriptor providing access to a particular (abstract) metadata
field. out_type is the type that users of MediaFile should see and
can be unicode, int, or bool. id3, mp4, and flac are StorageStyle
instances parameterizing the field's storage for each type.
"""
def __init__(self, out_type = unicode, **kwargs):
"""Creates a new MediaField.
- out_type: The field's semantic (exterior) type.
- kwargs: A hash whose keys are 'mp3', 'mp4', and 'etc'
and whose values are StorageStyle instances
parameterizing the field's storage for each type.
"""
self.out_type = out_type
if not set(['mp3', 'mp4', 'etc']) == set(kwargs):
raise TypeError('MediaField constructor must have keyword '
'arguments mp3, mp4, and etc')
self.styles = kwargs
def _fetchdata(self, obj, style):
"""Get the value associated with this descriptor's field stored
with the given StorageStyle. Unwraps from a list if necessary.
"""
# fetch the value, which may be a scalar or a list
if obj.type == 'mp3':
if style.id3_desc is not None: # also match on 'desc' field
frames = obj.mgfile.tags.getall(style.key)
entry = None
for frame in frames:
if frame.desc.lower() == style.id3_desc.lower():
entry = getattr(frame, style.id3_frame_field)
break
if entry is None: # no desc match
return None
else:
# Get the metadata frame object.
try:
frame = obj.mgfile[style.key]
except KeyError:
return None
entry = getattr(frame, style.id3_frame_field)
else: # Not MP3.
try:
entry = obj.mgfile[style.key]
except KeyError:
return None
# possibly index the list
if style.list_elem:
if entry: # List must have at least one value.
return entry[0]
else:
return None
else:
return entry
def _storedata(self, obj, val, style):
"""Store val for this descriptor's field in the tag dictionary
according to the provided StorageStyle. Store it as a
single-item list if necessary.
"""
# wrap as a list if necessary
if style.list_elem: out = [val]
else: out = val
if obj.type == 'mp3':
# Try to match on "desc" field.
if style.id3_desc is not None:
frames = obj.mgfile.tags.getall(style.key)
# try modifying in place
found = False
for frame in frames:
if frame.desc.lower() == style.id3_desc.lower():
setattr(frame, style.id3_frame_field, out)
found = True
break
# need to make a new frame?
if not found:
assert isinstance(style.id3_frame_field, str) # Keyword.
frame = mutagen.id3.Frames[style.key](
encoding=3,
desc=style.id3_desc,
**{style.id3_frame_field: val}
)
obj.mgfile.tags.add(frame)
# Try to match on "owner" field.
elif style.key.startswith('UFID:'):
owner = style.key.split(':', 1)[1]
frames = obj.mgfile.tags.getall(style.key)
for frame in frames:
# Replace existing frame data.
if frame.owner == owner:
setattr(frame, style.id3_frame_field, val)
else:
# New frame.
assert isinstance(style.id3_frame_field, str) # Keyword.
frame = mutagen.id3.UFID(owner=owner,
**{style.id3_frame_field: val})
obj.mgfile.tags.setall('UFID', [frame])
# Just replace based on key.
else:
assert isinstance(style.id3_frame_field, str) # Keyword.
frame = mutagen.id3.Frames[style.key](encoding = 3,
**{style.id3_frame_field: val})
obj.mgfile.tags.setall(style.key, [frame])
else: # Not MP3.
obj.mgfile[style.key] = out
def _styles(self, obj):
if obj.type in ('mp3', 'mp4'):
styles = self.styles[obj.type]
else:
styles = self.styles['etc'] # sane styles
# Make sure we always return a list of styles, even when given
# a single style for convenience.
if isinstance(styles, StorageStyle):
return [styles]
else:
return styles
def __get__(self, obj, owner):
"""Retrieve the value of this metadata field.
"""
# Fetch the data using the various StorageStyles.
styles = self._styles(obj)
if styles is None:
out = None
else:
for style in styles:
# Use the first style that returns a reasonable value.
out = self._fetchdata(obj, style)
if out:
break
if style.packing:
out = Packed(out, style.packing)[style.pack_pos]
# MPEG-4 freeform frames are (should be?) encoded as UTF-8.
if obj.type == 'mp4' and style.key.startswith('----:') and \
isinstance(out, str):
out = out.decode('utf8')
return _safe_cast(self.out_type, out)
def __set__(self, obj, val):
"""Set the value of this metadata field.
"""
# Store using every StorageStyle available.
styles = self._styles(obj)
if styles is None:
return
for style in styles:
if style.packing:
p = Packed(self._fetchdata(obj, style), style.packing)
p[style.pack_pos] = val
out = p.items
else: # unicode, integer, or boolean scalar
out = val
# deal with Nones according to abstract type if present
if out is None:
if self.out_type == int:
out = 0
elif self.out_type == bool:
out = False
elif self.out_type == unicode:
out = u''
# We trust that packed values are handled above.
# Convert to correct storage type (irrelevant for
# packed values).
if style.as_type == unicode:
if out is None:
out = u''
else:
if self.out_type == bool:
# store bools as 1,0 instead of True,False
out = unicode(int(out))
else:
out = unicode(out)
elif style.as_type == int:
if out is None:
out = 0
else:
out = int(out)
elif style.as_type in (bool, str):
out = style.as_type(out)
# MPEG-4 "freeform" (----) frames must be encoded as UTF-8
# byte strings.
if obj.type == 'mp4' and style.key.startswith('----:') and \
isinstance(out, unicode):
out = out.encode('utf8')
# Store the data.
self._storedata(obj, out, style)
class CompositeDateField(object):
"""A MediaFile field for conveniently accessing the year, month, and
day fields as a datetime.date object. Allows both getting and
setting of the component fields.
"""
def __init__(self, year_field, month_field, day_field):
"""Create a new date field from the indicated MediaFields for
the component values.
"""
self.year_field = year_field
self.month_field = month_field
self.day_field = day_field
def __get__(self, obj, owner):
"""Return a datetime.date object whose components indicating the
smallest valid date whose components are at least as large as
the three component fields (that is, if year == 1999, month == 0,
and day == 0, then date == datetime.date(1999, 1, 1)). If the
components indicate an invalid date (e.g., if month == 47),
datetime.date.min is returned.
"""
try:
return datetime.date(
max(self.year_field.__get__(obj, owner), datetime.MINYEAR),
max(self.month_field.__get__(obj, owner), 1),
max(self.day_field.__get__(obj, owner), 1)
)
except ValueError: # Out of range values.
return datetime.date.min
def __set__(self, obj, val):
"""Set the year, month, and day fields to match the components of
the provided datetime.date object.
"""
self.year_field.__set__(obj, val.year)
self.month_field.__set__(obj, val.month)
self.day_field.__set__(obj, val.day)
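# Illustrative use of the composite date (not in the original; the path is
# hypothetical):
# >>> f = MediaFile('Lucy.mp3')
# >>> f.date = datetime.date(1999, 10, 2)
# >>> f.year, f.month, f.day
# (1999, 10, 2)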
class ImageField(object):
"""A descriptor providing access to a file's embedded album art.
Holds a bytestring reflecting the image data. The image should
either be a JPEG or a PNG for cross-format compatibility. It's
probably a bad idea to use anything but these two formats.
"""
@classmethod
def _mime(cls, data):
"""Return the MIME type (either image/png or image/jpeg) of the
image data (a bytestring).
"""
kind = imghdr.what(None, h=data)
if kind == 'png':
return 'image/png'
else:
# Currently just fall back to JPEG.
return 'image/jpeg'
@classmethod
def _mp4kind(cls, data):
"""Return the MPEG-4 image type code of the data. If the image
is not a PNG or JPEG, JPEG is assumed.
"""
kind = imghdr.what(None, h=data)
if kind == 'png':
return mutagen.mp4.MP4Cover.FORMAT_PNG
else:
return mutagen.mp4.MP4Cover.FORMAT_JPEG
def __get__(self, obj, owner):
if obj.type == 'mp3':
# Look for APIC frames.
for frame in obj.mgfile.tags.values():
if frame.FrameID == 'APIC':
picframe = frame
break
else:
# No APIC frame.
return None
return picframe.data
elif obj.type == 'mp4':
if 'covr' in obj.mgfile:
covers = obj.mgfile['covr']
if covers:
cover = covers[0]
# cover is an MP4Cover, which is a subclass of str.
return cover
# No cover found.
return None
else:
# Here we're assuming everything but MP3 and MPEG-4 uses
# the Xiph/Vorbis Comments standard. This may not be valid.
# http://wiki.xiph.org/VorbisComment#Cover_art
if 'metadata_block_picture' not in obj.mgfile:
# Try legacy COVERART tags.
if 'coverart' in obj.mgfile and obj.mgfile['coverart']:
return base64.b64decode(obj.mgfile['coverart'][0])
return None
for data in obj.mgfile["metadata_block_picture"]:
try:
pic = mutagen.flac.Picture(base64.b64decode(data))
break
except TypeError:
pass
else:
return None
return pic.data
def __set__(self, obj, val):
if val is not None:
if not isinstance(val, str):
raise ValueError('value must be a byte string or None')
if obj.type == 'mp3':
# Clear all APIC frames.
obj.mgfile.tags.delall('APIC')
if val is None:
# If we're clearing the image, we're done.
return
picframe = mutagen.id3.APIC(
encoding = 3,
mime = self._mime(val),
type = 3, # front cover
desc = u'',
data = val,
)
obj.mgfile['APIC'] = picframe
elif obj.type == 'mp4':
if val is None:
if 'covr' in obj.mgfile:
del obj.mgfile['covr']
else:
cover = mutagen.mp4.MP4Cover(val, self._mp4kind(val))
obj.mgfile['covr'] = [cover]
else:
# Again, assuming Vorbis Comments standard.
# Strip all art, including legacy COVERART.
if 'metadata_block_picture' in obj.mgfile:
if 'metadata_block_picture' in obj.mgfile:
del obj.mgfile['metadata_block_picture']
if 'coverart' in obj.mgfile:
del obj.mgfile['coverart']
if 'coverartmime' in obj.mgfile:
del obj.mgfile['coverartmime']
# Add new art if provided.
if val is not None:
pic = mutagen.flac.Picture()
pic.data = val
pic.mime = self._mime(val)
obj.mgfile['metadata_block_picture'] = [
base64.b64encode(pic.write())
]
class FloatValueField(MediaField):
"""A field that stores a floating-point number as a string."""
def __init__(self, places=2, suffix=None, **kwargs):
"""Make a field that stores ``places`` digits after the decimal
point and appends ``suffix`` (if specified) when encoding as a
string.
"""
super(FloatValueField, self).__init__(unicode, **kwargs)
fmt = ['%.', str(places), 'f']
if suffix:
fmt += [' ', suffix]
self.fmt = ''.join(fmt)
def __get__(self, obj, owner):
valstr = super(FloatValueField, self).__get__(obj, owner)
return _safe_cast(float, valstr)
def __set__(self, obj, val):
if not val:
val = 0.0
valstr = self.fmt % val
super(FloatValueField, self).__set__(obj, valstr)
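# Formatting sketch for FloatValueField (illustrative, not in the original):
# a field declared with places=2 and suffix='dB' stores the float 2.5 as the
# tag string u'2.50 dB', and reading it back through _safe_cast(float, ...)
# yields 2.5 again.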
# The file (a collection of fields).
class MediaFile(object):
"""Represents a multimedia file on disk and provides access to its
metadata.
"""
def __init__(self, path):
"""Constructs a new MediaFile reflecting the file at path. May
throw UnreadableFileError.
"""
self.path = path
unreadable_exc = (
mutagen.mp3.HeaderNotFoundError,
mutagen.flac.FLACNoHeaderError,
mutagen.monkeysaudio.MonkeysAudioHeaderError,
mutagen.mp4.MP4StreamInfoError,
mutagen.oggvorbis.OggVorbisHeaderError,
)
try:
self.mgfile = mutagen.File(path)
except unreadable_exc:
log.warn('header parsing failed')
raise UnreadableFileError('Mutagen could not read file')
except IOError:
raise UnreadableFileError('could not read file')
except:
# Hide bugs in Mutagen.
log.error('uncaught Mutagen exception:\n' + traceback.format_exc())
raise UnreadableFileError('Mutagen raised an exception')
if self.mgfile is None: # Mutagen couldn't guess the type
raise FileTypeError('file type unsupported by Mutagen')
elif type(self.mgfile).__name__ == 'M4A' or \
type(self.mgfile).__name__ == 'MP4':
self.type = 'mp4'
elif type(self.mgfile).__name__ == 'ID3' or \
type(self.mgfile).__name__ == 'MP3':
self.type = 'mp3'
elif type(self.mgfile).__name__ == 'FLAC':
self.type = 'flac'
elif type(self.mgfile).__name__ == 'OggVorbis':
self.type = 'ogg'
elif type(self.mgfile).__name__ == 'MonkeysAudio':
self.type = 'ape'
elif type(self.mgfile).__name__ == 'WavPack':
self.type = 'wv'
elif type(self.mgfile).__name__ == 'Musepack':
self.type = 'mpc'
else:
raise FileTypeError('file type %s unsupported by MediaFile' %
type(self.mgfile).__name__)
# add a set of tags if it's missing
if self.mgfile.tags is None:
self.mgfile.add_tags()
def save(self):
self.mgfile.save()
#### field definitions ####
title = MediaField(
mp3 = StorageStyle('TIT2'),
mp4 = StorageStyle("\xa9nam"),
etc = StorageStyle('title'),
)
artist = MediaField(
mp3 = StorageStyle('TPE1'),
mp4 = StorageStyle("\xa9ART"),
etc = StorageStyle('artist'),
)
album = MediaField(
mp3 = StorageStyle('TALB'),
mp4 = StorageStyle("\xa9alb"),
etc = StorageStyle('album'),
)
genre = MediaField(
mp3 = StorageStyle('TCON'),
mp4 = StorageStyle("\xa9gen"),
etc = StorageStyle('genre'),
)
composer = MediaField(
mp3 = StorageStyle('TCOM'),
mp4 = StorageStyle("\xa9wrt"),
etc = StorageStyle('composer'),
)
grouping = MediaField(
mp3 = StorageStyle('TIT1'),
mp4 = StorageStyle("\xa9grp"),
etc = StorageStyle('grouping'),
)
year = MediaField(out_type=int,
mp3 = StorageStyle('TDRC',
packing = packing.DATE,
pack_pos = 0),
mp4 = StorageStyle("\xa9day",
packing = packing.DATE,
pack_pos = 0),
etc = [StorageStyle('date',
packing = packing.DATE,
pack_pos = 0),
StorageStyle('year')]
)
month = MediaField(out_type=int,
mp3 = StorageStyle('TDRC',
packing = packing.DATE,
pack_pos = 1),
mp4 = StorageStyle("\xa9day",
packing = packing.DATE,
pack_pos = 1),
etc = StorageStyle('date',
packing = packing.DATE,
pack_pos = 1)
)
day = MediaField(out_type=int,
mp3 = StorageStyle('TDRC',
packing = packing.DATE,
pack_pos = 2),
mp4 = StorageStyle("\xa9day",
packing = packing.DATE,
pack_pos = 2),
etc = StorageStyle('date',
packing = packing.DATE,
pack_pos = 2)
)
date = CompositeDateField(year, month, day)
track = MediaField(out_type = int,
mp3 = StorageStyle('TRCK',
packing = packing.SLASHED,
pack_pos = 0),
mp4 = StorageStyle('trkn',
packing = packing.TUPLE,
pack_pos = 0),
etc = [StorageStyle('track'),
StorageStyle('tracknumber')]
)
tracktotal = MediaField(out_type = int,
mp3 = StorageStyle('TRCK',
packing = packing.SLASHED,
pack_pos = 1),
mp4 = StorageStyle('trkn',
packing = packing.TUPLE,
pack_pos = 1),
etc = [StorageStyle('tracktotal'),
StorageStyle('trackc'),
StorageStyle('totaltracks')]
)
disc = MediaField(out_type = int,
mp3 = StorageStyle('TPOS',
packing = packing.SLASHED,
pack_pos = 0),
mp4 = StorageStyle('disk',
packing = packing.TUPLE,
pack_pos = 0),
etc = [StorageStyle('disc'),
StorageStyle('discnumber')]
)
disctotal = MediaField(out_type = int,
mp3 = StorageStyle('TPOS',
packing = packing.SLASHED,
pack_pos = 1),
mp4 = StorageStyle('disk',
packing = packing.TUPLE,
pack_pos = 1),
etc = [StorageStyle('disctotal'),
StorageStyle('discc'),
StorageStyle('totaldiscs')]
)
lyrics = MediaField(
mp3 = StorageStyle('USLT',
list_elem = False,
id3_desc = u''),
mp4 = StorageStyle("\xa9lyr"),
etc = StorageStyle('lyrics')
)
comments = MediaField(
mp3 = StorageStyle('COMM', id3_desc = u''),
mp4 = StorageStyle("\xa9cmt"),
etc = [StorageStyle('description'),
StorageStyle('comment')]
)
bpm = MediaField(out_type = int,
mp3 = StorageStyle('TBPM'),
mp4 = StorageStyle('tmpo', as_type = int),
etc = StorageStyle('bpm')
)
comp = MediaField(out_type = bool,
mp3 = StorageStyle('TCMP'),
mp4 = StorageStyle('cpil',
list_elem = False,
as_type = bool),
etc = StorageStyle('compilation')
)
albumartist = MediaField(
mp3 = StorageStyle('TPE2'),
mp4 = StorageStyle('aART'),
etc = [StorageStyle('album artist'),
StorageStyle('albumartist')]
)
albumtype = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'MusicBrainz Album Type'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Type'),
etc = StorageStyle('musicbrainz_albumtype')
)
label = MediaField(
mp3 = StorageStyle('TPUB'),
mp4 = [StorageStyle('----:com.apple.iTunes:Label'),
StorageStyle('----:com.apple.iTunes:publisher')],
etc = [StorageStyle('label'),
StorageStyle('publisher')] # Traktor
)
# Album art.
art = ImageField()
# MusicBrainz IDs.
mb_trackid = MediaField(
mp3 = StorageStyle('UFID:http://musicbrainz.org',
list_elem = False,
id3_frame_field = 'data'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Track Id',
as_type=str),
etc = StorageStyle('musicbrainz_trackid')
)
mb_albumid = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'MusicBrainz Album Id'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Id',
as_type=str),
etc = StorageStyle('musicbrainz_albumid')
)
mb_artistid = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'MusicBrainz Artist Id'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Artist Id',
as_type=str),
etc = StorageStyle('musicbrainz_artistid')
)
mb_albumartistid = MediaField(
mp3 = StorageStyle('TXXX',
id3_desc=u'MusicBrainz Album Artist Id'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Artist Id',
as_type=str),
etc = StorageStyle('musicbrainz_albumartistid')
)
# ReplayGain fields.
rg_track_gain = FloatValueField(2, 'dB',
mp3 = StorageStyle('TXXX',
id3_desc=u'REPLAYGAIN_TRACK_GAIN'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_TRACK_GAIN')
)
rg_album_gain = FloatValueField(2, 'dB',
mp3 = StorageStyle('TXXX',
id3_desc=u'REPLAYGAIN_ALBUM_GAIN'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_ALBUM_GAIN')
)
rg_track_peak = FloatValueField(6, None,
mp3 = StorageStyle('TXXX',
id3_desc=u'REPLAYGAIN_TRACK_PEAK'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_TRACK_PEAK')
)
rg_album_peak = FloatValueField(6, None,
mp3 = StorageStyle('TXXX',
id3_desc=u'REPLAYGAIN_ALBUM_PEAK'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_ALBUM_PEAK')
)
@property
def length(self):
"""The duration of the audio in seconds (a float)."""
return self.mgfile.info.length
@property
def samplerate(self):
"""The audio's sample rate (an int)."""
if hasattr(self.mgfile.info, 'sample_rate'):
return self.mgfile.info.sample_rate
return 0
@property
def bitdepth(self):
"""The number of bits per sample in the audio encoding (an int).
Only available for certain file formats (zero where
unavailable).
"""
if hasattr(self.mgfile.info, 'bits_per_sample'):
return self.mgfile.info.bits_per_sample
return 0
@property
def channels(self):
"""The number of channels in the audio (an int)."""
if isinstance(self.mgfile.info, mutagen.mp3.MPEGInfo):
return {
mutagen.mp3.STEREO: 2,
mutagen.mp3.JOINTSTEREO: 2,
mutagen.mp3.DUALCHANNEL: 2,
mutagen.mp3.MONO: 1,
}[self.mgfile.info.mode]
if hasattr(self.mgfile.info, 'channels'):
return self.mgfile.info.channels
return 0
@property
def bitrate(self):
"""The number of bits per seconds used in the audio coding (an
int). If this is provided explicitly by the compressed file
format, this is a precise reflection of the encoding. Otherwise,
it is estimated from the on-disk file size. In this case, some
imprecision is possible because the file header is incorporated
in the file size.
"""
if hasattr(self.mgfile.info, 'bitrate') and self.mgfile.info.bitrate:
# Many formats provide it explicitly.
return self.mgfile.info.bitrate
else:
# Otherwise, we calculate bitrate from the file size. (This
# is the case for all of the lossless formats.)
if not self.length:
# Avoid division by zero if length is not available.
return 0
size = os.path.getsize(self.path)
return int(size * 8 / self.length)
@property
def format(self):
"""A string describing the file format/codec."""
return TYPES[self.type]
| mit | 236,581,130,188,820,000 | 100,686,486,342,777,900 | 35.850449 | 79 | 0.501177 | false |
Ramanujakalyan/Inherit | gis-tools-101/rev_geo.py | 29 | 6294 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = "Brian Lehman, Scott Hendrickson"
import sys
import re
import codecs
reload(sys)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
import math
import pprint
import json
import fiona
from shapely.geometry import Point, shape, Polygon, box
from collections import defaultdict
import argparse
import os
import pickle
########################
# functions
def tree(): return defaultdict(tree)
def grid_finder(x,y):
return (int((math.floor(x)-grid_boundaries[0])/delta)
,int((math.floor(y)-grid_boundaries[1])/delta))
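# Illustrative example (not part of the original script): with the default
# bounding box (-185,15,-65,70) and delta=5, a point at lon=-105.27, lat=40.01
# maps to grid cell (15, 5): int((floor(-105.27)+185)/5) == 15 and
# int((floor(40.01)-15)/5) == 5.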
def topic_args():
    parser = argparse.ArgumentParser(description="Reverse geocoder: returns location info given a lon,lat pair")
parser.add_argument("file_name"
, metavar= "file_name"
, nargs="?"
, default=[]
, help="Input file name (optional).")
parser.add_argument("-b"
, "--bounding-box"
, dest="grid_boundaries"
, default="-185,15,-65,70"
, help="Set bounding box for region to include (default: [-185,15,-65,70])")
parser.add_argument("-d"
, "--delta"
, dest="delta"
, default=5
, help="Set the number of degrees between grid coords (default: 5)")
parser.add_argument("-g"
, "--use-saved-grid"
, dest="use_saved_grid"
, default=False
, action="store_true"
, help="Save grid or use previously saved version in data/grid.json")
parser.add_argument("-s"
, "--shape-file-path"
, dest="shape_file_path"
, default="data/tl_2013_us_county.shp"
, help="Set shapefile path (default: data/tl_2013_us_county.shp)")
parser.add_argument("-t"
, "--tweet-input"
, dest="tweet_input"
, default=False
, action="store_true"
, help="Set input as tweet payload instead of coordinates (in progress)")
return parser
def build_grid():
    #grid_boundaries=(-185,15,-65,70) # upper-right edge is plus delta (lower 48 states)
grid={(i,j):{}
for i in range((grid_boundaries[2]-grid_boundaries[0])/delta)
for j in range((grid_boundaries[3]-grid_boundaries[1])/delta) }
with fiona.open(options.shape_file_path) as fc:
print >>sys.stderr, fc.driver,"###",fc.schema,"###", len(fc),"###",fc.crs
print >> sys.stderr,fc.schema
print >>sys.stderr, "Number of records:", len(fc)
print >>sys.stderr, "Bounds of all records:", fc.bounds
print >>sys.stderr, "Bounds applied:",grid_boundaries
print >> sys.stderr,"######## indexing shapes to grid ########"
print >> sys.stderr,"shapes complete:"
c=0
for feature in fc:
c+=1
GEOID=str(feature['properties']['GEOID'])
NAME=feature['properties']['NAME']
INTPTLON=float(feature['properties']['INTPTLON'])
INTPTLAT=float(feature['properties']['INTPTLAT'])
shp=shape(feature['geometry']) # list of coordinates of geometric shape
bb=box(*shp.bounds) #box(minx,miny,maxx,maxy)) creates one boxlike shape to rule them all
for i,j in grid:
grid_box=box(i*delta+grid_boundaries[0]
,j*delta+grid_boundaries[1]
,(i+1)*delta+grid_boundaries[0]
,(j+1)*delta+grid_boundaries[1] )
if grid_box.intersects(bb): #http://toblerity.org/shapely/manual.html#object.intersects
grid[(i,j)][bb]=(shp,GEOID,NAME,INTPTLON,INTPTLAT) # (county shape, countyID)
if c%100==0:
print >> sys.stderr, c
return grid
if __name__ == '__main__':
options = topic_args().parse_args()
grid_boundaries=[int(item) for item in options.grid_boundaries.split(",")]
delta=int(options.delta)
if not options.use_saved_grid:
grid=build_grid()
else:
if not os.path.isfile("./data/grid.json"):
print >>sys.stderr, "creating ./data/grid.json"
grid=build_grid()
if not os.path.exists("./data"):
os.makedirs("./data")
print >>sys.stderr, "saving file ./data/grid.json"
with open("./data/grid.json","wb") as g:
pickle.dump(grid,g)
else:
print >>sys.stderr, "using ./data/grid.json"
grid=pickle.load(open("./data/grid.json"))
counter=0
in_grid_not_in_county=0
grid_counter=0
print >> sys.stderr,"######## locating geo coords in grid ########"
for line in sys.stdin:
#( lng, lat ) = coord #NOTE:the input file must contain (lng,lat)
values=line.replace("(","").replace(")","").replace("[","").replace("]","").strip().split(",")
lng = float(values[0])
lat = float(values[1])
point = Point(float(lng), float(lat))
coords=grid_finder(lng,lat)
found=False
if coords not in grid:
counter+=1
print >> sys.stderr,"not in grid:{},not in county:{},found{}".format(counter,in_grid_not_in_county,grid_counter)
print >> sys.stderr,"{},{}: not in grid".format(lng,lat)
continue
for box in grid[coords]:
if box.contains(point):
if grid[coords][box][0].contains(point):
e=tree()
found=True
grid_counter+=1
e["coords"]=(lng,lat)
e["GEOID"]=grid[coords][box][1]
e["centroid"]=(grid[coords][box][3],grid[coords][box][4])
e["county"]=grid[coords][box][2]
print json.dumps(e)
break #point found, no need to continue searching
if not found:
in_grid_not_in_county+=1
print >> sys.stderr,"######## DONE ########"
print >> sys.stderr, "{} points outside of grid".format(counter)
print >> sys.stderr, "{} points in grid but not in a county".format(in_grid_not_in_county)
print >> sys.stderr, "{} points in grid and in county".format(grid_counter)
| unlicense | 831,984,771,179,405,600 | -4,285,563,556,035,430,000 | 39.089172 | 124 | 0.55116 | false |
kennedyshead/home-assistant | tests/components/pushbullet/test_notify.py | 8 | 8722 | """The tests for the pushbullet notification platform."""
import json
from unittest.mock import patch
from pushbullet import PushBullet
import pytest
import homeassistant.components.notify as notify
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component, load_fixture
@pytest.fixture
def mock_pushbullet():
"""Mock pushbullet."""
with patch.object(
PushBullet,
"_get_data",
return_value=json.loads(load_fixture("pushbullet_devices.json")),
):
yield
async def test_pushbullet_config(hass, mock_pushbullet):
"""Test setup."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
async def test_pushbullet_config_bad(hass):
"""Test set up the platform with bad/missing configuration."""
config = {notify.DOMAIN: {"platform": "pushbullet"}}
with assert_setup_component(0) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert not handle_config[notify.DOMAIN]
async def test_pushbullet_push_default(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {"title": "Test Title", "message": "Test Message"}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 1
expected_body = {"body": "Test Message", "title": "Test Title", "type": "note"}
assert requests_mock.last_request.json() == expected_body
async def test_pushbullet_push_device(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 1
expected_body = {
"body": "Test Message",
"device_iden": "identity1",
"title": "Test Title",
"type": "note",
}
assert requests_mock.last_request.json() == expected_body
async def test_pushbullet_push_devices(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP", "device/My iPhone"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 2
assert len(requests_mock.request_history) == 2
expected_body = {
"body": "Test Message",
"device_iden": "identity1",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[0].json() == expected_body
expected_body = {
"body": "Test Message",
"device_iden": "identity2",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[1].json() == expected_body
async def test_pushbullet_push_email(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["email/[email protected]"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 1
assert len(requests_mock.request_history) == 1
expected_body = {
"body": "Test Message",
"email": "[email protected]",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[0].json() == expected_body
async def test_pushbullet_push_mixed(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP", "email/[email protected]"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 2
assert len(requests_mock.request_history) == 2
expected_body = {
"body": "Test Message",
"device_iden": "identity1",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[0].json() == expected_body
expected_body = {
"body": "Test Message",
"email": "[email protected]",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[1].json() == expected_body
async def test_pushbullet_push_no_file(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP", "device/My iPhone"],
"data": {"file": "not_a_file"},
}
assert not await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
| apache-2.0 | 823,693,993,943,671,800 | 6,682,170,312,707,393,000 | 30.948718 | 83 | 0.60055 | false |
webmull/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/message_listener.py | 590 | 3354 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines a listener interface for observing certain
state transitions on Message objects.
Also defines a null implementation of this interface.
"""
__author__ = '[email protected] (Will Robinson)'
class MessageListener(object):
"""Listens for modifications made to a message. Meant to be registered via
Message._SetListener().
Attributes:
dirty: If True, then calling Modified() would be a no-op. This can be
used to avoid these calls entirely in the common case.
"""
def Modified(self):
"""Called every time the message is modified in such a way that the parent
message may need to be updated. This currently means either:
(a) The message was modified for the first time, so the parent message
should henceforth mark the message as present.
(b) The message's cached byte size became dirty -- i.e. the message was
modified for the first time after a previous call to ByteSize().
Therefore the parent should also mark its byte size as dirty.
Note that (a) implies (b), since new objects start out with a client cached
size (zero). However, we document (a) explicitly because it is important.
Modified() will *only* be called in response to one of these two events --
not every time the sub-message is modified.
Note that if the listener's |dirty| attribute is true, then calling
Modified at the moment would be a no-op, so it can be skipped. Performance-
sensitive callers should check this attribute directly before calling since
it will be true most of the time.
"""
raise NotImplementedError
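# Editor's sketch (not part of the original protobuf source): a minimal
# listener following the documented |dirty| contract -- it propagates only the
# first modification to a parent object that is assumed to expose Modified().
class _ExampleDirtyFlagListener(MessageListener):

  """Illustrative MessageListener that tracks a dirty flag."""

  def __init__(self, parent):
    self.dirty = False
    self._parent = parent

  def Modified(self):
    if self.dirty:
      return
    self.dirty = True
    self._parent.Modified()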
class NullMessageListener(object):
"""No-op MessageListener implementation."""
def Modified(self):
pass
| bsd-3-clause | -6,536,847,338,259,670,000 | 6,311,545,379,808,346,000 | 42 | 80 | 0.748062 | false |
AdamPI314/SOHR | include/tools/check_available_hosts.py | 2 | 2084 | #/usr/bin/env python
import subprocess as sp
import numpy as np
import re
import signal
def check_host():
cmd= "echo `hostname`"
pid=sp.Popen(cmd, shell= True, stdout=sp.PIPE, stderr= sp.PIPE)
out, err= pid.communicate()
if "linus" in out:
host= "linus"
elif "pople" in out:
host= "pople"
pid.wait()
return host
def check_nodes():
#cmd= "python sr_check_nodes.py"
cmd= "/tcghome/sbai/sr_tools/my_bin/check-nodes-timeout.sh -t 300"
pid=sp.Popen(cmd, shell= True, stdout=sp.PIPE, stderr= sp.PIPE)
out, err= pid.communicate()
return out
def parse_and_w2f(filename, check_nodes_out, criteria):
#with open("check_nodes_out.log", 'w') as f:
# f.write(check_nodes_out)
#f.close()
#with open("check_nodes_out.log", 'r') as f:
# check_nodes_out=f.read()
#f.close()
    #match matches the whole string from ^ to $
    #search returns the first match
    #findall returns all matches
#m = re.search("(compute-\d+-\d+|pop\d+):\s+.+load average:\s+(\d+.\d+),\s+(\d+.\d+),\s+(\d+.\d+)",
# check_nodes_out)
m = re.findall("(compute-\d+-\d+|pop\d+):\s+.+load average:\s+(\d+.\d+),\s+(\d+.\d+),\s+(\d+.\d+)",
check_nodes_out)
if m:
#print m.groups()
avail_nodes= [node for node in m if float(node[-3])<=criteria and float(node[-2])<=criteria and float(node[-1])<=criteria]
#got to sort node based on nodes[-1]+nodes[-2]+nodes[-3]
avail_nodes= sorted(avail_nodes, key= lambda x:float(x[-1])+float(x[-2])+float(x[-3]))
with open(filename, 'w') as f_host:
f_host.write("# This is a sample host file\n")
for node in avail_nodes:
f_host.write(node[0]+":4 # The next 4 procs run on this host, "+node[-3]+" "+node[-2]+" "+node[-1]+"\n")
f_host.close()
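#editor's note (illustrative only): each line written to the hosts file looks
#roughly like
#   compute-0-12:4 # The next 4 procs run on this host, 0.03 0.05 0.01
#with nodes sorted by the sum of their three load averages (lightest first).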
if __name__== "__main__":
#print check_host()
print "check nodes (walltime=300 seconds)..."
print "might take longer on pople...\n"
check_nodes_out=[]
check_nodes_out= check_nodes()
criteria=15.0; filename= "hosts"
print "search nodes with current load less than ", criteria, "..."
print "write to file ", filename, "...\n"
parse_and_w2f(filename, check_nodes_out, criteria)
| mit | -3,167,059,038,882,564,000 | -7,718,602,512,880,176,000 | 29.202899 | 124 | 0.640595 | false |
pyjs/pyjs | pyjswidgets/pyjamas/ui/FormPanel.ie6.py | 7 | 1124 | class FormPanel:
def getTextContents(self, iframe):
JS("""
try {
if (!@{{iframe}}['contentWindow']['document'])
return null;
return @{{iframe}}['contentWindow']['document']['body']['innerText'];
} catch (e) {
return null;
}
""")
def hookEvents(self, iframe, form, listener):
JS("""
if (@{{iframe}}) {
@{{iframe}}['onreadystatechange'] = function() {
if (!@{{iframe}}['__formAction'])
return;
if (@{{iframe}}['readyState'] == 'complete') {
@{{listener}}['onFrameLoad']();
}
};
}
@{{form}}['onsubmit'] = function() {
if (@{{iframe}})
@{{iframe}}['__formAction'] = @{{form}}['action'];
return @{{listener}}['onFormSubmit']();
};
""")
def unhookEvents(self, iframe, form):
JS("""
if (@{{iframe}})
@{{iframe}}['onreadystatechange'] = null;
@{{form}}['onsubmit'] = null;
""")
| apache-2.0 | 2,274,875,838,513,343,000 | 4,089,890,677,670,038,500 | 27.1 | 81 | 0.406584 | false |
efiring/numpy-work | numpy/testing/decorators.py | 2 | 5819 | """Decorators for labeling test objects
Decorators that merely return a modified version of the original
function object are straightforward. Decorators that return a new
function object need to use
nose.tools.make_decorator(original_function)(decorator) in returning
the decorator, in order to preserve metadata such as function name,
setup and teardown functions and so on - see nose.tools for more
information.
"""
def slow(t):
"""Labels a test as 'slow'.
The exact definition of a slow test is obviously both subjective and
hardware-dependent, but in general any individual test that requires more
    than a second or two should be labeled as slow (the whole suite consists of
thousands of tests, so even a second is significant)."""
t.slow = True
return t
def setastest(tf=True):
''' Signals to nose that this function is or is not a test
Parameters
----------
tf : bool
If True specifies this is a test, not a test otherwise
    e.g.
>>> from numpy.testing.decorators import setastest
>>> @setastest(False)
... def func_with_test_in_name(arg1, arg2): pass
...
>>>
This decorator cannot use the nose namespace, because it can be
called from a non-test module. See also istest and nottest in
nose.tools
'''
def set_test(t):
t.__test__ = tf
return t
return set_test
def skipif(skip_condition, msg=None):
''' Make function raise SkipTest exception if skip_condition is true
Parameters
----------
skip_condition : bool or callable.
Flag to determine whether to skip test. If the condition is a
callable, it is used at runtime to dynamically make the decision. This
is useful for tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a SkipTest exception
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
'''
def skip_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
# Allow for both boolean or callable skip conditions.
if callable(skip_condition):
skip_val = lambda : skip_condition()
else:
skip_val = lambda : skip_condition
def get_msg(func,msg=None):
"""Skip message with information about function being skipped."""
if msg is None:
out = 'Test skipped due to test condition'
else:
out = '\n'+msg
return "Skipping test: %s%s" % (func.__name__,out)
# We need to define *two* skippers because Python doesn't allow both
# return with value and yield inside the same function.
def skipper_func(*args, **kwargs):
"""Skipper for normal test functions."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
return f(*args, **kwargs)
def skipper_gen(*args, **kwargs):
"""Skipper for test generators."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
for x in f(*args, **kwargs):
yield x
# Choose the right skipper to use when building the actual decorator.
if nose.util.isgenerator(f):
skipper = skipper_gen
else:
skipper = skipper_func
return nose.tools.make_decorator(f)(skipper)
return skip_decorator
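# Editor's illustration (not part of the original module): typical use of
# skipif with a callable condition so the costly check only runs at test time.
# Kept as a comment block so that loading this module never imports nose.
#
#     def _missing_scipy():
#         try:
#             import scipy
#             return False
#         except ImportError:
#             return True
#
#     @skipif(_missing_scipy, "scipy is required for this test")
#     def test_needs_scipy():
#         ...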
def knownfailureif(fail_condition, msg=None):
''' Make function raise KnownFailureTest exception if fail_condition is true
Parameters
----------
fail_condition : bool or callable.
Flag to determine whether to mark test as known failure (True)
or not (False). If the condition is a callable, it is used at
runtime to dynamically make the decision. This is useful for
tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a KnownFailureTest exception
Returns
-------
decorator : function
        Decorator, which, when applied to a function, causes KnownFailureTest
        to be raised when fail_condition is True, and the function
to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
'''
if msg is None:
msg = 'Test skipped due to known failure'
# Allow for both boolean or callable known failure conditions.
if callable(fail_condition):
fail_val = lambda : fail_condition()
else:
fail_val = lambda : fail_condition
def knownfail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
from noseclasses import KnownFailureTest
def knownfailer(*args, **kwargs):
if fail_val():
raise KnownFailureTest, msg
else:
return f(*args, **kwargs)
return nose.tools.make_decorator(f)(knownfailer)
return knownfail_decorator
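# Editor's illustration (not part of the original module): marking a test that
# is expected to fail until a known bug is fixed (kept as a comment block for
# the same reason as the skipif example above).
#
#     @knownfailureif(True, "known regression; remove once the bug is fixed")
#     def test_known_bad():
#         assert False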
| bsd-3-clause | -6,697,066,211,093,191,000 | 2,845,611,010,545,291,300 | 32.831395 | 80 | 0.637051 | false |
anupam2221/isprime | isprime.py | 1 | 1522 | ##written 9-6-2017
##by Anupam KP (c)
##[email protected]
##
##
##preprocessing time- O(n loglog n)
##Tells whether a number is prime in O(1)
dexterprime=[0]*500
def akpprocess(n):
n=int(n)
global dexterprime
dexterprime=[0]*(n+1)
dexterprime[0]=dexterprime[1]=1
for i in range(2,n+1):
for j in range(2,i+1):
if (i*j)>(n):
break
if dexterprime[j]==0:
dexterprime[i*j]=1
#print primetell.d
def isprime(x):
global dexterprime
try:
if dexterprime[x]==0:
return True
else:
return False
except:
print "you haven't made the seive of the length of the number you have provided.\n Please preprocess with akpprocess("+str(x)+")"
class primetell:
d=[]
def __init__(self,n):
self.d=[]
self.preprocess(n)
def preprocess(self,n):
n=int(n)
self.d=[0]*(n+1)
self.d[0]=self.d[1]=1
for i in range(2,n+1):
for j in range(2,i+1):
                if (i*j)>(n):
break
if self.d[j]==0:
self.d[i*j]=1
#print primetell.d
def isprime(self,x):
if self.d[x]==0:
return True
else:
return False
if __name__=="__main__":
akpprocess(20)
print dexterprime
print isprime(11)
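    ##editor's sketch (not in the original): the class-based API gives the same
    ##answers without touching the module-level sieve, e.g.
    ##  pt = primetell(20)
    ##  pt.isprime(13)   # True
    ##  pt.isprime(15)   # False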
| mit | -3,326,510,749,082,374,700 | -2,910,010,775,815,586,300 | 21.382353 | 141 | 0.459921 | false |
mumble-voip/libmumble-gyp | test/generator-output/gyptest-relocate.py | 216 | 1670 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a project hierarchy created with the --generator-output=
option can be built even when it's relocated to a different path.
"""
import TestGyp
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])
test.writable(test.workpath('src'), False)
test.run_gyp('prog1.gyp',
'-Dset_symroot=1',
'--generator-output=' + test.workpath('gypfiles'),
chdir='src')
test.writable(test.workpath('src'), True)
test.relocate('src', 'relocate/src')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/src'), False)
test.writable(test.workpath('relocate/src/build'), True)
test.writable(test.workpath('relocate/src/subdir2/build'), True)
test.writable(test.workpath('relocate/src/subdir3/build'), True)
test.build('prog1.gyp', test.ALL, chdir='relocate/gypfiles')
chdir = 'relocate/gypfiles'
expect = """\
Hello from %s
Hello from inc.h
Hello from inc1/include1.h
Hello from inc2/include2.h
Hello from inc3/include3.h
Hello from subdir2/deeper/deeper.h
"""
if test.format == 'xcode':
chdir = 'relocate/src'
test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c')
if test.format == 'xcode':
chdir = 'relocate/src/subdir2'
test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c')
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c')
test.pass_test()
| bsd-3-clause | 1,824,964,986,759,586,600 | 3,365,477,546,840,172,500 | 26.833333 | 74 | 0.71018 | false |
ninemoreminutes/lmiapi | lmiapi/central.py | 1 | 7685 | # Python
import json
import logging
import re
import urllib
import urlparse
import warnings
import webbrowser
# Requests
import requests
# BeautifulSoup4
from bs4 import BeautifulSoup
__all__ = ['LogMeInCentralAPI']
logger = logging.getLogger('lmiapi.central')
class LogMeInCentralAPI(object): # pragma: no cover
API_ROOT = 'https://secure.logmein.com/api/'
def __init__(self, email, password):
warnings.warn('The LogMeInCentralAPI class is no longer maintained or '
'supported; use LogMeInPublicAPI instead.',
DeprecationWarning)
self.email = email
self.password = password
self.session = requests.Session()
self.session.headers.update({'Accept': 'application/JSON'})
self.current_profile_id = None
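    # Editor's usage sketch (illustrative only; the class itself warns that it
    # is deprecated in favour of LogMeInPublicAPI):
    #   api = LogMeInCentralAPI("user@example.com", "secret")
    #   if api.login():
    #       profiles = api.get_user_profile_list()
    #       hosts = api.get_all_hosts()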
def _post(self, path, data=None):
url = '%s%s' % (self.API_ROOT, path.lstrip('/'))
headers = {'Content-Type': 'application/JSON'}
data = json.dumps(data or {})
response = self.session.post(url, data=data, headers=headers)
if response.status_code == 401 and self.login():
response = self.session.post(url, data=data, headers=headers)
response.raise_for_status()
logger.debug('POST %s -> %d', url, response.status_code)
return response.json()
def _update_current_profile_id(self, soup):
profile_id = None
alt_link = soup.find('link', rel='alternate', href=re.compile(r'^.*?profileid=\d+?.*?$'))
if alt_link:
alt_parts = urlparse.urlsplit(alt_link['href'])
alt_query = urlparse.parse_qs(alt_parts.query)
profile_id = int(alt_query.get('profileid', ['0'])[0])
self.current_profile_id = profile_id or None
logger.debug('current profile id: %s', str(self.current_profile_id))
return self.current_profile_id
def login(self):
# Read main LogMeIn page at secure.logmein.com.
url = urlparse.urljoin(self.API_ROOT, '/')
response = self.session.get(url)
response.raise_for_status()
# Find login button link.
soup = BeautifulSoup(response.text)
btn_login = soup.find('a', attrs={'class': 'btn-login', 'href': True})
if not btn_login:
raise RuntimeError('Unable to find login button link!')
login_url = urlparse.urljoin(response.url, btn_login['href'])
# Follow the login link.
response = self.session.get(login_url)
response.raise_for_status()
# Try to find the current profile ID in the response. If found, we're
# already logged in.
soup = BeautifulSoup(response.text)
profile_id = self._update_current_profile_id(soup)
if profile_id:
return profile_id
# Otherwise, we were redirected to the login page, so find the login
# form and build up the auth data to send.
form = soup.find('form', id='form', action=True)
if not form:
raise RuntimeError('No login form could be found!')
auth_url = urlparse.urljoin(response.url, form['action'])
auth_method = form.attrs.get('method', 'POST').lower()
fields = form.find_all('input', attrs={'name': True})
auth_data = {}
for field in fields:
name = field['name']
if name == 'email':
value = self.email
elif name == 'password':
value = self.password
else:
value = field.attrs.get('value', '')
auth_data[name] = value
# Now submit the login form with the auth data filled in.
logger.debug('auth url: %s %s', auth_method.upper(), auth_url)
logger.debug('auth data: %r', auth_data)
response = getattr(self.session, auth_method)(auth_url, auth_data)
response.raise_for_status()
# Look for the current profile ID in the response.
soup = BeautifulSoup(response.text)
return self._update_current_profile_id(soup)
def select_profile(self, profile_id):
# Get the URL used to switch to a new profile.
url = urlparse.urljoin(self.API_ROOT, '/login/selectprofile.aspx?profile=%d' % profile_id)
response = self.session.get(url)
response.raise_for_status()
# Look for the new profile ID in the response.
soup = BeautifulSoup(response.text)
return self._update_current_profile_id(soup)
def get_user_profile_list(self):
result = self._post('ProfileList.svc/GetUserProfileList')
return dict([(x['Id'], x['Name']) for x in result['GetUserProfileListResult']['List']])
def get_all_hosts(self):
result = self._post('Computers.svc/GetAllHostsForCentral')
return result
def get_host_details(self, host_id):
url = urlparse.urljoin(self.API_ROOT, '/mycomputers_preferences.asp')
response = self.session.get(url, params={'hostid': host_id})
response.raise_for_status()
soup = BeautifulSoup(response.text)
host_details = {}
for hostid_input in soup.select('fieldset > input[name="hostid"]'):
host_details[u'hostid'] = int(hostid_input['value'])
for profileid_input in soup.select('fieldset input[name="profileid"]'):
host_details[u'profileid'] = int(profileid_input['value'])
for tr in soup.select('fieldset table tr'):
for n, td in enumerate(tr.find_all('td', recursive=False)):
if n == 0:
key_parts = td.get_text(strip=True).replace(':', '').split()
key_parts = [x.strip().title() for x in key_parts]
key_parts[0] = key_parts[0].lower()
key = u''.join(key_parts)
if key == 'status':
key = u'statusString'
elif key == 'group':
key = u'groupName'
elif n == 1:
if key == 'computerDescription':
value = td.find('input', attrs={'name': 'desc'})['value']
elif key == 'statusString':
value = td.get_text('|', strip=True).split('|')[0]
a_tag = td.find('a', href=True)
if a_tag:
host_details[u'connectUrl'] = urlparse.urljoin(response.url, a_tag['href'])
elif key == 'groupName':
selected_option = td.find('option', selected=True)
value = selected_option.get_text()
host_details[u'groupid'] = int(selected_option['value'])
elif key == 'note':
value = td.find('textarea').get_text()
else:
value = td.get_text(strip=True)
host_details[key] = value
return host_details
def get_host_av_info(self, host_id):
result = self._post('AntiVirus.svc/GetHostAVInfo', {'hostId': host_id})
return result['GetHostAVInfoResult']
def connect_to_host(self, host_id):
url = urlparse.urljoin(self.API_ROOT, '/mycomputers_connect.asp')
qs = urllib.urlencode({'hostid': host_id})
url = '%s?%s' % (url, qs)
webbrowser.open_new_tab(url)
return
response = self.session.get(url, params={'hostid': host_id})
response.raise_for_status()
soup = BeautifulSoup(response.text)
meta = soup.find('meta', attrs={'http-equiv': 'refresh', 'content': True})
url = meta['content'].split(';URL=', 1)[1]
response = self.session.get(url)
| bsd-3-clause | -4,854,633,029,228,152,000 | 8,772,285,023,332,700,000 | 42.664773 | 103 | 0.571893 | false |
jlspyaozhongkai/Uter | third_party_build/Python-2.7.9/lib/python2.7/lib-tk/tkCommonDialog.py | 193 | 1418 | # base class for tk common dialogues
#
# this module provides a base class for accessing the common
# dialogues available in Tk 4.2 and newer. use tkFileDialog,
# tkColorChooser, and tkMessageBox to access the individual
# dialogs.
#
# written by Fredrik Lundh, May 1997
#
from Tkinter import *
class Dialog:
command = None
def __init__(self, master=None, **options):
# FIXME: should this be placed on the module level instead?
if TkVersion < 4.2:
raise TclError, "this module requires Tk 4.2 or newer"
self.master = master
self.options = options
if not master and options.get('parent'):
self.master = options['parent']
def _fixoptions(self):
pass # hook
def _fixresult(self, widget, result):
return result # hook
def show(self, **options):
# update instance options
for k, v in options.items():
self.options[k] = v
self._fixoptions()
# we need a dummy widget to properly process the options
# (at least as long as we use Tkinter 1.63)
w = Frame(self.master)
try:
s = w.tk.call(self.command, *w._options(self.options))
s = self._fixresult(w, s)
finally:
try:
# get rid of the widget
w.destroy()
except:
pass
return s
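# Editor's sketch (not part of the original module): the concrete dialog
# wrappers subclass Dialog and set the Tk command to invoke, roughly like this
# (tkColorChooser does essentially the same with command = "tk_chooseColor"):
#
#     class ExampleColorChooser(Dialog):
#         command = "tk_chooseColor"
#
#     # ExampleColorChooser(parent, initialcolor="red").show()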
| gpl-3.0 | 8,168,051,074,445,096,000 | 4,716,143,843,275,097,000 | 22.633333 | 67 | 0.577574 | false |
h3biomed/ansible-modules-core | network/vyos/vyos_facts.py | 29 | 9161 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: vyos_facts
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Collect facts from remote devices running VyOS
description:
- Collects a base set of device facts from a remote device that
is running VyOS. This module prepends all of the
base network fact keys with U(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: vyos
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: "!config"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node
vars:
cli:
host: "{{ inventory_hostname }}"
username: vyos
password: vyos
transport: cli
- name: collect all facts from the device
vyos_facts:
gather_subset: all
- name: collect only the config and default facts
vyos_facts:
gather_subset: config
- name: collect everything exception the config
vyos_facts:
gather_subset: "!config"
"""
RETURN = """
ansible_net_config:
description: The running-config from the device
returned: when config is configured
type: str
ansible_net_commits:
description: The set of available configuration revisions
returned: when present
type: list
ansible_net_hostname:
description: The configured system hostname
returned: always
type: str
ansible_net_model:
description: The device model string
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the device
returned: always
type: str
ansible_net_version:
description: The version of the software running
returned: always
type: str
ansible_net_neighbors:
description: The set of LLDP neighbors
returned: when interface is configured
type: list
ansible_net_gather_subset:
description: The list of subsets gathered by the module
returned: always
type: list
"""
import re
import ansible.module_utils.vyos
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.network import NetworkModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
def __init__(self, runner):
self.runner = runner
self.facts = dict()
self.commands()
def commands(self):
raise NotImplementedError
class Default(FactsBase):
def commands(self):
self.runner.add_command('show version')
self.runner.add_command('show host name')
def populate(self):
data = self.runner.get_command('show version')
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['hostname'] = self.runner.get_command('show host name')
def parse_version(self, data):
match = re.search(r'Version:\s*(\S+)', data)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'HW model:\s*(\S+)', data)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'HW S/N:\s+(\S+)', data)
if match:
return match.group(1)
class Config(FactsBase):
def commands(self):
self.runner.add_command('show configuration commands')
self.runner.add_command('show system commit')
def populate(self):
config = self.runner.get_command('show configuration commands')
self.facts['config'] = str(config).split('\n')
commits = self.runner.get_command('show system commit')
entries = list()
entry = None
for line in commits.split('\n'):
match = re.match(r'(\d+)\s+(.+)by(.+)via(.+)', line)
if match:
if entry:
entries.append(entry)
entry = dict(revision=match.group(1),
datetime=match.group(2),
by=str(match.group(3)).strip(),
via=str(match.group(4)).strip(),
comment=None)
else:
entry['comment'] = line.strip()
self.facts['commits'] = entries
class Neighbors(FactsBase):
def commands(self):
self.runner.add_command('show lldp neighbors')
self.runner.add_command('show lldp neighbors detail')
def populate(self):
all_neighbors = self.runner.get_command('show lldp neighbors')
if 'LLDP not configured' not in all_neighbors:
neighbors = self.parse(
self.runner.get_command('show lldp neighbors detail')
)
self.facts['neighbors'] = self.parse_neighbors(neighbors)
def parse(self, data):
parsed = list()
values = None
for line in data.split('\n'):
if len(line) == 0:
continue
elif line[0] == ' ':
values += '\n%s' % line
elif line.startswith('Interface'):
if values:
parsed.append(values)
values = line
return parsed
def parse_neighbors(self, data):
facts = dict()
for item in data:
interface = self.parse_interface(item)
host = self.parse_host(item)
port = self.parse_port(item)
if interface not in facts:
facts[interface] = list()
facts[interface].append(dict(host=host, port=port))
return facts
def parse_interface(self, data):
match = re.search(r'^Interface:\s+(\S+),', data)
return match.group(1)
def parse_host(self, data):
match = re.search(r'SysName:\s+(.+)$', data, re.M)
if match:
return match.group(1)
def parse_port(self, data):
match = re.search(r'PortDescr:\s+(.+)$', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
default=Default,
neighbors=Neighbors,
config=Config
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = NetworkModule(argument_spec=spec, supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
runner = CommandRunner(module)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](runner))
runner.run()
try:
for inst in instances:
inst.populate()
facts.update(inst.facts)
except Exception:
exc = get_exception()
module.fail_json(msg='unknown failure', output=runner.items, exc=str(exc))
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,821,128,523,685,976,000 | -1,571,205,081,519,860,500 | 27.628125 | 82 | 0.621875 | false |
BT-rmartin/odoo | addons/web_diagram/controllers/main.py | 268 | 4321 | import openerp
from openerp.tools.safe_eval import safe_eval as eval
class DiagramView(openerp.http.Controller):
@openerp.http.route('/web_diagram/diagram/get_diagram_info', type='json', auth='user')
def get_diagram_info(self, req, id, model, node, connector,
src_node, des_node, label, **kw):
visible_node_fields = kw.get('visible_node_fields',[])
invisible_node_fields = kw.get('invisible_node_fields',[])
node_fields_string = kw.get('node_fields_string',[])
connector_fields = kw.get('connector_fields',[])
connector_fields_string = kw.get('connector_fields_string',[])
bgcolors = {}
shapes = {}
bgcolor = kw.get('bgcolor','')
shape = kw.get('shape','')
if bgcolor:
for color_spec in bgcolor.split(';'):
if color_spec:
colour, color_state = color_spec.split(':')
bgcolors[colour] = color_state
if shape:
for shape_spec in shape.split(';'):
if shape_spec:
shape_colour, shape_color_state = shape_spec.split(':')
shapes[shape_colour] = shape_color_state
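        # Editor's note (illustrative only): bgcolor/shape arrive as strings of
        # "colour:python_condition" pairs separated by ";", e.g. a hypothetical
        # bgcolor="gray:flow_stop;blue:waiting"; each condition is eval'ed
        # against the node's record below to pick its colour or shape.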
ir_view = req.session.model('ir.ui.view')
graphs = ir_view.graph_get(
int(id), model, node, connector, src_node, des_node, label,
(140, 180), req.session.context)
nodes = graphs['nodes']
transitions = graphs['transitions']
isolate_nodes = {}
for blnk_node in graphs['blank_nodes']:
isolate_nodes[blnk_node['id']] = blnk_node
else:
y = map(lambda t: t['y'],filter(lambda x: x['y'] if x['x']==20 else None, nodes.values()))
y_max = (y and max(y)) or 120
connectors = {}
list_tr = []
for tr in transitions:
list_tr.append(tr)
connectors.setdefault(tr, {
'id': int(tr),
's_id': transitions[tr][0],
'd_id': transitions[tr][1]
})
connector_tr = req.session.model(connector)
connector_ids = connector_tr.search([('id', 'in', list_tr)], 0, 0, 0, req.session.context)
data_connectors =connector_tr.read(connector_ids, connector_fields, req.session.context)
for tr in data_connectors:
transition_id = str(tr['id'])
_sourceid, label = graphs['label'][transition_id]
t = connectors[transition_id]
t.update(
source=tr[src_node][1],
destination=tr[des_node][1],
options={},
signal=label
)
for i, fld in enumerate(connector_fields):
t['options'][connector_fields_string[i]] = tr[fld]
fields = req.session.model('ir.model.fields')
field_ids = fields.search([('model', '=', model), ('relation', '=', node)], 0, 0, 0, req.session.context)
field_data = fields.read(field_ids, ['relation_field'], req.session.context)
node_act = req.session.model(node)
search_acts = node_act.search([(field_data[0]['relation_field'], '=', id)], 0, 0, 0, req.session.context)
data_acts = node_act.read(search_acts, invisible_node_fields + visible_node_fields, req.session.context)
for act in data_acts:
n = nodes.get(str(act['id']))
if not n:
n = isolate_nodes.get(act['id'], {})
y_max += 140
n.update(x=20, y=y_max)
nodes[act['id']] = n
n.update(
id=act['id'],
color='white',
options={}
)
for color, expr in bgcolors.items():
if eval(expr, act):
n['color'] = color
for shape, expr in shapes.items():
if eval(expr, act):
n['shape'] = shape
for i, fld in enumerate(visible_node_fields):
n['options'][node_fields_string[i]] = act[fld]
_id, name = req.session.model(model).name_get([id], req.session.context)[0]
return dict(nodes=nodes,
conn=connectors,
name=name,
parent_field=graphs['node_parent_field'])
| agpl-3.0 | -3,331,981,214,882,944,500 | 833,517,522,688,226,000 | 38.281818 | 113 | 0.51863 | false |
karolciba/playground | ds_algos/heap.py | 1 | 3241 | #!/usr/bin/env python
def heapsort(ary, strategy = 'up'):
swaps = 0
def swap(i,j):
nonlocal swaps
swaps += 1
ary[i], ary[j] = ary[j], ary[i]
lst = len(ary)
def siftup(pos):
while pos:
if ary[pos] < ary[pos//2]:
swap(pos,pos//2)
pos //= 2
else:
break
def siftdown(pos, end):
while pos < end:
left = 2*pos if 2*pos < end else pos
right = 2*pos + 1 if 2 * pos + 1 < end else pos
toswap = pos
if ary[pos] > ary[left]:
toswap = left
if ary[toswap] > ary[right]:
toswap = right
if toswap == pos:
break
swap(pos, toswap)
pos = toswap
# build heap starting from first element
# print("before", ary)
if strategy == 'down':
for i in range(lst):
siftup(i)
else:
for i in range(lst-1, -1, -1):
siftdown(i,lst)
print("swaps", swaps)
# print("heapyfied", ary)
for i in range(lst-1, 0, -1):
swap(0,i)
siftdown(0,i)
# print("sorted", ary)
# sort tree swapping element for end, and rebuilding tree
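# Editor's note (illustrative, not in the original): heapsort() builds a
# *min*-heap and repeatedly swaps the current minimum to the end of the active
# region, so the list ends up sorted in descending order, e.g.
#     data = [3, 1, 2]
#     heapsort(data)      # prints the swap count
#     # data is now [3, 2, 1]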
class BinaryHeap():
def __init__(self ):
self._ary = []
def push(self, item):
pos = len(self._ary)
self._ary.append(item)
self.siftup(pos)
def siftup(self, pos):
while pos:
if self._ary[pos] < self._ary[pos//2]:
self._ary[pos], self._ary[pos//2] = self._ary[pos//2], self._ary[pos]
pos //= 2
else:
break
def pop(self):
lst = len(self._ary)
item = None
print(lst, item)
if lst >= 1:
self._ary[0], self._ary[lst-1] = self._ary[lst-1],self._ary[0]
item = self._ary.pop()
print(lst, item)
self.siftdown(0)
return item
def siftdown(self, pos):
lst = len(self._ary)
if lst == 0:
return None
while pos < lst:
left = 2 * pos
right = 2 * pos + 1
left = pos if left >= lst else left
right = pos if right >= lst else right
swap = pos
print("siftdown pos {} left {} right {} swap {} of len {}".format(pos, left, right, swap, len(self._ary)))
# if self._ary[left] >= self._ary[pos] <= self.ary[right]:
# return
if self._ary[pos] > self._ary[left]:
swap = left
if self._ary[swap] > self._ary[right]:
swap = right
if swap == pos:
return
self._ary[pos], self._ary[swap] = self._ary[swap], self._ary[pos]
pos = swap
if __name__ == '__main__':
import random
ary = list(range(1,10000))
random.shuffle(ary)
heapsort(ary, 'up')
srt = []
# heap = BinaryHeap()
# for i in ary:
# heap.push(i)
#
#
# print("heap", heap._ary)
# item = heap.pop()
# while item:
# print(item, heap._ary)
# srt.append(item)
# item = heap.pop()
#
#
# print("sorted", srt)
| unlicense | -3,172,988,851,595,605,500 | -4,146,376,530,679,236,000 | 26.235294 | 118 | 0.453872 | false |
sorenk/ansible | test/units/modules/network/f5/test_bigiq_regkey_license.py | 17 | 3902 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigiq_regkey_license import ModuleParameters
from library.bigiq_regkey_license import ApiParameters
from library.bigiq_regkey_license import ModuleManager
from library.bigiq_regkey_license import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigiq_regkey_license import ModuleParameters
from ansible.modules.network.f5.bigiq_regkey_license import ApiParameters
from ansible.modules.network.f5.bigiq_regkey_license import ModuleManager
from ansible.modules.network.f5.bigiq_regkey_license import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
regkey_pool='foo',
license_key='XXXX-XXXX-XXXX-XXXX-XXXX',
accept_eula=True,
description='this is a description'
)
p = ModuleParameters(params=args)
assert p.regkey_pool == 'foo'
assert p.license_key == 'XXXX-XXXX-XXXX-XXXX-XXXX'
assert p.accept_eula is True
assert p.description == 'this is a description'
def test_api_parameters(self):
args = load_fixture('load_regkey_license_key.json')
p = ApiParameters(params=args)
assert p.description == 'foo bar baz'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_create(self, *args):
set_module_args(dict(
regkey_pool='foo',
license_key='XXXX-XXXX-XXXX-XXXX-XXXX',
accept_eula=True,
description='this is a description',
password='passsword',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['description'] == 'this is a description'
| gpl-3.0 | -2,939,849,070,611,452,400 | 1,167,471,843,014,742,000 | 31.247934 | 91 | 0.671707 | false |
ekarlso/python-jolokiaclient | jolokiaclient/exceptions.py | 17 | 11913 | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 Nebula, Inc.
# Copyright 2013 Alessio Ababilov
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception definitions.
"""
import inspect
import sys
import six
class ClientException(Exception):
"""The base exception class for all exceptions this library raises.
"""
pass
class MissingArgs(ClientException):
"""Supplied arguments are not sufficient for calling a function."""
def __init__(self, missing):
self.missing = missing
msg = "Missing argument(s): %s" % ", ".join(missing)
super(MissingArgs, self).__init__(msg)
class ValidationError(ClientException):
"""Error in validation on API client side."""
pass
class UnsupportedVersion(ClientException):
"""User is trying to use an unsupported version of the API."""
pass
class CommandError(ClientException):
"""Error in CLI tool."""
pass
class AuthorizationFailure(ClientException):
"""Cannot authorize API client."""
pass
class ConnectionRefused(ClientException):
"""Cannot connect to API service."""
pass
class AuthPluginOptionsMissing(AuthorizationFailure):
"""Auth plugin misses some options."""
def __init__(self, opt_names):
super(AuthPluginOptionsMissing, self).__init__(
"Authentication failed. Missing options: %s" %
", ".join(opt_names))
self.opt_names = opt_names
class AuthSystemNotFound(AuthorizationFailure):
"""User has specified a AuthSystem that is not installed."""
def __init__(self, auth_system):
super(AuthSystemNotFound, self).__init__(
"AuthSystemNotFound: %s" % repr(auth_system))
self.auth_system = auth_system
class NoUniqueMatch(ClientException):
"""Multiple entities found instead of one."""
pass
class EndpointException(ClientException):
"""Something is rotten in Service Catalog."""
pass
class EndpointNotFound(EndpointException):
"""Could not find requested endpoint in Service Catalog."""
pass
class AmbiguousEndpoints(EndpointException):
"""Found more than one matching endpoint in Service Catalog."""
def __init__(self, endpoints=None):
super(AmbiguousEndpoints, self).__init__(
"AmbiguousEndpoints: %s" % repr(endpoints))
self.endpoints = endpoints
class HttpError(ClientException):
"""The base exception class for all HTTP exceptions.
"""
http_status = 0
message = "HTTP Error"
def __init__(self, message=None, details=None,
response=None, request_id=None,
url=None, method=None, http_status=None):
self.http_status = http_status or self.http_status
self.message = message or self.message
self.details = details
self.request_id = request_id
self.response = response
self.url = url
self.method = method
formatted_string = "%s (HTTP %s)" % (self.message, self.http_status)
if request_id:
formatted_string += " (Request-ID: %s)" % request_id
super(HttpError, self).__init__(formatted_string)
class HTTPClientError(HttpError):
"""Client-side HTTP error.
Exception for cases in which the client seems to have erred.
"""
message = "HTTP Client Error"
class HttpServerError(HttpError):
"""Server-side HTTP error.
Exception for cases in which the server is aware that it has
erred or is incapable of performing the request.
"""
message = "HTTP Server Error"
class BadRequest(HTTPClientError):
"""HTTP 400 - Bad Request.
The request cannot be fulfilled due to bad syntax.
"""
http_status = 400
message = "Bad Request"
class Unauthorized(HTTPClientError):
"""HTTP 401 - Unauthorized.
Similar to 403 Forbidden, but specifically for use when authentication
is required and has failed or has not yet been provided.
"""
http_status = 401
message = "Unauthorized"
class PaymentRequired(HTTPClientError):
"""HTTP 402 - Payment Required.
Reserved for future use.
"""
http_status = 402
message = "Payment Required"
class Forbidden(HTTPClientError):
"""HTTP 403 - Forbidden.
The request was a valid request, but the server is refusing to respond
to it.
"""
http_status = 403
message = "Forbidden"
class NotFound(HTTPClientError):
"""HTTP 404 - Not Found.
The requested resource could not be found but may be available again
in the future.
"""
http_status = 404
message = "Not Found"
class MethodNotAllowed(HTTPClientError):
"""HTTP 405 - Method Not Allowed.
A request was made of a resource using a request method not supported
by that resource.
"""
http_status = 405
message = "Method Not Allowed"
class NotAcceptable(HTTPClientError):
"""HTTP 406 - Not Acceptable.
The requested resource is only capable of generating content not
acceptable according to the Accept headers sent in the request.
"""
http_status = 406
message = "Not Acceptable"
class ProxyAuthenticationRequired(HTTPClientError):
"""HTTP 407 - Proxy Authentication Required.
The client must first authenticate itself with the proxy.
"""
http_status = 407
message = "Proxy Authentication Required"
class RequestTimeout(HTTPClientError):
"""HTTP 408 - Request Timeout.
The server timed out waiting for the request.
"""
http_status = 408
message = "Request Timeout"
class Conflict(HTTPClientError):
"""HTTP 409 - Conflict.
Indicates that the request could not be processed because of conflict
in the request, such as an edit conflict.
"""
http_status = 409
message = "Conflict"
class Gone(HTTPClientError):
"""HTTP 410 - Gone.
Indicates that the resource requested is no longer available and will
not be available again.
"""
http_status = 410
message = "Gone"
class LengthRequired(HTTPClientError):
"""HTTP 411 - Length Required.
The request did not specify the length of its content, which is
required by the requested resource.
"""
http_status = 411
message = "Length Required"
class PreconditionFailed(HTTPClientError):
"""HTTP 412 - Precondition Failed.
The server does not meet one of the preconditions that the requester
put on the request.
"""
http_status = 412
message = "Precondition Failed"
class RequestEntityTooLarge(HTTPClientError):
"""HTTP 413 - Request Entity Too Large.
The request is larger than the server is willing or able to process.
"""
http_status = 413
message = "Request Entity Too Large"
def __init__(self, *args, **kwargs):
try:
self.retry_after = int(kwargs.pop('retry_after'))
except (KeyError, ValueError):
self.retry_after = 0
super(RequestEntityTooLarge, self).__init__(*args, **kwargs)
class RequestUriTooLong(HTTPClientError):
"""HTTP 414 - Request-URI Too Long.
The URI provided was too long for the server to process.
"""
http_status = 414
message = "Request-URI Too Long"
class UnsupportedMediaType(HTTPClientError):
"""HTTP 415 - Unsupported Media Type.
The request entity has a media type which the server or resource does
not support.
"""
http_status = 415
message = "Unsupported Media Type"
class RequestedRangeNotSatisfiable(HTTPClientError):
"""HTTP 416 - Requested Range Not Satisfiable.
The client has asked for a portion of the file, but the server cannot
supply that portion.
"""
http_status = 416
message = "Requested Range Not Satisfiable"
class ExpectationFailed(HTTPClientError):
"""HTTP 417 - Expectation Failed.
The server cannot meet the requirements of the Expect request-header field.
"""
http_status = 417
message = "Expectation Failed"
class UnprocessableEntity(HTTPClientError):
"""HTTP 422 - Unprocessable Entity.
The request was well-formed but was unable to be followed due to semantic
errors.
"""
http_status = 422
message = "Unprocessable Entity"
class InternalServerError(HttpServerError):
"""HTTP 500 - Internal Server Error.
A generic error message, given when no more specific message is suitable.
"""
http_status = 500
message = "Internal Server Error"
# NotImplemented is a python keyword.
class HttpNotImplemented(HttpServerError):
"""HTTP 501 - Not Implemented.
The server either does not recognize the request method, or it lacks
the ability to fulfill the request.
"""
http_status = 501
message = "Not Implemented"
class BadGateway(HttpServerError):
"""HTTP 502 - Bad Gateway.
The server was acting as a gateway or proxy and received an invalid
response from the upstream server.
"""
http_status = 502
message = "Bad Gateway"
class ServiceUnavailable(HttpServerError):
"""HTTP 503 - Service Unavailable.
The server is currently unavailable.
"""
http_status = 503
message = "Service Unavailable"
class GatewayTimeout(HttpServerError):
"""HTTP 504 - Gateway Timeout.
The server was acting as a gateway or proxy and did not receive a timely
response from the upstream server.
"""
http_status = 504
message = "Gateway Timeout"
class HttpVersionNotSupported(HttpServerError):
"""HTTP 505 - HttpVersion Not Supported.
The server does not support the HTTP protocol version used in the request.
"""
http_status = 505
message = "HTTP Version Not Supported"
# _code_map contains all the classes that have http_status attribute.
_code_map = dict(
(getattr(obj, 'http_status', None), obj)
for name, obj in six.iteritems(vars(sys.modules[__name__]))
if inspect.isclass(obj) and getattr(obj, 'http_status', False)
)
def from_response(response, method, url):
"""Returns an instance of :class:`HttpError` or subclass based on response.
:param response: instance of `requests.Response` class
:param method: HTTP method used for request
:param url: URL used for request
"""
kwargs = {
"http_status": response.status_code,
"response": response,
"method": method,
"url": url,
"request_id": response.headers.get("x-compute-request-id"),
}
if "retry-after" in response.headers:
kwargs["retry_after"] = response.headers["retry-after"]
content_type = response.headers.get("Content-Type", "")
if content_type.startswith("application/json"):
try:
body = response.json()
except ValueError:
pass
else:
if hasattr(body, "keys"):
error = body[body.keys()[0]]
kwargs["message"] = error.get("message", None)
kwargs["details"] = error.get("details", None)
elif content_type.startswith("text/"):
kwargs["details"] = response.text
try:
cls = _code_map[response.status_code]
except KeyError:
if 500 <= response.status_code < 600:
cls = HttpServerError
elif 400 <= response.status_code < 500:
cls = HTTPClientError
else:
cls = HttpError
return cls(**kwargs)
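# A brief usage sketch (not part of the original module). ``response`` is assumed to be a
# ``requests.Response`` for a request that has already failed; everything else below uses
# only names defined above.
def _example_raise_for_response(response, method, url):
    error = from_response(response, method, url)
    if isinstance(error, RequestEntityTooLarge):
        # 413 responses also expose the parsed Retry-After value.
        print("Retry after %d seconds" % error.retry_after)
    raise error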
| apache-2.0 | -8,683,375,772,841,263,000 | -5,913,956,974,567,560,000 | 25.831081 | 79 | 0.666835 | false |
SimtterCom/gyp | test/win/gyptest-link-deffile.py | 344 | 1252 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure a .def file is handled in the link.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
# Multiple .def files doesn't make any sense, should fail at generate time.
test.run_gyp('deffile-multiple.gyp', chdir=CHDIR, stderr=None, status=1)
test.run_gyp('deffile.gyp', chdir=CHDIR)
test.build('deffile.gyp', test.ALL, chdir=CHDIR)
def HasExport(binary, export):
full_path = test.built_file_path(binary, chdir=CHDIR)
output = test.run_dumpbin('/exports', full_path)
return export in output
# Make sure we only have the export when the .def file is in use.
if HasExport('test_deffile_dll_notexported.dll', 'AnExportedFunction'):
test.fail_test()
if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
test.fail_test()
if HasExport('test_deffile_exe_notexported.exe', 'AnExportedFunction'):
test.fail_test()
if not HasExport('test_deffile_exe_ok.exe', 'AnExportedFunction'):
test.fail_test()
test.pass_test()
| bsd-3-clause | 9,079,128,928,773,429,000 | 3,202,137,019,489,424,000 | 28.116279 | 77 | 0.702077 | false |
kuznetz/rabbitvcs | rabbitvcs/vcs/git/gittyup/tests/commit.py | 4 | 1027 | #
# test/stage.py
#
import os
from shutil import rmtree
from sys import argv
from optparse import OptionParser
from gittyup.client import GittyupClient
from gittyup.objects import *
from util import touch, change
parser = OptionParser()
parser.add_option("-c", "--cleanup", action="store_true", default=False)
(options, args) = parser.parse_args(argv)
DIR = "commit"
if options.cleanup:
rmtree(DIR, ignore_errors=True)
print "commit.py clean"
else:
if os.path.isdir(DIR):
raise SystemExit("This test script has already been run. Please call this script with --cleanup to start again")
os.mkdir(DIR)
g = GittyupClient()
g.initialize_repository(DIR)
touch(DIR + "/test1.txt")
touch(DIR + "/test2.txt")
g.stage([DIR+"/test1.txt", DIR+"/test2.txt"])
g.commit("First commit", commit_all=True)
change(DIR + "/test1.txt")
g.stage([DIR+"/test1.txt"])
g.commit("Second commit", author="Alex Plumb <[email protected]>")
print "commit.py pass"
| gpl-2.0 | -9,071,098,165,128,521,000 | -5,967,752,834,030,900,000 | 23.452381 | 121 | 0.670886 | false |
Endika/edx-platform | openedx/core/djangoapps/credit/migrations/0001_initial.py | 48 | 12567 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import openedx.core.djangoapps.credit.models
import model_utils.fields
import xmodule_django.models
import jsonfield.fields
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CreditCourse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('course_key', xmodule_django.models.CourseKeyField(unique=True, max_length=255, db_index=True)),
('enabled', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='CreditEligibility',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(max_length=255, db_index=True)),
('deadline', models.DateTimeField(default=openedx.core.djangoapps.credit.models.default_deadline_for_credit_eligibility, help_text='Deadline for purchasing and requesting credit.')),
('course', models.ForeignKey(related_name='eligibilities', to='credit.CreditCourse')),
],
options={
'verbose_name_plural': 'Credit eligibilities',
},
),
migrations.CreateModel(
name='CreditProvider',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('provider_id', models.CharField(help_text='Unique identifier for this credit provider. Only alphanumeric characters and hyphens (-) are allowed. The identifier is case-sensitive.', unique=True, max_length=255, validators=[django.core.validators.RegexValidator(regex=b'[a-z,A-Z,0-9,\\-]+', message=b'Only alphanumeric characters and hyphens (-) are allowed', code=b'invalid_provider_id')])),
('active', models.BooleanField(default=True, help_text='Whether the credit provider is currently enabled.')),
('display_name', models.CharField(help_text='Name of the credit provider displayed to users', max_length=255)),
('enable_integration', models.BooleanField(default=False, help_text='When true, automatically notify the credit provider when a user requests credit. In order for this to work, a shared secret key MUST be configured for the credit provider in secure auth settings.')),
                ('provider_url', models.URLField(default=b'', help_text='URL of the credit provider. If automatic integration is enabled, this will be the end-point that we POST to to notify the provider of a credit request. Otherwise, the user will be shown a link to this URL, so the user can request credit from the provider directly.')),
('provider_status_url', models.URLField(default=b'', help_text='URL from the credit provider where the user can check the status of his or her request for credit. This is displayed to students *after* they have requested credit.')),
('provider_description', models.TextField(default=b'', help_text='Description for the credit provider displayed to users.')),
('fulfillment_instructions', models.TextField(help_text='Plain text or html content for displaying further steps on receipt page *after* paying for the credit to get credit for a credit course against a credit provider.', null=True, blank=True)),
('eligibility_email_message', models.TextField(default=b'', help_text='Plain text or html content for displaying custom message inside credit eligibility email content which is sent when user has met all credit eligibility requirements.')),
('receipt_email_message', models.TextField(default=b'', help_text='Plain text or html content for displaying custom message inside credit receipt email content which is sent *after* paying to get credit for a credit course.')),
('thumbnail_url', models.URLField(default=b'', help_text='Thumbnail image url of the credit provider.', max_length=255)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CreditRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('uuid', models.CharField(unique=True, max_length=32, db_index=True)),
('username', models.CharField(max_length=255, db_index=True)),
('parameters', jsonfield.fields.JSONField()),
('status', models.CharField(default=b'pending', max_length=255, choices=[(b'pending', b'Pending'), (b'approved', b'Approved'), (b'rejected', b'Rejected')])),
('course', models.ForeignKey(related_name='credit_requests', to='credit.CreditCourse')),
('provider', models.ForeignKey(related_name='credit_requests', to='credit.CreditProvider')),
],
options={
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='CreditRequirement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('namespace', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('display_name', models.CharField(default=b'', max_length=255)),
('order', models.PositiveIntegerField(default=0)),
('criteria', jsonfield.fields.JSONField()),
('active', models.BooleanField(default=True)),
('course', models.ForeignKey(related_name='credit_requirements', to='credit.CreditCourse')),
],
options={
'ordering': ['order'],
},
),
migrations.CreateModel(
name='CreditRequirementStatus',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(max_length=255, db_index=True)),
('status', models.CharField(max_length=32, choices=[(b'satisfied', b'satisfied'), (b'failed', b'failed'), (b'declined', b'declined')])),
('reason', jsonfield.fields.JSONField(default={})),
('requirement', models.ForeignKey(related_name='statuses', to='credit.CreditRequirement')),
],
),
migrations.CreateModel(
name='HistoricalCreditRequest',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('uuid', models.CharField(max_length=32, db_index=True)),
('username', models.CharField(max_length=255, db_index=True)),
('parameters', jsonfield.fields.JSONField()),
('status', models.CharField(default=b'pending', max_length=255, choices=[(b'pending', b'Pending'), (b'approved', b'Approved'), (b'rejected', b'Rejected')])),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('course', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='credit.CreditCourse', null=True)),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('provider', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='credit.CreditProvider', null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical credit request',
},
),
migrations.CreateModel(
name='HistoricalCreditRequirementStatus',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(max_length=255, db_index=True)),
('status', models.CharField(max_length=32, choices=[(b'satisfied', b'satisfied'), (b'failed', b'failed'), (b'declined', b'declined')])),
('reason', jsonfield.fields.JSONField(default={})),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('requirement', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='credit.CreditRequirement', null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical credit requirement status',
},
),
migrations.AlterUniqueTogether(
name='creditrequirementstatus',
unique_together=set([('username', 'requirement')]),
),
migrations.AlterUniqueTogether(
name='creditrequirement',
unique_together=set([('namespace', 'name', 'course')]),
),
migrations.AlterUniqueTogether(
name='creditrequest',
unique_together=set([('username', 'course', 'provider')]),
),
migrations.AlterUniqueTogether(
name='crediteligibility',
unique_together=set([('username', 'course')]),
),
]
| agpl-3.0 | 4,784,278,655,160,631,000 | 3,701,353,624,157,202,000 | 71.641618 | 407 | 0.628392 | false |
jclakkis/discus-inferno | flaskenv/lib/python2.7/site-packages/flask/testsuite/testing.py | 561 | 7411 | # -*- coding: utf-8 -*-
"""
flask.testsuite.testing
~~~~~~~~~~~~~~~~~~~~~~~
Test client and more.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
from flask._compat import text_type
class TestToolsTestCase(FlaskTestCase):
def test_environ_defaults_from_config(self):
app = flask.Flask(__name__)
app.testing = True
app.config['SERVER_NAME'] = 'example.com:1234'
app.config['APPLICATION_ROOT'] = '/foo'
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
self.assert_equal(ctx.request.url, 'http://example.com:1234/foo/')
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'http://example.com:1234/foo/')
def test_environ_defaults(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
self.assert_equal(ctx.request.url, 'http://localhost/')
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'http://localhost/')
def test_redirect_keep_session(self):
app = flask.Flask(__name__)
app.secret_key = 'testing'
@app.route('/', methods=['GET', 'POST'])
def index():
if flask.request.method == 'POST':
return flask.redirect('/getsession')
flask.session['data'] = 'foo'
return 'index'
@app.route('/getsession')
def get_session():
return flask.session.get('data', '<missing>')
with app.test_client() as c:
rv = c.get('/getsession')
assert rv.data == b'<missing>'
rv = c.get('/')
assert rv.data == b'index'
assert flask.session.get('data') == 'foo'
rv = c.post('/', data={}, follow_redirects=True)
assert rv.data == b'foo'
# This support requires a new Werkzeug version
if not hasattr(c, 'redirect_client'):
assert flask.session.get('data') == 'foo'
rv = c.get('/getsession')
assert rv.data == b'foo'
def test_session_transactions(self):
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
@app.route('/')
def index():
return text_type(flask.session['foo'])
with app.test_client() as c:
with c.session_transaction() as sess:
self.assert_equal(len(sess), 0)
sess['foo'] = [42]
self.assert_equal(len(sess), 1)
rv = c.get('/')
self.assert_equal(rv.data, b'[42]')
with c.session_transaction() as sess:
self.assert_equal(len(sess), 1)
self.assert_equal(sess['foo'], [42])
def test_session_transactions_no_null_sessions(self):
app = flask.Flask(__name__)
app.testing = True
with app.test_client() as c:
try:
with c.session_transaction() as sess:
pass
except RuntimeError as e:
self.assert_in('Session backend did not open a session', str(e))
else:
self.fail('Expected runtime error')
def test_session_transactions_keep_context(self):
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
with app.test_client() as c:
rv = c.get('/')
req = flask.request._get_current_object()
self.assert_true(req is not None)
with c.session_transaction():
self.assert_true(req is flask.request._get_current_object())
def test_session_transaction_needs_cookies(self):
app = flask.Flask(__name__)
app.testing = True
c = app.test_client(use_cookies=False)
try:
with c.session_transaction() as s:
pass
except RuntimeError as e:
self.assert_in('cookies', str(e))
else:
self.fail('Expected runtime error')
def test_test_client_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
flask.g.value = 42
return 'Hello World!'
@app.route('/other')
def other():
1 // 0
with app.test_client() as c:
resp = c.get('/')
self.assert_equal(flask.g.value, 42)
self.assert_equal(resp.data, b'Hello World!')
self.assert_equal(resp.status_code, 200)
resp = c.get('/other')
self.assert_false(hasattr(flask.g, 'value'))
self.assert_in(b'Internal Server Error', resp.data)
self.assert_equal(resp.status_code, 500)
flask.g.value = 23
try:
flask.g.value
except (AttributeError, RuntimeError):
pass
else:
raise AssertionError('some kind of exception expected')
def test_reuse_client(self):
app = flask.Flask(__name__)
c = app.test_client()
with c:
self.assert_equal(c.get('/').status_code, 404)
with c:
self.assert_equal(c.get('/').status_code, 404)
def test_test_client_calls_teardown_handlers(self):
app = flask.Flask(__name__)
called = []
@app.teardown_request
def remember(error):
called.append(error)
with app.test_client() as c:
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [])
self.assert_equal(called, [None])
del called[:]
with app.test_client() as c:
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [None])
self.assert_equal(called, [None, None])
class SubdomainTestCase(FlaskTestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['SERVER_NAME'] = 'example.com'
self.client = self.app.test_client()
self._ctx = self.app.test_request_context()
self._ctx.push()
def tearDown(self):
if self._ctx is not None:
self._ctx.pop()
def test_subdomain(self):
@self.app.route('/', subdomain='<company_id>')
def view(company_id):
return company_id
url = flask.url_for('view', company_id='xxx')
response = self.client.get(url)
self.assert_equal(200, response.status_code)
self.assert_equal(b'xxx', response.data)
def test_nosubdomain(self):
@self.app.route('/<company_id>')
def view(company_id):
return company_id
url = flask.url_for('view', company_id='xxx')
response = self.client.get(url)
self.assert_equal(200, response.status_code)
self.assert_equal(b'xxx', response.data)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestToolsTestCase))
suite.addTest(unittest.makeSuite(SubdomainTestCase))
return suite
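# Illustration only (not part of the original file): the suite above is normally collected
# by the package's test runner, but it can also be executed directly like this.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())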
| mit | -6,090,425,826,319,614,000 | 4,396,196,237,390,977,500 | 29.623967 | 80 | 0.538254 | false |
3dfxsoftware/cbss-addons | npg_bank_account_reconciliation/npg_bank_account_reconciliation.py | 1 | 21839 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 NovaPoint Group LLC (<http://www.novapointgroup.com>)
# Copyright (C) 2004-2010 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
import decimal_precision as dp
class bank_acc_rec_statement(osv.osv):
def check_group(self, cr, uid, ids, context=None):
"""Check if following security constraints are implemented for groups:
Bank Statement Preparer– they can create, view and delete any of the Bank Statements provided the Bank Statement is not in the DONE state,
or the Ready for Review state.
Bank Statement Verifier – they can create, view, edit, and delete any of the Bank Statements information at any time.
NOTE: DONE Bank Statements are only allowed to be deleted by a Bank Statement Verifier."""
model_data_obj = self.pool.get('ir.model.data')
res_groups_obj = self.pool.get('res.groups')
group_verifier_id = model_data_obj._get_id(cr, uid, 'npg_bank_account_reconciliation', 'group_bank_stmt_verifier')
for statement in self.browse(cr, uid, ids, context=context):
if group_verifier_id:
res_id = model_data_obj.read(cr, uid, [group_verifier_id], ['res_id'])[0]['res_id']
group_verifier = res_groups_obj.browse(cr, uid, res_id, context=context)
group_user_ids = [user.id for user in group_verifier.users]
if statement.state!='draft' and uid not in group_user_ids:
raise osv.except_osv(_('User Error !'),
_("Only a member of '%s' group may delete/edit bank statements when not in draft state!" %(group_verifier.name)))
return True
def copy(self, cr, uid, id, default={}, context=None):
default.update({
'credit_move_line_ids': [],
'debit_move_line_ids': [],
'name': '',
})
return super(bank_acc_rec_statement, self).copy(cr, uid, id, default=default, context=context)
def write(self, cr, uid, ids, vals, context=None):
self.check_group(cr, uid, ids, context) # Check if the user is allowed to perform the action
return super(bank_acc_rec_statement, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
"Reset the related account.move.line to be re-assigned later to statement."
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
self.check_group(cr, uid, ids, context) # Check if the user is allowed to perform the action
for statement in self.browse(cr, uid, ids, context=context):
statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids
statement_line_ids = map(lambda x: x.id, statement_lines)
statement_line_obj.unlink(cr, uid, statement_line_ids, context=context) # call unlink method to reset
return super(bank_acc_rec_statement, self).unlink(cr, uid, ids, context=context)
def check_difference_balance(self, cr, uid, ids, context=None):
"Check if difference balance is zero or not."
for statement in self.browse(cr, uid, ids, context=context):
if statement.difference != 0.0:
raise osv.except_osv(_('Warning!'),
_("Prior to reconciling a statement, all differences must be accounted for and the Difference balance must be zero." \
" Please review and make necessary changes."))
return True
def action_cancel(self, cr, uid, ids, context=None):
"Cancel the the statement."
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
return True
def action_review(self, cr, uid, ids, context=None):
"Change the status of statement from 'draft' to 'to_be_reviewed'."
# If difference balance not zero prevent further processing
self.check_difference_balance(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'to_be_reviewed'}, context=context)
return True
def action_process(self, cr, uid, ids, context=None):
"""Set the account move lines as 'Cleared' and Assign 'Bank Acc Rec Statement ID'
for the statement lines which are marked as 'Cleared'."""
account_move_line_obj = self.pool.get('account.move.line')
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
# If difference balance not zero prevent further processing
self.check_difference_balance(cr, uid, ids, context=context)
for statement in self.browse(cr, uid, ids, context=context):
statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids
for statement_line in statement_lines:
                # Mark the move lines as 'Cleared' and assign the 'Bank Acc Rec Statement ID'
account_move_line_obj.write(cr, uid, [statement_line.move_line_id.id],
{'cleared_bank_account': statement_line.cleared_bank_account,
'bank_acc_rec_statement_id': statement_line.cleared_bank_account and statement.id or False
}, context=context)
self.write(cr, uid, [statement.id], {'state': 'done',
'verified_by_user_id': uid,
'verified_date': time.strftime('%Y-%m-%d')
}, context=context)
return True
def action_cancel_draft(self, cr, uid, ids, context=None):
"""Reset the statement to draft and perform resetting operations."""
account_move_line_obj = self.pool.get('account.move.line')
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
for statement in self.browse(cr, uid, ids, context=context):
statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids
line_ids = []
statement_line_ids = []
for statement_line in statement_lines:
statement_line_ids.append(statement_line.id)
line_ids.append(statement_line.move_line_id.id) # Find move lines related to statement lines
# Reset 'Cleared' and 'Bank Acc Rec Statement ID' to False
account_move_line_obj.write(cr, uid, line_ids, {'cleared_bank_account': False,
'bank_acc_rec_statement_id': False,
}, context=context)
# Reset 'Cleared' in statement lines
statement_line_obj.write(cr, uid, statement_line_ids, {'cleared_bank_account': False,
'research_required': False
}, context=context)
# Reset statement
self.write(cr, uid, [statement.id], {'state': 'draft',
'verified_by_user_id': False,
'verified_date': False
}, context=context)
return True
def action_select_all(self, cr, uid, ids, context=None):
"""Mark all the statement lines as 'Cleared'."""
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
for statement in self.browse(cr, uid, ids, context=context):
statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids
statement_line_ids = map(lambda x: x.id, statement_lines)
statement_line_obj.write(cr, uid, statement_line_ids, {'cleared_bank_account': True}, context=context)
return True
def action_unselect_all(self, cr, uid, ids, context=None):
"""Reset 'Cleared' in all the statement lines."""
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
for statement in self.browse(cr, uid, ids, context=context):
statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids
statement_line_ids = map(lambda x: x.id, statement_lines)
statement_line_obj.write(cr, uid, statement_line_ids, {'cleared_bank_account': False}, context=context)
return True
def _get_balance(self, cr, uid, ids, name, args, context=None):
"""Computed as following:
A) Deposits, Credits, and Interest Amount: Total SUM of Amts of lines with Cleared = True
Deposits, Credits, and Interest # of Items: Total of number of lines with Cleared = True
B) Checks, Withdrawals, Debits, and Service Charges Amount:
Checks, Withdrawals, Debits, and Service Charges Amount # of Items:
Cleared Balance (Total Sum of the Deposit Amount Cleared (A) – Total Sum of Checks Amount Cleared (B))
        Difference = (Ending Balance – Beginning Balance) - Cleared Balance, which should be zero.
"""
res = {}
account_precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
for statement in self.browse(cr, uid, ids, context=context):
res[statement.id] = {
'sum_of_credits': 0.0,
'sum_of_debits': 0.0,
'cleared_balance': 0.0,
'difference': 0.0,
'sum_of_credits_lines': 0.0,
'sum_of_debits_lines': 0.0
}
for line in statement.credit_move_line_ids:
res[statement.id]['sum_of_credits'] += line.cleared_bank_account and round(line.amount, account_precision) or 0.0
res[statement.id]['sum_of_credits_lines'] += line.cleared_bank_account and 1.0 or 0.0
for line in statement.debit_move_line_ids:
res[statement.id]['sum_of_debits'] += line.cleared_bank_account and round(line.amount, account_precision) or 0.0
res[statement.id]['sum_of_debits_lines'] += line.cleared_bank_account and 1.0 or 0.0
res[statement.id]['cleared_balance'] = round(res[statement.id]['sum_of_debits'] - res[statement.id]['sum_of_credits'], account_precision)
res[statement.id]['difference'] = round((statement.ending_balance - statement.starting_balance) - res[statement.id]['cleared_balance'], account_precision)
return res
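    # Worked example (illustration only, not part of the original code): if the cleared
    # deposit/credit lines total 1500.00 and the cleared check/withdrawal lines total
    # 900.00, then cleared_balance = 1500.00 - 900.00 = 600.00; when ending_balance -
    # starting_balance is also 600.00, the computed difference is 0.00 and the statement
    # may be processed.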
def refresh_record(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {}, context=context)
def onchange_account_id(self, cr, uid, ids, account_id, ending_date, suppress_ending_date_filter, context=None):
account_move_line_obj = self.pool.get('account.move.line')
statement_line_obj = self.pool.get('bank.acc.rec.statement.line')
val = {'value': {'credit_move_line_ids': [], 'debit_move_line_ids': []}}
if account_id:
for statement in self.browse(cr, uid, ids, context=context):
statement_line_ids = statement_line_obj.search(cr, uid, [('statement_id', '=', statement.id)], context=context)
# call unlink method to reset and remove existing statement lines and
# mark reset field values in related move lines
statement_line_obj.unlink(cr, uid, statement_line_ids, context=context)
# Apply filter on move lines to allow
#1. credit and debit side journal items in posted state of the selected GL account
#2. Journal items which are not assigned to previous bank statements
                #3. Date less than or equal to ending date provided the 'Suppress Ending Date Filter' is not checked
domain = [('account_id', '=', account_id), ('move_id.state', '=', 'posted'), ('cleared_bank_account', '=', False), ('draft_assigned_to_statement', '=', False)]
if not suppress_ending_date_filter:
domain += [('date', '<=', ending_date)]
line_ids = account_move_line_obj.search(cr, uid, domain, context=context)
for line in account_move_line_obj.browse(cr, uid, line_ids, context=context):
res = {
'ref': line.ref,
'date': line.date,
'partner_id': line.partner_id.id,
'currency_id': line.currency_id.id,
'amount': line.credit or line.debit,
'name': line.name,
'move_line_id': line.id,
'type': line.credit and 'cr' or 'dr'
}
if res['type'] == 'cr':
val['value']['credit_move_line_ids'].append(res)
else:
val['value']['debit_move_line_ids'].append(res)
return val
_name = "bank.acc.rec.statement"
_columns = {
'name': fields.char('Name', required=True, size=64, states={'done':[('readonly', True)]}, help="This is a unique name identifying the statement (e.g. Bank X January 2012)."),
'account_id': fields.many2one('account.account', 'Account', required=True,
states={'done':[('readonly', True)]}, domain="[('company_id', '=', company_id), ('type', '!=', 'view')]",
help="The Bank/Gl Account that is being reconciled."),
'ending_date': fields.date('Ending Date', required=True, states={'done':[('readonly', True)]}, help="The ending date of your bank statement."),
'starting_balance': fields.float('Starting Balance', required=True, digits_compute=dp.get_precision('Account'), help="The Starting Balance on your bank statement.", states={'done':[('readonly', True)]}),
'ending_balance': fields.float('Ending Balance', required=True, digits_compute=dp.get_precision('Account'), help="The Ending Balance on your bank statement.", states={'done':[('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True,
help="The Company for which the deposit ticket is made to"),
'notes': fields.text('Notes'),
'verified_date': fields.date('Verified Date', states={'done':[('readonly', True)]},
help="Date in which Deposit Ticket was verified."),
'verified_by_user_id': fields.many2one('res.users', 'Verified By', states={'done':[('readonly', True)]},
help="Entered automatically by the “last user” who saved it. System generated."),
'credit_move_line_ids': fields.one2many('bank.acc.rec.statement.line', 'statement_id', 'Credits',
domain=[('type','=','cr')], context={'default_type':'cr'}, states={'done':[('readonly', True)]}),
'debit_move_line_ids': fields.one2many('bank.acc.rec.statement.line', 'statement_id', 'Debits',
domain=[('type','=','dr')], context={'default_type':'dr'}, states={'done':[('readonly', True)]}),
'cleared_balance': fields.function(_get_balance, method=True, string='Cleared Balance', digits_compute=dp.get_precision('Account'),
type='float', help="Total Sum of the Deposit Amount Cleared – Total Sum of Checks, Withdrawals, Debits, and Service Charges Amount Cleared",
multi="balance"),
'difference': fields.function(_get_balance, method=True, type='float', string='Difference', digits_compute=dp.get_precision('Account'),
help="(Ending Balance – Beginning Balance) - Cleared Balance.", multi="balance"),
'sum_of_credits': fields.function(_get_balance, method=True, string='Checks, Withdrawals, Debits, and Service Charges Amount', digits_compute=dp.get_precision('Account'),
type='float', help="Total SUM of Amts of lines with Cleared = True",
multi="balance"),
'sum_of_debits': fields.function(_get_balance, method=True, type='float', string='Deposits, Credits, and Interest Amount', digits_compute=dp.get_precision('Account'),
help="Total SUM of Amts of lines with Cleared = True", multi="balance"),
'sum_of_credits_lines': fields.function(_get_balance, method=True, string='Checks, Withdrawals, Debits, and Service Charges # of Items',
type='float', help="Total of number of lines with Cleared = True",
multi="balance"),
'sum_of_debits_lines': fields.function(_get_balance, method=True, type='float', string='Deposits, Credits, and Interest # of Items',
help="Total of number of lines with Cleared = True", multi="balance"),
'suppress_ending_date_filter': fields.boolean('Remove Ending Date Filter', help="If this is checked then the Statement End Date filter on the transactions below will not occur. All transactions would come over."),
'state': fields.selection([
('draft','Draft'),
('to_be_reviewed','Ready for Review'),
('done','Done'),
('cancel', 'Cancel')
],'State', select=True, readonly=True),
}
_defaults = {
'state': 'draft',
'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
'ending_date': time.strftime('%Y-%m-%d'),
}
_order = "ending_date desc"
_sql_constraints = [
('name_company_uniq', 'unique (name, company_id, account_id)', 'The name of the statement must be unique per company and G/L account!')
]
bank_acc_rec_statement()
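# A minimal workflow sketch (not part of the original module). The registry ``pool``,
# cursor ``cr``, user id ``uid`` and ``statement_id`` are assumed to be supplied by the
# OpenERP server environment and to reference an existing draft statement.
def _example_reconcile_statement(pool, cr, uid, statement_id):
    stmt_obj = pool.get('bank.acc.rec.statement')
    stmt_obj.action_select_all(cr, uid, [statement_id])  # mark every line as cleared
    stmt_obj.action_review(cr, uid, [statement_id])      # raises unless difference is 0.0
    stmt_obj.action_process(cr, uid, [statement_id])     # flag journal items and close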
class bank_acc_rec_statement_line(osv.osv):
_name = "bank.acc.rec.statement.line"
_description = "Statement Line"
_columns = {
'name': fields.char('Name', size=64, help="Derived from the related Journal Item.", required=True),
'ref': fields.char('Reference', size=64, help="Derived from related Journal Item."),
'partner_id': fields.many2one('res.partner', string='Partner', help="Derived from related Journal Item."),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account'),
help="Derived from the 'debit' amount from related Journal Item."),
'date': fields.date('Date', required=True, help="Derived from related Journal Item."),
'statement_id': fields.many2one('bank.acc.rec.statement', 'Statement', required=True, ondelete='cascade'),
'move_line_id': fields.many2one('account.move.line', 'Journal Item', help="Related Journal Item."),
'cleared_bank_account': fields.boolean('Cleared? ', help='Check if the transaction has cleared from the bank'),
'research_required': fields.boolean('Research Required? ', help='Check if the transaction should be researched by Accounting personal'),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'type':fields.selection([('dr','Debit'),('cr','Credit')], 'Cr/Dr'),
}
def create(self, cr, uid, vals, context=None):
account_move_line_obj = self.pool.get('account.move.line')
# Prevent manually adding new statement line.
# This would allow only onchange method to pre-populate statement lines based on the filter rules.
if not vals.get('move_line_id', False):
raise osv.except_osv(_('Processing Error'),_('You cannot add any new bank statement line manually as of this revision!'))
account_move_line_obj.write(cr, uid, [vals['move_line_id']], {'draft_assigned_to_statement': True}, context=context)
return super(bank_acc_rec_statement_line, self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
move_line_ids = map(lambda x: x.move_line_id.id, self.browse(cr, uid, ids, context=context))
# Reset field values in move lines to be added later
account_move_line_obj.write(cr, uid, move_line_ids, {'draft_assigned_to_statement': False,
'cleared_bank_account': False,
'bank_acc_rec_statement_id': False,
}, context=context)
return super(bank_acc_rec_statement_line, self).unlink(cr, uid, ids, context=context)
bank_acc_rec_statement_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | gpl-2.0 | -66,504,750,549,658,940 | 3,853,188,595,444,039,000 | 66.776398 | 221 | 0.593319 | false |
robobrobro/ballin-octo-shame | lib/Python-3.4.3/Lib/idlelib/idle_test/test_config_name.py | 93 | 2399 | """Unit tests for idlelib.configSectionNameDialog"""
import unittest
from idlelib.idle_test.mock_tk import Var, Mbox
from idlelib import configSectionNameDialog as name_dialog_module
name_dialog = name_dialog_module.GetCfgSectionNameDialog
class Dummy_name_dialog:
# Mock for testing the following methods of name_dialog
name_ok = name_dialog.name_ok
Ok = name_dialog.Ok
Cancel = name_dialog.Cancel
# Attributes, constant or variable, needed for tests
used_names = ['used']
name = Var()
result = None
destroyed = False
def destroy(self):
self.destroyed = True
# name_ok calls Mbox.showerror if name is not ok
orig_mbox = name_dialog_module.tkMessageBox
showerror = Mbox.showerror
class ConfigNameTest(unittest.TestCase):
dialog = Dummy_name_dialog()
@classmethod
def setUpClass(cls):
name_dialog_module.tkMessageBox = Mbox
@classmethod
def tearDownClass(cls):
name_dialog_module.tkMessageBox = orig_mbox
def test_blank_name(self):
self.dialog.name.set(' ')
self.assertEqual(self.dialog.name_ok(), '')
self.assertEqual(showerror.title, 'Name Error')
self.assertIn('No', showerror.message)
def test_used_name(self):
self.dialog.name.set('used')
self.assertEqual(self.dialog.name_ok(), '')
self.assertEqual(showerror.title, 'Name Error')
self.assertIn('use', showerror.message)
def test_long_name(self):
self.dialog.name.set('good'*8)
self.assertEqual(self.dialog.name_ok(), '')
self.assertEqual(showerror.title, 'Name Error')
self.assertIn('too long', showerror.message)
def test_good_name(self):
self.dialog.name.set(' good ')
showerror.title = 'No Error' # should not be called
self.assertEqual(self.dialog.name_ok(), 'good')
self.assertEqual(showerror.title, 'No Error')
def test_ok(self):
self.dialog.destroyed = False
self.dialog.name.set('good')
self.dialog.Ok()
self.assertEqual(self.dialog.result, 'good')
self.assertTrue(self.dialog.destroyed)
def test_cancel(self):
self.dialog.destroyed = False
self.dialog.Cancel()
self.assertEqual(self.dialog.result, '')
self.assertTrue(self.dialog.destroyed)
if __name__ == '__main__':
unittest.main(verbosity=2, exit=False)
| mit | 7,649,218,311,836,868,000 | 2,559,456,443,131,060,700 | 30.986667 | 65 | 0.661526 | false |
geraldarthur/qgis-openlayers-plugin | openlayers/weblayers/weblayer_registry.py | 5 | 2552 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
OpenLayers Plugin
A QGIS plugin
-------------------
begin : 2009-11-30
copyright : (C) 2009 by Pirmin Kalberer, Sourcepole
email : pka at sourcepole.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from weblayer import WebLayerGroup
class WebLayerTypeRegistry:
"""Registry of OL web Layers"""
def __init__(self, plugin):
self._plugin = plugin
self._groups = {}
self._olLayerTypes = {}
self._layerTypeId = 0 # Sequence for ID
self._olLayerTypeNames = {}
def group(self, name, icon):
"""Create group and register in registry"""
if name not in self._groups:
self._groups[name] = WebLayerGroup(name, icon)
return self._groups[name]
def groups(self):
return self._groups.values()
def register(self, layerType):
layerType.group = self.group(layerType.groupName, layerType.groupIcon)
layerType.setAddLayerCallback(self._plugin.addLayer)
layerType.layerTypeId = self._layerTypeId
self._olLayerTypes[self._layerTypeId] = layerType
self._layerTypeId += 1
self._olLayerTypeNames[layerType.layerTypeName] = layerType
def types(self):
return self._olLayerTypes.values()
def getById(self, id):
if self._olLayerTypes.has_key(id):
return self._olLayerTypes[id]
else:
return None
def getByName(self, name):
if self._olLayerTypeNames.has_key(name):
return self._olLayerTypeNames[name]
else:
return None
def groupLayerTypes(self, group):
lst = []
for lyr in self.types():
if lyr.group == group:
lst.append(lyr)
return lst
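# A short usage sketch (not part of the original module). ``plugin`` stands for the
# OpenLayers plugin instance and ``SomeLayerType`` for any web layer type object carrying
# the attributes used above (groupName, groupIcon, layerTypeName); both are placeholders.
#
# registry = WebLayerTypeRegistry(plugin)
# registry.register(SomeLayerType)
# layer_type = registry.getByName(SomeLayerType.layerTypeName)
# assert registry.getById(layer_type.layerTypeId) is layer_type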
| gpl-2.0 | 7,580,168,996,658,763,000 | -5,686,744,904,259,998,000 | 34.444444 | 78 | 0.47884 | false |
johngian/remo | remo/profiles/migrations/0050_auto__add_field_userprofile_is_rotm_nominee.py | 3 | 11048 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.is_rotm_nominee'
db.add_column(u'profiles_userprofile', 'is_rotm_nominee',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.is_rotm_nominee'
db.delete_column(u'profiles_userprofile', 'is_rotm_nominee')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'profiles.functionalarea': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalArea'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
},
u'profiles.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 11, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'profiles.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users_added'", 'null': 'True', 'to': u"orm['auth.User']"}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'current_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined_program': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
'diaspora_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'first_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_matching'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
'gender': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channels': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'irc_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'is_rotm_nominee': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_unavailable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'jabber_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'local_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'longest_streak_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'longest_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mentor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentees'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'mozillian_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'mozillians_profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'personal_blog_feed': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'personal_website_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'private_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'null': 'True'}),
'receive_email_on_add_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_event_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_voting_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'registration_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'second_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'tracked_functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_tracking'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
'twitter_account': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'unavailability_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'wiki_profile_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
u'profiles.userstatus': {
'Meta': {'ordering': "['-expected_date', '-created_on']", 'object_name': 'UserStatus'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expected_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'replacement_rep': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replaced_rep'", 'null': 'True', 'to': u"orm['auth.User']"}),
'return_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'status'", 'to': u"orm['auth.User']"})
}
}
complete_apps = ['profiles'] | bsd-3-clause | 4,942,017,446,098,254,000 | -4,537,076,547,054,694,400 | 83.992308 | 199 | 0.554761 | false |
jianhuashao/WebDownloadJobsManage | dbs/google_drive/oauth2client/keyring_storage.py | 273 | 3227 | # Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A keyring based Storage.
A Storage for Credentials that uses the keyring module.
"""
__author__ = '[email protected] (Joe Gregorio)'
import keyring
import threading
from client import Storage as BaseStorage
from client import Credentials
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from the keyring.
To use this module you must have the keyring module installed. See
<http://pypi.python.org/pypi/keyring/>. This is an optional module and is not
installed with oauth2client by default because it does not work on all the
platforms that oauth2client supports, such as Google App Engine.
The keyring module <http://pypi.python.org/pypi/keyring/> is a cross-platform
  library for accessing the keyring capabilities of the local system. The user will
be prompted for their keyring password when this module is used, and the
manner in which the user is prompted will vary per platform.
Usage:
from oauth2client.keyring_storage import Storage
s = Storage('name_of_application', 'user1')
credentials = s.get()
"""
def __init__(self, service_name, user_name):
"""Constructor.
Args:
service_name: string, The name of the service under which the credentials
are stored.
user_name: string, The name of the user to store credentials for.
"""
self._service_name = service_name
self._user_name = user_name
self._lock = threading.Lock()
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant."""
self._lock.acquire()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._lock.release()
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
content = keyring.get_password(self._service_name, self._user_name)
if content is not None:
try:
credentials = Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name,
credentials.to_json())
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name, '')
| apache-2.0 | -4,765,218,602,492,156,000 | -8,051,962,396,628,989,000 | 28.605505 | 80 | 0.699721 | false |
gbaty/shiboken2 | tests/samplebinding/ownership_delete_child_in_python_test.py | 6 | 1815 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Shiboken Python Bindings Generator project.
#
# Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
#
# Contact: PySide team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation. Please
# review the following information to ensure the GNU Lesser General
# Public License version 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
# #
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
'''Tests for deleting a child object in python'''
import unittest
import random
import string
from sample import ObjectType
from py3kcompat import IS_PY3K
if IS_PY3K:
string.letters = string.ascii_letters
class DeleteChildInPython(unittest.TestCase):
'''Test case for deleting (unref) a child in python'''
def testDeleteChild(self):
'''Delete child in python should not invalidate child'''
parent = ObjectType()
child = ObjectType(parent)
name = ''.join(random.sample(string.letters, 5))
child.setObjectName(name)
del child
new_child = parent.children()[0]
self.assertEqual(new_child.objectName(), name)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -614,288,523,059,486,800 | -1,025,795,328,053,048,600 | 32 | 70 | 0.72011 | false |
KitKatXperience/platform_external_chromium_org | chrome/test/webdriver/test/chromedriver.py | 41 | 7950 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chrome WebDriver that implements extra Chrome-specific functionality.
This module is experimental and will change and break without warning.
Use at your own risk.
Style Note: Because this is an extension to the WebDriver python API and
since this module will eventually be moved into the webdriver codebase, the
code follows WebDriver naming conventions for functions.
"""
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
class _ViewType(object):
"""Constants representing different web view types in Chrome.
They mirror the enum AutomationId::Type in chrome/common/automation_id.h.
"""
TAB = 1
EXTENSION_POPUP = 2
EXTENSION_BG_PAGE = 3
EXTENSION_INFOBAR = 4
APP_SHELL = 6
class WebDriver(RemoteWebDriver):
"""
Controls Chrome and provides additional Chrome-specific functionality not in
the WebDriver standard.
This class is experimental and subject to change and break without warning.
Use at your own risk.
"""
_CHROME_GET_EXTENSIONS = "chrome.getExtensions"
_CHROME_INSTALL_EXTENSION = "chrome.installExtension"
_CHROME_GET_EXTENSION_INFO = "chrome.getExtensionInfo"
_CHROME_MODIFY_EXTENSION = "chrome.setExtensionState"
_CHROME_UNINSTALL_EXTENSION = "chrome.uninstallExtension"
_CHROME_GET_VIEW_HANDLES = "chrome.getViewHandles"
_CHROME_DUMP_HEAP_PROFILE = "chrome.dumpHeapProfile"
def __init__(self, url, desired_capabilities={}):
"""Creates a WebDriver that controls Chrome via ChromeDriver.
Args:
url: The URL of a running ChromeDriver server.
desired_capabilities: Requested capabilities for the new WebDriver
session.
"""
RemoteWebDriver.__init__(self,
command_executor=url,
desired_capabilities=desired_capabilities)
# Add custom commands.
custom_commands = {
WebDriver._CHROME_GET_EXTENSIONS:
('GET', '/session/$sessionId/chrome/extensions'),
WebDriver._CHROME_INSTALL_EXTENSION:
('POST', '/session/$sessionId/chrome/extensions'),
WebDriver._CHROME_GET_EXTENSION_INFO:
('GET', '/session/$sessionId/chrome/extension/$id'),
WebDriver._CHROME_MODIFY_EXTENSION:
('POST', '/session/$sessionId/chrome/extension/$id'),
WebDriver._CHROME_UNINSTALL_EXTENSION:
('DELETE', '/session/$sessionId/chrome/extension/$id'),
WebDriver._CHROME_GET_VIEW_HANDLES:
('GET', '/session/$sessionId/chrome/views'),
WebDriver._CHROME_DUMP_HEAP_PROFILE:
('POST', '/session/$sessionId/chrome/heapprofilerdump')
}
self.command_executor._commands.update(custom_commands)
def get_installed_extensions(self):
"""Returns a list of installed extensions."""
ids = RemoteWebDriver.execute(
self, WebDriver._CHROME_GET_EXTENSIONS)['value']
return map(lambda id: Extension(self, id), ids)
def install_extension(self, path):
"""Install the extension at the given path.
Args:
path: Path to packed or unpacked extension to install.
Returns:
The installed extension.
"""
params = {'path': path}
id = RemoteWebDriver.execute(
self, WebDriver._CHROME_INSTALL_EXTENSION, params)['value']
return Extension(self, id)
def dump_heap_profile(self, reason):
"""Dumps a heap profile. It works only on Linux and ChromeOS.
We need an environment variable "HEAPPROFILE" set to a directory and a
filename prefix, for example, "/tmp/prof". In a case of this example,
heap profiles will be dumped into "/tmp/prof.(pid).0002.heap",
"/tmp/prof.(pid).0003.heap", and so on. Nothing happens when this
function is called without the env.
Args:
reason: A string which describes the reason for dumping a heap profile.
The reason will be included in the logged message.
Examples:
'To check memory leaking'
'For WebDriver tests'
"""
if self.IsLinux(): # IsLinux() also implies IsChromeOS().
params = {'reason': reason}
RemoteWebDriver.execute(self, WebDriver._CHROME_DUMP_HEAP_PROFILE, params)
else:
raise WebDriverException('Heap-profiling is not supported in this OS.')
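# Illustrative sketch for dump_heap_profile (paths are examples only): the
# HEAPPROFILE environment variable must be set for the browser process before
# it starts, e.g.
#
#   os.environ['HEAPPROFILE'] = '/tmp/prof'  # set before launching Chrome
#   driver.dump_heap_profile('For WebDriver tests')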
class Extension(object):
"""Represents a Chrome extension/app."""
def __init__(self, parent, id):
self._parent = parent
self._id = id
@property
def id(self):
return self._id
def get_name(self):
return self._get_info()['name']
def get_version(self):
return self._get_info()['version']
def is_enabled(self):
return self._get_info()['is_enabled']
def set_enabled(self, value):
self._execute(WebDriver._CHROME_MODIFY_EXTENSION, {'enable': value})
def is_page_action_visible(self):
"""Returns whether the page action is visible in the currently targeted tab.
This will fail if the current target is not a tab.
"""
return self._get_info()['is_page_action_visible']
def uninstall(self):
self._execute(WebDriver._CHROME_UNINSTALL_EXTENSION)
def click_browser_action(self):
"""Clicks the browser action in the currently targeted tab.
This will fail if the current target is not a tab.
"""
self._execute(WebDriver._CHROME_MODIFY_EXTENSION,
{'click_button': 'browser_action'})
def click_page_action(self):
"""Clicks the page action in the currently targeted tab.
This will fail if the current target is not a tab.
"""
self._execute(WebDriver._CHROME_MODIFY_EXTENSION,
{'click_button': 'page_action'})
def get_app_shell_handle(self):
"""Returns the window handle for the app shell."""
return self._get_handle(_ViewType.APP_SHELL)
def get_bg_page_handle(self):
"""Returns the window handle for the background page."""
return self._get_handle(_ViewType.EXTENSION_BG_PAGE)
def get_popup_handle(self):
"""Returns the window handle for the open browser/page action popup."""
return self._get_handle(_ViewType.EXTENSION_POPUP)
def get_infobar_handles(self):
"""Returns a list of window handles for all open infobars of this extension.
This handle can be used with |WebDriver.switch_to_window|.
"""
infobars = filter(lambda view: view['type'] == _ViewType.EXTENSION_INFOBAR,
self._get_views())
return map(lambda view: view['handle'], infobars)
def _get_handle(self, type):
"""Returns the window handle for the page of given type.
This handle can be used with |WebDriver.switch_to_window|.
Args:
type: The type of the window as defined in _ViewType.
Returns:
The window handle, or None if there is no page with the given type.
"""
pages = filter(lambda view: view['type'] == type, self._get_views())
if len(pages) > 0:
return pages[0]['handle']
return None
def _get_info(self):
"""Returns a dictionary of all this extension's info."""
return self._execute(WebDriver._CHROME_GET_EXTENSION_INFO)['value']
def _get_views(self):
"""Returns a list of view information for this extension."""
views = self._parent.execute(WebDriver._CHROME_GET_VIEW_HANDLES)['value']
ext_views = []
for view in views:
if 'extension_id' in view and view['extension_id'] == self._id:
ext_views += [view]
return ext_views
def _execute(self, command, params=None):
"""Executes a command against the underlying extension.
Args:
command: The name of the command to execute.
params: A dictionary of named parameters to send with the command.
Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {}
params['id'] = self._id
return self._parent.execute(command, params)
| bsd-3-clause | -2,925,578,753,430,130,700 | -7,046,293,664,439,723,000 | 32.829787 | 80 | 0.681887 | false |
porjo/docker | docs/docs-update.py | 12 | 8230 | #!/usr/bin/env python
#
# Sven's quick hack script to update the documentation
#
# call with:
# ./docs/update.py /usr/bin/docker
#
import datetime
import re
from sys import argv
import subprocess
import os
import os.path
script, docker_cmd = argv
date_string = datetime.date.today().strftime('%B %Y')
def print_usage(outtext, docker_cmd, command):
try:
help_string = subprocess.check_output(
"".join((docker_cmd, " ", command, " --help")),
stderr=subprocess.STDOUT,
shell=True
)
except subprocess.CalledProcessError, e:
help_string = e.output
for l in str(help_string).strip().split("\n"):
l = l.rstrip()
if l == '':
outtext.write("\n")
else:
# `docker --help` tells the user the path they called it with
l = re.sub(docker_cmd, "docker", l)
outtext.write(" {}\n".format(l))
outtext.write("\n")
# TODO: look for and complain about any missing commands
def update_cli_reference():
originalFile = "docs/sources/reference/commandline/cli.md"
os.rename(originalFile, originalFile+".bak")
intext = open("{}.bak".format(originalFile), "r")
outtext = open(originalFile, "w")
mode = 'p'
space = " "
command = ""
    # Two-mode, line-by-line parser
for line in intext:
if mode == 'p':
# Prose
match = re.match("( \s*)Usage: docker ([a-z]+)", line)
if match:
                # the beginning of a Docker command usage block
space = match.group(1)
command = match.group(2)
mode = 'c'
else:
match = re.match("( \s*)Usage of .*docker.*:", line)
if match:
                    # the beginning of the Docker --help usage block
space = match.group(1)
command = ""
mode = 'c'
else:
outtext.write(line)
else:
# command usage block
match = re.match("("+space+")(.*)|^$", line)
if not match:
# The end of the current usage block
# Shell out to run docker to see the new output
print_usage(outtext, docker_cmd, command)
outtext.write(line)
mode = 'p'
if mode == 'c':
print_usage(outtext, docker_cmd, command)
def update_man_pages():
cmds = []
try:
help_string = subprocess.check_output(
"".join((docker_cmd)),
stderr=subprocess.STDOUT,
shell=True
)
except subprocess.CalledProcessError, e:
help_string = e.output
for l in str(help_string).strip().split("\n"):
l = l.rstrip()
if l != "":
match = re.match(" (.*?) .*", l)
if match:
cmds.append(match.group(1))
desc_re = re.compile(
r".*# DESCRIPTION(.*?)# (OPTIONS|EXAMPLES?).*",
re.MULTILINE | re.DOTALL
)
example_re = re.compile(
r".*# EXAMPLES?(.*)# HISTORY.*",
re.MULTILINE | re.DOTALL
)
history_re = re.compile(
r".*# HISTORY(.*)",
re.MULTILINE | re.DOTALL
)
for command in cmds:
print "COMMAND: "+command
history = ""
description = ""
examples = ""
if os.path.isfile("docs/man/docker-"+command+".1.md"):
intext = open("docs/man/docker-"+command+".1.md", "r")
txt = intext.read()
intext.close()
match = desc_re.match(txt)
if match:
description = match.group(1)
match = example_re.match(txt)
if match:
examples = match.group(1)
match = history_re.match(txt)
if match:
history = match.group(1).strip()
usage = ""
usage_description = ""
params = {}
key_params = {}
try:
help_string = subprocess.check_output(
"".join((docker_cmd, " ", command, " --help")),
stderr=subprocess.STDOUT,
shell=True
)
except subprocess.CalledProcessError, e:
help_string = e.output
last_key = ""
        for l in str(help_string).split("\n"):
l = l.rstrip()
if l != "":
match = re.match("Usage: docker {}(.*)".format(command), l)
if match:
usage = match.group(1).strip()
else:
match = re.match(" (-+)(.*) \s+(.*)", l)
if match:
last_key = match.group(2).rstrip()
key_params[last_key] = match.group(1)+last_key
params[last_key] = match.group(3)
else:
if last_key != "":
params[last_key] = "{}\n{}".format(params[last_key], l)
else:
if usage_description != "":
usage_description = usage_description + "\n"
usage_description = usage_description + l
# replace [OPTIONS] with the list of params
options = ""
match = re.match("\[OPTIONS\](.*)", usage)
if match:
usage = match.group(1)
new_usage = ""
# TODO: sort without the `-`'s
for key in sorted(params.keys(), key=lambda s: s.lower()):
# split on commas, remove --?.*=.*, put in *'s mumble
ps = []
opts = []
for k in key_params[key].split(","):
match = re.match("(-+)([A-Za-z-0-9]*)(?:=(.*))?", k.lstrip())
if match:
p = "**{}{}**".format(match.group(1), match.group(2))
o = "**{}{}**".format(match.group(1), match.group(2))
if match.group(3):
val = match.group(3)
if val == "\"\"":
val = match.group(2).upper()
p = "{}[=*{}*]".format(p, val)
val = match.group(3)
if val in ("true", "false"):
params[key] = params[key].rstrip()
if not params[key].endswith('.'):
params[key] = params[key]+ "."
params[key] = "{} The default is *{}*.".format(params[key], val)
val = "*true*|*false*"
o = "{}={}".format(o, val)
ps.append(p)
opts.append(o)
else:
print "nomatch:{}".format(k)
new_usage = "{}\n[{}]".format(new_usage, "|".join(ps))
options = "{}{}\n {}\n\n".format(options, ", ".join(opts), params[key])
if new_usage != "":
new_usage = "{}\n".format(new_usage.strip())
usage = new_usage + usage
outtext = open("docs/man/docker-{}.1.md".format(command), "w")
outtext.write("""% DOCKER(1) Docker User Manuals
% Docker Community
% JUNE 2014
# NAME
""")
outtext.write("docker-{} - {}\n\n".format(command, usage_description))
outtext.write("# SYNOPSIS\n**docker {}**\n{}\n\n".format(command, usage))
if description != "":
outtext.write("# DESCRIPTION{}".format(description))
if options == "":
options = "There are no available options.\n\n"
outtext.write("# OPTIONS\n{}".format(options))
if examples != "":
outtext.write("# EXAMPLES{}".format(examples))
outtext.write("# HISTORY\n")
if history != "":
outtext.write("{}\n".format(history))
recent_history_re = re.compile(
".*{}.*".format(date_string),
re.MULTILINE | re.DOTALL
)
if not recent_history_re.match(history):
outtext.write("{}, updated by Sven Dowideit <[email protected]>\n".format(date_string))
outtext.close()
# main
update_cli_reference()
update_man_pages()
| apache-2.0 | -5,018,599,104,881,690,000 | 7,830,254,827,522,822,000 | 33.435146 | 106 | 0.464277 | false |
bclau/nova | nova/availability_zones.py | 10 | 4790 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Availability zone helper functions."""
from oslo.config import cfg
from nova import db
from nova.openstack.common import memorycache
# NOTE(vish): azs don't change that often, so cache them for an hour to
# avoid hitting the db multiple times on every request.
AZ_CACHE_SECONDS = 60 * 60
MC = None
availability_zone_opts = [
cfg.StrOpt('internal_service_availability_zone',
default='internal',
help='availability_zone to show internal services under'),
cfg.StrOpt('default_availability_zone',
default='nova',
help='default compute node availability_zone'),
]
CONF = cfg.CONF
CONF.register_opts(availability_zone_opts)
def _get_cache():
global MC
if MC is None:
MC = memorycache.get_client()
return MC
def reset_cache():
"""Reset the cache, mainly for testing purposes and update
availability_zone for host aggregate
"""
global MC
MC = None
def _make_cache_key(host):
return "azcache-%s" % host.encode('utf-8')
def set_availability_zones(context, services):
# Makes sure services isn't a sqlalchemy object
services = [dict(service.iteritems()) for service in services]
metadata = db.aggregate_host_get_by_metadata_key(context,
key='availability_zone')
for service in services:
az = CONF.internal_service_availability_zone
if service['topic'] == "compute":
if metadata.get(service['host']):
az = u','.join(list(metadata[service['host']]))
else:
az = CONF.default_availability_zone
# update the cache
cache = _get_cache()
cache_key = _make_cache_key(service['host'])
cache.delete(cache_key)
cache.set(cache_key, az, AZ_CACHE_SECONDS)
service['availability_zone'] = az
return services
def get_host_availability_zone(context, host, conductor_api=None):
if conductor_api:
metadata = conductor_api.aggregate_metadata_get_by_host(
context, host, key='availability_zone')
else:
metadata = db.aggregate_metadata_get_by_host(
context, host, key='availability_zone')
if 'availability_zone' in metadata:
az = list(metadata['availability_zone'])[0]
else:
az = CONF.default_availability_zone
return az
def get_availability_zones(context, get_only_available=False):
"""Return available and unavailable zones on demands.
:param get_only_available: flag to determine whether to return
available zones only, default False indicates return both
available zones and not available zones, True indicates return
available zones only
"""
enabled_services = db.service_get_all(context, False)
enabled_services = set_availability_zones(context, enabled_services)
available_zones = []
for zone in [service['availability_zone'] for service
in enabled_services]:
if zone not in available_zones:
available_zones.append(zone)
if not get_only_available:
disabled_services = db.service_get_all(context, True)
disabled_services = set_availability_zones(context, disabled_services)
not_available_zones = []
zones = [service['availability_zone'] for service in disabled_services
if service['availability_zone'] not in available_zones]
for zone in zones:
if zone not in not_available_zones:
not_available_zones.append(zone)
return (available_zones, not_available_zones)
else:
return available_zones
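# Illustrative sketch of the two calling modes described above (the request
# context `ctxt` is assumed to exist):
#
#   available, not_available = get_availability_zones(ctxt)
#   only_available = get_availability_zones(ctxt, get_only_available=True)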
def get_instance_availability_zone(context, instance):
"""Return availability zone of specified instance."""
host = str(instance.get('host'))
if not host:
return None
cache_key = _make_cache_key(host)
cache = _get_cache()
az = cache.get(cache_key)
if not az:
elevated = context.elevated()
az = get_host_availability_zone(elevated, host)
cache.set(cache_key, az, AZ_CACHE_SECONDS)
return az
| apache-2.0 | -7,156,829,483,964,665,000 | -5,470,502,609,527,878,000 | 32.496503 | 78 | 0.650522 | false |
alyosha1879/ryu | ryu/utils.py | 6 | 4894 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import logging
import os
import sys
import re
LOG = logging.getLogger('ryu.utils')
def chop_py_suffix(p):
for suf in ['.py', '.pyc', '.pyo']:
if p.endswith(suf):
return p[:-len(suf)]
return p
def _likely_same(a, b):
try:
        # os.path.samefile is not available on Windows
if sys.platform == 'win32':
if os.stat(a) == os.stat(b):
return True
else:
if os.path.samefile(a, b):
return True
except OSError:
# m.__file__ is not always accessible. eg. egg
return False
if chop_py_suffix(a) == chop_py_suffix(b):
return True
return False
def _find_loaded_module(modpath):
# copy() to avoid RuntimeError: dictionary changed size during iteration
for k, m in sys.modules.copy().iteritems():
if k == '__main__':
continue
if not hasattr(m, '__file__'):
continue
if _likely_same(m.__file__, modpath):
return m
return None
def import_module(modname):
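    """Import and return a module, treating modname as a file path if a
    plain import by name fails."""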
try:
__import__(modname)
except:
abspath = os.path.abspath(modname)
mod = _find_loaded_module(abspath)
if mod:
return mod
opath = sys.path
sys.path.append(os.path.dirname(abspath))
name = os.path.basename(modname)
if name.endswith('.py'):
name = name[:-3]
__import__(name)
sys.path = opath
return sys.modules[name]
return sys.modules[modname]
def round_up(x, y):
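    """Round x up to the nearest multiple of y (integer arithmetic)."""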
return ((x + y - 1) / y) * y
def _str_to_hex(data):
"""Convert string into array of hexes to be printed."""
return ' '.join(hex(ord(char)) for char in data)
def _bytearray_to_hex(data):
"""Convert bytearray into array of hexes to be printed."""
return ' '.join(hex(byte) for byte in data)
def hex_array(data):
"""Convert string or bytearray into array of hexes to be printed."""
to_hex = {str: _str_to_hex,
bytearray: _bytearray_to_hex}
try:
return to_hex[type(data)](data)
except KeyError:
LOG.exception('%s is invalid data type' % type(data))
return None
# the following functions are taken from OpenStack
#
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
with open(requirements_file, 'r') as fil:
return fil.read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
else:
requirements.append(line)
return requirements
| apache-2.0 | 6,351,393,442,041,270,000 | 4,266,039,841,581,119,500 | 30.574194 | 78 | 0.612383 | false |
caldwell/servo | tests/wpt/css-tests/tools/wptserve/tests/functional/base.py | 293 | 1831 | import base64
import logging
import os
import unittest
import urllib
import urllib2
import urlparse
import wptserve
logging.basicConfig()
here = os.path.split(__file__)[0]
doc_root = os.path.join(here, "docroot")
class Request(urllib2.Request):
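    """urllib2.Request variant whose HTTP method can be set explicitly, since
    the stock class only infers GET or POST from the presence of a body."""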
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self.method = "GET"
def get_method(self):
return self.method
def add_data(self, data):
if hasattr(data, "iteritems"):
data = urllib.urlencode(data)
print data
self.add_header("Content-Length", str(len(data)))
urllib2.Request.add_data(self, data)
class TestUsingServer(unittest.TestCase):
def setUp(self):
self.server = wptserve.server.WebTestHttpd(host="localhost",
port=0,
use_ssl=False,
certificate=None,
doc_root=doc_root)
self.server.start(False)
def tearDown(self):
self.server.stop()
def abs_url(self, path, query=None):
return urlparse.urlunsplit(("http", "%s:%i" % (self.server.host, self.server.port), path, query, None))
def request(self, path, query=None, method="GET", headers=None, body=None, auth=None):
req = Request(self.abs_url(path, query))
req.method = method
if headers is None:
headers = {}
for name, value in headers.iteritems():
req.add_header(name, value)
if body is not None:
req.add_data(body)
if auth is not None:
req.add_header("Authorization", "Basic %s" % base64.encodestring('%s:%s' % auth))
return urllib2.urlopen(req)
| mpl-2.0 | 1,577,345,994,934,407,200 | 9,188,201,244,591,402,000 | 29.016393 | 111 | 0.555434 | false |
akash1808/nova_test_latest | nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py | 33 | 1894 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import platform
import mock
from os_brick.initiator import connector
from nova.compute import arch
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import fibrechannel
class LibvirtFibreChannelVolumeDriverTestCase(
test_volume.LibvirtVolumeBaseTestCase):
def test_libvirt_fibrechan_driver(self):
libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
self.fake_conn)
self.assertIsInstance(libvirt_driver.connector,
connector.FibreChannelConnector)
def _test_libvirt_fibrechan_driver_s390(self):
libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
self.fake_conn)
self.assertIsInstance(libvirt_driver.connector,
connector.FibreChannelConnectorS390X)
@mock.patch.object(platform, 'machine', return_value=arch.S390)
def test_libvirt_fibrechan_driver_s390(self, mock_machine):
self._test_libvirt_fibrechan_driver_s390()
@mock.patch.object(platform, 'machine', return_value=arch.S390X)
def test_libvirt_fibrechan_driver_s390x(self, mock_machine):
self._test_libvirt_fibrechan_driver_s390()
| apache-2.0 | 5,381,302,398,741,768,000 | -7,154,179,400,933,660,000 | 42.045455 | 79 | 0.681626 | false |
storm-computers/odoo | addons/account_budget/wizard/account_budget_report.py | 47 | 1121 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import fields, osv
class account_budget_report(osv.osv_memory):
_name = "account.budget.report"
_description = "Account Budget report for analytic account"
_columns = {
'date_from': fields.date('Start of period', required=True),
'date_to': fields.date('End of period', required=True),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-01-01'),
'date_to': lambda *a: time.strftime('%Y-%m-%d'),
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
datas = {
'ids': context.get('active_ids', []),
'model': 'account.budget.post',
'form': data
}
datas['form']['ids'] = datas['ids']
datas['form']['report'] = 'analytic-full'
return self.pool['report'].get_action(cr, uid, [], 'account_budget.report_budget', data=datas, context=context)
| agpl-3.0 | -3,091,201,618,348,338,700 | -1,810,794,292,046,445,800 | 34.03125 | 119 | 0.581624 | false |
venkey-ariv/fullerite | src/diamond/collectors/nginx/nginx.py | 6 | 3290 | # coding=utf-8
"""
Collect statistics from Nginx
#### Dependencies
* urllib2
#### Usage
To enable the nginx status page to work with defaults,
add a file to /etc/nginx/sites-enabled/ (on Ubuntu) with the
following content:
<pre>
server {
listen 127.0.0.1:8080;
server_name localhost;
location /nginx_status {
stub_status on;
access_log /data/server/shared/log/access.log;
allow 127.0.0.1;
deny all;
}
}
</pre>
"""
import urllib2
import re
import diamond.collector
class NginxCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(NginxCollector, self).get_default_config_help()
config_help.update({
'req_host': 'Hostname',
'req_port': 'Port',
'req_path': 'Path',
})
return config_help
def get_default_config(self):
default_config = super(NginxCollector, self).get_default_config()
default_config['req_host'] = 'localhost'
default_config['req_port'] = 8080
default_config['req_path'] = '/nginx_status'
default_config['path'] = 'nginx'
return default_config
def collect(self):
url = 'http://%s:%i%s' % (self.config['req_host'],
int(self.config['req_port']),
self.config['req_path'])
activeConnectionsRE = re.compile(r'Active connections: (?P<conn>\d+)')
totalConnectionsRE = re.compile('^\s+(?P<conn>\d+)\s+'
+ '(?P<acc>\d+)\s+(?P<req>\d+)')
connectionStatusRE = re.compile('Reading: (?P<reading>\d+) '
+ 'Writing: (?P<writing>\d+) '
+ 'Waiting: (?P<waiting>\d+)')
req = urllib2.Request(url)
try:
handle = urllib2.urlopen(req)
for l in handle.readlines():
l = l.rstrip('\r\n')
if activeConnectionsRE.match(l):
self.publish_gauge(
'nginx.active_connections',
int(activeConnectionsRE.match(l).group('conn')))
elif totalConnectionsRE.match(l):
m = totalConnectionsRE.match(l)
req_per_conn = float(m.group('req')) / float(m.group('acc'))
self.publish_cumulative_counter('nginx.conn_accepted', int(m.group('conn')))
self.publish_cumulative_counter('nginx.conn_handled', int(m.group('acc')))
self.publish_cumulative_counter('nginx.req_handled', int(m.group('req')))
self.publish_gauge('nginx.req_per_conn', float(req_per_conn))
elif connectionStatusRE.match(l):
m = connectionStatusRE.match(l)
self.publish_gauge('nginx.act_reads', int(m.group('reading')))
self.publish_gauge('nginx.act_writes', int(m.group('writing')))
self.publish_gauge('nginx.act_waits', int(m.group('waiting')))
except IOError, e:
self.log.error("Unable to open %s" % url)
except Exception, e:
self.log.error("Unknown error opening url: %s", e)
| apache-2.0 | 606,747,550,062,783,700 | 9,094,689,981,025,083,000 | 36.386364 | 96 | 0.532827 | false |
wschenck/nest-simulator | extras/ConnPlotter/examples/non_dale.py | 20 | 2836 | # -*- coding: utf-8 -*-
#
# non_dale.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
Non-Dale example model.
Two layer A, B, with single population each.
Both layers make excitatory and inhibitory projections
to each other, violating Dale's law.
Build with
ConnectionPattern(..., ..., synTypes=(((SynType('exc', 1.0, 'b'),
SynType('inh', -1.0, 'r')),)))
"""
def non_dale():
"""
Build lists representing non-Dale network model.
Returns:
layerList, connectList, modelList
"""
def modCopy(orig, diff):
"""Create copy of dict orig, update with diff, return."""
assert (isinstance(orig, dict))
assert (isinstance(diff, dict))
tmp = orig.copy()
tmp.update(diff)
return tmp
N = 40
modelList = []
layerList = [('A', 'iaf_psc_alpha', [N, N], [1., 1.]),
('B', 'iaf_psc_alpha', [N, N], [1., 1.])]
common_connspec = {'rule': 'pairwise_bernoulli'}
common_synspec = {'synapse_model': 'static_synapse',
'delay': 1.0}
connectList = [
('A', 'B',
modCopy(common_connspec, {'mask': {'circular': {'radius': 0.2}}, 'p': 0.8}),
modCopy(common_synspec, {'weight': 2.0})),
('A', 'B',
modCopy(common_connspec, {'mask': {'circular': {'radius': 0.3}}, 'p': 0.4}),
modCopy(common_synspec, {'weight': -2.0})),
('B', 'A',
modCopy(common_connspec, {'mask': {'rectangular':
{'lower_left': [-0.4, -0.2],
'upper_right': [0.4, 0.2]}},
'p': 1.0}),
modCopy(common_synspec, {'weight': 2.0})),
('B', 'A',
modCopy(common_connspec, {'mask': {'rectangular':
{'lower_left': [-0.2, -0.4],
'upper_right': [0.2, 0.4]}},
'p': 1.0}),
modCopy(common_synspec, {'weight': -2.0})),
]
return layerList, connectList, modelList
| gpl-2.0 | -792,840,109,552,527,400 | 2,602,076,484,055,177,700 | 31.976744 | 85 | 0.536671 | false |
bentilly/heroes | lib/flask/testsuite/helpers.py | 405 | 21973 | # -*- coding: utf-8 -*-
"""
flask.testsuite.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import flask
import unittest
from logging import StreamHandler
from flask.testsuite import FlaskTestCase, catch_warnings, catch_stderr
from werkzeug.http import parse_cache_control_header, parse_options_header
from flask._compat import StringIO, text_type
def has_encoding(name):
try:
import codecs
codecs.lookup(name)
return True
except LookupError:
return False
class JSONTestCase(FlaskTestCase):
def test_json_bad_requests(self):
app = flask.Flask(__name__)
@app.route('/json', methods=['POST'])
def return_json():
return flask.jsonify(foo=text_type(flask.request.get_json()))
c = app.test_client()
rv = c.post('/json', data='malformed', content_type='application/json')
self.assert_equal(rv.status_code, 400)
def test_json_body_encoding(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.get_json()
c = app.test_client()
resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
content_type='application/json; charset=iso-8859-15')
self.assert_equal(resp.data, u'Hällo Wörld'.encode('utf-8'))
def test_jsonify(self):
d = dict(a=23, b=42, c=[1, 2, 3])
app = flask.Flask(__name__)
@app.route('/kw')
def return_kwargs():
return flask.jsonify(**d)
@app.route('/dict')
def return_dict():
return flask.jsonify(d)
c = app.test_client()
for url in '/kw', '/dict':
rv = c.get(url)
self.assert_equal(rv.mimetype, 'application/json')
self.assert_equal(flask.json.loads(rv.data), d)
def test_json_as_unicode(self):
app = flask.Flask(__name__)
app.config['JSON_AS_ASCII'] = True
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
self.assert_equal(rv, '"\\u2603"')
app.config['JSON_AS_ASCII'] = False
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
self.assert_equal(rv, u'"\u2603"')
def test_json_attr(self):
app = flask.Flask(__name__)
@app.route('/add', methods=['POST'])
def add():
json = flask.request.get_json()
return text_type(json['a'] + json['b'])
c = app.test_client()
rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
content_type='application/json')
self.assert_equal(rv.data, b'3')
def test_template_escaping(self):
app = flask.Flask(__name__)
render = flask.render_template_string
with app.test_request_context():
rv = flask.json.htmlsafe_dumps('</script>')
self.assert_equal(rv, u'"\\u003c/script\\u003e"')
self.assert_equal(type(rv), text_type)
rv = render('{{ "</script>"|tojson }}')
self.assert_equal(rv, '"\\u003c/script\\u003e"')
rv = render('{{ "<\0/script>"|tojson }}')
self.assert_equal(rv, '"\\u003c\\u0000/script\\u003e"')
rv = render('{{ "<!--<script>"|tojson }}')
self.assert_equal(rv, '"\\u003c!--\\u003cscript\\u003e"')
rv = render('{{ "&"|tojson }}')
self.assert_equal(rv, '"\\u0026"')
rv = render('{{ "\'"|tojson }}')
self.assert_equal(rv, '"\\u0027"')
rv = render("<a ng-data='{{ data|tojson }}'></a>",
data={'x': ["foo", "bar", "baz'"]})
self.assert_equal(rv,
'<a ng-data=\'{"x": ["foo", "bar", "baz\\u0027"]}\'></a>')
def test_json_customization(self):
class X(object):
def __init__(self, val):
self.val = val
class MyEncoder(flask.json.JSONEncoder):
def default(self, o):
if isinstance(o, X):
return '<%d>' % o.val
return flask.json.JSONEncoder.default(self, o)
class MyDecoder(flask.json.JSONDecoder):
def __init__(self, *args, **kwargs):
kwargs.setdefault('object_hook', self.object_hook)
flask.json.JSONDecoder.__init__(self, *args, **kwargs)
def object_hook(self, obj):
if len(obj) == 1 and '_foo' in obj:
return X(obj['_foo'])
return obj
app = flask.Flask(__name__)
app.testing = True
app.json_encoder = MyEncoder
app.json_decoder = MyDecoder
@app.route('/', methods=['POST'])
def index():
return flask.json.dumps(flask.request.get_json()['x'])
c = app.test_client()
rv = c.post('/', data=flask.json.dumps({
'x': {'_foo': 42}
}), content_type='application/json')
self.assertEqual(rv.data, b'"<42>"')
def test_modified_url_encoding(self):
class ModifiedRequest(flask.Request):
url_charset = 'euc-kr'
app = flask.Flask(__name__)
app.testing = True
app.request_class = ModifiedRequest
app.url_map.charset = 'euc-kr'
@app.route('/')
def index():
return flask.request.args['foo']
rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, u'정상처리'.encode('utf-8'))
if not has_encoding('euc-kr'):
test_modified_url_encoding = None
def test_json_key_sorting(self):
app = flask.Flask(__name__)
app.testing = True
self.assert_equal(app.config['JSON_SORT_KEYS'], True)
d = dict.fromkeys(range(20), 'foo')
@app.route('/')
def index():
return flask.jsonify(values=d)
c = app.test_client()
rv = c.get('/')
lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
self.assert_equal(lines, [
'{',
'"values": {',
'"0": "foo",',
'"1": "foo",',
'"2": "foo",',
'"3": "foo",',
'"4": "foo",',
'"5": "foo",',
'"6": "foo",',
'"7": "foo",',
'"8": "foo",',
'"9": "foo",',
'"10": "foo",',
'"11": "foo",',
'"12": "foo",',
'"13": "foo",',
'"14": "foo",',
'"15": "foo",',
'"16": "foo",',
'"17": "foo",',
'"18": "foo",',
'"19": "foo"',
'}',
'}'
])
class SendfileTestCase(FlaskTestCase):
def test_send_file_regular(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.send_file('static/index.html')
self.assert_true(rv.direct_passthrough)
self.assert_equal(rv.mimetype, 'text/html')
with app.open_resource('static/index.html') as f:
rv.direct_passthrough = False
self.assert_equal(rv.data, f.read())
rv.close()
def test_send_file_xsendfile(self):
app = flask.Flask(__name__)
app.use_x_sendfile = True
with app.test_request_context():
rv = flask.send_file('static/index.html')
self.assert_true(rv.direct_passthrough)
self.assert_in('x-sendfile', rv.headers)
self.assert_equal(rv.headers['x-sendfile'],
os.path.join(app.root_path, 'static/index.html'))
self.assert_equal(rv.mimetype, 'text/html')
rv.close()
def test_send_file_object(self):
app = flask.Flask(__name__)
with catch_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f)
rv.direct_passthrough = False
with app.open_resource('static/index.html') as f:
self.assert_equal(rv.data, f.read())
self.assert_equal(rv.mimetype, 'text/html')
rv.close()
# mimetypes + etag
self.assert_equal(len(captured), 2)
app.use_x_sendfile = True
with catch_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f)
self.assert_equal(rv.mimetype, 'text/html')
self.assert_in('x-sendfile', rv.headers)
self.assert_equal(rv.headers['x-sendfile'],
os.path.join(app.root_path, 'static/index.html'))
rv.close()
# mimetypes + etag
self.assert_equal(len(captured), 2)
app.use_x_sendfile = False
with app.test_request_context():
with catch_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f)
rv.direct_passthrough = False
self.assert_equal(rv.data, b'Test')
self.assert_equal(rv.mimetype, 'application/octet-stream')
rv.close()
# etags
self.assert_equal(len(captured), 1)
with catch_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f, mimetype='text/plain')
rv.direct_passthrough = False
self.assert_equal(rv.data, b'Test')
self.assert_equal(rv.mimetype, 'text/plain')
rv.close()
# etags
self.assert_equal(len(captured), 1)
app.use_x_sendfile = True
with catch_warnings() as captured:
with app.test_request_context():
f = StringIO('Test')
rv = flask.send_file(f)
self.assert_not_in('x-sendfile', rv.headers)
rv.close()
# etags
self.assert_equal(len(captured), 1)
def test_attachment(self):
app = flask.Flask(__name__)
with catch_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f, as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
self.assert_equal(value, 'attachment')
rv.close()
# mimetypes + etag
self.assert_equal(len(captured), 2)
with app.test_request_context():
self.assert_equal(options['filename'], 'index.html')
rv = flask.send_file('static/index.html', as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
self.assert_equal(value, 'attachment')
self.assert_equal(options['filename'], 'index.html')
rv.close()
with app.test_request_context():
rv = flask.send_file(StringIO('Test'), as_attachment=True,
attachment_filename='index.txt',
add_etags=False)
self.assert_equal(rv.mimetype, 'text/plain')
value, options = parse_options_header(rv.headers['Content-Disposition'])
self.assert_equal(value, 'attachment')
self.assert_equal(options['filename'], 'index.txt')
rv.close()
def test_static_file(self):
app = flask.Flask(__name__)
# default cache timeout is 12 hours
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 12 * 60 * 60)
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 12 * 60 * 60)
rv.close()
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 3600)
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 3600)
rv.close()
class StaticFileApp(flask.Flask):
def get_send_file_max_age(self, filename):
return 10
app = StaticFileApp(__name__)
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 10)
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 10)
rv.close()
class LoggingTestCase(FlaskTestCase):
def test_logger_cache(self):
app = flask.Flask(__name__)
logger1 = app.logger
self.assert_true(app.logger is logger1)
self.assert_equal(logger1.name, __name__)
app.logger_name = __name__ + '/test_logger_cache'
self.assert_true(app.logger is not logger1)
def test_debug_log(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
app.logger.warning('the standard library is dead')
app.logger.debug('this is a debug statement')
return ''
@app.route('/exc')
def exc():
1 // 0
with app.test_client() as c:
with catch_stderr() as err:
c.get('/')
out = err.getvalue()
self.assert_in('WARNING in helpers [', out)
self.assert_in(os.path.basename(__file__.rsplit('.', 1)[0] + '.py'), out)
self.assert_in('the standard library is dead', out)
self.assert_in('this is a debug statement', out)
with catch_stderr() as err:
try:
c.get('/exc')
except ZeroDivisionError:
pass
else:
self.assert_true(False, 'debug log ate the exception')
def test_debug_log_override(self):
app = flask.Flask(__name__)
app.debug = True
app.logger_name = 'flask_tests/test_debug_log_override'
app.logger.level = 10
self.assert_equal(app.logger.level, 10)
def test_exception_logging(self):
out = StringIO()
app = flask.Flask(__name__)
app.logger_name = 'flask_tests/test_exception_logging'
app.logger.addHandler(StreamHandler(out))
@app.route('/')
def index():
1 // 0
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_in(b'Internal Server Error', rv.data)
err = out.getvalue()
self.assert_in('Exception on / [GET]', err)
self.assert_in('Traceback (most recent call last):', err)
self.assert_in('1 // 0', err)
self.assert_in('ZeroDivisionError:', err)
def test_processor_exceptions(self):
app = flask.Flask(__name__)
@app.before_request
def before_request():
if trigger == 'before':
1 // 0
@app.after_request
def after_request(response):
if trigger == 'after':
1 // 0
return response
@app.route('/')
def index():
return 'Foo'
@app.errorhandler(500)
def internal_server_error(e):
return 'Hello Server Error', 500
for trigger in 'before', 'after':
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_equal(rv.data, b'Hello Server Error')
def test_url_for_with_anchor(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_equal(flask.url_for('index', _anchor='x y'),
'/#x%20y')
def test_url_for_with_scheme(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_equal(flask.url_for('index',
_external=True,
_scheme='https'),
'https://localhost/')
def test_url_for_with_scheme_not_external(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_raises(ValueError,
flask.url_for,
'index',
_scheme='https')
def test_url_with_method(self):
from flask.views import MethodView
app = flask.Flask(__name__)
class MyView(MethodView):
def get(self, id=None):
if id is None:
return 'List'
return 'Get %d' % id
def post(self):
return 'Create'
myview = MyView.as_view('myview')
app.add_url_rule('/myview/', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/<int:id>', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/create', methods=['POST'],
view_func=myview)
with app.test_request_context():
self.assert_equal(flask.url_for('myview', _method='GET'),
'/myview/')
self.assert_equal(flask.url_for('myview', id=42, _method='GET'),
'/myview/42')
self.assert_equal(flask.url_for('myview', _method='POST'),
'/myview/create')
class NoImportsTestCase(FlaskTestCase):
"""Test Flasks are created without import.
Avoiding ``__import__`` helps create Flask instances where there are errors
at import time. Those runtime errors will be apparent to the user soon
enough, but tools which build Flask instances meta-programmatically benefit
from a Flask which does not ``__import__``. Instead of importing to
retrieve file paths or metadata on a module or package, use the pkgutil and
imp modules in the Python standard library.
"""
def test_name_with_import_error(self):
try:
flask.Flask('importerror')
except NotImplementedError:
self.fail('Flask(import_name) is importing import_name.')
class StreamingTestCase(FlaskTestCase):
def test_streaming_with_context(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(generate()))
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
def test_streaming_with_context_as_decorator(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.stream_with_context
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(generate())
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
def test_streaming_with_context_and_custom_close(self):
app = flask.Flask(__name__)
app.testing = True
called = []
class Wrapper(object):
def __init__(self, gen):
self._gen = gen
def __iter__(self):
return self
def close(self):
called.append(42)
def __next__(self):
return next(self._gen)
next = __next__
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(
Wrapper(generate())))
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
self.assertEqual(called, [42])
def suite():
suite = unittest.TestSuite()
if flask.json_available:
suite.addTest(unittest.makeSuite(JSONTestCase))
suite.addTest(unittest.makeSuite(SendfileTestCase))
suite.addTest(unittest.makeSuite(LoggingTestCase))
suite.addTest(unittest.makeSuite(NoImportsTestCase))
suite.addTest(unittest.makeSuite(StreamingTestCase))
return suite
| apache-2.0 | -3,684,108,861,296,416,000 | -7,268,208,694,559,844,000 | 36.020236 | 89 | 0.519656 | false |
newemailjdm/pybrain | examples/rl/environments/ode/ccrl_glass_pgpe.py | 30 | 2812 | #!/usr/bin/env python
#########################################################################
# Reinforcement Learning with PGPE on the CCRL ODE Environment
#
# The CCRL robot is a body structure with 2x 7 DoF Arms.
# Complex grasping tasks can be learned with this environment.
#
# Control/Actions:
# The agent can control all 14 DOF of the robot arms plus the 2 hands.
#
# A wide variety of sensors are available for observation and reward:
# - 16 angles of joints
# - 16 angle velocities of joints
# - Number of hand parts that have contact to target object
# - collision with table
# - distance of hand to target
# - angle of hand to horizontal and vertical plane
#
# Tasks available are:
# - Grasp Task: the agent has to get hold of the object while avoiding collisions with the table
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
# Author: Frank Sehnke, [email protected]
#########################################################################
__author__ = "Frank Sehnke"
__version__ = '$Id$'
from pybrain.tools.example_tools import ExTools
from pybrain.rl.environments.ode import CCRLEnvironment
from pybrain.rl.environments.ode.tasks import CCRLGlasTask
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import PGPE
from pybrain.rl.experiments import EpisodicExperiment
hiddenUnits = 4
batch=1 #number of samples per learning step
prnts=1 #number of learning steps after results are printed
epis=2000/batch/prnts #number of rollouts
numbExp=10 #number of experiments
et = ExTools(batch, prnts) #tool for printing and plotting
env = None
for runs in range(numbExp):
# create environment
#Options: XML-Model, Bool(OpenGL), Bool(Realtime simu. while client is connected), ServerIP(default:localhost), Port(default:21560)
if env != None: env.closeSocket()
env = CCRLEnvironment()
# create task
task = CCRLGlasTask(env)
# create controller network
net = buildNetwork(len(task.getObservation()), hiddenUnits, env.actLen, outclass=TanhLayer) #, hiddenUnits
# create agent with controller and learner (and its options)
agent = OptimizationAgent(net, PGPE(storeAllEvaluations = True))
et.agent = agent
# create the experiment
experiment = EpisodicExperiment(task, agent)
#Do the experiment
for updates in range(epis):
for i in range(prnts):
experiment.doEpisodes(batch)
et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
et.addExps()
et.showExps()
#To view what the simulation is doing at the moment, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL musst be installed, see PyBrain documentation)
| bsd-3-clause | 7,299,563,147,088,571,000 | -3,180,565,799,149,382,700 | 40.352941 | 169 | 0.710171 | false |
Omegaphora/external_chromium_org | tools/memory_inspector/memory_inspector/classification/native_heap_classifier_unittest.py | 89 | 5824 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from memory_inspector.classification import native_heap_classifier
from memory_inspector.core import native_heap
from memory_inspector.core import stacktrace
from memory_inspector.core import symbol
_TEST_RULES = """
[
{
'name': 'content',
'source_path': r'content/',
'children': [
{
'name': 'browser',
'stacktrace': r'content::browser',
'source_path': r'content/browser',
},
{
'name': 'renderer',
'stacktrace': r'content::renderer',
},
],
},
{
'name': 'ashmem_in_skia',
'stacktrace': [r'sk::', r'ashmem::'],
},
]
"""
_TEST_STACK_TRACES = [
(3, [('stack_frame_0::foo()', '/ignored.c'),
('this_goes_under_totals_other', '/ignored.c')]),
(5, [('foo', '/ignored.c'),
('content::browser::something()', '/content/browser/something.cc'),
('bar', '/ignored.c')]),
(7, [('content::browser::something_else()', '/content/browser/else.cc')]),
(11, [('content::browser::not_really()', '/content/subtle/something.cc'),
('foo', '/ignored.c')]),
(13, [('foo', '/ignored.c'),
('content::renderer::something()', '/content/renderer/foo.c'),
('bar', '/ignored.c')]),
(17, [('content::renderer::something_else()', '/content/renderer/foo.c')]),
(19, [('content::renderer::something_else_2()', '/content/renderer/bar.c'),
('foo', '/ignored.c')]),
(23, [('content::something_different', '/content/foo.c')]),
(29, [('foo', '/ignored.c'),
('sk::something', '/skia/something.c'),
('not_ashsmem_goes_into_totals_other', '/ignored.c')]),
(31, [('foo', '/ignored.c'),
('sk::something', '/skia/something.c'),
('foo::bar', '/ignored.c'),
('sk::foo::ashmem::alloc()', '/skia/ashmem.c')]),
(37, [('foo', '/ignored.c'),
('sk::something', '/ignored.c'),
('sk::foo::ashmem::alloc()', '/ignored.c')]),
(43, [('foo::ashmem::alloc()', '/ignored.c'),
('sk::foo', '/ignored.c'),
('wrong_order_goes_into_totals', '/ignored.c')])
]
_EXPECTED_RESULTS = {
'Total': [238, 0],
'Total::content': [95, 0],
'Total::content::browser': [12, 0], # 5 + 7.
'Total::content::renderer': [49, 0], # 13 + 17 + 19.
'Total::content::content-other': [34, 0],
'Total::ashmem_in_skia': [68, 0], # 31 + 37.
'Total::Total-other': [75, 0], # 3 + 29 + 43.
}
_HEURISTIC_TEST_STACK_TRACES = [
(10, '/root/base1/foo/bar/file.cc'), # Contrib: 0.13
(20, '/root/base1/foo/baz/file.cc'), # Contrib: 0.26
(1, '/root/base1/foo/nah/file.cc'), # Contrib: 0.01
(3, '/root/base2/file.cc'), # Contrib: 0.03
(22, '/root/base2/subpath/file.cc'), # Contrib: 0.28
(18, '/root/base2/subpath2/file.cc'), # Contrib: 0.23
(2, '/root/whatever/file.cc'), # Contrib: 0.02
]
_HEURISTIC_EXPECTED_RESULTS = {
'Total': [76, 0],
'Total::/root/': [76, 0],
'Total::/root/::base1/foo/': [31, 0], # 10 + 20 +1
'Total::/root/::base1/foo/::bar/': [10, 0],
'Total::/root/::base1/foo/::baz/': [20, 0],
'Total::/root/::base1/foo/::base1/foo/-other': [1, 0],
'Total::/root/::base2/': [43, 0], # 3 + 22 + 18
'Total::/root/::base2/::subpath/': [22, 0],
'Total::/root/::base2/::subpath2/': [18, 0],
'Total::/root/::base2/::base2/-other': [3, 0],
'Total::/root/::/root/-other': [2, 0],
'Total::Total-other': [0, 0],
}
class NativeHeapClassifierTest(unittest.TestCase):
def testStandardRuleParsingAndProcessing(self):
rule_tree = native_heap_classifier.LoadRules(_TEST_RULES)
nheap = native_heap.NativeHeap()
mock_addr = 0
for test_entry in _TEST_STACK_TRACES:
mock_strace = stacktrace.Stacktrace()
for (mock_btstr, mock_source_path) in test_entry[1]:
mock_addr += 4 # Addr is irrelevant, just keep it distinct.
mock_frame = stacktrace.Frame(mock_addr)
mock_frame.SetSymbolInfo(symbol.Symbol(mock_btstr, mock_source_path))
mock_strace.Add(mock_frame)
nheap.Add(native_heap.Allocation(
size=test_entry[0], stack_trace=mock_strace))
res = native_heap_classifier.Classify(nheap, rule_tree)
self._CheckResult(res.total, '', _EXPECTED_RESULTS)
def testInferHeuristicRules(self):
nheap = native_heap.NativeHeap()
mock_addr = 0
for (mock_alloc_size, mock_source_path) in _HEURISTIC_TEST_STACK_TRACES:
mock_strace = stacktrace.Stacktrace()
mock_addr += 4 # Addr is irrelevant, just keep it distinct.
mock_frame = stacktrace.Frame(mock_addr)
mock_frame.SetSymbolInfo(symbol.Symbol(str(mock_addr), mock_source_path))
for _ in xrange(10): # Just repeat the same stack frame 10 times
mock_strace.Add(mock_frame)
nheap.Add(native_heap.Allocation(
size=mock_alloc_size, stack_trace=mock_strace))
rule_tree = native_heap_classifier.InferHeuristicRulesFromHeap(
nheap, threshold=0.05)
res = native_heap_classifier.Classify(nheap, rule_tree)
self._CheckResult(res.total, '', _HEURISTIC_EXPECTED_RESULTS)
def _CheckResult(self, node, prefix, expected_results):
node_name = prefix + node.name
self.assertIn(node_name, expected_results)
self.assertEqual(node.values, expected_results[node_name])
for child in node.children:
self._CheckResult(child, node_name + '::', expected_results) | bsd-3-clause | -1,788,831,865,218,774,800 | 4,696,659,774,693,599,000 | 39.172414 | 79 | 0.564045 | false |
skbkontur/Diamond | src/collectors/mesos_cgroup/test/testmesos_cgroup.py | 16 | 6777 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import MagicMock, Mock, mock_open
from mock import patch
from diamond.collector import Collector
from mesos_cgroup import MesosCGroupCollector
##########################################################################
class TestMesosCGroupCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('MesosCGroupCollector', {})
self.collector = MesosCGroupCollector(config, None)
def test_import(self):
self.assertTrue(MesosCGroupCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
task_id = 'b0d5971e-915c-414b-aa25-0da46e64ff4e'
def urlopen_se(url):
if url == 'http://localhost:5051/state.json':
return self.getFixture('state.json')
else:
print url
raise NotImplementedError()
def listdir_se(directory):
cgroup_directories = [
'/sys/fs/cgroup/cpuacct/mesos',
'/sys/fs/cgroup/cpu/mesos',
'/sys/fs/cgroup/memory/mesos'
]
if directory in cgroup_directories:
return ["b0d5971e-915c-414b-aa25-0da46e64ff4e"]
else:
print directory
raise NotImplementedError()
def isdir_se(directory):
task_directories = [
'/sys/fs/cgroup/cpuacct/mesos/%s' % task_id,
'/sys/fs/cgroup/cpu/mesos/%s' % task_id,
'/sys/fs/cgroup/memory/mesos/%s' % task_id
]
if directory in task_directories:
return True
else:
print directory
raise NotImplementedError()
def open_se(path, mode='r', create=True):
if path.endswith('cpuacct/mesos/%s/cpuacct.usage' % task_id):
fixture = self.getFixture('cpuacct.usage')
m = mock_open(read_data=fixture.getvalue())
m.__enter__.return_value = fixture
return m
elif path.endswith('cpuacct/mesos/%s/cpuacct.stat' % task_id):
fixture = self.getFixture('cpuacct.stat')
m = mock_open(read_data=fixture.getvalue())
m.__enter__.return_value = fixture
return m
elif path.endswith('cpu/mesos/%s/cpu.stat' % task_id):
fixture = self.getFixture('cpu.stat')
m = mock_open(read_data=fixture.getvalue())
m.__enter__.return_value = fixture
return m
elif path.endswith('memory/mesos/%s/memory.stat' % task_id):
fixture = self.getFixture('memory.stat')
m = mock_open(read_data=fixture.getvalue())
m.__enter__.return_value = fixture
return m
else:
patch_open.stop()
o = open(path, mode, create)
patch_open.start()
return o
patch_urlopen = patch('urllib2.urlopen', Mock(side_effect=urlopen_se))
patch_listdir = patch('os.listdir', Mock(side_effect=listdir_se))
patch_isdir = patch('os.path.isdir', Mock(side_effect=isdir_se))
patch_open = patch('__builtin__.open', MagicMock(spec=file,
side_effect=open_se))
patch_urlopen.start()
patch_listdir.start()
patch_isdir.start()
patch_open.start()
self.collector.collect()
patch_open.stop()
patch_isdir.stop()
patch_listdir.stop()
patch_urlopen.stop()
metrics = self.get_metrics()
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
def get_metrics(self):
return {
'ENVIRONMENT.ROLE.TASK.0.cpuacct.usage': '170379797227518',
'ENVIRONMENT.ROLE.TASK.0.cpuacct.user': '9333852',
'ENVIRONMENT.ROLE.TASK.0.cpuacct.system': '2774846',
'ENVIRONMENT.ROLE.TASK.0.cpu.nr_periods': '26848849',
'ENVIRONMENT.ROLE.TASK.0.cpu.nr_throttled': '85144',
'ENVIRONMENT.ROLE.TASK.0.cpu.throttled_time': '34709931864651',
'ENVIRONMENT.ROLE.TASK.0.memory.cache': '233398272',
'ENVIRONMENT.ROLE.TASK.0.memory.rss': '1789911040',
'ENVIRONMENT.ROLE.TASK.0.memory.rss_huge': '1642070016',
'ENVIRONMENT.ROLE.TASK.0.memory.mapped_file': '1118208',
'ENVIRONMENT.ROLE.TASK.0.memory.writeback': '0',
'ENVIRONMENT.ROLE.TASK.0.memory.pgpgin': '375953210',
'ENVIRONMENT.ROLE.TASK.0.memory.pgpgout': '385688436',
'ENVIRONMENT.ROLE.TASK.0.memory.pgfault': '353980394',
'ENVIRONMENT.ROLE.TASK.0.memory.pgmajfault': '157',
'ENVIRONMENT.ROLE.TASK.0.memory.inactive_anon': '0',
'ENVIRONMENT.ROLE.TASK.0.memory.active_anon': '1789911040',
'ENVIRONMENT.ROLE.TASK.0.memory.inactive_file': '52654080',
'ENVIRONMENT.ROLE.TASK.0.memory.active_file': '180727808',
'ENVIRONMENT.ROLE.TASK.0.memory.unevictable': '0',
'ENVIRONMENT.ROLE.TASK.0.memory.hierarchical_memory_limit': '3355443200', # noqa
'ENVIRONMENT.ROLE.TASK.0.memory.total_cache': '233398272',
'ENVIRONMENT.ROLE.TASK.0.memory.total_rss': '1789911040',
'ENVIRONMENT.ROLE.TASK.0.memory.total_rss_huge': '1642070016',
'ENVIRONMENT.ROLE.TASK.0.memory.total_mapped_file': '1118208',
'ENVIRONMENT.ROLE.TASK.0.memory.total_writeback': '0',
'ENVIRONMENT.ROLE.TASK.0.memory.total_pgpgin': '375953210',
'ENVIRONMENT.ROLE.TASK.0.memory.total_pgpgout': '385688436',
'ENVIRONMENT.ROLE.TASK.0.memory.total_pgfault': '353980394',
'ENVIRONMENT.ROLE.TASK.0.memory.total_pgmajfault': '157',
'ENVIRONMENT.ROLE.TASK.0.memory.total_inactive_anon': '0',
'ENVIRONMENT.ROLE.TASK.0.memory.total_active_anon': '1789911040',
'ENVIRONMENT.ROLE.TASK.0.memory.total_inactive_file': '52654080',
'ENVIRONMENT.ROLE.TASK.0.memory.total_active_file': '180727808',
'ENVIRONMENT.ROLE.TASK.0.memory.total_unevictable': '0'
}
##########################################################################
if __name__ == "__main__":
unittest.main()
| mit | -8,069,580,590,999,158,000 | -1,119,017,060,347,275,400 | 42.165605 | 93 | 0.56013 | false |
usingnamespace/pyramid_authsanity | src/pyramid_authsanity/__init__.py | 1 | 2689 | from pyramid.settings import asbool, aslist
from .interfaces import IAuthSourceService
from .policy import AuthServicePolicy
from .sources import (
CookieAuthSourceInitializer,
HeaderAuthSourceInitializer,
SessionAuthSourceInitializer,
)
from .util import int_or_none, kw_from_settings
default_settings = (
("source", str, ""),
("debug", asbool, False),
("cookie.cookie_name", str, "auth"),
("cookie.max_age", int_or_none, None),
("cookie.httponly", asbool, True),
("cookie.path", str, "/"),
("cookie.domains", aslist, []),
("cookie.debug", asbool, False),
("session.value_key", str, "sanity."),
)
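# A minimal sketch of how these settings are typically supplied, assuming a
# hypothetical Pyramid ini file; the keys are the "authsanity."-prefixed names
# parsed by parse_settings() below, and omitted keys fall back to the defaults
# above. Shown as comments only:
#
#   authsanity.source = cookie
#   authsanity.secret = seekrit
#   authsanity.debug = false
#   authsanity.cookie.max_age = 3600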
def init_cookie_source(config, settings):
if "authsanity.secret" not in settings:
raise RuntimeError("authsanity.secret is required for cookie based storage")
kw = kw_from_settings(settings, "authsanity.cookie.")
config.register_service_factory(
CookieAuthSourceInitializer(settings["authsanity.secret"], **kw),
iface=IAuthSourceService,
)
def init_session_source(config, settings):
kw = kw_from_settings(settings, "authsanity.session.")
config.register_service_factory(
SessionAuthSourceInitializer(**kw), iface=IAuthSourceService
)
def init_authorization_header_source(config, settings):
if "authsanity.secret" not in settings:
raise RuntimeError(
"authsanity.secret is required for Authorization header source"
)
kw = kw_from_settings(settings, "authsanity.header.")
config.register_service_factory(
HeaderAuthSourceInitializer(settings["authsanity.secret"], **kw),
iface=IAuthSourceService,
)
default_sources = {
"cookie": init_cookie_source,
"session": init_session_source,
"header": init_authorization_header_source,
}
# Stolen from pyramid_debugtoolbar
def parse_settings(settings):
parsed = {}
def populate(name, convert, default):
name = "%s%s" % ("authsanity.", name)
value = convert(settings.get(name, default))
parsed[name] = value
for name, convert, default in default_settings:
populate(name, convert, default)
return parsed
def includeme(config):
# Go parse the settings
settings = parse_settings(config.registry.settings)
# Update the config
config.registry.settings.update(settings)
# include pyramid_services
config.include("pyramid_services")
if settings["authsanity.source"] in default_sources:
default_sources[settings["authsanity.source"]](config, config.registry.settings)
config.set_authentication_policy(
AuthServicePolicy(debug=settings["authsanity.debug"])
)
| isc | -47,584,415,937,048,370 | -6,876,544,576,558,361,000 | 27.305263 | 88 | 0.685757 | false |
Rickyfox/MLMA2 | core/DataHandler.py | 1 | 1770 | '''
Created on Dec 17, 2014
@author: Dominik Lang
'''
import csv
import os.path
from random import shuffle
import collections
import numpy
from sklearn.preprocessing import Imputer
class DataHandler(object):
def __init__(self):
pass
'''
@summary: A method to handle reading the data in from the csv file
    @return: List containing the rows of the dataset as separate lists
'''
def readData(self):
# We get the path to the current file, then go one directory up to find the data file
basepath = os.path.dirname(__file__)
filepath = os.path.abspath(os.path.join(basepath, "..","data.csv"))
output=[]
with open(filepath, 'rb') as csvfile:
i=0
linereader = csv.reader(csvfile, delimiter=',')
for row in linereader:
if i==0:
i+=1
continue
output.append(row)
return output
'''
@summary: A method that splits the dataset into a training and a test set
'''
def splitData(self,dataset):
sets = collections.namedtuple('Sets', ['train', 'test'])
third=len(dataset)/3
shuffle(dataset)
testset=dataset[0:third]
        trainset=dataset[third:]  # remaining two thirds become the training set
s=sets(trainset,testset)
return s
def vectorizeData(self,dataset):
vectors = collections.namedtuple('vectors', ['X', 'Y'])
x=[]
y=[]
for i in dataset:
atts=i[0:-2]
c=i[-1]
x.append(atts)
y.append(c)
x=numpy.asarray(x)
y=numpy.asarray(y)
output=vectors(x,y)
return output
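# A minimal usage sketch (assumed workflow, not taken from the repository);
# shown as comments only:
#
#   dh = DataHandler()
#   rows = dh.readData()
#   split = dh.splitData(rows)
#   train = dh.vectorizeData(split.train)  # train.X: attributes, train.Y: labels
#   test = dh.vectorizeData(split.test)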
| gpl-2.0 | -4,564,861,069,887,728,000 | -5,664,580,317,697,214,000 | 23.929577 | 93 | 0.535028 | false |
shakamunyi/tensorflow | tensorflow/contrib/layers/python/layers/feature_column.py | 19 | 105773 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features in `Estimator` models.
FeatureColumns are the primary way of encoding features for pre-canned
`Estimator` models.
When using FeatureColumns with `Estimator` models, the type of feature column
you should choose depends on (1) the feature type and (2) the model type.
(1) Feature type:
* Continuous features can be represented by `real_valued_column`.
* Categorical features can be represented by any `sparse_column_with_*`
column (`sparse_column_with_keys`, `sparse_column_with_vocabulary_file`,
`sparse_column_with_hash_bucket`, `sparse_column_with_integerized_feature`).
(2) Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = real_valued_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `one_hot_column`. `one_hot_column` will create a dense
boolean tensor with an entry for each possible value, and thus the
computation cost is linear in the number of possible values versus the number
of values that occur in the sparse tensor. Thus using a "one_hot_column" is
only recommended for features with only a few possible values. For features
with many possible values or for very sparse features, `embedding_column` is
recommended.
embedded_dept_column = embedding_column(
sparse_column_with_keys("department", ["math", "philosphy", ...]),
dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. When doing so
an embedding_lookups are used to efficiently perform the sparse matrix
multiplication.
dept_column = sparse_column_with_keys("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=[department_column, bucketized_age_column],
hash_bucket_size=1000)
Example of building an `Estimator` model using FeatureColumns:
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_from_feature_columns` within
`feature_column_ops.py`.
Example of building a non-`Estimator` model using FeatureColumns:
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
See feature_column_ops_test for more examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import six
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.ops import bucketization_op
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.contrib.layers.python.ops import sparse_ops as contrib_sparse_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
class _LinearEmbeddingLookupArguments(
collections.namedtuple("_LinearEmbeddingLookupArguments",
["input_tensor",
"weight_tensor",
"vocab_size",
"initializer",
"combiner"])):
"""Represents the information needed from a column for embedding lookup.
Used to compute DNN inputs and weighted sum.
"""
pass
class _DeepEmbeddingLookupArguments(
collections.namedtuple("_DeepEmbeddingLookupArguments",
["input_tensor",
"weight_tensor",
"vocab_size",
"initializer",
"combiner",
"dimension",
"shared_embedding_name",
"hash_key",
"max_norm",
"trainable"])):
"""Represents the information needed from a column for embedding lookup.
Used to compute DNN inputs and weighted sum.
"""
pass
class _FeatureColumn(object):
"""Represents a feature column abstraction.
To distinguish the concept of a feature family and a specific binary feature
within a family, we refer to a feature family like "country" as a feature
column. For example "country:US" is a feature which is in "country" feature
column and has a feature value ("US").
This class is an abstract class. User should not create one instance of this.
Following classes (_SparseColumn, _RealValuedColumn, ...) are concrete
instances.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def name(self):
"""Returns the name of column or transformed column."""
pass
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def config(self):
"""Returns configuration of the base feature for `tf.parse_example`."""
pass
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
pass
@abc.abstractmethod
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
raise NotImplementedError("Transform is not implemented for {}.".format(
self))
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collection=None,
trainable=True,
output_rank=2):
"""Returns a Tensor as an input to the first layer of neural network."""
raise ValueError("Calling an abstract method.")
def _deep_embedding_lookup_arguments(self, input_tensor):
"""Returns arguments to embedding lookup to build an input layer."""
raise NotImplementedError(
"No deep embedding lookup arguments for column {}.".format(self))
# It is expected that classes implement either wide_embedding_lookup_arguments
# or to_dense_tensor to be used in linear models.
# pylint: disable=unused-argument
def _wide_embedding_lookup_arguments(self, input_tensor):
"""Returns arguments to look up embeddings for this column."""
raise NotImplementedError(
"No wide embedding lookup arguments for column {}.".format(self))
# pylint: disable=unused-argument
def _to_dense_tensor(self, input_tensor):
"""Returns a dense tensor representing this column's values."""
raise NotImplementedError(
"No dense tensor representation for column {}.".format(self))
def _checkpoint_path(self):
"""Returns None, or a (path,tensor_name) to load a checkpoint from."""
return None
def _key_without_properties(self, properties):
"""Helper method for self.key() that omits particular properties."""
fields_values = []
# pylint: disable=protected-access
for i, k in enumerate(self._fields):
if k in properties:
# Excludes a property from the key.
# For instance, exclude `initializer` from the key of EmbeddingColumn
# since we don't support users specifying different initializers for
# the same embedding column. Ditto for `normalizer` and
# RealValuedColumn.
# Special treatment is needed since the default str form of a
# function contains its address, which could introduce non-determinism
# in sorting.
continue
fields_values.append("{}={}".format(k, self[i]))
# pylint: enable=protected-access
# This is effectively the same format as str(self), except with our special
# treatment.
return "{}({})".format(type(self).__name__, ", ".join(fields_values))
# TODO(b/30410315): Support warm starting in all feature columns.
class _SparseColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple("_SparseColumn", [
"column_name", "is_integerized", "bucket_size", "lookup_config",
"combiner", "dtype"
])):
"""Represents a sparse feature column also known as categorical features.
Instances of this class are immutable. A sparse column means features are
sparse and dictionary returned by InputBuilder contains a
("column_name", SparseTensor) pair.
One and only one of bucket_size or lookup_config should be set. If
is_integerized is True then bucket_size should be set.
Attributes:
column_name: A string defining sparse column name.
is_integerized: A bool if True means type of feature is an integer.
Integerized means we can use the feature itself as id.
bucket_size: An int that is > 0. The number of buckets.
lookup_config: A _SparseIdLookupConfig defining feature-to-id lookup
configuration
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features, either `tf.string` or `tf.int64`.
Raises:
TypeError: if lookup_config is not a _SparseIdLookupConfig.
ValueError: if above expectations about input fails.
"""
def __new__(cls,
column_name,
is_integerized=False,
bucket_size=None,
lookup_config=None,
combiner="sum",
dtype=dtypes.string):
if is_integerized and bucket_size is None:
raise ValueError("bucket_size must be set if is_integerized is True. "
"column_name: {}".format(column_name))
if is_integerized and not dtype.is_integer:
raise ValueError("dtype must be an integer if is_integerized is True. "
"dtype: {}, column_name: {}.".format(dtype, column_name))
if dtype != dtypes.string and not dtype.is_integer:
raise ValueError("dtype must be string or integer. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if bucket_size is None and lookup_config is None:
raise ValueError("one of bucket_size or lookup_config must be set. "
"column_name: {}".format(column_name))
if bucket_size is not None and lookup_config:
raise ValueError("one and only one of bucket_size or lookup_config "
"must be set. column_name: {}".format(column_name))
if bucket_size is not None and bucket_size < 1:
raise ValueError("bucket_size must be at least 1. "
"bucket_size: {}, column_name: {}".format(bucket_size,
column_name))
if ((lookup_config) and
(not isinstance(lookup_config, _SparseIdLookupConfig))):
raise TypeError(
"lookup_config must be an instance of _SparseIdLookupConfig. "
"Given one is in type {} for column_name {}".format(
type(lookup_config), column_name))
if (lookup_config and lookup_config.vocabulary_file and
lookup_config.vocab_size is None):
raise ValueError("vocab_size must be defined. "
"column_name: {}".format(column_name))
return super(_SparseColumn, cls).__new__(
cls,
column_name,
is_integerized=is_integerized,
bucket_size=bucket_size,
lookup_config=lookup_config,
combiner=combiner,
dtype=dtype)
@property
def name(self):
return self.column_name
@property
def length(self):
"""Returns vocabulary or hash_bucket size."""
if self.bucket_size is not None:
return self.bucket_size
return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets
@property
def config(self):
return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor
# pylint: disable=unused-argument
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return None
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError(
"SparseColumn is not supported in DNN. "
"Please use embedding_column or one_hot_column. column: {}".format(
self))
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.id_tensor(input_tensor),
weight_tensor=self.weight_tensor(input_tensor),
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.combiner)
def _get_input_sparse_tensor(self, input_tensor):
"""sparsify input_tensor if dense."""
if not isinstance(input_tensor, sparse_tensor_py.SparseTensor):
# To avoid making any assumptions about which values are to be ignored,
# we set ignore_value to -1 for numeric tensors to avoid excluding valid
# indices.
if input_tensor.dtype == dtypes.string:
ignore_value = ""
else:
ignore_value = -1
input_tensor = _reshape_real_valued_tensor(input_tensor, 2, self.name)
input_tensor = contrib_sparse_ops.dense_to_sparse_tensor(
input_tensor, ignore_value=ignore_value)
return input_tensor
def is_compatible(self, other_column):
"""Check compatibility of two sparse columns."""
if self.lookup_config and other_column.lookup_config:
return self.lookup_config == other_column.lookup_config
compatible = (self.length == other_column.length and
(self.dtype == other_column.dtype or
(self.dtype.is_integer and other_column.dtype.is_integer)))
if compatible:
logging.warn("Column {} and {} may not have the same vocabulary.".
format(self.name, other_column.name))
return compatible
@abc.abstractmethod
def _do_transform(self, input_tensor):
pass
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
input_tensor = self._get_input_sparse_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = self._do_transform(input_tensor)
def _transform_feature(self, inputs):
input_tensor = self._get_input_sparse_tensor(inputs.get(self.name))
return self._do_transform(input_tensor)
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access
self.id_tensor(input_tensor), self.weight_tensor(input_tensor))
class _SparseColumnIntegerized(_SparseColumn):
"""See `sparse_column_with_integerized_feature`."""
def _do_transform(self, input_tensor):
sparse_id_values = math_ops.mod(input_tensor.values, self.bucket_size,
name="mod")
return sparse_tensor_py.SparseTensor(input_tensor.indices, sparse_id_values,
input_tensor.dense_shape)
def sparse_column_with_integerized_feature(column_name,
bucket_size,
combiner="sum",
dtype=dtypes.int64):
"""Creates an integerized _SparseColumn.
Use this when your features are already pre-integerized into int64 IDs, that
is, when the set of values to output is already coming in as what's desired in
the output. Integerized means we can use the feature value itself as id.
  Typically this is used for reading contiguous ranges of integer indexes, but
  it doesn't have to be. The output value is simply copied from the
  input_feature, whatever it is. Just be aware, however, that large gaps of
  unused integers affect whatever consumes these IDs downstream (for instance,
  if you build a one-hot tensor from them, the unused integers will appear as
  entries that are always zero).
Args:
column_name: A string defining sparse column name.
    bucket_size: An int that is > 1. The number of buckets. It should be bigger
      than the maximum feature value. In other words, features in this column
      should be int64s in the range [0, bucket_size).
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features. It should be an integer type. Default value is
dtypes.int64.
Returns:
An integerized _SparseColumn definition.
Raises:
ValueError: bucket_size is not greater than 1.
ValueError: dtype is not integer.
"""
return _SparseColumnIntegerized(
column_name, is_integerized=True, bucket_size=bucket_size,
combiner=combiner, dtype=dtype)
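# A minimal usage sketch, assuming a hypothetical integer feature "video_id"
# whose values already lie in [0, 100000); kept as comments so that nothing
# executes at import time:
#
#   video_id = sparse_column_with_integerized_feature(
#       "video_id", bucket_size=100000)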
class _SparseColumnHashed(_SparseColumn):
"""See `sparse_column_with_hash_bucket`."""
def _do_transform(self, input_tensor):
if self.dtype.is_integer:
sparse_values = string_ops.as_string(input_tensor.values)
else:
sparse_values = input_tensor.values
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.bucket_size, name="lookup")
return sparse_tensor_py.SparseTensor(input_tensor.indices, sparse_id_values,
input_tensor.dense_shape)
def sparse_column_with_hash_bucket(column_name,
hash_bucket_size,
combiner="sum",
dtype=dtypes.string):
"""Creates a _SparseColumn with hashed bucket configuration.
Use this when your sparse features are in string or integer format, but you
don't have a vocab file that maps each value to an integer ID.
output_id = Hash(input_feature_string) % bucket_size
Args:
column_name: A string defining sparse column name.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A _SparseColumn with hashed bucket configuration
Raises:
    ValueError: hash_bucket_size is not greater than 1.
ValueError: dtype is neither string nor integer.
"""
return _SparseColumnHashed(
column_name,
bucket_size=hash_bucket_size,
combiner=combiner,
dtype=dtype)
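# A minimal usage sketch, assuming a hypothetical string feature "language"
# with no vocabulary file available; kept as comments so that nothing executes
# at import time:
#
#   language = sparse_column_with_hash_bucket(
#       "language", hash_bucket_size=1000)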
class _SparseColumnKeys(_SparseColumn):
"""See `sparse_column_with_keys`."""
def _do_transform(self, input_tensor):
table = lookup.index_table_from_tensor(
mapping=tuple(self.lookup_config.keys),
default_value=self.lookup_config.default_value,
dtype=self.dtype,
name="lookup")
return table.lookup(input_tensor)
def sparse_column_with_keys(
column_name, keys, default_value=-1, combiner="sum", dtype=dtypes.string):
"""Creates a _SparseColumn with keys.
Look up logic is as follows:
lookup_id = index_of_feature_in_keys if feature in keys else default_value
Args:
column_name: A string defining sparse column name.
keys: A list or tuple defining vocabulary. Must be castable to `dtype`.
default_value: The value to use for out-of-vocabulary feature values.
Default is -1.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features. Only integer and string are supported.
Returns:
A _SparseColumnKeys with keys configuration.
"""
keys = tuple(keys)
return _SparseColumnKeys(
column_name,
lookup_config=_SparseIdLookupConfig(
keys=keys, vocab_size=len(keys), default_value=default_value),
combiner=combiner,
dtype=dtype)
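# A minimal usage sketch with a small, fixed vocabulary (the "department"
# feature mirrors the example in the module docstring); kept as comments so
# that nothing executes at import time:
#
#   department = sparse_column_with_keys(
#       "department", keys=["math", "philosophy", "english"])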
class _SparseColumnVocabulary(_SparseColumn):
"""See `sparse_column_with_vocabulary_file`."""
def _do_transform(self, st):
if self.dtype.is_integer:
sparse_string_values = string_ops.as_string(st.values)
sparse_string_tensor = sparse_tensor_py.SparseTensor(st.indices,
sparse_string_values,
st.dense_shape)
else:
sparse_string_tensor = st
table = lookup.index_table_from_file(
vocabulary_file=self.lookup_config.vocabulary_file,
num_oov_buckets=self.lookup_config.num_oov_buckets,
vocab_size=self.lookup_config.vocab_size,
default_value=self.lookup_config.default_value,
name=self.name + "_lookup")
return table.lookup(sparse_string_tensor)
def sparse_column_with_vocabulary_file(column_name,
vocabulary_file,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
combiner="sum",
dtype=dtypes.string):
"""Creates a _SparseColumn with vocabulary file configuration.
Use this when your sparse features are in string or integer format, and you
have a vocab file that maps each value to an integer ID.
output_id = LookupIdFromVocab(input_feature_string)
Args:
column_name: A string defining sparse column name.
vocabulary_file: The vocabulary filename.
    num_oov_buckets: The number of out-of-vocabulary buckets. If zero, all
      out-of-vocabulary features will be ignored.
vocab_size: Number of the elements in the vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A _SparseColumn with vocabulary file configuration.
Raises:
ValueError: vocab_size is not defined.
ValueError: dtype is neither string nor integer.
"""
if vocab_size is None:
raise ValueError("vocab_size should be defined. "
"column_name: {}".format(column_name))
return _SparseColumnVocabulary(
column_name,
lookup_config=_SparseIdLookupConfig(
vocabulary_file=vocabulary_file,
num_oov_buckets=num_oov_buckets,
vocab_size=vocab_size,
default_value=default_value),
combiner=combiner,
dtype=dtype)
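# A minimal usage sketch, assuming a hypothetical vocabulary file holding one
# country code per line; the path and vocab_size are illustrative only. Kept
# as comments so that nothing executes at import time:
#
#   country = sparse_column_with_vocabulary_file(
#       "country", vocabulary_file="/path/to/countries.txt", vocab_size=195,
#       num_oov_buckets=1)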
class _WeightedSparseColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple("_WeightedSparseColumn",
["sparse_id_column", "weight_column_name",
"dtype"])):
"""See `weighted_sparse_column`."""
def __new__(cls, sparse_id_column, weight_column_name, dtype):
return super(_WeightedSparseColumn, cls).__new__(cls, sparse_id_column,
weight_column_name, dtype)
@property
def name(self):
return "{}_weighted_by_{}".format(self.sparse_id_column.name,
self.weight_column_name)
@property
def length(self):
"""Returns id size."""
return self.sparse_id_column.length
@property
def config(self):
config = _get_feature_config(self.sparse_id_column)
config.update(
{self.weight_column_name: parsing_ops.VarLenFeature(self.dtype)})
return config
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor[0]
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return input_tensor[1]
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError(
"WeightedSparseColumn is not supported in DNN. "
"Please use embedding_column or one_hot_column. column: {}".format(
self))
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.id_tensor(input_tensor),
weight_tensor=self.weight_tensor(input_tensor),
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.sparse_id_column.combiner)
def _do_transform(self, id_tensor, weight_tensor):
if not isinstance(weight_tensor, sparse_tensor_py.SparseTensor):
# The weight tensor can be a regular Tensor. In such case, sparsify it.
weight_tensor = contrib_sparse_ops.dense_to_sparse_tensor(weight_tensor)
if not self.dtype.is_floating:
weight_tensor = math_ops.to_float(weight_tensor)
return tuple([id_tensor, weight_tensor])
def insert_transformed_feature(self, columns_to_tensors):
"""Inserts a tuple with the id and weight tensors."""
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
weight_tensor = columns_to_tensors[self.weight_column_name]
columns_to_tensors[self] = self._do_transform(
columns_to_tensors[self.sparse_id_column], weight_tensor)
def _transform_feature(self, inputs):
return self._do_transform(
inputs.get(self.sparse_id_column), inputs.get(self.weight_column_name))
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access
self.id_tensor(input_tensor), self.weight_tensor(input_tensor))
def is_compatible(self, other_column):
"""Check compatibility with other sparse column."""
if isinstance(other_column, _WeightedSparseColumn):
return self.sparse_id_column.is_compatible(other_column.sparse_id_column)
return self.sparse_id_column.is_compatible(other_column)
def weighted_sparse_column(sparse_id_column,
weight_column_name,
dtype=dtypes.float32):
"""Creates a _SparseColumn by combining sparse_id_column with a weight column.
Example:
```python
sparse_feature = sparse_column_with_hash_bucket(column_name="sparse_col",
hash_bucket_size=1000)
weighted_feature = weighted_sparse_column(sparse_id_column=sparse_feature,
weight_column_name="weights_col")
```
This configuration assumes that input dictionary of model contains the
following two items:
* (key="sparse_col", value=sparse_tensor) where sparse_tensor is
a SparseTensor.
* (key="weights_col", value=weights_tensor) where weights_tensor
is a SparseTensor.
Following are assumed to be true:
* sparse_tensor.indices = weights_tensor.indices
* sparse_tensor.dense_shape = weights_tensor.dense_shape
Args:
sparse_id_column: A `_SparseColumn` which is created by
`sparse_column_with_*` functions.
weight_column_name: A string defining a sparse column name which represents
weight or value of the corresponding sparse id feature.
dtype: Type of weights, such as `tf.float32`. Only floating and integer
weights are supported.
Returns:
A _WeightedSparseColumn composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if dtype is not convertible to float.
"""
if not (dtype.is_integer or dtype.is_floating):
raise ValueError("dtype is not convertible to float. Given {}".format(
dtype))
return _WeightedSparseColumn(sparse_id_column, weight_column_name, dtype)
class _OneHotColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_OneHotColumn", ["sparse_id_column"])):
"""Represents a one-hot column for use in deep networks.
Args:
sparse_id_column: A _SparseColumn which is created by `sparse_column_with_*`
function.
"""
@property
def name(self):
return "{}_one_hot".format(self.sparse_id_column.name)
@property
def length(self):
"""Returns vocabulary or hash_bucket size."""
return self.sparse_id_column.length
@property
def config(self):
"""Returns the parsing config of the origin column."""
return _get_feature_config(self.sparse_id_column)
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def insert_transformed_feature(self, columns_to_tensors):
"""Used by the Transformer to prevent double transformations."""
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
def _to_dnn_input_layer(self,
transformed_input_tensor,
unused_weight_collections=None,
unused_trainable=False,
output_rank=2):
"""Returns a Tensor as an input to the first layer of neural network.
Args:
transformed_input_tensor: A tensor that has undergone the transformations
in `insert_transformed_feature`. Rank should be >= `output_rank`.
unused_weight_collections: Unused. One hot encodings are not variable.
unused_trainable: Unused. One hot encodings are not trainable.
output_rank: the desired rank of the output `Tensor`.
Returns:
A multi-hot Tensor to be fed into the first layer of neural network.
Raises:
ValueError: When using one_hot_column with weighted_sparse_column.
This is not yet supported.
"""
# Reshape ID column to `output_rank`.
sparse_id_column = self.sparse_id_column.id_tensor(transformed_input_tensor)
# pylint: disable=protected-access
sparse_id_column = layers._inner_flatten(sparse_id_column, output_rank)
weight_tensor = self.sparse_id_column.weight_tensor(
transformed_input_tensor)
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(sp_ids=sparse_id_column,
sp_values=weight_tensor,
vocab_size=self.length)
# Remove (?, -1) index
weighted_column = sparse_ops.sparse_slice(
weighted_column,
[0, 0],
weighted_column.dense_shape)
return sparse_ops.sparse_tensor_to_dense(weighted_column)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(sparse_id_column,
default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(
one_hot_id_tensor, reduction_indices=[output_rank - 1])
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.length])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
return inputs.get(self)
def _transform_feature(self, inputs):
return self._to_dnn_input_layer(inputs.get(self.sparse_id_column))
@property
def _parse_example_spec(self):
return self.config
class _EmbeddingColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_EmbeddingColumn", [
"sparse_id_column", "dimension", "combiner", "initializer",
"ckpt_to_load_from", "tensor_name_in_ckpt", "shared_embedding_name",
"shared_vocab_size", "max_norm", "trainable"
])):
"""Represents an embedding column.
Args:
sparse_id_column: A `_SparseColumn` which is created by
`sparse_column_with_*` or `weighted_sparse_column` functions.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_column.length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
shared_embedding_name: (Optional). The common name for shared embedding.
shared_vocab_size: (Optional). The common vocab_size used for shared
embedding space.
max_norm: (Optional). If not None, embedding values are l2-normalized to
the value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True.
Raises:
ValueError: if `initializer` is specified and is not callable. Also,
if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
"""
def __new__(cls,
sparse_id_column,
dimension,
combiner="mean",
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
shared_embedding_name=None,
shared_vocab_size=None,
max_norm=None,
trainable=True):
if initializer is not None and not callable(initializer):
raise ValueError("initializer must be callable if specified. "
"Embedding of column_name: {}".format(
sparse_id_column.name))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError("Must specify both `ckpt_to_load_from` and "
"`tensor_name_in_ckpt` or none of them.")
if initializer is None:
logging.warn("The default stddev value of initializer will change from "
"\"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" after "
"2017/02/25.")
stddev = 1 / math.sqrt(sparse_id_column.length)
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=stddev)
return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
dimension, combiner,
initializer, ckpt_to_load_from,
tensor_name_in_ckpt,
shared_embedding_name,
shared_vocab_size,
max_norm,
trainable)
@property
def name(self):
if self.shared_embedding_name is None:
return "{}_embedding".format(self.sparse_id_column.name)
else:
return "{}_shared_embedding".format(self.sparse_id_column.name)
@property
def length(self):
"""Returns id size."""
if self.shared_vocab_size is None:
return self.sparse_id_column.length
else:
return self.shared_vocab_size
@property
def config(self):
return _get_feature_config(self.sparse_id_column)
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["initializer"])
def insert_transformed_feature(self, columns_to_tensors):
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
def _deep_embedding_lookup_arguments(self, input_tensor):
return _DeepEmbeddingLookupArguments(
input_tensor=self.sparse_id_column.id_tensor(input_tensor),
weight_tensor=self.sparse_id_column.weight_tensor(input_tensor),
vocab_size=self.length,
dimension=self.dimension,
initializer=self.initializer,
combiner=self.combiner,
shared_embedding_name=self.shared_embedding_name,
hash_key=None,
max_norm=self.max_norm,
trainable=self.trainable)
def _checkpoint_path(self):
if self.ckpt_to_load_from is not None:
return self.ckpt_to_load_from, self.tensor_name_in_ckpt
return None
# pylint: disable=unused-argument
def _wide_embedding_lookup_arguments(self, input_tensor):
raise ValueError("Column {} is not supported in linear models. "
"Please use sparse_column.".format(self))
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _embeddings_from_arguments(
self,
self._deep_embedding_lookup_arguments(inputs.get(self)),
weight_collections, trainable)
def _transform_feature(self, inputs):
return inputs.get(self.sparse_id_column)
@property
def _parse_example_spec(self):
return self.config
def _is_variable(v):
"""Returns true if `v` is a variable."""
return isinstance(v, (variables.Variable,
resource_variable_ops.ResourceVariable))
def _embeddings_from_arguments(column,
args,
weight_collections,
trainable,
output_rank=2):
"""Returns embeddings for a column based on the computed arguments.
Args:
column: the column name.
args: the _DeepEmbeddingLookupArguments for this column.
weight_collections: collections to store weights in.
trainable: whether these embeddings should be trainable.
output_rank: the desired rank of the returned `Tensor`. Inner dimensions will
be combined to produce the desired rank.
Returns:
the embeddings.
Raises:
ValueError: if not possible to create.
"""
# pylint: disable=protected-access
input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
weight_tensor = None
if args.weight_tensor is not None:
weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
# pylint: enable=protected-access
# This option is only enabled for scattered_embedding_column.
if args.hash_key:
embeddings = contrib_variables.model_variable(
name="weights",
shape=[args.vocab_size],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
return embedding_ops.scattered_embedding_lookup_sparse(
embeddings,
input_tensor,
args.dimension,
hash_key=args.hash_key,
combiner=args.combiner,
name="lookup")
if args.shared_embedding_name is not None:
shared_embedding_collection_name = (
"SHARED_EMBEDDING_COLLECTION_" + args.shared_embedding_name.upper())
graph = ops.get_default_graph()
shared_embedding_collection = (
graph.get_collection_ref(shared_embedding_collection_name))
shape = [args.vocab_size, args.dimension]
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
"Collection %s can only contain one "
"(partitioned) variable." % shared_embedding_collection_name)
else:
embeddings = shared_embedding_collection[0]
if embeddings.get_shape() != shape:
raise ValueError(
"The embedding variable with name {} already "
"exists, but its shape does not match required "
"embedding shape here. Please make sure to use "
"different shared_embedding_name for different "
"shared embeddings.".format(args.shared_embedding_name))
else:
embeddings = contrib_variables.model_variable(
name=args.shared_embedding_name,
shape=shape,
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
graph.add_to_collection(shared_embedding_collection_name, embeddings)
else:
embeddings = contrib_variables.model_variable(
name="weights",
shape=[args.vocab_size, args.dimension],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
if _is_variable(embeddings):
embeddings = [embeddings]
else:
embeddings = embeddings._get_variable_list() # pylint: disable=protected-access
# pylint: disable=protected-access
_maybe_restore_from_checkpoint(column._checkpoint_path(), embeddings)
return embedding_ops.safe_embedding_lookup_sparse(
embeddings,
input_tensor,
sparse_weights=weight_tensor,
combiner=args.combiner,
name=column.name + "weights",
max_norm=args.max_norm)
def _maybe_restore_from_checkpoint(checkpoint_path, variable):
if checkpoint_path is not None:
path, tensor_name = checkpoint_path
weights_to_restore = variable
if len(variable) == 1:
weights_to_restore = variable[0]
checkpoint_utils.init_from_checkpoint(path,
{tensor_name: weights_to_restore})
def one_hot_column(sparse_id_column):
"""Creates an `_OneHotColumn` for a one-hot or multi-hot repr in a DNN.
Args:
sparse_id_column: A _SparseColumn which is created by
`sparse_column_with_*`
or crossed_column functions. Note that `combiner` defined in
`sparse_id_column` is ignored.
Returns:
An _OneHotColumn.
"""
return _OneHotColumn(sparse_id_column)
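# A minimal usage sketch: wrapping a small-vocabulary sparse column so it can
# be fed to a DNN as a multi-hot input. The "department" feature is the same
# hypothetical example used above; kept as comments so that nothing executes
# at import time:
#
#   dept_one_hot = one_hot_column(
#       sparse_column_with_keys("department", ["math", "philosophy", "english"]))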
def embedding_column(sparse_id_column,
dimension,
combiner="mean",
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""Creates an `_EmbeddingColumn` for feeding sparse data into a DNN.
Args:
sparse_id_column: A `_SparseColumn` which is created by for example
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in `sparse_id_column` is ignored.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_column.length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
max_norm: (Optional). If not None, embedding values are l2-normalized to
the value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True
Returns:
An `_EmbeddingColumn`.
"""
return _EmbeddingColumn(sparse_id_column, dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
max_norm=max_norm, trainable=trainable)
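# A minimal usage sketch, embedding a hashed sparse column into 16 dimensions
# for use in a DNN; the feature name and sizes are hypothetical. Kept as
# comments so that nothing executes at import time:
#
#   dept_embedded = embedding_column(
#       sparse_column_with_hash_bucket("department", hash_bucket_size=1000),
#       dimension=16, combiner="mean")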
def shared_embedding_columns(sparse_id_columns,
dimension,
combiner="mean",
shared_embedding_name=None,
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""Creates a list of `_EmbeddingColumn` sharing the same embedding.
Args:
sparse_id_columns: An iterable of `_SparseColumn`, such as those created by
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in each sparse_id_column is ignored.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
shared_embedding_name: (Optional). A string specifying the name of shared
embedding weights. This will be needed if you want to reference the shared
embedding separately from the generated `_EmbeddingColumn`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_columns[0].length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
max_norm: (Optional). If not None, embedding values are l2-normalized to
the value of max_norm.
trainable: (Optional). Should the embedding be trainable. Default is True
Returns:
A tuple of `_EmbeddingColumn` with shared embedding space.
Raises:
ValueError: if sparse_id_columns is empty, or its elements are not
compatible with each other.
TypeError: if `sparse_id_columns` is not a sequence or is a string. If at
least one element of `sparse_id_columns` is not a `SparseColumn` or a
`WeightedSparseColumn`.
"""
if (not isinstance(sparse_id_columns, collections.Sequence) or
isinstance(sparse_id_columns, six.string_types)):
raise TypeError(
"sparse_id_columns must be a non-string sequence (ex: list or tuple) "
"instead of type {}.".format(type(sparse_id_columns)))
if len(sparse_id_columns) < 1:
raise ValueError("The input sparse_id_columns should have at least one "
"element.")
for sparse_id_column in sparse_id_columns:
if not (isinstance(sparse_id_column, _SparseColumn) or
isinstance(sparse_id_column, _WeightedSparseColumn)):
raise TypeError("Elements of sparse_id_columns must be _SparseColumn or "
"_WeightedSparseColumn, but {} is not."
.format(sparse_id_column))
if len(sparse_id_columns) == 1:
return [
_EmbeddingColumn(sparse_id_columns[0], dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
shared_embedding_name, max_norm=max_norm,
trainable=trainable)]
else:
# Check compatibility of sparse_id_columns
compatible = True
for column in sparse_id_columns[1:]:
if isinstance(sparse_id_columns[0], _WeightedSparseColumn):
compatible = compatible and sparse_id_columns[0].is_compatible(column)
else:
compatible = compatible and column.is_compatible(sparse_id_columns[0])
if not compatible:
raise ValueError("The input sparse id columns are not compatible.")
# Construct the shared name and size for shared embedding space.
if not shared_embedding_name:
# Sort the columns so that shared_embedding_name will be deterministic
# even if users pass in unsorted columns from a dict or something.
# Since they are different classes, ordering is SparseColumns first,
# then WeightedSparseColumns.
sparse_columns = []
weighted_sparse_columns = []
for column in sparse_id_columns:
if isinstance(column, _SparseColumn):
sparse_columns.append(column)
else:
weighted_sparse_columns.append(column)
sorted_columns = sorted(sparse_columns) + sorted(
weighted_sparse_columns, key=lambda x: x.name)
if len(sorted_columns) <= 3:
shared_embedding_name = "_".join([column.name
for column in sorted_columns])
else:
shared_embedding_name = "_".join([column.name
for column in sorted_columns[0:3]])
shared_embedding_name += (
"_plus_{}_others".format(len(sorted_columns) - 3))
shared_embedding_name += "_shared_embedding"
shared_vocab_size = sparse_id_columns[0].length
embedded_columns = []
for column in sparse_id_columns:
embedded_columns.append(
_EmbeddingColumn(column, dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
shared_embedding_name, shared_vocab_size,
max_norm=max_norm, trainable=trainable))
return tuple(embedded_columns)
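# Illustrative usage sketch (assumed feature names and sizes): two sparse
# columns with the same bucket size can share one embedding matrix, which is
# useful when they draw ids from the same vocabulary.
#
#   query = sparse_column_with_hash_bucket("query_word", hash_bucket_size=10000)
#   title = sparse_column_with_hash_bucket("title_word", hash_bucket_size=10000)
#   query_emb, title_emb = shared_embedding_columns([query, title], dimension=32)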
class _ScatteredEmbeddingColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_ScatteredEmbeddingColumn", [
"column_name", "size", "dimension", "hash_key", "combiner",
"initializer"
])):
"""See `scattered_embedding_column`."""
def __new__(cls,
column_name,
size,
dimension,
hash_key,
combiner="sqrtn",
initializer=None):
if initializer is not None and not callable(initializer):
raise ValueError("initializer must be callable if specified. "
"column_name: {}".format(column_name))
if initializer is None:
logging.warn("The default stddev value of initializer will change from "
"\"0.1\" to \"1/sqrt(dimension)\" after 2017/02/25.")
stddev = 0.1
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=stddev)
return super(_ScatteredEmbeddingColumn, cls).__new__(cls, column_name, size,
dimension, hash_key,
combiner,
initializer)
@property
def name(self):
return "{}_scattered_embedding".format(self.column_name)
@property
def config(self):
return {self.column_name: parsing_ops.VarLenFeature(dtypes.string)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["initializer"])
def insert_transformed_feature(self, columns_to_tensors):
columns_to_tensors[self] = columns_to_tensors[self.column_name]
def _deep_embedding_lookup_arguments(self, input_tensor):
return _DeepEmbeddingLookupArguments(
input_tensor=input_tensor,
weight_tensor=None,
vocab_size=self.size,
initializer=self.initializer,
combiner=self.combiner,
dimension=self.dimension,
shared_embedding_name=None,
hash_key=self.hash_key,
max_norm=None,
trainable=True)
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _embeddings_from_arguments(
self,
self._deep_embedding_lookup_arguments(inputs.get(self)),
weight_collections, trainable)
def _transform_feature(self, inputs):
return inputs.get(self.column_name)
@property
def _parse_example_spec(self):
return self.config
def scattered_embedding_column(column_name,
size,
dimension,
hash_key,
combiner="mean",
initializer=None):
"""Creates an embedding column of a sparse feature using parameter hashing.
This is a useful shorthand when you have a sparse feature you want to use an
embedding for, but also want to hash the embedding's values in each dimension
to a variable based on a different hash.
Specifically, the i-th embedding component of a value v is found by retrieving
an embedding weight whose index is a fingerprint of the pair (v,i).
An embedding column with sparse_column_with_hash_bucket such as
embedding_column(
sparse_column_with_hash_bucket(column_name, bucket_size),
dimension)
could be replaced by
scattered_embedding_column(
column_name,
size=bucket_size * dimension,
dimension=dimension,
hash_key=tf.contrib.layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
for the same number of embedding parameters. This should hopefully reduce the
impact of collisions, but adds the cost of slowing down training.
Args:
column_name: A string defining sparse column name.
size: An integer specifying the number of parameters in the embedding layer.
dimension: An integer specifying dimension of the embedding.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as example level
normalizations on the column:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0 and standard deviation 0.1.
Returns:
A _ScatteredEmbeddingColumn.
Raises:
ValueError: if dimension or size is not a positive integer; or if combiner
is not supported.
"""
if (dimension < 1) or (size < 1):
raise ValueError("Dimension and size must be greater than 0. "
"dimension: {}, size: {}, column_name: {}".format(
dimension, size, column_name))
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'. "
"combiner: {}, column_name: {}".format(combiner,
column_name))
return _ScatteredEmbeddingColumn(column_name, size, dimension, hash_key,
combiner, initializer)
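# Illustrative usage sketch (assumed feature name and sizes): a parameter-hashed
# embedding roughly comparable to embedding a 1000-bucket sparse column into 16
# dimensions, following the equivalence described in the docstring above.
#
#   country_emb = scattered_embedding_column(
#       "country", size=1000 * 16, dimension=16,
#       hash_key=tf.contrib.layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)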
def _reshape_real_valued_tensor(input_tensor, output_rank, column_name=None):
"""Reshaping logic for dense, numeric `Tensors`.
Follows the following rules:
1. If `output_rank > input_rank + 1` raise a `ValueError`.
2. If `output_rank == input_rank + 1`, expand `input_tensor` by one
dimension and return
3. If `output_rank == input_rank`, return `input_tensor`.
4. If `output_rank < input_rank`, flatten the inner dimensions of
`input_tensor` and return a `Tensor` with `output_rank`
Args:
input_tensor: a dense `Tensor` to be reshaped.
output_rank: the desired rank of the reshaped `Tensor`.
column_name: (optional) the name of the associated column. Used for error
messages.
Returns:
A `Tensor` with the same entries as `input_tensor` and rank `output_rank`.
Raises:
ValueError: if `output_rank > input_rank + 1`.
"""
input_rank = input_tensor.get_shape().ndims
if input_rank is not None:
if output_rank > input_rank + 1:
error_string = ("Rank of input Tensor ({}) should be the same as "
"output_rank ({}). For example, sequence data should "
"typically be 3 dimensional (rank 3) while non-sequence "
"data is typically 2 dimensional (rank 2).".format(
input_rank, output_rank))
if column_name is not None:
error_string = ("Error while processing column {}.".format(column_name)
+ error_string)
raise ValueError(error_string)
if output_rank == input_rank + 1:
logging.warning(
"Rank of input Tensor ({}) should be the same as output_rank ({}) "
"for column. Will attempt to expand dims. It is highly recommended "
"that you resize your input, as this behavior may change.".format(
input_rank, output_rank))
return array_ops.expand_dims(input_tensor, -1, name="expand_dims")
if output_rank == input_rank:
return input_tensor
# Here, either `input_rank` is unknown or it is greater than `output_rank`.
return layers._inner_flatten(input_tensor, output_rank) # pylint: disable=protected-access
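# Shape sketch for the rules above (illustrative numbers): a rank-2 input of
# shape [batch, 3] is expanded to [batch, 3, 1] for output_rank=3, returned
# unchanged for output_rank=2, and a rank-3 input of shape [batch, 3, 4] is
# flattened to [batch, 12] for output_rank=2.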
class _RealValuedVarLenColumn(_FeatureColumn, collections.namedtuple(
"_RealValuedVarLenColumn",
["column_name", "default_value", "dtype", "normalizer", "is_sparse"])):
"""Represents a real valued feature column for variable length Features.
Instances of this class are immutable.
If is_sparse=False, the dictionary returned by InputBuilder contains a
("column_name", Tensor) pair with a Tensor shape of (batch_size, dimension).
If is_sparse=True, the dictionary contains a ("column_name", SparseTensor)
pair instead with shape inferred after parsing.
"""
@property
def name(self):
return self.column_name
@property
def config(self):
if self.is_sparse:
return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
else:
return {self.column_name: parsing_ops.FixedLenSequenceFeature(
[], self.dtype, allow_missing=True,
default_value=self.default_value)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["normalizer"])
@property
def normalizer_fn(self):
"""Returns the function used to normalize the column."""
return self.normalizer
def _normalized_input_tensor(self, input_tensor):
"""Returns the input tensor after custom normalization is applied."""
if self.normalizer is None:
return input_tensor
if self.is_sparse:
return sparse_tensor_py.SparseTensor(
input_tensor.indices,
self.normalizer(input_tensor.values),
input_tensor.dense_shape)
else:
return self.normalizer(input_tensor)
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.to_float(input_tensor)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
return _reshape_real_valued_tensor(
self._to_dense_tensor(input_tensor), output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
if not self.is_sparse:
return input_tensor
raise ValueError("Set is_sparse to False if you want a dense Tensor for "
"column_name: {}".format(self.name))
@experimental
def _real_valued_var_len_column(column_name,
default_value=None,
dtype=dtypes.float32,
normalizer=None,
is_sparse=False):
"""Creates a `_RealValuedVarLenColumn` for variable-length numeric data.
Note, this is not integrated with any of the DNNEstimators, except the RNN
ones DynamicRNNEstimator and the StateSavingRNNEstimator.
It can either create a parsing config for a SparseTensor (with is_sparse=True)
or a padded Tensor.
The (dense_)shape of the result will be [batch_size, None], which can be used
with is_sparse=False as input into an RNN (see DynamicRNNEstimator or
StateSavingRNNEstimator) or with is_sparse=True as input into a tree (see
gtflow).
Use real_valued_column if the Feature has a fixed length. Use some
SparseColumn for columns to be embedded / one-hot-encoded.
Args:
column_name: A string defining real valued column name.
default_value: A scalar value compatible with dtype. Needs to be specified
if is_sparse=False.
dtype: Defines the type of values. Default value is tf.float32. Needs to be
convertible to tf.float32.
normalizer: If not None, a function that can be used to normalize the value
of the real valued column after default_value is applied for parsing.
Normalizer function takes the input tensor as its argument, and returns
the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
      is_sparse=True, the normalizer will be run on the values of the
`SparseTensor`.
is_sparse: A boolean defining whether to create a SparseTensor or a Tensor.
Returns:
    A _RealValuedVarLenColumn.
Raises:
TypeError: if default_value is not a scalar value compatible with dtype.
TypeError: if dtype is not convertible to tf.float32.
ValueError: if default_value is None and is_sparse is False.
"""
if not (dtype.is_integer or dtype.is_floating):
raise TypeError("dtype must be convertible to float. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if default_value is None and not is_sparse:
raise ValueError("default_value must be provided when is_sparse=False to "
"parse a padded Tensor. "
"column_name: {}".format(column_name))
if isinstance(default_value, list):
raise ValueError(
"Only scalar default value. default_value: {}, column_name: {}".format(
default_value, column_name))
if default_value is not None:
if dtype.is_integer:
default_value = int(default_value)
elif dtype.is_floating:
default_value = float(default_value)
return _RealValuedVarLenColumn(column_name, default_value, dtype, normalizer,
is_sparse)
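# Illustrative usage sketch (assumed feature name): a variable-length numeric
# feature parsed as a SparseTensor, e.g. a per-example list of measurements.
#
#   measurements = _real_valued_var_len_column(
#       "measurements", dtype=dtypes.float32, is_sparse=True)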
class _RealValuedColumn(
_FeatureColumn,
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple(
"_RealValuedColumn",
["column_name", "dimension", "default_value", "dtype", "normalizer"])):
"""Represents a real valued feature column also known as continuous features.
Instances of this class are immutable. The dictionary returned by InputBuilder
contains a ("column_name", Tensor) pair with a Tensor shape of
(batch_size, dimension).
"""
def __new__(cls, column_name, dimension, default_value,
dtype, normalizer):
if default_value is not None:
default_value = tuple(default_value)
return super(_RealValuedColumn, cls).__new__(cls, column_name, dimension,
default_value, dtype,
normalizer)
@property
def name(self):
return self.column_name
@property
def config(self):
default_value = self.default_value
if default_value is not None:
default_value = list(default_value)
return {self.column_name: parsing_ops.FixedLenFeature([self.dimension],
self.dtype,
default_value)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["normalizer"])
@property
def normalizer_fn(self):
"""Returns the function used to normalize the column."""
return self.normalizer
def _normalized_input_tensor(self, input_tensor):
"""Returns the input tensor after custom normalization is applied."""
return (self.normalizer(input_tensor) if self.normalizer is not None else
input_tensor)
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.to_float(input_tensor)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
input_tensor = self._to_dense_tensor(input_tensor)
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
return input_tensor
@property
def _variable_shape(self):
return tensor_shape.TensorShape([self.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
return inputs.get(self)
def _transform_feature(self, inputs):
return math_ops.to_float(
self._normalized_input_tensor(inputs.get(self.name)))
@property
def _parse_example_spec(self):
return self.config
def real_valued_column(column_name,
dimension=1,
default_value=None,
dtype=dtypes.float32,
normalizer=None):
"""Creates a `_RealValuedColumn` for dense numeric data.
Args:
column_name: A string defining real valued column name.
dimension: An integer specifying dimension of the real valued column.
The default is 1.
default_value: A single value compatible with dtype or a list of values
compatible with dtype which the column takes on during tf.Example parsing
if data is missing. When dimension is not None, a default value of None
will cause tf.parse_example to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every dimension. If a list of values is provided,
the length of the list should be equal to the value of `dimension`.
Only scalar default value is supported in case dimension is not specified.
dtype: defines the type of values. Default value is tf.float32. Must be a
non-quantized, real integer or floating point type.
normalizer: If not None, a function that can be used to normalize the value
of the real valued column after default_value is applied for parsing.
Normalizer function takes the input tensor as its argument, and returns
the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
variable length columns, the normalizer should expect an input_tensor of
type `SparseTensor`.
Returns:
A _RealValuedColumn.
Raises:
TypeError: if dimension is not an int
ValueError: if dimension is not a positive integer
TypeError: if default_value is a list but its length is not equal to the
value of `dimension`.
TypeError: if default_value is not compatible with dtype.
ValueError: if dtype is not convertible to tf.float32.
"""
if dimension is None:
raise TypeError("dimension must be an integer. Use the "
"_real_valued_var_len_column for variable length features."
"dimension: {}, column_name: {}".format(dimension,
column_name))
if not isinstance(dimension, int):
raise TypeError("dimension must be an integer. "
"dimension: {}, column_name: {}".format(dimension,
column_name))
if dimension < 1:
raise ValueError("dimension must be greater than 0. "
"dimension: {}, column_name: {}".format(dimension,
column_name))
if not (dtype.is_integer or dtype.is_floating):
raise ValueError("dtype must be convertible to float. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if default_value is None:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, int):
if dtype.is_integer:
default_value = ([default_value for _ in range(dimension)] if dimension
else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if dtype.is_floating:
default_value = float(default_value)
default_value = ([default_value for _ in range(dimension)] if dimension
else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, float):
if dtype.is_floating and (not dtype.is_integer):
default_value = ([default_value for _ in range(dimension)] if dimension
else [default_value])
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, list):
if len(default_value) != dimension:
raise ValueError(
"The length of default_value must be equal to dimension. "
"default_value: {}, dimension: {}, column_name: {}".format(
default_value, dimension, column_name))
# Check if the values in the list are all integers or are convertible to
# floats.
is_list_all_int = True
is_list_all_float = True
for v in default_value:
if not isinstance(v, int):
is_list_all_int = False
if not (isinstance(v, float) or isinstance(v, int)):
is_list_all_float = False
if is_list_all_int:
if dtype.is_integer:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
elif dtype.is_floating:
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if is_list_all_float:
if dtype.is_floating and (not dtype.is_integer):
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
raise TypeError("default_value must be compatible with dtype. "
"default_value: {}, dtype: {}, column_name: {}".format(
default_value, dtype, column_name))
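# Illustrative usage sketch (assumed feature names): a scalar float feature and
# a fixed-length 3-vector with a per-dimension default value.
#
#   age = real_valued_column("age")
#   rgb = real_valued_column("rgb", dimension=3, default_value=[0.0, 0.0, 0.0])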
class _BucketizedColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
fc_core._DenseColumn, # pylint: disable=protected-access
collections.namedtuple("_BucketizedColumn", ["source_column",
"boundaries"])):
"""Represents a bucketization transformation also known as binning.
Instances of this class are immutable. Values in `source_column` will be
bucketized based on `boundaries`.
For example, if the inputs are:
boundaries = [0, 10, 100]
source_column = [[-5], [150], [10], [0], [4], [19]]
then the bucketized feature will be:
output = [[0], [3], [2], [1], [1], [2]]
Attributes:
source_column: A _RealValuedColumn defining dense column.
boundaries: A list or tuple of floats specifying the boundaries. It has to
be sorted. [a, b, c] defines following buckets: (-inf., a), [a, b),
[b, c), [c, inf.)
Raises:
ValueError: if 'boundaries' is empty or not sorted.
"""
def __new__(cls, source_column, boundaries):
if not isinstance(source_column, _RealValuedColumn):
raise TypeError("source_column must be an instance of _RealValuedColumn. "
"source_column: {}".format(source_column))
if source_column.dimension is None:
raise ValueError("source_column must have a defined dimension. "
"source_column: {}".format(source_column))
if (not isinstance(boundaries, list) and
not isinstance(boundaries, tuple)) or not boundaries:
raise ValueError("boundaries must be a non-empty list or tuple. "
"boundaries: {}".format(boundaries))
# We allow bucket boundaries to be monotonically increasing
# (ie a[i+1] >= a[i]). When two bucket boundaries are the same, we
# de-duplicate.
sanitized_boundaries = []
for i in range(len(boundaries) - 1):
if boundaries[i] == boundaries[i + 1]:
continue
elif boundaries[i] < boundaries[i + 1]:
sanitized_boundaries.append(boundaries[i])
else:
raise ValueError("boundaries must be a sorted list. "
"boundaries: {}".format(boundaries))
sanitized_boundaries.append(boundaries[len(boundaries) - 1])
return super(_BucketizedColumn, cls).__new__(cls, source_column,
tuple(sanitized_boundaries))
@property
def name(self):
return "{}_bucketized".format(self.source_column.name)
@property
def length(self):
"""Returns total number of buckets."""
return len(self.boundaries) + 1
@property
def config(self):
return self.source_column.config
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if output_rank != 2:
raise ValueError("BucketizedColumn currently only supports output_rank=2")
return array_ops.reshape(
array_ops.one_hot(
math_ops.to_int64(input_tensor),
self.length,
1.,
0.,
name="one_hot"), [-1, self.length * self.source_column.dimension],
name="reshape")
def to_sparse_tensor(self, input_tensor):
"""Creates a SparseTensor from the bucketized Tensor."""
dimension = self.source_column.dimension
batch_size = array_ops.shape(input_tensor, name="shape")[0]
if dimension > 1:
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(
math_ops.range(0, batch_size), 1, name="expand_dims"),
[1, dimension],
name="tile"), [-1],
name="reshape")
i2 = array_ops.tile(
math_ops.range(0, dimension), [batch_size], name="tile")
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = array_ops.reshape(
input_tensor, [-1], name="reshape") + self.length * i2
else:
# Simpler indices when dimension=1
i1 = math_ops.range(0, batch_size)
i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")
indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
shape = math_ops.to_int64(array_ops.stack([batch_size, dimension]))
sparse_id_values = sparse_tensor_py.SparseTensor(
indices, bucket_indices, shape)
return sparse_id_values
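  # Worked example for the flattening above (illustrative numbers): with
  # dimension == 2 and length == 4 buckets, a bucketized row [1, 3] becomes
  # sparse ids [1 + 4 * 0, 3 + 4 * 1] == [1, 7], so every dimension maps into
  # its own disjoint id range.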
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.to_sparse_tensor(input_tensor),
weight_tensor=None,
vocab_size=self.length * self.source_column.dimension,
initializer=init_ops.zeros_initializer(),
combiner="sum")
def _transform_feature(self, inputs):
"""Handles cross transformation."""
# Bucketize the source column.
return bucketization_op.bucketize(
inputs.get(self.source_column),
boundaries=list(self.boundaries),
name="bucketize")
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
columns_to_tensors[self] = self._transform_feature(
_LazyBuilderByColumnsToTensor(columns_to_tensors))
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length * self.source_column.dimension
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access
self.to_sparse_tensor(inputs.get(self)), None)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(
[self.length * self.source_column.dimension])
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return self._to_dnn_input_layer(
inputs.get(self), weight_collections, trainable)
def bucketized_column(source_column, boundaries):
"""Creates a _BucketizedColumn for discretizing dense input.
Args:
source_column: A _RealValuedColumn defining dense column.
boundaries: A list or tuple of floats specifying the boundaries. It has to
be sorted.
Returns:
A _BucketizedColumn.
Raises:
ValueError: if 'boundaries' is empty or not sorted.
"""
return _BucketizedColumn(source_column, boundaries)
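# Illustrative usage sketch reusing the boundaries from the class docstring
# above: values below 0 land in bucket 0, [0, 10) in bucket 1, [10, 100) in
# bucket 2 and values >= 100 in bucket 3.
#
#   price = real_valued_column("price")
#   price_buckets = bucketized_column(price, boundaries=[0., 10., 100.])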
class _CrossedColumn(
_FeatureColumn,
fc_core._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple("_CrossedColumn", [
"columns", "hash_bucket_size", "hash_key", "combiner",
"ckpt_to_load_from", "tensor_name_in_ckpt"
])):
"""Represents a cross transformation also known as conjunction or combination.
Instances of this class are immutable. It crosses given `columns`. Crossed
column output will be hashed to hash_bucket_size.
Conceptually, transformation can be thought as:
Hash(cartesian product of features in columns) % `hash_bucket_size`
For example, if the columns are
SparseTensor referred by first column: shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
    SparseTensor referred by second column: shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
then crossed feature will look like:
shape = [2, 2]
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
Attributes:
columns: An iterable of _FeatureColumn. Items can be an instance of
_SparseColumn, _CrossedColumn, or _BucketizedColumn.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as example
      level normalizations on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
Raises:
TypeError: if all items in columns are not an instance of _SparseColumn,
_CrossedColumn, or _BucketizedColumn.
ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1. Also,
if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
"""
@staticmethod
def _assert_is_crossable(column):
if isinstance(column, (_SparseColumn, _CrossedColumn, _BucketizedColumn)):
return
raise TypeError("columns must be a set of _SparseColumn, "
"_CrossedColumn, or _BucketizedColumn instances. "
"(column {} is a {})".format(column,
column.__class__.__name__))
def __new__(cls,
columns,
hash_bucket_size,
hash_key,
combiner="sum",
ckpt_to_load_from=None,
tensor_name_in_ckpt=None):
for column in columns:
_CrossedColumn._assert_is_crossable(column)
if len(columns) < 2:
raise ValueError("columns must contain at least 2 elements. "
"columns: {}".format(columns))
if hash_bucket_size < 2:
raise ValueError("hash_bucket_size must be at least 2. "
"hash_bucket_size: {}".format(hash_bucket_size))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError("Must specify both `ckpt_to_load_from` and "
"`tensor_name_in_ckpt` or none of them.")
sorted_columns = sorted(
[column for column in columns], key=lambda column: column.name)
return super(_CrossedColumn, cls).__new__(cls, tuple(sorted_columns),
hash_bucket_size, hash_key,
combiner,
ckpt_to_load_from,
tensor_name_in_ckpt)
@property
def name(self):
sorted_names = sorted([column.name for column in self.columns])
return "_X_".join(sorted_names)
@property
def config(self):
config = {}
for column in self.columns:
config.update(_get_feature_config(column))
return config
@property
def length(self):
"""Returns total number of buckets."""
return self.hash_bucket_size
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
del input_tensor
return None
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
del input_tensor
del weight_collections
del trainable
del output_rank
raise ValueError("CrossedColumn is not supported in DNN. "
"Please use embedding_column. column: {}".format(self))
def _checkpoint_path(self):
if self.ckpt_to_load_from is not None:
return self.ckpt_to_load_from, self.tensor_name_in_ckpt
return None
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=input_tensor,
weight_tensor=None,
vocab_size=self.length,
initializer=init_ops.zeros_initializer(),
combiner=self.combiner)
def _transform_feature(self, inputs):
"""Handles cross transformation."""
def _collect_leaf_level_columns(cross):
"""Collects base columns contained in the cross."""
leaf_level_columns = []
for c in cross.columns:
if isinstance(c, _CrossedColumn):
leaf_level_columns.extend(_collect_leaf_level_columns(c))
else:
leaf_level_columns.append(c)
return leaf_level_columns
feature_tensors = []
for c in _collect_leaf_level_columns(self):
if isinstance(c, _SparseColumn):
feature_tensors.append(inputs.get(c.name))
else:
if isinstance(c, _BucketizedColumn):
feature_tensors.append(c.to_sparse_tensor(inputs.get(c)))
else:
feature_tensors.append(inputs.get(c))
return sparse_feature_cross_op.sparse_feature_cross(
feature_tensors,
hashed_output=True,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key,
name="cross")
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
columns_to_tensors[self] = self._transform_feature(
_LazyBuilderByColumnsToTensor(columns_to_tensors))
@property
def _parse_example_spec(self):
return self.config
@property
def _num_buckets(self):
return self.length
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return fc_core._CategoricalColumn.IdWeightPair(inputs.get(self), None) # pylint: disable=protected-access
class _LazyBuilderByColumnsToTensor(object):
def __init__(self, columns_to_tensors):
self._columns_to_tensors = columns_to_tensors
def get(self, key):
"""Gets the transformed feature column."""
if key in self._columns_to_tensors:
return self._columns_to_tensors[key]
if isinstance(key, str):
raise ValueError(
"features dictionary doesn't contain key ({})".format(key))
if not isinstance(key, _FeatureColumn):
raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
"Provided: {}".format(key))
key.insert_transformed_feature(self._columns_to_tensors)
return self._columns_to_tensors[key]
def crossed_column(columns, hash_bucket_size, combiner="sum",
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
hash_key=None):
"""Creates a _CrossedColumn for performing feature crosses.
Args:
columns: An iterable of _FeatureColumn. Items can be an instance of
_SparseColumn, _CrossedColumn, or _BucketizedColumn.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as example
      level normalizations on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
A _CrossedColumn.
Raises:
TypeError: if any item in columns is not an instance of _SparseColumn,
_CrossedColumn, or _BucketizedColumn, or
hash_bucket_size is not an int.
ValueError: if hash_bucket_size is not > 1 or
len(columns) is not > 1.
"""
return _CrossedColumn(
columns,
hash_bucket_size,
hash_key,
combiner=combiner,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt)
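# Illustrative usage sketch (assumed feature names and bucket sizes): crossing
# two sparse columns into a single hashed conjunction feature.
#
#   country = sparse_column_with_hash_bucket("country", hash_bucket_size=100)
#   language = sparse_column_with_hash_bucket("language", hash_bucket_size=100)
#   country_x_language = crossed_column([country, language],
#                                       hash_bucket_size=10000)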
class DataFrameColumn(_FeatureColumn,
collections.namedtuple("DataFrameColumn",
["column_name", "series"])):
"""Represents a feature column produced from a `DataFrame`.
Instances of this class are immutable. A `DataFrame` column may be dense or
sparse, and may have any shape, with the constraint that dimension 0 is
batch_size.
Args:
column_name: a name for this column
series: a `Series` to be wrapped, which has already had its base features
substituted with `PredefinedSeries`.
"""
def __new__(cls, column_name, series):
return super(DataFrameColumn, cls).__new__(cls, column_name, series)
@property
def name(self):
return self.column_name
@property
def config(self):
return self.series.required_base_features()
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self.name
def insert_transformed_feature(self, columns_to_tensors):
# The cache must already contain mappings from the expected base feature
# names to Tensors.
# Passing columns_to_tensors as the cache here means that multiple outputs
# of the transform will be cached, keyed by the repr of their associated
# TransformedSeries.
# The specific requested output ends up in columns_to_tensors twice: once
# keyed by the TransformedSeries repr, and once keyed by this
# DataFrameColumn instance.
columns_to_tensors[self] = self.series.build(columns_to_tensors)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
return self._to_dnn_input_layer(input_tensor)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def _get_feature_config(feature_column):
"""Returns configuration for the base feature defined in feature_column."""
if not isinstance(feature_column, _FeatureColumn):
raise TypeError(
"feature_columns should only contain instances of _FeatureColumn. "
"Given column is {}".format(feature_column))
if isinstance(feature_column, (_SparseColumn, _WeightedSparseColumn,
_EmbeddingColumn, _RealValuedColumn,
_RealValuedVarLenColumn,
_BucketizedColumn, _CrossedColumn,
_OneHotColumn, _ScatteredEmbeddingColumn)):
return feature_column.config
raise TypeError("Not supported _FeatureColumn type. "
"Given column is {}".format(feature_column))
def create_feature_spec_for_parsing(feature_columns):
"""Helper that prepares features config from input feature_columns.
The returned feature config can be used as arg 'features' in tf.parse_example.
Typical usage example:
```python
# Define features and transformations
feature_a = sparse_column_with_vocabulary_file(...)
feature_b = real_valued_column(...)
feature_c_bucketized = bucketized_column(real_valued_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=[feature_a, feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
batch_examples = tf.parse_example(
serialized=serialized_examples,
features=create_feature_spec_for_parsing(feature_columns))
```
For the above example, create_feature_spec_for_parsing would return the dict:
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn, unless
feature_columns is a dict -- in which case, this should be true of all
values in the dict.
Returns:
A dict mapping feature keys to FixedLenFeature or VarLenFeature values.
"""
if isinstance(feature_columns, dict):
feature_columns = feature_columns.values()
features_config = {}
for column in feature_columns:
features_config.update(_get_feature_config(column))
return features_config
def _create_sequence_feature_spec_for_parsing(sequence_feature_columns,
allow_missing_by_default=False):
"""Prepares a feature spec for parsing `tf.SequenceExample`s.
Args:
sequence_feature_columns: an iterable containing all the feature columns.
All items should be instances of classes derived from `_FeatureColumn`.
allow_missing_by_default: whether to set `allow_missing=True` by default for
`FixedLenSequenceFeature`s.
Returns:
A dict mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature`.
"""
feature_spec = create_feature_spec_for_parsing(sequence_feature_columns)
sequence_feature_spec = {}
for key, feature in feature_spec.items():
if isinstance(feature, parsing_ops.VarLenFeature):
sequence_feature = feature
elif (isinstance(feature, parsing_ops.FixedLenFeature) or
isinstance(feature, parsing_ops.FixedLenSequenceFeature)):
default_is_set = feature.default_value is not None
if default_is_set:
logging.warning(
'Found default value {} for feature "{}". Ignoring this value and '
'setting `allow_missing=True` instead.'.
format(feature.default_value, key))
sequence_feature = parsing_ops.FixedLenSequenceFeature(
shape=feature.shape,
dtype=feature.dtype,
allow_missing=(allow_missing_by_default or default_is_set))
else:
raise TypeError(
"Unsupported feature type: {}".format(type(feature).__name__))
sequence_feature_spec[key] = sequence_feature
return sequence_feature_spec
def make_place_holder_tensors_for_base_features(feature_columns):
"""Returns placeholder tensors for inference.
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
Returns:
A dict mapping feature keys to SparseTensors (sparse columns) or
placeholder Tensors (dense columns).
"""
# Get dict mapping features to FixedLenFeature or VarLenFeature values.
dict_for_parse_example = create_feature_spec_for_parsing(feature_columns)
placeholders = {}
for column_name, column_type in dict_for_parse_example.items():
if isinstance(column_type, parsing_ops.VarLenFeature):
# Sparse placeholder for sparse tensors.
placeholders[column_name] = array_ops.sparse_placeholder(
column_type.dtype, name="Placeholder_{}".format(column_name))
else:
# Simple placeholder for dense tensors.
placeholders[column_name] = array_ops.placeholder(
column_type.dtype,
shape=(None, column_type.shape[0]),
name="Placeholder_{}".format(column_name))
return placeholders
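# Illustrative usage sketch (assumes feature columns like the hypothetical ones
# built in the examples above): the same column set used for parsing during
# training can be turned into inference-time placeholders.
#
#   feature_columns = set([age, price_buckets, country_x_language])
#   placeholders = make_place_holder_tensors_for_base_features(feature_columns)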
class _SparseIdLookupConfig(
collections.namedtuple("_SparseIdLookupConfig",
["vocabulary_file", "keys", "num_oov_buckets",
"vocab_size", "default_value"])):
"""Defines lookup configuration for a sparse feature.
An immutable object defines lookup table configuration used by
tf.feature_to_id_v2.
Attributes:
vocabulary_file: The vocabulary filename. vocabulary_file cannot be combined
with keys.
keys: A 1-D string iterable that specifies the mapping of strings to
      indices. It means a feature in keys will map to its index in keys.
num_oov_buckets: The number of out-of-vocabulary buckets. If zero all out of
vocabulary features will be ignored.
vocab_size: Number of the elements in the vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
"""
def __new__(cls,
vocabulary_file=None,
keys=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1):
return super(_SparseIdLookupConfig, cls).__new__(cls, vocabulary_file, keys,
num_oov_buckets,
vocab_size, default_value)
| apache-2.0 | 4,964,603,536,699,119,000 | 2,373,003,441,070,766,600 | 38.989792 | 110 | 0.650497 | false |
laurenrevere/osf.io | scripts/osfstorage/glacier_audit.py | 6 | 3553 | #!/usr/bin/env python
# encoding: utf-8
"""Verify that all `OsfStorageFileVersion` records created earlier than two
days before the latest inventory report are contained in the inventory, point
to the correct Glacier archive, and have an archive of the correct size.
Should be run after `glacier_inventory.py`.
"""
import gc
import json
import logging
from dateutil.parser import parse as parse_date
from dateutil.relativedelta import relativedelta
from framework.celery_tasks import app as celery_app
from website.app import init_app
from osf.models import FileVersion
from scripts import utils as scripts_utils
from scripts.osfstorage import settings as storage_settings
from scripts.osfstorage import utils as storage_utils
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Glacier inventories take about four hours to generate and reflect files added
# about a day before the request is made; only check records created over two
# days before the job.
DELTA_DATE = relativedelta(days=2)
class AuditError(Exception):
pass
class NotFound(AuditError):
pass
class BadSize(AuditError):
pass
class BadArchiveId(AuditError):
pass
def get_targets(date):
return FileVersion.objects.filter(
created__lt=date - DELTA_DATE, metadata__has_key='archive', location__isnull=False
).iterator()
def check_glacier_version(version, inventory):
data = inventory.get(version.metadata['archive'])
if data is None:
raise NotFound('Glacier archive for version {} not found'.format(version._id))
if version.metadata['archive'] != data['ArchiveId']:
raise BadArchiveId(
'Glacier archive for version {} has incorrect archive ID {} (expected {})'.format(
version._id,
data['ArchiveId'],
version.metadata['archive'],
)
)
if (version.size or version.metadata.get('size')) != data['Size']:
raise BadSize(
'Glacier archive for version {} has incorrect size {} (expected {})'.format(
version._id,
data['Size'],
version.size,
)
)
def main(job_id=None):
glacier = storage_utils.get_glacier_resource()
if job_id:
job = glacier.Job(
storage_settings.GLACIER_VAULT_ACCOUNT_ID,
storage_settings.GLACIER_VAULT_NAME,
job_id,
)
else:
vault = storage_utils.get_glacier_resource().Vault(
storage_settings.GLACIER_VAULT_ACCOUNT_ID,
storage_settings.GLACIER_VAULT_NAME
)
jobs = vault.completed_jobs.all()
if not jobs:
raise RuntimeError('No completed jobs found')
job = sorted(jobs, key=lambda job: job.creation_date)[-1]
response = job.get_output()
output = json.loads(response['body'].read().decode('utf-8'))
creation_date = parse_date(job.creation_date)
inventory = {
each['ArchiveId']: each
for each in output['ArchiveList']
}
for idx, version in enumerate(get_targets(creation_date)):
try:
check_glacier_version(version, inventory)
except AuditError as error:
logger.error(str(error))
if idx % 1000 == 0:
gc.collect()
@celery_app.task(name='scripts.osfstorage.glacier_audit')
def run_main(job_id=None, dry_run=True):
init_app(set_backends=True, routes=False)
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
main(job_id=job_id)
| apache-2.0 | -6,408,704,931,930,188,000 | 2,806,463,316,554,661,000 | 28.608333 | 94 | 0.655502 | false |
tdenniston/Halide | python_bindings/tutorial/lesson_11_cross_compilation.py | 6 | 6731 | #!/usr/bin/python3
# Halide tutorial lesson 11.
# This lesson demonstrates how to use Halide as a cross-compiler.
# This lesson can be built by invoking the command:
# make tutorial_lesson_11_cross_compilation
# in a shell with the current directory at the top of the halide source tree.
# Otherwise, see the platform-specific compiler invocations below.
# On linux, you can compile and run it like so:
# g++ lesson_11*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide -lpthread -ldl -o lesson_11
# LD_LIBRARY_PATH=../bin ./lesson_11
# On os x:
# g++ lesson_11*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide -o lesson_11
# DYLD_LIBRARY_PATH=../bin ./lesson_11
#include "Halide.h"
#include <stdio.h>
#using namespace Halide
from halide import *
from struct import unpack
def main():
# We'll define the simple one-stage pipeline that we used in lesson 10.
brighter = Func("brighter")
x, y = Var("x"), Var("y")
# Declare the arguments.
offset = Param(UInt(8))
input = ImageParam(UInt(8), 2)
args = ArgumentsVector()
args.append(input)
args.append(offset)
# Define the Func.
brighter[x, y] = input[x, y] + offset
# Schedule it.
brighter.vectorize(x, 16).parallel(y)
# The following line is what we did in lesson 10. It compiles an
# object file suitable for the system that you're running this
# program on. For example, if you compile and run this file on
# 64-bit linux on an x86 cpu with sse4.1, then the generated code
# will be suitable for 64-bit linux on x86 with sse4.1.
brighter.compile_to_file("lesson_11_host", args)
# We can also compile object files suitable for other cpus and
# operating systems. You do this with an optional third argument
# to compile_to_file which specifies the target to compile for.
create_android = True
create_windows = True
create_ios = True
if create_android:
# Let's use this to compile a 32-bit arm android version of this code:
target = Target()
target.os = TargetOS.Android # The operating system
target.arch = TargetArch.ARM # The CPU architecture
target.bits = 32 # The bit-width of the architecture
arm_features = FeaturesVector() # A list of features to set
target.set_features(arm_features)
# Pass the target as the last argument.
brighter.compile_to_file("lesson_11_arm_32_android", args, target)
if create_windows:
# And now a Windows object file for 64-bit x86 with AVX and SSE 4.1:
target = Target()
target.os = TargetOS.Windows
target.arch = TargetArch.X86
target.bits = 64
x86_features = FeaturesVector()
x86_features.append(TargetFeature.AVX)
x86_features.append(TargetFeature.SSE41)
target.set_features(x86_features)
brighter.compile_to_file("lesson_11_x86_64_windows", args, target)
if create_ios:
# And finally an iOS mach-o object file for one of Apple's 32-bit
# ARM processors - the A6. It's used in the iPhone 5. The A6 uses
# a slightly modified ARM architecture called ARMv7s. We specify
# this using the target features field. Support for Apple's
# 64-bit ARM processors is very new in llvm, and still somewhat
# flaky.
target = Target()
target.os = TargetOS.IOS
target.arch = TargetArch.ARM
target.bits = 32
armv7s_features = FeaturesVector()
armv7s_features.append(TargetFeature.ARMv7s)
target.set_features(armv7s_features)
brighter.compile_to_file("lesson_11_arm_32_ios", args, target)
# Now let's check these files are what they claim, by examining
# their first few bytes.
if create_android:
# 32-arm android object files start with the magic bytes:
# uint8_t []
arm_32_android_magic = [0x7f, ord('E'), ord('L'), ord('F'), # ELF format
1, # 32-bit
1, # 2's complement little-endian
1] # Current version of elf
length = len(arm_32_android_magic)
f = open("lesson_11_arm_32_android.o", "rb")
try:
header_bytes = f.read(length)
except:
print("Android object file not generated")
return -1
f.close()
header = list(unpack("B"*length, header_bytes))
if header != arm_32_android_magic:
print([x == y for x, y in zip(header, arm_32_android_magic)])
raise Exception("Unexpected header bytes in 32-bit arm object file.")
return -1
if create_windows:
# 64-bit windows object files start with the magic 16-bit value 0x8664
# (presumably referring to x86-64)
# uint8_t []
win_64_magic = [0x64, 0x86]
f = open("lesson_11_x86_64_windows.obj", "rb")
try:
header_bytes = f.read(2)
except:
print("Windows object file not generated")
return -1
f.close()
header = list(unpack("B"*2, header_bytes))
if header != win_64_magic:
raise Exception("Unexpected header bytes in 64-bit windows object file.")
return -1
if create_ios:
# 32-bit arm iOS mach-o files start with the following magic bytes:
# uint32_t []
arm_32_ios_magic = [
0xfeedface, # Mach-o magic bytes
#0xfe, 0xed, 0xfa, 0xce, # Mach-o magic bytes
12, # CPU type is ARM
11, # CPU subtype is ARMv7s
1] # It's a relocatable object file.
f = open("lesson_11_arm_32_ios.o", "rb")
try:
header_bytes = f.read(4*4)
except:
print("ios object file not generated")
return -1
f.close()
header = list(unpack("I"*4, header_bytes))
if header != arm_32_ios_magic:
raise Exception("Unexpected header bytes in 32-bit arm ios object file.")
return -1
# It looks like the object files we produced are plausible for
# those targets. We'll count that as a success for the purposes
# of this tutorial. For a real application you'd then need to
# figure out how to integrate Halide into your cross-compilation
# toolchain. There are several small examples of this in the
# Halide repository under the apps folder. See HelloAndroid and
# HelloiOS here:
# https:#github.com/halide/Halide/tree/master/apps/
print("Success!")
return 0
if __name__ == "__main__":
main()
| mit | 2,058,036,138,833,186,300 | 5,532,567,250,930,390,000 | 35.781421 | 95 | 0.606745 | false |
wzmao/mbio | format.py | 2 | 1229 | import os
import sys
def autop(x, allyes=0):
print x
a = os.popen('autopep8 ' + x + ' -d').read()
if a == '':
print '>>>> No change.'
else:
print a
print x
if not allyes:
a = raw_input("Do you want to change?(y/n):")
if a == 'y':
a = os.popen('autopep8 ' + x + ' -i').read()
else:
print "Didn't change it."
else:
print 'Allyes=1 so correct it automatically.'
a = os.popen('autopep8 ' + x + ' -i').read()
def check(x, allyes=0):
l = os.listdir(x)
for i in l:
if os.path.isfile(os.path.join(x, i)) and os.path.join(x, i).endswith('.py') and not i.startswith('.'):
autop(os.path.join(x, i), allyes=allyes)
if os.path.isdir(os.path.join(x, i)) and not i.startswith('.') and i != 'build':
check(os.path.join(x, i), allyes=allyes)
print '#' * int(os.popen('stty size').read().split()[-1])
if len(sys.argv) > 1 and any([i in sys.argv[1:] for i in ['y', 'Y', '-y', '-Y']]):
allyes = 1
else:
allyes = 0
check(os.path.abspath(os.path.dirname(sys.argv[0])), allyes=allyes)
print '#' * int(os.popen('stty size').read().split()[-1])
| mit | 9,160,924,788,679,785,000 | 1,538,607,201,989,605,600 | 31.342105 | 111 | 0.510985 | false |
Pikecillo/genna | external/4Suite-XML-1.0.2/test/Xml/XPath/Core/test_nodeset_expr.py | 1 | 2062 | #!/usr/bin/env python
#
# File Name: File Name
#
# Documentation: http://docs.fourthought.com/file/name.html
#
def Test(tester):
tester.startGroup('Node-set Expressions')
tester.startTest('Creating test environment')
from Ft.Xml.XPath import ParsedExpr
from Ft.Xml.XPath import ParsedPredicateList
DomTree = tester.test_data['tree']
import DummyExpr
from DummyExpr import boolT, boolF
from DummyExpr import num3, numN4, num4p5
from DummyExpr import strPi, strText
nodeset0 = DummyExpr.DummyNodeSetExpr([])
nodeset1 = DummyExpr.DummyNodeSetExpr([DomTree.ROOT])
nodeset2 = DummyExpr.DummyNodeSetExpr([DomTree.ROOT, DomTree.CHILD1])
nodeset3 = DummyExpr.DummyNodeSetExpr([DomTree.CHILD1])
nodeset4 = DummyExpr.DummyNodeSetExpr([DomTree.CHILD3])
from Ft.Xml.XPath import Context
context1 = Context.Context(DomTree.CHILD1,1,2)
context2 = Context.Context(DomTree.CHILD2,2,2)
plT = ParsedPredicateList.ParsedPredicateList([boolT])
plF = ParsedPredicateList.ParsedPredicateList([boolF])
tests = {ParsedExpr.ParsedFilterExpr : [((nodeset2, plT), context1, list(nodeset2.val)),
((nodeset2, plF), context1, []),
],
ParsedExpr.ParsedPathExpr : [((0, nodeset2, nodeset1), context1, list(nodeset1.val)),
],
ParsedExpr.ParsedUnionExpr : [((nodeset2, nodeset1), context1, list(nodeset2.val)),
],
}
tester.testDone()
for (expr, boolTests) in tests.items():
for (args, context, expected) in boolTests:
p = apply(expr, args)
tester.startTest('Comparing %s' % repr(p))
result = p.evaluate(context)
tester.compare(result, expected)
tester.testDone()
tester.groupDone()
if __name__ == '__main__':
from Ft.Lib.TestSuite import Tester
tester = Tester.Tester()
Test(tester)
| gpl-2.0 | -3,926,586,296,882,840,600 | 8,384,936,698,129,843,000 | 32.258065 | 98 | 0.612027 | false |
AstroTech/atlassian-python-api | examples/bamboo/bamboo_label_based_cleaner.py | 2 | 2343 | import logging
from datetime import datetime
from datetime import timedelta
from atlassian import Bamboo
"""
This example shows how to clean up expired build results for a specific label.
Feel free to modify the OLDER_DAYS and LABEL parameters.
Builds are only actually removed after you change the value of the DRY_RUN variable.
"""
logging.basicConfig(level=logging.ERROR)
BAMBOO_LOGIN = "admin"
BAMBOO_PASSWORD = "password"
BAMBOO_URL = "https://bamboo.example.com"
DRY_RUN = True
LABEL = "cores_found"
OLDER_DAYS = 60
def get_all_projects():
return [x["key"] for x in bamboo.projects(max_results=10000)]
def get_plans_from_project(project_key):
return [x["key"] for x in bamboo.project_plans(project_key, max_results=1000)]
if __name__ == "__main__":
bamboo = Bamboo(url=BAMBOO_URL, username=BAMBOO_LOGIN, password=BAMBOO_PASSWORD, timeout=180)
projects = get_all_projects()
print("Start analyzing the {} projects".format(len(projects)))
for project in projects:
print("Inspecting {} project".format(project))
plans = get_plans_from_project(project)
print("Start analyzing the {} plans".format(len(plans)))
for plan in plans:
print("Inspecting {} plan".format(plan))
build_results = [
x for x in bamboo.results(plan_key=plan, label=LABEL, max_results=100, include_all_states=True)
]
for build in build_results:
build_key = build.get("buildResultKey") or None
print("Inspecting {} build".format(build_key))
build_value = bamboo.build_result(build_key)
build_complete_time = build_value.get("buildCompletedTime") or None
if not build_complete_time:
continue
datetimeObj = datetime.strptime(build_complete_time.split("+")[0] + "000", "%Y-%m-%dT%H:%M:%S.%f")
if datetime.now() > datetimeObj + timedelta(days=OLDER_DAYS):
print(
"Build is old {} as build complete date {}".format(
build_key, build_complete_time.strftime("%Y-%m-%d")
)
)
if not DRY_RUN:
print("Removing {} build".format(build_key))
bamboo.delete_build_result(build_key)
| apache-2.0 | -2,435,646,719,219,330,600 | -5,305,826,861,107,169,000 | 38.05 | 114 | 0.601366 | false |
jfmartinez64/test | couchpotato/core/media/movie/_base/main.py | 15 | 14072 | import traceback
import time
from CodernityDB.database import RecordNotFound
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString, getTitle, getImdb, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie import MovieTypeBase
import six
log = CPLog(__name__)
class MovieBase(MovieTypeBase):
_type = 'movie'
def __init__(self):
# Initialize this type
super(MovieBase, self).__init__()
self.initType()
addApiView('movie.add', self.addView, docs = {
'desc': 'Add new movie to the wanted list',
'return': {'type': 'object', 'example': """{
'success': True,
'movie': object
}"""},
'params': {
'identifier': {'desc': 'IMDB id of the movie your want to add.'},
'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
'force_readd': {'desc': 'Force re-add even if movie already in wanted or manage. Default: True'},
'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
}
})
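        # Editor's note (hedged): the docs above describe the HTTP API view
        # registered for this handler. With CouchPotato's usual api-key URL
        # routing a call would look roughly like (placeholders, not verified
        # against this deployment):
        #   GET /api/<api_key>/movie.add/?identifier=tt0078748&title=Alien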
addApiView('movie.edit', self.edit, docs = {
            'desc': 'Edit an existing movie in the wanted list',
'params': {
'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'},
'profile_id': {'desc': 'ID of quality profile you want the edit the movie to.'},
'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
'default_title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
}
})
addEvent('movie.add', self.add)
addEvent('movie.update', self.update)
addEvent('movie.update_release_dates', self.updateReleaseDate)
def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None):
if not params: params = {}
# Make sure it's a correct zero filled imdb id
params['identifier'] = getImdb(params.get('identifier', ''))
if not params.get('identifier'):
msg = 'Can\'t add movie without imdb identifier.'
log.error(msg)
fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
return False
elif not params.get('info'):
try:
is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True)
if not is_movie:
msg = 'Can\'t add movie, seems to be a TV show.'
log.error(msg)
fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
return False
except:
pass
info = params.get('info')
if not info or (info and len(info.get('titles', [])) == 0):
info = fireEvent('movie.info', merge = True, extended = False, identifier = params.get('identifier'))
# Allow force re-add overwrite from param
if 'force_readd' in params:
fra = params.get('force_readd')
force_readd = fra.lower() not in ['0', '-1'] if not isinstance(fra, bool) else fra
# Set default title
default_title = toUnicode(info.get('title'))
titles = info.get('titles', [])
counter = 0
def_title = None
for title in titles:
if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title):
def_title = toUnicode(title)
break
counter += 1
if not def_title:
def_title = toUnicode(titles[0])
# Default profile and category
default_profile = {}
if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False):
default_profile = fireEvent('profile.default', single = True)
cat_id = params.get('category_id')
try:
db = get_db()
media = {
'_t': 'media',
'type': 'movie',
'title': def_title,
'identifiers': {
'imdb': params.get('identifier')
},
'status': status if status else 'active',
'profile_id': params.get('profile_id') or default_profile.get('_id'),
'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
}
# Update movie info
try: del info['in_wanted']
except: pass
try: del info['in_library']
except: pass
media['info'] = info
new = False
previous_profile = None
try:
m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc']
try:
db.get('id', m.get('profile_id'))
previous_profile = m.get('profile_id')
except RecordNotFound:
pass
except:
log.error('Failed getting previous profile: %s', traceback.format_exc())
except:
new = True
m = db.insert(media)
# Update dict to be usable
m.update(media)
added = True
do_search = False
search_after = search_after and self.conf('search_on_add', section = 'moviesearcher')
onComplete = None
if new:
if search_after:
onComplete = self.createOnComplete(m['_id'])
search_after = False
elif force_readd:
# Clean snatched history
for release in fireEvent('release.for_media', m['_id'], single = True):
if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
if params.get('ignore_previous', False):
fireEvent('release.update_status', release['_id'], status = 'ignored')
else:
fireEvent('release.delete', release['_id'], single = True)
m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile
m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None)
m['last_edit'] = int(time.time())
m['tags'] = []
do_search = True
db.update(m)
else:
try: del params['info']
except: pass
log.debug('Movie already exists, not updating: %s', params)
added = False
# Trigger update info
if added and update_after:
# Do full update to get images etc
fireEventAsync('movie.update', m['_id'], default_title = params.get('title'), on_complete = onComplete)
# Remove releases
for rel in fireEvent('release.for_media', m['_id'], single = True):
                if rel['status'] == 'available':
db.delete(rel)
movie_dict = fireEvent('media.get', m['_id'], single = True)
if not movie_dict:
log.debug('Failed adding media, can\'t find it anymore')
return False
if do_search and search_after:
onComplete = self.createOnComplete(m['_id'])
onComplete()
if added and notify_after:
if params.get('title'):
message = 'Successfully added "%s" to your wanted list.' % params.get('title', '')
else:
title = getTitle(m)
if title:
message = 'Successfully added "%s" to your wanted list.' % title
else:
message = 'Successfully added to your wanted list.'
fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = message)
return movie_dict
except:
log.error('Failed adding media: %s', traceback.format_exc())
def addView(self, **kwargs):
add_dict = self.add(params = kwargs)
return {
'success': True if add_dict else False,
'movie': add_dict,
}
def edit(self, id = '', **kwargs):
try:
db = get_db()
ids = splitString(id)
for media_id in ids:
try:
m = db.get('id', media_id)
m['profile_id'] = kwargs.get('profile_id') or m['profile_id']
cat_id = kwargs.get('category_id')
if cat_id is not None:
m['category_id'] = cat_id if len(cat_id) > 0 else m['category_id']
# Remove releases
for rel in fireEvent('release.for_media', m['_id'], single = True):
                        if rel['status'] == 'available':
db.delete(rel)
# Default title
if kwargs.get('default_title'):
m['title'] = kwargs.get('default_title')
db.update(m)
fireEvent('media.restatus', m['_id'], single = True)
m = db.get('id', media_id)
movie_dict = fireEvent('media.get', m['_id'], single = True)
fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id))
except:
print traceback.format_exc()
log.error('Can\'t edit non-existing media')
return {
'success': True,
}
except:
log.error('Failed editing media: %s', traceback.format_exc())
return {
'success': False,
}
def update(self, media_id = None, identifier = None, default_title = None, extended = False):
"""
Update movie information inside media['doc']['info']
@param media_id: document id
@param default_title: default title, if empty, use first one or existing one
@param extended: update with extended info (parses more info, actors, images from some info providers)
@return: dict, with media
"""
if self.shuttingDown():
return
lock_key = 'media.get.%s' % media_id if media_id else identifier
self.acquireLock(lock_key)
media = {}
try:
db = get_db()
if media_id:
media = db.get('id', media_id)
else:
media = db.get('media', 'imdb-%s' % identifier, with_doc = True)['doc']
info = fireEvent('movie.info', merge = True, extended = extended, identifier = getIdentifier(media))
# Don't need those here
try: del info['in_wanted']
except: pass
try: del info['in_library']
except: pass
if not info or len(info) == 0:
log.error('Could not update, no movie info to work with: %s', identifier)
return False
# Update basic info
media['info'] = info
titles = info.get('titles', [])
log.debug('Adding titles: %s', titles)
# Define default title
if default_title:
def_title = None
if default_title:
counter = 0
for title in titles:
if title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title):
def_title = toUnicode(title)
break
counter += 1
if not def_title:
def_title = toUnicode(titles[0])
media['title'] = def_title
# Files
image_urls = info.get('images', [])
self.getPoster(media, image_urls)
db.update(media)
except:
log.error('Failed update media: %s', traceback.format_exc())
self.releaseLock(lock_key)
return media
def updateReleaseDate(self, media_id):
"""
Update release_date (eta) info only
@param media_id: document id
@return: dict, with dates dvd, theater, bluray, expires
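        Note (editor's addition): 'expires' is a unix timestamp; it is
        compared against time.time() below to decide whether the cached
        dates need refreshing.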
"""
try:
db = get_db()
media = db.get('id', media_id)
if not media.get('info'):
media = self.update(media_id)
dates = media.get('info', {}).get('release_date')
else:
dates = media.get('info').get('release_date')
if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates:
dates = fireEvent('movie.info.release_date', identifier = getIdentifier(media), merge = True)
media['info'].update({'release_date': dates})
db.update(media)
return dates
except:
log.error('Failed updating release dates: %s', traceback.format_exc())
return {}
| gpl-3.0 | 1,997,201,273,119,376,000 | -7,117,345,066,402,150,000 | 37.032432 | 208 | 0.511583 | false |
mlmurray/TensorFlow-Experimentation | examples/3 - Neural Networks/alexnet.py | 1 | 5087 | '''
AlexNet implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
AlexNet Paper (http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
# Import MINST data
import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import tensorflow as tf
# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.8 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.types.float32, [None, n_input])
y = tf.placeholder(tf.types.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.types.float32) # dropout (keep probability)
# Create AlexNet model
def conv2d(name, l_input, w, b):
return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'),b), name=name)
def max_pool(name, l_input, k):
return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)
def norm(name, l_input, lsize=4):
return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)
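# Editor's note on tensor shapes (follows from the 'SAME' padding and k=2
# pooling above): 28x28 -> 14x14 -> 7x7 -> 4x4 after the three max_pool calls,
# which is why the first dense weight 'wd1' below is sized [4*4*256, 1024].
# A small sanity check of that arithmetic (defined only, never run here):
def pooled_size(size, k=2, n_pools=3):
    import math
    for _ in range(n_pools):
        size = int(math.ceil(size / float(k)))  # 'SAME' pooling rounds up
    return size  # pooled_size(28) == 4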
def alex_net(_X, _weights, _biases, _dropout):
# Reshape input picture
_X = tf.reshape(_X, shape=[-1, 28, 28, 1])
# Convolution Layer
conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])
# Max Pooling (down-sampling)
pool1 = max_pool('pool1', conv1, k=2)
# Apply Normalization
norm1 = norm('norm1', pool1, lsize=4)
# Apply Dropout
norm1 = tf.nn.dropout(norm1, _dropout)
# Convolution Layer
conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
# Max Pooling (down-sampling)
pool2 = max_pool('pool2', conv2, k=2)
# Apply Normalization
norm2 = norm('norm2', pool2, lsize=4)
# Apply Dropout
norm2 = tf.nn.dropout(norm2, _dropout)
# Convolution Layer
conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
# Max Pooling (down-sampling)
pool3 = max_pool('pool3', conv3, k=2)
# Apply Normalization
norm3 = norm('norm3', pool3, lsize=4)
# Apply Dropout
norm3 = tf.nn.dropout(norm3, _dropout)
# Fully connected layer
dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv3 output to fit dense layer input
dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1') # Relu activation
dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2') # Relu activation
# Output, class prediction
out = tf.matmul(dense2, _weights['out']) + _biases['out']
return out
# Store layers weight & bias
weights = {
'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),
'wd2': tf.Variable(tf.random_normal([1024, 1024])),
'out': tf.Variable(tf.random_normal([1024, 10]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([64])),
'bc2': tf.Variable(tf.random_normal([128])),
'bc3': tf.Variable(tf.random_normal([256])),
'bd1': tf.Variable(tf.random_normal([1024])),
'bd2': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = alex_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.types.float32))
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
if step % display_step == 0:
# Calculate batch accuracy
acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
# Calculate batch loss
loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
step += 1
print "Optimization Finished!"
# Calculate accuracy for 256 mnist test images
print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})
| mit | -6,954,557,889,240,155,000 | -3,822,033,966,914,633,700 | 37.24812 | 144 | 0.654806 | false |
kuasha/cosmos | samples/barebone/views.py | 1 | 3616 | import logging
import settings
from tornado.httpclient import AsyncHTTPClient
import cosmos
from cosmos.service.auth import BasicLoginHandler
__author__ = 'Maruf Maniruzzaman'
import tornado
from tornado import gen
import json
from cosmos.service.requesthandler import RequestHandler
class IndexHandler(RequestHandler):
@gen.coroutine
def get(self):
try:
with open(settings.INDEX_HTML_PATH) as f:
self.write(f.read())
except IOError as e:
msg = """
File not found {}.
            If you are developing cosmos, create a local_settings.py file beside cosmosmain.py with the following content:
import os
STATIC_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../adminpanel/app")
TEMPLATE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../adminpanel/templates")
INDEX_HTML_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../adminpanel/app/index.html")
""".format(settings.INDEX_HTML_PATH)
raise tornado.web.HTTPError(404, msg)
class LoginHandler(BasicLoginHandler):
@gen.coroutine
def get(self):
next = self.get_argument("next", '/')
try:
with open(settings.LOGIN_HTML_PATH) as f:
login_template = f.read()
self._show_login_window(next, login_template=login_template)
except IOError as e:
raise tornado.web.HTTPError(404, "File not found")
class AuthPublicKeyHandler(RequestHandler):
@gen.coroutine
def get(self, tenant_id):
self.set_header("Content-Type", 'application/x-pem-file')
self.set_header('Content-Disposition', 'attachment; filename=%s_pub.pem' % tenant_id)
self.write(settings.OAUTH2_PUBLIC_KEY_PEM)
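# Editor's note (hedged example): OAuth2DummyClientHandler below fetches this
# same endpoint, so the tenant public key can also be downloaded directly,
# e.g. (host placeholder):
#   curl -o tenant_pub.pem https://<host>/<tenant_id>/auth/key/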
class OAuth2DummyClientHandler(RequestHandler):
@gen.coroutine
def get(self, function):
protocol = self.request.protocol
host = self.request.host
oauth2_service_host = protocol + "://"+ host
#oauth2_service_host = "https://authp.com"
tenant_id = settings.TENANT_ID
self.write(self.request.uri + " <br />" + function + "<br />")
params = json.dumps({k: self.get_argument(k) for k in self.request.arguments})
self.write(params)
code = self.get_argument("code", "temp")
token = self.get_argument("access_token", default=None)
if token:
http_client = AsyncHTTPClient()
resp = yield http_client.fetch("{0}/{1}/auth/key/".format(oauth2_service_host, tenant_id))
if not resp or not resp.code == 200 or resp.body is None:
self.write("Could not get auth server public key")
else:
pub_pem = resp.body
logging.debug("Public key: {0}".format(pub_pem))
header, claims = cosmos.auth.oauth2.verify_token(token, pub_pem, ['RS256'])
self.write("<br /><hr />")
self.write(json.dumps(header))
self.write("<br /><hr />")
self.write(json.dumps(claims))
self.write("<br /><hr />")
self.write("<a href='{}/{}/oauth2/authorize/?response_type=code&state=mystate&resource=myresource.com/test&redirect_uri={}://{}/oauth2client/authorize/?tag=2'>Request Code</a><br />".format(oauth2_service_host, settings.TENANT_ID, protocol, host))
self.write("<a href='{}/{}/oauth2/token/?code={}&state=mystate&grant_type=code&redirect_uri={}://{}/oauth2client/authorize/?tag=2'>Request Token</a><br />".format(oauth2_service_host, tenant_id, code, protocol, host))
self.finish()
| mit | -2,235,495,488,935,979,800 | -3,592,402,885,134,469,600 | 38.736264 | 255 | 0.630808 | false |
ismail-s/urwid | urwid/listbox.py | 12 | 59569 | #!/usr/bin/python
#
# Urwid listbox class
# Copyright (C) 2004-2012 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from urwid.util import is_mouse_press
from urwid.canvas import SolidCanvas, CanvasCombine
from urwid.widget import Widget, nocache_widget_render_instance, BOX, GIVEN
from urwid.decoration import calculate_top_bottom_filler, normalize_valign
from urwid import signals
from urwid.signals import connect_signal
from urwid.monitored_list import MonitoredList, MonitoredFocusList
from urwid.container import WidgetContainerMixin
from urwid.command_map import (CURSOR_UP, CURSOR_DOWN,
CURSOR_PAGE_UP, CURSOR_PAGE_DOWN)
class ListWalkerError(Exception):
pass
class ListWalker(object):
__metaclass__ = signals.MetaSignals
signals = ["modified"]
def _modified(self):
signals.emit_signal(self, "modified")
def get_focus(self):
"""
This default implementation relies on a focus attribute and a
__getitem__() method defined in a subclass.
Override and don't call this method if these are not defined.
"""
try:
focus = self.focus
return self[focus], focus
except (IndexError, KeyError, TypeError):
return None, None
def get_next(self, position):
"""
This default implementation relies on a next_position() method and a
__getitem__() method defined in a subclass.
Override and don't call this method if these are not defined.
"""
try:
position = self.next_position(position)
return self[position], position
except (IndexError, KeyError):
return None, None
def get_prev(self, position):
"""
This default implementation relies on a prev_position() method and a
__getitem__() method defined in a subclass.
Override and don't call this method if these are not defined.
"""
try:
position = self.prev_position(position)
return self[position], position
except (IndexError, KeyError):
return None, None
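# Editor's illustrative sketch (not part of urwid's API): the default
# get_focus/get_next/get_prev implementations above only need a subclass to
# provide __getitem__, next_position(), prev_position(), set_focus() and a
# ``focus`` attribute. A minimal, unbounded walker over the non-negative
# integers could look like this:
class _ExampleIntegerWalker(ListWalker):
    def __init__(self):
        self.focus = 0
    def __getitem__(self, position):
        if position < 0:
            raise IndexError
        from urwid.widget import Text # local import keeps the sketch self-contained
        return Text("item %d" % position)
    def next_position(self, position):
        return position + 1
    def prev_position(self, position):
        if position <= 0:
            raise IndexError
        return position - 1
    def set_focus(self, position):
        self.focus = position
        self._modified()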
class PollingListWalker(object): # NOT ListWalker subclass
def __init__(self, contents):
"""
contents -- list to poll for changes
This class is deprecated. Use SimpleFocusListWalker instead.
"""
import warnings
warnings.warn("PollingListWalker is deprecated, "
"use SimpleFocusListWalker instead.", DeprecationWarning)
self.contents = contents
if not getattr(contents, '__getitem__', None):
raise ListWalkerError("PollingListWalker expecting list like "
"object, got: %r" % (contents,))
self.focus = 0
def _clamp_focus(self):
if self.focus >= len(self.contents):
self.focus = len(self.contents)-1
def get_focus(self):
"""Return (focus widget, focus position)."""
if len(self.contents) == 0: return None, None
self._clamp_focus()
return self.contents[self.focus], self.focus
def set_focus(self, position):
"""Set focus position."""
# this class is deprecated, otherwise I might have fixed this:
assert type(position) == int
self.focus = position
def get_next(self, start_from):
"""
Return (widget after start_from, position after start_from).
"""
pos = start_from + 1
if len(self.contents) <= pos: return None, None
return self.contents[pos],pos
def get_prev(self, start_from):
"""
Return (widget before start_from, position before start_from).
"""
pos = start_from - 1
if pos < 0: return None, None
return self.contents[pos],pos
class SimpleListWalker(MonitoredList, ListWalker):
def __init__(self, contents):
"""
contents -- list to copy into this object
Changes made to this object (when it is treated as a list) are
detected automatically and will cause ListBox objects using
this list walker to be updated.
"""
if not getattr(contents, '__getitem__', None):
raise ListWalkerError, "SimpleListWalker expecting list like object, got: %r"%(contents,)
MonitoredList.__init__(self, contents)
self.focus = 0
def _get_contents(self):
"""
Return self.
Provides compatibility with old SimpleListWalker class.
"""
return self
contents = property(_get_contents)
def _modified(self):
if self.focus >= len(self):
self.focus = max(0, len(self)-1)
ListWalker._modified(self)
def set_modified_callback(self, callback):
"""
This function inherited from MonitoredList is not
implemented in SimpleListWalker.
Use connect_signal(list_walker, "modified", ...) instead.
"""
raise NotImplementedError('Use connect_signal('
'list_walker, "modified", ...) instead.')
def set_focus(self, position):
"""Set focus position."""
try:
if position < 0 or position >= len(self):
raise ValueError
except (TypeError, ValueError):
raise IndexError, "No widget at position %s" % (position,)
self.focus = position
self._modified()
def next_position(self, position):
"""
        Return the position after the given position.
"""
if len(self) - 1 <= position:
raise IndexError
return position + 1
def prev_position(self, position):
"""
        Return the position before the given position.
"""
if position <= 0:
raise IndexError
return position - 1
def positions(self, reverse=False):
"""
Optional method for returning an iterable of positions.
"""
if reverse:
return xrange(len(self) - 1, -1, -1)
return xrange(len(self))
class SimpleFocusListWalker(ListWalker, MonitoredFocusList):
def __init__(self, contents):
"""
contents -- list to copy into this object
Changes made to this object (when it is treated as a list) are
detected automatically and will cause ListBox objects using
this list walker to be updated.
Also, items added or removed before the widget in focus with
normal list methods will cause the focus to be updated
intelligently.
"""
if not getattr(contents, '__getitem__', None):
raise ListWalkerError("SimpleFocusListWalker expecting list like "
"object, got: %r"%(contents,))
MonitoredFocusList.__init__(self, contents)
def set_modified_callback(self, callback):
"""
This function inherited from MonitoredList is not
implemented in SimpleFocusListWalker.
Use connect_signal(list_walker, "modified", ...) instead.
"""
raise NotImplementedError('Use connect_signal('
'list_walker, "modified", ...) instead.')
def set_focus(self, position):
"""Set focus position."""
self.focus = position
def next_position(self, position):
"""
        Return the position after the given position.
"""
if len(self) - 1 <= position:
raise IndexError
return position + 1
def prev_position(self, position):
"""
        Return the position before the given position.
"""
if position <= 0:
raise IndexError
return position - 1
def positions(self, reverse=False):
"""
Optional method for returning an iterable of positions.
"""
if reverse:
return xrange(len(self) - 1, -1, -1)
return xrange(len(self))
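# Editor's illustrative sketch (hypothetical helper, not part of urwid): the
# usual pairing of SimpleFocusListWalker with a ListBox, demonstrating the
# focus-following behaviour described in the class docstring above.
def _example_focus_walker_usage():
    from urwid.widget import Text
    walker = SimpleFocusListWalker([Text("a"), Text("b"), Text("c")])
    listbox = ListBox(walker)
    listbox.focus_position = 1            # focus the "b" widget
    walker.insert(0, Text("new first"))   # focus follows "b" to position 2
    return listbox, walker.focus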
class ListBoxError(Exception):
pass
class ListBox(Widget, WidgetContainerMixin):
"""
a horizontally stacked list of widgets
"""
_selectable = True
_sizing = frozenset([BOX])
def __init__(self, body):
"""
:param body: a ListWalker subclass such as
:class:`SimpleFocusListWalker` that contains
widgets to be displayed inside the list box
:type body: ListWalker
"""
if getattr(body, 'get_focus', None):
self.body = body
else:
self.body = PollingListWalker(body)
try:
connect_signal(self.body, "modified", self._invalidate)
except NameError:
# our list walker has no modified signal so we must not
# cache our canvases because we don't know when our
# content has changed
self.render = nocache_widget_render_instance(self)
# offset_rows is the number of rows between the top of the view
# and the top of the focused item
self.offset_rows = 0
# inset_fraction is used when the focused widget is off the
# top of the view. it is the fraction of the widget cut off
# at the top. (numerator, denominator)
self.inset_fraction = (0,1)
# pref_col is the preferred column for the cursor when moving
# between widgets that use the cursor (edit boxes etc.)
self.pref_col = 'left'
# variable for delayed focus change used by set_focus
self.set_focus_pending = 'first selectable'
# variable for delayed valign change used by set_focus_valign
self.set_focus_valign_pending = None
def calculate_visible(self, size, focus=False ):
"""
Returns the widgets that would be displayed in
the ListBox given the current *size* and *focus*.
see :meth:`Widget.render` for parameter details
:returns: (*middle*, *top*, *bottom*) or (``None``, ``None``, ``None``)
*middle*
(*row offset*(when +ve) or *inset*(when -ve),
*focus widget*, *focus position*, *focus rows*,
*cursor coords* or ``None``)
*top*
(*# lines to trim off top*,
list of (*widget*, *position*, *rows*) tuples above focus
in order from bottom to top)
*bottom*
(*# lines to trim off bottom*,
list of (*widget*, *position*, *rows*) tuples below focus
in order from top to bottom)
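        Callers in this module typically unpack the result as:
            row_offset, focus_widget, focus_pos, focus_rows, cursor = middle
            trim_top, fill_above = top
            trim_bottom, fill_below = bottom
        (see render() and the _keypress_* helpers below).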
"""
(maxcol, maxrow) = size
# 0. set the focus if a change is pending
if self.set_focus_pending or self.set_focus_valign_pending:
self._set_focus_complete( (maxcol, maxrow), focus )
# 1. start with the focus widget
focus_widget, focus_pos = self.body.get_focus()
if focus_widget is None: #list box is empty?
return None,None,None
top_pos = focus_pos
offset_rows, inset_rows = self.get_focus_offset_inset(
(maxcol,maxrow))
# force at least one line of focus to be visible
if maxrow and offset_rows >= maxrow:
offset_rows = maxrow -1
# adjust position so cursor remains visible
cursor = None
if maxrow and focus_widget.selectable() and focus:
if hasattr(focus_widget,'get_cursor_coords'):
cursor=focus_widget.get_cursor_coords((maxcol,))
if cursor is not None:
cx, cy = cursor
effective_cy = cy + offset_rows - inset_rows
if effective_cy < 0: # cursor above top?
inset_rows = cy
elif effective_cy >= maxrow: # cursor below bottom?
offset_rows = maxrow - cy -1
if offset_rows < 0: # need to trim the top
inset_rows, offset_rows = -offset_rows, 0
# set trim_top by focus trimmimg
trim_top = inset_rows
focus_rows = focus_widget.rows((maxcol,),True)
# 2. collect the widgets above the focus
pos = focus_pos
fill_lines = offset_rows
fill_above = []
top_pos = pos
while fill_lines > 0:
prev, pos = self.body.get_prev( pos )
if prev is None: # run out of widgets above?
offset_rows -= fill_lines
break
top_pos = pos
p_rows = prev.rows( (maxcol,) )
if p_rows: # filter out 0-height widgets
fill_above.append( (prev, pos, p_rows) )
if p_rows > fill_lines: # crosses top edge?
trim_top = p_rows-fill_lines
break
fill_lines -= p_rows
trim_bottom = focus_rows + offset_rows - inset_rows - maxrow
if trim_bottom < 0: trim_bottom = 0
# 3. collect the widgets below the focus
pos = focus_pos
fill_lines = maxrow - focus_rows - offset_rows + inset_rows
fill_below = []
while fill_lines > 0:
next, pos = self.body.get_next( pos )
if next is None: # run out of widgets below?
break
n_rows = next.rows( (maxcol,) )
if n_rows: # filter out 0-height widgets
fill_below.append( (next, pos, n_rows) )
if n_rows > fill_lines: # crosses bottom edge?
trim_bottom = n_rows-fill_lines
fill_lines -= n_rows
break
fill_lines -= n_rows
# 4. fill from top again if necessary & possible
fill_lines = max(0, fill_lines)
if fill_lines >0 and trim_top >0:
if fill_lines <= trim_top:
trim_top -= fill_lines
offset_rows += fill_lines
fill_lines = 0
else:
fill_lines -= trim_top
offset_rows += trim_top
trim_top = 0
pos = top_pos
while fill_lines > 0:
prev, pos = self.body.get_prev( pos )
if prev is None:
break
p_rows = prev.rows( (maxcol,) )
fill_above.append( (prev, pos, p_rows) )
if p_rows > fill_lines: # more than required
trim_top = p_rows-fill_lines
offset_rows += fill_lines
break
fill_lines -= p_rows
offset_rows += p_rows
# 5. return the interesting bits
return ((offset_rows - inset_rows, focus_widget,
focus_pos, focus_rows, cursor ),
(trim_top, fill_above), (trim_bottom, fill_below))
def render(self, size, focus=False ):
"""
Render ListBox and return canvas.
see :meth:`Widget.render` for details
"""
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol, maxrow), focus=focus)
if middle is None:
return SolidCanvas(" ", maxcol, maxrow)
_ignore, focus_widget, focus_pos, focus_rows, cursor = middle
trim_top, fill_above = top
trim_bottom, fill_below = bottom
combinelist = []
rows = 0
fill_above.reverse() # fill_above is in bottom-up order
for widget,w_pos,w_rows in fill_above:
canvas = widget.render((maxcol,))
if w_rows != canvas.rows():
raise ListBoxError, "Widget %r at position %r within listbox calculated %d rows but rendered %d!"% (widget,w_pos,w_rows, canvas.rows())
rows += w_rows
combinelist.append((canvas, w_pos, False))
focus_canvas = focus_widget.render((maxcol,), focus=focus)
if focus_canvas.rows() != focus_rows:
raise ListBoxError, "Focus Widget %r at position %r within listbox calculated %d rows but rendered %d!"% (focus_widget,focus_pos,focus_rows, focus_canvas.rows())
c_cursor = focus_canvas.cursor
if cursor != c_cursor:
raise ListBoxError, "Focus Widget %r at position %r within listbox calculated cursor coords %r but rendered cursor coords %r!" %(focus_widget,focus_pos,cursor,c_cursor)
rows += focus_rows
combinelist.append((focus_canvas, focus_pos, True))
for widget,w_pos,w_rows in fill_below:
canvas = widget.render((maxcol,))
if w_rows != canvas.rows():
raise ListBoxError, "Widget %r at position %r within listbox calculated %d rows but rendered %d!"% (widget,w_pos,w_rows, canvas.rows())
rows += w_rows
combinelist.append((canvas, w_pos, False))
final_canvas = CanvasCombine(combinelist)
if trim_top:
final_canvas.trim(trim_top)
rows -= trim_top
if trim_bottom:
final_canvas.trim_end(trim_bottom)
rows -= trim_bottom
if rows > maxrow:
raise ListBoxError, "Listbox contents too long! Probably urwid's fault (please report): %r" % ((top,middle,bottom),)
if rows < maxrow:
bottom_pos = focus_pos
if fill_below: bottom_pos = fill_below[-1][1]
if trim_bottom != 0 or self.body.get_next(bottom_pos) != (None,None):
raise ListBoxError, "Listbox contents too short! Probably urwid's fault (please report): %r" % ((top,middle,bottom),)
final_canvas.pad_trim_top_bottom(0, maxrow - rows)
return final_canvas
def get_cursor_coords(self, size):
"""
See :meth:`Widget.get_cursor_coords` for details
"""
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol, maxrow), True)
if middle is None:
return None
offset_inset, _ignore1, _ignore2, _ignore3, cursor = middle
if not cursor:
return None
x, y = cursor
y += offset_inset
if y < 0 or y >= maxrow:
return None
return (x, y)
def set_focus_valign(self, valign):
"""Set the focus widget's display offset and inset.
:param valign: one of:
'top', 'middle', 'bottom'
('fixed top', rows)
('fixed bottom', rows)
('relative', percentage 0=top 100=bottom)
"""
vt, va = normalize_valign(valign,ListBoxError)
self.set_focus_valign_pending = vt, va
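    # Editor's usage note for set_focus_valign() above, e.g.:
    #   listbox.set_focus_valign('middle')
    #   listbox.set_focus_valign(('relative', 25))
    # The shift is deferred: the pending value is applied on the next
    # render/keypress via _set_focus_valign_complete().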
def set_focus(self, position, coming_from=None):
"""
Set the focus position and try to keep the old focus in view.
:param position: a position compatible with :meth:`self.body.set_focus`
:param coming_from: set to 'above' or 'below' if you know that
old position is above or below the new position.
:type coming_from: str
"""
if coming_from not in ('above', 'below', None):
raise ListBoxError("coming_from value invalid: %r" %
(coming_from,))
focus_widget, focus_pos = self.body.get_focus()
if focus_widget is None:
raise IndexError("Can't set focus, ListBox is empty")
self.set_focus_pending = coming_from, focus_widget, focus_pos
self.body.set_focus(position)
def get_focus(self):
"""
Return a `(focus widget, focus position)` tuple, for backwards
compatibility. You may also use the new standard container
properties :attr:`focus` and :attr:`focus_position` to read these values.
"""
return self.body.get_focus()
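    # Editor's note: equivalent ways to read the focus, per the docstring
    # above:
    #   widget, position = listbox.get_focus()
    #   widget = listbox.focus
    #   position = listbox.focus_position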
def _get_focus(self):
"""
Return the widget in focus according to our :obj:`list walker <ListWalker>`.
"""
return self.body.get_focus()[0]
focus = property(_get_focus,
doc="the child widget in focus or None when ListBox is empty")
def _get_focus_position(self):
"""
Return the list walker position of the widget in focus. The type
of value returned depends on the :obj:`list walker <ListWalker>`.
"""
w, pos = self.body.get_focus()
if w is None:
raise IndexError, "No focus_position, ListBox is empty"
return pos
focus_position = property(_get_focus_position, set_focus, doc="""
the position of child widget in focus. The valid values for this
position depend on the list walker in use.
:exc:`IndexError` will be raised by reading this property when the
ListBox is empty or setting this property to an invalid position.
""")
def _contents(self):
class ListBoxContents(object):
__getitem__ = self._contents__getitem__
return ListBoxContents()
def _contents__getitem__(self, key):
# try list walker protocol v2 first
getitem = getattr(self.body, '__getitem__', None)
if getitem:
try:
return (getitem(key), None)
except (IndexError, KeyError):
raise KeyError("ListBox.contents key not found: %r" % (key,))
# fall back to v1
w, old_focus = self.body.get_focus()
try:
try:
self.body.set_focus(key)
return self.body.get_focus()[0]
except (IndexError, KeyError):
raise KeyError("ListBox.contents key not found: %r" % (key,))
finally:
self.body.set_focus(old_focus)
contents = property(lambda self: self._contents, doc="""
An object that allows reading widgets from the ListBox's list
walker as a `(widget, options)` tuple. `None` is currently the only
value for options.
.. warning::
This object may not be used to set or iterate over contents.
You must use the list walker stored as
:attr:`.body` to perform manipulation and iteration, if supported.
""")
def options(self):
"""
There are currently no options for ListBox contents.
Return None as a placeholder for future options.
"""
return None
def _set_focus_valign_complete(self, size, focus):
"""
Finish setting the offset and inset now that we have have a
maxcol & maxrow.
"""
(maxcol, maxrow) = size
vt,va = self.set_focus_valign_pending
self.set_focus_valign_pending = None
self.set_focus_pending = None
focus_widget, focus_pos = self.body.get_focus()
if focus_widget is None:
return
rows = focus_widget.rows((maxcol,), focus)
rtop, rbot = calculate_top_bottom_filler(maxrow,
vt, va, GIVEN, rows, None, 0, 0)
self.shift_focus((maxcol, maxrow), rtop)
def _set_focus_first_selectable(self, size, focus):
"""
Choose the first visible, selectable widget below the
current focus as the focus widget.
"""
(maxcol, maxrow) = size
self.set_focus_valign_pending = None
self.set_focus_pending = None
middle, top, bottom = self.calculate_visible(
(maxcol, maxrow), focus=focus)
if middle is None:
return
row_offset, focus_widget, focus_pos, focus_rows, cursor = middle
trim_top, fill_above = top
trim_bottom, fill_below = bottom
if focus_widget.selectable():
return
if trim_bottom:
fill_below = fill_below[:-1]
new_row_offset = row_offset + focus_rows
for widget, pos, rows in fill_below:
if widget.selectable():
self.body.set_focus(pos)
self.shift_focus((maxcol, maxrow),
new_row_offset)
return
new_row_offset += rows
def _set_focus_complete(self, size, focus):
"""
Finish setting the position now that we have maxcol & maxrow.
"""
(maxcol, maxrow) = size
self._invalidate()
if self.set_focus_pending == "first selectable":
return self._set_focus_first_selectable(
(maxcol,maxrow), focus)
if self.set_focus_valign_pending is not None:
return self._set_focus_valign_complete(
(maxcol,maxrow), focus)
coming_from, focus_widget, focus_pos = self.set_focus_pending
self.set_focus_pending = None
# new position
new_focus_widget, position = self.body.get_focus()
if focus_pos == position:
# do nothing
return
# restore old focus temporarily
self.body.set_focus(focus_pos)
middle,top,bottom=self.calculate_visible((maxcol,maxrow),focus)
focus_offset, focus_widget, focus_pos, focus_rows, cursor=middle
trim_top, fill_above = top
trim_bottom, fill_below = bottom
offset = focus_offset
for widget, pos, rows in fill_above:
offset -= rows
if pos == position:
self.change_focus((maxcol, maxrow), pos,
offset, 'below' )
return
offset = focus_offset + focus_rows
for widget, pos, rows in fill_below:
if pos == position:
self.change_focus((maxcol, maxrow), pos,
offset, 'above' )
return
offset += rows
# failed to find widget among visible widgets
self.body.set_focus( position )
widget, position = self.body.get_focus()
rows = widget.rows((maxcol,), focus)
if coming_from=='below':
offset = 0
elif coming_from=='above':
offset = maxrow-rows
else:
offset = (maxrow-rows) // 2
self.shift_focus((maxcol, maxrow), offset)
def shift_focus(self, size, offset_inset):
"""
Move the location of the current focus relative to the top.
This is used internally by methods that know the widget's *size*.
See also :meth:`.set_focus_valign`.
:param size: see :meth:`Widget.render` for details
:param offset_inset: either the number of rows between the
top of the listbox and the start of the focus widget (+ve
value) or the number of lines of the focus widget hidden off
the top edge of the listbox (-ve value) or ``0`` if the top edge
of the focus widget is aligned with the top edge of the
listbox.
:type offset_inset: int
"""
(maxcol, maxrow) = size
if offset_inset >= 0:
if offset_inset >= maxrow:
raise ListBoxError, "Invalid offset_inset: %r, only %r rows in list box"% (offset_inset, maxrow)
self.offset_rows = offset_inset
self.inset_fraction = (0,1)
else:
target, _ignore = self.body.get_focus()
tgt_rows = target.rows( (maxcol,), True )
if offset_inset + tgt_rows <= 0:
raise ListBoxError, "Invalid offset_inset: %r, only %r rows in target!" %(offset_inset, tgt_rows)
self.offset_rows = 0
self.inset_fraction = (-offset_inset,tgt_rows)
self._invalidate()
def update_pref_col_from_focus(self, size):
"""Update self.pref_col from the focus widget."""
# TODO: should this not be private?
(maxcol, maxrow) = size
widget, old_pos = self.body.get_focus()
if widget is None: return
pref_col = None
if hasattr(widget,'get_pref_col'):
pref_col = widget.get_pref_col((maxcol,))
if pref_col is None and hasattr(widget,'get_cursor_coords'):
coords = widget.get_cursor_coords((maxcol,))
if type(coords) == tuple:
pref_col,y = coords
if pref_col is not None:
self.pref_col = pref_col
def change_focus(self, size, position,
offset_inset = 0, coming_from = None,
cursor_coords = None, snap_rows = None):
"""
Change the current focus widget.
This is used internally by methods that know the widget's *size*.
See also :meth:`.set_focus`.
:param size: see :meth:`Widget.render` for details
:param position: a position compatible with :meth:`self.body.set_focus`
:param offset_inset: either the number of rows between the
top of the listbox and the start of the focus widget (+ve
value) or the number of lines of the focus widget hidden off
the top edge of the listbox (-ve value) or 0 if the top edge
of the focus widget is aligned with the top edge of the
listbox (default if unspecified)
:type offset_inset: int
:param coming_from: either 'above', 'below' or unspecified `None`
:type coming_from: str
:param cursor_coords: (x, y) tuple indicating the desired
column and row for the cursor, a (x,) tuple indicating only
the column for the cursor, or unspecified
:type cursor_coords: (int, int)
:param snap_rows: the maximum number of extra rows to scroll
when trying to "snap" a selectable focus into the view
:type snap_rows: int
"""
(maxcol, maxrow) = size
# update pref_col before change
if cursor_coords:
self.pref_col = cursor_coords[0]
else:
self.update_pref_col_from_focus((maxcol,maxrow))
self._invalidate()
self.body.set_focus(position)
target, _ignore = self.body.get_focus()
tgt_rows = target.rows( (maxcol,), True)
if snap_rows is None:
snap_rows = maxrow - 1
# "snap" to selectable widgets
align_top = 0
align_bottom = maxrow - tgt_rows
if ( coming_from == 'above'
and target.selectable()
and offset_inset > align_bottom ):
if snap_rows >= offset_inset - align_bottom:
offset_inset = align_bottom
elif snap_rows >= offset_inset - align_top:
offset_inset = align_top
else:
offset_inset -= snap_rows
if ( coming_from == 'below'
and target.selectable()
and offset_inset < align_top ):
if snap_rows >= align_top - offset_inset:
offset_inset = align_top
elif snap_rows >= align_bottom - offset_inset:
offset_inset = align_bottom
else:
offset_inset += snap_rows
# convert offset_inset to offset_rows or inset_fraction
if offset_inset >= 0:
self.offset_rows = offset_inset
self.inset_fraction = (0,1)
else:
if offset_inset + tgt_rows <= 0:
raise ListBoxError, "Invalid offset_inset: %s, only %s rows in target!" %(offset_inset, tgt_rows)
self.offset_rows = 0
self.inset_fraction = (-offset_inset,tgt_rows)
if cursor_coords is None:
if coming_from is None:
return # must either know row or coming_from
cursor_coords = (self.pref_col,)
if not hasattr(target,'move_cursor_to_coords'):
return
attempt_rows = []
if len(cursor_coords) == 1:
# only column (not row) specified
# start from closest edge and move inwards
(pref_col,) = cursor_coords
if coming_from=='above':
attempt_rows = range( 0, tgt_rows )
else:
assert coming_from == 'below', "must specify coming_from ('above' or 'below') if cursor row is not specified"
attempt_rows = range( tgt_rows, -1, -1)
else:
# both column and row specified
# start from preferred row and move back to closest edge
(pref_col, pref_row) = cursor_coords
if pref_row < 0 or pref_row >= tgt_rows:
raise ListBoxError, "cursor_coords row outside valid range for target. pref_row:%r target_rows:%r"%(pref_row,tgt_rows)
if coming_from=='above':
attempt_rows = range( pref_row, -1, -1 )
elif coming_from=='below':
attempt_rows = range( pref_row, tgt_rows )
else:
attempt_rows = [pref_row]
for row in attempt_rows:
if target.move_cursor_to_coords((maxcol,),pref_col,row):
break
def get_focus_offset_inset(self, size):
"""Return (offset rows, inset rows) for focus widget."""
(maxcol, maxrow) = size
focus_widget, pos = self.body.get_focus()
focus_rows = focus_widget.rows((maxcol,), True)
offset_rows = self.offset_rows
inset_rows = 0
if offset_rows == 0:
inum, iden = self.inset_fraction
if inum < 0 or iden < 0 or inum >= iden:
raise ListBoxError, "Invalid inset_fraction: %r"%(self.inset_fraction,)
inset_rows = focus_rows * inum // iden
if inset_rows and inset_rows >= focus_rows:
raise ListBoxError, "urwid inset_fraction error (please report)"
return offset_rows, inset_rows
def make_cursor_visible(self, size):
"""Shift the focus widget so that its cursor is visible."""
(maxcol, maxrow) = size
focus_widget, pos = self.body.get_focus()
if focus_widget is None:
return
if not focus_widget.selectable():
return
if not hasattr(focus_widget,'get_cursor_coords'):
return
cursor = focus_widget.get_cursor_coords((maxcol,))
if cursor is None:
return
cx, cy = cursor
offset_rows, inset_rows = self.get_focus_offset_inset(
(maxcol, maxrow))
if cy < inset_rows:
self.shift_focus( (maxcol,maxrow), - (cy) )
return
if offset_rows - inset_rows + cy >= maxrow:
self.shift_focus( (maxcol,maxrow), maxrow-cy-1 )
return
def keypress(self, size, key):
"""Move selection through the list elements scrolling when
necessary. 'up' and 'down' are first passed to widget in focus
in case that widget can handle them. 'page up' and 'page down'
are always handled by the ListBox.
Keystrokes handled by this widget are:
'up' up one line (or widget)
'down' down one line (or widget)
'page up' move cursor up one listbox length
'page down' move cursor down one listbox length
"""
(maxcol, maxrow) = size
if self.set_focus_pending or self.set_focus_valign_pending:
self._set_focus_complete( (maxcol,maxrow), focus=True )
focus_widget, pos = self.body.get_focus()
if focus_widget is None: # empty listbox, can't do anything
return key
if self._command_map[key] not in [CURSOR_PAGE_UP, CURSOR_PAGE_DOWN]:
if focus_widget.selectable():
key = focus_widget.keypress((maxcol,),key)
if key is None:
self.make_cursor_visible((maxcol,maxrow))
return
def actual_key(unhandled):
if unhandled:
return key
# pass off the heavy lifting
if self._command_map[key] == CURSOR_UP:
return actual_key(self._keypress_up((maxcol, maxrow)))
if self._command_map[key] == CURSOR_DOWN:
return actual_key(self._keypress_down((maxcol, maxrow)))
if self._command_map[key] == CURSOR_PAGE_UP:
return actual_key(self._keypress_page_up((maxcol, maxrow)))
if self._command_map[key] == CURSOR_PAGE_DOWN:
return actual_key(self._keypress_page_down((maxcol, maxrow)))
return key
def _keypress_up(self, size):
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
if middle is None: return True
focus_row_offset,focus_widget,focus_pos,_ignore,cursor = middle
trim_top, fill_above = top
row_offset = focus_row_offset
# look for selectable widget above
pos = focus_pos
widget = None
for widget, pos, rows in fill_above:
row_offset -= rows
if rows and widget.selectable():
# this one will do
self.change_focus((maxcol,maxrow), pos,
row_offset, 'below')
return
# at this point we must scroll
row_offset += 1
self._invalidate()
while row_offset > 0:
# need to scroll in another candidate widget
widget, pos = self.body.get_prev(pos)
if widget is None:
# cannot scroll any further
return True # keypress not handled
rows = widget.rows((maxcol,), True)
row_offset -= rows
if rows and widget.selectable():
# this one will do
self.change_focus((maxcol,maxrow), pos,
row_offset, 'below')
return
if not focus_widget.selectable() or focus_row_offset+1>=maxrow:
# just take top one if focus is not selectable
# or if focus has moved out of view
if widget is None:
self.shift_focus((maxcol,maxrow), row_offset)
return
self.change_focus((maxcol,maxrow), pos,
row_offset, 'below')
return
# check if cursor will stop scroll from taking effect
if cursor is not None:
x,y = cursor
if y+focus_row_offset+1 >= maxrow:
# cursor position is a problem,
# choose another focus
if widget is None:
# try harder to get prev widget
widget, pos = self.body.get_prev(pos)
if widget is None:
return # can't do anything
rows = widget.rows((maxcol,), True)
row_offset -= rows
if -row_offset >= rows:
# must scroll further than 1 line
row_offset = - (rows-1)
self.change_focus((maxcol,maxrow),pos,
row_offset, 'below')
return
# if all else fails, just shift the current focus.
self.shift_focus((maxcol,maxrow), focus_row_offset+1)
def _keypress_down(self, size):
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
if middle is None: return True
focus_row_offset,focus_widget,focus_pos,focus_rows,cursor=middle
trim_bottom, fill_below = bottom
row_offset = focus_row_offset + focus_rows
rows = focus_rows
# look for selectable widget below
pos = focus_pos
widget = None
for widget, pos, rows in fill_below:
if rows and widget.selectable():
# this one will do
self.change_focus((maxcol,maxrow), pos,
row_offset, 'above')
return
row_offset += rows
# at this point we must scroll
row_offset -= 1
self._invalidate()
while row_offset < maxrow:
# need to scroll in another candidate widget
widget, pos = self.body.get_next(pos)
if widget is None:
# cannot scroll any further
return True # keypress not handled
rows = widget.rows((maxcol,))
if rows and widget.selectable():
# this one will do
self.change_focus((maxcol,maxrow), pos,
row_offset, 'above')
return
row_offset += rows
if not focus_widget.selectable() or focus_row_offset+focus_rows-1 <= 0:
# just take bottom one if current is not selectable
# or if focus has moved out of view
if widget is None:
self.shift_focus((maxcol,maxrow),
row_offset-rows)
return
# FIXME: catch this bug in testcase
#self.change_focus((maxcol,maxrow), pos,
# row_offset+rows, 'above')
self.change_focus((maxcol,maxrow), pos,
row_offset-rows, 'above')
return
# check if cursor will stop scroll from taking effect
if cursor is not None:
x,y = cursor
if y+focus_row_offset-1 < 0:
# cursor position is a problem,
# choose another focus
if widget is None:
# try harder to get next widget
widget, pos = self.body.get_next(pos)
if widget is None:
return # can't do anything
else:
row_offset -= rows
if row_offset >= maxrow:
# must scroll further than 1 line
row_offset = maxrow-1
self.change_focus((maxcol,maxrow),pos,
row_offset, 'above', )
return
# if all else fails, keep the current focus.
self.shift_focus((maxcol,maxrow), focus_row_offset-1)
def _keypress_page_up(self, size):
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
if middle is None: return True
row_offset, focus_widget, focus_pos, focus_rows, cursor = middle
trim_top, fill_above = top
# topmost_visible is row_offset rows above top row of
# focus (+ve) or -row_offset rows below top row of focus (-ve)
topmost_visible = row_offset
# scroll_from_row is (first match)
# 1. topmost visible row if focus is not selectable
# 2. row containing cursor if focus has a cursor
# 3. top row of focus widget if it is visible
# 4. topmost visible row otherwise
if not focus_widget.selectable():
scroll_from_row = topmost_visible
elif cursor is not None:
x,y = cursor
scroll_from_row = -y
elif row_offset >= 0:
scroll_from_row = 0
else:
scroll_from_row = topmost_visible
# snap_rows is maximum extra rows to scroll when
# snapping to new a focus
snap_rows = topmost_visible - scroll_from_row
# move row_offset to the new desired value (1 "page" up)
row_offset = scroll_from_row + maxrow
# not used below:
scroll_from_row = topmost_visible = None
# gather potential target widgets
t = []
# add current focus
t.append((row_offset,focus_widget,focus_pos,focus_rows))
pos = focus_pos
# include widgets from calculate_visible(..)
for widget, pos, rows in fill_above:
row_offset -= rows
t.append( (row_offset, widget, pos, rows) )
# add newly visible ones, including within snap_rows
snap_region_start = len(t)
while row_offset > -snap_rows:
widget, pos = self.body.get_prev(pos)
if widget is None: break
rows = widget.rows((maxcol,))
row_offset -= rows
# determine if one below puts current one into snap rgn
if row_offset > 0:
snap_region_start += 1
t.append( (row_offset, widget, pos, rows) )
# if we can't fill the top we need to adjust the row offsets
row_offset, w, p, r = t[-1]
if row_offset > 0:
adjust = - row_offset
t = [(ro+adjust, w, p, r) for (ro,w,p,r) in t]
# if focus_widget (first in t) is off edge, remove it
row_offset, w, p, r = t[0]
if row_offset >= maxrow:
del t[0]
snap_region_start -= 1
# we'll need this soon
self.update_pref_col_from_focus((maxcol,maxrow))
# choose the topmost selectable and (newly) visible widget
# search within snap_rows then visible region
search_order = ( range( snap_region_start, len(t))
+ range( snap_region_start-1, -1, -1 ) )
#assert 0, repr((t, search_order))
bad_choices = []
cut_off_selectable_chosen = 0
for i in search_order:
row_offset, widget, pos, rows = t[i]
if not widget.selectable():
continue
if not rows:
continue
# try selecting this widget
pref_row = max(0, -row_offset)
# if completely within snap region, adjust row_offset
if rows + row_offset <= 0:
self.change_focus( (maxcol,maxrow), pos,
-(rows-1), 'below',
(self.pref_col, rows-1),
snap_rows-((-row_offset)-(rows-1)))
else:
self.change_focus( (maxcol,maxrow), pos,
row_offset, 'below',
(self.pref_col, pref_row), snap_rows )
# if we're as far up as we can scroll, take this one
if (fill_above and self.body.get_prev(fill_above[-1][1])
== (None,None) ):
pass #return
# find out where that actually puts us
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
act_row_offset, _ign1, _ign2, _ign3, _ign4 = middle
# discard chosen widget if it will reduce scroll amount
# because of a fixed cursor (absolute last resort)
if act_row_offset > row_offset+snap_rows:
bad_choices.append(i)
continue
if act_row_offset < row_offset:
bad_choices.append(i)
continue
# also discard if off top edge (second last resort)
if act_row_offset < 0:
bad_choices.append(i)
cut_off_selectable_chosen = 1
continue
return
# anything selectable is better than what follows:
if cut_off_selectable_chosen:
return
if fill_above and focus_widget.selectable():
# if we're at the top and have a selectable, return
if self.body.get_prev(fill_above[-1][1]) == (None,None):
pass #return
# if still none found choose the topmost widget
good_choices = [j for j in search_order if j not in bad_choices]
for i in good_choices + search_order:
row_offset, widget, pos, rows = t[i]
if pos == focus_pos: continue
if not rows: # never focus a 0-height widget
continue
# if completely within snap region, adjust row_offset
if rows + row_offset <= 0:
snap_rows -= (-row_offset) - (rows-1)
row_offset = -(rows-1)
self.change_focus( (maxcol,maxrow), pos,
row_offset, 'below', None,
snap_rows )
return
# no choices available, just shift current one
self.shift_focus((maxcol, maxrow), min(maxrow-1,row_offset))
# final check for pathological case where we may fall short
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
act_row_offset, _ign1, pos, _ign2, _ign3 = middle
if act_row_offset >= row_offset:
# no problem
return
# fell short, try to select anything else above
if not t:
return
_ign1, _ign2, pos, _ign3 = t[-1]
widget, pos = self.body.get_prev(pos)
if widget is None:
# no dice, we're stuck here
return
# bring in only one row if possible
rows = widget.rows((maxcol,), True)
self.change_focus((maxcol,maxrow), pos, -(rows-1),
'below', (self.pref_col, rows-1), 0 )
def _keypress_page_down(self, size):
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
if middle is None: return True
row_offset, focus_widget, focus_pos, focus_rows, cursor = middle
trim_bottom, fill_below = bottom
# bottom_edge is maxrow-focus_pos rows below top row of focus
bottom_edge = maxrow - row_offset
# scroll_from_row is (first match)
# 1. bottom edge if focus is not selectable
# 2. row containing cursor + 1 if focus has a cursor
# 3. bottom edge of focus widget if it is visible
# 4. bottom edge otherwise
if not focus_widget.selectable():
scroll_from_row = bottom_edge
elif cursor is not None:
x,y = cursor
scroll_from_row = y + 1
elif bottom_edge >= focus_rows:
scroll_from_row = focus_rows
else:
scroll_from_row = bottom_edge
# snap_rows is maximum extra rows to scroll when
# snapping to new a focus
snap_rows = bottom_edge - scroll_from_row
# move row_offset to the new desired value (1 "page" down)
row_offset = -scroll_from_row
# not used below:
scroll_from_row = bottom_edge = None
# gather potential target widgets
t = []
# add current focus
t.append((row_offset,focus_widget,focus_pos,focus_rows))
pos = focus_pos
row_offset += focus_rows
# include widgets from calculate_visible(..)
for widget, pos, rows in fill_below:
t.append( (row_offset, widget, pos, rows) )
row_offset += rows
# add newly visible ones, including within snap_rows
snap_region_start = len(t)
while row_offset < maxrow+snap_rows:
widget, pos = self.body.get_next(pos)
if widget is None: break
rows = widget.rows((maxcol,))
t.append( (row_offset, widget, pos, rows) )
row_offset += rows
# determine if one above puts current one into snap rgn
if row_offset < maxrow:
snap_region_start += 1
# if we can't fill the bottom we need to adjust the row offsets
row_offset, w, p, rows = t[-1]
if row_offset + rows < maxrow:
adjust = maxrow - (row_offset + rows)
t = [(ro+adjust, w, p, r) for (ro,w,p,r) in t]
# if focus_widget (first in t) is off edge, remove it
row_offset, w, p, rows = t[0]
if row_offset+rows <= 0:
del t[0]
snap_region_start -= 1
# we'll need this soon
self.update_pref_col_from_focus((maxcol,maxrow))
# choose the bottommost selectable and (newly) visible widget
# search within snap_rows then visible region
search_order = ( range( snap_region_start, len(t))
+ range( snap_region_start-1, -1, -1 ) )
#assert 0, repr((t, search_order))
bad_choices = []
cut_off_selectable_chosen = 0
for i in search_order:
row_offset, widget, pos, rows = t[i]
if not widget.selectable():
continue
if not rows:
continue
# try selecting this widget
pref_row = min(maxrow-row_offset-1, rows-1)
# if completely within snap region, adjust row_offset
if row_offset >= maxrow:
self.change_focus( (maxcol,maxrow), pos,
maxrow-1, 'above',
(self.pref_col, 0),
snap_rows+maxrow-row_offset-1 )
else:
self.change_focus( (maxcol,maxrow), pos,
row_offset, 'above',
(self.pref_col, pref_row), snap_rows )
# find out where that actually puts us
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
act_row_offset, _ign1, _ign2, _ign3, _ign4 = middle
# discard chosen widget if it will reduce scroll amount
# because of a fixed cursor (absolute last resort)
if act_row_offset < row_offset-snap_rows:
bad_choices.append(i)
continue
if act_row_offset > row_offset:
bad_choices.append(i)
continue
            # also discard if off bottom edge (second last resort)
if act_row_offset+rows > maxrow:
bad_choices.append(i)
cut_off_selectable_chosen = 1
continue
return
# anything selectable is better than what follows:
if cut_off_selectable_chosen:
return
# if still none found choose the bottommost widget
good_choices = [j for j in search_order if j not in bad_choices]
for i in good_choices + search_order:
row_offset, widget, pos, rows = t[i]
if pos == focus_pos: continue
if not rows: # never focus a 0-height widget
continue
# if completely within snap region, adjust row_offset
if row_offset >= maxrow:
snap_rows -= snap_rows+maxrow-row_offset-1
row_offset = maxrow-1
self.change_focus( (maxcol,maxrow), pos,
row_offset, 'above', None,
snap_rows )
return
# no choices available, just shift current one
self.shift_focus((maxcol, maxrow), max(1-focus_rows,row_offset))
# final check for pathological case where we may fall short
middle, top, bottom = self.calculate_visible(
(maxcol,maxrow), True)
act_row_offset, _ign1, pos, _ign2, _ign3 = middle
if act_row_offset <= row_offset:
# no problem
return
# fell short, try to select anything else below
if not t:
return
_ign1, _ign2, pos, _ign3 = t[-1]
widget, pos = self.body.get_next(pos)
if widget is None:
# no dice, we're stuck here
return
# bring in only one row if possible
rows = widget.rows((maxcol,), True)
self.change_focus((maxcol,maxrow), pos, maxrow-1,
'above', (self.pref_col, 0), 0 )
def mouse_event(self, size, event, button, col, row, focus):
"""
Pass the event to the contained widgets.
May change focus on button 1 press.
"""
(maxcol, maxrow) = size
middle, top, bottom = self.calculate_visible((maxcol, maxrow),
focus=True)
if middle is None:
return False
_ignore, focus_widget, focus_pos, focus_rows, cursor = middle
trim_top, fill_above = top
_ignore, fill_below = bottom
fill_above.reverse() # fill_above is in bottom-up order
w_list = ( fill_above +
[ (focus_widget, focus_pos, focus_rows) ] +
fill_below )
wrow = -trim_top
for w, w_pos, w_rows in w_list:
if wrow + w_rows > row:
break
wrow += w_rows
else:
return False
focus = focus and w == focus_widget
if is_mouse_press(event) and button==1:
if w.selectable():
self.change_focus((maxcol,maxrow), w_pos, wrow)
if not hasattr(w,'mouse_event'):
return False
return w.mouse_event((maxcol,), event, button, col, row-wrow,
focus)
def ends_visible(self, size, focus=False):
"""
        Return a list that may contain ``'top'`` and/or ``'bottom'``,
        i.e. this function will return one of: [], [``'top'``],
        [``'bottom'``] or [``'top'``, ``'bottom'``].
        This is a convenience function for checking whether the top and
        bottom of the list are visible.
"""
(maxcol, maxrow) = size
l = []
middle,top,bottom = self.calculate_visible( (maxcol,maxrow),
focus=focus )
if middle is None: # empty listbox
return ['top','bottom']
trim_top, above = top
trim_bottom, below = bottom
if trim_bottom == 0:
row_offset, w, pos, rows, c = middle
row_offset += rows
for w, pos, rows in below:
row_offset += rows
if row_offset < maxrow:
l.append('bottom')
elif self.body.get_next(pos) == (None,None):
l.append('bottom')
if trim_top == 0:
row_offset, w, pos, rows, c = middle
for w, pos, rows in above:
row_offset -= rows
if self.body.get_prev(pos) == (None,None):
l.insert(0, 'top')
return l
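    # Illustrative use (a sketch; the 'listbox' instance and the size tuple
    # are assumptions, not part of this module):
    #   if 'bottom' in listbox.ends_visible((maxcol, maxrow)):
    #       pass  # the last list item is currently on screen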
def __iter__(self):
"""
Return an iterator over the positions in this ListBox.
If self.body does not implement positions() then iterate
from the focus widget down to the bottom, then from above
the focus up to the top. This is the best we can do with
a minimal list walker implementation.
"""
positions_fn = getattr(self.body, 'positions', None)
if positions_fn:
for pos in positions_fn():
yield pos
return
focus_widget, focus_pos = self.body.get_focus()
if focus_widget is None:
return
pos = focus_pos
while True:
yield pos
w, pos = self.body.get_next(pos)
if not w: break
pos = focus_pos
while True:
w, pos = self.body.get_prev(pos)
if not w: break
yield pos
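    # Illustrative iteration (a sketch; 'listbox' is an assumption):
    #   positions = list(iter(listbox))  # focus position first, then the
    #                                    # ones below, then the ones above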
def __reversed__(self):
"""
Return a reversed iterator over the positions in this ListBox.
If :attr:`body` does not implement :meth:`positions` then iterate
from above the focus widget up to the top, then from the focus
widget down to the bottom. Note that this is not actually the
reverse of what `__iter__()` produces, but this is the best we can
do with a minimal list walker implementation.
"""
positions_fn = getattr(self.body, 'positions', None)
if positions_fn:
for pos in positions_fn(reverse=True):
yield pos
return
focus_widget, focus_pos = self.body.get_focus()
if focus_widget is None:
return
pos = focus_pos
while True:
w, pos = self.body.get_prev(pos)
if not w: break
yield pos
pos = focus_pos
while True:
yield pos
w, pos = self.body.get_next(pos)
if not w: break
| lgpl-2.1 | -1,371,156,613,091,733,500 | -7,598,948,047,611,462,000 | 34.71283 | 180 | 0.555927 | false |
Ch00k/ansible | lib/ansible/module_utils/known_hosts.py | 80 | 6716 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import hmac
import urlparse
try:
from hashlib import sha1
except ImportError:
import sha as sha1
HASHED_KEY_MAGIC = "|1|"
def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):
""" idempotently add a git url hostkey """
if is_ssh_url(url):
fqdn = get_fqdn(url)
if fqdn:
known_host = check_hostkey(module, fqdn)
if not known_host:
if accept_hostkey:
rc, out, err = add_host_key(module, fqdn, create_dir=create_dir)
if rc != 0:
module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
else:
module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True or manually add the hostkey prior to running the git module" % fqdn)
def is_ssh_url(url):
""" check if url is ssh """
if "@" in url and "://" not in url:
return True
for scheme in "ssh://", "git+ssh://", "ssh+git://":
if url.startswith(scheme):
return True
return False
def get_fqdn(repo_url):
""" chop the hostname out of a url """
result = None
if "@" in repo_url and "://" not in repo_url:
        # most likely a user@host:path or user@host/path type URL
repo_url = repo_url.split("@", 1)[1]
if ":" in repo_url:
repo_url = repo_url.split(":")[0]
result = repo_url
elif "/" in repo_url:
repo_url = repo_url.split("/")[0]
result = repo_url
elif "://" in repo_url:
# this should be something we can parse with urlparse
parts = urlparse.urlparse(repo_url)
# parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
# ensure we actually have a parts[1] before continuing.
if parts[1] != '':
result = parts[1]
if ":" in result:
result = result.split(":")[0]
if "@" in result:
result = result.split("@", 1)[1]
return result
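# Illustrative results (the example URLs are assumptions):
#   get_fqdn("git@github.com:ansible/ansible.git") -> "github.com"
#   get_fqdn("ssh://git@example.com:2222/repo.git") -> "example.com"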
def check_hostkey(module, fqdn):
return not not_in_host_file(module, fqdn)
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
host_fh = open(hf)
except IOError, e:
hfiles_not_found += 1
continue
else:
data = host_fh.read()
host_fh.close()
for line in data.split("\n"):
if line is None or " " not in line:
continue
tokens = line.split()
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt,kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
return True
def add_host_key(module, fqdn, key_type="rsa", create_dir=False):
""" use ssh-keyscan to add the hostkey """
result = False
keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
if 'USER' in os.environ:
user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_ssh_dir = "~/.ssh/"
user_host_file = "~/.ssh/known_hosts"
user_ssh_dir = os.path.expanduser(user_ssh_dir)
if not os.path.exists(user_ssh_dir):
if create_dir:
try:
os.makedirs(user_ssh_dir, 0700)
except:
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
else:
module.fail_json(msg="%s does not exist" % user_ssh_dir)
elif not os.path.isdir(user_ssh_dir):
module.fail_json(msg="%s is not a directory" % user_ssh_dir)
this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
rc, out, err = module.run_command(this_cmd)
module.append_to_file(user_host_file, out)
return rc, out, err
| gpl-3.0 | 3,244,327,956,105,237,500 | 8,973,870,086,262,762,000 | 35.107527 | 164 | 0.601697 | false |
crepererum/invenio | invenio/modules/previewer/previewerext/gview.py | 15 | 1631 | # This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from flask import render_template, request
def can_preview(f):
'''
    Returns whether the file type can be previewed.
'''
return f.superformat in ['.pdf', '.jpeg', '.png', '.gif', '.tiff', '.bmp',
'.mpeg4', '.3gpp', '.mov', '.avi', '.mpegps',
'.wmv', '.flv', '.txt', '.css', '.html', '.php',
'.c', '.cpp', '.h', '.hpp', '.js', '.doc', '.docx',
'.xls', '.xlsx', '.ppt', '.pptx', '.pages', '.ai',
'.psd', '.dfx', '.svg', '.eps', '.ps', '.ttf',
'.xps', '.zip', '.rar']
def preview(f):
'''
    Returns the appropriate template and passes the file and an embed flag.
'''
return render_template("previewer/gview.html", f=f,
embed=request.args.get('embed', type=bool))
| gpl-2.0 | -1,737,778,652,563,780,600 | -3,574,425,653,947,017,700 | 40.820513 | 80 | 0.573268 | false |
liberorbis/libernext | env/lib/python2.7/site-packages/celery/tests/bin/test_worker.py | 4 | 24058 | from __future__ import absolute_import
import logging
import os
import sys
from functools import wraps
from billiard import current_process
from kombu import Exchange, Queue
from celery import platforms
from celery import signals
from celery.app import trace
from celery.apps import worker as cd
from celery.bin.worker import worker, main as worker_main
from celery.exceptions import (
ImproperlyConfigured, WorkerShutdown, WorkerTerminate,
)
from celery.utils.log import ensure_process_aware_logger
from celery.worker import state
from celery.tests.case import (
AppCase,
Mock,
SkipTest,
WhateverIO,
patch,
skip_if_pypy,
skip_if_jython,
)
ensure_process_aware_logger()
class WorkerAppCase(AppCase):
def tearDown(self):
super(WorkerAppCase, self).tearDown()
trace.reset_worker_optimizations()
def disable_stdouts(fun):
@wraps(fun)
def disable(*args, **kwargs):
prev_out, prev_err = sys.stdout, sys.stderr
prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__
sys.stdout = sys.__stdout__ = WhateverIO()
sys.stderr = sys.__stderr__ = WhateverIO()
try:
return fun(*args, **kwargs)
finally:
sys.stdout = prev_out
sys.stderr = prev_err
sys.__stdout__ = prev_rout
sys.__stderr__ = prev_rerr
return disable
class Worker(cd.Worker):
redirect_stdouts = False
def start(self, *args, **kwargs):
self.on_start()
class test_Worker(WorkerAppCase):
Worker = Worker
@disable_stdouts
def test_queues_string(self):
w = self.app.Worker()
w.setup_queues('foo,bar,baz')
self.assertTrue('foo' in self.app.amqp.queues)
@disable_stdouts
def test_cpu_count(self):
with patch('celery.worker.cpu_count') as cpu_count:
cpu_count.side_effect = NotImplementedError()
w = self.app.Worker(concurrency=None)
self.assertEqual(w.concurrency, 2)
w = self.app.Worker(concurrency=5)
self.assertEqual(w.concurrency, 5)
@disable_stdouts
def test_windows_B_option(self):
self.app.IS_WINDOWS = True
with self.assertRaises(SystemExit):
worker(app=self.app).run(beat=True)
def test_setup_concurrency_very_early(self):
x = worker()
x.run = Mock()
with self.assertRaises(ImportError):
x.execute_from_commandline(['worker', '-P', 'xyzybox'])
def test_run_from_argv_basic(self):
x = worker(app=self.app)
x.run = Mock()
x.maybe_detach = Mock()
def run(*args, **kwargs):
pass
x.run = run
x.run_from_argv('celery', [])
self.assertTrue(x.maybe_detach.called)
def test_maybe_detach(self):
x = worker(app=self.app)
with patch('celery.bin.worker.detached_celeryd') as detached:
x.maybe_detach([])
self.assertFalse(detached.called)
with self.assertRaises(SystemExit):
x.maybe_detach(['--detach'])
self.assertTrue(detached.called)
@disable_stdouts
def test_invalid_loglevel_gives_error(self):
x = worker(app=self.app)
with self.assertRaises(SystemExit):
x.run(loglevel='GRIM_REAPER')
def test_no_loglevel(self):
self.app.Worker = Mock()
worker(app=self.app).run(loglevel=None)
def test_tasklist(self):
worker = self.app.Worker()
self.assertTrue(worker.app.tasks)
self.assertTrue(worker.app.finalized)
self.assertTrue(worker.tasklist(include_builtins=True))
worker.tasklist(include_builtins=False)
def test_extra_info(self):
worker = self.app.Worker()
worker.loglevel = logging.WARNING
self.assertFalse(worker.extra_info())
worker.loglevel = logging.INFO
self.assertTrue(worker.extra_info())
@disable_stdouts
def test_loglevel_string(self):
worker = self.Worker(app=self.app, loglevel='INFO')
self.assertEqual(worker.loglevel, logging.INFO)
@disable_stdouts
def test_run_worker(self):
handlers = {}
class Signals(platforms.Signals):
def __setitem__(self, sig, handler):
handlers[sig] = handler
p = platforms.signals
platforms.signals = Signals()
try:
w = self.Worker(app=self.app)
w._isatty = False
w.on_start()
for sig in 'SIGINT', 'SIGHUP', 'SIGTERM':
self.assertIn(sig, handlers)
handlers.clear()
w = self.Worker(app=self.app)
w._isatty = True
w.on_start()
for sig in 'SIGINT', 'SIGTERM':
self.assertIn(sig, handlers)
self.assertNotIn('SIGHUP', handlers)
finally:
platforms.signals = p
@disable_stdouts
def test_startup_info(self):
worker = self.Worker(app=self.app)
worker.on_start()
self.assertTrue(worker.startup_info())
worker.loglevel = logging.DEBUG
self.assertTrue(worker.startup_info())
worker.loglevel = logging.INFO
self.assertTrue(worker.startup_info())
worker.autoscale = 13, 10
self.assertTrue(worker.startup_info())
prev_loader = self.app.loader
worker = self.Worker(app=self.app, queues='foo,bar,baz,xuzzy,do,re,mi')
self.app.loader = Mock()
self.app.loader.__module__ = 'acme.baked_beans'
self.assertTrue(worker.startup_info())
self.app.loader = Mock()
self.app.loader.__module__ = 'celery.loaders.foo'
self.assertTrue(worker.startup_info())
from celery.loaders.app import AppLoader
self.app.loader = AppLoader(app=self.app)
self.assertTrue(worker.startup_info())
self.app.loader = prev_loader
worker.send_events = True
self.assertTrue(worker.startup_info())
# test when there are too few output lines
# to draft the ascii art onto
prev, cd.ARTLINES = cd.ARTLINES, ['the quick brown fox']
try:
self.assertTrue(worker.startup_info())
finally:
cd.ARTLINES = prev
@disable_stdouts
def test_run(self):
self.Worker(app=self.app).on_start()
self.Worker(app=self.app, purge=True).on_start()
worker = self.Worker(app=self.app)
worker.on_start()
@disable_stdouts
def test_purge_messages(self):
self.Worker(app=self.app).purge_messages()
@disable_stdouts
def test_init_queues(self):
app = self.app
c = app.conf
app.amqp.queues = app.amqp.Queues({
'celery': {'exchange': 'celery',
'routing_key': 'celery'},
'video': {'exchange': 'video',
'routing_key': 'video'},
})
worker = self.Worker(app=self.app)
worker.setup_queues(['video'])
self.assertIn('video', app.amqp.queues)
self.assertIn('video', app.amqp.queues.consume_from)
self.assertIn('celery', app.amqp.queues)
self.assertNotIn('celery', app.amqp.queues.consume_from)
c.CELERY_CREATE_MISSING_QUEUES = False
del(app.amqp.queues)
with self.assertRaises(ImproperlyConfigured):
self.Worker(app=self.app).setup_queues(['image'])
del(app.amqp.queues)
c.CELERY_CREATE_MISSING_QUEUES = True
worker = self.Worker(app=self.app)
worker.setup_queues(['image'])
self.assertIn('image', app.amqp.queues.consume_from)
self.assertEqual(
Queue('image', Exchange('image'), routing_key='image'),
app.amqp.queues['image'],
)
@disable_stdouts
def test_autoscale_argument(self):
worker1 = self.Worker(app=self.app, autoscale='10,3')
self.assertListEqual(worker1.autoscale, [10, 3])
worker2 = self.Worker(app=self.app, autoscale='10')
self.assertListEqual(worker2.autoscale, [10, 0])
self.assert_no_logging_side_effect()
def test_include_argument(self):
worker1 = self.Worker(app=self.app, include='os')
self.assertListEqual(worker1.include, ['os'])
worker2 = self.Worker(app=self.app,
include='os,sys')
self.assertListEqual(worker2.include, ['os', 'sys'])
self.Worker(app=self.app, include=['os', 'sys'])
@disable_stdouts
def test_unknown_loglevel(self):
with self.assertRaises(SystemExit):
worker(app=self.app).run(loglevel='ALIEN')
worker1 = self.Worker(app=self.app, loglevel=0xFFFF)
self.assertEqual(worker1.loglevel, 0xFFFF)
@disable_stdouts
@patch('os._exit')
def test_warns_if_running_as_privileged_user(self, _exit):
app = self.app
if app.IS_WINDOWS:
raise SkipTest('Not applicable on Windows')
with patch('os.getuid') as getuid:
getuid.return_value = 0
self.app.conf.CELERY_ACCEPT_CONTENT = ['pickle']
worker = self.Worker(app=self.app)
worker.on_start()
_exit.assert_called_with(1)
from celery import platforms
platforms.C_FORCE_ROOT = True
try:
with self.assertWarnsRegex(
RuntimeWarning,
r'absolutely not recommended'):
worker = self.Worker(app=self.app)
worker.on_start()
finally:
platforms.C_FORCE_ROOT = False
self.app.conf.CELERY_ACCEPT_CONTENT = ['json']
with self.assertWarnsRegex(
RuntimeWarning,
r'absolutely not recommended'):
worker = self.Worker(app=self.app)
worker.on_start()
@disable_stdouts
def test_redirect_stdouts(self):
self.Worker(app=self.app, redirect_stdouts=False)
with self.assertRaises(AttributeError):
sys.stdout.logger
@disable_stdouts
def test_on_start_custom_logging(self):
self.app.log.redirect_stdouts = Mock()
        worker = self.Worker(app=self.app, redirect_stdouts=True)
worker._custom_logging = True
worker.on_start()
self.assertFalse(self.app.log.redirect_stdouts.called)
def test_setup_logging_no_color(self):
worker = self.Worker(
app=self.app, redirect_stdouts=False, no_color=True,
)
prev, self.app.log.setup = self.app.log.setup, Mock()
try:
worker.setup_logging()
self.assertFalse(self.app.log.setup.call_args[1]['colorize'])
finally:
self.app.log.setup = prev
@disable_stdouts
def test_startup_info_pool_is_str(self):
worker = self.Worker(app=self.app, redirect_stdouts=False)
worker.pool_cls = 'foo'
worker.startup_info()
def test_redirect_stdouts_already_handled(self):
logging_setup = [False]
@signals.setup_logging.connect
def on_logging_setup(**kwargs):
logging_setup[0] = True
try:
worker = self.Worker(app=self.app, redirect_stdouts=False)
worker.app.log.already_setup = False
worker.setup_logging()
self.assertTrue(logging_setup[0])
with self.assertRaises(AttributeError):
sys.stdout.logger
finally:
signals.setup_logging.disconnect(on_logging_setup)
@disable_stdouts
def test_platform_tweaks_osx(self):
class OSXWorker(Worker):
proxy_workaround_installed = False
def osx_proxy_detection_workaround(self):
self.proxy_workaround_installed = True
worker = OSXWorker(app=self.app, redirect_stdouts=False)
def install_HUP_nosupport(controller):
controller.hup_not_supported_installed = True
class Controller(object):
pass
prev = cd.install_HUP_not_supported_handler
cd.install_HUP_not_supported_handler = install_HUP_nosupport
try:
worker.app.IS_OSX = True
controller = Controller()
worker.install_platform_tweaks(controller)
self.assertTrue(controller.hup_not_supported_installed)
self.assertTrue(worker.proxy_workaround_installed)
finally:
cd.install_HUP_not_supported_handler = prev
@disable_stdouts
def test_general_platform_tweaks(self):
restart_worker_handler_installed = [False]
def install_worker_restart_handler(worker):
restart_worker_handler_installed[0] = True
class Controller(object):
pass
prev = cd.install_worker_restart_handler
cd.install_worker_restart_handler = install_worker_restart_handler
try:
worker = self.Worker(app=self.app)
worker.app.IS_OSX = False
worker.install_platform_tweaks(Controller())
self.assertTrue(restart_worker_handler_installed[0])
finally:
cd.install_worker_restart_handler = prev
@disable_stdouts
def test_on_consumer_ready(self):
worker_ready_sent = [False]
@signals.worker_ready.connect
def on_worker_ready(**kwargs):
worker_ready_sent[0] = True
self.Worker(app=self.app).on_consumer_ready(object())
self.assertTrue(worker_ready_sent[0])
class test_funs(WorkerAppCase):
def test_active_thread_count(self):
self.assertTrue(cd.active_thread_count())
@disable_stdouts
def test_set_process_status(self):
try:
__import__('setproctitle')
except ImportError:
raise SkipTest('setproctitle not installed')
worker = Worker(app=self.app, hostname='xyzza')
prev1, sys.argv = sys.argv, ['Arg0']
try:
st = worker.set_process_status('Running')
self.assertIn('celeryd', st)
self.assertIn('xyzza', st)
self.assertIn('Running', st)
prev2, sys.argv = sys.argv, ['Arg0', 'Arg1']
try:
st = worker.set_process_status('Running')
self.assertIn('celeryd', st)
self.assertIn('xyzza', st)
self.assertIn('Running', st)
self.assertIn('Arg1', st)
finally:
sys.argv = prev2
finally:
sys.argv = prev1
@disable_stdouts
def test_parse_options(self):
cmd = worker()
cmd.app = self.app
opts, args = cmd.parse_options('worker', ['--concurrency=512',
'--heartbeat-interval=10'])
self.assertEqual(opts.concurrency, 512)
self.assertEqual(opts.heartbeat_interval, 10)
@disable_stdouts
def test_main(self):
p, cd.Worker = cd.Worker, Worker
s, sys.argv = sys.argv, ['worker', '--discard']
try:
worker_main(app=self.app)
finally:
cd.Worker = p
sys.argv = s
class test_signal_handlers(WorkerAppCase):
class _Worker(object):
stopped = False
terminated = False
def stop(self, in_sighandler=False):
self.stopped = True
def terminate(self, in_sighandler=False):
self.terminated = True
def psig(self, fun, *args, **kwargs):
handlers = {}
class Signals(platforms.Signals):
def __setitem__(self, sig, handler):
handlers[sig] = handler
p, platforms.signals = platforms.signals, Signals()
try:
fun(*args, **kwargs)
return handlers
finally:
platforms.signals = p
@disable_stdouts
def test_worker_int_handler(self):
worker = self._Worker()
handlers = self.psig(cd.install_worker_int_handler, worker)
next_handlers = {}
state.should_stop = False
state.should_terminate = False
class Signals(platforms.Signals):
def __setitem__(self, sig, handler):
next_handlers[sig] = handler
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
p, platforms.signals = platforms.signals, Signals()
try:
handlers['SIGINT']('SIGINT', object())
self.assertTrue(state.should_stop)
finally:
platforms.signals = p
state.should_stop = False
try:
next_handlers['SIGINT']('SIGINT', object())
self.assertTrue(state.should_terminate)
finally:
state.should_terminate = False
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
p, platforms.signals = platforms.signals, Signals()
try:
with self.assertRaises(WorkerShutdown):
handlers['SIGINT']('SIGINT', object())
finally:
platforms.signals = p
with self.assertRaises(WorkerTerminate):
next_handlers['SIGINT']('SIGINT', object())
@disable_stdouts
def test_worker_int_handler_only_stop_MainProcess(self):
try:
import _multiprocessing # noqa
except ImportError:
raise SkipTest('only relevant for multiprocessing')
process = current_process()
name, process.name = process.name, 'OtherProcess'
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
try:
worker = self._Worker()
handlers = self.psig(cd.install_worker_int_handler, worker)
handlers['SIGINT']('SIGINT', object())
self.assertTrue(state.should_stop)
finally:
process.name = name
state.should_stop = False
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
try:
worker = self._Worker()
handlers = self.psig(cd.install_worker_int_handler, worker)
with self.assertRaises(WorkerShutdown):
handlers['SIGINT']('SIGINT', object())
finally:
process.name = name
state.should_stop = False
@disable_stdouts
def test_install_HUP_not_supported_handler(self):
worker = self._Worker()
handlers = self.psig(cd.install_HUP_not_supported_handler, worker)
handlers['SIGHUP']('SIGHUP', object())
@disable_stdouts
def test_worker_term_hard_handler_only_stop_MainProcess(self):
try:
import _multiprocessing # noqa
except ImportError:
raise SkipTest('only relevant for multiprocessing')
process = current_process()
name, process.name = process.name, 'OtherProcess'
try:
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
worker = self._Worker()
handlers = self.psig(
cd.install_worker_term_hard_handler, worker)
try:
handlers['SIGQUIT']('SIGQUIT', object())
self.assertTrue(state.should_terminate)
finally:
state.should_terminate = False
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
worker = self._Worker()
handlers = self.psig(
cd.install_worker_term_hard_handler, worker)
with self.assertRaises(WorkerTerminate):
handlers['SIGQUIT']('SIGQUIT', object())
finally:
process.name = name
@disable_stdouts
def test_worker_term_handler_when_threads(self):
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_handler, worker)
try:
handlers['SIGTERM']('SIGTERM', object())
self.assertTrue(state.should_stop)
finally:
state.should_stop = False
@disable_stdouts
def test_worker_term_handler_when_single_thread(self):
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_handler, worker)
try:
with self.assertRaises(WorkerShutdown):
handlers['SIGTERM']('SIGTERM', object())
finally:
state.should_stop = False
@patch('sys.__stderr__')
@skip_if_pypy
@skip_if_jython
def test_worker_cry_handler(self, stderr):
handlers = self.psig(cd.install_cry_handler)
self.assertIsNone(handlers['SIGUSR1']('SIGUSR1', object()))
self.assertTrue(stderr.write.called)
@disable_stdouts
def test_worker_term_handler_only_stop_MainProcess(self):
try:
import _multiprocessing # noqa
except ImportError:
raise SkipTest('only relevant for multiprocessing')
process = current_process()
name, process.name = process.name, 'OtherProcess'
try:
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_handler, worker)
handlers['SIGTERM']('SIGTERM', object())
self.assertTrue(state.should_stop)
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_handler, worker)
with self.assertRaises(WorkerShutdown):
handlers['SIGTERM']('SIGTERM', object())
finally:
process.name = name
state.should_stop = False
@disable_stdouts
@patch('celery.platforms.close_open_fds')
@patch('atexit.register')
@patch('os.close')
def test_worker_restart_handler(self, _close, register, close_open):
if getattr(os, 'execv', None) is None:
            raise SkipTest('platform does not have execv')
argv = []
def _execv(*args):
argv.extend(args)
execv, os.execv = os.execv, _execv
try:
worker = self._Worker()
handlers = self.psig(cd.install_worker_restart_handler, worker)
handlers['SIGHUP']('SIGHUP', object())
self.assertTrue(state.should_stop)
self.assertTrue(register.called)
callback = register.call_args[0][0]
callback()
self.assertTrue(argv)
finally:
os.execv = execv
state.should_stop = False
@disable_stdouts
def test_worker_term_hard_handler_when_threaded(self):
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 3
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_hard_handler, worker)
try:
handlers['SIGQUIT']('SIGQUIT', object())
self.assertTrue(state.should_terminate)
finally:
state.should_terminate = False
@disable_stdouts
def test_worker_term_hard_handler_when_single_threaded(self):
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_hard_handler, worker)
with self.assertRaises(WorkerTerminate):
handlers['SIGQUIT']('SIGQUIT', object())
| gpl-2.0 | -8,696,426,851,061,963,000 | 5,763,580,196,885,767,000 | 33.270655 | 79 | 0.579267 | false |
JohannesBuchner/doit | doc/samples/parameters.py | 5 | 1732 | def task_py_params():
def show_params(param1, param2):
print(param1)
print(5 + param2)
return {'actions':[(show_params,)],
'params':[{'name':'param1',
'short':'p',
'default':'default value'},
{'name':'param2',
'long':'param2',
'type': int,
'default':0}],
'verbosity':2,
}
def task_py_params_list():
def print_a_list(list):
for item in list:
print(item)
return {'actions':[(print_a_list,)],
'params':[{'name':'list',
'short':'l',
'long': 'list',
'type': list,
'default': [],
'help': 'Collect a list with multiple -l flags'}],
'verbosity':2,
}
def task_py_params_choice():
def print_choice(choice):
print(choice)
return {'actions':[(print_choice,)],
'params':[{'name':'choice',
'short':'c',
'long': 'choice',
'type': str,
'choices': (('this', ''), ('that', '')),
'default': 'this',
'help': 'Choose between this and that'}],
'verbosity':2,}
def task_cmd_params():
return {'actions':["echo mycmd %(flag)s xxx"],
'params':[{'name':'flag',
'short':'f',
'long': 'flag',
'default': '',
'help': 'helpful message about this flag'}],
'verbosity': 2
}
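# Illustrative CLI usage (a sketch; assumes these tasks live in a dodo.py):
#   doit py_params -p hello --param2 3
#   doit py_params_list -l first -l second
#   doit py_params_choice -c that
#   doit cmd_params -f "--some-flag"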
| mit | 9,009,254,430,728,383,000 | 1,539,617,880,325,519,400 | 31.074074 | 73 | 0.370092 | false |
stackforge/monasca-api | monasca_api/policies/versions.py | 2 | 1175 | # Copyright 2018 OP5 AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_policy import policy
from monasca_api import policies
CONF = cfg.CONF
VERSIONS_ROLES = policies.roles_list_to_check_str(cfg.CONF.security.versions_roles)
rules = [
policy.DocumentedRuleDefault(
name='api:versions',
check_str=VERSIONS_ROLES,
description='List supported versions '
'or get the details about the specified version of Monasca API.',
operations=[
{'path': '/', 'method': 'GET'},
{'path': '/v2.0', 'method': 'GET'}
]
),
]
def list_rules():
return rules
| apache-2.0 | 8,794,587,204,267,942,000 | 2,560,792,154,586,126,000 | 29.921053 | 85 | 0.683404 | false |
uclaros/QGIS | tests/src/python/test_qgssymbolexpressionvariables.py | 45 | 4493 | # -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgssymbolexpressionvariables.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Matthias Kuhn'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QSize
from qgis.core import (
QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsSingleSymbolRenderer,
QgsFillSymbol,
QgsRenderContext,
QgsProperty,
QgsSymbolLayer
)
from qgis.testing import unittest, start_app
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsSymbolExpressionVariables(unittest.TestCase):
def setUp(self):
myShpFile = os.path.join(TEST_DATA_DIR, 'polys.shp')
self.layer = QgsVectorLayer(myShpFile, 'Polys', 'ogr')
QgsProject.instance().addMapLayer(self.layer)
self.iface = get_iface()
rendered_layers = [self.layer]
self.mapsettings = self.iface.mapCanvas().mapSettings()
self.mapsettings.setOutputSize(QSize(400, 400))
self.mapsettings.setOutputDpi(96)
self.mapsettings.setExtent(QgsRectangle(-163, 22, -70, 52))
self.mapsettings.setLayers(rendered_layers)
def tearDown(self):
QgsProject.instance().removeAllMapLayers()
def testPartNum(self):
# Create rulebased style
sym1 = QgsFillSymbol.createSimple({'color': '#fdbf6f', 'outline_color': 'black'})
renderer = QgsSingleSymbolRenderer(sym1)
renderer.symbols(QgsRenderContext())[0].symbolLayers()[0].setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression('color_rgb( (@geometry_part_num - 1) * 200, 0, 0 )'))
self.layer.setRenderer(renderer)
# Setup rendering check
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlName('expected_geometry_part_num')
result = renderchecker.runTest('part_geometry_part_num')
self.assertTrue(result)
def testPartCount(self):
# Create rulebased style
sym1 = QgsFillSymbol.createSimple({'color': '#fdbf6f', 'outline_color': 'black'})
renderer = QgsSingleSymbolRenderer(sym1)
renderer.symbols(QgsRenderContext())[0].symbolLayers()[0].setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression('color_rgb( (@geometry_part_count - 1) * 200, 0, 0 )'))
self.layer.setRenderer(renderer)
# Setup rendering check
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlName('expected_geometry_part_count')
result = renderchecker.runTest('part_geometry_part_count')
self.assertTrue(result)
def testSymbolColor(self):
# Create rulebased style
sym1 = QgsFillSymbol.createSimple({'color': '#ff0000', 'outline_color': 'black'})
renderer = QgsSingleSymbolRenderer(sym1)
renderer.symbols(QgsRenderContext())[0].symbolLayers()[0].setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression('set_color_part( @symbol_color, \'value\', "Value" * 4)'))
self.layer.setRenderer(renderer)
# Setup rendering check
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlName('expected_symbol_color_variable')
result = renderchecker.runTest('symbol_color_variable', 50)
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -4,855,332,163,882,381,000 | -5,000,156,669,828,865,000 | 37.076271 | 208 | 0.619408 | false |
cfg2015/EPT-2015-2 | addons/fetchmail/fetchmail.py | 64 | 15663 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from imaplib import IMAP4
from imaplib import IMAP4_SSL
from poplib import POP3
from poplib import POP3_SSL
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import zipfile
import base64
from openerp import addons
from openerp.osv import fields, osv
from openerp import tools, api
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MAX_POP_MESSAGES = 50
class fetchmail_server(osv.osv):
"""Incoming POP/IMAP mail server account"""
_name = 'fetchmail.server'
_description = "POP/IMAP Server"
_order = 'priority'
_columns = {
'name':fields.char('Name', required=True, readonly=False),
'active':fields.boolean('Active', required=False),
'state':fields.selection([
('draft', 'Not Confirmed'),
('done', 'Confirmed'),
], 'Status', select=True, readonly=True, copy=False),
'server' : fields.char('Server Name', readonly=True, help="Hostname or IP of the mail server", states={'draft':[('readonly', False)]}),
'port' : fields.integer('Port', readonly=True, states={'draft':[('readonly', False)]}),
'type':fields.selection([
('pop', 'POP Server'),
('imap', 'IMAP Server'),
('local', 'Local Server'),
], 'Server Type', select=True, required=True, readonly=False),
'is_ssl':fields.boolean('SSL/TLS', help="Connections are encrypted with SSL/TLS through a dedicated port (default: IMAPS=993, POP3S=995)"),
'attach':fields.boolean('Keep Attachments', help="Whether attachments should be downloaded. "
"If not enabled, incoming emails will be stripped of any attachments before being processed"),
        'original':fields.boolean('Keep Original', help="Whether a full original copy of each email should be kept for reference "
"and attached to each processed message. This will usually double the size of your message database."),
'date': fields.datetime('Last Fetch Date', readonly=True),
'user' : fields.char('Username', readonly=True, states={'draft':[('readonly', False)]}),
'password' : fields.char('Password', readonly=True, states={'draft':[('readonly', False)]}),
'action_id':fields.many2one('ir.actions.server', 'Server Action', help="Optional custom server action to trigger for each incoming mail, "
"on the record that was created or updated by this mail"),
'object_id': fields.many2one('ir.model', "Create a New Record", help="Process each incoming mail as part of a conversation "
"corresponding to this document type. This will create "
"new documents for new conversations, or attach follow-up "
"emails to the existing conversations (documents)."),
'priority': fields.integer('Server Priority', readonly=True, states={'draft':[('readonly', False)]}, help="Defines the order of processing, "
"lower values mean higher priority"),
'message_ids': fields.one2many('mail.mail', 'fetchmail_server_id', 'Messages', readonly=True),
'configuration' : fields.text('Configuration', readonly=True),
'script' : fields.char('Script', readonly=True),
}
_defaults = {
'state': "draft",
'type': "pop",
'active': True,
'priority': 5,
'attach': True,
'script': '/mail/static/scripts/openerp_mailgate.py',
}
def onchange_server_type(self, cr, uid, ids, server_type=False, ssl=False, object_id=False):
port = 0
values = {}
if server_type == 'pop':
port = ssl and 995 or 110
elif server_type == 'imap':
port = ssl and 993 or 143
else:
values['server'] = ''
values['port'] = port
conf = {
'dbname' : cr.dbname,
'uid' : uid,
'model' : 'MODELNAME',
}
if object_id:
m = self.pool.get('ir.model')
r = m.read(cr,uid,[object_id],['model'])
conf['model']=r[0]['model']
values['configuration'] = """Use the below script with the following command line options with your Mail Transport Agent (MTA)
openerp_mailgate.py --host=HOSTNAME --port=PORT -u %(uid)d -p PASSWORD -d %(dbname)s
Example configuration for the postfix mta running locally:
/etc/postfix/virtual_aliases:
@yourdomain openerp_mailgate@localhost
/etc/aliases:
openerp_mailgate: "|/path/to/openerp-mailgate.py --host=localhost -u %(uid)d -p PASSWORD -d %(dbname)s"
""" % conf
return {'value':values}
def set_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids , {'state':'draft'})
return True
@api.cr_uid_ids_context
def connect(self, cr, uid, server_id, context=None):
if isinstance(server_id, (list,tuple)):
server_id = server_id[0]
server = self.browse(cr, uid, server_id, context)
if server.type == 'imap':
if server.is_ssl:
connection = IMAP4_SSL(server.server, int(server.port))
else:
connection = IMAP4(server.server, int(server.port))
connection.login(server.user, server.password)
elif server.type == 'pop':
if server.is_ssl:
connection = POP3_SSL(server.server, int(server.port))
else:
connection = POP3(server.server, int(server.port))
#TODO: use this to remove only unread messages
#connection.user("recent:"+server.user)
connection.user(server.user)
connection.pass_(server.password)
return connection
def button_confirm_login(self, cr, uid, ids, context=None):
if context is None:
context = {}
for server in self.browse(cr, uid, ids, context=context):
try:
connection = server.connect()
server.write({'state':'done'})
except Exception, e:
_logger.exception("Failed to connect to %s server %s.", server.type, server.name)
raise osv.except_osv(_("Connection test failed!"), _("Here is what we got instead:\n %s.") % tools.ustr(e))
finally:
try:
if connection:
if server.type == 'imap':
connection.close()
elif server.type == 'pop':
connection.quit()
except Exception:
# ignored, just a consequence of the previous exception
pass
return True
def _fetch_mails(self, cr, uid, ids=False, context=None):
if not ids:
ids = self.search(cr, uid, [('state','=','done'),('type','in',['pop','imap'])])
return self.fetch_mail(cr, uid, ids, context=context)
def fetch_mail(self, cr, uid, ids, context=None):
"""WARNING: meant for cron usage only - will commit() after each email!"""
context = dict(context or {})
context['fetchmail_cron_running'] = True
mail_thread = self.pool.get('mail.thread')
action_pool = self.pool.get('ir.actions.server')
for server in self.browse(cr, uid, ids, context=context):
_logger.info('start checking for new emails on %s server %s', server.type, server.name)
context.update({'fetchmail_server_id': server.id, 'server_type': server.type})
count, failed = 0, 0
imap_server = False
pop_server = False
if server.type == 'imap':
try:
imap_server = server.connect()
imap_server.select()
result, data = imap_server.search(None, '(UNSEEN)')
for num in data[0].split():
res_id = None
result, data = imap_server.fetch(num, '(RFC822)')
imap_server.store(num, '-FLAGS', '\\Seen')
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
data[0][1],
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
except Exception:
_logger.exception('Failed to process mail from %s server %s.', server.type, server.name)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
imap_server.store(num, '+FLAGS', '\\Seen')
cr.commit()
count += 1
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", count, server.type, server.name, (count - failed), failed)
except Exception:
_logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name)
finally:
if imap_server:
imap_server.close()
imap_server.logout()
elif server.type == 'pop':
try:
while True:
pop_server = server.connect()
(numMsgs, totalSize) = pop_server.stat()
pop_server.list()
for num in range(1, min(MAX_POP_MESSAGES, numMsgs) + 1):
(header, msges, octets) = pop_server.retr(num)
msg = '\n'.join(msges)
res_id = None
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
msg,
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
pop_server.dele(num)
except Exception:
_logger.exception('Failed to process mail from %s server %s.', server.type, server.name)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
cr.commit()
if numMsgs < MAX_POP_MESSAGES:
break
pop_server.quit()
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", numMsgs, server.type, server.name, (numMsgs - failed), failed)
except Exception:
_logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name)
finally:
if pop_server:
pop_server.quit()
server.write({'date': time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)})
return True
def _update_cron(self, cr, uid, context=None):
if context and context.get('fetchmail_cron_running'):
return
try:
cron = self.pool['ir.model.data'].get_object(
cr, uid, 'fetchmail', 'ir_cron_mail_gateway_action', context=context)
except ValueError:
# Nevermind if default cron cannot be found
return
# Enabled/Disable cron based on the number of 'done' server of type pop or imap
cron.toggle(model=self._name, domain=[('state','=','done'), ('type','in',['pop','imap'])])
def create(self, cr, uid, values, context=None):
res = super(fetchmail_server, self).create(cr, uid, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(fetchmail_server, self).write(cr, uid, ids, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(fetchmail_server, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
class mail_mail(osv.osv):
_inherit = "mail.mail"
_columns = {
'fetchmail_server_id': fields.many2one('fetchmail.server', "Inbound Mail Server",
readonly=True,
select=True,
oldname='server_id'),
}
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).create(cr, uid, values, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).write(cr, uid, ids, values, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 73,865,768,942,001,580 | -689,549,912,587,961,700 | 48.566456 | 195 | 0.523271 | false |
pumaking/hackracer | lib/flask/_compat.py | 783 | 2164 | # -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int, )
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
implements_to_string = _identity
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
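# Minimal illustrative sketch of with_metaclass (the class names are
# assumptions):
#   class ModelMeta(type):
#       pass
#
#   class Model(with_metaclass(ModelMeta, object)):
#       pass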
| apache-2.0 | 7,402,875,881,696,096,000 | -1,341,179,840,448,844,000 | 28.643836 | 72 | 0.614603 | false |
vladikr/nova_drafts | nova/tests/integrated/api/client.py | 3 | 11942 | # Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import urllib
import six.moves.urllib.parse as urlparse
from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.tests.image import fake
LOG = logging.getLogger(__name__)
class OpenStackApiException(Exception):
def __init__(self, message=None, response=None):
self.response = response
if not message:
message = 'Unspecified error'
if response:
_status = response.status
_body = response.read()
message = (_('%(message)s\nStatus Code: %(_status)s\n'
'Body: %(_body)s') %
{'message': message, '_status': _status,
'_body': _body})
super(OpenStackApiException, self).__init__(message)
class OpenStackApiAuthenticationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Authentication error")
super(OpenStackApiAuthenticationException, self).__init__(message,
response)
class OpenStackApiAuthorizationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Authorization error")
super(OpenStackApiAuthorizationException, self).__init__(message,
response)
class OpenStackApiNotFoundException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Item not found")
super(OpenStackApiNotFoundException, self).__init__(message, response)
class TestOpenStackClient(object):
"""Simple OpenStack API Client.
This is a really basic OpenStack API client that is under our control,
so we can make changes / insert hooks for testing
"""
def __init__(self, auth_user, auth_key, auth_uri):
super(TestOpenStackClient, self).__init__()
self.auth_result = None
self.auth_user = auth_user
self.auth_key = auth_key
self.auth_uri = auth_uri
# default project_id
self.project_id = 'openstack'
def request(self, url, method='GET', body=None, headers=None):
_headers = {'Content-Type': 'application/json'}
_headers.update(headers or {})
parsed_url = urlparse.urlparse(url)
port = parsed_url.port
hostname = parsed_url.hostname
scheme = parsed_url.scheme
if scheme == 'http':
conn = httplib.HTTPConnection(hostname,
port=port)
elif scheme == 'https':
conn = httplib.HTTPSConnection(hostname,
port=port)
else:
raise OpenStackApiException("Unknown scheme: %s" % url)
relative_url = parsed_url.path
if parsed_url.query:
relative_url = relative_url + "?" + parsed_url.query
LOG.info(_("Doing %(method)s on %(relative_url)s") %
{'method': method, 'relative_url': relative_url})
if body:
LOG.info(_("Body: %s") % body)
conn.request(method, relative_url, body, _headers)
response = conn.getresponse()
return response
def _authenticate(self):
if self.auth_result:
return self.auth_result
auth_uri = self.auth_uri
headers = {'X-Auth-User': self.auth_user,
'X-Auth-Key': self.auth_key,
'X-Auth-Project-Id': self.project_id}
response = self.request(auth_uri,
headers=headers)
http_status = response.status
LOG.debug("%(auth_uri)s => code %(http_status)s",
{'auth_uri': auth_uri, 'http_status': http_status})
if http_status == 401:
raise OpenStackApiAuthenticationException(response=response)
auth_headers = {}
for k, v in response.getheaders():
auth_headers[k] = v
self.auth_result = auth_headers
return self.auth_result
def api_request(self, relative_uri, check_response_status=None,
strip_version=False, **kwargs):
auth_result = self._authenticate()
# NOTE(justinsb): httplib 'helpfully' converts headers to lower case
base_uri = auth_result['x-server-management-url']
if strip_version:
# NOTE(vish): cut out version number and tenant_id
base_uri = '/'.join(base_uri.split('/', 3)[:-1])
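            # e.g. (illustrative) 'http://127.0.0.1:8774/v2/openstack' -> 'http://127.0.0.1:8774'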
full_uri = '%s/%s' % (base_uri, relative_uri)
headers = kwargs.setdefault('headers', {})
headers['X-Auth-Token'] = auth_result['x-auth-token']
response = self.request(full_uri, **kwargs)
http_status = response.status
LOG.debug("%(relative_uri)s => code %(http_status)s",
{'relative_uri': relative_uri, 'http_status': http_status})
if check_response_status:
if http_status not in check_response_status:
if http_status == 404:
raise OpenStackApiNotFoundException(response=response)
elif http_status == 401:
raise OpenStackApiAuthorizationException(response=response)
else:
raise OpenStackApiException(
message=_("Unexpected status code"),
response=response)
return response
def _decode_json(self, response):
body = response.read()
LOG.debug("Decoding JSON: %s", body)
if body:
return jsonutils.loads(body)
else:
return ""
def api_get(self, relative_uri, **kwargs):
kwargs.setdefault('check_response_status', [200])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_post(self, relative_uri, body, **kwargs):
kwargs['method'] = 'POST'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_put(self, relative_uri, body, **kwargs):
kwargs['method'] = 'PUT'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202, 204])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_delete(self, relative_uri, **kwargs):
kwargs['method'] = 'DELETE'
kwargs.setdefault('check_response_status', [200, 202, 204])
return self.api_request(relative_uri, **kwargs)
def get_server(self, server_id):
return self.api_get('/servers/%s' % server_id)['server']
def get_servers(self, detail=True, search_opts=None):
rel_url = '/servers/detail' if detail else '/servers'
if search_opts is not None:
qparams = {}
for opt, val in search_opts.iteritems():
qparams[opt] = val
if qparams:
query_string = "?%s" % urllib.urlencode(qparams)
rel_url += query_string
return self.api_get(rel_url)['servers']
def post_server(self, server):
response = self.api_post('/servers', server)
if 'reservation_id' in response:
return response
else:
return response['server']
def put_server(self, server_id, server):
return self.api_put('/servers/%s' % server_id, server)
def post_server_action(self, server_id, data):
return self.api_post('/servers/%s/action' % server_id, data)
def delete_server(self, server_id):
return self.api_delete('/servers/%s' % server_id)
def get_image(self, image_id):
return self.api_get('/images/%s' % image_id)['image']
def get_images(self, detail=True):
rel_url = '/images/detail' if detail else '/images'
return self.api_get(rel_url)['images']
def post_image(self, image):
return self.api_post('/images', image)['image']
def delete_image(self, image_id):
return self.api_delete('/images/%s' % image_id)
def get_flavor(self, flavor_id):
return self.api_get('/flavors/%s' % flavor_id)['flavor']
def get_flavors(self, detail=True):
rel_url = '/flavors/detail' if detail else '/flavors'
return self.api_get(rel_url)['flavors']
def post_flavor(self, flavor):
return self.api_post('/flavors', flavor)['flavor']
def delete_flavor(self, flavor_id):
return self.api_delete('/flavors/%s' % flavor_id)
def get_volume(self, volume_id):
return self.api_get('/volumes/%s' % volume_id)['volume']
def get_volumes(self, detail=True):
rel_url = '/volumes/detail' if detail else '/volumes'
return self.api_get(rel_url)['volumes']
def post_volume(self, volume):
return self.api_post('/volumes', volume)['volume']
def delete_volume(self, volume_id):
return self.api_delete('/volumes/%s' % volume_id)
def get_server_volume(self, server_id, attachment_id):
return self.api_get('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))['volumeAttachment']
def get_server_volumes(self, server_id):
return self.api_get('/servers/%s/os-volume_attachments' %
(server_id))['volumeAttachments']
def post_server_volume(self, server_id, volume_attachment):
return self.api_post('/servers/%s/os-volume_attachments' %
(server_id), volume_attachment
)['volumeAttachment']
def delete_server_volume(self, server_id, attachment_id):
return self.api_delete('/servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id))
class TestOpenStackClientV3(TestOpenStackClient):
"""Simple OpenStack v3 API Client.
This is a really basic OpenStack API client that is under our control,
so we can make changes / insert hooks for testing.
Note that the V3 API does not have an image API and so it is
not possible to query the api for the image information.
So instead we just access the fake image service used by the unittests
directly.
"""
def get_image(self, image_id):
return fake._fakeImageService.show(None, image_id)
def get_images(self, detail=True):
return fake._fakeImageService.detail(None)
def post_image(self, image):
raise NotImplementedError
def delete_image(self, image_id):
return fake._fakeImageService.delete(None, image_id)
class TestOpenStackClientV3Mixin(object):
def _get_test_client(self):
return TestOpenStackClientV3('fake', 'fake', self.auth_url)
| apache-2.0 | 2,702,833,790,674,683,000 | -7,679,334,696,060,498,000 | 34.96988 | 79 | 0.594457 | false |
marcoarruda/MissionPlanner | Lib/lib2to3/main.py | 50 | 7057 | """
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
"""Return a unified diff of two strings."""
a = a.splitlines()
b = b.splitlines()
return difflib.unified_diff(a, b, filename, filename,
"(original)", "(refactored)",
lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
"""
Prints output to stdout.
"""
def __init__(self, fixers, options, explicit, nobackups, show_diffs):
self.nobackups = nobackups
self.show_diffs = show_diffs
super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
def log_error(self, msg, *args, **kwargs):
self.errors.append((msg, args, kwargs))
self.logger.error(msg, *args, **kwargs)
def write_file(self, new_text, filename, old_text, encoding):
if not self.nobackups:
# Make backup
backup = filename + ".bak"
if os.path.lexists(backup):
try:
os.remove(backup)
except os.error, err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
except os.error, err:
self.log_message("Can't rename %s to %s", filename, backup)
# Actually write the new file
write = super(StdoutRefactoringTool, self).write_file
write(new_text, filename, old_text, encoding)
if not self.nobackups:
shutil.copymode(backup, filename)
def print_output(self, old, new, filename, equal):
if equal:
self.log_message("No changes to %s", filename)
else:
self.log_message("Refactored %s", filename)
if self.show_diffs:
diff_lines = diff_texts(old, new, filename)
try:
if self.output_lock is not None:
with self.output_lock:
for line in diff_lines:
print line
sys.stdout.flush()
else:
for line in diff_lines:
print line
except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" %
(filename,))
return
def warn(msg):
print >> sys.stderr, "WARNING: %s" % (msg,)
def main(fixer_pkg, args=None):
"""Main program.
Args:
fixer_pkg: the name of a package where the fixers are located.
args: optional; a list of command line arguments. If omitted,
sys.argv[1:] is used.
Returns a suggested exit status (0, 1, 2).
"""
# Set up option parser
parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
parser.add_option("-d", "--doctests_only", action="store_true",
help="Fix up doctests only")
parser.add_option("-f", "--fix", action="append", default=[],
help="Each FIX specifies a transformation; default: all")
parser.add_option("-j", "--processes", action="store", default=1,
type="int", help="Run 2to3 concurrently")
parser.add_option("-x", "--nofix", action="append", default=[],
help="Prevent a transformation from being run")
parser.add_option("-l", "--list-fixes", action="store_true",
help="List available transformations")
parser.add_option("-p", "--print-function", action="store_true",
help="Modify the grammar so that print() is a function")
parser.add_option("-v", "--verbose", action="store_true",
help="More verbose logging")
parser.add_option("--no-diffs", action="store_true",
help="Don't show diffs of the refactoring")
parser.add_option("-w", "--write", action="store_true",
help="Write back modified files")
parser.add_option("-n", "--nobackups", action="store_true", default=False,
help="Don't write backups for modified files")
# Parse command line arguments
refactor_stdin = False
flags = {}
options, args = parser.parse_args(args)
if not options.write and options.no_diffs:
warn("not writing files and not printing diffs; that's not very useful")
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.list_fixes:
print "Available transformations for the -f/--fix option:"
for fixname in refactor.get_all_fix_names(fixer_pkg):
print fixname
if not args:
return 0
if not args:
print >> sys.stderr, "At least one file or directory argument required."
print >> sys.stderr, "Use --help to show usage."
return 2
if "-" in args:
refactor_stdin = True
if options.write:
print >> sys.stderr, "Can't write to stdin."
return 2
if options.print_function:
flags["print_function"] = True
# Set up logging handler
level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s', level=level)
# Initialize the refactoring tool
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
explicit = set()
if options.fix:
all_present = False
for fix in options.fix:
if fix == "all":
all_present = True
else:
explicit.add(fixer_pkg + ".fix_" + fix)
requested = avail_fixes.union(explicit) if all_present else explicit
else:
requested = avail_fixes.union(explicit)
fixer_names = requested.difference(unwanted_fixes)
rt = StdoutRefactoringTool(sorted(fixer_names), flags, sorted(explicit),
options.nobackups, not options.no_diffs)
# Refactor all files and directories passed as arguments
if not rt.errors:
if refactor_stdin:
rt.refactor_stdin()
else:
try:
rt.refactor(args, options.write, options.doctests_only,
options.processes)
except refactor.MultiprocessingUnsupported:
assert options.processes > 1
print >> sys.stderr, "Sorry, -j isn't " \
"supported on this platform."
return 1
rt.summarize()
# Return error status (0 if rt.errors is zero)
return int(bool(rt.errors))
| gpl-3.0 | -6,482,758,925,420,626,000 | -633,299,430,207,091,800 | 36.774725 | 80 | 0.549809 | false |
Sarah-Alsinan/muypicky | lib/python3.6/site-packages/pip/_vendor/html5lib/filters/lint.py | 328 | 3365 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from . import base
from ..constants import namespaces, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class Filter(base.Filter):
def __init__(self, source, require_matching_tags=True):
super(Filter, self).__init__(source)
self.require_matching_tags = require_matching_tags
def __iter__(self):
open_elements = []
for token in base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
namespace = token["namespace"]
name = token["name"]
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
assert isinstance(token["data"], dict)
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
assert type == "EmptyTag"
else:
assert type == "StartTag"
if type == "StartTag" and self.require_matching_tags:
open_elements.append((namespace, name))
for (namespace, name), value in token["data"].items():
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
assert isinstance(value, text_type)
elif type == "EndTag":
namespace = token["namespace"]
name = token["name"]
assert namespace is None or isinstance(namespace, text_type)
assert namespace != ""
assert isinstance(name, text_type)
assert name != ""
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name}
elif self.require_matching_tags:
start = open_elements.pop()
assert start == (namespace, name)
elif type == "Comment":
data = token["data"]
assert isinstance(data, text_type)
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
assert isinstance(data, text_type)
assert data != ""
if type == "SpaceCharacters":
assert data.strip(spaceCharacters) == ""
elif type == "Doctype":
name = token["name"]
assert name is None or isinstance(name, text_type)
assert token["publicId"] is None or isinstance(name, text_type)
assert token["systemId"] is None or isinstance(name, text_type)
elif type == "Entity":
assert isinstance(token["name"], text_type)
elif type == "SerializerError":
assert isinstance(token["data"], text_type)
else:
assert False, "Unknown token type: %(type)s" % {"type": type}
yield token
| mit | 5,804,750,253,922,815,000 | 4,548,307,267,395,593,700 | 40.54321 | 98 | 0.530163 | false |
lanfker/tdma_imac | .waf-1.6.7-0a94702c61504c487a251b8d0a04ca9a/waflib/Node.py | 4 | 11193 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/svn/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
if sys.hexversion < 0x020400f0: from sets import Set as set
import os,re,sys,shutil
from waflib import Utils,Errors
exclude_regs='''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/BitKeeper
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzrignore
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/.arch-ids
**/{arch}
**/_darcs
**/_darcs/**
**/.DS_Store'''
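# Illustrative use of the default exclusion list above (the `bld.path` node is
# assumed, as in a typical wscript; it is not defined in this file):
#   bld.path.ant_glob('**/*.c', excl=exclude_regs)
# collects C sources under that node while skipping the VCS/metadata folders listed above.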
def split_path(path):
return path.split('/')
def split_path_cygwin(path):
if path.startswith('//'):
ret=path.split('/')[2:]
ret[0]='/'+ret[0]
return ret
return path.split('/')
re_sp=re.compile('[/\\\\]')
def split_path_win32(path):
if path.startswith('\\\\'):
ret=re.split(re_sp,path)[2:]
ret[0]='\\'+ret[0]
return ret
return re.split(re_sp,path)
if sys.platform=='cygwin':
split_path=split_path_cygwin
elif Utils.is_win32:
split_path=split_path_win32
class Node(object):
__slots__=('name','sig','children','parent','cache_abspath','cache_isdir')
def __init__(self,name,parent):
self.name=name
self.parent=parent
if parent:
if name in parent.children:
raise Errors.WafError('node %s exists in the parent files %r already'%(name,parent))
parent.children[name]=self
def __setstate__(self,data):
self.name=data[0]
self.parent=data[1]
if data[2]is not None:
self.children=data[2]
if data[3]is not None:
self.sig=data[3]
def __getstate__(self):
return(self.name,self.parent,getattr(self,'children',None),getattr(self,'sig',None))
def __str__(self):
return self.name
def __repr__(self):
return self.abspath()
def __hash__(self):
return id(self)
def __eq__(self,node):
return id(self)==id(node)
def __copy__(self):
raise Errors.WafError('nodes are not supposed to be copied')
def read(self,flags='r'):
return Utils.readf(self.abspath(),flags)
def write(self,data,flags='w'):
f=None
try:
f=open(self.abspath(),flags)
f.write(data)
finally:
if f:
f.close()
def chmod(self,val):
os.chmod(self.abspath(),val)
def delete(self):
try:
if getattr(self,'children',None):
shutil.rmtree(self.abspath())
else:
os.unlink(self.abspath())
except:
pass
try:
delattr(self,'children')
except:
pass
def suffix(self):
k=max(0,self.name.rfind('.'))
return self.name[k:]
def height(self):
d=self
val=-1
while d:
d=d.parent
val+=1
return val
def listdir(self):
lst=Utils.listdir(self.abspath())
lst.sort()
return lst
def mkdir(self):
if getattr(self,'cache_isdir',None):
return
try:
self.parent.mkdir()
except:
pass
if self.name:
try:
os.makedirs(self.abspath())
except OSError:
pass
if not os.path.isdir(self.abspath()):
raise Errors.WafError('Could not create the directory %s'%self.abspath())
try:
self.children
except:
self.children={}
self.cache_isdir=True
def find_node(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
for x in lst:
if x=='..':
cur=cur.parent or cur
continue
try:
if x in cur.children:
cur=cur.children[x]
continue
except:
cur.children={}
cur=self.__class__(x,cur)
try:
os.stat(cur.abspath())
except:
del cur.parent.children[x]
return None
ret=cur
try:
while not getattr(cur.parent,'cache_isdir',None):
cur=cur.parent
cur.cache_isdir=True
except AttributeError:
pass
return ret
def make_node(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
for x in lst:
if x=='..':
cur=cur.parent or cur
continue
if getattr(cur,'children',{}):
if x in cur.children:
cur=cur.children[x]
continue
else:
cur.children={}
cur=self.__class__(x,cur)
return cur
def search(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
try:
for x in lst:
if x=='..':
cur=cur.parent or cur
else:
cur=cur.children[x]
return cur
except:
pass
def path_from(self,node):
c1=self
c2=node
c1h=c1.height()
c2h=c2.height()
lst=[]
up=0
while c1h>c2h:
lst.append(c1.name)
c1=c1.parent
c1h-=1
while c2h>c1h:
up+=1
c2=c2.parent
c2h-=1
while id(c1)!=id(c2):
lst.append(c1.name)
up+=1
c1=c1.parent
c2=c2.parent
for i in range(up):
lst.append('..')
lst.reverse()
return os.sep.join(lst)or'.'
def abspath(self):
try:
return self.cache_abspath
except:
pass
if os.sep=='/':
if not self.parent:
val=os.sep
elif not self.parent.name:
val=os.sep+self.name
else:
val=self.parent.abspath()+os.sep+self.name
else:
if not self.parent:
val=''
elif not self.parent.name:
val=self.name+os.sep
else:
val=self.parent.abspath().rstrip(os.sep)+os.sep+self.name
self.cache_abspath=val
return val
def is_child_of(self,node):
p=self
diff=self.height()-node.height()
while diff>0:
diff-=1
p=p.parent
return id(p)==id(node)
def ant_iter(self,accept=None,maxdepth=25,pats=[],dir=False,src=True,remove=True):
dircont=self.listdir()
dircont.sort()
try:
lst=set(self.children.keys())
if remove:
for x in lst-set(dircont):
del self.children[x]
except:
self.children={}
for name in dircont:
npats=accept(name,pats)
if npats and npats[0]:
accepted=[]in npats[0]
node=self.make_node([name])
isdir=os.path.isdir(node.abspath())
if accepted:
if isdir:
if dir:
yield node
else:
if src:
yield node
if getattr(node,'cache_isdir',None)or isdir:
node.cache_isdir=True
if maxdepth:
for k in node.ant_iter(accept=accept,maxdepth=maxdepth-1,pats=npats,dir=dir,src=src):
yield k
raise StopIteration
def ant_glob(self,*k,**kw):
src=kw.get('src',True)
dir=kw.get('dir',False)
excl=kw.get('excl',exclude_regs)
incl=k and k[0]or kw.get('incl','**')
def to_pat(s):
lst=Utils.to_list(s)
ret=[]
for x in lst:
x=x.replace('\\','/').replace('//','/')
if x.endswith('/'):
x+='**'
lst2=x.split('/')
accu=[]
for k in lst2:
if k=='**':
accu.append(k)
else:
k=k.replace('.','[.]').replace('*','.*').replace('?','.').replace('+','\\+')
k='^%s$'%k
try:
accu.append(re.compile(k))
except Exception ,e:
raise Errors.WafError("Invalid pattern: %s"%k,e)
ret.append(accu)
return ret
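		# e.g. (illustrative) to_pat('**/*.c') returns [['**', re.compile('^.*[.]c$')]]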
def filtre(name,nn):
ret=[]
for lst in nn:
if not lst:
pass
elif lst[0]=='**':
ret.append(lst)
if len(lst)>1:
if lst[1].match(name):
ret.append(lst[2:])
else:
ret.append([])
elif lst[0].match(name):
ret.append(lst[1:])
return ret
def accept(name,pats):
nacc=filtre(name,pats[0])
nrej=filtre(name,pats[1])
if[]in nrej:
nacc=[]
return[nacc,nrej]
ret=[x for x in self.ant_iter(accept=accept,pats=[to_pat(incl),to_pat(excl)],maxdepth=25,dir=dir,src=src,remove=kw.get('remove',True))]
if kw.get('flat',False):
return' '.join([x.path_from(self)for x in ret])
return ret
def find_nodes(self,find_dirs=True,find_files=True,match_fun=lambda x:True):
x="""
Recursively finds nodes::
def configure(cnf):
cnf.find_nodes()
:param find_dirs: whether to return directories
:param find_files: whether to return files
:param match_fun: matching function, taking a node as parameter
:rtype generator
:return: a generator that iterates over all the requested files
"""
files=self.listdir()
for f in files:
node=self.make_node([f])
if os.path.isdir(node.abspath()):
if find_dirs and match_fun(node):
yield node
gen=node.find_nodes(find_dirs,find_files,match_fun)
for g in gen:
yield g
else:
if find_files and match_fun(node):
yield node
def is_src(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==y:
return False
if id(cur)==x:
return True
cur=cur.parent
return False
def is_bld(self):
cur=self
y=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==y:
return True
cur=cur.parent
return False
def get_src(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
lst=[]
while cur.parent:
if id(cur)==y:
lst.reverse()
return self.ctx.srcnode.make_node(lst)
if id(cur)==x:
return self
lst.append(cur.name)
cur=cur.parent
return self
def get_bld(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
lst=[]
while cur.parent:
if id(cur)==y:
return self
if id(cur)==x:
lst.reverse()
return self.ctx.bldnode.make_node(lst)
lst.append(cur.name)
cur=cur.parent
return self
def find_resource(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.get_bld().search(lst)
if not node:
self=self.get_src()
node=self.search(lst)
if not node:
node=self.find_node(lst)
try:
pat=node.abspath()
if os.path.isdir(pat):
return None
except:
pass
return node
def find_or_declare(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.get_bld().search(lst)
if node:
if not os.path.isfile(node.abspath()):
node.sig=None
try:
node.parent.mkdir()
except:
pass
return node
self=self.get_src()
node=self.find_node(lst)
if node:
if not os.path.isfile(node.abspath()):
node.sig=None
try:
node.parent.mkdir()
except:
pass
return node
node=self.get_bld().make_node(lst)
node.parent.mkdir()
return node
def find_dir(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.find_node(lst)
try:
if not os.path.isdir(node.abspath()):
return None
except(OSError,AttributeError):
return None
return node
def change_ext(self,ext,ext_in=None):
name=self.name
if ext_in is None:
k=name.rfind('.')
if k>=0:
name=name[:k]+ext
else:
name=name+ext
else:
name=name[:-len(ext_in)]+ext
return self.parent.find_or_declare([name])
def nice_path(self,env=None):
return self.path_from(self.ctx.launch_node())
def bldpath(self):
return self.path_from(self.ctx.bldnode)
def srcpath(self):
return self.path_from(self.ctx.srcnode)
def relpath(self):
cur=self
x=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==x:
return self.bldpath()
cur=cur.parent
return self.srcpath()
def bld_dir(self):
return self.parent.bldpath()
def bld_base(self):
s=os.path.splitext(self.name)[0]
return self.bld_dir()+os.sep+s
def get_bld_sig(self):
try:
ret=self.ctx.hash_cache[id(self)]
except KeyError:
pass
except AttributeError:
self.ctx.hash_cache={}
else:
return ret
if not self.is_bld()or self.ctx.bldnode is self.ctx.srcnode:
self.sig=Utils.h_file(self.abspath())
self.ctx.hash_cache[id(self)]=ret=self.sig
return ret
pickle_lock=Utils.threading.Lock()
class Nod3(Node):
pass
| gpl-2.0 | -8,897,789,682,463,250,000 | -8,457,699,186,807,641,000 | 21.386 | 137 | 0.62557 | false |
siutanwong/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    # Build an LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause | -6,702,107,930,180,400,000 | 8,831,859,389,855,721,000 | 38.268908 | 78 | 0.643484 | false |
AngelkPetkov/titanium_mobile | support/iphone/filetail.py | 37 | 7564 | ## {{{ http://code.activestate.com/recipes/436477/ (r1)
# filetail.py
# Copyright (C) 2005 by The Trustees of the University of Pennsylvania
# Licensed under the Python license http://www.python.org/psf/license/
# Author: Jon Moore
"""
Module to allow for reading lines from a continuously-growing file (such as
a system log). Handles log files that get rotated/truncated out from under
us. Inspired by the Perl File::Tail module.
Example:
t = filetail.Tail("log.txt")
while True:
line = t.nextline()
# do something with the line
or:
t = filetail.Tail("log.txt")
for line in t:
# do something
pass
"""
from os import stat
from os.path import abspath
from stat import ST_SIZE
from time import sleep, time
class Tail(object):
"""The Tail monitor object."""
def __init__(self, path, only_new = False,
min_sleep = 1,
sleep_interval = 1,
max_sleep = 60):
"""Initialize a tail monitor.
path: filename to open
only_new: By default, the tail monitor will start reading from
the beginning of the file when first opened. Set only_new to
True to have it skip to the end when it first opens, so that
you only get the new additions that arrive after you start
monitoring.
min_sleep: Shortest interval in seconds to sleep when waiting
for more input to arrive. Defaults to 1.0 second.
sleep_interval: The tail monitor will dynamically recompute an
appropriate sleep interval based on a sliding window of data
arrival rate. You can set sleep_interval here to seed it
initially if the default of 1.0 second doesn't work for you
and you don't want to wait for it to converge.
max_sleep: Maximum interval in seconds to sleep when waiting
for more input to arrive. Also, if this many seconds have
elapsed without getting any new data, the tail monitor will
check to see if the log got truncated (rotated) and will
quietly reopen itself if this was the case. Defaults to 60.0
seconds.
"""
# remember path to file in case I need to reopen
self.path = abspath(path)
self.f = open(self.path,"r")
self.min_sleep = min_sleep * 1.0
self.sleep_interval = sleep_interval * 1.0
self.max_sleep = max_sleep * 1.0
if only_new:
# seek to current end of file
file_len = stat(path)[ST_SIZE]
self.f.seek(file_len)
self.pos = self.f.tell() # where am I in the file?
self.last_read = time() # when did I last get some data?
self.queue = [] # queue of lines that are ready
self.window = [] # sliding window for dynamically
# adjusting the sleep_interval
def _recompute_rate(self, n, start, stop):
"""Internal function for recomputing the sleep interval. I get
called with a number of lines that appeared between the start and
stop times; this will get added to a sliding window, and I will
recompute the average interarrival rate over the last window.
"""
self.window.append((n, start, stop))
purge_idx = -1 # index of the highest old record
tot_n = 0 # total arrivals in the window
tot_start = stop # earliest time in the window
tot_stop = start # latest time in the window
for i, record in enumerate(self.window):
(i_n, i_start, i_stop) = record
if i_stop < start - self.max_sleep:
# window size is based on self.max_sleep; this record has
# fallen out of the window
purge_idx = i
else:
tot_n += i_n
if i_start < tot_start: tot_start = i_start
if i_stop > tot_stop: tot_stop = i_stop
if purge_idx >= 0:
# clean the old records out of the window (slide the window)
self.window = self.window[purge_idx+1:]
if tot_n > 0:
# recompute; stay within bounds
self.sleep_interval = (tot_stop - tot_start) / tot_n
if self.sleep_interval > self.max_sleep:
self.sleep_interval = self.max_sleep
if self.sleep_interval < self.min_sleep:
self.sleep_interval = self.min_sleep
def _fill_cache(self):
"""Internal method for grabbing as much data out of the file as is
available and caching it for future calls to nextline(). Returns
the number of lines just read.
"""
old_len = len(self.queue)
line = self.f.readline()
while line != "":
self.queue.append(line)
line = self.f.readline()
# how many did we just get?
num_read = len(self.queue) - old_len
if num_read > 0:
self.pos = self.f.tell()
now = time()
self._recompute_rate(num_read, self.last_read, now)
self.last_read = now
return num_read
def _dequeue(self):
"""Internal method; returns the first available line out of the
cache, if any."""
if len(self.queue) > 0:
line = self.queue[0]
self.queue = self.queue[1:]
return line
else:
return None
def _reset(self):
"""Internal method; reopen the internal file handle (probably
because the log file got rotated/truncated)."""
self.f.close()
self.f = open(self.path, "r")
self.pos = self.f.tell()
self.last_read = time()
def nextline(self):
"""Return the next line from the file. Blocks if there are no lines
immediately available."""
# see if we have any lines cached from the last file read
line = self._dequeue()
if line:
return line
# ok, we are out of cache; let's get some lines from the file
if self._fill_cache() > 0:
# got some
return self._dequeue()
# hmm, still no input available
while True:
sleep(self.sleep_interval)
if self._fill_cache() > 0:
return self._dequeue()
now = time()
if (now - self.last_read > self.max_sleep):
# maybe the log got rotated out from under us?
if stat(self.path)[ST_SIZE] < self.pos:
# file got truncated and/or re-created
self._reset()
if self._fill_cache() > 0:
return self._dequeue()
def close(self):
"""Close the tail monitor, discarding any remaining input."""
self.f.close()
self.f = None
self.queue = []
self.window = []
def __iter__(self):
"""Iterator interface, so you can do:
for line in filetail.Tail('log.txt'):
# do stuff
pass
"""
return self
def next(self):
"""Kick the iterator interface. Used under the covers to support:
for line in filetail.Tail('log.txt'):
# do stuff
pass
"""
return self.nextline()
## end of http://code.activestate.com/recipes/436477/ }}}
| apache-2.0 | 4,863,922,794,714,111,000 | -5,688,719,398,513,282,000 | 36.82 | 75 | 0.555526 | false |
gurgeh/data-preppy | convert_coords.py | 1 | 2602 | import math
# Code converted from JS: http://latlong.mellifica.se/
axis = 6378137.0
flattening = 1.0 / 298.257222101
central_meridian = 15.00
lat_of_origin = 0.0
scale = 0.9996
false_northing = 0.0
false_easting = 500000.0
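# (Assumption: these constants correspond to the GRS80 ellipsoid and a SWEREF 99 TM-style
# projection -- central meridian 15 deg E, scale 0.9996, false easting 500 km.)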
def grid_to_geodetic(x, y):
e2 = flattening * (2.0 - flattening)
n = flattening / (2.0 - flattening)
a_roof = axis / (1.0 + n) * (1.0 + n * n / 4.0 + n * n * n * n / 64.0)
delta1 = n / 2.0 - 2.0 * n * n / 3.0 + 37.0 * n * n * n / 96.0 - n * n * n * n / 360.0
delta2 = n * n / 48.0 + n * n * n / 15.0 - 437.0 * n * n * n * n / 1440.0
delta3 = 17.0 * n * n * n / 480.0 - 37 * n * n * n * n / 840.0
delta4 = 4397.0 * n * n * n * n / 161280.0
Astar = e2 + e2 * e2 + e2 * e2 * e2 + e2 * e2 * e2 * e2
Bstar = -(7.0 * e2 * e2 + 17.0 * e2 * e2 * e2 + 30.0 * e2 * e2 * e2 * e2) / 6.0
Cstar = (224.0 * e2 * e2 * e2 + 889.0 * e2 * e2 * e2 * e2) / 120.0
Dstar = -(4279.0 * e2 * e2 * e2 * e2) / 1260.0
deg_to_rad = math.pi / 180
lambda_zero = central_meridian * deg_to_rad
xi = (x - false_northing) / (scale * a_roof)
eta = (y - false_easting) / (scale * a_roof)
xi_prim = xi - delta1 * math.sin(2.0 * xi) * math.cosh(2.0 * eta) - \
delta2 * math.sin(4.0 * xi) * math.cosh(4.0 * eta) - \
delta3 * math.sin(6.0 * xi) * math.cosh(6.0 * eta) - \
delta4 * math.sin(8.0 * xi) * math.cosh(8.0 * eta)
eta_prim = eta - \
delta1 * math.cos(2.0 * xi) * math.sinh(2.0 * eta) - \
delta2 * math.cos(4.0 * xi) * math.sinh(4.0 * eta) - \
delta3 * math.cos(6.0 * xi) * math.sinh(6.0 * eta) - \
delta4 * math.cos(8.0 * xi) * math.sinh(8.0 * eta)
phi_star = math.asin(math.sin(xi_prim) / math.cosh(eta_prim))
delta_lambda = math.atan(math.sinh(eta_prim) / math.cos(xi_prim))
lon_radian = lambda_zero + delta_lambda
lat_radian = phi_star + math.sin(phi_star) * math.cos(phi_star) * \
(Astar +
Bstar * math.pow(math.sin(phi_star), 2) +
Cstar * math.pow(math.sin(phi_star), 4) +
Dstar * math.pow(math.sin(phi_star), 6))
return lat_radian * 180.0 / math.pi, lon_radian * 180.0 / math.pi
def distance_in_km((lat1, lon1), (lat2, lon2)):
R = 6371 # Radius of the earth in km
dLat = math.radians(lat2 - lat1)
dLon = math.radians(lon2 - lon1)
a = math.sin(dLat / 2) * math.sin(dLat / 2) + \
math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * \
math.sin(dLon / 2) * math.sin(dLon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = R * c # Distance in km
return d
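# Minimal smoke test; assumes SWEREF 99 TM-style grid input (northing, easting).
# The coordinate values below are rough, made-up points, not reference data.
if __name__ == '__main__':
    p1 = grid_to_geodetic(6580994.0, 674241.0)
    p2 = grid_to_geodetic(6638902.0, 647731.0)
    print p1, p2 # (lat, lon) tuples in degrees
    print distance_in_km(p1, p2) # great-circle distance in km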
| apache-2.0 | 8,703,145,660,984,619,000 | -4,214,618,783,912,457,700 | 39.030769 | 90 | 0.529208 | false |
lasershow/codecombat | scripts/devSetup/repositoryInstaller.py | 70 | 4158 | from __future__ import print_function
__author__ = u'schmatz'
import configuration
import errors
import subprocess
import os
import sys
from which import which
#git clone https://github.com/nwinter/codecombat.git coco
class RepositoryInstaller():
def __init__(self,config):
self.config = config
assert isinstance(self.config,configuration.Configuration)
if not self.checkIfGitExecutableExists():
if self.config.system.operating_system == "linux":
raise errors.CoCoError("Git is missing. Please install it (try 'sudo apt-get install git')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing git.")
elif self.config.system.operating_system == "mac":
raise errors.CoCoError("Git is missing. Please install the Xcode command line tools.")
raise errors.CoCoError(u"Git is missing. Please install git.")
#http://stackoverflow.com/questions/9329243/xcode-4-4-and-later-install-command-line-tools
if not self.checkIfCurlExecutableExists():
if self.config.system.operating_system == "linux":
raise errors.CoCoError("Curl is missing. Please install it (try 'sudo apt-get install curl')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing curl.")
elif self.config.system.operating_system == "mac":
raise errors.CoCoError("Curl is missing. Please install the Xcode command line tools.")
raise errors.CoCoError(u"Git is missing. Please install git.")
def checkIfGitExecutableExists(self):
gitPath = which(u"git")
if gitPath:
return True
else:
return False
#TODO: Refactor this into a more appropriate file
def checkIfCurlExecutableExists(self):
curlPath = which("curl")
if curlPath:
return True
else:
return False
def cloneRepository(self):
print(u"Cloning repository...")
#TODO: CHANGE THIS BEFORE LAUNCH
return_code = True
git_folder = self.config.directory.root_install_directory + os.sep + "coco"
print("Installing into " + git_folder)
return_code = subprocess.call("git clone " + self.config.repository_url +" coco",cwd=self.config.directory.root_install_directory,shell=True)
        #TODO: remove this on windows
        subprocess.call("chmod -R 0777 " + git_folder, shell=True)
if return_code and self.config.system.operating_system != u"windows":
#raise errors.CoCoError("Failed to clone git repository")
import shutil
#import sys
#sys.stdout.flush()
raw_input(u"Copy it now")
#shutil.copytree(u"/Users/schmatz/coco",self.config.directory.root_install_directory + os.sep + u"coco")
print(u"Copied tree just for you")
#print("FAILED TO CLONE GIT REPOSITORY")
#input("Clone the repository and click any button to continue")
elif self.config.system.operating_system == u"windows":
raise errors.CoCoError(u"Windows doesn't support automated installations of npm at this point.")
else:
print(u"Cloned git repository")
def install_node_packages(self):
print(u"Installing node packages...")
#TODO: "Replace npm with more robust package
#npm_location = self.config.directory.bin_directory + os.sep + "node" + os.sep + "bin" + os.sep + "npm"
npm_location = u"npm"
if sys.version_info[0] == 2:
py_cmd = "python"
else:
py_cmd = subprocess.check_output(['which', 'python2'])
return_code = subprocess.call([npm_location, u"install",
"--python=" + py_cmd],
cwd=self.config.directory.root_dir +
os.sep + u"coco")
if return_code:
raise errors.CoCoError(u"Failed to install node packages")
else:
print(u"Installed node packages!")
| mit | 8,910,128,217,003,119,000 | 6,960,944,096,916,977,000 | 51.632911 | 221 | 0.627946 | false |
hecchi777/S3-SlaacSecuritySolution | impacket-0.9.11/examples/lookupsid.py | 1 | 5922 | #!/usr/bin/python
# Copyright (c) 2012-2014 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id: lookupsid.py 1101 2014-01-14 22:15:30Z [email protected] $
#
# DCE/RPC lookup sid brute forcer example
#
# Author:
# Alberto Solino
#
# Reference for:
# DCE/RPC [MS-LSAT]
import socket
import string
import sys
import types
from impacket import uuid, ntlm, version
from impacket.dcerpc.v5 import transport, lsat, lsad
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.samr import SID_NAME_USE
from impacket.dcerpc.v5.dtypes import MAXIMUM_ALLOWED
import argparse
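# Illustrative invocation (target values are made up):
#   python lookupsid.py contoso/administrator:[email protected] 1500 445/SMB
# walks RIDs up to 1500 on 10.0.0.5 over SMB and prints any SIDs that resolve to names.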
class LSALookupSid:
KNOWN_PROTOCOLS = {
'139/SMB': (r'ncacn_np:%s[\pipe\lsarpc]', 139),
'445/SMB': (r'ncacn_np:%s[\pipe\lsarpc]', 445),
'135/TCP': (r'ncacn_ip_tcp:%s', 135),
}
def __init__(self, username, password, domain, protocols = None,
hashes = None, maxRid=4000):
if not protocols:
protocols = LSALookupSid.KNOWN_PROTOCOLS.keys()
self.__username = username
self.__password = password
self.__protocols = [protocols]
self.__maxRid = int(maxRid)
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def dump(self, addr):
print 'Brute forcing SIDs at %s' % addr
# Try all requested protocols until one works.
entries = []
for protocol in self.__protocols:
protodef = LSALookupSid.KNOWN_PROTOCOLS[protocol]
port = protodef[1]
print "Trying protocol %s..." % protocol
stringbinding = protodef[0] % addr
rpctransport = transport.DCERPCTransportFactory(stringbinding)
rpctransport.set_dport(port)
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
try:
entries = self.__bruteForce(rpctransport, self.__maxRid)
except Exception, e:
import traceback
print traceback.print_exc()
print e
raise
else:
# Got a response. No need for further iterations.
break
def __bruteForce(self, rpctransport, maxRid):
dce = rpctransport.get_dce_rpc()
entries = []
dce.connect()
# Want encryption? Uncomment next line
# But make SIMULTANEOUS variable <= 100
#dce.set_auth_level(ntlm.NTLM_AUTH_PKT_PRIVACY)
# Want fragmentation? Uncomment next line
#dce.set_max_fragment_size(32)
dce.bind(lsat.MSRPC_UUID_LSAT)
resp = lsat.hLsarOpenPolicy2(dce, MAXIMUM_ALLOWED | lsat.POLICY_LOOKUP_NAMES)
policyHandle = resp['PolicyHandle']
resp = lsad.hLsarQueryInformationPolicy2(dce, policyHandle, lsad.POLICY_INFORMATION_CLASS.PolicyAccountDomainInformation)
domainSid = resp['PolicyInformation']['PolicyAccountDomainInfo']['DomainSid'].formatCanonical()
soFar = 0
SIMULTANEOUS = 1000
for j in range(maxRid/SIMULTANEOUS+1):
if (maxRid - soFar) / SIMULTANEOUS == 0:
sidsToCheck = (maxRid - soFar) % SIMULTANEOUS
else:
sidsToCheck = SIMULTANEOUS
sids = list()
for i in xrange(soFar, soFar+sidsToCheck):
sids.append(domainSid + '-%d' % (i))
try:
request = lsat.hLsarLookupSids(dce, policyHandle, sids,lsat.LSAP_LOOKUP_LEVEL.LsapLookupWksta)
except Exception, e:
if str(e).find('STATUS_NONE_MAPPED') >= 0:
soFar += SIMULTANEOUS
continue
elif str(e).find('STATUS_SOME_NOT_MAPPED') >= 0:
resp = e.get_packet()
else:
raise
for n, item in enumerate(resp['TranslatedNames']['Names']):
if item['Use'] != SID_NAME_USE.SidTypeUnknown:
print "%d: %s\\%s (%s)" % (soFar+n, resp['ReferencedDomains']['Domains'][item['DomainIndex']]['Name'], item['Name'], SID_NAME_USE.enumItems(item['Use']).name)
soFar += SIMULTANEOUS
dce.disconnect()
return entries
# Process command-line arguments.
if __name__ == '__main__':
print version.BANNER
parser = argparse.ArgumentParser()
parser.add_argument('target', action='store', help='[domain/][username[:password]@]<address>')
parser.add_argument('maxRid', action='store', default = '4000', nargs='?', help='max Rid to check (default 4000)')
parser.add_argument('protocol', choices=LSALookupSid.KNOWN_PROTOCOLS.keys(), nargs='?', default='445/SMB', help='transport protocol (default 445/SMB)')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
import re
domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(options.target).groups('')
if domain is None:
domain = ''
if password == '' and username != '' and options.hashes is None:
from getpass import getpass
password = getpass("Password:")
lookup = LSALookupSid(username, password, domain, options.protocol, options.hashes, options.maxRid)
try:
lookup.dump(address)
except Exception, e:
pass
| apache-2.0 | -4,215,157,383,327,810,000 | 7,135,670,611,006,526,000 | 34.461078 | 178 | 0.600135 | false |
ambikeshwar1991/sandhi-2 | module/gr36/grc/python/convert_hier.py | 16 | 3773 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Constants import BLOCK_DTD
from .. base import ParseXML
from .. base import odict
def convert_hier(flow_graph, python_file):
#extract info from the flow graph
input_sigs = flow_graph.get_io_signaturev('in')
output_sigs = flow_graph.get_io_signaturev('out')
input_msgp = flow_graph.get_msg_pad_sources();
output_msgp = flow_graph.get_msg_pad_sinks();
parameters = flow_graph.get_parameters()
block_key = flow_graph.get_option('id')
block_name = flow_graph.get_option('title') or flow_graph.get_option('id').replace('_', ' ').title()
block_category = flow_graph.get_option('category')
block_desc = flow_graph.get_option('description')
block_author = flow_graph.get_option('author')
#build the nested data
block_n = odict()
block_n['name'] = block_name
block_n['key'] = block_key
block_n['category'] = block_category
block_n['import'] = 'execfile("%s")'%python_file
#make data
if parameters: block_n['make'] = '%s(\n\t%s,\n)'%(
block_key,
',\n\t'.join(['%s=$%s'%(param.get_id(), param.get_id()) for param in parameters]),
)
else: block_n['make'] = '%s()'%block_key
#callback data
block_n['callback'] = ['set_%s($%s)'%(param.get_id(), param.get_id()) for param in parameters]
#param data
params_n = list()
for param in parameters:
param_n = odict()
param_n['name'] = param.get_param('label').get_value() or param.get_id()
param_n['key'] = param.get_id()
param_n['value'] = param.get_param('value').get_value()
param_n['type'] = 'raw'
params_n.append(param_n)
block_n['param'] = params_n
#sink data stream ports
block_n['sink'] = list()
for input_sig in input_sigs:
sink_n = odict()
sink_n['name'] = input_sig['label']
sink_n['type'] = input_sig['type']
sink_n['vlen'] = input_sig['vlen']
if input_sig['optional']: sink_n['optional'] = '1'
block_n['sink'].append(sink_n)
#sink data msg ports
for input_sig in input_msgp:
sink_n = odict()
sink_n['name'] = input_sig.get_param("label").get_value();
sink_n['type'] = "message"
sink_n['optional'] = input_sig.get_param("optional").get_value();
block_n['sink'].append(sink_n)
#source data stream ports
block_n['source'] = list()
for output_sig in output_sigs:
source_n = odict()
source_n['name'] = output_sig['label']
source_n['type'] = output_sig['type']
source_n['vlen'] = output_sig['vlen']
if output_sig['optional']: source_n['optional'] = '1'
block_n['source'].append(source_n)
#source data msg ports
for output_sig in output_msgp:
source_n = odict()
source_n['name'] = output_sig.get_param("label").get_value();
source_n['type'] = "message"
source_n['optional'] = output_sig.get_param("optional").get_value();
block_n['source'].append(source_n)
#doc data
block_n['doc'] = "%s\n%s\n%s"%(block_author, block_desc, python_file)
block_n['grc_source'] = "%s"%(flow_graph.grc_file_path)
#write the block_n to file
xml_file = python_file + '.xml'
ParseXML.to_file({'block': block_n}, xml_file)
ParseXML.validate_dtd(xml_file, BLOCK_DTD)
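# Illustrative example (editorial; the path below is hypothetical): calling
# convert_hier(flow_graph, '/home/user/.grc_gnuradio/my_hier.py') writes the
# companion block description to '/home/user/.grc_gnuradio/my_hier.py.xml' and
# validates it against BLOCK_DTD, so the hierarchical block can be reused from
# the GRC block tree.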
| gpl-3.0 | -6,297,357,681,309,361,000 | -4,697,780,462,719,887,000 | 37.5 | 101 | 0.682216 | false |
EduPepperPD/pepper2013 | common/lib/capa/capa/util.py | 19 | 3128 | from calc import evaluator
from cmath import isinf
#-----------------------------------------------------------------------------
#
# Utility functions used in CAPA responsetypes
def compare_with_tolerance(v1, v2, tol):
''' Compare v1 to v2 with maximum tolerance tol
tol is relative if it ends in %; otherwise, it is absolute
- v1 : student result (number)
- v2 : instructor result (number)
- tol : tolerance (string representing a number)
'''
relative = tol.endswith('%')
if relative:
tolerance_rel = evaluator(dict(), dict(), tol[:-1]) * 0.01
tolerance = tolerance_rel * max(abs(v1), abs(v2))
else:
tolerance = evaluator(dict(), dict(), tol)
if isinf(v1) or isinf(v2):
# If an input is infinite, we can end up with `abs(v1-v2)` and
# `tolerance` both equal to infinity. Then, below we would have
# `inf <= inf` which is a fail. Instead, compare directly.
return v1 == v2
else:
return abs(v1 - v2) <= tolerance
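# Illustrative doctest-style examples (editorial, not part of the original module):
#
#     >>> compare_with_tolerance(100.0, 104.0, '5%')   # relative: 5% of max(|v1|, |v2|)
#     True
#     >>> compare_with_tolerance(100.0, 106.0, '5%')
#     False
#     >>> compare_with_tolerance(1.0, 1.05, '0.1')     # absolute tolerance
#     True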
def contextualize_text(text, context): # private
''' Takes a string with variables. E.g. $a+$b.
Does a substitution of those variables from the context '''
if not text:
return text
for key in sorted(context, lambda x, y: cmp(len(y), len(x))):
# TODO (vshnayder): This whole replacement thing is a big hack
# right now--context contains not just the vars defined in the
# program, but also e.g. a reference to the numpy module.
# Should be a separate dict of variables that should be
# replaced.
if '$' + key in text:
try:
s = str(context[key])
except UnicodeEncodeError:
s = context[key].encode('utf8', errors='ignore')
text = text.replace('$' + key, s)
return text
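# Illustrative example (editorial, not part of the original module):
#
#     >>> contextualize_text('$a + $b', {'a': 5, 'b': 2})
#     '5 + 2'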
def convert_files_to_filenames(answers):
'''
Check for File objects in the dict of submitted answers,
convert File objects to their filename (string)
'''
new_answers = dict()
for answer_id in answers.keys():
answer = answers[answer_id]
# Files are stored as a list, even if one file
if is_list_of_files(answer):
new_answers[answer_id] = [f.name for f in answer]
else:
new_answers[answer_id] = answers[answer_id]
return new_answers
def is_list_of_files(files):
return isinstance(files, list) and all(is_file(f) for f in files)
def is_file(file_to_test):
'''
Duck typing to check if 'file_to_test' is a File object
'''
return all(hasattr(file_to_test, method) for method in ['read', 'name'])
def find_with_default(node, path, default):
"""
    Look for a child of node using path, and return its text if found.
Otherwise returns default.
Arguments:
node: lxml node
path: xpath search expression
default: value to return if nothing found
Returns:
node.find(path).text if the find succeeds, default otherwise.
"""
v = node.find(path)
if v is not None:
return v.text
else:
return default
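# Illustrative example (editorial; the element names are made up):
#
#     >>> from lxml import etree
#     >>> node = etree.fromstring('<problem><demandhint>Try again</demandhint></problem>')
#     >>> find_with_default(node, 'demandhint', 'no hint')
#     'Try again'
#     >>> find_with_default(node, 'solution', 'no hint')
#     'no hint'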
| agpl-3.0 | -935,201,770,684,071,300 | -1,723,445,723,179,884,800 | 30.59596 | 78 | 0.595588 | false |
emergebtc/muddery | evennia/evennia/utils/ansi.py | 1 | 33100 | """
ANSI - Gives colour to text.
Use the codes defined in ANSIPARSER in your text
to apply colour to text according to the ANSI standard.
Examples:
This is %crRed text%cn and this is normal again.
This is {rRed text{n and this is normal again.
Mostly you should not need to call parse_ansi() explicitly;
it is run by Evennia just before returning data to/from the
user.
"""
import re
from evennia.utils import utils
from evennia.utils.utils import to_str, to_unicode
# ANSI definitions
ANSI_BEEP = "\07"
ANSI_ESCAPE = "\033"
ANSI_NORMAL = "\033[0m"
ANSI_UNDERLINE = "\033[4m"
ANSI_HILITE = "\033[1m"
ANSI_UNHILITE = "\033[22m"
ANSI_BLINK = "\033[5m"
ANSI_INVERSE = "\033[7m"
ANSI_INV_HILITE = "\033[1;7m"
ANSI_INV_BLINK = "\033[7;5m"
ANSI_BLINK_HILITE = "\033[1;5m"
ANSI_INV_BLINK_HILITE = "\033[1;5;7m"
# Foreground colors
ANSI_BLACK = "\033[30m"
ANSI_RED = "\033[31m"
ANSI_GREEN = "\033[32m"
ANSI_YELLOW = "\033[33m"
ANSI_BLUE = "\033[34m"
ANSI_MAGENTA = "\033[35m"
ANSI_CYAN = "\033[36m"
ANSI_WHITE = "\033[37m"
# Background colors
ANSI_BACK_BLACK = "\033[40m"
ANSI_BACK_RED = "\033[41m"
ANSI_BACK_GREEN = "\033[42m"
ANSI_BACK_YELLOW = "\033[43m"
ANSI_BACK_BLUE = "\033[44m"
ANSI_BACK_MAGENTA = "\033[45m"
ANSI_BACK_CYAN = "\033[46m"
ANSI_BACK_WHITE = "\033[47m"
# Formatting Characters
ANSI_RETURN = "\r\n"
ANSI_TAB = "\t"
ANSI_SPACE = " "
# Escapes
ANSI_ESCAPES = ("{{", "\\\\")
from collections import OrderedDict
_PARSE_CACHE = OrderedDict()
_PARSE_CACHE_SIZE = 10000
class ANSIParser(object):
"""
A class that parses ANSI markup
to ANSI command sequences
    We also allow escaping colour codes,
    by prepending a \ for xterm256 codes,
    or an extra { for Merc-style codes
"""
def sub_ansi(self, ansimatch):
"""
Replacer used by `re.sub` to replace ANSI
markers with correct ANSI sequences
"""
return self.ansi_map.get(ansimatch.group(), "")
def sub_brightbg(self, ansimatch):
"""
Replacer used by `re.sub` to replace ANSI
bright background markers with Xterm256 replacement
"""
return self.ansi_bright_bgs.get(ansimatch.group(), "")
def sub_xterm256(self, rgbmatch, convert=False):
"""
This is a replacer method called by `re.sub` with the matched
tag. It must return the correct ansi sequence.
        The `convert` argument determines whether a true xterm256
        sequence is produced or the colour is downgraded to standard ANSI.
"""
if not rgbmatch:
return ""
# get tag, stripping the initial marker
rgbtag = rgbmatch.group()[1:]
background = rgbtag[0] == '['
if background:
red, green, blue = int(rgbtag[1]), int(rgbtag[2]), int(rgbtag[3])
else:
red, green, blue = int(rgbtag[0]), int(rgbtag[1]), int(rgbtag[2])
if convert:
colval = 16 + (red * 36) + (green * 6) + blue
#print "RGB colours:", red, green, blue
return "\033[%s8;5;%s%s%sm" % (3 + int(background), colval/100, (colval % 100)/10, colval%10)
else:
#print "ANSI convert:", red, green, blue
# xterm256 not supported, convert the rgb value to ansi instead
if red == green and red == blue and red < 2:
if background:
return ANSI_BACK_BLACK
elif red >= 1:
return ANSI_HILITE + ANSI_BLACK
else:
return ANSI_NORMAL + ANSI_BLACK
elif red == green and red == blue:
if background:
return ANSI_BACK_WHITE
elif red >= 4:
return ANSI_HILITE + ANSI_WHITE
else:
return ANSI_NORMAL + ANSI_WHITE
elif red > green and red > blue:
if background:
return ANSI_BACK_RED
elif red >= 3:
return ANSI_HILITE + ANSI_RED
else:
return ANSI_NORMAL + ANSI_RED
elif red == green and red > blue:
if background:
return ANSI_BACK_YELLOW
elif red >= 3:
return ANSI_HILITE + ANSI_YELLOW
else:
return ANSI_NORMAL + ANSI_YELLOW
elif red == blue and red > green:
if background:
return ANSI_BACK_MAGENTA
elif red >= 3:
return ANSI_HILITE + ANSI_MAGENTA
else:
return ANSI_NORMAL + ANSI_MAGENTA
elif green > blue:
if background:
return ANSI_BACK_GREEN
elif green >= 3:
return ANSI_HILITE + ANSI_GREEN
else:
return ANSI_NORMAL + ANSI_GREEN
elif green == blue:
if background:
return ANSI_BACK_CYAN
elif green >= 3:
return ANSI_HILITE + ANSI_CYAN
else:
return ANSI_NORMAL + ANSI_CYAN
else: # mostly blue
if background:
return ANSI_BACK_BLUE
elif blue >= 3:
return ANSI_HILITE + ANSI_BLUE
else:
return ANSI_NORMAL + ANSI_BLUE
def strip_raw_codes(self, string):
"""
Strips raw ANSI codes from a string.
"""
return self.ansi_regex.sub("", string)
def strip_mxp(self, string):
"""
Strips all MXP codes from a string.
"""
return self.mxp_sub.sub(r'\2', string)
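    # Illustrative example (editorial): strip_mxp() keeps only the display text
    # of a {lc...{lt...{le MXP link, e.g.
    #
    #     >>> ANSI_PARSER.strip_mxp("{lclook{ltlook at the sign{le")
    #     'look at the sign'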
def parse_ansi(self, string, strip_ansi=False, xterm256=False, mxp=False):
"""
Parses a string, subbing color codes according to
the stored mapping.
strip_ansi flag instead removes all ANSI markup.
"""
if hasattr(string, '_raw_string'):
if strip_ansi:
return string.clean()
else:
return string.raw()
if not string:
return ''
# check cached parsings
global _PARSE_CACHE
cachekey = "%s-%s-%s" % (string, strip_ansi, xterm256)
if cachekey in _PARSE_CACHE:
return _PARSE_CACHE[cachekey]
# pre-convert bright colors to xterm256 color tags
string = self.brightbg_sub.sub(self.sub_brightbg, string)
def do_xterm256(part):
return self.sub_xterm256(part, xterm256)
in_string = utils.to_str(string)
# do string replacement
parsed_string = ""
parts = self.ansi_escapes.split(in_string) + [" "]
for part, sep in zip(parts[::2], parts[1::2]):
pstring = self.xterm256_sub.sub(do_xterm256, part)
pstring = self.ansi_sub.sub(self.sub_ansi, pstring)
parsed_string += "%s%s" % (pstring, sep[0].strip())
if not mxp:
parsed_string = self.strip_mxp(parsed_string)
if strip_ansi:
# remove all ansi codes (including those manually
# inserted in string)
return self.strip_raw_codes(parsed_string)
# cache and crop old cache
_PARSE_CACHE[cachekey] = parsed_string
if len(_PARSE_CACHE) > _PARSE_CACHE_SIZE:
_PARSE_CACHE.popitem(last=False)
return parsed_string
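    # Illustrative example (editorial, not part of the original module):
    #
    #     >>> ANSI_PARSER.parse_ansi("{rHello{n")
    #     '\x1b[1m\x1b[31mHello\x1b[0m'
    #
    # and with strip_ansi=True the same call returns plain 'Hello'.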
# Mapping using {r {n etc
hilite = ANSI_HILITE
unhilite = ANSI_UNHILITE
ext_ansi_map = [
(r'{n', ANSI_NORMAL), # reset
(r'{/', ANSI_RETURN), # line break
(r'{-', ANSI_TAB), # tab
(r'{_', ANSI_SPACE), # space
(r'{*', ANSI_INVERSE), # invert
(r'{^', ANSI_BLINK), # blinking text (very annoying and not supported by all clients)
(r'{r', hilite + ANSI_RED),
(r'{g', hilite + ANSI_GREEN),
(r'{y', hilite + ANSI_YELLOW),
(r'{b', hilite + ANSI_BLUE),
(r'{m', hilite + ANSI_MAGENTA),
(r'{c', hilite + ANSI_CYAN),
(r'{w', hilite + ANSI_WHITE), # pure white
(r'{x', hilite + ANSI_BLACK), # dark grey
(r'{R', unhilite + ANSI_RED),
(r'{G', unhilite + ANSI_GREEN),
(r'{Y', unhilite + ANSI_YELLOW),
(r'{B', unhilite + ANSI_BLUE),
(r'{M', unhilite + ANSI_MAGENTA),
(r'{C', unhilite + ANSI_CYAN),
(r'{W', unhilite + ANSI_WHITE), # light grey
(r'{X', unhilite + ANSI_BLACK), # pure black
# hilight-able colors
(r'{h', hilite),
(r'{H', unhilite),
(r'{!R', ANSI_RED),
(r'{!G', ANSI_GREEN),
(r'{!Y', ANSI_YELLOW),
(r'{!B', ANSI_BLUE),
(r'{!M', ANSI_MAGENTA),
(r'{!C', ANSI_CYAN),
(r'{!W', ANSI_WHITE), # light grey
(r'{!X', ANSI_BLACK), # pure black
# normal ANSI backgrounds
(r'{[R', ANSI_BACK_RED),
(r'{[G', ANSI_BACK_GREEN),
(r'{[Y', ANSI_BACK_YELLOW),
(r'{[B', ANSI_BACK_BLUE),
(r'{[M', ANSI_BACK_MAGENTA),
(r'{[C', ANSI_BACK_CYAN),
(r'{[W', ANSI_BACK_WHITE), # light grey background
(r'{[X', ANSI_BACK_BLACK) # pure black background
]
ansi_bright_bgs = [
# "bright" ANSI backgrounds using xterm256 since ANSI
# standard does not support it (will
# fallback to dark ANSI background colors if xterm256
# is not supported by client)
(r'{[r', r'{[500'),
(r'{[g', r'{[050'),
(r'{[y', r'{[550'),
(r'{[b', r'{[005'),
(r'{[m', r'{[505'),
(r'{[c', r'{[055'),
(r'{[w', r'{[555'), # white background
(r'{[x', r'{[222')] # dark grey background
# xterm256 {123, %c134. These are replaced directly by
# the sub_xterm256 method
xterm256_map = [
(r'%[0-5]{3}', ""), # %123 - foreground colour
(r'%\[[0-5]{3}', ""), # %[123 - background colour
(r'\{[0-5]{3}', ""), # {123 - foreground colour
(r'\{\[[0-5]{3}', "") # {[123 - background colour
]
mxp_re = r'\{lc(.*?)\{lt(.*?)\{le'
# prepare regex matching
brightbg_sub = re.compile(r"|".join([re.escape(tup[0]) for tup in ansi_bright_bgs]), re.DOTALL)
xterm256_sub = re.compile(r"|".join([tup[0] for tup in xterm256_map]), re.DOTALL)
ansi_sub = re.compile(r"|".join([re.escape(tup[0]) for tup in ext_ansi_map]), re.DOTALL)
mxp_sub = re.compile(mxp_re, re.DOTALL)
# used by regex replacer to correctly map ansi sequences
ansi_map = dict(ext_ansi_map)
ansi_bright_bgs = dict(ansi_bright_bgs)
# prepare matching ansi codes overall
ansi_re = r"\033\[[0-9;]+m"
ansi_regex = re.compile(ansi_re)
# escapes - these double-chars will be replaced with a single
# instance of each
ansi_escapes = re.compile(r"(%s)" % "|".join(ANSI_ESCAPES), re.DOTALL)
ANSI_PARSER = ANSIParser()
#
# Access function
#
def parse_ansi(string, strip_ansi=False, parser=ANSI_PARSER, xterm256=False, mxp=False):
"""
Parses a string, subbing color codes as needed.
"""
return parser.parse_ansi(string, strip_ansi=strip_ansi, xterm256=xterm256, mxp=mxp)
def strip_raw_ansi(string, parser=ANSI_PARSER):
"""
Remove raw ansi codes from string
"""
return parser.strip_raw_codes(string)
def raw(string):
"""
Escapes a string into a form which won't be colorized by the ansi parser.
"""
return string.replace('{', '{{')
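# Illustrative example (editorial): raw() protects literal markup from the
# parser, e.g. raw("{rHello{n") returns "{{rHello{{n", which parse_ansi() later
# renders as the literal text "{rHello{n" instead of colouring it.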
def group(lst, n):
for i in range(0, len(lst), n):
val = lst[i:i+n]
if len(val) == n:
yield tuple(val)
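# Illustrative example (editorial): group() yields full-size tuples only, so
#
#     >>> list(group([1, 2, 3, 4, 5], 2))
#     [(1, 2), (3, 4)]
#
# the trailing incomplete chunk (5,) is dropped.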
def _spacing_preflight(func):
"""
This wrapper function is used to do some preflight checks on functions used
for padding ANSIStrings.
"""
def wrapped(self, width, fillchar=None):
if fillchar is None:
fillchar = " "
if (len(fillchar) != 1) or (not isinstance(fillchar, basestring)):
raise TypeError("must be char, not %s" % type(fillchar))
if not isinstance(width, int):
raise TypeError("integer argument expected, got %s" % type(width))
difference = width - len(self)
if difference <= 0:
return self
return func(self, width, fillchar, difference)
return wrapped
def _query_super(func_name):
"""
Have the string class handle this with the cleaned string instead of
ANSIString.
"""
def wrapped(self, *args, **kwargs):
return getattr(self.clean(), func_name)(*args, **kwargs)
return wrapped
def _on_raw(func_name):
"""
Like query_super, but makes the operation run on the raw string.
"""
def wrapped(self, *args, **kwargs):
args = list(args)
try:
string = args.pop(0)
if hasattr(string, '_raw_string'):
args.insert(0, string.raw())
else:
args.insert(0, string)
except IndexError:
pass
result = getattr(self._raw_string, func_name)(*args, **kwargs)
if isinstance(result, basestring):
return ANSIString(result, decoded=True)
return result
return wrapped
def _transform(func_name):
"""
Some string functions, like those manipulating capital letters,
return a string the same length as the original. This function
allows us to do the same, replacing all the non-coded characters
with the resulting string.
"""
def wrapped(self, *args, **kwargs):
replacement_string = _query_super(func_name)(self, *args, **kwargs)
to_string = []
char_counter = 0
for index in range(0, len(self._raw_string)):
if index in self._code_indexes:
to_string.append(self._raw_string[index])
elif index in self._char_indexes:
to_string.append(replacement_string[char_counter])
char_counter += 1
return ANSIString(
''.join(to_string), decoded=True,
code_indexes=self._code_indexes, char_indexes=self._char_indexes,
clean_string=replacement_string)
return wrapped
class ANSIMeta(type):
"""
Many functions on ANSIString are just light wrappers around the unicode
    base class. We apply them here, as part of the class's construction.
"""
def __init__(cls, *args, **kwargs):
for func_name in [
'count', 'startswith', 'endswith', 'find', 'index', 'isalnum',
'isalpha', 'isdigit', 'islower', 'isspace', 'istitle', 'isupper',
'rfind', 'rindex', '__len__']:
setattr(cls, func_name, _query_super(func_name))
for func_name in [
'__mod__', 'expandtabs', 'decode', 'replace', 'format',
'encode']:
setattr(cls, func_name, _on_raw(func_name))
for func_name in [
'capitalize', 'translate', 'lower', 'upper', 'swapcase']:
setattr(cls, func_name, _transform(func_name))
super(ANSIMeta, cls).__init__(*args, **kwargs)
class ANSIString(unicode):
"""
String-like object that is aware of ANSI codes.
This isn't especially efficient, as it doesn't really have an
understanding of what the codes mean in order to eliminate
redundant characters. This could be made as an enhancement to ANSI_PARSER.
If one is going to use ANSIString, one should generally avoid converting
away from it until one is about to send information on the wire. This is
because escape sequences in the string may otherwise already be decoded,
and taken literally the second time around.
Please refer to the Metaclass, ANSIMeta, which is used to apply wrappers
for several of the methods that need not be defined directly here.
"""
__metaclass__ = ANSIMeta
def __new__(cls, *args, **kwargs):
"""
When creating a new ANSIString, you may use a custom parser that has
the same attributes as the standard one, and you may declare the
string to be handled as already decoded. It is important not to double
decode strings, as escapes can only be respected once.
        Internally, ANSIString can also pass itself precached code/character
indexes and clean strings to avoid doing extra work when combining
ANSIStrings.
"""
string = args[0]
if not isinstance(string, basestring):
string = to_str(string, force_string=True)
parser = kwargs.get('parser', ANSI_PARSER)
decoded = kwargs.get('decoded', False) or hasattr(string, '_raw_string')
code_indexes = kwargs.pop('code_indexes', None)
char_indexes = kwargs.pop('char_indexes', None)
clean_string = kwargs.pop('clean_string', None)
# All True, or All False, not just one.
checks = map(lambda x: x is None, [code_indexes, char_indexes, clean_string])
if not len(set(checks)) == 1:
raise ValueError("You must specify code_indexes, char_indexes, "
"and clean_string together, or not at all.")
if not all(checks):
decoded = True
if not decoded:
# Completely new ANSI String
clean_string = to_unicode(parser.parse_ansi(string, strip_ansi=True, mxp=True))
string = parser.parse_ansi(string, xterm256=True, mxp=True)
elif clean_string is not None:
# We have an explicit clean string.
pass
elif hasattr(string, '_clean_string'):
# It's already an ANSIString
clean_string = string._clean_string
code_indexes = string._code_indexes
char_indexes = string._char_indexes
string = string._raw_string
else:
# It's a string that has been pre-ansi decoded.
clean_string = parser.strip_raw_codes(string)
if not isinstance(string, unicode):
string = string.decode('utf-8')
ansi_string = super(ANSIString, cls).__new__(ANSIString, to_str(clean_string), "utf-8")
ansi_string._raw_string = string
ansi_string._clean_string = clean_string
ansi_string._code_indexes = code_indexes
ansi_string._char_indexes = char_indexes
return ansi_string
def __str__(self):
return self._raw_string.encode('utf-8')
def __unicode__(self):
"""
Unfortunately, this is not called during print() statements due to a
bug in the Python interpreter. You can always do unicode() or str()
around the resulting ANSIString and print that.
"""
return self._raw_string
def __repr__(self):
"""
Let's make the repr the command that would actually be used to
construct this object, for convenience and reference.
"""
return "ANSIString(%s, decoded=True)" % repr(self._raw_string)
def __init__(self, *_, **kwargs):
"""
When the ANSIString is first initialized, a few internal variables
have to be set.
The first is the parser. It is possible to replace Evennia's standard
        ANSI parser with one that handles your own syntax if you wish, so long as it
implements the same interface.
The second is the _raw_string. It should be noted that the ANSIStrings
are unicode based. This seemed more reasonable than basing it off of
the string class, because if someone were to use a unicode character,
the benefits of knowing the indexes of the ANSI characters would be
negated by the fact that a character within the string might require
more than one byte to be represented. The raw string is, then, a
unicode object rather than a true encoded string. If you need the
encoded string for sending over the wire, try using the .encode()
method.
The third thing to set is the _clean_string. This is a unicode object
that is devoid of all ANSI Escapes.
Finally, _code_indexes and _char_indexes are defined. These are lookup
tables for which characters in the raw string are related to ANSI
escapes, and which are for the readable text.
"""
self.parser = kwargs.pop('parser', ANSI_PARSER)
super(ANSIString, self).__init__()
if self._code_indexes is None:
self._code_indexes, self._char_indexes = self._get_indexes()
@staticmethod
def _shifter(iterable, offset):
"""
Takes a list of integers, and produces a new one incrementing all
by a number.
"""
return [i + offset for i in iterable]
@classmethod
def _adder(cls, first, second):
"""
Joins two ANSIStrings, preserving calculated info.
"""
raw_string = first._raw_string + second._raw_string
clean_string = first._clean_string + second._clean_string
code_indexes = first._code_indexes[:]
char_indexes = first._char_indexes[:]
code_indexes.extend(
cls._shifter(second._code_indexes, len(first._raw_string)))
char_indexes.extend(
cls._shifter(second._char_indexes, len(first._raw_string)))
return ANSIString(raw_string, code_indexes=code_indexes,
char_indexes=char_indexes,
clean_string=clean_string)
def __add__(self, other):
"""
We have to be careful when adding two strings not to reprocess things
that don't need to be reprocessed, lest we end up with escapes being
interpreted literally.
"""
if not isinstance(other, basestring):
return NotImplemented
if not isinstance(other, ANSIString):
other = ANSIString(other)
return self._adder(self, other)
def __radd__(self, other):
"""
Likewise, if we're on the other end.
"""
if not isinstance(other, basestring):
return NotImplemented
if not isinstance(other, ANSIString):
other = ANSIString(other)
return self._adder(other, self)
def __getslice__(self, i, j):
"""
This function is deprecated, so we just make it call the proper
function.
"""
return self.__getitem__(slice(i, j))
def _slice(self, slc):
"""
This function takes a slice() object.
Slices have to be handled specially. Not only are they able to specify
a start and end with [x:y], but many forget that they can also specify
an interval with [x:y:z]. As a result, not only do we have to track
the ANSI Escapes that have played before the start of the slice, we
must also replay any in these intervals, should they exist.
Thankfully, slicing the _char_indexes table gives us the actual
indexes that need slicing in the raw string. We can check between
those indexes to figure out what escape characters need to be
replayed.
"""
slice_indexes = self._char_indexes[slc]
# If it's the end of the string, we need to append final color codes.
if not slice_indexes:
return ANSIString('')
try:
string = self[slc.start]._raw_string
except IndexError:
return ANSIString('')
last_mark = slice_indexes[0]
# Check between the slice intervals for escape sequences.
i = None
for i in slice_indexes[1:]:
for index in xrange(last_mark, i):
if index in self._code_indexes:
string += self._raw_string[index]
last_mark = i
try:
string += self._raw_string[i]
except IndexError:
pass
if i is not None:
append_tail = self._get_interleving(self._char_indexes.index(i) + 1)
else:
append_tail = ''
return ANSIString(string + append_tail, decoded=True)
def __getitem__(self, item):
"""
Gateway for slices and getting specific indexes in the ANSIString. If
this is a regexable ANSIString, it will get the data from the raw
string instead, bypassing ANSIString's intelligent escape skipping,
for reasons explained in the __new__ method's docstring.
"""
if isinstance(item, slice):
# Slices must be handled specially.
return self._slice(item)
try:
self._char_indexes[item]
except IndexError:
raise IndexError("ANSIString Index out of range")
# Get character codes after the index as well.
if self._char_indexes[-1] == self._char_indexes[item]:
append_tail = self._get_interleving(item + 1)
else:
append_tail = ''
item = self._char_indexes[item]
clean = self._raw_string[item]
result = ''
# Get the character they're after, and replay all escape sequences
# previous to it.
for index in xrange(0, item + 1):
if index in self._code_indexes:
result += self._raw_string[index]
return ANSIString(result + clean + append_tail, decoded=True)
def clean(self):
"""
Return a unicode object without the ANSI escapes.
"""
return self._clean_string
def raw(self):
"""
Return a unicode object with the ANSI escapes.
"""
return self._raw_string
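    # Illustrative example (editorial, assuming the default ANSI_PARSER):
    #
    #     >>> s = ANSIString("{rHello{n")
    #     >>> s.clean()
    #     u'Hello'
    #     >>> len(s)          # length ignores the escape sequences
    #     5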
def partition(self, sep, reverse=False):
"""
Similar to split, but always creates a tuple with three items:
1. The part before the separator
2. The separator itself.
3. The part after.
        We use the same techniques we used in split() to make sure each is
colored.
"""
if hasattr(sep, '_clean_string'):
sep = sep.clean()
if reverse:
parent_result = self._clean_string.rpartition(sep)
else:
parent_result = self._clean_string.partition(sep)
current_index = 0
result = tuple()
for section in parent_result:
result += (self[current_index:current_index + len(section)],)
current_index += len(section)
return result
def _get_indexes(self):
"""
Two tables need to be made, one which contains the indexes of all
readable characters, and one which contains the indexes of all ANSI
escapes. It's important to remember that ANSI escapes require more
that one character at a time, though no readable character needs more
than one character, since the unicode base class abstracts that away
from us. However, several readable characters can be placed in a row.
We must use regexes here to figure out where all the escape sequences
are hiding in the string. Then we use the ranges of their starts and
ends to create a final, comprehensive list of all indexes which are
dedicated to code, and all dedicated to text.
It's possible that only one of these tables is actually needed, the
other assumed to be what isn't in the first.
"""
code_indexes = []
for match in self.parser.ansi_regex.finditer(self._raw_string):
code_indexes.extend(range(match.start(), match.end()))
if not code_indexes:
# Plain string, no ANSI codes.
return code_indexes, range(0, len(self._raw_string))
# all indexes not occupied by ansi codes are normal characters
char_indexes = [i for i in range(len(self._raw_string)) if i not in code_indexes]
return code_indexes, char_indexes
def _get_interleving(self, index):
"""
Get the code characters from the given slice end to the next
character.
"""
try:
index = self._char_indexes[index - 1]
except IndexError:
return ''
s = ''
while True:
index += 1
if index in self._char_indexes:
break
elif index in self._code_indexes:
s += self._raw_string[index]
else:
break
return s
def split(self, by, maxsplit=-1):
"""
Stolen from PyPy's pure Python string implementation, tweaked for
ANSIString.
PyPy is distributed under the MIT licence.
http://opensource.org/licenses/MIT
"""
bylen = len(by)
if bylen == 0:
raise ValueError("empty separator")
res = []
start = 0
while maxsplit != 0:
next = self._clean_string.find(by, start)
if next < 0:
break
# Get character codes after the index as well.
res.append(self[start:next])
start = next + bylen
maxsplit -= 1 # NB. if it's already < 0, it stays < 0
res.append(self[start:len(self)])
return res
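    # Illustrative example (editorial): splitting works on the clean text while
    # each piece keeps its own colour codes, e.g.
    #
    #     >>> [p.clean() for p in ANSIString("{rred{n,{ggreen{n").split(",")]
    #     [u'red', u'green']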
def __mul__(self, other):
"""
Multiplication method. Implemented for performance reasons.
"""
if not isinstance(other, int):
return NotImplemented
raw_string = self._raw_string * other
clean_string = self._clean_string * other
code_indexes = self._code_indexes[:]
char_indexes = self._char_indexes[:]
        for i in range(1, other):
code_indexes.extend(
self._shifter(self._code_indexes, i * len(self._raw_string)))
char_indexes.extend(
self._shifter(self._char_indexes, i * len(self._raw_string)))
return ANSIString(
raw_string, code_indexes=code_indexes, char_indexes=char_indexes,
clean_string=clean_string)
def __rmul__(self, other):
return self.__mul__(other)
def rsplit(self, by, maxsplit=-1):
"""
Stolen from PyPy's pure Python string implementation, tweaked for
ANSIString.
PyPy is distributed under the MIT licence.
http://opensource.org/licenses/MIT
"""
res = []
end = len(self)
bylen = len(by)
if bylen == 0:
raise ValueError("empty separator")
while maxsplit != 0:
next = self._clean_string.rfind(by, 0, end)
if next < 0:
break
# Get character codes after the index as well.
res.append(self[next+bylen:end])
end = next
maxsplit -= 1 # NB. if it's already < 0, it stays < 0
res.append(self[:end])
res.reverse()
return res
def join(self, iterable):
"""
Joins together strings in an iterable.
"""
result = ANSIString('')
last_item = None
for item in iterable:
if last_item is not None:
result += self._raw_string
if not isinstance(item, ANSIString):
item = ANSIString(item)
result += item
last_item = item
return result
def _filler(self, char, amount):
"""
Generate a line of characters in a more efficient way than just adding
ANSIStrings.
"""
if not isinstance(char, ANSIString):
line = char * amount
return ANSIString(
char * amount, code_indexes=[], char_indexes=range(0, len(line)),
                clean_string=line)
try:
start = char._code_indexes[0]
except IndexError:
start = None
end = char._char_indexes[0]
prefix = char._raw_string[start:end]
postfix = char._raw_string[end + 1:]
line = char._clean_string * amount
code_indexes = [i for i in range(0, len(prefix))]
length = len(prefix) + len(line)
code_indexes.extend([i for i in range(length, length + len(postfix))])
char_indexes = self._shifter(xrange(0, len(line)), len(prefix))
raw_string = prefix + line + postfix
return ANSIString(
raw_string, clean_string=line, char_indexes=char_indexes,
code_indexes=code_indexes)
@_spacing_preflight
def center(self, width, fillchar, difference):
"""
Center some text with some spaces padding both sides.
"""
remainder = difference % 2
difference /= 2
spacing = self._filler(fillchar, difference)
result = spacing + self + spacing + self._filler(fillchar, remainder)
return result
@_spacing_preflight
def ljust(self, width, fillchar, difference):
"""
Left justify some text.
"""
return self + self._filler(fillchar, difference)
@_spacing_preflight
def rjust(self, width, fillchar, difference):
"""
Right justify some text.
"""
return self._filler(fillchar, difference) + self
| bsd-3-clause | -3,393,983,738,811,242,000 | -3,675,606,640,067,519,000 | 34.100742 | 105 | 0.569819 | false |
VOLTTRON/volttron-applications | kisensum/openadr/openadr/vtn/migrations/0005_auto_20171020_2019.py | 2 | 1165 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-20 20:19
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('vtn', '0004_auto_20171020_2019'),
]
operations = [
migrations.AlterField(
model_name='drevent',
name='last_status_time',
field=models.DateTimeField(blank=True, default=datetime.datetime(2017, 10, 20, 20, 19, 42, 780617, tzinfo=utc), null=True, verbose_name='Last Status Time'),
),
migrations.AlterField(
model_name='siteevent',
name='last_opt_in',
field=models.DateTimeField(blank=True, default=datetime.datetime(2017, 10, 20, 20, 19, 42, 781512, tzinfo=utc), null=True, verbose_name='Last opt-in'),
),
migrations.AlterField(
model_name='siteevent',
name='last_status_time',
field=models.DateTimeField(default=datetime.datetime(2017, 10, 20, 20, 19, 42, 781464, tzinfo=utc), verbose_name='Last Status Time'),
),
]
| bsd-3-clause | -7,614,420,583,790,604,000 | 4,476,983,423,673,133,600 | 35.40625 | 168 | 0.624034 | false |
botify-labs/simpleflow | simpleflow/swf/process/worker/base.py | 1 | 13859 | import json
import multiprocessing
import os
import sys
import traceback
import uuid
from base64 import b64decode
import psutil
import swf.actors
import swf.exceptions
from simpleflow import format, logger, settings
from simpleflow.dispatch import dynamic_dispatcher
from simpleflow.download import download_binaries
from simpleflow.exceptions import ExecutionError
from simpleflow.job import KubernetesJob
from simpleflow.process import Supervisor, with_state
from simpleflow.swf.constants import VALID_PROCESS_MODES
from simpleflow.swf.process import Poller
from simpleflow.swf.task import ActivityTask
from simpleflow.swf.utils import sanitize_activity_context
from simpleflow.utils import format_exc, format_exc_type, json_dumps, to_k8s_identifier
from swf.models import ActivityTask as BaseActivityTask
from swf.responses import Response
class Worker(Supervisor):
def __init__(self, poller, nb_children=None):
self._poller = poller
super(Worker, self).__init__(
payload=self._poller.start, nb_children=nb_children,
)
class ActivityPoller(Poller, swf.actors.ActivityWorker):
"""
Polls an activity and handles it in the worker.
"""
def __init__(
self,
domain,
task_list,
middlewares=None,
heartbeat=60,
process_mode=None,
poll_data=None,
):
"""
:param domain:
:type domain:
:param task_list:
:type task_list:
:param middlewares: Paths to middleware functions to execute before and after any Activity
:type middlewares: Optional[Dict[str, str]]
:param heartbeat:
:type heartbeat:
:param process_mode: Whether to process locally (default) or spawn a Kubernetes job.
:type process_mode: Optional[str]
"""
self.nb_retries = 3
# heartbeat=0 is a special value to disable heartbeating. We want to
# replace it by None because multiprocessing.Process.join() treats
# this as "no timeout"
self._heartbeat = heartbeat or None
self.middlewares = middlewares
self.process_mode = process_mode or "local"
assert (
self.process_mode in VALID_PROCESS_MODES
), 'invalid process_mode "{}"'.format(self.process_mode)
self.poll_data = poll_data
super(ActivityPoller, self).__init__(domain, task_list)
@property
def name(self):
return "{}(task_list={})".format(self.__class__.__name__, self.task_list,)
@with_state("polling")
def poll(self, task_list=None, identity=None):
if self.poll_data:
# the poll data has been passed as input
return self.fake_poll()
else:
# we need to poll SWF's PollForActivityTask
return swf.actors.ActivityWorker.poll(self, task_list, identity)
def fake_poll(self):
polled_activity_data = json.loads(b64decode(self.poll_data))
activity_task = BaseActivityTask.from_poll(
self.domain, self.task_list, polled_activity_data,
)
return Response(
task_token=activity_task.task_token,
activity_task=activity_task,
raw_response=polled_activity_data,
)
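    # Editorial note (assumption based on fake_poll above, not upstream docs):
    # poll_data is expected to be the base64-encoded JSON of a raw
    # PollForActivityTask response, e.g. built along the lines of
    #
    #     from base64 import b64encode
    #     poll_data = b64encode(json_dumps(raw_swf_response).encode("utf-8"))
    #
    # so the poller can rebuild the ActivityTask without calling SWF.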
@with_state("processing")
def process(self, response):
"""
Process a swf.actors.ActivityWorker poll response..
:param response:
:type response: swf.responses.Response
"""
token = response.task_token
task = response.activity_task
if self.process_mode == "kubernetes":
try:
spawn_kubernetes_job(self, response.raw_response)
except Exception as err:
logger.exception("spawn_kubernetes_job error")
reason = "cannot spawn kubernetes job for task {}: {} {}".format(
task.activity_id, err.__class__.__name__, err,
)
self.fail_with_retry(token, task, reason)
else:
spawn(self, token, task, self.middlewares, self._heartbeat)
@with_state("completing")
def complete(self, token, result=None):
swf.actors.ActivityWorker.complete(self, token, result)
# noinspection PyMethodOverriding
@with_state("failing")
def fail(self, token, task, reason=None, details=None):
"""
Fail the activity, log and ignore exceptions.
:param token:
:type token:
:param task:
:type task:
:param reason:
:type reason:
:param details:
:type details:
:return:
:rtype:
"""
try:
return swf.actors.ActivityWorker.fail(
self, token, reason=reason, details=details,
)
except Exception as err:
logger.error(
"cannot fail task {}: {}".format(task.activity_type.name, err,)
)
@property
def identity(self):
if self.process_mode == "kubernetes":
self.job_name = "{}--{}".format(
to_k8s_identifier(self.task_list), str(uuid.uuid4())
)
return json_dumps(
{
"cluster": os.environ["K8S_CLUSTER"],
"namespace": os.environ["K8S_NAMESPACE"],
"job": self.job_name,
}
)
else:
return super(ActivityPoller, self).identity
class ActivityWorker(object):
def __init__(self, dispatcher=None):
self._dispatcher = dispatcher or dynamic_dispatcher.Dispatcher()
def dispatch(self, task):
"""
:param task:
:type task: swf.models.ActivityTask
:return:
:rtype: simpleflow.activity.Activity
"""
name = task.activity_type.name
return self._dispatcher.dispatch_activity(name)
def process(self, poller, token, task, middlewares=None):
"""
:param poller:
:type poller: ActivityPoller
:param token:
:type token: str
:param task:
:type task: swf.models.ActivityTask
:param middlewares: Paths to middleware functions to execute before and after any Activity
:type middlewares: Optional[Dict[str, str]]
"""
logger.debug("ActivityWorker.process() pid={}".format(os.getpid()))
try:
activity = self.dispatch(task)
input = format.decode(task.input)
args = input.get("args", ())
kwargs = input.get("kwargs", {})
context = sanitize_activity_context(task.context)
context["domain_name"] = poller.domain.name
if input.get("meta", {}).get("binaries"):
download_binaries(input["meta"]["binaries"])
result = ActivityTask(
activity,
*args,
context=context,
simpleflow_middlewares=middlewares,
**kwargs
).execute()
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.exception("process error: {}".format(str(exc_value)))
if isinstance(exc_value, ExecutionError) and len(exc_value.args):
details = exc_value.args[0]
reason = format_exc(exc_value) # FIXME json.loads and rebuild?
else:
tb = traceback.format_tb(exc_traceback)
reason = format_exc(exc_value)
details = json_dumps(
{
"error": exc_type.__name__,
"error_type": format_exc_type(exc_type),
"message": str(exc_value),
"traceback": tb,
},
default=repr,
)
return poller.fail_with_retry(token, task, reason=reason, details=details)
try:
logger.info("completing activity")
poller.complete_with_retry(token, result)
except Exception as err:
logger.exception("complete error")
reason = "cannot complete task {}: {} {}".format(
task.activity_id, err.__class__.__name__, err,
)
poller.fail_with_retry(token, task, reason)
def process_task(poller, token, task, middlewares=None):
"""
:param poller:
:type poller: ActivityPoller
:param token:
:type token: str
:param task:
:type task: swf.models.ActivityTask
:param middlewares: Paths to middleware functions to execute before and after any Activity
:type middlewares: Optional[Dict[str, str]]
"""
logger.debug("process_task() pid={}".format(os.getpid()))
format.JUMBO_FIELDS_MEMORY_CACHE.clear()
worker = ActivityWorker()
worker.process(poller, token, task, middlewares)
def spawn_kubernetes_job(poller, swf_response):
logger.info("scheduling new kubernetes job name={}".format(poller.job_name))
job = KubernetesJob(poller.job_name, poller.domain.name, swf_response)
job.schedule()
def reap_process_tree(pid, wait_timeout=settings.ACTIVITY_SIGTERM_WAIT_SEC):
"""
TERMinates (and KILLs) if necessary a process and its descendants.
See also: https://psutil.readthedocs.io/en/latest/#kill-process-tree.
:param pid: Process ID
:type pid: int
:param wait_timeout: Wait timeout
:type wait_timeout: float
"""
def on_terminate(p):
logger.info("process: terminated pid={} retcode={}".format(p.pid, p.returncode))
if pid == os.getpid():
raise RuntimeError("process: cannot terminate self!")
parent = psutil.Process(pid)
procs = parent.children(recursive=True)
procs.append(parent)
# Terminate
for p in procs:
try:
p.terminate()
except psutil.NoSuchProcess:
pass
_, alive = psutil.wait_procs(procs, timeout=wait_timeout, callback=on_terminate)
# Kill
for p in alive:
logger.warning(
"process: pid={} status={} did not respond to SIGTERM. Trying SIGKILL".format(
p.pid, p.status()
)
)
try:
p.kill()
except psutil.NoSuchProcess:
pass
# Check
_, alive = psutil.wait_procs(alive)
for p in alive:
logger.error(
"process: pid={} status={} still alive. Giving up!".format(
p.pid, p.status()
)
)
def spawn(poller, token, task, middlewares=None, heartbeat=60):
"""
Spawn a process and wait for it to end, sending heartbeats to SWF.
On activity timeouts and termination, we reap the worker process and its
children.
:param poller:
:type poller: ActivityPoller
:param token:
:type token: str
:param task:
:type task: swf.models.ActivityTask
:param middlewares: Paths to middleware functions to execute before and after any Activity
:type middlewares: Optional[Dict[str, str]]
:param heartbeat: heartbeat delay (seconds)
:type heartbeat: int
"""
logger.info(
"spawning new activity worker pid={} heartbeat={}".format(
os.getpid(), heartbeat
)
)
worker = multiprocessing.Process(
target=process_task, args=(poller, token, task, middlewares)
)
worker.start()
def worker_alive():
return psutil.pid_exists(worker.pid)
while worker_alive():
worker.join(timeout=heartbeat)
if not worker_alive():
# Most certainly unneeded: we'll see
if worker.exitcode is None:
# race condition, try and re-join
worker.join(timeout=0)
if worker.exitcode is None:
logger.warning(
"process {} is dead but multiprocessing doesn't know it (simpleflow bug)".format(
worker.pid
)
)
if worker.exitcode != 0:
poller.fail_with_retry(
token,
task,
reason="process {} died: exit code {}".format(
worker.pid, worker.exitcode
),
)
return
try:
logger.debug("heartbeating for pid={} (token={})".format(worker.pid, token))
response = poller.heartbeat(token)
except swf.exceptions.DoesNotExistError as error:
# Either the task or the workflow execution no longer exists,
# let's kill the worker process.
logger.warning("heartbeat failed: {}".format(error))
logger.warning("killing (KILL) worker with pid={}".format(worker.pid))
reap_process_tree(worker.pid)
return
except swf.exceptions.RateLimitExceededError as error:
# ignore rate limit errors: high chances the next heartbeat will be
# ok anyway, so it would be stupid to break the task for that
logger.warning(
'got a "ThrottlingException / Rate exceeded" when heartbeating for task {}: {}'.format(
task.activity_type.name, error
)
)
continue
except Exception as error:
# Let's crash if it cannot notify the heartbeat failed. The
# subprocess will become orphan and the heartbeat timeout may
# eventually trigger on Amazon SWF side.
logger.error(
"cannot send heartbeat for task {}: {}".format(
task.activity_type.name, error
)
)
raise
# Task cancelled.
if response and response.get("cancelRequested"):
reap_process_tree(worker.pid)
return
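# Editorial note derived from the code above: passing heartbeat=0 to
# ActivityPoller disables heartbeating entirely -- the value is normalised to
# None in __init__, so worker.join() blocks without a timeout and no heartbeat
# requests are sent to SWF while the task runs.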
| mit | -2,365,023,812,107,335,000 | 8,340,808,467,819,933,000 | 33.389578 | 105 | 0.579696 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamlink.base/resources/lib/streamlink/utils/six.py | 172 | 30888 | # Copyright (c) 2010-2017 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.11.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
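# Illustrative usage (editorial; assumes the module is importable as ``six``):
#
#     >>> from six.moves import range
#     >>> list(range(3))       # xrange on Python 2, range on Python 3
#     [0, 1, 2]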
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
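# Example (illustrative only): inside a unittest.TestCase, these wrappers invoke
# the assertion under its correct name for the running Python version.
#
#     assertCountEqual(self, [1, 2, 2], [2, 1, 2])
#     assertRegex(self, "abc123", r"\d+")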
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
try:
if from_value is None:
raise value
raise value from from_value
finally:
value = None
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
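# Example (illustrative only, not in the original module): two equivalent ways to
# attach a metaclass so the class definition runs on both Python 2 and Python 3.
#
#     class Meta(type):
#         pass
#
#     class Base(with_metaclass(Meta, object)):   # via a temporary dummy metaclass
#         pass
#
#     @add_metaclass(Meta)                        # via a class decorator
#     class Other(object):
#         pass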
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
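# Example (illustrative only): define a single __str__ returning text; under
# Python 2 the decorator moves it to __unicode__ and adds a UTF-8 __str__.
#
#     @python_2_unicode_compatible
#     class Greeting(object):
#         def __str__(self):
#             return u"hello"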
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-2.0 | -9,085,427,299,719,916,000 | -3,023,431,310,077,839,400 | 33.666667 | 98 | 0.629468 | false |
lokirius/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_loading.py | 52 | 4097 | from ctypes import *
import sys, unittest
import os
from ctypes.util import find_library
from ctypes.test import is_resource_enabled
libc_name = None
if os.name == "nt":
libc_name = find_library("c")
elif os.name == "ce":
libc_name = "coredll"
elif sys.platform == "cygwin":
libc_name = "cygwin1.dll"
else:
libc_name = find_library("c")
if is_resource_enabled("printing"):
print("libc_name is", libc_name)
class LoaderTest(unittest.TestCase):
unknowndll = "xxrandomnamexx"
if libc_name is not None:
def test_load(self):
CDLL(libc_name)
CDLL(os.path.basename(libc_name))
self.assertRaises(OSError, CDLL, self.unknowndll)
if libc_name is not None and os.path.basename(libc_name) == "libc.so.6":
def test_load_version(self):
cdll.LoadLibrary("libc.so.6")
# linux uses version, libc 9 should not exist
self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9")
self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll)
def test_find(self):
for name in ("c", "m"):
lib = find_library(name)
if lib:
cdll.LoadLibrary(lib)
CDLL(lib)
if os.name in ("nt", "ce"):
def test_load_library(self):
self.assertFalse(libc_name is None)
if is_resource_enabled("printing"):
print(find_library("kernel32"))
print(find_library("user32"))
if os.name == "nt":
windll.kernel32.GetModuleHandleW
windll["kernel32"].GetModuleHandleW
windll.LoadLibrary("kernel32").GetModuleHandleW
WinDLL("kernel32").GetModuleHandleW
elif os.name == "ce":
windll.coredll.GetModuleHandleW
windll["coredll"].GetModuleHandleW
windll.LoadLibrary("coredll").GetModuleHandleW
WinDLL("coredll").GetModuleHandleW
def test_load_ordinal_functions(self):
import _ctypes_test
dll = WinDLL(_ctypes_test.__file__)
# We load the same function both via ordinal and name
func_ord = dll[2]
func_name = dll.GetString
# addressof gets the address where the function pointer is stored
a_ord = addressof(func_ord)
a_name = addressof(func_name)
f_ord_addr = c_void_p.from_address(a_ord).value
f_name_addr = c_void_p.from_address(a_name).value
self.assertEqual(hex(f_ord_addr), hex(f_name_addr))
self.assertRaises(AttributeError, dll.__getitem__, 1234)
if os.name == "nt":
def test_1703286_A(self):
from _ctypes import LoadLibrary, FreeLibrary
# On winXP 64-bit, advapi32 loads at an address that does
# NOT fit into a 32-bit integer. FreeLibrary must be able
# to accept this address.
# These are tests for http://www.python.org/sf/1703286
handle = LoadLibrary("advapi32")
FreeLibrary(handle)
def test_1703286_B(self):
# Since on winXP 64-bit advapi32 loads like described
# above, the (arbitrarily selected) CloseEventLog function
# also has a high address. 'call_function' should accept
# addresses so large.
from _ctypes import call_function
advapi32 = windll.advapi32
# Calling CloseEventLog with a NULL argument should fail,
# but the call should not segfault or so.
self.assertEqual(0, advapi32.CloseEventLog(None))
windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p
windll.kernel32.GetProcAddress.restype = c_void_p
proc = windll.kernel32.GetProcAddress(advapi32._handle, b"CloseEventLog")
self.assertTrue(proc)
# This is the real test: call the function via 'call_function'
self.assertEqual(0, call_function(proc, (None,)))
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 9,057,814,960,649,696,000 | -2,166,850,809,884,197,400 | 37.650943 | 85 | 0.594581 | false |
brian-yang/mozillians | vendor-local/lib/python/django_filters/filterset.py | 16 | 15216 | from __future__ import absolute_import
from __future__ import unicode_literals
import types
import copy
from django import forms
from django.core.validators import EMPTY_VALUES
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from sys import version_info
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.db.models.sql.constants import LOOKUP_SEP # noqa
try:
from collections import OrderedDict
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.utils.datastructures import SortedDict as OrderedDict # noqa
try:
from django.db.models.related import RelatedObject as ForeignObjectRel
except ImportError: # pragma: nocover
# Django >= 1.8 replaces RelatedObject with ForeignObjectRel
from django.db.models.fields.related import ForeignObjectRel
from .filters import (Filter, CharFilter, BooleanFilter,
ChoiceFilter, DateFilter, DateTimeFilter, TimeFilter, ModelChoiceFilter,
ModelMultipleChoiceFilter, NumberFilter)
ORDER_BY_FIELD = 'o'
# There is a bug with deepcopy in 2.6, patch if we are running python < 2.7
# http://bugs.python.org/issue1515
if version_info < (2, 7, 0):
def _deepcopy_method(x, memo):
return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
def get_declared_filters(bases, attrs, with_base_filters=True):
filters = []
for filter_name, obj in list(attrs.items()):
if isinstance(obj, Filter):
obj = attrs.pop(filter_name)
if getattr(obj, 'name', None) is None:
obj.name = filter_name
filters.append((filter_name, obj))
filters.sort(key=lambda x: x[1].creation_counter)
if with_base_filters:
for base in bases[::-1]:
if hasattr(base, 'base_filters'):
filters = list(base.base_filters.items()) + filters
else:
for base in bases[::-1]:
if hasattr(base, 'declared_filters'):
filters = list(base.declared_filters.items()) + filters
return OrderedDict(filters)
def get_model_field(model, f):
parts = f.split(LOOKUP_SEP)
opts = model._meta
for name in parts[:-1]:
try:
rel = opts.get_field_by_name(name)[0]
except FieldDoesNotExist:
return None
if isinstance(rel, ForeignObjectRel):
model = rel.model
opts = rel.opts
else:
model = rel.rel.to
opts = model._meta
try:
rel, model, direct, m2m = opts.get_field_by_name(parts[-1])
except FieldDoesNotExist:
return None
return rel
def filters_for_model(model, fields=None, exclude=None, filter_for_field=None,
filter_for_reverse_field=None):
field_dict = OrderedDict()
opts = model._meta
if fields is None:
fields = [f.name for f in sorted(opts.fields + opts.many_to_many)
if not isinstance(f, models.AutoField)]
# Loop through the list of fields.
for f in fields:
# Skip the field if excluded.
if exclude is not None and f in exclude:
continue
field = get_model_field(model, f)
# Do nothing if the field doesn't exist.
if field is None:
field_dict[f] = None
continue
if isinstance(field, ForeignObjectRel):
filter_ = filter_for_reverse_field(field, f)
if filter_:
field_dict[f] = filter_
# If fields is a dictionary, it must contain lists.
elif isinstance(fields, dict):
# Create a filter for each lookup type.
for lookup_type in fields[f]:
filter_ = filter_for_field(field, f, lookup_type)
if filter_:
filter_name = f
# Don't add "exact" to filter names
if lookup_type != 'exact':
filter_name = f + LOOKUP_SEP + lookup_type
field_dict[filter_name] = filter_
# If fields is a list, it contains strings.
else:
filter_ = filter_for_field(field, f)
if filter_:
field_dict[f] = filter_
return field_dict
class FilterSetOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.order_by = getattr(options, 'order_by', False)
self.form = getattr(options, 'form', forms.Form)
class FilterSetMetaclass(type):
def __new__(cls, name, bases, attrs):
try:
parents = [b for b in bases if issubclass(b, FilterSet)]
except NameError:
# We are defining FilterSet itself here
parents = None
declared_filters = get_declared_filters(bases, attrs, False)
new_class = super(
FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
if not parents:
return new_class
opts = new_class._meta = FilterSetOptions(
getattr(new_class, 'Meta', None))
if opts.model:
filters = filters_for_model(opts.model, opts.fields, opts.exclude,
new_class.filter_for_field,
new_class.filter_for_reverse_field)
filters.update(declared_filters)
else:
filters = declared_filters
if None in filters.values():
raise TypeError("Meta.fields contains a field that isn't defined "
"on this FilterSet")
new_class.declared_filters = declared_filters
new_class.base_filters = filters
return new_class
FILTER_FOR_DBFIELD_DEFAULTS = {
models.AutoField: {
'filter_class': NumberFilter
},
models.CharField: {
'filter_class': CharFilter
},
models.TextField: {
'filter_class': CharFilter
},
models.BooleanField: {
'filter_class': BooleanFilter
},
models.DateField: {
'filter_class': DateFilter
},
models.DateTimeField: {
'filter_class': DateTimeFilter
},
models.TimeField: {
'filter_class': TimeFilter
},
models.OneToOneField: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name,
}
},
models.ForeignKey: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name
}
},
models.ManyToManyField: {
'filter_class': ModelMultipleChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
}
},
models.DecimalField: {
'filter_class': NumberFilter,
},
models.SmallIntegerField: {
'filter_class': NumberFilter,
},
models.IntegerField: {
'filter_class': NumberFilter,
},
models.PositiveIntegerField: {
'filter_class': NumberFilter,
},
models.PositiveSmallIntegerField: {
'filter_class': NumberFilter,
},
models.FloatField: {
'filter_class': NumberFilter,
},
models.NullBooleanField: {
'filter_class': BooleanFilter,
},
models.SlugField: {
'filter_class': CharFilter,
},
models.EmailField: {
'filter_class': CharFilter,
},
models.FilePathField: {
'filter_class': CharFilter,
},
models.URLField: {
'filter_class': CharFilter,
},
models.IPAddressField: {
'filter_class': CharFilter,
},
models.CommaSeparatedIntegerField: {
'filter_class': CharFilter,
},
}
class BaseFilterSet(object):
filter_overrides = {}
order_by_field = ORDER_BY_FIELD
strict = True
def __init__(self, data=None, queryset=None, prefix=None, strict=None):
self.is_bound = data is not None
self.data = data or {}
if queryset is None:
queryset = self._meta.model._default_manager.all()
self.queryset = queryset
self.form_prefix = prefix
if strict is not None:
self.strict = strict
self.filters = copy.deepcopy(self.base_filters)
# propagate the model being used through the filters
for filter_ in self.filters.values():
filter_.model = self._meta.model
# Apply the parent to the filters, this will allow the filters to access the filterset
for filter_key, filter_ in six.iteritems(self.filters):
filter_.parent = self
def __iter__(self):
for obj in self.qs:
yield obj
def __len__(self):
return len(self.qs)
def __getitem__(self, key):
return self.qs[key]
@property
def qs(self):
if not hasattr(self, '_qs'):
valid = self.is_bound and self.form.is_valid()
if self.strict and self.is_bound and not valid:
self._qs = self.queryset.none()
return self._qs
# start with all the results and filter from there
qs = self.queryset.all()
for name, filter_ in six.iteritems(self.filters):
value = None
if valid:
value = self.form.cleaned_data[name]
else:
raw_value = self.form[name].value()
try:
value = self.form.fields[name].clean(raw_value)
except forms.ValidationError:
# for invalid values either:
# strictly "apply" filter yielding no results and get outta here
if self.strict:
self._qs = self.queryset.none()
return self._qs
else: # or ignore this filter altogether
pass
if value is not None: # valid & clean data
qs = filter_.filter(qs, value)
if self._meta.order_by:
order_field = self.form.fields[self.order_by_field]
data = self.form[self.order_by_field].data
ordered_value = None
try:
ordered_value = order_field.clean(data)
except forms.ValidationError:
pass
if ordered_value in EMPTY_VALUES and self.strict:
ordered_value = self.form.fields[self.order_by_field].choices[0][0]
if ordered_value:
qs = qs.order_by(*self.get_order_by(ordered_value))
self._qs = qs
return self._qs
def count(self):
return self.qs.count()
@property
def form(self):
if not hasattr(self, '_form'):
fields = OrderedDict([
(name, filter_.field)
for name, filter_ in six.iteritems(self.filters)])
fields[self.order_by_field] = self.ordering_field
Form = type(str('%sForm' % self.__class__.__name__),
(self._meta.form,), fields)
if self.is_bound:
self._form = Form(self.data, prefix=self.form_prefix)
else:
self._form = Form(prefix=self.form_prefix)
return self._form
def get_ordering_field(self):
if self._meta.order_by:
if isinstance(self._meta.order_by, (list, tuple)):
if isinstance(self._meta.order_by[0], (list, tuple)):
# e.g. (('field', 'Display name'), ...)
choices = [(f[0], f[1]) for f in self._meta.order_by]
else:
choices = [(f, _('%s (descending)' % capfirst(f[1:])) if f[0] == '-' else capfirst(f))
for f in self._meta.order_by]
else:
# add asc and desc field names
# use the filter's label if provided
choices = []
for f, fltr in self.filters.items():
choices.extend([
(fltr.name or f, fltr.label or capfirst(f)),
("-%s" % (fltr.name or f), _('%s (descending)' % (fltr.label or capfirst(f))))
])
return forms.ChoiceField(label=_("Ordering"), required=False,
choices=choices)
@property
def ordering_field(self):
if not hasattr(self, '_ordering_field'):
self._ordering_field = self.get_ordering_field()
return self._ordering_field
def get_order_by(self, order_choice):
return [order_choice]
@classmethod
def filter_for_field(cls, f, name, lookup_type='exact'):
filter_for_field = dict(FILTER_FOR_DBFIELD_DEFAULTS)
filter_for_field.update(cls.filter_overrides)
default = {
'name': name,
'label': capfirst(f.verbose_name),
'lookup_type': lookup_type
}
if f.choices:
default['choices'] = f.choices
return ChoiceFilter(**default)
data = filter_for_field.get(f.__class__)
if data is None:
# could be a derived field, inspect parents
for class_ in f.__class__.mro():
# skip if class_ is models.Field or object
# 1st item in mro() is original class
if class_ in (f.__class__, models.Field, object):
continue
data = filter_for_field.get(class_)
if data:
break
if data is None:
return
filter_class = data.get('filter_class')
default.update(data.get('extra', lambda f: {})(f))
if filter_class is not None:
return filter_class(**default)
@classmethod
def filter_for_reverse_field(cls, f, name):
rel = f.field.rel
queryset = f.field.model._default_manager.all()
default = {
'name': name,
'label': capfirst(rel.related_name),
'queryset': queryset,
}
if rel.multiple:
return ModelMultipleChoiceFilter(**default)
else:
return ModelChoiceFilter(**default)
class FilterSet(six.with_metaclass(FilterSetMetaclass, BaseFilterSet)):
pass
def filterset_factory(model):
meta = type(str('Meta'), (object,), {'model': model})
filterset = type(str('%sFilterSet' % model._meta.object_name),
(FilterSet,), {'Meta': meta})
return filterset
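# Illustrative usage (a sketch only, not part of this module; "Product" is an
# assumed example model):
#
#     class ProductFilter(FilterSet):
#         class Meta:
#             model = Product
#             fields = ['name', 'price']        # or {'price': ['lt', 'gt']}
#
#     f = ProductFilter(request.GET, queryset=Product.objects.all())
#     f.qs     # the filtered queryset
#     f.form   # a Django form with one field per filter
#
# filterset_factory(Product) builds an equivalent FilterSet class on the fly.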
| bsd-3-clause | -923,876,503,825,238,400 | -3,825,199,842,829,248,500 | 32.515419 | 106 | 0.558228 | false |
beni55/furious.js | configure.py | 3 | 4556 | #!/usr/bin/python
from __future__ import print_function
import optparse
import os
import sys
import glob
import ninja_syntax
def replace_ext(filename, ext):
return os.path.splitext(filename)[0] + ext
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("--with-protoc-c", dest="protoc_c", default="protoc-c")
parser.add_option("--with-nacl-sdk", dest="nacl_sdk", default=os.getenv("NACL_SDK_ROOT"))
options, _ = parser.parse_args()
root_dir = os.path.dirname(os.path.abspath(__file__))
with open('build.ninja', 'w') as buildfile:
ninja = ninja_syntax.Writer(buildfile)
# Variables
ninja.variable('nacl_sdk_dir', options.nacl_sdk)
if sys.platform == 'win32':
ninja.variable('pnacl_toolchain_dir', '$nacl_sdk_dir/toolchain/win_pnacl')
ninja.variable('pnacl_cc', '$pnacl_toolchain_dir/bin/pnacl-clang.bat')
ninja.variable('pnacl_cxx', '$pnacl_toolchain_dir/bin/pnacl-clang++.bat')
ninja.variable('pnacl_finalize', '$pnacl_toolchain_dir/bin/pnacl-finalize.bat')
elif sys.platform == 'linux2':
ninja.variable('pnacl_toolchain_dir', '$nacl_sdk_dir/toolchain/linux_pnacl')
ninja.variable('pnacl_cc', '$pnacl_toolchain_dir/bin/pnacl-clang')
ninja.variable('pnacl_cxx', '$pnacl_toolchain_dir/bin/pnacl-clang++')
ninja.variable('pnacl_finalize', '$pnacl_toolchain_dir/bin/pnacl-finalize')
elif sys.platform == 'darwin':
ninja.variable('pnacl_toolchain_dir', '$nacl_sdk_dir/toolchain/mac_pnacl')
ninja.variable('pnacl_cc', '$pnacl_toolchain_dir/bin/pnacl-clang')
ninja.variable('pnacl_cxx', '$pnacl_toolchain_dir/bin/pnacl-clang++')
ninja.variable('pnacl_finalize', '$pnacl_toolchain_dir/bin/pnacl-finalize')
else:
print("Unsupported platform: " + sys.platform, file=sys.stderr)
exit(1)
ninja.variable('protoc_c', options.protoc_c)
# Rules
ninja.rule('COMPILE_PNACL_C', '$pnacl_cc -o $out -c $in -MMD -MF $out.d $optflags $cflags',
deps='gcc', depfile='$out.d',
description='CC[PNaCl] $in')
ninja.rule('LINK_PNACL_C', '$pnacl_cc -o $out $in $ldflags',
description='CCLD[PNaCl] $out')
ninja.rule('FINALIZE_PNACL', '$pnacl_finalize -o $out $in',
description='FINALIZE[PNaCl] $out')
ninja.rule('PROTOC_C', '$protoc_c --proto_path=$indir --c_out=$outdir $in',
description='PROTOC[CXX] $in')
# Build targets
proto_dir = os.path.join(root_dir, "protobuf")
proto_sources = [os.path.join(proto_dir, path) for path in glob.glob(os.path.join(proto_dir, "*.proto"))]
c_source_dir = os.path.join(root_dir, "lib", "nacl")
c_build_dir = os.path.join(root_dir, "build", "nacl")
c_sources = [os.path.join(root_dir, path) for path in glob.glob(os.path.join(c_source_dir, "*.c"))]
c_objects = [os.path.join(c_build_dir, replace_ext(os.path.relpath(path, c_source_dir), ".bc")) for path in c_sources]
c_proto_sources = [os.path.join(c_source_dir, replace_ext(os.path.relpath(path, proto_dir), ".pb-c.c")) for path in proto_sources]
c_proto_headers = [os.path.join(c_source_dir, replace_ext(os.path.relpath(path, proto_dir), ".pb-c.h")) for path in proto_sources]
c_proto_objects = [os.path.join(c_build_dir, replace_ext(os.path.relpath(path, c_source_dir), ".bc")) for path in c_proto_sources]
for proto_source, c_source, c_header, c_object in zip(proto_sources, c_proto_sources, c_proto_headers, c_proto_objects):
ninja.build([c_source, c_header], "PROTOC_C", proto_source,
variables={'indir': proto_dir, 'outdir': c_source_dir})
if c_source not in c_sources:
c_sources.append(c_source)
c_objects.append(c_object)
for source, object in zip(c_sources, c_objects):
ninja.build(object, 'COMPILE_PNACL_C', source,
variables={'optflags': '-O3',
'cflags': '-I$nacl_sdk_dir/include -pthread -g -std=gnu99 -Wno-long-long -Wall -Werror -Wno-unused-variable -Wno-error=unused-function'})
ninja.build(os.path.join(root_dir, 'furious.bc'), 'LINK_PNACL_C', c_objects,
variables={'ldflags': '-L$nacl_sdk_dir/lib/pnacl/Release -lppapi -lm -lprotobuf-c'})
ninja.build(os.path.join(root_dir, 'furious.pexe'), 'FINALIZE_PNACL', os.path.join(root_dir, 'furious.bc'))
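        # Typical invocation (assumed, shown for illustration only): generate
        # build.ninja with this script, then run ninja to produce furious.pexe.
        #
        #     python configure.py --with-nacl-sdk="$NACL_SDK_ROOT" --with-protoc-c=protoc-c
        #     ninja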
| mit | -7,577,135,738,862,632,000 | 9,191,265,881,437,711,000 | 56.670886 | 157 | 0.618964 | false |
stbuehler/pdns | regression-tests.dnsdist/test_Routing.py | 1 | 12809 | #!/usr/bin/env python
import threading
import time
import dns
from dnsdisttests import DNSDistTest
class TestRoutingPoolRouting(DNSDistTest):
_config_template = """
newServer{address="127.0.0.1:%s", pool="real"}
addAction(makeRule("poolaction.routing.tests.powerdns.com"), PoolAction("real"))
"""
def testPolicyPoolAction(self):
"""
Routing: Set pool by qname via PoolAction
Send an A query to "poolaction.routing.tests.powerdns.com.",
check that dnsdist routes the query to the "real" pool.
"""
name = 'poolaction.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
response = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
response.answer.append(rrset)
(receivedQuery, receivedResponse) = self.sendUDPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
(receivedQuery, receivedResponse) = self.sendTCPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
def testDefaultPool(self):
"""
        Routing: Unmatched qname goes to the (empty) default pool
Send an A query to "notpool.routing.tests.powerdns.com.",
check that dnsdist sends no response (no servers
in the default pool).
"""
name = 'notpool.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
(_, receivedResponse) = self.sendUDPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, None)
(_, receivedResponse) = self.sendTCPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, None)
class TestRoutingQPSPoolRouting(DNSDistTest):
_config_template = """
newServer{address="127.0.0.1:%s", pool="regular"}
addAction(makeRule("qpspoolaction.routing.tests.powerdns.com"), QPSPoolAction(10, "regular"))
"""
def testQPSPoolAction(self):
"""
Routing: Set pool by QPS via action
Send queries to "qpspoolaction.routing.tests.powerdns.com."
check that dnsdist does not route the query to the "regular" pool
when the max QPS has been reached.
"""
maxQPS = 10
name = 'qpspoolaction.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
response = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
response.answer.append(rrset)
for _ in range(maxQPS):
(receivedQuery, receivedResponse) = self.sendUDPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
# we should now be sent to the "abuse" pool which is empty,
# so the queries should be dropped
(_, receivedResponse) = self.sendUDPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, None)
time.sleep(1)
# again, over TCP this time
for _ in range(maxQPS):
(receivedQuery, receivedResponse) = self.sendTCPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
(_, receivedResponse) = self.sendTCPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, None)
class TestRoutingRoundRobinLB(DNSDistTest):
_testServer2Port = 5351
_config_params = ['_testServerPort', '_testServer2Port']
_config_template = """
setServerPolicy(roundrobin)
s1 = newServer{address="127.0.0.1:%s"}
s1:setUp()
s2 = newServer{address="127.0.0.1:%s"}
s2:setUp()
"""
@classmethod
def startResponders(cls):
print("Launching responders..")
cls._UDPResponder = threading.Thread(name='UDP Responder', target=cls.UDPResponder, args=[cls._testServerPort, cls._toResponderQueue, cls._fromResponderQueue])
cls._UDPResponder.setDaemon(True)
cls._UDPResponder.start()
cls._UDPResponder2 = threading.Thread(name='UDP Responder 2', target=cls.UDPResponder, args=[cls._testServer2Port, cls._toResponderQueue, cls._fromResponderQueue])
cls._UDPResponder2.setDaemon(True)
cls._UDPResponder2.start()
cls._TCPResponder = threading.Thread(name='TCP Responder', target=cls.TCPResponder, args=[cls._testServerPort, cls._toResponderQueue, cls._fromResponderQueue])
cls._TCPResponder.setDaemon(True)
cls._TCPResponder.start()
cls._TCPResponder2 = threading.Thread(name='TCP Responder 2', target=cls.TCPResponder, args=[cls._testServer2Port, cls._toResponderQueue, cls._fromResponderQueue])
cls._TCPResponder2.setDaemon(True)
cls._TCPResponder2.start()
def testRR(self):
"""
Routing: Round Robin
        Send 10 A queries to "rr.routing.tests.powerdns.com." over UDP and TCP,
        check that dnsdist routes half of them to each backend.
"""
numberOfQueries = 10
name = 'rr.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
response = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
response.answer.append(rrset)
# the round robin counter is shared for UDP and TCP,
# so we need to do UDP then TCP to have a clean count
for _ in range(numberOfQueries):
(receivedQuery, receivedResponse) = self.sendUDPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
for _ in range(numberOfQueries):
(receivedQuery, receivedResponse) = self.sendTCPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
for key in self._responsesCounter:
value = self._responsesCounter[key]
self.assertEquals(value, numberOfQueries / 2)
class TestRoutingRoundRobinLBOneDown(DNSDistTest):
_testServer2Port = 5351
_config_params = ['_testServerPort', '_testServer2Port']
_config_template = """
setServerPolicy(roundrobin)
s1 = newServer{address="127.0.0.1:%s"}
s1:setUp()
s2 = newServer{address="127.0.0.1:%s"}
s2:setDown()
"""
def testRRWithOneDown(self):
"""
Routing: Round Robin with one server down
        Send 10 A queries to "rr.routing.tests.powerdns.com.",
        check that dnsdist routes all of them to the only backend that is up.
"""
numberOfQueries = 10
name = 'rr.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
response = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
response.answer.append(rrset)
# the round robin counter is shared for UDP and TCP,
# so we need to do UDP then TCP to have a clean count
for _ in range(numberOfQueries):
(receivedQuery, receivedResponse) = self.sendUDPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
for _ in range(numberOfQueries):
(receivedQuery, receivedResponse) = self.sendTCPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
total = 0
for key in self._responsesCounter:
value = self._responsesCounter[key]
self.assertTrue(value == numberOfQueries or value == 0)
total += value
self.assertEquals(total, numberOfQueries * 2)
class TestRoutingOrder(DNSDistTest):
_testServer2Port = 5351
_config_params = ['_testServerPort', '_testServer2Port']
_config_template = """
setServerPolicy(firstAvailable)
s1 = newServer{address="127.0.0.1:%s", order=2}
s1:setUp()
s2 = newServer{address="127.0.0.1:%s", order=1}
s2:setUp()
"""
@classmethod
def startResponders(cls):
print("Launching responders..")
cls._UDPResponder = threading.Thread(name='UDP Responder', target=cls.UDPResponder, args=[cls._testServerPort, cls._toResponderQueue, cls._fromResponderQueue])
cls._UDPResponder.setDaemon(True)
cls._UDPResponder.start()
cls._UDPResponder2 = threading.Thread(name='UDP Responder 2', target=cls.UDPResponder, args=[cls._testServer2Port, cls._toResponderQueue, cls._fromResponderQueue])
cls._UDPResponder2.setDaemon(True)
cls._UDPResponder2.start()
cls._TCPResponder = threading.Thread(name='TCP Responder', target=cls.TCPResponder, args=[cls._testServerPort, cls._toResponderQueue, cls._fromResponderQueue])
cls._TCPResponder.setDaemon(True)
cls._TCPResponder.start()
cls._TCPResponder2 = threading.Thread(name='TCP Responder 2', target=cls.TCPResponder, args=[cls._testServer2Port, cls._toResponderQueue, cls._fromResponderQueue])
cls._TCPResponder2.setDaemon(True)
cls._TCPResponder2.start()
def testOrder(self):
"""
Routing: firstAvailable policy based on 'order'
Send 50 A queries to "order.routing.tests.powerdns.com.",
        check that dnsdist routes all of them to the second backend
because it has the lower order value.
"""
numberOfQueries = 50
name = 'order.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
response = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
response.answer.append(rrset)
for _ in range(numberOfQueries):
(receivedQuery, receivedResponse) = self.sendUDPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
(receivedQuery, receivedResponse) = self.sendTCPQuery(query, response)
receivedQuery.id = query.id
self.assertEquals(query, receivedQuery)
self.assertEquals(response, receivedResponse)
total = 0
if 'UDP Responder' in self._responsesCounter:
self.assertEquals(self._responsesCounter['UDP Responder'], 0)
self.assertEquals(self._responsesCounter['UDP Responder 2'], numberOfQueries)
if 'TCP Responder' in self._responsesCounter:
self.assertEquals(self._responsesCounter['TCP Responder'], 0)
self.assertEquals(self._responsesCounter['TCP Responder 2'], numberOfQueries)
class TestRoutingNoServer(DNSDistTest):
_config_template = """
newServer{address="127.0.0.1:%s", pool="real"}
setServFailWhenNoServer(true)
"""
def testPolicyPoolNoServer(self):
"""
Routing: No server should return ServFail
"""
name = 'noserver.routing.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
expectedResponse = dns.message.make_response(query)
expectedResponse.set_rcode(dns.rcode.SERVFAIL)
(_, receivedResponse) = self.sendUDPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, expectedResponse)
(_, receivedResponse) = self.sendTCPQuery(query, response=None, useQueue=False)
self.assertEquals(receivedResponse, expectedResponse)
| gpl-2.0 | 3,611,403,062,192,275,500 | -2,415,631,954,546,293,000 | 39.923323 | 171 | 0.621516 | false |
ixc/django-fluent-contents | fluent_contents/tests/utils.py | 2 | 4672 | from __future__ import print_function
from future.builtins import str
from functools import wraps
from django.conf import settings, UserSettingsHolder
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.contrib.sites.models import Site
from django.db.models import loading
from django.test import TestCase
from django.utils.importlib import import_module
import os
class AppTestCase(TestCase):
"""
Tests for URL resolving.
"""
user = None
install_apps = (
'fluent_contents.tests.testapp',
)
@classmethod
def setUpClass(cls):
super(AppTestCase, cls).setUpClass()
# Avoid early import, triggers AppCache
from django.template.loaders import app_directories
User = get_user_model()
if cls.install_apps:
# When running this app via `./manage.py test fluent_pages`, auto install the test app + models.
run_syncdb = False
for appname in cls.install_apps:
if appname not in settings.INSTALLED_APPS:
print('Adding {0} to INSTALLED_APPS'.format(appname))
settings.INSTALLED_APPS += (appname,)
run_syncdb = True
# Flush caches
testapp = import_module(appname)
loading.cache.loaded = False
app_directories.app_template_dirs += (
os.path.join(os.path.dirname(testapp.__file__), 'templates'),
)
print(appname, os.path.join(os.path.dirname(testapp.__file__), 'templates'))
if run_syncdb:
call_command('syncdb', verbosity=0) # may run south's overlaid version
# Create basic objects
# 1.4 does not create site automatically with the defined SITE_ID, 1.3 does.
Site.objects.get_or_create(id=settings.SITE_ID, defaults=dict(domain='django.localhost', name='django at localhost'))
(cls.user, _) = User.objects.get_or_create(is_superuser=True, is_staff=True, username="admin")
def assert200(self, url, msg_prefix=''):
"""
Test that an URL exists.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEqual(self.client.get(url).status_code, 200, str(msg_prefix) + u"Page at {0} should be found.".format(url))
def assert404(self, url, msg_prefix=''):
"""
Test that an URL does not exist.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEqual(self.client.get(url).status_code, 404, str(msg_prefix) + u"Page at {0} should return 404.".format(url))
try:
from django.test.utils import override_settings # Django 1.4
except ImportError:
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
self.wrapped = settings._wrapped
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import TransactionTestCase
if isinstance(test_func, type) and issubclass(test_func, TransactionTestCase):
original_pre_setup = test_func._pre_setup
original_post_teardown = test_func._post_teardown
def _pre_setup(innerself):
self.enable()
original_pre_setup(innerself)
def _post_teardown(innerself):
original_post_teardown(innerself)
self.disable()
test_func._pre_setup = _pre_setup
test_func._post_teardown = _post_teardown
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def enable(self):
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
settings._wrapped = override
def disable(self):
settings._wrapped = self.wrapped
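# Illustrative usage of override_settings (an added sketch, not part of the
# original file); it behaves the same whether Django provides it or the fallback
# above is used:
#
#     @override_settings(SITE_ID=2)
#     class MySiteTests(AppTestCase):
#         ...
#
#     with override_settings(DEBUG=True):
#         ...   # settings.DEBUG is True only inside this block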
| apache-2.0 | 6,903,603,334,644,338,000 | 2,528,559,442,335,394,000 | 36.677419 | 128 | 0.583904 | false |
Ichag/openerp-server | openerp/addons/base/res/res_config.py | 1 | 28694 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
def _install_modules(self, cr, uid, modules, context):
"""Install the requested modules.
return the next action to execute
modules is a list of tuples
(mod_name, browse_record | None)
"""
ir_module = self.pool.get('ir.module.module')
to_install_ids = []
to_install_missing_names = []
for name, module in modules:
if not module:
to_install_missing_names.append(name)
elif module.state == 'uninstalled':
to_install_ids.append(module.id)
result = None
if to_install_ids:
result = ir_module.button_immediate_install(cr, uid, to_install_ids, context=context)
            # FIXME: if result is not None, the corresponding todo will be skipped because it was just marked done
if to_install_missing_names:
return {
'type': 'ir.actions.client',
'tag': 'apps',
'params': {'modules': to_install_missing_names},
}
return result
class res_config_configurable(osv.osv_memory):
    ''' Base class for new-style configuration items
Configuration items should inherit from this class, implement
the execute method (and optionally the cancel one) and have
their view inherit from the related res_config_view_base view.
'''
_name = 'res.config'
def _next_action(self, cr, uid, context=None):
Todos = self.pool['ir.actions.todo']
_logger.info('getting next %s', Todos)
active_todos = Todos.browse(cr, uid,
Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
context=context)
user_groups = set(map(
lambda g: g.id,
self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
valid_todos_for_user = [
todo for todo in active_todos
if not todo.groups_id or bool(user_groups.intersection((
group.id for group in todo.groups_id)))
]
if valid_todos_for_user:
return valid_todos_for_user[0]
return None
def _next(self, cr, uid, context=None):
_logger.info('getting next operation')
next = self._next_action(cr, uid, context=context)
_logger.info('next action is %s', next)
if next:
res = next.action_launch(context=context)
res['nodestroy'] = False
return res
# reload the client; open the first available root menu
menu_obj = self.pool['ir.ui.menu']
menu_ids = menu_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu_ids and menu_ids[0] or False},
}
def start(self, cr, uid, ids, context=None):
return self.next(cr, uid, ids, context)
def next(self, cr, uid, ids, context=None):
""" Returns the next todo action to execute (using the default
sort order)
"""
return self._next(cr, uid, context=context)
def execute(self, cr, uid, ids, context=None):
""" Method called when the user clicks on the ``Next`` button.
Execute *must* be overloaded unless ``action_next`` is overloaded
(which is something you generally don't need to do).
If ``execute`` returns an action dictionary, that action is executed
rather than just going to the next configuration item.
"""
raise NotImplementedError(
'Configuration items need to implement execute')
def cancel(self, cr, uid, ids, context=None):
""" Method called when the user click on the ``Skip`` button.
``cancel`` should be overloaded instead of ``action_skip``. As with
``execute``, if it returns an action dictionary that action is
executed in stead of the default (going to the next configuration item)
The default implementation is a NOOP.
``cancel`` is also called by the default implementation of
``action_cancel``.
"""
pass
def action_next(self, cr, uid, ids, context=None):
""" Action handler for the ``next`` event.
Sets the status of the todo the event was sent from to
``done``, calls ``execute`` and -- unless ``execute`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.execute(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_skip(self, cr, uid, ids, context=None):
""" Action handler for the ``skip`` event.
Sets the status of the todo the event was sent from to
``skip``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Action handler for the ``cancel`` event. That event isn't
generated by the res.config.view.base inheritable view, the
inherited view has to overload one of the buttons (or add one
more).
Sets the status of the todo the event was sent from to
``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
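# Illustrative subclass (an added sketch, not part of the original module): a
# configuration wizard only has to inherit 'res.config' and implement execute()
# (and, optionally, cancel()).
#
#     class my_module_config(osv.osv_memory):
#         _name = 'my.module.config'
#         _inherit = 'res.config'
#
#         def execute(self, cr, uid, ids, context=None):
#             # apply the chosen settings here; returning nothing continues
#             # with the next ir.actions.todo item
#             pass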
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
""" New-style configuration base specialized for addons selection
and installation.
Basic usage
-----------
Subclasses can simply define a number of _columns as
fields.boolean objects. The keys (column names) should be the
names of the addons to install (when selected). Upon action
execution, selected boolean fields (and those only) will be
interpreted as addons to install, and batch-installed.
Additional addons
-----------------
It is also possible to require the installation of an additional
addon set when a specific preset of addons has been marked for
installation (in the basic usage only, additionals can't depend on
one another).
These additionals are defined through the ``_install_if``
property. This property is a mapping of a collection of addons (by
name) to a collection of addons (by name) [#]_, and if all the *key*
addons are selected for installation, then the *value* ones will
be selected as well. For example::
_install_if = {
('sale','crm'): ['sale_crm'],
}
This will install the ``sale_crm`` addon if and only if both the
``sale`` and ``crm`` addons are selected for installation.
You can define as many additionals as you wish, and additionals
can overlap in key and value. For instance::
_install_if = {
('sale','crm'): ['sale_crm'],
('sale','project'): ['project_mrp'],
}
will install both ``sale_crm`` and ``project_mrp`` if all of
``sale``, ``crm`` and ``project`` are selected for installation.
Hook methods
------------
Subclasses might also need to express dependencies more complex
than that provided by additionals. In this case, it's possible to
define methods of the form ``_if_%(name)s`` where ``name`` is the
name of a boolean field. If the field is selected, then the
corresponding module will be marked for installation *and* the
hook method will be executed.
Hook methods take the usual set of parameters (cr, uid, ids,
context) and can return a collection of additional addons to
install; if they have nothing to add they should return nothing
(any "falsy" value such as None or an empty collection has the
same effect).
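For example (a hypothetical hook; ``sale_margin`` is only a placeholder)::

    def _if_sale(self, cr, uid, ids, context=None):
        # runs only when the ``sale`` field was checked
        return ['sale_margin']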
Complete control
----------------
The last hook is to simply overload the ``modules_to_install``
method, which implements all the mechanisms above. This method
takes the usual set of parameters (cr, uid, ids, context) and
returns a ``set`` of addons to install (addons selected by the
above methods minus addons from the *basic* set which are already
installed) [#]_ so an overloader can simply manipulate the ``set``
returned by ``res_config_installer.modules_to_install`` to add or
remove addons.
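A hypothetical overload could look like::

    def modules_to_install(self, cr, uid, ids, context=None):
        modules = super(my_installer, self).modules_to_install(
            cr, uid, ids, context=context)
        modules.discard('crm')  # never install crm from this wizard
        return modules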
Skipping the installer
----------------------
Unless it is removed from the view, installers have a *skip*
button which invokes ``action_skip`` (and the ``cancel`` hook from
``res.config``). Hooks and additionals *are not run* when skipping
installation, even for already installed addons.
Again, set up your hooks accordingly.
.. [#] note that since a mapping key needs to be hashable, it's
possible to use a tuple or a frozenset, but not a list or a
regular set
.. [#] because the already-installed modules are only pruned at
the very end of ``modules_to_install``, additionals and
hooks depending on them *are guaranteed to execute*. Set up
your hooks accordingly.
"""
_name = 'res.config.installer'
_inherit = 'res.config'
_install_if = {}
def already_installed(self, cr, uid, context=None):
""" For each module, check if it's already installed and if it
is return its name
:returns: a list of the already installed modules in this
installer
:rtype: [str]
"""
return map(attrgetter('name'),
self._already_installed(cr, uid, context=context))
def _already_installed(self, cr, uid, context=None):
""" For each module (boolean fields in a res.config.installer),
check if it's already installed (either 'to install', 'to upgrade'
or 'installed') and if it is return the module's browse_record
:returns: a list of all installed modules in this installer
:rtype: [browse_record]
"""
modules = self.pool['ir.module.module']
selectable = [field for field in self._columns
if type(self._columns[field]) is fields.boolean]
return modules.browse(
cr, uid,
modules.search(cr, uid,
[('name','in',selectable),
('state','in',['to install', 'installed', 'to upgrade'])],
context=context),
context=context)
def modules_to_install(self, cr, uid, ids, context=None):
""" selects all modules to install:
* checked boolean fields
* return values of hook methods. Hook methods are of the form
``_if_%(addon_name)s``, and are called if the corresponding
addon is marked for installation. They take the arguments
cr, uid, ids and context, and return an iterable of addon
names
* additionals: these are set up through the ``_install_if``
class variable. ``_install_if`` is a dict of {iterable: iterable}
where key and value are iterables of addon names.
If all the addons in the key are selected for installation
(warning: addons added through hooks don't count), then the
addons in the value are added to the set of modules to install
* not already installed
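For instance (an illustrative trace, assuming none of these addons is
installed yet), with ``_install_if = {('sale','crm'): ['sale_crm']}``, a
``_if_sale`` hook returning ``['sale_margin']`` and both ``sale`` and
``crm`` checked, the method returns::

    set(['sale', 'crm', 'sale_crm', 'sale_margin'])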
"""
base = set(module_name
for installer in self.read(cr, uid, ids, context=context)
for module_name, to_install in installer.iteritems()
if module_name != 'id'
if type(self._columns[module_name]) is fields.boolean
if to_install)
hooks_results = set()
for module in base:
hook = getattr(self, '_if_%s'% module, None)
if hook:
hooks_results.update(hook(cr, uid, ids, context=None) or set())
additionals = set(
module for requirements, consequences \
in self._install_if.iteritems()
if base.issuperset(requirements)
for module in consequences)
return (base | hooks_results | additionals).difference(
self.already_installed(cr, uid, context))
def default_get(self, cr, uid, fields_list, context=None):
''' If an addon is already installed, check it by default
'''
defaults = super(res_config_installer, self).default_get(
cr, uid, fields_list, context=context)
return dict(defaults,
**dict.fromkeys(
self.already_installed(cr, uid, context=context),
True))
def fields_get(self, cr, uid, fields=None, context=None, write_access=True):
""" If an addon is already installed, set it to readonly as
res.config.installer doesn't handle uninstallations of already
installed addons
"""
fields = super(res_config_installer, self).fields_get(
cr, uid, fields, context, write_access)
for name in self.already_installed(cr, uid, context=context):
if name not in fields:
continue
fields[name].update(
readonly=True,
help= ustr(fields[name].get('help', '')) +
_('\n\nThis addon is already installed on your system'))
return fields
def execute(self, cr, uid, ids, context=None):
to_install = list(self.modules_to_install(
cr, uid, ids, context=context))
_logger.info('Selecting addons %s to install', to_install)
ir_module = self.pool.get('ir.module.module')
modules = []
for name in to_install:
mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
return self._install_modules(cr, uid, modules, context=context)
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
""" Base configuration wizard for application settings. It provides support for setting
default values, assigning groups to employee users, and installing modules.
To make such a 'settings' wizard, define a model like::
class my_config_wizard(osv.osv_memory):
_name = 'my.settings'
_inherit = 'res.config.settings'
_columns = {
'default_foo': fields.type(..., default_model='my.model'),
'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
'module_baz': fields.boolean(...),
'other_field': fields.type(...),
}
The method ``execute`` provides some support based on a naming convention:
* For a field like 'default_XXX', ``execute`` sets the (global) default value of
the field 'XXX' in the model named by ``default_model`` to the field's value.
* For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
to/from the implied groups of 'group', depending on the field's value.
By default 'group' is the group Employee. Groups are given by their xml id.
* For a boolean field like 'module_XXX', ``execute`` triggers the immediate
installation of the module named 'XXX' if the field has value ``True``.
* For the other fields, the method ``execute`` invokes all methods with a name
that starts with 'set_'; such methods can be defined to implement the effect
of those fields.
The method ``default_get`` retrieves values that reflect the current status of the
fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
with a name that starts with 'get_default_'; such methods can be defined to provide
current values for other fields.
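For example (an illustrative sketch; the parameter key and field names are
hypothetical)::

    def set_other_field(self, cr, uid, ids, context=None):
        # persist the value, e.g. as a system parameter
        config = self.browse(cr, uid, ids[0], context=context)
        self.pool['ir.config_parameter'].set_param(
            cr, uid, 'my_module.other_field', config.other_field or '')

    def get_default_other_field(self, cr, uid, fields, context=None):
        value = self.pool['ir.config_parameter'].get_param(
            cr, uid, 'my_module.other_field', default='')
        return {'other_field': value}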
"""
_name = 'res.config.settings'
def copy(self, cr, uid, id, values, context=None):
raise osv.except_osv(_("Cannot duplicate configuration!"), "")
def _get_classified_fields(self, cr, uid, context=None):
""" return a dictionary with the fields classified by category::
{ 'default': [('default_foo', 'model', 'foo'), ...],
'group': [('group_bar', browse_group, browse_implied_group), ...],
'module': [('module_baz', browse_module), ...],
'other': ['other_field', ...],
}
"""
ir_model_data = self.pool['ir.model.data']
ir_module = self.pool['ir.module.module']
def ref(xml_id):
mod, xml = xml_id.split('.', 1)
return ir_model_data.get_object(cr, uid, mod, xml, context)
defaults, groups, modules, others = [], [], [], []
for name, field in self._columns.items():
if name.startswith('default_') and hasattr(field, 'default_model'):
defaults.append((name, field.default_model, name[8:]))
elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
field_group = getattr(field, 'group', 'base.group_user')
groups.append((name, ref(field_group), ref(field.implied_group)))
elif name.startswith('module_') and isinstance(field, fields.boolean):
mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
else:
others.append(name)
return {'default': defaults, 'group': groups, 'module': modules, 'other': others}
def default_get(self, cr, uid, fields, context=None):
ir_values = self.pool['ir.values']
classified = self._get_classified_fields(cr, uid, context)
res = super(res_config_settings, self).default_get(cr, uid, fields, context)
# defaults: take the corresponding default value they set
for name, model, field in classified['default']:
value = ir_values.get_default(cr, uid, model, field)
if value is not None:
res[name] = value
# groups: which groups are implied by the group Employee
for name, group, implied_group in classified['group']:
res[name] = implied_group in group.implied_ids
# modules: which modules are installed/to install
for name, module in classified['module']:
res[name] = module and module.state in ('installed', 'to install', 'to upgrade')
# other fields: call all methods that start with 'get_default_'
for method in dir(self):
if method.startswith('get_default_'):
res.update(getattr(self, method)(cr, uid, fields, context))
return res
def execute(self, cr, uid, ids, context=None):
ir_values = self.pool['ir.values']
ir_module = self.pool['ir.module.module']
classified = self._get_classified_fields(cr, uid, context)
config = self.browse(cr, uid, ids[0], context)
# default values fields
for name, model, field in classified['default']:
ir_values.set_default(cr, uid, model, field, config[name])
# group fields: modify group / implied groups
for name, group, implied_group in classified['group']:
if config[name]:
group.write({'implied_ids': [(4, implied_group.id)]})
else:
group.write({'implied_ids': [(3, implied_group.id)]})
implied_group.write({'users': [(3, u.id) for u in group.users]})
# other fields: execute all methods that start with 'set_'
for method in dir(self):
if method.startswith('set_'):
getattr(self, method)(cr, uid, ids, context)
# module fields: install/uninstall the selected modules
to_install = []
to_uninstall_ids = []
lm = len('module_')
for name, module in classified['module']:
if config[name]:
to_install.append((name[lm:], module))
else:
if module and module.state in ('installed', 'to upgrade'):
to_uninstall_ids.append(module.id)
if to_uninstall_ids:
ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)
action = self._install_modules(cr, uid, to_install, context=context)
if action:
return action
# After the uninstall/install calls, the self.pool is no longer valid.
# So we reach into the RegistryManager directly.
res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
config = res_config.next(cr, uid, [], context=context) or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# force client-side reload (update user menu and current view)
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def cancel(self, cr, uid, ids, context=None):
# ignore the current record, and send the action to reopen the view
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
if action_ids:
return act_window.read(cr, uid, action_ids[0], [], context=context)
return {}
def name_get(self, cr, uid, ids, context=None):
""" Override name_get method to return an appropriate configuration wizard
name, and not the generated name."""
if not ids:
return []
# name_get may receive int id instead of an id list
if isinstance(ids, (int, long)):
ids = [ids]
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
name = self._name
if action_ids:
name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
return [(record.id, name) for record in self.browse(cr, uid , ids, context=context)]
def get_option_path(self, cr, uid, menu_xml_id, context=None):
"""
Fetch the path to a specified configuration view and the action id to access it.
:param string menu_xml_id: the xml id of the menuitem where the view is located,
structured as follows: module_name.menuitem_xml_id (e.g.: "base.menu_sale_config")
:return tuple:
- t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
- t[1]: int or long: id of the menuitem's action
"""
module_name, menu_xml_id = menu_xml_id.split('.')
dummy, menu_id = self.pool['ir.model.data'].get_object_reference(cr, uid, module_name, menu_xml_id)
ir_ui_menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)
return (ir_ui_menu.complete_name, ir_ui_menu.action.id)
def get_option_name(self, cr, uid, full_field_name, context=None):
"""
Fetch the human readable name of a specified configuration option.
:param string full_field_name: the full name of the field, structured as follows:
model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
:return string: human readable name of the field (e.g.: "Create leads from incoming mails")
"""
model_name, field_name = full_field_name.rsplit('.', 1)
return self.pool[model_name].fields_get(cr, uid, allfields=[field_name], context=context)[field_name]['string']
def get_config_warning(self, cr, msg, context=None):
"""
Helper: return a Warning exception with the given message where the %(field:xxx)s
and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
full path.
Usage:
------
Just include in your error message %(field:model_name.field_name)s to obtain the human
readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
full path.
Example of use:
---------------
res_config = self.pool['res.config.settings']
raise res_config.get_config_warning(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)
This will return an exception containing the following message:
Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.
What if there is another substitution in the message already?
-------------------------------------------------------------
You could have a situation where the error message you want to upgrade already contains a substitution. Example:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
What you want to do here is simply to replace the path with %(menu:account.menu_account_config)s, and leave the rest alone.
In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
"""
res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'
# Process the message
# 1/ find the menu and/or field references, put them in a list
references = re.findall(regex_path, msg, flags=re.I)
# 2/ fetch the menu and/or field replacement values (full path and
# human readable field's name) and the action_id if any
values = {}
action_id = None
for item in references:
ref_type, ref = item.split(':')
if ref_type == 'menu':
values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
elif ref_type == 'field':
values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)
# 3/ substitute and return the result
if (action_id):
return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
return exceptions.Warning(msg % values)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 9,198,270,242,289,077,000 | 5,649,603,291,346,424,000 | 42.674277 | 201 | 0.605736 | false |
eufarn7sp/egads-eufar | egads/thirdparty/quantities/constants/alpha.py | 4 | 1344 | # -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
from ._utils import _cd
from ..unitquantity import UnitConstant
m_alpha = alpha_particle_mass = UnitConstant(
'alpha_particle_mass',
_cd('alpha particle mass'),
symbol='m_alpha',
u_symbol='m_α'
)
alpha_particle_mass_energy_equivalent = UnitConstant(
'alpha_particle_mass_energy_equivalent',
_cd('alpha particle mass energy equivalent'),
symbol='(m_alpha*c**2)',
u_symbol='(m_α·c²)'
)
alpha_particle_mass_energy_equivalent_in_MeV = UnitConstant(
'alpha_particle_mass_energy_equivalent_in_MeV',
_cd('alpha particle mass energy equivalent in MeV'),
)
alpha_particle_mass_in_u = UnitConstant(
'alpha_particle_mass_in_u',
_cd('alpha particle mass in u')
)
alpha_particle_molar_mass = UnitConstant(
'alpha_particle_molar_mass',
_cd('alpha particle molar mass'),
symbol='M_alpha',
u_symbol='M_α'
)
alpha_particle_electron_mass_ratio = UnitConstant(
'alpha_particle_electron_mass_ratio',
_cd('alpha particle-electron mass ratio'),
symbol='(m_alpha/m_e)',
u_symbol='(m_α/mₑ)'
)
alpha_particle_proton_mass_ratio = UnitConstant(
'alpha_particle_proton_mass_ratio',
_cd('alpha particle-proton mass ratio'),
symbol='(m_alpha/m_p)',
u_symbol='(m_α/m_p)'
)
del UnitConstant, _cd
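# Example usage (illustrative sketch, assuming the standard ``quantities``
# package layout; not part of the original module):
#
#   >>> from quantities.constants.alpha import m_alpha
#   >>> m_alpha.simplified  # value rescaled to SI base units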
| bsd-3-clause | -8,816,238,763,503,488,000 | 7,023,942,522,675,684,000 | 26.244898 | 60 | 0.668914 | false |
untom/keras | tests/auto/keras/test_normalization.py | 33 | 3810 | import unittest
import numpy as np
from numpy.testing import assert_allclose
from theano import tensor as T
from keras.layers import normalization
from keras.models import Sequential
class TestBatchNormalization(unittest.TestCase):
def setUp(self):
self.input_1 = np.arange(10)
self.input_2 = np.zeros(10)
self.input_3 = np.ones((10))
self.input_shapes = [np.ones((10, 10)), np.ones((10, 10, 10))]
def test_setup(self):
norm_m0 = normalization.BatchNormalization((10, 10))
norm_m1 = normalization.BatchNormalization((10, 10), mode=1)
# mode 3 does not exist
self.assertRaises(Exception, normalization.BatchNormalization, (10, 10), mode=3)
def test_mode_0(self):
model = Sequential()
norm_m0 = normalization.BatchNormalization((10,))
model.add(norm_m0)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
model.fit(X, X, nb_epoch=5, verbose=0)
norm_m0.input = X
out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma
self.assertAlmostEqual(out.mean().eval(), 0.0, places=1)
self.assertAlmostEqual(out.std().eval(), 1.0, places=1)
def test_mode_1(self):
norm_m1 = normalization.BatchNormalization((10,), mode=1)
norm_m1.init_updates()
for inp in [self.input_1, self.input_2, self.input_3]:
norm_m1.input = inp
out = (norm_m1.get_output(train=True) - norm_m1.beta) / norm_m1.gamma
self.assertAlmostEqual(out.mean().eval(), 0.0)
if inp.std() > 0.:
self.assertAlmostEqual(out.std().eval(), 1.0, places=2)
else:
self.assertAlmostEqual(out.std().eval(), 0.0, places=2)
def test_shapes(self):
"""
Test batch normalization with various input shapes
"""
for inp in self.input_shapes:
norm_m0 = normalization.BatchNormalization(inp.shape, mode=0)
norm_m0.init_updates()
norm_m0.input = inp
out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma
norm_m1 = normalization.BatchNormalization(inp.shape, mode=1)
norm_m1.input = inp
out = (norm_m1.get_output(train=True) - norm_m1.beta) / norm_m1.gamma
def test_weight_init(self):
"""
Test weight initialization
"""
norm_m1 = normalization.BatchNormalization((10,), mode=1, weights=[np.ones(10), np.ones(10), np.zeros(10), np.zeros(10)])
norm_m1.init_updates()
for inp in [self.input_1, self.input_2, self.input_3]:
norm_m1.input = inp
out = (norm_m1.get_output(train=True) - np.ones(10)) / 1.
self.assertAlmostEqual(out.mean().eval(), 0.0)
if inp.std() > 0.:
self.assertAlmostEqual(out.std().eval(), 1.0, places=2)
else:
self.assertAlmostEqual(out.std().eval(), 0.0, places=2)
assert_allclose(norm_m1.gamma.eval(), np.ones(10))
assert_allclose(norm_m1.beta.eval(), np.ones(10))
def test_config(self):
norm = normalization.BatchNormalization((10, 10), mode=1, epsilon=0.1)
conf = norm.get_config()
conf_target = {"input_shape": (10, 10), "name": normalization.BatchNormalization.__name__,
"epsilon": 0.1, "mode": 1}
self.assertDictEqual(conf, conf_target)
def test_save_weights(self):
norm = normalization.BatchNormalization((10, 10), mode=1, epsilon=0.1)
weights = norm.get_weights()
assert(len(weights) == 4)
norm.set_weights(weights)
if __name__ == '__main__':
unittest.main()
| mit | -6,691,587,813,818,240,000 | -2,362,104,603,318,852,600 | 36.352941 | 129 | 0.590289 | false |
afloren/nipype | nipype/interfaces/cmtk/convert.py | 14 | 10363 | """
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os, os.path as op
import datetime
import string
import warnings
import networkx as nx
from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits,
File, TraitedSpec, InputMultiPath, isdefined)
from nipype.utils.filemanip import split_filename
from nipype.utils.misc import package_check
have_cfflib = True
try:
package_check('cfflib')
except Exception, e:
have_cfflib = False
else:
import cfflib as cf
class CFFConverterInputSpec(BaseInterfaceInputSpec):
graphml_networks = InputMultiPath(File(exists=True), desc='list of graphML networks')
gpickled_networks = InputMultiPath(File(exists=True), desc='list of gpickled Networkx graphs')
gifti_surfaces = InputMultiPath(File(exists=True), desc='list of GIFTI surfaces')
gifti_labels = InputMultiPath(File(exists=True), desc='list of GIFTI labels')
nifti_volumes = InputMultiPath(File(exists=True), desc='list of NIFTI volumes')
tract_files = InputMultiPath(File(exists=True), desc='list of Trackvis fiber files')
timeseries_files = InputMultiPath(File(exists=True), desc='list of HDF5 timeseries files')
script_files = InputMultiPath(File(exists=True), desc='list of script files to include')
data_files = InputMultiPath(File(exists=True), desc='list of external data files (i.e. Numpy, HD5, XML) ')
title = traits.Str(desc='Connectome Title')
creator = traits.Str(desc='Creator')
email = traits.Str(desc='Email address')
publisher = traits.Str(desc='Publisher')
license = traits.Str(desc='License')
rights = traits.Str(desc='Rights')
references = traits.Str(desc='References')
relation = traits.Str(desc='Relation')
species = traits.Str('Homo sapiens',desc='Species',usedefault=True)
description = traits.Str('Created with the Nipype CFF converter', desc='Description', usedefault=True)
out_file = File('connectome.cff', usedefault = True, desc='Output connectome file')
class CFFConverterOutputSpec(TraitedSpec):
connectome_file = File(exists=True, desc='Output connectome file')
class CFFConverter(BaseInterface):
"""
Creates a Connectome File Format (CFF) file from input networks, surfaces, volumes, tracts, etcetera....
Example
-------
>>> import nipype.interfaces.cmtk as cmtk
>>> cvt = cmtk.CFFConverter()
>>> cvt.inputs.title = 'subject 1'
>>> cvt.inputs.gifti_surfaces = ['lh.pial_converted.gii', 'rh.pial_converted.gii']
>>> cvt.inputs.tract_files = ['streamlines.trk']
>>> cvt.inputs.gpickled_networks = ['network0.gpickle']
>>> cvt.run() # doctest: +SKIP
"""
input_spec = CFFConverterInputSpec
output_spec = CFFConverterOutputSpec
def _run_interface(self, runtime):
a = cf.connectome()
if isdefined(self.inputs.title):
a.connectome_meta.set_title(self.inputs.title)
else:
a.connectome_meta.set_title(self.inputs.out_file)
if isdefined(self.inputs.creator):
a.connectome_meta.set_creator(self.inputs.creator)
else:
#Probably only works on some OSes...
a.connectome_meta.set_creator(os.getenv('USER'))
if isdefined(self.inputs.email):
a.connectome_meta.set_email(self.inputs.email)
if isdefined(self.inputs.publisher):
a.connectome_meta.set_publisher(self.inputs.publisher)
if isdefined(self.inputs.license):
a.connectome_meta.set_license(self.inputs.license)
if isdefined(self.inputs.rights):
a.connectome_meta.set_rights(self.inputs.rights)
if isdefined(self.inputs.references):
a.connectome_meta.set_references(self.inputs.references)
if isdefined(self.inputs.relation):
a.connectome_meta.set_relation(self.inputs.relation)
if isdefined(self.inputs.species):
a.connectome_meta.set_species(self.inputs.species)
if isdefined(self.inputs.description):
a.connectome_meta.set_description(self.inputs.description)
a.connectome_meta.set_created(datetime.date.today())
count = 0
if isdefined(self.inputs.graphml_networks):
for ntwk in self.inputs.graphml_networks:
# There must be a better way to deal with the unique name problem
#(i.e. tracks and networks can't use the same name, and previously we were pulling them both from the input files)
ntwk_name = 'Network {cnt}'.format(cnt=count)
a.add_connectome_network_from_graphml(ntwk_name, ntwk)
count += 1
if isdefined(self.inputs.gpickled_networks):
unpickled = []
for ntwk in self.inputs.gpickled_networks:
_, ntwk_name, _ = split_filename(ntwk)
unpickled = nx.read_gpickle(ntwk)
cnet = cf.CNetwork(name = ntwk_name)
cnet.set_with_nxgraph(unpickled)
a.add_connectome_network(cnet)
count += 1
count = 0
if isdefined(self.inputs.tract_files):
for trk in self.inputs.tract_files:
_, trk_name, _ = split_filename(trk)
ctrack = cf.CTrack(trk_name, trk)
a.add_connectome_track(ctrack)
count += 1
count = 0
if isdefined(self.inputs.gifti_surfaces):
for surf in self.inputs.gifti_surfaces:
_, surf_name, _ = split_filename(surf)
csurf = cf.CSurface.create_from_gifti("Surface %d - %s" % (count,surf_name), surf)
csurf.fileformat='Gifti'
csurf.dtype='Surfaceset'
a.add_connectome_surface(csurf)
count += 1
count = 0
if isdefined(self.inputs.gifti_labels):
for label in self.inputs.gifti_labels:
_, label_name, _ = split_filename(label)
csurf = cf.CSurface.create_from_gifti("Surface Label %d - %s" % (count,label_name), label)
csurf.fileformat='Gifti'
csurf.dtype='Labels'
a.add_connectome_surface(csurf)
count += 1
if isdefined(self.inputs.nifti_volumes):
for vol in self.inputs.nifti_volumes:
_, vol_name, _ = split_filename(vol)
cvol = cf.CVolume.create_from_nifti(vol_name,vol)
a.add_connectome_volume(cvol)
if isdefined(self.inputs.script_files):
for script in self.inputs.script_files:
_, script_name, _ = split_filename(script)
cscript = cf.CScript.create_from_file(script_name, script)
a.add_connectome_script(cscript)
if isdefined(self.inputs.data_files):
for data in self.inputs.data_files:
_, data_name, _ = split_filename(data)
cda = cf.CData(name=data_name, src=data, fileformat='NumPy')
if not string.find(data_name,'lengths') == -1:
cda.dtype = 'FinalFiberLengthArray'
if not string.find(data_name,'endpoints') == -1:
cda.dtype = 'FiberEndpoints'
if not string.find(data_name,'labels') == -1:
cda.dtype = 'FinalFiberLabels'
a.add_connectome_data(cda)
a.print_summary()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
cf.save_to_cff(a,op.abspath(name + ext))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
outputs['connectome_file'] = op.abspath(name + ext)
return outputs
class MergeCNetworksInputSpec(BaseInterfaceInputSpec):
in_files = InputMultiPath(File(exists=True), mandatory=True, desc='List of CFF files to extract networks from')
out_file = File('merged_network_connectome.cff', usedefault = True, desc='Output CFF file with all the networks added')
class MergeCNetworksOutputSpec(TraitedSpec):
connectome_file = File(exists=True, desc='Output CFF file with all the networks added')
class MergeCNetworks(BaseInterface):
""" Merges networks from multiple CFF files into one new CFF file.
Example
-------
>>> import nipype.interfaces.cmtk as cmtk
>>> mrg = cmtk.MergeCNetworks()
>>> mrg.inputs.in_files = ['subj1.cff','subj2.cff']
>>> mrg.run() # doctest: +SKIP
"""
input_spec = MergeCNetworksInputSpec
output_spec = MergeCNetworksOutputSpec
def _run_interface(self, runtime):
extracted_networks = []
for i, con in enumerate(self.inputs.in_files):
mycon = cf.load(con)
nets = mycon.get_connectome_network()
for ne in nets:
# here, you might want to skip networks with a given
# metadata information
ne.load()
contitle = mycon.get_connectome_meta().get_title()
ne.set_name( str(i) + ': ' + contitle + ' - ' + ne.get_name() )
ne.set_src(ne.get_name())
extracted_networks.append(ne)
# Add networks to new connectome
newcon = cf.connectome(title = 'All CNetworks', connectome_network = extracted_networks)
# Setting additional metadata
metadata = newcon.get_connectome_meta()
metadata.set_creator('My Name')
metadata.set_email('My Email')
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
cf.save_to_cff(newcon, op.abspath(name + ext))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
outputs['connectome_file'] = op.abspath(name + ext)
return outputs
| bsd-3-clause | 6,310,763,466,843,640,000 | -1,599,040,998,266,415,400 | 38.553435 | 130 | 0.611116 | false |