repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
kristofvanmoffaert/python-omniture
|
build/lib/tests/testElement.py
|
3
|
1771
|
#!/usr/bin/python
import unittest
import omniture
import os
creds = {}
creds['username'] = os.environ['OMNITURE_USERNAME']
creds['secret'] = os.environ['OMNITURE_SECRET']
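# Note: these tests read OMNITURE_USERNAME and OMNITURE_SECRET from the environment
# (see the creds lookup above), so both must be exported before running, e.g.
#   OMNITURE_USERNAME=... OMNITURE_SECRET=... python testElement.py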
class ElementTest(unittest.TestCase):
def setUp(self):
fake_list = [{"id":"123","title":"ABC"},{"id":"456","title":"DEF"}]
self.valueList = omniture.elements.Value.list("metrics",fake_list,"test")
def test__repr__(self):
self.assertEqual(self.valueList.__repr__(),"<AddressableList>",\
"The value for __repr__ on the AddressableList was {}"\
.format(self.valueList.__repr__()))
def test_value__repr__(self):
self.assertEqual(self.valueList[0].__repr__(),"<ABC: 123 in test>", \
"The value of the first item in the AddressableList \
was {}".format(self.valueList[0].__repr__()))
def test_value__copy__(self):
value = self.valueList[0].copy()
self.assertEqual(value.__repr__(), self.valueList[0].__repr__(),\
"The copied value was: {} the original was: {}"\
.format(value, self.valueList[0]))
def test_repr_html_(self):
self.assertEqual(self.valueList[0]._repr_html_(),\
"<td><b>123</b></td><td>ABC</td>",\
"The html value was: {}"\
.format(self.valueList[0]._repr_html_()))
def test__str__(self):
self.assertEqual(self.valueList[0].__str__(),\
"ID 123 | Name: ABC \n",\
"__str__ returned: {}"\
.format(self.valueList[0].__str__()))
if __name__ == '__main__':
unittest.main()
|
mit
|
rohangoel96/IRCLogParser
|
IRCLogParser/lib/analysis/user.py
|
1
|
18354
|
import networkx as nx
import re
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import text
from nltk.stem.wordnet import WordNetLemmatizer
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from time import time
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import Normalizer
from sklearn.pipeline import make_pipeline
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
import lib.util as util
sys.path.append('../lib')
import lib.config as config
import ext.common_english_words as common_english_words
import ext.extend_stop_words as custom_stop_words
def nick_change_graph(log_dict, DAY_BY_DAY_ANALYSIS=False):
""" creates a graph which tracks the nick changes of the users
where each edge has a time stamp denoting the time
at which the nick was changed by the user
Args:
log_dict (dict): dictionary of logs created using reader.py
Returns:
list of day-to-day nick change graphs if DAY_BY_DAY_ANALYSIS=True, or else an aggregate nick change graph for the
given time period.
"""
rem_time = None #remembers the time of the last message of the file parsed before the current file
nick_change_day_list = []
aggregate_nick_change_graph = nx.MultiDiGraph() # graph for nick changes in the whole time span (not day to day)
for day_content_all_channels in log_dict.values():
for day_content in day_content_all_channels:
day_log = day_content["log_data"]
today_nick_change_graph = nx.MultiDiGraph() #using networkx
current_line_no = -1
for line in day_log:
current_line_no = current_line_no + 1
if(line[0] == '=' and "changed the topic of" not in line): #excluding the condition when user changes the topic. Search for only nick changes
nick1 = util.splice_find(line, "=", " is", 3)
nick2 = util.splice_find(line, "wn as", "\n", 5)
earlier_line_no = current_line_no
while earlier_line_no >= 0: #to find the line just before "==" so as to find the time of the nick change
earlier_line_no = earlier_line_no - 1
if(day_log[earlier_line_no][0] != '='):
year, month, day = util.get_year_month_day(day_content)
util.build_graphs(nick1, nick2, day_log[earlier_line_no][1:6], year, month, day, today_nick_change_graph, aggregate_nick_change_graph)
break
if(earlier_line_no == -1):
today_nick_change_graph.add_edge(nick1, nick2, weight=rem_time)
aggregate_nick_change_graph.add_edge(nick1, nick2, weight = rem_time)
count = len(day_log) - 1 #setting up the rem_time for next file, by noting the last message sent on that file.
while(count >= 0):
if(day_log[count][0] != '='):
rem_time = day_log[count][1:6]
break
count = count-1
nick_change_day_list.append(today_nick_change_graph)
if DAY_BY_DAY_ANALYSIS:
return nick_change_day_list
else:
return aggregate_nick_change_graph
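# Minimal usage sketch for nick_change_graph (log_dict is assumed to come from
# reader.py, as the docstring above notes):
#   aggregate_graph = nick_change_graph(log_dict)                           # one MultiDiGraph
#   per_day_graphs = nick_change_graph(log_dict, DAY_BY_DAY_ANALYSIS=True)  # list of graphs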
def top_keywords_for_nick(user_keyword_freq_dict, nick, threshold, min_words_spoken):
"""
outputs top keywords for a particular nick
Args:
user_keyword_freq_dict(dict): dictionary for each user having keywords and their frequency
nick(str) : user to do analysis on
threshold(float): threshold on normalised values to separate meaningful words
min_words_spoken(int): threshold on the minimum number of words spoken by a user to perform analysis on
Returns:
tuple: (top_keywords, top_keywords_normal_freq) for the given nick
"""
keywords = None
for dicts in user_keyword_freq_dict:
if dicts['nick'] == nick:
keywords = dicts['keywords']
break
total_freq = 0.0
for freq_tuple in keywords:
total_freq += freq_tuple[1]
top_keywords = []
top_keywords_normal_freq = []
if total_freq > min_words_spoken:
if keywords:
for keyword in keywords:
if keyword[2] >= threshold:
top_keywords.append(keyword[0].encode('ascii', 'ignore'))
top_keywords_normal_freq.append(keyword[2])
if len(top_keywords) == 0:
if config.DEBUGGER:
print "No word's normalised score crosses the value of", threshold
top_keywords = None
else:
if config.DEBUGGER:
print "No message sent by nick", nick
pass
else:
if config.DEBUGGER:
print "Not enough words spoken by", nick, "; spoke" ,int(total_freq), "words only, required", min_words_spoken
pass
return (top_keywords, top_keywords_normal_freq)
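# Usage sketch for top_keywords_for_nick (the nick and thresholds below are
# illustrative assumptions; the module itself passes config.KEYWORDS_THRESHOLD
# and config.KEYWORDS_MIN_WORDS):
#   top_words, scores = top_keywords_for_nick(user_keyword_freq_dict, "rohan", 0.01, 100)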
def keywords(log_dict, nicks, nick_same_list):
"""
Returns keywords for all users
Args:
log_dict (dict): dictionary of logs data created using reader.py
nicks (list): list of nicknames created using nickTracker.py
nick_same_list (list): list of same_nick names created using nickTracker.py
Returns:
keywords_filtered: filtered keywords per user
user_keyword_freq_dict: dictionary for each user having keywords and their frequency
user_words_dict: keywords per user
nicks_for_stop_words: stop words
"""
user_words_dict = []
user_keyword_freq_dict = []
keywords_filtered = []
no_messages = 0
def get_nick_receiver(nick_receiver, rec, nick_to_compare, nick_name, nicks, nick_same_list):
if(rec == nick_name):
if(nick_to_compare != nick_name):
nick_receiver = iter_nicks(nick_receiver, nicks, nick_same_list, nick_name)
return nick_receiver
def iter_nicks(nick_sender_receiver, nicks, nick_same_list, nick_comp):
for i in range(len(nicks)):
if nick_comp in nick_same_list[i]:
nick_sender_receiver = nick_same_list[i][0]
break
else:
nick_sender_receiver = nick_comp
return nick_sender_receiver
for day_content_all_channels in log_dict.values():
for day_content in day_content_all_channels:
day_log = day_content["log_data"]
for line in day_log:
flag_comma = 0
if(util.check_if_msg_line(line)):
m = re.search(r"\<(.*?)\>", line)
nick_to_compare = util.correctLastCharCR((m.group(0)[1:-1]))
nick_sender = ''
nick_sender = iter_nicks(nick_sender, nicks, nick_same_list, nick_to_compare)
nick_receiver = ''
for nick_name in nicks:
rec_list = [e.strip() for e in line.split(':')] #receiver list split about ':'
util.rec_list_splice(rec_list)
if not rec_list[1]: #index 0 will contain time 14:02
break
rec_list = util.correct_last_char_list(rec_list)
for rec in rec_list:
nick_receiver = get_nick_receiver(nick_receiver, rec, nick_to_compare, nick_name, nicks, nick_same_list)
if "," in rec_list[1]: #receiver list may of the form <Dhruv> Rohan, Ram :
flag_comma = 1
rec_list_2 = [e.strip() for e in rec_list[1].split(',')]
rec_list_2 = util.correct_last_char_list(rec_list_2)
for rec in rec_list_2:
nick_receiver = get_nick_receiver(nick_receiver, rec, nick_to_compare, nick_name, nicks, nick_same_list)
if(flag_comma == 0): #receiver list can be <Dhruv> Rohan, Hi!
rec = util.splice_find(line, ">", ", ", 1)
nick_receiver = get_nick_receiver(nick_receiver, rec, nick_to_compare, nick_name, nicks, nick_same_list)
#generating the words written by the sender
message = rec_list[1:]
no_messages += 1
correctedNickReciever = util.correct_nick_for_(nick_receiver)
if correctedNickReciever in message:
message.remove(correctedNickReciever)
lmtzr = WordNetLemmatizer()
#limit word size = 3, drop numbers.
word_list_temp = re.sub(r'\d+', '', " ".join(re.findall(r'\w{3,}', ":".join(message).replace(","," ")))).split(" ")
word_list = []
#remove punctuations
for word in word_list_temp:
word = word.lower()
word_list.append(word.replace("'",""))
word_list_lemmatized = []
try:
word_list_lemmatized = map(lmtzr.lemmatize, map(lambda x: lmtzr.lemmatize(x, 'v'), word_list))
except UnicodeDecodeError:
pass
fr = 1
for dic in user_words_dict:
if dic['sender'] == nick_sender:
dic['words'].extend(word_list_lemmatized)
fr = 0
if fr:
user_words_dict.append({'sender':nick_sender, 'words':word_list_lemmatized })
nicks_for_stop_words = []
stop_word_without_apostrophe = []
for l in nick_same_list:
nicks_for_stop_words.extend(l)
for dictonary in user_words_dict:
nicks_for_stop_words.append(dictonary['sender'])
nicks_for_stop_words.extend([x.lower() for x in nicks_for_stop_words])
for words in common_english_words.words:
stop_word_without_apostrophe.append(words.replace("'",""))
stop_words_extended = extended_stop_words(nicks_for_stop_words, stop_word_without_apostrophe)
count_vect = CountVectorizer(analyzer = 'word', stop_words=stop_words_extended, min_df = 1)
for dictonary in user_words_dict:
try:
matrix = count_vect.fit_transform(dictonary['words'])
freqs = [[word, matrix.getcol(idx).sum()] for word, idx in count_vect.vocabulary_.items()]
keywords = sorted(freqs, key = lambda x: -x[1])
total_freq = 0.0
for freq_tuple in keywords:
total_freq += freq_tuple[1]
for freq_tuple in keywords:
freq_tuple.append(round(freq_tuple[1]/float(total_freq), 5))
user_keyword_freq_dict.append({'nick':dictonary['sender'], 'keywords': keywords })
except ValueError:
pass
for data in user_keyword_freq_dict:
keywords, normal_scores = top_keywords_for_nick(user_keyword_freq_dict, data['nick'], config.KEYWORDS_THRESHOLD, config.KEYWORDS_MIN_WORDS)
if config.DEBUGGER:
print "Nick:", data['nick']
print "Keywords with normalised score > 0.01\n", keywords
print "Their Normal scores\n", normal_scores
print "\n"
if keywords:
keywords_filtered.append({'nick': data['nick'], 'keywords': keywords})
return keywords_filtered, user_keyword_freq_dict, user_words_dict, nicks_for_stop_words
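# Usage sketch for keywords (inputs are assumed to come from reader.py and
# nickTracker.py, as the docstring describes):
#   keywords_filtered, user_keyword_freq_dict, user_words_dict, stop_nicks = \
#       keywords(log_dict, nicks, nick_same_list)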
def keywords_clusters(log_dict, nicks, nick_same_list):
"""
Uses `keywords` to form clusters of words after TF-IDF weighting (with optional LSA/SVD).
Args:
log_dict (dict): dictionary of logs data created using reader.py
nicks (list): list of nicknames created using nickTracker.py
nick_same_list (list): list of same_nick names created using nickTracker.py
Returns:
None
"""
'''
AUTO TFIDF FROM JUST SENTENCES
'''
#http://scikit-learn.org/stable/auto_examples/text/document_clustering.html
#BUILDING CORPUS
keyword_dict_list, user_keyword_freq_dict, user_words_dict_list, nicks_for_stop_words = keywords(log_dict, nicks, nick_same_list)
corpus = []
def build_centroid(km):
if config.ENABLE_SVD:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
return order_centroids
for user_words_dict in user_words_dict_list:
corpus.append(" ".join(map(str,user_words_dict['words'])))
print "No. of users", len(corpus)
#TF_IDF
stop_word_without_apostrophe = []
for words in common_english_words.words:
stop_word_without_apostrophe.append(words.replace("'",""))
stop_words_extended = extended_stop_words(nicks_for_stop_words, stop_word_without_apostrophe)
vectorizer = TfidfVectorizer(max_df=0.5, min_df=2, stop_words=stop_words_extended,
use_idf=True)
print "Extracting features from the training dataset using TF-IDF"
t0 = time()
tf_idf = vectorizer.fit_transform(corpus)
print("done in %fs" % (time() - t0))
print "n_samples: %d, n_features: %d \n" % tf_idf.shape
# LSA
if config.ENABLE_SVD:
print("============USING SVD==========")
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(100) #recommended value = 100
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
tf_idf = lsa.fit_transform(tf_idf)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
if not config.ENABLE_ELBOW_METHOD_FOR_K:
# CLUSTERING
km = KMeans(n_clusters=config.NUMBER_OF_CLUSTERS, init='k-means++',
random_state=3465, max_iter=100, n_init=8)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(tf_idf)
print("done in %0.3fs" % (time() - t0))
print("Top terms per cluster:")
order_centroids = build_centroid(km)
np.set_printoptions(threshold=np.nan)
terms = vectorizer.get_feature_names()
for i in range(config.NUMBER_OF_CLUSTERS):
print("Cluster %d:" % i)
for ind in order_centroids[i, :config.SHOW_N_WORDS_PER_CLUSTER]:
print terms[ind]+"\t"+str(round(km.cluster_centers_[i][ind], 2))
print ""
else:
print "============ELBOW METHOD ============="
sum_squared_errors_list = []
avg_sum_squared_errors_list = []
for i in xrange(1, config.CHECK_K_TILL + 1):
print "\n===>> K = ", i
km = KMeans(n_clusters=i, init='k-means++', max_iter=100, n_init=8)
t0 = time()
km.fit(tf_idf)
order_centroids = build_centroid(km)
distance_matrix_all_combination = cdist(tf_idf, km.cluster_centers_, 'euclidean')
# cIdx = np.argmin(distance_matrix_all_combination,axis=1)
distance_from_nearest_centroid = np.min(distance_matrix_all_combination, axis=1)
sum_squared_errors = sum(distance_from_nearest_centroid)
avg_sum_squared_errors = sum_squared_errors/tf_idf.shape[0]
print "Sum Squared Error =", sum_squared_errors
print "Avg Sum Squared Error =", avg_sum_squared_errors
sum_squared_errors_list.append(sum_squared_errors)
avg_sum_squared_errors_list.append(avg_sum_squared_errors)
print("Top terms per cluster:")
terms = vectorizer.get_feature_names()
for i in range(i):
print("Cluster %d:" % i)
for ind in order_centroids[i, :config.SHOW_N_WORDS_PER_CLUSTER]:
print(' %s' % terms[ind])
print()
plt.plot(range(1, config.CHECK_K_TILL+1), sum_squared_errors_list, 'b*-')
# ax.plot(K[kIdx], avgWithinSS[kIdx], marker='o', markersize=12,
# markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')
plt.grid(True)
plt.xlabel('Number of clusters')
plt.ylabel('Average sum of squares')
plt.title('Elbow for KMeans clustering')
plt.show()
#NOTE RANDOM OUTPUTS BECAUSE OF RANDOM INITIALISATION
print "NOTE RANDOM OUTPUTS BECAUSE OF RANDOM INITIALISATION"
def extended_stop_words(nicks_for_stop_words, stop_word_without_apostrophe):
stop_words_extended = text.ENGLISH_STOP_WORDS.union(common_english_words.words).union(nicks_for_stop_words).union(stop_word_without_apostrophe).union(custom_stop_words.words).union(custom_stop_words.slangs)
return stop_words_extended
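# Usage sketch for keywords_clusters (same assumed inputs as keywords() above);
# it prints the clusters, or an elbow plot when config.ENABLE_ELBOW_METHOD_FOR_K is set:
#   keywords_clusters(log_dict, nicks, nick_same_list)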
|
mit
|
m2ci-msp/ematoblender
|
ematoblender/scripts/ema_io/ema_gameserver/ActionBuilder.py
|
3
|
1073
|
__author__ = "Alexander Hewer"
__email__ = "[email protected]"
class ActionBuilder:
def build_fit_action(self, coilPositions, timeStamp):
action = self.__base_action("FIT")
action["points"] = coilPositions
action["timeStamp"] = timeStamp
return action
def build_fix_speaker_action(self):
return self.__base_action("FIX_SPEAKER")
def build_reset_action(self):
return self.__base_action("RESET")
def build_set_model_indices_action(self, indices):
action = self.__base_action("SET_MODEL_INDICES")
action["indices"] = indices
return action
def build_set_settings_action(self, weights, priorSize):
action = self.__base_action("SET_SETTINGS")
action["speakerSmoothnessTerm"] = weights["speakerSmoothnessTerm"]
action["phonemeSmoothnessTerm"] = weights["phonemeSmoothnessTerm"]
action["priorSize"] = priorSize
return action
def __base_action(self, id):
action = {}
action["id"] = id
return action
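# Usage sketch for ActionBuilder (coil positions and timestamp are made-up values):
#   builder = ActionBuilder()
#   fit = builder.build_fit_action([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]], 42)
#   # -> {"id": "FIT", "points": [[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]], "timeStamp": 42}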
|
gpl-3.0
|
gingerswede/ITSecCardGame
|
src/View/Card.py
|
1
|
2437
|
'''
IDE: Eclipse (PyDev)
Python version: 2.7
Operating system: Windows 8.1
@author: Emil Carlsson
@copyright: 2015 Emil Carlsson
@license: This program is distributed under the terms of the GNU General Public License
'''
import Model
import Tkinter as tk
from Tkconstants import LEFT
from PIL import ImageTk
from tkFont import BOLD
class Card(object):
__healthPoints = None
__attackPoints = None
__defensePoints = None
__image = None
__description = None
__name = None
__root = None
def __init__(self, card, controller):
if isinstance(card, Model.Card.Card):
self.__controller = controller
self.__card = card
self.__attackPoints = card.AP
self.__defensePoints = card.DP
self.__healthPoints = card.HP
self.__description = card.Description
self.__image = card.Image
self.__name = card.Name
else:
raise TypeError
def Draw(self, root, height, width):
base = tk.Frame(root, height=height, width=width)
base.bind("<Button-1>", lambda e, card=self.__card:self.__controller.PlayCard(card))
base.pack(side=LEFT, padx=5)
title = tk.Label(base, text=self.__name)
title.config(font=("Arial", 12, BOLD))
title.bind("<Button-1>", lambda e, card=self.__card:self.__controller.PlayCard(card))
title.pack()
img = ImageTk.PhotoImage(self.__image)
self.__img = img
imgLabel = tk.Label(base, image=img)
imgLabel.bind("<Button-1>", lambda e, card=self.__card:self.__controller.PlayCard(card))
imgLabel.pack()
cardInformationText = "AP: %d | DP: %d | HP: %d" % (self.__attackPoints, self.__defensePoints, self.__healthPoints)
cardInformationFrame = tk.Label(base, text=cardInformationText, width=width)
cardInformationFrame.config(font=("Arial", 10))
cardInformationFrame.bind("<Button-1>", lambda e, card=self.__card:self.__controller.PlayCard(card))
cardInformationFrame.pack()
description = tk.Label(base, text=self.__description, wraplength=width-15, width=width)
description.config(font=("Arial", 8))
description.bind("<Button-1>", lambda e, card=self.__card:self.__controller.PlayCard(card))
description.pack()
|
gpl-3.0
|
taknevski/tensorflow-xsmm
|
tensorflow/contrib/learn/python/learn/estimators/test_data.py
|
122
|
1988
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test data utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
def get_quantile_based_buckets(feature_values, num_buckets):
quantiles = np.percentile(
np.array(feature_values),
([100 * (i + 1.) / (num_buckets + 1.) for i in range(num_buckets)]))
return list(quantiles)
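# Illustration for get_quantile_based_buckets (toy values, not from the tests):
# with num_buckets=3 the cut points are the 25th/50th/75th percentiles, e.g.
#   get_quantile_based_buckets([1., 2., 3., 4.], 3)  # -> roughly [1.75, 2.5, 3.25]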
def prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
return base.Dataset(data=iris.data[ids], target=iris.target[ids])
def iris_input_multiclass_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=(150, 1), dtype=dtypes.int32)
def iris_input_logistic_fn():
iris = prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=(100, 1), dtype=dtypes.int32)
|
apache-2.0
|
elkingtonmcb/h2o-2
|
py/testdir_ec2/test_rf_airlines.py
|
9
|
2702
|
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_rf
# RF train parameters
paramsTrainRF = {
'ntree' : 50,
'depth' : 30,
'bin_limit' : 10000,
'ignore' : 'AirTime,ArrDelay,DepDelay,CarrierDelay,IsArrDelayed',
'stat_type' : 'ENTROPY',
'out_of_bag_error_estimate': 1,
'exclusive_split_limit' : 0,
'timeoutSecs': 14800,
'iterative_cm': 0,
}
# RF test parameters
paramsScoreRF = {
# scoring requires the response_variable. It defaults to the last column, so normally
# we don't need to specify it. But put it here (and above, if used)
# in case a dataset doesn't use the last column.
'response_variable': None,
'out_of_bag_error_estimate': 0,
'timeoutSecs': 14800,
}
trainDS = {
's3bucket' : 'h2o-airlines-unpacked',
'filename' : 'allyears1987to2007.csv',
'timeoutSecs' : 14800,
'header' : 1
}
scoreDS = {
's3bucket' : 'h2o-airlines-unpacked',
'filename' : 'year2008.csv',
'timeoutSecs' : 14800,
'header' : 1
}
PARSE_TIMEOUT=14800
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def parseS3File(self, s3bucket, filename, **kwargs):
start = time.time()
parseResult = h2o_cmd.parseS3File(bucket=s3bucket, filename=filename, **kwargs)
parse_time = time.time() - start
h2o.verboseprint("py-S3 parse took {0} sec".format(parse_time))
parseResult['python_call_timer'] = parse_time
return parseResult
def loadTrainData(self):
kwargs = trainDS.copy()
trainKey = self.parseS3File(**kwargs)
return trainKey
def loadScoreData(self):
kwargs = scoreDS.copy()
scoreKey = self.parseS3File(**kwargs)
return scoreKey
def test_RF(self):
trainKey = self.loadTrainData()
kwargs = paramsTrainRF.copy()
trainResult = h2o_rf.trainRF(trainKey, **kwargs)
scoreKey = self.loadScoreData()
kwargs = paramsScoreRF.copy()
scoreResult = h2o_rf.scoreRF(scoreKey, trainResult, **kwargs)
print "\nTrain\n=========={0}".format(h2o_rf.pp_rf_result(trainResult))
print "\nScoring\n========={0}".format(h2o_rf.pp_rf_result(scoreResult))
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
|
emkailu/PAT3DEM
|
bin/p3movierun.py
|
1
|
3600
|
#!/usr/bin/env python
import os
import sys
import argparse
import time
def main():
progname = os.path.basename(sys.argv[0])
usage = progname + """ [options] <f.txt>
Run p3movie.py to process movies listed in f.txt, and the movies will be deleted to save space.
"""
args_def = {'apix':1.25, 'voltage':200, 'time':200, 'rate':8, 'save':'0 0 0', 'xsuper':7420, 'scale':1, 'delete':1}
parser = argparse.ArgumentParser()
parser.add_argument("f", nargs='*', help="specify the txt file used for p3download.py")
parser.add_argument("-a", "--apix", type=float, help="specify counting apix before scaling, by default {}".format(args_def['apix']))
parser.add_argument("-v", "--voltage", type=int, help="specify the voltage (kV), by default {}".format(args_def['voltage']))
parser.add_argument("-t", "--time", type=float, help="specify exposure time per frame in ms, by default {}".format(args_def['time']))
parser.add_argument("-r", "--rate", type=float, help="specify dose rate in e/pix/s (counting pixel, not superresolution), by default {}. if specified as 0, no filtered sum will be output".format(args_def['rate']))
parser.add_argument("-s", "--save", type=str, help="save a specified number of aligned frames, by default '{}', which means do not save. e.g., '0 19 4' means the saved movie starts from frame #0, ends at #19, in total (19-0+1)/4 = 5 frames. if 19 >= the real number of frames of the movie, skip".format(args_def['save']))
parser.add_argument("-x", "--xsuper", type=int, help="specify the x dimension of superresolution images, by default {}".format(args_def['xsuper']))
parser.add_argument("-sc", "--scale", type=float, help="specify the down scaling factor, by default {}. e.g., 1.2 means counting images will be downscaled by 1.2 times, superresolution 2.4".format(args_def['scale']))
parser.add_argument("-d", "--delete", type=int, help="delete (!!!) the raw movie (specify as 1), by default {}, which means do not delete".format(args_def['delete']))
args = parser.parse_args()
if len(sys.argv) == 1:
print "usage: " + usage
print "Please run '" + progname + " -h' for detailed options."
sys.exit(1)
# get default values
for i in args_def:
if args.__dict__[i] == None:
args.__dict__[i] = args_def[i]
# import
import pat3dem.cluster as p3c
# get the local file list based on f.txt
f = args.f[0]
f2 = f + '.p3movie'
with open(f) as f_r:
lines = f_r.readlines()
with open(f2, 'w') as f2_w:
for i in lines:
j = './' + i.replace('\\','/').split('/')[-1]
f2_w.write(j)
with open(f2) as f2_r:
lines = f2_r.readlines()
# run line #i if line #(i+1) exists, the last line will be ignored
walltime, cpu, ptile = 1, 1, 1
option = "-a {} -v {} -t {} -r {} -s '{}' -x {} -sc {} -d {}".format(args.apix, args.voltage, args.time, args.rate, args.save, args.xsuper, args.scale, args.delete)
for i, l in enumerate(lines[:-1]):
l = l.strip()
l2 = lines[i+1].strip()
while not os.path.isfile(l2):
time.sleep(60)
# submit the job, the option '-d 1' means delete the raw movie!!!
cmd = "p3movie.py {} {}".format(l, option)
basename = os.path.basename(os.path.splitext(l)[0])
p3c.ada(cmd, basename, walltime, cpu, ptile)
# process the last one
last = lines[-1].strip()
size = os.path.getsize(last)
time.sleep(30)
size_new = os.path.getsize(last)
while size_new > size:
size = size_new
time.sleep(30)
size_new = os.path.getsize(last)
cmd = "p3movie.py {} {}".format(last, option)
basename = os.path.basename(os.path.splitext(last)[0])
p3c.ada(cmd, basename, walltime, cpu, ptile)
if __name__ == '__main__':
main()
|
mit
|
yceruto/django
|
django/db/migrations/autodetector.py
|
1
|
21219
|
import importlib
import os
import re
import sys
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.models.loading import cache
from django.utils import datetime_safe
from django.utils.six.moves import input
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None):
"""
Main entry point to produce a list of applicable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes()
changes = self._arrange_for_graph(changes, graph)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def _detect_changes(self):
"""
Returns a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
"""
# We'll store migrations as lists by app names for now
self.migrations = {}
old_app_cache = self.from_state.render()
new_app_cache = self.to_state.render()
# Prepare lists of old/new model keys that we care about
# (i.e. ignoring proxy ones)
old_model_keys = [
(al, mn)
for al, mn in self.from_state.models.keys()
if not old_app_cache.get_model(al, mn)._meta.proxy
]
new_model_keys = [
(al, mn)
for al, mn in self.to_state.models.keys()
if not new_app_cache.get_model(al, mn)._meta.proxy
]
# Adding models. Phase 1 is adding models with no outward relationships.
added_models = set(new_model_keys) - set(old_model_keys)
pending_add = {}
for app_label, model_name in added_models:
model_state = self.to_state.models[app_label, model_name]
# Are there any relationships out from this model? if so, punt it to the next phase.
related_fields = []
for field in new_app_cache.get_model(app_label, model_name)._meta.fields:
if field.rel:
if field.rel.to:
related_fields.append((field.name, field.rel.to._meta.app_label.lower(), field.rel.to._meta.object_name.lower()))
if hasattr(field.rel, "through") and not field.rel.through._meta.auto_created:
related_fields.append((field.name, field.rel.through._meta.app_label.lower(), field.rel.through._meta.object_name.lower()))
if related_fields:
pending_add[app_label, model_name] = related_fields
else:
self.add_to_migration(
app_label,
operations.CreateModel(
name=model_state.name,
fields=model_state.fields,
options=model_state.options,
bases=model_state.bases,
)
)
# Phase 2 is progressively adding pending models, splitting up into two
# migrations if required.
pending_new_fks = []
while pending_add:
# Is there one we can add that has all dependencies satisfied?
satisfied = [(m, rf) for m, rf in pending_add.items() if all((al, mn) not in pending_add for f, al, mn in rf)]
if satisfied:
(app_label, model_name), related_fields = sorted(satisfied)[0]
model_state = self.to_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.CreateModel(
name=model_state.name,
fields=model_state.fields,
options=model_state.options,
bases=model_state.bases,
)
)
for field_name, other_app_label, other_model_name in related_fields:
if app_label != other_app_label:
self.add_dependency(app_label, other_app_label)
del pending_add[app_label, model_name]
# Ah well, we'll need to split one. Pick deterministically.
else:
(app_label, model_name), related_fields = sorted(pending_add.items())[0]
model_state = self.to_state.models[app_label, model_name]
# Work out the fields that need splitting out
bad_fields = dict((f, (al, mn)) for f, al, mn in related_fields if (al, mn) in pending_add)
# Create the model, without those
self.add_to_migration(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[(n, f) for n, f in model_state.fields if n not in bad_fields],
options=model_state.options,
bases=model_state.bases,
)
)
# Add the bad fields to be made in a phase 3
for field_name, (other_app_label, other_model_name) in bad_fields.items():
pending_new_fks.append((app_label, model_name, field_name, other_app_label))
del pending_add[app_label, model_name]
# Phase 3 is adding the final set of FKs as separate new migrations
for app_label, model_name, field_name, other_app_label in pending_new_fks:
model_state = self.to_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=model_state.get_field_by_name(field_name),
),
new=True,
)
if app_label != other_app_label:
self.add_dependency(app_label, other_app_label)
# Removing models
removed_models = set(old_model_keys) - set(new_model_keys)
for app_label, model_name in removed_models:
model_state = self.from_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.DeleteModel(
model_state.name,
)
)
# Changes within models
kept_models = set(old_model_keys).intersection(new_model_keys)
for app_label, model_name in kept_models:
old_model_state = self.from_state.models[app_label, model_name]
new_model_state = self.to_state.models[app_label, model_name]
# New fields
old_field_names = set(x for x, y in old_model_state.fields)
new_field_names = set(x for x, y in new_model_state.fields)
for field_name in new_field_names - old_field_names:
field = new_model_state.get_field_by_name(field_name)
# Scan to see if this is actually a rename!
field_dec = field.deconstruct()[1:]
found_rename = False
for removed_field_name in (old_field_names - new_field_names):
if old_model_state.get_field_by_name(removed_field_name).deconstruct()[1:] == field_dec:
if self.questioner.ask_rename(model_name, removed_field_name, field_name, field):
self.add_to_migration(
app_label,
operations.RenameField(
model_name=model_name,
old_name=removed_field_name,
new_name=field_name,
)
)
old_field_names.remove(removed_field_name)
new_field_names.remove(field_name)
found_rename = True
break
if found_rename:
continue
# You can't just add NOT NULL fields with no default
if not field.null and not field.has_default():
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
self.add_to_migration(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
)
)
# Old fields
for field_name in old_field_names - new_field_names:
self.add_to_migration(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
)
)
# The same fields
for field_name in old_field_names.intersection(new_field_names):
# Did the field change?
old_field_dec = old_model_state.get_field_by_name(field_name).deconstruct()
new_field_dec = new_model_state.get_field_by_name(field_name).deconstruct()
if old_field_dec != new_field_dec:
self.add_to_migration(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=new_model_state.get_field_by_name(field_name),
)
)
# unique_together changes
if old_model_state.options.get("unique_together", set()) != new_model_state.options.get("unique_together", set()):
self.add_to_migration(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=new_model_state.options.get("unique_together", set()),
)
)
# Alright, now add internal dependencies
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# Clean up dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
return self.migrations
def add_to_migration(self, app_label, operation, new=False):
migrations = self.migrations.setdefault(app_label, [])
if not migrations or new:
subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(migrations) + 1), app_label)
migrations.append(instance)
migrations[-1].operations.append(operation)
def add_dependency(self, app_label, other_app_label):
"""
Adds a dependency to app_label's newest migration on
other_app_label's latest migration.
"""
if self.migrations.get(other_app_label, []):
dependency = (other_app_label, self.migrations[other_app_label][-1].name)
else:
dependency = (other_app_label, "__first__")
self.migrations[app_label][-1].dependencies.append(dependency)
def _arrange_for_graph(self, changes, graph):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_initial"
else:
new_name = "%04i_%s" % (next_number, self.suggest_name(migration.operations))
name_map[(app_label, migration.name)] = (app_label, new_name)
migration.name = new_name
# Now fix dependencies
for app_label, migrations in changes.items():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations as
possible that are not in app_labels.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique; they
must be prefixed by a number or date.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name.lower()
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name.lower()
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name.lower() for o in ops))
return "auto"
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number is found, returns None.
"""
if re.match(r"^\d+_", name):
return int(name.split("_")[0])
return None
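# Illustration (behaviour follows directly from the two classmethods above):
#   MigrationAutodetector.parse_number("0002_auto") == 2
#   MigrationAutodetector.parse_number("initial") is None
#   MigrationAutodetector.suggest_name([operations.DeleteModel("Author")]) == "delete_author"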
class MigrationQuestioner(object):
"""
Gives the autodetector responses to questions it might have.
This base class has a built-in noninteractive mode, but the
interactive subclass is what the command-line arguments will use.
"""
def __init__(self, defaults=None):
self.defaults = defaults or {}
def ask_initial(self, app_label):
"Should we create an initial migration for the app?"
return self.defaults.get("ask_initial", False)
def ask_not_null_addition(self, field_name, model_name):
"Adding a NOT NULL field to a model"
# None means quit
return None
def ask_rename(self, model_name, old_name, new_name, field_instance):
"Was this field really renamed?"
return self.defaults.get("ask_rename", False)
class InteractiveMigrationQuestioner(MigrationQuestioner):
def __init__(self, specified_apps=set()):
self.specified_apps = specified_apps
def _boolean_input(self, question, default=None):
result = input("%s " % question)
if not result and default is not None:
return default
while len(result) < 1 or result[0].lower() not in "yn":
result = input("Please answer yes or no: ")
return result[0].lower() == "y"
def _choice_input(self, question, choices):
print(question)
for i, choice in enumerate(choices):
print(" %s) %s" % (i + 1, choice))
result = input("Select an option: ")
while True:
try:
value = int(result)
if 0 < value <= len(choices):
return value
except ValueError:
pass
result = input("Please select a valid option: ")
def ask_initial(self, app_label):
"Should we create an initial migration for the app?"
# If it was specified on the command line, definitely true
if app_label in self.specified_apps:
return True
# Otherwise, we look to see if it has a migrations module
# without any Python files in it, apart from __init__.py.
# Apps from the new app template will have these; the python
# file check will ensure we skip South ones.
models_module = cache.get_app(app_label)
migrations_import_path = "%s.migrations" % models_module.__package__
try:
migrations_module = importlib.import_module(migrations_import_path)
except ImportError:
return False
else:
filenames = os.listdir(os.path.dirname(migrations_module.__file__))
return not any(x.endswith(".py") for x in filenames if x != "__init__.py")
def ask_not_null_addition(self, field_name, model_name):
"Adding a NOT NULL field to a model"
choice = self._choice_input(
"You are trying to add a non-nullable field '%s' to %s without a default;\n" % (field_name, model_name) +
"this is not possible. Please select a fix:",
[
"Provide a one-off default now (will be set on all existing rows)",
"Quit, and let me add a default in models.py",
]
)
if choice == 2:
sys.exit(3)
else:
print("Please enter the default value now, as valid Python")
print("The datetime module is available, so you can do e.g. datetime.date.today()")
while True:
code = input(">>> ")
if not code:
print("Please enter some code, or 'exit' (with no quotes) to exit.")
elif code == "exit":
sys.exit(1)
else:
try:
return eval(code, {}, {"datetime": datetime_safe})
except (SyntaxError, NameError) as e:
print("Invalid input: %s" % e)
else:
break
def ask_rename(self, model_name, old_name, new_name, field_instance):
"Was this field really renamed?"
return self._boolean_input("Did you rename %s.%s to %s.%s (a %s)? [y/N]" % (model_name, old_name, model_name, new_name, field_instance.__class__.__name__), False)
|
bsd-3-clause
|
ehdr/luigi
|
luigi/contrib/scalding.py
|
26
|
10702
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import subprocess
import warnings
from luigi import six
import luigi.configuration
import luigi.contrib.hadoop
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
from luigi import LocalTarget
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
"""
Scalding support for Luigi.
Example configuration section in luigi.cfg::
[scalding]
# scala home directory, which should include a lib subdir with scala jars.
scala-home: /usr/share/scala
# scalding home directory, which should include a lib subdir with
# scalding-*-assembly-* jars as built from the official Twitter build script.
scalding-home: /usr/share/scalding
# provided dependencies, e.g. jars required for compiling but not executing
# scalding jobs. Currently required jars:
# org.apache.hadoop/hadoop-core/0.20.2
# org.slf4j/slf4j-log4j12/1.6.6
# log4j/log4j/1.2.15
# commons-httpclient/commons-httpclient/3.1
# commons-cli/commons-cli/1.2
# org.apache.zookeeper/zookeeper/3.3.4
scalding-provided: /usr/share/scalding/provided
# additional jars required.
scalding-libjars: /usr/share/scalding/libjars
"""
class ScaldingJobRunner(luigi.contrib.hadoop.JobRunner):
"""
JobRunner for `pyscald` commands. Used to run a ScaldingJobTask.
"""
def __init__(self):
conf = luigi.configuration.get_config()
default = os.environ.get('SCALA_HOME', '/usr/share/scala')
self.scala_home = conf.get('scalding', 'scala-home', default)
default = os.environ.get('SCALDING_HOME', '/usr/share/scalding')
self.scalding_home = conf.get('scalding', 'scalding-home', default)
self.provided_dir = conf.get(
'scalding', 'scalding-provided', os.path.join(default, 'provided'))
self.libjars_dir = conf.get(
'scalding', 'scalding-libjars', os.path.join(default, 'libjars'))
self.tmp_dir = LocalTarget(is_tmp=True)
def _get_jars(self, path):
return [os.path.join(path, j) for j in os.listdir(path)
if j.endswith('.jar')]
def get_scala_jars(self, include_compiler=False):
lib_dir = os.path.join(self.scala_home, 'lib')
jars = [os.path.join(lib_dir, 'scala-library.jar')]
# additional jar for scala 2.10 only
reflect = os.path.join(lib_dir, 'scala-reflect.jar')
if os.path.exists(reflect):
jars.append(reflect)
if include_compiler:
jars.append(os.path.join(lib_dir, 'scala-compiler.jar'))
return jars
def get_scalding_jars(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
return self._get_jars(lib_dir)
def get_scalding_core(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
for j in os.listdir(lib_dir):
if j.startswith('scalding-core-'):
p = os.path.join(lib_dir, j)
logger.debug('Found scalding-core: %s', p)
return p
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding-core.')
def get_provided_jars(self):
return self._get_jars(self.provided_dir)
def get_libjars(self):
return self._get_jars(self.libjars_dir)
def get_tmp_job_jar(self, source):
job_name = os.path.basename(os.path.splitext(source)[0])
return os.path.join(self.tmp_dir.path, job_name + '.jar')
def get_build_dir(self, source):
build_dir = os.path.join(self.tmp_dir.path, 'build')
return build_dir
def get_job_class(self, source):
# find name of the job class
# usually the one that matches file name or last class that extends Job
job_name = os.path.splitext(os.path.basename(source))[0]
package = None
job_class = None
for l in open(source).readlines():
p = re.search(r'package\s+([^\s\(]+)', l)
if p:
package = p.groups()[0]
p = re.search(r'class\s+([^\s\(]+).*extends\s+.*Job', l)
if p:
job_class = p.groups()[0]
if job_class == job_name:
break
if job_class:
if package:
job_class = package + '.' + job_class
logger.debug('Found scalding job class: %s', job_class)
return job_class
else:
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding job class.')
def build_job_jar(self, job):
job_jar = job.jar()
if job_jar:
if not os.path.exists(job_jar):
logger.error("Can't find jar: %s, full path %s", job_jar, os.path.abspath(job_jar))
raise Exception("job jar does not exist")
if not job.job_class():
logger.error("Undefined job_class()")
raise Exception("Undefined job_class()")
return job_jar
job_src = job.source()
if not job_src:
logger.error("Both source() and jar() undefined")
raise Exception("Both source() and jar() undefined")
if not os.path.exists(job_src):
logger.error("Can't find source: %s, full path %s", job_src, os.path.abspath(job_src))
raise Exception("job source does not exist")
job_src = job.source()
job_jar = self.get_tmp_job_jar(job_src)
build_dir = self.get_build_dir(job_src)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
classpath = ':'.join(filter(None,
self.get_scalding_jars() +
self.get_provided_jars() +
self.get_libjars() +
job.extra_jars()))
scala_cp = ':'.join(self.get_scala_jars(include_compiler=True))
# compile scala source
arglist = ['java', '-cp', scala_cp, 'scala.tools.nsc.Main',
'-classpath', classpath,
'-d', build_dir, job_src]
logger.info('Compiling scala source: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
# build job jar file
arglist = ['jar', 'cf', job_jar, '-C', build_dir, '.']
logger.info('Building job jar: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
return job_jar
def run_job(self, job, tracking_url_callback=None):
if tracking_url_callback is not None:
warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
"used instead.", DeprecationWarning)
job_jar = self.build_job_jar(job)
jars = [job_jar] + self.get_libjars() + job.extra_jars()
scalding_core = self.get_scalding_core()
libjars = ','.join(filter(None, jars))
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', scalding_core, '-libjars', libjars]
arglist += ['-D%s' % c for c in job.jobconfs()]
job_class = job.job_class() or self.get_job_class(job.source())
arglist += [job_class, '--hdfs']
# scalding does not parse arguments with '=' properly
arglist += ['--name', job.task_id.replace('=', ':')]
(tmp_files, job_args) = luigi.contrib.hadoop_jar.fix_paths(job)
arglist += job_args
env = os.environ.copy()
jars.append(scalding_core)
hadoop_cp = ':'.join(filter(None, jars))
env['HADOOP_CLASSPATH'] = hadoop_cp
logger.info("Submitting Hadoop job: HADOOP_CLASSPATH=%s %s",
hadoop_cp, subprocess.list2cmdline(arglist))
luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url, env=env)
for a, b in tmp_files:
a.move(b)
class ScaldingJobTask(luigi.contrib.hadoop.BaseHadoopJobTask):
"""
A job task for Scalding that defines a scala source and (optional) main method.
requires() should return a dictionary where the keys are Scalding argument
names and values are sub tasks or lists of subtasks.
For example:
.. code-block:: python
{'input1': A, 'input2': C} => --input1 <Aoutput> --input2 <Coutput>
{'input1': [A, B], 'input2': [C]} => --input1 <Aoutput> <Boutput> --input2 <Coutput>
"""
def relpath(self, current_file, rel_path):
"""
Compute path given current file and relative path.
"""
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path
def source(self):
"""
Path to the scala source for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def jar(self):
"""
Path to the jar file for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def extra_jars(self):
"""
Extra jars for building and running this Scalding Job.
"""
return []
def job_class(self):
"""
optional main job class for this Scalding Job.
"""
return None
def job_runner(self):
return ScaldingJobRunner()
def atomic_output(self):
"""
If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes.
"""
return True
def requires(self):
return {}
def job_args(self):
"""
Extra arguments to pass to the Scalding job.
"""
return []
def args(self):
"""
Returns an array of args to pass to the job.
"""
arglist = []
for k, v in six.iteritems(self.requires_hadoop()):
arglist.append('--' + k)
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist
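# A minimal ScaldingJobTask subclass sketch (paths and the upstream task are
# assumptions, not part of this module):
#   class WordCount(ScaldingJobTask):
#       def source(self):
#           return self.relpath(__file__, 'scala/WordCount.scala')
#       def requires(self):
#           return {'input': SomeInputTask()}  # becomes: --input <SomeInputTask output path>
#       def output(self):
#           return luigi.contrib.hdfs.HdfsTarget('/tmp/wordcount.tsv')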
|
apache-2.0
|
ClustyROM/Galaxy_Note
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
944
|
1869
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
pass
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if for_comm is not None:
if common_comm != for_comm:
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16d\n" % (id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20d %10d\n" % (ret, val),
|
gpl-2.0
|
pprett/statsmodels
|
statsmodels/stats/outliers_influence.py
|
1
|
27029
|
# -*- coding: utf-8 -*-
"""Influence and Outlier Measures
Created on Sun Jan 29 11:16:09 2012
Author: Josef Perktold
License: BSD-3
"""
from collections import defaultdict
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.decorators import cache_readonly
#influence measures
def reset_ramsey(res, degree=5):
'''Ramsey's RESET specification test for linear models
This is a general specification test, for additional non-linear effects
in a model.
Notes
-----
The test fits an auxiliary OLS regression where the design matrix, exog,
is augmented by powers 2 to degree of the fitted values. Then it performs
an F-test whether these additional terms are significant.
If the p-value of the f-test is below a threshold, e.g. 0.1, then this
indicates that there might be additional non-linear effects in the model
and that the linear model is mis-specified.
References
----------
http://en.wikipedia.org/wiki/Ramsey_RESET_test
'''
order = degree + 1
k_vars = res.model.exog.shape[1]
#vander without constant and x:
y_fitted_vander = np.vander(res.fittedvalues, order)[:, :-2] #drop constant
exog = np.column_stack((res.model.exog, y_fitted_vander))
res_aux = OLS(res.model.endog, exog).fit()
#r_matrix = np.eye(degree, exog.shape[1], k_vars)
r_matrix = np.eye(degree-1, exog.shape[1], k_vars)
#df1 = degree - 1
#df2 = exog.shape[0] - degree - res.df_model (without constant)
return res_aux.f_test(r_matrix) #, r_matrix, res_aux
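# Usage sketch for reset_ramsey (res is assumed to be a fitted OLS results instance):
#   res = OLS(endog, exog).fit()
#   ftest = reset_ramsey(res, degree=5)  # F-test on powers 2..degree of the fitted values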
def variance_inflation_factor(exog, exog_idx):
'''variance inflation factor, VIF, for one exogenous variable
The variance inflation factor is a measure for the increase of the
variance of the parameter estimates if an additional variable, given by
exog_idx is added to the linear regression. It is a measure for
multicollinearity of the design matrix, exog.
One recommendation is that if VIF is greater than 5, then the explanatory
variable given by exog_idx is highly collinear with the other explanatory
variables, and the parameter estimates will have large standard errors
because of this.
Parameters
----------
exog : ndarray, (nobs, k_vars)
design matrix with all explanatory variables, as for example used in
regression
exog_idx : int
index of the exogenous variable in the columns of exog
Returns
-------
vif : float
variance inflation factor
Notes
-----
This function does not save the auxiliary regression.
See Also
--------
xxx : class for regression diagnostics TODO: doesn't exist yet
References
----------
http://en.wikipedia.org/wiki/Variance_inflation_factor
'''
k_vars = exog.shape[1]
x_i = exog[:, exog_idx]
mask = np.arange(k_vars) != exog_idx
x_noti = exog[:, mask]
r_squared_i = OLS(x_i, x_noti).fit().rsquared
vif = 1. / (1. - r_squared_i)
return vif
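# Usage sketch for variance_inflation_factor (exog is assumed to be a design matrix
# that already includes a constant column):
#   vifs = [variance_inflation_factor(exog, i) for i in range(exog.shape[1])]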
class OLSInfluence(object):
'''class to calculate outlier and influence measures for OLS result
Parameters
----------
results : Regression Results instance
currently assumes the results are from an OLS regression
Notes
-----
One part of the results can be calculated without any auxiliary regression
(some of which have the `_internal` postfix in the name). Other statistics
require a leave-one-observation-out (LOOO) auxiliary regression, and will be
slower (mainly results with the `_external` postfix in the name).
For the auxiliary LOOO regressions, only the required results are stored.
Using the LOO measures is currently only recommended if the data set
is not too large. One possible approach for LOOO measures would be to
identify possible problem observations with the _internal measures, and
then run the leave-one-observation-out only with observations that are
possible outliers. (However, this is not yet available in an automated way.)
This should be extended to general least squares.
The leave-one-variable-out (LOVO) auxiliary regression are currently not
used.
'''
def __init__(self, results):
#check which model is allowed
try:
self.results = results._results # don't use wrapped results
except: # we got unwrapped results
self.results = results
self.nobs, self.k_vars = results.model.exog.shape
self.endog = results.model.endog
self.exog = results.model.exog
self.model_class = results.model.__class__
self.sigma_est = np.sqrt(results.mse_resid)
self.aux_regression_exog = {}
self.aux_regression_endog = {}
@cache_readonly
def hat_matrix_diag(self):
'''(cached attribute) diagonal of the hat_matrix for OLS
Notes
-----
temporarily calculated here, this should go to model class
'''
return (self.exog * self.results.model.pinv_wexog.T).sum(1)
@cache_readonly
def resid_press(self):
'''(cached attribute) PRESS residuals
'''
hii = self.hat_matrix_diag
return self.results.resid / (1 - hii)
@cache_readonly
def influence(self):
'''(cached attribute) influence measure
matches the influence measure that gretl reports
u * h / (1 - h)
where u are the residuals and h is the diagonal of the hat_matrix
'''
hii = self.hat_matrix_diag
return self.results.resid * hii / (1 - hii)
@cache_readonly
def hat_diag_factor(self):
'''(cached attribute) factor of diagonal of hat_matrix used in influence
this might be useful for internal reuse
h / (1 - h)
'''
hii = self.hat_matrix_diag
return hii / (1 - hii)
@cache_readonly
def ess_press(self):
'''(cached attribute) error sum of squares of PRESS residuals
'''
return np.dot(self.resid_press, self.resid_press)
@cache_readonly
def resid_studentized_internal(self):
'''(cached attribute) studentized residuals using variance from OLS
this uses sigma from original estimate
does not require leave one out loop
'''
return self.get_resid_studentized_external(sigma=None)
#return self.results.resid / self.sigma_est
@cache_readonly
def resid_studentized_external(self):
'''(cached attribute) studentized residuals using LOOO variance
this uses sigma from leave-one-out estimates
requires leave one out loop for observations
'''
sigma_looo = np.sqrt(self.sigma2_not_obsi)
return self.get_resid_studentized_external(sigma=sigma_looo)
def get_resid_studentized_external(self, sigma=None):
'''calculate studentized residuals
Parameters
----------
sigma : None or float
estimate of the standard deviation of the residuals. If None, then
the estimate from the regression results is used.
Returns
-------
stzd_resid : ndarray
studentized residuals
Notes
-----
studentized residuals are defined as ::
resid / sigma / np.sqrt(1 - hii)
where resid are the residuals from the regression, sigma is an
estimate of the standard deviation of the residuals, and hii is the
diagonal of the hat_matrix.
'''
hii = self.hat_matrix_diag
if sigma is None:
sigma2_est = self.results.mse_resid
#can be replaced by different estimators of sigma
sigma = np.sqrt(sigma2_est)
return self.results.resid / sigma / np.sqrt(1 - hii)
@cache_readonly
def dffits_internal(self):
'''(cached attribute) dffits measure for influence of an observation
based on resid_studentized_internal
uses original results, no nobs loop
'''
#TODO: do I want to use different sigma estimate in
# resid_studentized_external
# -> move definition of sigma_error to the __init__
hii = self.hat_matrix_diag
dffits_ = self.resid_studentized_internal * np.sqrt(hii / (1 - hii))
dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs)
return dffits_, dffits_threshold
@cache_readonly
def dffits(self):
'''(cached attribute) dffits measure for influence of an observation
based on resid_studentized_external,
uses results from leave-one-observation-out loop
It is recommended that observations with dffits larger than a
threshold of 2 sqrt(k / n), where k is the number of parameters, should
be investigated.
Returns
-------
dffits: float
dffits_threshold : float
References
----------
`Wikipedia <http://en.wikipedia.org/wiki/DFFITS>`_
'''
#TODO: do I want to use different sigma estimate in
# resid_studentized_external
# -> move definition of sigma_error to the __init__
hii = self.hat_matrix_diag
dffits_ = self.resid_studentized_external * np.sqrt(hii / (1 - hii))
dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs)
return dffits_, dffits_threshold
@cache_readonly
def dfbetas(self):
'''(cached attribute) dfbetas
uses results from leave-one-observation-out loop
'''
dfbetas = self.results.params - self.params_not_obsi#[None,:]
dfbetas /= np.sqrt(self.sigma2_not_obsi[:,None])
dfbetas /= np.sqrt(np.diag(self.results.normalized_cov_params))
return dfbetas
@cache_readonly
def sigma2_not_obsi(self):
'''(cached attribute) error variance for all LOOO regressions
This is 'mse_resid' from each auxiliary regression.
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['mse_resid'])
@cache_readonly
def params_not_obsi(self):
'''(cached attribute) parameter estimates for all LOOO regressions
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['params'])
@cache_readonly
def det_cov_params_not_obsi(self):
'''(cached attribute) determinant of cov_params of all LOOO regressions
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['det_cov_params'])
@cache_readonly
def cooks_distance(self):
'''(cached attribute) Cooks distance
uses original results, no nobs loop
'''
hii = self.hat_matrix_diag
#Eubank p.93, 94
cooks_d2 = self.resid_studentized_internal**2 / self.k_vars
cooks_d2 *= hii / (1 - hii)
from scipy import stats
#alpha = 0.1
#print stats.f.isf(1-alpha, n_params, res.df_modelwc)
pvals = stats.f.sf(cooks_d2, self.k_vars, self.results.df_resid)
return cooks_d2, pvals
@cache_readonly
def cov_ratio(self):
'''(cached attribute) covariance ratio between LOOO and original
This uses determinant of the estimate of the parameter covariance
from leave-one-out estimates.
requires leave one out loop for observations
'''
#don't use in-place division (/=) because then we would change the original
cov_ratio = (self.det_cov_params_not_obsi
/ np.linalg.det(self.results.cov_params()))
return cov_ratio
@cache_readonly
def resid_var(self):
'''(cached attribute) estimate of variance of the residuals
::
sigma2 = sigma2_OLS * (1 - hii)
where hii is the diagonal of the hat matrix
'''
#TODO:check if correct outside of ols
return self.results.mse_resid * (1 - self.hat_matrix_diag)
@cache_readonly
def resid_std(self):
'''(cached attribute) estimate of standard deviation of the residuals
See Also
--------
resid_var
'''
return np.sqrt(self.resid_var)
def _ols_xnoti(self, drop_idx, endog_idx='endog', store=True):
'''regression results from LOVO auxiliary regression with cache
The result instances are stored, which could use a large amount of
memory if the datasets are large. There are too many combinations to
store them all, except for small problems.
Parameters
----------
drop_idx : int
index of exog that is dropped from the regression
endog_idx : 'endog' or int
If 'endog', then the endogenous variable of the result instance
is regressed on the exogenous variables, excluding the one at
drop_idx. If endog_idx is an integer, then the exog with that
index is regressed with OLS on all other exogenous variables.
(The latter is the auxiliary regression for the variance inflation
factor.)
this needs more thought, memory versus speed
not yet used in any other parts, not sufficiently tested
'''
#reverse the structure, access store, if fail calculate ?
#this creates keys in store even if store = false ! bug
if endog_idx == 'endog':
stored = self.aux_regression_endog
if drop_idx in stored:
return stored[drop_idx]
x_i = self.results.model.endog
else:
#nested dictionary
stored = self.aux_regression_exog.setdefault(endog_idx, {})
if drop_idx in stored:
    return stored[drop_idx]
x_i = self.exog[:, endog_idx]
mask = np.arange(self.k_vars) != drop_idx
x_noti = self.exog[:, mask]
res = OLS(x_i, x_noti).fit()
if store:
stored[drop_idx] = res
return res
def _get_drop_vari(self, attributes):
'''regress endog on exog without one of the variables
This uses a k_vars loop, only attributes of the OLS instance are stored.
Parameters
----------
attributes : list of strings
These are the names of the attributes of the auxiliary OLS results
instance that are stored and returned.
not yet used
'''
from statsmodels.sandbox.tools.cross_val import LeaveOneOut
endog = self.results.model.endog
exog = self.exog
cv_iter = LeaveOneOut(self.k_vars)
res_loo = defaultdict(list)
for inidx, outidx in cv_iter:
for att in attributes:
res_i = self.model_class(endog, exog[:,inidx]).fit()
res_loo[att].append(getattr(res_i, att))
return res_loo
@cache_readonly
def _res_looo(self):
'''collect required results from the LOOO loop
all results will be attached.
currently only 'params', 'mse_resid', 'det_cov_params' are stored
regresses endog on exog dropping one observation at a time
this uses a nobs loop, only attributes of the OLS instance are stored.
'''
from statsmodels.sandbox.tools.cross_val import LeaveOneOut
get_det_cov_params = lambda res: np.linalg.det(res.cov_params())
endog = self.endog
exog = self.exog
params = np.zeros_like(exog)
mse_resid = np.zeros_like(endog)
det_cov_params = np.zeros_like(endog)
cv_iter = LeaveOneOut(self.nobs)
for inidx, outidx in cv_iter:
res_i = self.model_class(endog[inidx], exog[inidx]).fit()
params[outidx] = res_i.params
mse_resid[outidx] = res_i.mse_resid
det_cov_params[outidx] = get_det_cov_params(res_i)
return dict(params=params, mse_resid=mse_resid,
det_cov_params=det_cov_params)
def summary_frame(self):
"""
Creates a DataFrame with all available influence results.
Returns
-------
frame : DataFrame
A DataFrame with all results.
Notes
-----
The resultant DataFrame contains six variables in addition to the
DFBETAS. These are:
* cooks_d : Cook's Distance defined in `Influence.cooks_distance`
* standard_resid : Standardized residuals defined in
`Influence.resid_studentized_internal`
* hat_diag : The diagonal of the projection, or hat, matrix defined in
`Influence.hat_matrix_diag`
* dffits_internal : DFFITS statistics using internally Studentized
residuals defined in `Influence.dffits_internal`
* dffits : DFFITS statistics using externally Studentized residuals
defined in `Influence.dffits`
* student_resid : Externally Studentized residuals defined in
`Influence.resid_studentized_external`
"""
from pandas import DataFrame
# row and column labels
data = self.results.model._data
row_labels = data.row_labels
beta_labels = ['dfb_' + i for i in data.xnames]
# grab the results
summary_data = DataFrame(dict(
cooks_d = self.cooks_distance[0],
standard_resid = self.resid_studentized_internal,
hat_diag = self.hat_matrix_diag,
dffits_internal = self.dffits_internal[0],
student_resid = self.resid_studentized_external,
dffits = self.dffits[0],
),
index = row_labels)
#NOTE: if we don't give columns, order of above will be arbitrary
dfbeta = DataFrame(self.dfbetas, columns=beta_labels,
index=row_labels)
return dfbeta.join(summary_data)
def summary_table(self, float_fmt="%6.3f"):
'''create a summary table with all influence and outlier measures
This currently does not distinguish between statistics that can be
calculated from the original regression results and those for which a
leave-one-observation-out loop is needed.
Returns
-------
res : SimpleTable instance
SimpleTable instance with the results, can be printed
Notes
-----
This also attaches table_data to the instance.
'''
#print self.dfbetas
# table_raw = [ np.arange(self.nobs),
# self.endog,
# self.fittedvalues,
# self.cooks_distance(),
# self.resid_studentized_internal,
# self.hat_matrix_diag,
# self.dffits_internal,
# self.resid_studentized_external,
# self.dffits,
# self.dfbetas
# ]
table_raw = [ ('obs', np.arange(self.nobs)),
('endog', self.endog),
('fitted\nvalue', self.results.fittedvalues),
("Cook's\nd", self.cooks_distance[0]),
("student.\nresidual", self.resid_studentized_internal),
('hat diag', self.hat_matrix_diag),
('dffits \ninternal', self.dffits_internal[0]),
("ext.stud.\nresidual", self.resid_studentized_external),
('dffits', self.dffits[0]),
('dfbeta\nslope', self.dfbetas[:,1]) #skip needs to partially unravel
]
colnames, data = zip(*table_raw) #unzip
data = np.column_stack(data)
self.table_data = data
from statsmodels.iolib.table import SimpleTable, default_html_fmt
from statsmodels.iolib.tableformatting import fmt_base
from copy import deepcopy
fmt = deepcopy(fmt_base)
fmt_html = deepcopy(default_html_fmt)
fmt['data_fmts'] = ["%4d"] + [float_fmt] * (data.shape[1] - 1)
#fmt_html['data_fmts'] = fmt['data_fmts']
return SimpleTable(data, headers=colnames, txt_fmt=fmt,
html_fmt=fmt_html)
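#Added usage sketch (not part of the original module): how OLSInfluence might
#be used on an OLS results instance such as `res_ols` from the __main__ block
#below. summary_frame additionally requires pandas.
def _example_ols_influence(res):
    import numpy as np
    infl = OLSInfluence(res)
    cooks_d, cooks_pvals = infl.cooks_distance          #no LOOO loop needed
    dffits_, dffits_threshold = infl.dffits             #runs the LOOO loop
    flagged = np.abs(dffits_) > dffits_threshold
    return infl.summary_frame(), flagged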
def summary_table(res, alpha=0.05):
'''generate summary table of outlier and influence similar to SAS
Parameters
----------
alpha : float
significance level for confidence interval
Returns
-------
st : SimpleTable instance
table with results that can be printed
data : ndarray
calculated measures and statistics for the table
ss2 : list of strings
column_names for table (Note: rows of table are observations)
'''
from scipy import stats
from statsmodels.sandbox.regression.predstd import wls_prediction_std
infl = Influence(res)
#standard error for predicted mean
#Note: using hat_matrix only works for fitted values
predict_mean_se = np.sqrt(infl.hat_matrix_diag*res.mse_resid)
tppf = stats.t.isf(alpha/2., res.df_resid)
predict_mean_ci = np.column_stack([
res.fittedvalues - tppf * predict_mean_se,
res.fittedvalues + tppf * predict_mean_se])
#standard error for predicted observation
predict_se, predict_ci_low, predict_ci_upp = wls_prediction_std(res)
predict_ci = np.column_stack((predict_ci_low, predict_ci_upp))
#standard deviation of residual
resid_se = np.sqrt(res.mse_resid * (1 - infl.hat_matrix_diag))
table_sm = np.column_stack([
np.arange(res.nobs) + 1,
res.model.endog,
res.fittedvalues,
predict_mean_se,
predict_mean_ci[:,0],
predict_mean_ci[:,1],
predict_ci[:,0],
predict_ci[:,1],
res.resid,
resid_se,
infl.resid_studentized_internal,
infl.cooks_distance[0]
])
#colnames, data = zip(*table_raw) #unzip
data = table_sm
ss2 = ['Obs', 'Dep Var\nPopulation', 'Predicted\nValue', 'Std Error\nMean Predict', 'Mean ci\n95% low', 'Mean ci\n95% upp', 'Predict ci\n95% low', 'Predict ci\n95% upp', 'Residual', 'Std Error\nResidual', 'Student\nResidual', "Cook's\nD"]
colnames = ss2
#self.table_data = data
#data = np.column_stack(data)
from statsmodels.iolib.table import SimpleTable, default_html_fmt
from statsmodels.iolib.tableformatting import fmt_base
from copy import deepcopy
fmt = deepcopy(fmt_base)
fmt_html = deepcopy(default_html_fmt)
fmt['data_fmts'] = ["%4d"] + ["%6.3f"] * (data.shape[1] - 1)
#fmt_html['data_fmts'] = fmt['data_fmts']
st = SimpleTable(data, headers=colnames, txt_fmt=fmt,
html_fmt=fmt_html)
return st, data, ss2
if __name__ == '__main__':
import statsmodels.api as sm
data = np.array('''\
64 57 8
71 59 10
53 49 6
67 62 11
55 51 8
58 50 7
77 55 10
57 48 9
56 42 10
51 42 6
76 61 12
68 57 9'''.split(), float).reshape(-1,3)
varnames = 'weight height age'.split()
endog = data[:,0]
exog = sm.add_constant(data[:,2], prepend=True)
res_ols = sm.OLS(endog, exog).fit()
hh = (res_ols.model.exog * res_ols.model.pinv_wexog.T).sum(1)
x = res_ols.model.exog
hh_check = np.diag(np.dot(x, np.dot(res_ols.model.normalized_cov_params, x.T)))
from numpy.testing import assert_almost_equal
assert_almost_equal(hh, hh_check, decimal=13)
res = res_ols #alias
#http://en.wikipedia.org/wiki/PRESS_statistic
#predicted residuals, leave one out predicted residuals
resid_press = res.resid / (1-hh)
ess_press = np.dot(resid_press, resid_press)
sigma2_est = res.mse_resid #can be replaced by different estimators of sigma
sigma_est = np.sqrt(sigma2_est)
resid_studentized = res.resid / sigma_est / np.sqrt(1 - hh)
#http://en.wikipedia.org/wiki/DFFITS:
dffits = resid_studentized * np.sqrt(hh / (1 - hh))
nobs, k_vars = res.model.exog.shape
#Belsley, Kuh and Welsch (1980) suggest a threshold for abs(DFFITS)
dffits_threshold = 2 * np.sqrt(k_vars * 1. / nobs) #float division, as in OLSInfluence
res_ols.df_modelwc = res_ols.df_model + 1
n_params = res.model.exog.shape[1]
#http://en.wikipedia.org/wiki/Cook%27s_distance
cooks_d = res.resid**2 / sigma2_est / res_ols.df_modelwc * hh / (1 - hh)**2
#or
#Eubank p.93, 94
cooks_d2 = resid_studentized**2 / res_ols.df_modelwc * hh / (1 - hh)
#threshold if normal, also Wikipedia
from scipy import stats
alpha = 0.1
#df looks wrong
print stats.f.isf(1-alpha, n_params, res.df_resid)
print stats.f.sf(cooks_d, n_params, res.df_resid)
print 'Cooks Distance'
print cooks_d
print cooks_d2
doplot = 0
if doplot:
import matplotlib.pyplot as plt
fig = plt.figure()
# ax = fig.add_subplot(3,1,1)
# plt.plot(andrew_results.weights, 'o', label='rlm weights')
# plt.legend(loc='lower left')
ax = fig.add_subplot(3,1,2)
plt.plot(cooks_d, 'o', label="Cook's distance")
plt.legend(loc='upper left')
ax2 = fig.add_subplot(3,1,3)
plt.plot(resid_studentized, 'o', label='studentized_resid')
plt.plot(dffits, 'o', label='DFFITS')
leg = plt.legend(loc='lower left', fancybox=True)
leg.get_frame().set_alpha(0.5) #, fontsize='small')
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize='small') # the legend text fontsize
print reset_ramsey(res, degree=3)
#note, constant in last column
for i in range(1):
print variance_inflation_factor(res.model.exog, i)
infl = Influence(res_ols)
print infl.resid_studentized_external
print infl.resid_studentized_internal
print infl.summary_table()
print summary_table(res, alpha=0.05)[0]
'''
>>> res.resid
array([ 4.28571429, 4. , 0.57142857, -3.64285714,
-4.71428571, 1.92857143, 10. , -6.35714286,
-11. , -1.42857143, 1.71428571, 4.64285714])
>>> infl.hat_matrix_diag
array([ 0.10084034, 0.11764706, 0.28571429, 0.20168067, 0.10084034,
0.16806723, 0.11764706, 0.08403361, 0.11764706, 0.28571429,
0.33613445, 0.08403361])
>>> infl.resid_press
array([ 4.76635514, 4.53333333, 0.8 , -4.56315789,
-5.24299065, 2.31818182, 11.33333333, -6.94036697,
-12.46666667, -2. , 2.58227848, 5.06880734])
>>> infl.ess_press
465.98646628086374
'''
|
bsd-3-clause
|
caseyrollins/osf.io
|
osf/migrations/0095_reset_osf_abstractprovider_licenses_acceptable_id_seq.py
|
16
|
1189
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-12 20:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunSQL(
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider";
""",
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
"""
),
]
|
apache-2.0
|
lyft/incubator-airflow
|
airflow/operators/s3_to_redshift_operator.py
|
4
|
1190
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.operators.s3_to_redshift`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.operators.s3_to_redshift import S3ToRedshiftTransfer # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.s3_to_redshift`.",
DeprecationWarning, stacklevel=2
)
|
apache-2.0
|
billzorn/mtgencode
|
lib/cardlib.py
|
1
|
41779
|
# card representation
import re
import random
import utils
import transforms
from manalib import Manacost, Manatext
# Some text prettification stuff that people may not have installed
try:
from titlecase import titlecase
except ImportError:
def titlecase(s):
return s.title()
try:
import textwrap
import nltk.data
sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# This could be made smarter - MSE will capitalize for us after :,
# but we still need to capitalize the first English component of an activation
# cost that starts with symbols, such as {2U}, *R*emove a +1/+1 counter from @: etc.
def cap(s):
return s[:1].capitalize() + s[1:]
# This crazy thing is actually invoked as an unpass, so newlines are still
# encoded.
def sentencecase(s):
s = s.replace(utils.x_marker, utils.reserved_marker)
lines = s.split(utils.newline)
clines = []
for line in lines:
if line:
sentences = sent_tokenizer.tokenize(line)
clines += [' '.join([cap(sent) for sent in sentences])]
return utils.newline.join(clines).replace(utils.reserved_marker, utils.x_marker)
except ImportError:
# non-nltk implementation provided by PAK90
def uppercaseNewLineAndFullstop(string):
# ok, let's capitalize every letter after a full stop and newline.
# first let's find all indices of '.' and '\n'
indices = [0] # initialise with 0, since we always want to capitalise the first letter.
newlineIndices = [0] # also need to keep track of pure newlines (for planeswalkers).
for i in range (len(string)):
if string[i] == '\\':
indices.append(i + 1) # we want the index of the letter after the \n, so add one.
newlineIndices.append(i + 1)
if string[i] == '.' or string[i] == "=": # also handle the choice bullets.
indices.append(i + 2) # we want the index of the letter after the ., so we need to count the space as well.
indexSet = set(indices) # convert it to a set for the next part; the capitalisation.
return "".join(c.upper() if i in indexSet else c for i, c in enumerate(string))
def sentencecase(s):
return uppercaseNewLineAndFullstop(s)
# These are used later to determine what the fields of the Card object are called.
# Define them here because they have nothing to do with the actual format.
field_name = 'name'
field_rarity = 'rarity'
field_cost = 'cost'
field_supertypes = 'supertypes'
field_types = 'types'
field_subtypes = 'subtypes'
field_loyalty = 'loyalty'
field_pt = 'pt'
field_text = 'text'
field_other = 'other' # it's kind of a pseudo-field
# Import the labels, because these do appear in the encoded text.
field_label_name = utils.field_label_name
field_label_rarity = utils.field_label_rarity
field_label_cost = utils.field_label_cost
field_label_supertypes = utils.field_label_supertypes
field_label_types = utils.field_label_types
field_label_subtypes = utils.field_label_subtypes
field_label_loyalty = utils.field_label_loyalty
field_label_pt = utils.field_label_pt
field_label_text = utils.field_label_text
fieldnames = [
field_name,
field_rarity,
field_cost,
field_supertypes,
field_types,
field_subtypes,
field_loyalty,
field_pt,
field_text,
]
# legacy
fmt_ordered_old = [
field_name,
field_supertypes,
field_types,
field_loyalty,
field_subtypes,
field_rarity,
field_pt,
field_cost,
field_text,
]
fmt_ordered_norarity = [
field_name,
field_supertypes,
field_types,
field_loyalty,
field_subtypes,
field_pt,
field_cost,
field_text,
]
# standard
fmt_ordered_default = [
field_types,
field_supertypes,
field_subtypes,
field_loyalty,
field_pt,
field_text,
field_cost,
field_rarity,
field_name,
]
# minor variations
fmt_ordered_noname = [
field_types,
field_supertypes,
field_subtypes,
field_loyalty,
field_pt,
field_text,
field_cost,
field_rarity,
]
fmt_ordered_named = [
field_name,
field_types,
field_supertypes,
field_subtypes,
field_loyalty,
field_pt,
field_text,
field_cost,
field_rarity,
]
fmt_labeled_default = {
field_name : field_label_name,
field_rarity : field_label_rarity,
field_cost : field_label_cost,
field_supertypes : field_label_supertypes,
field_types : field_label_types,
field_subtypes : field_label_subtypes,
field_loyalty : field_label_loyalty,
field_pt : field_label_pt,
field_text : field_label_text,
}
# sanity test if a card's fields look plausible
def fields_check_valid(fields):
# all cards must have a name and a type
if not field_name in fields:
return False
if not field_types in fields:
return False
# creatures and vehicles have p/t, other things don't
iscreature = False
for idx, value in fields[field_types]:
if 'creature' in value:
iscreature = True
elif field_subtypes in fields:
for idx, value in fields[field_subtypes]:
if 'vehicle' in value:
iscreature = True
if iscreature:
return field_pt in fields
else:
return not field_pt in fields
# These functions take a bunch of source data in some format and turn
# it into nicely labeled fields that we know how to initialize a card from.
# Both return a dict that maps field names to lists of possible values,
# paired with the index that we read that particular field value from.
# So, {fieldname : [(idx, value), (idx, value)...]}.
# Usually we want these lists to be length 1, but you never know.
# Of course, to make things nice and simple, that dict is the third element
# of a triple whose first two elements report parsing success and validity.
# This whole thing assumes the json format of mtgjson.com.
# Here's a brief list of relevant fields:
# name - string
# names - list (used for split, flip, and double-faced)
# manaCost - string
# cmc - number
# colors - list
# type - string (the whole big long damn thing)
# supertypes - list
# types - list
# subtypes - list
# text - string
# power - string
# toughness - string
# loyalty - number
# And some less useful ones, in case they're wanted for something:
# layout - string
# rarity - string
# flavor - string
# artist - string
# number - string
# multiverseid - number
# variations - list
# imageName - string
# watermark - string
# border - string
# timeshifted - boolean
# hand - number
# life - number
# reserved - boolean
# releaseDate - string
# starter - boolean
def fields_from_json(src_json, linetrans = True):
parsed = True
valid = True
fields = {}
# we hardcode in what the things are called in the mtgjson format
if 'name' in src_json:
name_val = src_json['name'].lower()
name_orig = name_val
name_val = transforms.name_pass_1_sanitize(name_val)
name_val = utils.to_ascii(name_val)
fields[field_name] = [(-1, name_val)]
else:
name_orig = ''
parsed = False
# return the actual Manacost object
if 'manaCost' in src_json:
cost = Manacost(src_json['manaCost'], fmt = 'json')
valid = valid and cost.valid
parsed = parsed and cost.parsed
fields[field_cost] = [(-1, cost)]
if 'supertypes' in src_json:
fields[field_supertypes] = [(-1, map(lambda s: utils.to_ascii(s.lower()),
src_json['supertypes']))]
if 'types' in src_json:
fields[field_types] = [(-1, map(lambda s: utils.to_ascii(s.lower()),
src_json['types']))]
else:
parsed = False
if 'subtypes' in src_json:
fields[field_subtypes] = [(-1, map(lambda s: utils.to_ascii(s.lower())
# urza's lands...
.replace('"', "'").replace('-', utils.dash_marker),
src_json['subtypes']))]
if 'rarity' in src_json:
if src_json['rarity'] in utils.json_rarity_map:
fields[field_rarity] = [(-1, utils.json_rarity_map[src_json['rarity']])]
else:
fields[field_rarity] = [(-1, src_json['rarity'])]
parsed = False
else:
parsed = False
if 'loyalty' in src_json:
fields[field_loyalty] = [(-1, utils.to_unary(str(src_json['loyalty'])))]
p_t = ''
parsed_pt = True
if 'power' in src_json:
p_t = utils.to_ascii(utils.to_unary(src_json['power'])) + '/' # hardcoded
parsed_pt = False
if 'toughness' in src_json:
p_t = p_t + utils.to_ascii(utils.to_unary(src_json['toughness']))
parsed_pt = True
elif 'toughness' in src_json:
p_t = '/' + utils.to_ascii(utils.to_unary(src_json['toughness'])) # hardcoded
parsed_pt = False
if p_t:
fields[field_pt] = [(-1, p_t)]
parsed = parsed and parsed_pt
# similarly, return the actual Manatext object
if 'text' in src_json:
text_val = src_json['text'].lower()
text_val = transforms.text_pass_1_strip_rt(text_val)
text_val = transforms.text_pass_2_cardname(text_val, name_orig)
text_val = transforms.text_pass_3_unary(text_val)
text_val = transforms.text_pass_4a_dashes(text_val)
text_val = transforms.text_pass_4b_x(text_val)
text_val = transforms.text_pass_5_counters(text_val)
text_val = transforms.text_pass_6_uncast(text_val)
text_val = transforms.text_pass_7_choice(text_val)
text_val = transforms.text_pass_8_equip(text_val)
text_val = transforms.text_pass_9_newlines(text_val)
text_val = transforms.text_pass_10_symbols(text_val)
if linetrans:
text_val = transforms.text_pass_11_linetrans(text_val)
text_val = utils.to_ascii(text_val)
text_val = text_val.strip()
mtext = Manatext(text_val, fmt = 'json')
valid = valid and mtext.valid
fields[field_text] = [(-1, mtext)]
# we don't need to worry about bsides because we handle that in the constructor
return parsed, valid and fields_check_valid(fields), fields
def fields_from_format(src_text, fmt_ordered, fmt_labeled, fieldsep):
parsed = True
valid = True
fields = {}
if fmt_labeled:
labels = {fmt_labeled[k] : k for k in fmt_labeled}
field_label_regex = '[' + ''.join(labels.keys()) + ']'
def addf(fields, fkey, fval):
# make sure you pass a pair
if fval and fval[1]:
if fkey in fields:
fields[fkey] += [fval]
else:
fields[fkey] = [fval]
textfields = src_text.split(fieldsep)
idx = 0
true_idx = 0
for textfield in textfields:
# ignore leading or trailing empty fields due to seps
if textfield == '':
if true_idx == 0 or true_idx == len(textfields) - 1:
true_idx += 1
continue
# count the field index for other empty fields but don't add them
else:
idx += 1
true_idx += 1
continue
lab = None
if fmt_labeled:
labs = re.findall(field_label_regex, textfield)
# use the first label if we saw any at all
if len(labs) > 0:
lab = labs[0]
textfield = textfield.replace(lab, '', 1)
# try to use the field label if we got one
if lab and lab in labels:
fname = labels[lab]
# fall back to the field order specified
elif idx < len(fmt_ordered):
fname = fmt_ordered[idx]
# we don't know what to do with this field: call it other
else:
fname = field_other
parsed = False
valid = False
# specialized handling
if fname in [field_cost]:
fval = Manacost(textfield)
parsed = parsed and fval.parsed
valid = valid and fval.valid
addf(fields, fname, (idx, fval))
elif fname in [field_text]:
fval = Manatext(textfield)
valid = valid and fval.valid
addf(fields, fname, (idx, fval))
elif fname in [field_supertypes, field_types, field_subtypes]:
addf(fields, fname, (idx, textfield.split()))
else:
addf(fields, fname, (idx, textfield))
idx += 1
true_idx += 1
# again, bsides are handled by the constructor
return parsed, valid and fields_check_valid(fields), fields
# Here's the actual Card class that other files should use.
class Card:
'''card representation with data'''
def __init__(self, src, fmt_ordered = fmt_ordered_default,
fmt_labeled = fmt_labeled_default,
fieldsep = utils.fieldsep, linetrans = True):
# source fields, exactly one will be set
self.json = None
self.raw = None
# flags
self.parsed = True
self.valid = True # doesn't record that much
# placeholders to fill in with expensive distance metrics
self.nearest_names = []
self.nearest_cards = []
# default values for all fields
self.__dict__[field_name] = ''
self.__dict__[field_rarity] = ''
self.__dict__[field_cost] = Manacost('')
self.__dict__[field_supertypes] = []
self.__dict__[field_types] = []
self.__dict__[field_subtypes] = []
self.__dict__[field_loyalty] = ''
self.__dict__[field_loyalty + '_value'] = None
self.__dict__[field_pt] = ''
self.__dict__[field_pt + '_p'] = None
self.__dict__[field_pt + '_p_value'] = None
self.__dict__[field_pt + '_t'] = None
self.__dict__[field_pt + '_t_value'] = None
self.__dict__[field_text] = Manatext('')
self.__dict__[field_text + '_lines'] = []
self.__dict__[field_text + '_words'] = []
self.__dict__[field_text + '_lines_words'] = []
self.__dict__[field_other] = []
self.bside = None
# format-independent view of processed input
self.fields = None # will be reset later
# looks like a json object
if isinstance(src, dict):
self.json = src
if utils.json_field_bside in src:
self.bside = Card(src[utils.json_field_bside],
fmt_ordered = fmt_ordered,
fmt_labeled = fmt_labeled,
fieldsep = fieldsep,
linetrans = linetrans)
p_success, v_success, parsed_fields = fields_from_json(src, linetrans = linetrans)
self.parsed = p_success
self.valid = v_success
self.fields = parsed_fields
# otherwise assume text encoding
else:
self.raw = src
sides = src.split(utils.bsidesep)
if len(sides) > 1:
self.bside = Card(utils.bsidesep.join(sides[1:]),
fmt_ordered = fmt_ordered,
fmt_labeled = fmt_labeled,
fieldsep = fieldsep,
linetrans = linetrans)
p_success, v_success, parsed_fields = fields_from_format(sides[0], fmt_ordered,
fmt_labeled, fieldsep)
self.parsed = p_success
self.valid = v_success
self.fields = parsed_fields
# amusingly enough, both encodings allow infinitely deep nesting of bsides...
# python name hackery
if self.fields:
for field in self.fields:
# look for a specialized set function
if hasattr(self, '_set_' + field):
getattr(self, '_set_' + field)(self.fields[field])
# otherwise use the default one
elif field in self.__dict__:
self.set_field_default(field, self.fields[field])
# If we don't recognize the field, fail. This is a totally artificial
# limitation; if we just used the default handler for the else case,
# we could set arbitrarily named fields.
else:
raise ValueError('python name mangling failure: unknown field for Card(): '
+ field)
else:
# valid but not parsed indicates that the card was apparently empty
self.parsed = False
# These setters are invoked via name mangling, so they have to match
# the field names specified above to be used. Otherwise we just
# always fall back to the (uninteresting) default handler.
# Also note that all fields come wrapped in pairs, with the first member
# specifying the index the field was found at when parsing the card. These will
# all be -1 if the card was parsed from (unordered) json.
def set_field_default(self, field, values):
first = True
for idx, value in values:
if first:
first = False
self.__dict__[field] = value
else:
# stick it in other so we'll know about it when we format the card
self.valid = False
self.__dict__[field_other] += [(idx, '<' + field + '> ' + str(value))]
def _set_loyalty(self, values):
first = True
for idx, value in values:
if first:
first = False
self.__dict__[field_loyalty] = value
try:
self.__dict__[field_loyalty + '_value'] = int(value)
except ValueError:
self.__dict__[field_loyalty + '_value'] = None
# Technically '*' could still be valid, but it's unlikely...
else:
self.valid = False
self.__dict__[field_other] += [(idx, '<loyalty> ' + str(value))]
def _set_pt(self, values):
first = True
for idx, value in values:
if first:
first = False
self.__dict__[field_pt] = value
p_t = value.split('/') # hardcoded
if len(p_t) == 2:
self.__dict__[field_pt + '_p'] = p_t[0]
try:
self.__dict__[field_pt + '_p_value'] = int(p_t[0])
except ValueError:
self.__dict__[field_pt + '_p_value'] = None
self.__dict__[field_pt + '_t'] = p_t[1]
try:
self.__dict__[field_pt + '_t_value'] = int(p_t[1])
except ValueError:
self.__dict__[field_pt + '_t_value'] = None
else:
self.valid = False
else:
self.valid = False
self.__dict__[field_other] += [(idx, '<pt> ' + str(value))]
def _set_text(self, values):
first = True
for idx, value in values:
if first:
first = False
mtext = value
self.__dict__[field_text] = mtext
fulltext = mtext.encode()
if fulltext:
self.__dict__[field_text + '_lines'] = map(Manatext,
fulltext.split(utils.newline))
self.__dict__[field_text + '_words'] = re.sub(utils.unletters_regex,
' ',
fulltext).split()
self.__dict__[field_text + '_lines_words'] = map(
lambda line: re.sub(utils.unletters_regex, ' ', line).split(),
fulltext.split(utils.newline))
else:
self.valid = False
self.__dict__[field_other] += [(idx, '<text> ' + str(value))]
def _set_other(self, values):
# just record these; we could do something like unset valid if we really wanted
for idx, value in values:
self.__dict__[field_other] += [(idx, value)]
# Output functions that produce various formats. encode() is specific to
# the NN representation, use str() or format() for output intended for human
# readers.
def encode(self, fmt_ordered = fmt_ordered_default, fmt_labeled = fmt_labeled_default,
fieldsep = utils.fieldsep, initial_sep = True, final_sep = True,
randomize_fields = False, randomize_mana = False, randomize_lines = False):
outfields = []
for field in fmt_ordered:
if field in self.__dict__:
outfield = self.__dict__[field]
if outfield:
# specialized field handling for the ones that aren't strings (sigh)
if isinstance(outfield, list):
outfield_str = ' '.join(outfield)
elif isinstance(outfield, Manacost):
outfield_str = outfield.encode(randomize = randomize_mana)
elif isinstance(outfield, Manatext):
outfield_str = outfield.encode(randomize = randomize_mana)
if randomize_lines:
outfield_str = transforms.randomize_lines(outfield_str)
else:
outfield_str = outfield
else:
outfield_str = ''
if fmt_labeled and field in fmt_labeled:
outfield_str = fmt_labeled[field] + outfield_str
outfields += [outfield_str]
else:
raise ValueError('unknown field for Card.encode(): ' + str(field))
if randomize_fields:
random.shuffle(outfields)
if initial_sep:
outfields = [''] + outfields
if final_sep:
outfields = outfields + ['']
outstr = fieldsep.join(outfields)
if self.bside:
outstr = (outstr + utils.bsidesep
+ self.bside.encode(fmt_ordered = fmt_ordered,
fmt_labeled = fmt_labeled,
fieldsep = fieldsep,
randomize_fields = randomize_fields,
randomize_mana = randomize_mana,
initial_sep = initial_sep, final_sep = final_sep))
return outstr
def format(self, gatherer = False, for_forum = False, vdump = False, for_html = False):
linebreak = '\n'
if for_html:
linebreak = '<hr>' + linebreak
outstr = ''
if for_html:
outstr += '<div class="card-text">\n'
if gatherer:
cardname = titlecase(transforms.name_unpass_1_dashes(self.__dict__[field_name]))
if vdump and not cardname:
cardname = '_NONAME_'
# in general, for_html overrides for_forum
if for_html:
outstr += '<b>'
elif for_forum:
outstr += '[b]'
outstr += cardname
if for_html:
outstr += '</b>'
elif for_forum:
outstr += '[/b]'
coststr = self.__dict__[field_cost].format(for_forum=for_forum, for_html=for_html)
if vdump or not coststr == '_NOCOST_':
outstr += ' ' + coststr
if for_html and for_forum:
#force for_html to false to create tooltip with forum spoiler
outstr += ('<div class="hover_img"><a href="#">[F]</a> <span><p>'
+ self.format(gatherer=gatherer, for_forum=for_forum, for_html=False, vdump=vdump).replace('\n', '<br>')
+ '</p></span></div><a href="#top" style="float: right;">back to top</a>')
if self.__dict__[field_rarity]:
if self.__dict__[field_rarity] in utils.json_rarity_unmap:
rarity = utils.json_rarity_unmap[self.__dict__[field_rarity]]
else:
rarity = self.__dict__[field_rarity]
outstr += ' (' + rarity + ')'
if vdump:
if not self.parsed:
outstr += ' _UNPARSED_'
if not self.valid:
outstr += ' _INVALID_'
outstr += linebreak
basetypes = map(str.capitalize, self.__dict__[field_types])
if vdump and len(basetypes) < 1:
basetypes = ['_NOTYPE_']
outstr += ' '.join(map(str.capitalize, self.__dict__[field_supertypes]) + basetypes)
if self.__dict__[field_subtypes]:
outstr += (' ' + utils.dash_marker + ' ' +
' '.join(self.__dict__[field_subtypes]).title())
if self.__dict__[field_pt]:
outstr += ' (' + utils.from_unary(self.__dict__[field_pt]) + ')'
if self.__dict__[field_loyalty]:
outstr += ' ((' + utils.from_unary(self.__dict__[field_loyalty]) + '))'
if self.__dict__[field_text].text:
outstr += linebreak
mtext = self.__dict__[field_text].text
mtext = transforms.text_unpass_1_choice(mtext, delimit = False)
mtext = transforms.text_unpass_2_counters(mtext)
#mtext = transforms.text_unpass_3_uncast(mtext)
mtext = transforms.text_unpass_4_unary(mtext)
mtext = transforms.text_unpass_5_symbols(mtext, for_forum, for_html)
mtext = sentencecase(mtext)
mtext = transforms.text_unpass_6_cardname(mtext, cardname)
mtext = transforms.text_unpass_7_newlines(mtext)
#mtext = transforms.text_unpass_8_unicode(mtext)
newtext = Manatext('')
newtext.text = mtext
newtext.costs = self.__dict__[field_text].costs
outstr += newtext.format(for_forum = for_forum, for_html = for_html)
if vdump and self.__dict__[field_other]:
outstr += linebreak
if for_html:
outstr += '<i>'
elif for_forum:
outstr += '[i]'
else:
outstr += utils.dash_marker * 2
first = True
for idx, value in self.__dict__[field_other]:
if for_html:
if not first:
outstr += '<br>\n'
else:
first = False
else:
outstr += linebreak
outstr += '(' + str(idx) + ') ' + str(value)
if for_html:
outstr += '</i>'
if for_forum:
outstr += '[/i]'
else:
cardname = self.__dict__[field_name]
#cardname = transforms.name_unpass_1_dashes(self.__dict__[field_name])
if vdump and not cardname:
cardname = '_NONAME_'
outstr += cardname
coststr = self.__dict__[field_cost].format(for_forum=for_forum, for_html=for_html)
if vdump or not coststr == '_NOCOST_':
outstr += ' ' + coststr
if vdump:
if not self.parsed:
outstr += ' _UNPARSED_'
if not self.valid:
outstr += ' _INVALID_'
if for_html and for_forum:
#force for_html to false to create tooltip with forum spoiler
outstr += ('<div class="hover_img"><a href="#">[F]</a> <span><p>'
+ self.format(gatherer=gatherer, for_forum=for_forum, for_html=False, vdump=vdump).replace('\n', '<br>')
+ '</p></span></div><a href="#top" style="float: right;">back to top</a>')
outstr += linebreak
outstr += ' '.join(self.__dict__[field_supertypes] + self.__dict__[field_types])
if self.__dict__[field_subtypes]:
outstr += ' ' + utils.dash_marker + ' ' + ' '.join(self.__dict__[field_subtypes])
if self.__dict__[field_rarity]:
if self.__dict__[field_rarity] in utils.json_rarity_unmap:
rarity = utils.json_rarity_unmap[self.__dict__[field_rarity]]
else:
rarity = self.__dict__[field_rarity]
outstr += ' (' + rarity.lower() + ')'
if self.__dict__[field_text].text:
outstr += linebreak
mtext = self.__dict__[field_text].text
mtext = transforms.text_unpass_1_choice(mtext, delimit = True)
#mtext = transforms.text_unpass_2_counters(mtext)
#mtext = transforms.text_unpass_3_uncast(mtext)
mtext = transforms.text_unpass_4_unary(mtext)
mtext = transforms.text_unpass_5_symbols(mtext, for_forum, for_html)
#mtext = transforms.text_unpass_6_cardname(mtext, cardname)
mtext = transforms.text_unpass_7_newlines(mtext)
#mtext = transforms.text_unpass_8_unicode(mtext)
newtext = Manatext('')
newtext.text = mtext
newtext.costs = self.__dict__[field_text].costs
outstr += newtext.format(for_forum=for_forum, for_html=for_html)
if self.__dict__[field_pt]:
outstr += linebreak
outstr += '(' + utils.from_unary(self.__dict__[field_pt]) + ')'
if self.__dict__[field_loyalty]:
outstr += linebreak
outstr += '((' + utils.from_unary(self.__dict__[field_loyalty]) + '))'
if vdump and self.__dict__[field_other]:
outstr += linebreak
if for_html:
outstr += '<i>'
else:
outstr += utils.dash_marker * 2
first = True
for idx, value in self.__dict__[field_other]:
if for_html:
if not first:
outstr += '<br>\n'
else:
first = False
else:
outstr += linebreak
outstr += '(' + str(idx) + ') ' + str(value)
if for_html:
outstr += '</i>'
if self.bside:
if for_html:
outstr += '\n'
# force for_forum to false so that the inner div doesn't duplicate the forum
# spoiler of the bside
outstr += self.bside.format(gatherer=gatherer, for_forum=False, for_html=for_html, vdump=vdump)
else:
outstr += linebreak
outstr += utils.dash_marker * 8
outstr += linebreak
outstr += self.bside.format(gatherer=gatherer, for_forum=for_forum, for_html=for_html, vdump=vdump)
# if for_html:
# if for_forum:
# outstr += linebreak
# # force for_html to false to create a copyable forum spoiler div
# outstr += ('<div>'
# + self.format(gatherer=gatherer, for_forum=for_forum, for_html=False, vdump=vdump).replace('\n', '<br>')
# + '</div>')
if for_html:
outstr += "</div>"
return outstr
def to_mse(self, print_raw = False, vdump = False):
outstr = ''
# need a 'card' string first
outstr += 'card:\n'
cardname = titlecase(transforms.name_unpass_1_dashes(self.__dict__[field_name]))
outstr += '\tname: ' + cardname + '\n'
if self.__dict__[field_rarity]:
if self.__dict__[field_rarity] in utils.json_rarity_unmap:
rarity = utils.json_rarity_unmap[self.__dict__[field_rarity]]
else:
rarity = self.__dict__[field_rarity]
outstr += '\trarity: ' + rarity.lower() + '\n'
if not self.__dict__[field_cost].none:
outstr += ('\tcasting cost: '
+ self.__dict__[field_cost].format().replace('{','').replace('}','')
+ '\n')
outstr += '\tsuper type: ' + ' '.join(self.__dict__[field_supertypes]
+ self.__dict__[field_types]).title() + '\n'
if self.__dict__[field_subtypes]:
outstr += '\tsub type: ' + ' '.join(self.__dict__[field_subtypes]).title() + '\n'
if self.__dict__[field_pt]:
ptstring = utils.from_unary(self.__dict__[field_pt]).split('/')
if (len(ptstring) > 1): # really don't want to be accessing anything nonexistent.
outstr += '\tpower: ' + ptstring[0] + '\n'
outstr += '\ttoughness: ' + ptstring[1] + '\n'
if self.__dict__[field_text].text:
mtext = self.__dict__[field_text].text
mtext = transforms.text_unpass_1_choice(mtext, delimit = False)
mtext = transforms.text_unpass_2_counters(mtext)
mtext = transforms.text_unpass_3_uncast(mtext)
mtext = transforms.text_unpass_4_unary(mtext)
mtext = transforms.text_unpass_5_symbols(mtext, False, False)
mtext = sentencecase(mtext)
# I don't really want these MSE specific passes in transforms,
# but they could be pulled out separately somewhere else in here.
mtext = mtext.replace(utils.this_marker, '<atom-cardname><nospellcheck>'
+ utils.this_marker + '</nospellcheck></atom-cardname>')
mtext = transforms.text_unpass_6_cardname(mtext, cardname)
mtext = transforms.text_unpass_7_newlines(mtext)
mtext = transforms.text_unpass_8_unicode(mtext)
newtext = Manatext('')
newtext.text = mtext
newtext.costs = self.__dict__[field_text].costs
newtext = newtext.format()
# See, the thing is, I think it's simplest and easiest to just leave it like this.
# What could possibly go wrong?
newtext = newtext.replace('{','<sym-auto>').replace('}','</sym-auto>')
else:
newtext = ''
# Annoying special case for bsides;
# This could be improved by having an intermediate function that returned
# all of the formatted fields in a data structure and a separate wrapper
# that actually packed them into the MSE format.
if self.bside:
newtext = newtext.replace('\n','\n\t\t')
outstr += '\trule text:\n\t\t' + newtext + '\n'
outstr += '\tstylesheet: new-split\n'
cardname2 = titlecase(transforms.name_unpass_1_dashes(
self.bside.__dict__[field_name]))
outstr += '\tname 2: ' + cardname2 + '\n'
if self.bside.__dict__[field_rarity]:
if self.bside.__dict__[field_rarity] in utils.json_rarity_unmap:
rarity2 = utils.json_rarity_unmap[self.bside.__dict__[field_rarity]]
else:
rarity2 = self.bside.__dict__[field_rarity]
outstr += '\trarity 2: ' + rarity2.lower() + '\n'
if not self.bside.__dict__[field_cost].none:
outstr += ('\tcasting cost 2: '
+ self.bside.__dict__[field_cost].format()
.replace('{','').replace('}','')
+ '\n')
outstr += ('\tsuper type 2: '
+ ' '.join(self.bside.__dict__[field_supertypes]
+ self.bside.__dict__[field_types]).title() + '\n')
if self.bside.__dict__[field_subtypes]:
outstr += ('\tsub type 2: '
+ ' '.join(self.bside.__dict__[field_subtypes]).title() + '\n')
if self.bside.__dict__[field_pt]:
ptstring2 = utils.from_unary(self.bside.__dict__[field_pt]).split('/')
if (len(ptstring2) > 1): # really don't want to be accessing anything nonexistent.
outstr += '\tpower 2: ' + ptstring2[0] + '\n'
outstr += '\ttoughness 2: ' + ptstring2[1] + '\n'
if self.bside.__dict__[field_text].text:
mtext2 = self.bside.__dict__[field_text].text
mtext2 = transforms.text_unpass_1_choice(mtext2, delimit = False)
mtext2 = transforms.text_unpass_2_counters(mtext2)
mtext2 = transforms.text_unpass_3_uncast(mtext2)
mtext2 = transforms.text_unpass_4_unary(mtext2)
mtext2 = transforms.text_unpass_5_symbols(mtext2, False, False)
mtext2 = sentencecase(mtext2)
mtext2 = mtext2.replace(utils.this_marker, '<atom-cardname><nospellcheck>'
+ utils.this_marker + '</nospellcheck></atom-cardname>')
mtext2 = transforms.text_unpass_6_cardname(mtext2, cardname2)
mtext2 = transforms.text_unpass_7_newlines(mtext2)
mtext2 = transforms.text_unpass_8_unicode(mtext2)
newtext2 = Manatext('')
newtext2.text = mtext2
newtext2.costs = self.bside.__dict__[field_text].costs
newtext2 = newtext2.format()
newtext2 = newtext2.replace('{','<sym-auto>').replace('}','</sym-auto>')
newtext2 = newtext2.replace('\n','\n\t\t')
outstr += '\trule text 2:\n\t\t' + newtext2 + '\n'
# Need to do Special Things if it's a planeswalker.
# This code mostly works, but it won't get quite the right thing if the planeswalker
# abilities don't come before any other ones. Should be fixed.
elif "planeswalker" in str(self.__dict__[field_types]):
outstr += '\tstylesheet: m15-planeswalker\n'
# set up the loyalty cost fields using regex to find how many there are.
i = 0
lcost_regex = r'[-+]?\d+: ' # 1+ figures, might be 0.
for lcost in re.findall(lcost_regex, newtext):
i += 1
outstr += '\tloyalty cost ' + str(i) + ': ' + lcost + '\n'
# sub out the loyalty costs.
newtext = re.sub(lcost_regex, '', newtext)
# We need to uppercase again, because MSE won't magically capitalize for us
# like it does after semicolons.
# Abusing passes like this is terrible, should really fix sentencecase.
newtext = transforms.text_pass_9_newlines(newtext)
newtext = sentencecase(newtext)
newtext = transforms.text_unpass_7_newlines(newtext)
if self.__dict__[field_loyalty]:
outstr += '\tloyalty: ' + utils.from_unary(self.__dict__[field_loyalty]) + '\n'
newtext = newtext.replace('\n','\n\t\t')
outstr += '\trule text:\n\t\t' + newtext + '\n'
else:
newtext = newtext.replace('\n','\n\t\t')
outstr += '\trule text:\n\t\t' + newtext + '\n'
# now append all the other useless fields that the setfile expects.
outstr += '\thas styling: false\n\ttime created:2015-07-20 22:53:07\n\ttime modified:2015-07-20 22:53:08\n\textra data:\n\timage:\n\tcard code text:\n\tcopyright:\n\timage 2:\n\tcopyright 2:\n\tnotes:'
return outstr
def vectorize(self):
ld = '('
rd = ')'
outstr = ''
if self.__dict__[field_rarity]:
outstr += ld + self.__dict__[field_rarity] + rd + ' '
coststr = self.__dict__[field_cost].vectorize(delimit = True)
if coststr:
outstr += coststr + ' '
typestr = ' '.join(map(lambda s: '(' + s + ')',
self.__dict__[field_supertypes] + self.__dict__[field_types]))
if typestr:
outstr += typestr + ' '
if self.__dict__[field_subtypes]:
outstr += ' '.join(self.__dict__[field_subtypes]) + ' '
if self.__dict__[field_pt]:
outstr += ' '.join(map(lambda s: '(' + s + ')',
self.__dict__[field_pt].replace('/', '/ /').split()))
outstr += ' '
if self.__dict__[field_loyalty]:
outstr += '((' + self.__dict__[field_loyalty] + ')) '
outstr += self.__dict__[field_text].vectorize()
if self.bside:
outstr = '_ASIDE_ ' + outstr + '\n\n_BSIDE_ ' + self.bside.vectorize()
return outstr
def get_colors(self):
return self.__dict__[field_cost].get_colors()
def get_types(self):
return self.__dict__[field_types]
def get_cmc(self):
return self.__dict__[field_cost].cmc
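# Hypothetical usage sketch (added for illustration, not part of the original
# file): build a Card from a small mtgjson-style dict as documented above, then
# produce the NN encoding and the human-readable format. The field values here
# are invented.
def _example_card_usage():
    src = {
        'name': 'Example Bear',
        'manaCost': '{1}{G}',
        'types': ['Creature'],
        'subtypes': ['Bear'],
        'rarity': 'Common',
        'power': '2',
        'toughness': '2',
        'text': 'Example Bear can block only creatures with flying.',
    }
    card = Card(src)
    return card.encode(), card.format()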
|
mit
|
ikcalB/linuxcnc-mirror
|
src/hal/user_comps/pyvcp.py
|
32
|
3152
|
#!/usr/bin/env python
# This is a component of emc
# Copyright 2007 Anders Wallin <[email protected]>
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Python Virtual Control Panel for EMC
A virtual control panel (VCP) is used to display and control
HAL pins, which are either BIT or FLOAT valued.
Usage: pyvcp -g WxH+X+Y -c compname myfile.xml
compname is the name of the HAL component to be created.
The name of the HAL pins associated with the VCP will begin with 'compname.'
myfile.xml is an XML file which specifies the layout of the VCP.
Valid XML tags are described in the documentation for pyvcp_widgets.py
-g option allows setting of the initial size and/or position of the panel
"""
import sys, os
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
sys.path.insert(0, os.path.join(BASE, "lib", "python"))
import vcpparse
import hal
from Tkinter import Tk
import getopt
def usage():
""" prints the usage message """
print "Usage: pyvcp [-g WIDTHxHEIGHT+XOFFSET+YOFFSET][-c hal_component_name] myfile.xml"
print "If the component name is not specified, the basename of the xml file is used."
print "-g options are in pixel units, XOFFSET/YOFFSET is referenced from top left of screen"
print "use -g WIDTHxHEIGHT for just setting size or -g +XOFFSET+YOFFSET for just position"
def main():
""" creates a HAL component.
calls vcpparse with the specified XML file.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "c:g:")
except getopt.GetoptError, detail:
print detail
usage()
sys.exit(1)
window_geometry = None
component_name = None
for o, a in opts:
if o == "-c":
component_name = a
if o == "-g":
window_geometry = a
try:
filename=args[0]
except:
usage()
sys.exit(1)
if component_name is None:
component_name = os.path.splitext(os.path.basename(filename))[0]
pyvcp0 = Tk()
pyvcp0.title(component_name)
if window_geometry:
pyvcp0.geometry(window_geometry)
vcpparse.filename=filename
pycomp=vcpparse.create_vcp(compname=component_name, master=pyvcp0)
pycomp.ready()
try:
try:
pyvcp0.mainloop()
except KeyboardInterrupt:
sys.exit(0)
finally:
pycomp.exit()
if __name__ == '__main__':
main()
|
lgpl-2.1
|
bq/web2board
|
res/common/Scons/sconsFiles/SCons/Tool/GettextCommon.py
|
6
|
17292
|
"""SCons.Tool.GettextCommon module
Used by several tools of the `gettext` toolset.
"""
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/GettextCommon.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog"
import SCons.Warnings
import re
#############################################################################
class XgettextToolWarning(SCons.Warnings.Warning): pass
class XgettextNotFound(XgettextToolWarning): pass
class MsginitToolWarning(SCons.Warnings.Warning): pass
class MsginitNotFound(MsginitToolWarning): pass
class MsgmergeToolWarning(SCons.Warnings.Warning): pass
class MsgmergeNotFound(MsgmergeToolWarning): pass
class MsgfmtToolWarning(SCons.Warnings.Warning): pass
class MsgfmtNotFound(MsgfmtToolWarning): pass
#############################################################################
SCons.Warnings.enableWarningClass(XgettextToolWarning)
SCons.Warnings.enableWarningClass(XgettextNotFound)
SCons.Warnings.enableWarningClass(MsginitToolWarning)
SCons.Warnings.enableWarningClass(MsginitNotFound)
SCons.Warnings.enableWarningClass(MsgmergeToolWarning)
SCons.Warnings.enableWarningClass(MsgmergeNotFound)
SCons.Warnings.enableWarningClass(MsgfmtToolWarning)
SCons.Warnings.enableWarningClass(MsgfmtNotFound)
#############################################################################
#############################################################################
class _POTargetFactory(object):
""" A factory of `PO` target files.
Factory defaults differ from those of `SCons.Node.FS.FS`. We set the `precious`
(this is required by the gettext builders and actions) and `noclean` flags by
default for all produced nodes.
"""
def __init__( self, env, nodefault = True, alias = None, precious = True
, noclean = True ):
""" Object constructor.
**Arguments**
- *env* (`SCons.Environment.Environment`)
- *nodefault* (`boolean`) - if `True`, produced nodes will be excluded
from the default target `'.'`
- *alias* (`string`) - if provided, produced nodes will be automatically
added to this alias, and the alias will be set as `AlwaysBuild`
- *precious* (`boolean`) - if `True`, the produced nodes will be set as
`Precious`.
- *noclean* (`boolean`) - if `True`, the produced nodes will be excluded
from `Clean`.
"""
self.env = env
self.alias = alias
self.precious = precious
self.noclean = noclean
self.nodefault = nodefault
def _create_node(self, name, factory, directory = None, create = 1):
""" Create node, and set it up to factory settings. """
import SCons.Util
node = factory(name, directory, create)
node.set_noclean(self.noclean)
node.set_precious(self.precious)
if self.nodefault:
self.env.Ignore('.', node)
if self.alias:
self.env.AlwaysBuild(self.env.Alias(self.alias, node))
return node
def Entry(self, name, directory = None, create = 1):
""" Create `SCons.Node.FS.Entry` """
return self._create_node(name, self.env.fs.Entry, directory, create)
def File(self, name, directory = None, create = 1):
""" Create `SCons.Node.FS.File` """
return self._create_node(name, self.env.fs.File, directory, create)
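# Hedged illustration, not part of the original module: using the factory as a
# builder's target_factory makes every produced PO node Precious, NoClean and
# ignored by the default '.' target. The alias name below is made up.
def _po_target_factory_example(env):
    return _POTargetFactory(env, alias='po-update').File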
#############################################################################
#############################################################################
_re_comment = re.compile(r'(#[^\n\r]+)$', re.M)
_re_lang = re.compile(r'([a-zA-Z0-9_]+)', re.M)
#############################################################################
def _read_linguas_from_files(env, linguas_files = None):
""" Parse `LINGUAS` file and return list of extracted languages """
import SCons.Util
import SCons.Environment
global _re_comment
global _re_lang
if not SCons.Util.is_List(linguas_files) \
and not SCons.Util.is_String(linguas_files) \
and not isinstance(linguas_files, SCons.Node.FS.Base) \
and linguas_files:
# If, linguas_files==True or such, then read 'LINGUAS' file.
linguas_files = [ 'LINGUAS' ]
if linguas_files is None:
return []
fnodes = env.arg2nodes(linguas_files)
linguas = []
for fnode in fnodes:
contents = _re_comment.sub("", fnode.get_text_contents())
ls = [ l for l in _re_lang.findall(contents) if l ]
linguas.extend(ls)
return linguas
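# Hedged illustration, not part of the original module: shows how the regexes
# above turn typical LINGUAS content into a list of language codes; the sample
# text is made up for demonstration only.
def _read_linguas_example():
    sample = "# languages shipped with the package\nen pl\nde\n"
    stripped = _re_comment.sub("", sample)
    return [lang for lang in _re_lang.findall(stripped) if lang]  # ['en', 'pl', 'de']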
#############################################################################
#############################################################################
from SCons.Builder import BuilderBase
#############################################################################
class _POFileBuilder(BuilderBase):
""" `PO` file builder.
This is a multi-target, single-source builder. In a typical situation the source
is a single `POT` file, e.g. `messages.pot`, and there are multiple `PO`
targets to be updated from this `POT`. We must run
`SCons.Builder.BuilderBase._execute()` separately for each target to track
dependencies separately for each target file.
**NOTE**: if we call `SCons.Builder.BuilderBase._execute(.., target, ...)`
with target being the list of all targets, all targets would be rebuilt each time
one of the targets from this list is missing. This would happen, for example,
when a new language `ll` enters `LINGUAS_FILE` (at this moment there is no
`ll.po` file yet). To avoid this, we override
`SCons.Builder.BuilderBase._execute()` and call it separately for each
target. Here we also append to the target list the languages read from
`LINGUAS_FILE`.
"""
#
#* The argument for overriding _execute(): We must use an environment with
# builder overrides applied (see BuilderBase.__init__()). Here it comes for
# free.
#* The argument against using 'emitter': The emitter is called too late
# by BuilderBase._execute(). If user calls, for example:
#
# env.POUpdate(LINGUAS_FILE = 'LINGUAS')
#
# the builder throws an error, because it is called with target=None,
# source=None and is trying to "generate" sources or target list first.
# If user calls
#
# env.POUpdate(['foo', 'baz'], LINGUAS_FILE = 'LINGUAS')
#
# the env.BuilderWrapper() calls our builder with target=None,
# source=['foo', 'baz']. The BuilderBase._execute() then splits execution
# and executes iteratively (via recursion) self._execute(None, source[i]).
# After that it calls the emitter (which is quite too late). The emitter is
# also called in each iteration, which makes things even worse.
def __init__(self, env, **kw):
if not 'suffix' in kw:
kw['suffix'] = '$POSUFFIX'
if not 'src_suffix' in kw:
kw['src_suffix'] = '$POTSUFFIX'
if not 'src_builder' in kw:
kw['src_builder'] = '_POTUpdateBuilder'
if not 'single_source' in kw:
kw['single_source'] = True
alias = None
if 'target_alias' in kw:
alias = kw['target_alias']
del kw['target_alias']
if not 'target_factory' in kw:
kw['target_factory'] = _POTargetFactory(env, alias=alias).File
BuilderBase.__init__(self, **kw)
def _execute(self, env, target, source, *args, **kw):
""" Execute builder's actions.
Here we append to `target` the languages read from `$LINGUAS_FILE` and
apply `SCons.Builder.BuilderBase._execute()` separately to each target.
The arguments and return value are the same as for
`SCons.Builder.BuilderBase._execute()`.
"""
import SCons.Util
import SCons.Node
linguas_files = None
if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE']:
linguas_files = env['LINGUAS_FILE']
# This prevents endless recursion loop (we'll be invoked once for
# each target appended here, we must not extend the list again).
env['LINGUAS_FILE'] = None
linguas = _read_linguas_from_files(env,linguas_files)
if SCons.Util.is_List(target):
target.extend(linguas)
elif target is not None:
target = [target] + linguas
else:
target = linguas
if not target:
# Let SCons.BuilderBase handle this pathological situation
return BuilderBase._execute( self, env, target, source, *args, **kw)
# The rest is ours
if not SCons.Util.is_List(target):
target = [ target ]
result = []
for tgt in target:
r = BuilderBase._execute( self, env, [tgt], source, *args, **kw)
result.extend(r)
if linguas_files is not None:
env['LINGUAS_FILE'] = linguas_files
return SCons.Node.NodeList(result)
#############################################################################
import SCons.Environment
#############################################################################
def _translate(env, target=None, source=SCons.Environment._null, *args, **kw):
""" Function for `Translate()` pseudo-builder """
if target is None: target = []
pot = env.POTUpdate(None, source, *args, **kw)
po = env.POUpdate(target, pot, *args, **kw)
return po
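# Hedged usage sketch, not part of the original module: a typical call from an
# SConscript once the 'gettext' toolset is loaded; the language list, source
# file name and POAUTOINIT value are made up for illustration.
def _translate_usage_example(env):
    return env.Translate(['en', 'pl'], ['src/hello.c'], POAUTOINIT=1)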
#############################################################################
#############################################################################
class RPaths(object):
""" Callable object, which returns pathnames relative to SCons current
working directory.
It seems like `SCons.Node.FS.Base.get_path()` returns absolute paths
for nodes that are outside of current working directory (`env.fs.getcwd()`).
Here, we often have `SConscript`, `POT` and `PO` files within `po/`
directory and source files (e.g. `*.c`) outside of it. When generating `POT`
template file, references to source files are written to `POT` template, so
a translator may later quickly jump to appropriate source file and line from
its `PO` editor (e.g. `poedit`). Relative paths in a `PO` file are usually
interpreted by the `PO` editor as paths relative to the place where the `PO` file
lives. Absolute paths would make the resultant `POT` file nonportable, as
the references would be correct only on the machine where the `POT` file was
recently re-created. For this reason, we need a function which always
returns relative paths. This is the purpose of the `RPaths` callable object.
The `__call__` method returns paths relative to the current working directory, but
we assume that *xgettext(1)* is run from the directory where the target file is
going to be created.
Note that this may not work for files distributed over several hosts or
across different drives on Windows. We assume here that a single local
filesystem holds both the source files and the target `POT` templates.
Intended use of `RPaths` - in `xgettext.py`::
def generate(env):
from GettextCommon import RPaths
...
sources = '$( ${_concat( "", SOURCES, "", __env__, XgettextRPaths, TARGET, SOURCES)} $)'
env.Append(
...
XGETTEXTCOM = 'XGETTEXT ... ' + sources,
...
XgettextRPaths = RPaths(env)
)
"""
# NOTE: This callable object returns pathnames of dirs/files relative to
# current working directory. The pathname remains relative also for entries
# that are outside of the current working directory (note that
# SCons.Node.FS.File and siblings return absolute paths in such cases). For
# simplicity we compute paths relative to the current working directory; this
# seems to be enough for our purposes (we don't need the TARGET variable and
# SCons.Defaults.Variable_Caller stuff).
def __init__(self, env):
""" Initialize `RPaths` callable object.
**Arguments**:
- *env* - a `SCons.Environment.Environment` object, defines *current
working dir*.
"""
self.env = env
# FIXME: I'm not sure, how it should be implemented (what the *args are in
# general, what is **kw).
def __call__(self, nodes, *args, **kw):
""" Return nodes' paths (strings) relative to current working directory.
**Arguments**:
- *nodes* ([`SCons.Node.FS.Base`]) - list of nodes.
- *args* - currently unused.
- *kw* - currently unused.
**Returns**:
- Tuple of strings, which represent paths relative to current working
directory (for given environment).
"""
import os
import SCons.Node.FS
rpaths = ()
cwd = self.env.fs.getcwd().get_abspath()
for node in nodes:
rpath = None
if isinstance(node, SCons.Node.FS.Base):
rpath = os.path.relpath(node.get_abspath(), cwd)
# FIXME: Other types possible here?
if rpath is not None:
rpaths += (rpath,)
return rpaths
#############################################################################
#############################################################################
def _init_po_files(target, source, env):
""" Action function for `POInit` builder. """
nop = lambda target, source, env : 0
if env.has_key('POAUTOINIT'):
autoinit = env['POAUTOINIT']
else:
autoinit = False
# Well, if everything outside works well, this loop should do a single
# iteration. Otherwise we are rebuilding all the targets even if just
# one has changed (but is this our fault?).
for tgt in target:
if not tgt.exists():
if autoinit:
action = SCons.Action.Action('$MSGINITCOM', '$MSGINITCOMSTR')
else:
msg = 'File ' + repr(str(tgt)) + ' does not exist. ' \
+ 'If you are a translator, you can create it through: \n' \
+ '$MSGINITCOM'
action = SCons.Action.Action(nop, msg)
status = action([tgt], source, env)
if status: return status
return 0
#############################################################################
#############################################################################
def _detect_xgettext(env):
""" Detects *xgettext(1)* binary """
if env.has_key('XGETTEXT'):
return env['XGETTEXT']
xgettext = env.Detect('xgettext');
if xgettext:
return xgettext
raise SCons.Errors.StopError(XgettextNotFound,"Could not detect xgettext")
return None
#############################################################################
def _xgettext_exists(env):
return _detect_xgettext(env)
#############################################################################
#############################################################################
def _detect_msginit(env):
""" Detects *msginit(1)* program. """
if env.has_key('MSGINIT'):
return env['MSGINIT']
msginit = env.Detect('msginit');
if msginit:
return msginit
raise SCons.Errors.StopError(MsginitNotFound, "Could not detect msginit")
return None
#############################################################################
def _msginit_exists(env):
return _detect_msginit(env)
#############################################################################
#############################################################################
def _detect_msgmerge(env):
""" Detects *msgmerge(1)* program. """
if env.has_key('MSGMERGE'):
return env['MSGMERGE']
msgmerge = env.Detect('msgmerge');
if msgmerge:
return msgmerge
raise SCons.Errors.StopError(MsgmergeNotFound, "Could not detect msgmerge")
return None
#############################################################################
def _msgmerge_exists(env):
return _detect_msgmerge(env)
#############################################################################
#############################################################################
def _detect_msgfmt(env):
""" Detects *msgmfmt(1)* program. """
if env.has_key('MSGFMT'):
return env['MSGFMT']
msgfmt = env.Detect('msgfmt');
if msgfmt:
return msgfmt
raise SCons.Errors.StopError(MsgfmtNotFound, "Could not detect msgfmt")
return None
#############################################################################
def _msgfmt_exists(env):
return _detect_msgfmt(env)
#############################################################################
#############################################################################
def tool_list(platform, env):
""" List tools that shall be generated by top-level `gettext` tool """
return [ 'xgettext', 'msginit', 'msgmerge', 'msgfmt' ]
#############################################################################
|
lgpl-3.0
|
wimnat/ansible-modules-extras
|
system/ohai.py
|
77
|
1693
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: ohai
short_description: Returns inventory data from I(Ohai)
description:
- Similar to the M(facter) module, this runs the I(Ohai) discovery program
(U(http://wiki.opscode.com/display/chef/Ohai)) on the remote host and
returns JSON inventory data.
I(Ohai) data is a bit more verbose and nested than I(facter).
version_added: "0.6"
options: {}
notes: []
requirements: [ "ohai" ]
author:
- "Ansible Core Team"
- "Michael DeHaan (@mpdehaan)"
'''
EXAMPLES = '''
# Retrieve (ohai) data from all Web servers and store in one-file per host
ansible webservers -m ohai --tree=/tmp/ohaidata
'''
def main():
module = AnsibleModule(
argument_spec = dict()
)
cmd = ["/usr/bin/env", "ohai"]
rc, out, err = module.run_command(cmd, check_rc=True)
module.exit_json(**json.loads(out))
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
jaruba/chromium.src
|
tools/perf/page_sets/mse_cases.py
|
9
|
2010
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class MseCasesPage(page_module.Page):
def __init__(self, url, page_set):
super(MseCasesPage, self).__init__(url=url, page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition('window.__testDone == true')
class MseCasesPageSet(page_set_module.PageSet):
""" Media source extensions perf benchmark """
def __init__(self):
super(MseCasesPageSet, self).__init__(bucket=page_set_module.PUBLIC_BUCKET)
urls_list = [
'file://mse_cases/startup_test.html?testType=AV',
'file://mse_cases/startup_test.html?testType=AV&useAppendStream=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=AV&doNotWaitForBodyOnLoad=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=AV&useAppendStream=true&doNotWaitForBodyOnLoad=true',
'file://mse_cases/startup_test.html?testType=V',
'file://mse_cases/startup_test.html?testType=V&useAppendStream=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=V&doNotWaitForBodyOnLoad=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=V&useAppendStream=true&doNotWaitForBodyOnLoad=true',
'file://mse_cases/startup_test.html?testType=A',
'file://mse_cases/startup_test.html?testType=A&useAppendStream=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=A&doNotWaitForBodyOnLoad=true',
# pylint: disable=C0301
'file://mse_cases/startup_test.html?testType=A&useAppendStream=true&doNotWaitForBodyOnLoad=true',
]
for url in urls_list:
self.AddUserStory(MseCasesPage(url, self))
|
bsd-3-clause
|
egenerat/flight-manager
|
lib/requests/packages/chardet/langcyrillicmodel.py
|
2762
|
17725
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = {
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = {
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = {
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = {
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "MacCyrillic"
};
Ibm866Model = {
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM866"
}
Ibm855Model = {
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM855"
}
# flake8: noqa
|
mit
|
chiefy/kubernetes
|
examples/cluster-dns/images/backend/server.py
|
468
|
1313
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
PORT_NUMBER = 8000
# This class handles any incoming request.
class HTTPHandler(BaseHTTPRequestHandler):
# Handler for the GET requests
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write("Hello World!")
try:
# Create a web server and define the handler to manage the incoming request.
server = HTTPServer(('', PORT_NUMBER), HTTPHandler)
print 'Started httpserver on port ' , PORT_NUMBER
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
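# Hedged usage note, not part of the original file: once running, the server
# answers every GET on port 8000 with a plain "Hello World!" body, e.g.
#   curl http://<backend-ip>:8000/
# where <backend-ip> is a placeholder for wherever this backend is reachable.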
|
apache-2.0
|
rcomer/iris
|
lib/iris/tests/test_quickplot.py
|
3
|
8026
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Tests the high-level plotting interface.
"""
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import iris.tests.test_plot as test_plot
import iris
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
import iris.quickplot as qplt
# Caches _load_theta so subsequent calls are faster
def cache(fn, cache={}):
def inner(*args, **kwargs):
key = "result"
if not cache:
cache[key] = fn(*args, **kwargs)
return cache[key]
return inner
@cache
def _load_theta():
path = tests.get_data_path(("PP", "COLPEX", "theta_and_orog_subset.pp"))
theta = iris.load_cube(path, "air_potential_temperature")
# Improve the unit
theta.units = "K"
return theta
@tests.skip_data
@tests.skip_plot
class TestQuickplotCoordinatesGiven(test_plot.TestPlotCoordinatesGiven):
def setUp(self):
tests.GraphicsTest.setUp(self)
filename = tests.get_data_path(
("PP", "COLPEX", "theta_and_orog_subset.pp")
)
self.cube = test_plot.load_cube_once(
filename, "air_potential_temperature"
)
self.draw_module = iris.quickplot
self.contourf = test_plot.LambdaStr(
"iris.quickplot.contourf",
lambda cube, *args, **kwargs: iris.quickplot.contourf(
cube, *args, **kwargs
),
)
self.contour = test_plot.LambdaStr(
"iris.quickplot.contour",
lambda cube, *args, **kwargs: iris.quickplot.contour(
cube, *args, **kwargs
),
)
self.points = test_plot.LambdaStr(
"iris.quickplot.points",
lambda cube, *args, **kwargs: iris.quickplot.points(
cube, c=cube.data, *args, **kwargs
),
)
self.plot = test_plot.LambdaStr(
"iris.quickplot.plot",
lambda cube, *args, **kwargs: iris.quickplot.plot(
cube, *args, **kwargs
),
)
self.results = {
"yx": (
[self.contourf, ["grid_latitude", "grid_longitude"]],
[self.contourf, ["grid_longitude", "grid_latitude"]],
[self.contour, ["grid_latitude", "grid_longitude"]],
[self.contour, ["grid_longitude", "grid_latitude"]],
[self.points, ["grid_latitude", "grid_longitude"]],
[self.points, ["grid_longitude", "grid_latitude"]],
),
"zx": (
[self.contourf, ["model_level_number", "grid_longitude"]],
[self.contourf, ["grid_longitude", "model_level_number"]],
[self.contour, ["model_level_number", "grid_longitude"]],
[self.contour, ["grid_longitude", "model_level_number"]],
[self.points, ["model_level_number", "grid_longitude"]],
[self.points, ["grid_longitude", "model_level_number"]],
),
"tx": (
[self.contourf, ["time", "grid_longitude"]],
[self.contourf, ["grid_longitude", "time"]],
[self.contour, ["time", "grid_longitude"]],
[self.contour, ["grid_longitude", "time"]],
[self.points, ["time", "grid_longitude"]],
[self.points, ["grid_longitude", "time"]],
),
"x": ([self.plot, ["grid_longitude"]],),
"y": ([self.plot, ["grid_latitude"]],),
}
@tests.skip_data
@tests.skip_plot
class TestLabels(tests.GraphicsTest):
def setUp(self):
super().setUp()
self.theta = _load_theta()
def _slice(self, coords):
"""Returns the first cube containing the requested coordinates."""
for cube in self.theta.slices(coords):
break
return cube
def _small(self):
# Use a restricted size so we can make out the detail
cube = self._slice(["model_level_number", "grid_longitude"])
return cube[:5, :5]
def test_contour(self):
qplt.contour(self._small())
self.check_graphic()
qplt.contourf(
self._small(), coords=["model_level_number", "grid_longitude"]
)
self.check_graphic()
def test_contourf(self):
qplt.contourf(self._small())
cube = self._small()
iplt.orography_at_points(cube)
self.check_graphic()
qplt.contourf(
self._small(), coords=["model_level_number", "grid_longitude"]
)
self.check_graphic()
qplt.contourf(
self._small(), coords=["grid_longitude", "model_level_number"]
)
self.check_graphic()
def test_contourf_axes_specified(self):
# Check that the contourf function does not modify the matplotlib
# pyplot state machine.
# Create a figure and axes to be used by contourf
plt.figure()
axes1 = plt.axes()
# Create test figure and axes which will be the new results
# of plt.gcf and plt.gca.
plt.figure()
axes2 = plt.axes()
# Add a title to the test axes.
plt.title("This should not be changed")
# Draw the contourf on a specific axes.
qplt.contourf(self._small(), axes=axes1)
# Ensure that the correct axes got the appropriate title.
self.assertEqual(axes2.get_title(), "This should not be changed")
self.assertEqual(axes1.get_title(), "Air potential temperature")
# Check that the axes labels were set correctly.
self.assertEqual(axes1.get_xlabel(), "Grid longitude / degrees")
self.assertEqual(axes1.get_ylabel(), "Altitude / m")
def test_contourf_nameless(self):
cube = self._small()
cube.standard_name = None
cube.attributes["STASH"] = ""
qplt.contourf(cube, coords=["grid_longitude", "model_level_number"])
self.check_graphic()
def test_pcolor(self):
qplt.pcolor(self._small())
self.check_graphic()
def test_pcolormesh(self):
qplt.pcolormesh(self._small())
# cube = self._small()
# iplt.orography_at_bounds(cube)
self.check_graphic()
def test_pcolormesh_str_symbol(self):
pcube = self._small().copy()
pcube.coords("level_height")[0].units = "centimeters"
qplt.pcolormesh(pcube)
self.check_graphic()
def test_map(self):
cube = self._slice(["grid_latitude", "grid_longitude"])
qplt.contour(cube)
self.check_graphic()
# check that adding 360 to the longitude data gives an almost identical result
lon = cube.coord("grid_longitude")
lon.points = lon.points + 360
qplt.contour(cube)
self.check_graphic()
def test_alignment(self):
cube = self._small()
qplt.contourf(cube)
# qplt.outline(cube)
qplt.points(cube)
self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestTimeReferenceUnitsLabels(tests.GraphicsTest):
def setUp(self):
super().setUp()
path = tests.get_data_path(("PP", "aPProt1", "rotatedMHtimecube.pp"))
self.cube = iris.load_cube(path)[:, 0, 0]
def test_reference_time_units(self):
# units should not be displayed for a reference time
qplt.plot(self.cube.coord("time"), self.cube)
plt.gcf().autofmt_xdate()
self.check_graphic()
def test_not_reference_time_units(self):
# units should be displayed for other time coordinates
qplt.plot(self.cube.coord("forecast_period"), self.cube)
self.check_graphic()
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
khosrow/luma-devel
|
luma/base/gui/design/PluginListWidgetDesign.py
|
3
|
1349
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/mnt/debris/devel/repo/git/luma-fixes/resources/forms/PluginListWidgetDesign.ui'
#
# Created: Wed May 25 21:41:09 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_pluginListWidget(object):
def setupUi(self, pluginListWidget):
pluginListWidget.setObjectName(_fromUtf8("pluginListWidget"))
pluginListWidget.resize(444, 313)
self.gridLayout = QtGui.QGridLayout(pluginListWidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.listView = QtGui.QListView(pluginListWidget)
self.listView.setObjectName(_fromUtf8("listView"))
self.gridLayout.addWidget(self.listView, 0, 0, 1, 1)
self.retranslateUi(pluginListWidget)
QtCore.QObject.connect(self.listView, QtCore.SIGNAL(_fromUtf8("activated(QModelIndex)")), pluginListWidget.pluginDoubleClicked)
QtCore.QMetaObject.connectSlotsByName(pluginListWidget)
def retranslateUi(self, pluginListWidget):
pluginListWidget.setWindowTitle(QtGui.QApplication.translate("pluginListWidget", "Form", None, QtGui.QApplication.UnicodeUTF8))
|
gpl-2.0
|
andiwundsam/_of_normalize
|
pox/forwarding/l3_learning.py
|
36
|
12330
|
# Copyright 2012-2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A stupid L3 switch
For each switch:
1) Keep a table that maps IP addresses to MAC addresses and switch ports.
Stock this table using information from ARP and IP packets.
2) When you see an ARP query, try to answer it using information in the table
from step 1. If the info in the table is old, just flood the query.
3) Flood all other ARPs.
4) When you see an IP packet, if you know the destination port (because it's
in the table from step 1), install a flow for it.
"""
from pox.core import core
import pox
log = core.getLogger()
from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.addresses import IPAddr, EthAddr
from pox.lib.util import str_to_bool, dpid_to_str
from pox.lib.recoco import Timer
import pox.openflow.libopenflow_01 as of
from pox.lib.revent import *
import time
# Timeout for flows
FLOW_IDLE_TIMEOUT = 10
# Timeout for ARP entries
ARP_TIMEOUT = 60 * 2
# Maximum number of packet to buffer on a switch for an unknown IP
MAX_BUFFERED_PER_IP = 5
# Maximum time to hang on to a buffer for an unknown IP in seconds
MAX_BUFFER_TIME = 5
class Entry (object):
"""
Not strictly an ARP entry.
We use the port to determine which port to forward traffic out of.
We use the MAC to answer ARP replies.
We use the timeout so that if an entry is older than ARP_TIMEOUT, we
flood the ARP request rather than try to answer it ourselves.
"""
def __init__ (self, port, mac):
self.timeout = time.time() + ARP_TIMEOUT
self.port = port
self.mac = mac
def __eq__ (self, other):
if type(other) == tuple:
return (self.port,self.mac)==other
else:
return (self.port,self.mac)==(other.port,other.mac)
def __ne__ (self, other):
return not self.__eq__(other)
def isExpired (self):
if self.port == of.OFPP_NONE: return False
return time.time() > self.timeout
def dpid_to_mac (dpid):
return EthAddr("%012x" % (dpid & 0xffFFffFFffFF,))
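# Hedged illustration, not part of the original module: the low 48 bits of the
# datapath ID become the MAC this switch answers ARPs with, e.g. DPID 0x1 maps
# to 00:00:00:00:00:01.
def _dpid_to_mac_example():
    return dpid_to_mac(0x1)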
class l3_switch (EventMixin):
def __init__ (self, fakeways = [], arp_for_unknowns = False):
# These are "fake gateways" -- we'll answer ARPs for them with MAC
# of the switch they're connected to.
self.fakeways = set(fakeways)
# If this is true and we see a packet for an unknown
# host, we'll ARP for it.
self.arp_for_unknowns = arp_for_unknowns
# (dpid,IP) -> expire_time
# We use this to keep from spamming ARPs
self.outstanding_arps = {}
# (dpid,IP) -> [(expire_time,buffer_id,in_port), ...]
# These are buffers we've gotten at this datapath for this IP which
# we can't deliver because we don't know where they go.
self.lost_buffers = {}
# For each switch, we map IP addresses to Entries
self.arpTable = {}
# This timer handles expiring stuff
self._expire_timer = Timer(5, self._handle_expiration, recurring=True)
self.listenTo(core)
def _handle_expiration (self):
# Called by a timer so that we can remove old items.
empty = []
for k,v in self.lost_buffers.iteritems():
dpid,ip = k
for item in list(v):
expires_at,buffer_id,in_port = item
if expires_at < time.time():
# This packet is old. Tell this switch to drop it.
v.remove(item)
po = of.ofp_packet_out(buffer_id = buffer_id, in_port = in_port)
core.openflow.sendToDPID(dpid, po)
if len(v) == 0: empty.append(k)
# Remove empty buffer bins
for k in empty:
del self.lost_buffers[k]
def _send_lost_buffers (self, dpid, ipaddr, macaddr, port):
"""
We may have "lost" buffers -- packets we got but didn't know
where to send at the time. We may know now. Try and see.
"""
if (dpid,ipaddr) in self.lost_buffers:
# Yup!
bucket = self.lost_buffers[(dpid,ipaddr)]
del self.lost_buffers[(dpid,ipaddr)]
log.debug("Sending %i buffered packets to %s from %s"
% (len(bucket),ipaddr,dpid_to_str(dpid)))
for _,buffer_id,in_port in bucket:
po = of.ofp_packet_out(buffer_id=buffer_id,in_port=in_port)
po.actions.append(of.ofp_action_dl_addr.set_dst(macaddr))
po.actions.append(of.ofp_action_output(port = port))
core.openflow.sendToDPID(dpid, po)
def _handle_GoingUpEvent (self, event):
self.listenTo(core.openflow)
log.debug("Up...")
def _handle_PacketIn (self, event):
dpid = event.connection.dpid
inport = event.port
packet = event.parsed
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if dpid not in self.arpTable:
# New switch -- create an empty table
self.arpTable[dpid] = {}
for fake in self.fakeways:
self.arpTable[dpid][IPAddr(fake)] = Entry(of.OFPP_NONE,
dpid_to_mac(dpid))
if packet.type == ethernet.LLDP_TYPE:
# Ignore LLDP packets
return
if isinstance(packet.next, ipv4):
log.debug("%i %i IP %s => %s", dpid,inport,
packet.next.srcip,packet.next.dstip)
# Send any waiting packets...
self._send_lost_buffers(dpid, packet.next.srcip, packet.src, inport)
# Learn or update port/MAC info
if packet.next.srcip in self.arpTable[dpid]:
if self.arpTable[dpid][packet.next.srcip] != (inport, packet.src):
log.info("%i %i RE-learned %s", dpid,inport,packet.next.srcip)
else:
log.debug("%i %i learned %s", dpid,inport,str(packet.next.srcip))
self.arpTable[dpid][packet.next.srcip] = Entry(inport, packet.src)
# Try to forward
dstaddr = packet.next.dstip
if dstaddr in self.arpTable[dpid]:
# We have info about what port to send it out on...
prt = self.arpTable[dpid][dstaddr].port
mac = self.arpTable[dpid][dstaddr].mac
if prt == inport:
log.warning("%i %i not sending packet for %s back out of the " +
"input port" % (dpid, inport, str(dstaddr)))
else:
log.debug("%i %i installing flow for %s => %s out port %i"
% (dpid, inport, packet.next.srcip, dstaddr, prt))
actions = []
actions.append(of.ofp_action_dl_addr.set_dst(mac))
actions.append(of.ofp_action_output(port = prt))
match = of.ofp_match.from_packet(packet, inport)
match.dl_src = None # Wildcard source MAC
msg = of.ofp_flow_mod(command=of.OFPFC_ADD,
idle_timeout=FLOW_IDLE_TIMEOUT,
hard_timeout=of.OFP_FLOW_PERMANENT,
buffer_id=event.ofp.buffer_id,
actions=actions,
match=of.ofp_match.from_packet(packet,
inport))
event.connection.send(msg.pack())
elif self.arp_for_unknowns:
# We don't know this destination.
# First, we track this buffer so that we can try to resend it later
# if we learn the destination. Second, we ARP for the destination,
# which should ultimately result in it responding and us learning
# where it is.
# Add to tracked buffers
if (dpid,dstaddr) not in self.lost_buffers:
self.lost_buffers[(dpid,dstaddr)] = []
bucket = self.lost_buffers[(dpid,dstaddr)]
entry = (time.time() + MAX_BUFFER_TIME,event.ofp.buffer_id,inport)
bucket.append(entry)
while len(bucket) > MAX_BUFFERED_PER_IP: del bucket[0]
# Expire things from our outstanding ARP list...
self.outstanding_arps = {k:v for k,v in
self.outstanding_arps.iteritems() if v > time.time()}
# Check if we've already ARPed recently
if (dpid,dstaddr) in self.outstanding_arps:
# Oop, we've already done this one recently.
return
# And ARP...
self.outstanding_arps[(dpid,dstaddr)] = time.time() + 4
r = arp()
r.hwtype = r.HW_TYPE_ETHERNET
r.prototype = r.PROTO_TYPE_IP
r.hwlen = 6
r.protolen = r.protolen
r.opcode = r.REQUEST
r.hwdst = ETHER_BROADCAST
r.protodst = dstaddr
r.hwsrc = packet.src
r.protosrc = packet.next.srcip
e = ethernet(type=ethernet.ARP_TYPE, src=packet.src,
dst=ETHER_BROADCAST)
e.set_payload(r)
log.debug("%i %i ARPing for %s on behalf of %s" % (dpid, inport,
str(r.protodst), str(r.protosrc)))
msg = of.ofp_packet_out()
msg.data = e.pack()
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.in_port = inport
event.connection.send(msg)
elif isinstance(packet.next, arp):
a = packet.next
log.debug("%i %i ARP %s %s => %s", dpid, inport,
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode,
'op:%i' % (a.opcode,)), str(a.protosrc), str(a.protodst))
if a.prototype == arp.PROTO_TYPE_IP:
if a.hwtype == arp.HW_TYPE_ETHERNET:
if a.protosrc != 0:
# Learn or update port/MAC info
if a.protosrc in self.arpTable[dpid]:
if self.arpTable[dpid][a.protosrc] != (inport, packet.src):
log.info("%i %i RE-learned %s", dpid,inport,str(a.protosrc))
else:
log.debug("%i %i learned %s", dpid,inport,str(a.protosrc))
self.arpTable[dpid][a.protosrc] = Entry(inport, packet.src)
# Send any waiting packets...
self._send_lost_buffers(dpid, a.protosrc, packet.src, inport)
if a.opcode == arp.REQUEST:
# Maybe we can answer
if a.protodst in self.arpTable[dpid]:
# We have an answer...
if not self.arpTable[dpid][a.protodst].isExpired():
# .. and it's relatively current, so we'll reply ourselves
r = arp()
r.hwtype = a.hwtype
r.prototype = a.prototype
r.hwlen = a.hwlen
r.protolen = a.protolen
r.opcode = arp.REPLY
r.hwdst = a.hwsrc
r.protodst = a.protosrc
r.protosrc = a.protodst
r.hwsrc = self.arpTable[dpid][a.protodst].mac
e = ethernet(type=packet.type, src=dpid_to_mac(dpid),
dst=a.hwsrc)
e.set_payload(r)
log.debug("%i %i answering ARP for %s" % (dpid, inport,
str(r.protosrc)))
msg = of.ofp_packet_out()
msg.data = e.pack()
msg.actions.append(of.ofp_action_output(port =
of.OFPP_IN_PORT))
msg.in_port = inport
event.connection.send(msg)
return
# Didn't know how to answer or otherwise handle this ARP, so just flood it
log.debug("%i %i flooding ARP %s %s => %s" % (dpid, inport,
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode,
'op:%i' % (a.opcode,)), str(a.protosrc), str(a.protodst)))
msg = of.ofp_packet_out(in_port = inport, data = event.ofp,
action = of.ofp_action_output(port = of.OFPP_FLOOD))
event.connection.send(msg)
def launch (fakeways="", arp_for_unknowns=None):
fakeways = fakeways.replace(","," ").split()
fakeways = [IPAddr(x) for x in fakeways]
if arp_for_unknowns is None:
arp_for_unknowns = len(fakeways) > 0
else:
arp_for_unknowns = str_to_bool(arp_for_unknowns)
core.registerNew(l3_switch, fakeways, arp_for_unknowns)
|
apache-2.0
|
MicroPyramid/docker-box
|
dockit/views.py
|
1
|
22934
|
from os import statvfs, uname
import time
from socket import socket
import psutil
import uuid
import json
import requests
import subprocess
from django.shortcuts import render, HttpResponse, get_object_or_404, redirect
from django.http import JsonResponse, HttpResponseRedirect, StreamingHttpResponse, Http404
from dockit.models import User, IP, Image, Container
from dockit.forms import UserForm, IPForm, ContainerForm
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.core.urlresolvers import reverse
from django.views.decorators.http import condition
from docker_box.settings import BASE_DIR, DOCKER_API_PORT, HOST_IP_ADDR
def admin_required(function):
def check_admin(request, *args, **kwargs):
if not request.user.is_superuser:
return render(request, 'no_access.html')
return function(request, *args, **kwargs)
return check_admin
def index(request):
if request.method == "POST":
user = authenticate(username=request.POST.get('email'), password=request.POST.get('password'))
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect(request.GET.get('next') or '/')
else:
response_data = {'message': "Your account has been disabled!"}
else:
response_data = {'message': 'The username and password are incorrect.'}
return render(request, 'login.html', response_data)
elif request.user.is_authenticated():
host_name, kernel = uname()[1:3]
return render(request, 'dashboard.html', {'host_name': host_name, 'kernel': kernel, 'ip_addr': HOST_IP_ADDR})
else:
return render(request, 'login.html')
def logout_user(request):
if request.user.is_authenticated():
logout(request)
return HttpResponseRedirect('/')
@login_required
@condition(etag_func=None)
def host_stats(request):
return StreamingHttpResponse(stream_host_stats())
def stream_host_stats():
while True:
net = psutil.net_io_counters(pernic=True)
time.sleep(1)
net1 = psutil.net_io_counters(pernic=True)
net_stat_download = {}
net_stat_upload = {}
for k, v in net.items():
for k1, v1 in net1.items():
if k1 == k:
net_stat_download[k] = (v1.bytes_recv - v.bytes_recv) / 1000.
net_stat_upload[k] = (v1.bytes_sent - v.bytes_sent) / 1000.
ds = statvfs('/')
disk_str = {"Used": ((ds.f_blocks - ds.f_bfree) * ds.f_frsize) / 10 ** 9, "Unused": (ds.f_bavail * ds.f_frsize) / 10 ** 9}
yield '[{"cpu":"%s","memory":"%s","memTotal":"%s","net_stats_down":"%s","net_stats_up":"%s","disk":"%s"}],' \
% (psutil.cpu_percent(interval=1), psutil.virtual_memory().used, psutil.virtual_memory().free, \
net_stat_download, net_stat_upload, disk_str)
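# Hedged illustration, not part of the original module: the per-NIC download rate
# streamed above is just the difference between two psutil counter snapshots
# taken one second apart, converted to kB/s.
def _nic_download_rates_kbps():
    before = psutil.net_io_counters(pernic=True)
    time.sleep(1)
    after = psutil.net_io_counters(pernic=True)
    return {nic: (after[nic].bytes_recv - before[nic].bytes_recv) / 1000.
            for nic in after if nic in before}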
@login_required
@admin_required
def docker_images(request):
images = Image.objects.filter(snapshot=None, is_snapshot=False)
uuid_token = str(uuid.uuid4())
return render(request, "images_list.html", {'images': images, 'uuid_token': uuid_token})
@login_required
def launch_image(request, name):
image = Image.objects.get(name=name)
if image.has_access(request.user):
if request.method == "POST":
container_form = ContainerForm(request.POST, ssh_users=request.POST.getlist('ssh_users'))
if container_form.is_valid():
memory, cores, hostname = container_form.cleaned_data['ram'], \
container_form.cleaned_data['cores'], \
container_form.cleaned_data['hostname']
image_obj = Image.objects.get(id=request.POST['image'])
container_obj = container_form.save(commit=False)
if container_obj.ip.is_routed:
result = image_obj.run_bridge(cores, memory, container_obj.ip.ip_addr, hostname)
else:
result = image_obj.run_macvlan(cores, memory, container_obj.ip.ip_addr, container_obj.ip.mac_addr, hostname)
container_obj.container_id = result
container_obj.save()
for r in request.POST.getlist('user'):
container_obj.user.add(r)
container_obj.save()
for ssh_user in request.POST.getlist('ssh_users'):
user_obj = User.objects.get(email=ssh_user)
container_obj.copy_ssh_pub_key(user_obj)
passphrase = container_obj.set_passphrase()
request.session['launched_container_id'] = container_obj.container_id.decode('utf-8')
ip = IP.objects.get(ip_addr=str(container_obj.ip))
ip.is_available = False
ip.save()
url = reverse('docker_box:container_info', kwargs={'container_id': container_obj.container_id.decode('utf-8')})
return JsonResponse({'success': 'image launched', 'url': url, 'passphrase': passphrase})
return JsonResponse({'FORM_ERRORS': 'true', 'form_errors': container_form.errors})
else:
users = [request.user]
if request.user.is_superuser:
users = User.objects.filter(is_active=True)
images = Image.objects.all()
else:
images = Image.objects.filter(user=request.user)
ips = IP.objects.filter(is_active=True, is_available=True)
return render(request, "launch_image.html", {'ips': ips, 'images': images, 'image_name': name, 'users': users})
raise PermissionDenied
@login_required
def container_list(request):
if request.user.is_superuser:
containers = Container.objects.all()
else:
containers = Container.objects.filter(user=request.user)
active_containers_list = []
idle_containers_list = []
for container in containers:
details_d = container.details()
container.__dict__.update(details_d)
if details_d['running']:
active_containers_list.append(container)
else:
idle_containers_list.append(container)
return render(
request,
"container_list.html",
{
'active_containers_list': active_containers_list,
'idle_containers_list': idle_containers_list
}
)
@login_required
def container_details(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if container:
images = Image.objects.filter(user=request.user, snapshot=container, is_snapshot=True)
return render(request, "container_details.html", {"container": container.json(), 'data': \
[['100', 10], ['90', 9], ['80', 8]], 'images': images, 'container_id': container.container_id})
else:
return render(request, 'no_access.html')
@login_required
@admin_required
def users_list(request):
users = User.objects.all()
return render(request, "users.html", {"users_list": users})
@login_required
@admin_required
def new_user(request):
if request.method == "POST":
form = UserForm(request.POST)
if form.is_valid():
form.save()
return JsonResponse({"error": False})
else:
return JsonResponse({"error": True, "errors": form.errors})
form = UserForm()
return render(request, "new_user.html", {"form": form})
@login_required
@admin_required
def edit_user(request, pk):
user = get_object_or_404(User, pk=pk)
if request.method == "POST":
form = UserForm(request.POST, instance=user)
if form.is_valid():
form.save()
return JsonResponse({"error": False})
else:
return JsonResponse({"error": True, "errors": form.errors})
form = UserForm(instance=user)
return render(request, "new_user.html", {"form": form})
@login_required
@admin_required
def delete_user(request, pk):
get_object_or_404(User, pk=pk).delete()
return redirect("docker_box:users-list")
@login_required
@admin_required
def ip_list(request):
ip_list = IP.objects.filter(is_available=True)
return render(request, "ip_address.html", {"ip_list": ip_list})
@login_required
@admin_required
def new_ip(request):
request_post = request.POST.copy()
if '0' in request_post['is_routed']:
del request_post['is_routed']
form = IPForm(request_post)
if form.is_valid():
form.save()
return JsonResponse({"error": False})
else:
return JsonResponse({"error": True, "errors": form.errors})
@login_required
@admin_required
def edit_ip(request, pk):
instance = get_object_or_404(IP, id=pk)
if Container.objects.filter(ip=instance).exists():
raise Http404
else:
form = IPForm(request.POST, instance=instance)
if form.is_valid():
form.save()
return JsonResponse({"error": False})
else:
return JsonResponse({"error": True, "errors": form.errors})
@login_required
@admin_required
def delete_ip(request, pk):
ip_obj = get_object_or_404(IP, id=pk)
if Container.objects.filter(ip=ip_obj).exists():
raise Http404
else:
get_object_or_404(IP, pk=pk).delete()
return redirect("docker_box:ip-list")
@login_required
@admin_required
def search_images(request):
term = request.POST['term']
images = requests.get('http://localhost:' + DOCKER_API_PORT + '/images/search?term=' + term)
return HttpResponse(json.dumps(images.json()),
content_type='application/json')
@login_required
@admin_required
def pull_image_progress(request, uuid_token):
file = open('/tmp/' + uuid_token, 'r')
data = file.read()
file.close()
if data:
return JsonResponse(data, safe=False)
else:
return JsonResponse({"status": "Pulling Please wait..."})
@login_required
@admin_required
def pull_image(request, uuid_token):
# TODO: user-defined tag
params = {'tag': 'latest', 'fromImage': request.POST['imageName']}
response = requests.post('http://localhost:' + DOCKER_API_PORT + '/images/create', params=params, stream=True)
if response:
for line in response.iter_lines():
file = open('/tmp/' + uuid_token, 'w')
if line:
output = json.loads(str(line.decode(encoding='UTF-8')))
try:
if output['progressDetail']:
progress = (output['progressDetail']['current'] * 100) / output['progressDetail']['total']
file.write('{"status": "ok","image-status":"' + output['status'] + '","progress":' + str(
int(progress)) + ',"id":"' + output['id'] + '"}')
except KeyError:
try:
if 'Digest:' in output['status']:
Image.objects.get_or_create(name=request.POST['imageName'], user=request.user, tag='latest')
except KeyError:
pass
file.close()
file = open('/tmp/progress', 'w')
file.write(str({"status": output['status']}))
file.close()
return JsonResponse({'status': 'ok'})
else:
return JsonResponse({'status': 'error'})
@login_required
def start_container(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if container:
status_code = container.start()
if status_code == 204:
return JsonResponse({'success': 'started'})
elif status_code == 304:
return JsonResponse({'success': 'Already Started'})
elif status_code == 404:
return JsonResponse({'ERROR': 'No Such Container'})
elif status_code == 500:
return JsonResponse({'ERROR': 'Server Error'})
else:
return JsonResponse({'ERROR': 'ERROR STARTING CONTAINER'})
else:
return render(request, 'no_access.html')
@login_required
def stop_container(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if container:
status_code = container.stop()
if status_code == 204:
response = {'success': 'stopped'}
elif status_code == 304:
response = {'success': 'stopped'}
elif status_code == 404:
response = {'ERROR': 'No Such Container'}
elif status_code == 500:
response = {'ERROR': 'Server Error'}
else:
response = {'ERROR': 'ERROR STOPPING CONTAINER'}
return JsonResponse(response)
else:
return render(request, 'no_access.html')
@login_required
def restart_container(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if container:
status_code = container.restart()
if status_code == 204:
return JsonResponse({'success': 'restarted'})
elif status_code == 404:
return JsonResponse({'ERROR': 'No Such Container'})
elif status_code == 500:
return JsonResponse({'ERROR': 'Server Error'})
else:
return JsonResponse({'ERROR': 'Error Restarting Container'})
return render(request, 'no_access.html')
@login_required
@condition(etag_func=None)
def container_stats(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if container:
return StreamingHttpResponse(stream_response_generator(container))
return render(request, 'no_access.html')
def stream_response_generator(container):
container_id = container.container_id
while True:
response = str(subprocess.check_output("docker stats --no-stream " + container_id + "| tail -1", shell=True))
response = response.split()
if response:
yield '[{"cpu":"%s","memory":"%s","memTotal":"%s","netDow":"%s","netDowUnit":"%s",\
"netUp":"%s","netUpUnit":"%s"}],' % (response[1], response[7], response[5], \
response[8], response[9], response[11], response[12])
else:
yield response
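# The numeric indexes used above (1, 5, 7, 8, 9, 11, 12) assume a particular
# column layout in the output of `docker stats --no-stream` for the Docker CLI
# version this was written against; if the CLI changes its table format the
# mapping silently breaks. A more robust variant (a sketch, not what this view
# currently does) would read JSON from the Docker Engine API's
# GET /containers/<id>/stats endpoint instead of shelling out and splitting text.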
@login_required
def backup_container(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if request.POST:
if container:
name = request.POST['name']
if Image.objects.filter(user=request.user, name=name).exists():
return JsonResponse({'error': True, 'msg': 'Image with this name exists.'})
else:
status_code, response_json = container.commit(name)
if status_code == 201:
Image.objects.create(name=name, tag='latest', user=request.user, snapshot=container, is_snapshot=True)
return JsonResponse({'error': False, 'image_id': response_json['Id']})
return render(request, 'no_access.html')
else:
return render(request, 'backup_container.html')
@login_required
def change_password(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if container:
passphrase = container.set_passphrase()
return JsonResponse({'error': False, 'passphrase': passphrase})
return render(request, 'no_access.html')
@login_required
def ssh_access(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if container:
if request.POST:
ssh_users = request.POST.getlist('ssh_users')
for ssh_user in ssh_users:
user_obj = User.objects.get(email=ssh_user)
if not user_obj.ssh_pub_key:
return JsonResponse({'error': True})
for ssh_user in ssh_users:
user_obj = User.objects.get(email=ssh_user)
container.copy_ssh_pub_key(user_obj)
return JsonResponse({'error': False})
users = [request.user]
if request.user.is_superuser:
users = User.objects.filter(is_active=True)
return render(request, 'ssh_access.html', {'users': users})
return render(request, 'no_access.html')
@login_required
def container_top(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if container:
top = container.top()
if top:
return JsonResponse(top)
return JsonResponse({'Titles': None, 'Processes': None})
return render(request, 'no_access.html')
@login_required
def container_diff(request, container_id, total):
container = Container.objects.get_container(container_id, request.user)
if container:
diff = container.diff()
modified = []
added = []
deleted = []
for file_info in diff:
if file_info['Kind'] == 0:
modified.append(file_info['Path'])
elif file_info['Kind'] == 1:
added.append(file_info['Path'])
elif file_info['Kind'] == 2:
deleted.append(file_info['Path'])
if total == '0':
return JsonResponse({'modified': modified[:10], 'added': added[:10], 'deleted': deleted[:10]})
return JsonResponse({'modified': modified, 'added': added, 'deleted': deleted})
return render(request, 'no_access.html')
def find_free_port():
s = socket()
s.bind(('', 0))
return s.getsockname()[1]
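# Binding to port 0 asks the kernel for an arbitrary free ephemeral port. The
# socket object above is never closed explicitly, so the port is only released
# once the socket is garbage-collected; a tidier variant (a sketch with the same
# behaviour, not what the code above does) would be:
#
#   def find_free_port():
#       s = socket()
#       s.bind(('', 0))
#       port = s.getsockname()[1]
#       s.close()
#       return port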
@login_required
def terminal(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if container:
port = find_free_port()
uuid_token = str(uuid.uuid4())
go_cmd = './dockit/terminal %s %s %s %s %s' % (port, HOST_IP_ADDR, DOCKER_API_PORT, container_id, BASE_DIR)
ncp_proc = subprocess.Popen(go_cmd, shell=True, executable='/bin/bash')
return JsonResponse({'container_id': container_id, 'port': port})
return render(request, 'no_access.html')
@login_required
def container_info(request, container_id):
if request.POST:
container = Container.objects.get_container(container_id, request.user)
if container:
details_d = container.details()
container.__dict__.update(details_d)
passphrase = request.POST['passphrase']
return render(request, 'container_info.html', {'container': container, 'passphrase': passphrase})
return render(request, 'no_access.html')
@login_required
def remove_image(request, name):
image = Image.objects.get_image(name, request.user)
if image:
if request.POST:
passphrase = request.POST['passphrase']
if request.user.check_password(passphrase):
status_code = image.remove()
if status_code == 200:
image.delete()
return JsonResponse({'success': 'Deleted'})
elif status_code == 404:
return JsonResponse({'ERROR': 'NO SUCH IMAGE'})
elif status_code == 409:
return JsonResponse({'ERROR': 'Image Conflict'})
return JsonResponse({'ERROR': 'Unable to remove image'})
return JsonResponse({'perror': True})
else:
details_d = image.details()
image.__dict__.update(details_d)
return render(request, 'remove_image.html', {'image': image})
return render(request, 'no_access.html')
@login_required
def edit_container(request, container_id):
container_instance = get_object_or_404(Container, container_id=container_id)
if container_instance:
if request.POST:
container_form = ContainerForm(request.POST, instance=container_instance, ssh_users=request.POST.getlist('ssh_users'))
if container_form.is_valid():
container_obj = container_form.save(commit=False)
container_obj.save()
container_obj.user.clear()
for r in request.POST.getlist('user'):
container_obj.user.add(r)
container_obj.save()
for ssh_user in request.POST.getlist('ssh_users'):
user_obj = User.objects.get(email=ssh_user)
container_obj.copy_ssh_pub_key(user_obj)
url = reverse('docker_box:container-list')
return JsonResponse({'success': 'image launched', 'url': url})
return JsonResponse({'FORM_ERRORS': 'true', 'form_errors': container_form.errors})
else:
users = [request.user]
if request.user.is_superuser:
users = User.objects.filter(is_active=True)
images = Image.objects.all()
else:
images = Image.objects.filter(user=request.user)
ips = IP.objects.filter(is_active=True)
container_details = container_instance.details()
container_instance.__dict__.update(container_details)
return render(request, "launch_image.html", {'edit_container': container_instance, 'ips': ips, 'images': images, 'users': users})
return render(request, 'no_access.html')
@login_required
def delete_container(request, container_id):
container = Container.objects.get_container(container_id, request.user)
if container:
if request.POST:
passphrase = request.POST['passphrase']
if request.user.check_password(passphrase):
status_code = container.remove()
if status_code == 204:
ip_obj = container.ip
ip_obj.is_available = True
ip_obj.save()
container.delete()
response = {'success': 'Deleted'}
elif status_code == 400:
response = {'ERROR': 'Bad Parameter'}
elif status_code == 404:
response = {'ERROR': 'No Such Container'}
elif status_code == 500:
response = {'ERROR': 'Server Error'}
elif status_code == 409:
response = {'ERROR': 'Stop container before deletion.'}
else:
response = {'ERROR': 'Unable to remove container'}
return JsonResponse(response)
return JsonResponse({'perror': True})
else:
details_d = container.details()
container.__dict__.update(details_d)
return render(request, 'remove_container.html', {'container': container})
return render(request, 'no_access.html')
|
mit
|
jbellis/cassandra
|
bin/cqlsh.py
|
1
|
100692
|
#!/bin/sh
# -*- mode: Python -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":"
# bash code here; finds a suitable python interpreter and execs this file.
# this implementation of cqlsh is compatible with both Python 3 and Python 2.7.
# prefer unqualified "python" if suitable:
python -c 'import sys; sys.exit(not (0x020700b0 < sys.hexversion))' 2>/dev/null \
&& exec python "$0" "$@"
for pyver in 3 2.7; do
which python$pyver > /dev/null 2>&1 && exec python$pyver "$0" "$@"
done
echo "No appropriate python interpreter found." >&2
exit 1
":"""
from __future__ import division, unicode_literals
import cmd
import codecs
import csv
import getpass
import optparse
import os
import platform
import sys
import traceback
import warnings
import webbrowser
from contextlib import contextmanager
from glob import glob
from uuid import UUID
if sys.version_info.major != 3 and (sys.version_info.major == 2 and sys.version_info.minor != 7):
sys.exit("\nCQL Shell supports only Python 3 or Python 2.7\n")
# see CASSANDRA-10428
if platform.python_implementation().startswith('Jython'):
sys.exit("\nCQL Shell does not run on Jython\n")
UTF8 = 'utf-8'
CP65001 = 'cp65001' # Win utf-8 variant
description = "CQL Shell for Apache Cassandra"
version = "5.0.1"
readline = None
try:
# check if tty first, because readline doesn't check and only cares
# about $TERM. We don't want the funky escape code stuff to be
# output if this is not a tty.
if sys.stdin.isatty():
import readline
except ImportError:
pass
CQL_LIB_PREFIX = 'cassandra-driver-internal-only-'
CASSANDRA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
CASSANDRA_CQL_HTML_FALLBACK = 'https://cassandra.apache.org/doc/cql3/CQL-3.2.html'
# default location of local CQL.html
if os.path.exists(CASSANDRA_PATH + '/doc/cql3/CQL.html'):
# default location of local CQL.html
CASSANDRA_CQL_HTML = 'file://' + CASSANDRA_PATH + '/doc/cql3/CQL.html'
elif os.path.exists('/usr/share/doc/cassandra/CQL.html'):
# fallback to package file
CASSANDRA_CQL_HTML = 'file:///usr/share/doc/cassandra/CQL.html'
else:
# fallback to online version
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
# On Linux, the Python webbrowser module uses the 'xdg-open' executable
# to open a file/URL. But that only works, if the current session has been
# opened from _within_ a desktop environment. I.e. 'xdg-open' will fail,
# if the session's been opened via ssh to a remote box.
#
# Use 'python' to get some information about the detected browsers.
# >>> import webbrowser
# >>> webbrowser._tryorder
# >>> webbrowser._browser
#
# webbrowser._tryorder is None in python3.7+
if webbrowser._tryorder is None or len(webbrowser._tryorder) == 0:
CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
elif webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', '') == '':
# only on Linux (some OS with xdg-open)
webbrowser._tryorder.remove('xdg-open')
webbrowser._tryorder.append('xdg-open')
# use bundled lib for python-cql if available. if there
# is a ../lib dir, use bundled libs there preferentially.
ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')]
myplatform = platform.system()
is_win = myplatform == 'Windows'
# Workaround for supporting CP65001 encoding on python < 3.3 (https://bugs.python.org/issue13216)
if is_win and sys.version_info < (3, 3):
codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None)
if myplatform == 'Linux':
ZIPLIB_DIRS.append('/usr/share/cassandra/lib')
if os.environ.get('CQLSH_NO_BUNDLED', ''):
ZIPLIB_DIRS = ()
def find_zip(libprefix):
for ziplibdir in ZIPLIB_DIRS:
zips = glob(os.path.join(ziplibdir, libprefix + '*.zip'))
if zips:
return max(zips) # probably the highest version, if multiple
cql_zip = find_zip(CQL_LIB_PREFIX)
if cql_zip:
ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):]
sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver))
third_parties = ('futures-', 'six-', 'geomet-')
for lib in third_parties:
lib_zip = find_zip(lib)
if lib_zip:
sys.path.insert(0, lib_zip)
# We cannot import six until we add its location to sys.path so the Python
# interpreter can find it. Do not move this to the top.
import six
from six.moves import configparser, input
from six import StringIO, ensure_text, ensure_str
warnings.filterwarnings("ignore", r".*blist.*")
try:
import cassandra
except ImportError as e:
sys.exit("\nPython Cassandra driver not installed, or not on PYTHONPATH.\n"
'You might try "pip install cassandra-driver".\n\n'
'Python: %s\n'
'Module load path: %r\n\n'
'Error: %s\n' % (sys.executable, sys.path, e))
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.cqltypes import cql_typename
from cassandra.marshal import int64_unpack
from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata,
TableMetadata, protect_name, protect_names)
from cassandra.policies import WhiteListRoundRobinPolicy
from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable
from cassandra.util import datetime_from_timestamp
# cqlsh should run correctly when run out of a Cassandra source tree,
# out of an unpacked Cassandra tarball, and after a proper package install.
cqlshlibdir = os.path.join(CASSANDRA_PATH, 'pylib')
if os.path.isdir(cqlshlibdir):
sys.path.insert(0, cqlshlibdir)
from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling, cqlshhandling
from cqlshlib.copyutil import ExportTask, ImportTask
from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN,
RED, WHITE, FormattedValue, colorme)
from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT,
DEFAULT_TIMESTAMP_FORMAT, CqlType, DateTimeFormat,
format_by_type, formatter_for)
from cqlshlib.tracing import print_trace, print_trace_session
from cqlshlib.util import get_file_encoding_bomsize, trim_if_present
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 9042
DEFAULT_SSL = False
DEFAULT_CONNECT_TIMEOUT_SECONDS = 5
DEFAULT_REQUEST_TIMEOUT_SECONDS = 10
DEFAULT_FLOAT_PRECISION = 5
DEFAULT_DOUBLE_PRECISION = 5
DEFAULT_MAX_TRACE_WAIT = 10
if readline is not None and readline.__doc__ is not None and 'libedit' in readline.__doc__:
DEFAULT_COMPLETEKEY = '\t'
else:
DEFAULT_COMPLETEKEY = 'tab'
cqldocs = None
cqlruleset = None
epilog = """Connects to %(DEFAULT_HOST)s:%(DEFAULT_PORT)d by default. These
defaults can be changed by setting $CQLSH_HOST and/or $CQLSH_PORT. When a
host (and optional port number) are given on the command line, they take
precedence over any defaults.""" % globals()
parser = optparse.OptionParser(description=description, epilog=epilog,
usage="Usage: %prog [options] [host [port]]",
version='cqlsh ' + version)
parser.add_option("-C", "--color", action='store_true', dest='color',
help='Always use color output')
parser.add_option("--no-color", action='store_false', dest='color',
help='Never use color output')
parser.add_option("--browser", dest='browser', help="""The browser to use to display CQL help, where BROWSER can be:
- one of the supported browsers in https://docs.python.org/2/library/webbrowser.html.
- browser path followed by %s, example: /usr/bin/google-chrome-stable %s""")
parser.add_option('--ssl', action='store_true', help='Use SSL', default=False)
parser.add_option("-u", "--username", help="Authenticate as user.")
parser.add_option("-p", "--password", help="Authenticate using password.")
parser.add_option('-k', '--keyspace', help='Authenticate to the given keyspace.')
parser.add_option("-f", "--file", help="Execute commands from FILE, then exit")
parser.add_option('--debug', action='store_true',
help='Show additional debugging information')
parser.add_option('--coverage', action='store_true',
help='Collect coverage data')
parser.add_option("--encoding", help="Specify a non-default encoding for output."
+ " (Default: %s)" % (UTF8,))
parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.")
parser.add_option('--cqlversion', default=None,
help='Specify a particular CQL version, '
'by default the highest version supported by the server will be used.'
' Examples: "3.0.3", "3.1.0"')
parser.add_option("--protocol-version", type="int", default=None,
help='Specify a specific protocol version; otherwise the client will default and downgrade as necessary')
parser.add_option("-e", "--execute", help='Execute the statement and quit.')
parser.add_option("--connect-timeout", default=DEFAULT_CONNECT_TIMEOUT_SECONDS, dest='connect_timeout',
help='Specify the connection timeout in seconds (default: %default seconds).')
parser.add_option("--request-timeout", default=DEFAULT_REQUEST_TIMEOUT_SECONDS, dest='request_timeout',
help='Specify the default request timeout in seconds (default: %default seconds).')
parser.add_option("-t", "--tty", action='store_true', dest='tty',
help='Force tty mode (command prompt).')
optvalues = optparse.Values()
(options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues)
# BEGIN history/config definition
HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra'))
if hasattr(options, 'cqlshrc'):
CONFIG_FILE = options.cqlshrc
if not os.path.exists(CONFIG_FILE):
print('\nWarning: Specified cqlshrc location `%s` does not exist. Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR))
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
else:
CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history')
if not os.path.exists(HISTORY_DIR):
try:
os.mkdir(HISTORY_DIR)
except OSError:
print('\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR)
OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc'))
if os.path.exists(OLD_CONFIG_FILE):
if os.path.exists(CONFIG_FILE):
print('\nWarning: cqlshrc config files were found at both the old location ({0})'
' and the new location ({1}), the old config file will not be migrated to the new'
' location, and the new location will be used for now. You should manually'
' consolidate the config files at the new location and remove the old file.'
.format(OLD_CONFIG_FILE, CONFIG_FILE))
else:
os.rename(OLD_CONFIG_FILE, CONFIG_FILE)
OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history'))
if os.path.exists(OLD_HISTORY):
os.rename(OLD_HISTORY, HISTORY)
# END history/config definition
CQL_ERRORS = (
cassandra.AlreadyExists, cassandra.AuthenticationFailed, cassandra.CoordinationFailure,
cassandra.InvalidRequest, cassandra.Timeout, cassandra.Unauthorized, cassandra.OperationTimedOut,
cassandra.cluster.NoHostAvailable,
cassandra.connection.ConnectionBusy, cassandra.connection.ProtocolError, cassandra.connection.ConnectionException,
cassandra.protocol.ErrorMessage, cassandra.protocol.InternalError, cassandra.query.TraceUnavailable
)
debug_completion = bool(os.environ.get('CQLSH_DEBUG_COMPLETION', '') == 'YES')
class NoKeyspaceError(Exception):
pass
class KeyspaceNotFound(Exception):
pass
class ColumnFamilyNotFound(Exception):
pass
class IndexNotFound(Exception):
pass
class MaterializedViewNotFound(Exception):
pass
class ObjectNotFound(Exception):
pass
class VersionNotSupported(Exception):
pass
class UserTypeNotFound(Exception):
pass
class FunctionNotFound(Exception):
pass
class AggregateNotFound(Exception):
pass
class DecodeError(Exception):
verb = 'decode'
def __init__(self, thebytes, err, colname=None):
self.thebytes = thebytes
self.err = err
self.colname = colname
def __str__(self):
return str(self.thebytes)
def message(self):
what = 'value %r' % (self.thebytes,)
if self.colname is not None:
what = 'value %r (for column %r)' % (self.thebytes, self.colname)
return 'Failed to %s %s : %s' \
% (self.verb, what, self.err)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.message())
class FormatError(DecodeError):
verb = 'format'
def full_cql_version(ver):
while ver.count('.') < 2:
ver += '.0'
ver_parts = ver.split('-', 1) + ['']
vertuple = tuple(list(map(int, ver_parts[0].split('.'))) + [ver_parts[1]])
return ver, vertuple
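# Worked examples of the padding and splitting above:
#   full_cql_version('3.1')         -> ('3.1.0', (3, 1, 0, ''))
#   full_cql_version('3.4.0-beta1') -> ('3.4.0-beta1', (3, 4, 0, 'beta1'))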
def format_value(val, cqltype, encoding, addcolor=False, date_time_format=None,
float_precision=None, colormap=None, nullval=None):
if isinstance(val, DecodeError):
if addcolor:
return colorme(repr(val.thebytes), colormap, 'error')
else:
return FormattedValue(repr(val.thebytes))
return format_by_type(val, cqltype=cqltype, encoding=encoding, colormap=colormap,
addcolor=addcolor, nullval=nullval, date_time_format=date_time_format,
float_precision=float_precision)
def show_warning_without_quoting_line(message, category, filename, lineno, file=None, line=None):
if file is None:
file = sys.stderr
try:
file.write(warnings.formatwarning(message, category, filename, lineno, line=''))
except IOError:
pass
warnings.showwarning = show_warning_without_quoting_line
warnings.filterwarnings('always', category=cql3handling.UnexpectedTableStructure)
def insert_driver_hooks():
class DateOverFlowWarning(RuntimeWarning):
pass
# Native datetime types blow up outside of datetime.[MIN|MAX]_YEAR. We will fall back to an int timestamp
def deserialize_date_fallback_int(byts, protocol_version):
timestamp_ms = int64_unpack(byts)
try:
return datetime_from_timestamp(timestamp_ms / 1000.0)
except OverflowError:
warnings.warn(DateOverFlowWarning("Some timestamps are larger than Python datetime can represent. "
"Timestamps are displayed in milliseconds from epoch."))
return timestamp_ms
cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)
if hasattr(cassandra, 'deserializers'):
del cassandra.deserializers.DesDateType
# Return cassandra.cqltypes.EMPTY instead of None for empty values
cassandra.cqltypes.CassandraType.support_empty_values = True
class Shell(cmd.Cmd):
custom_prompt = os.getenv('CQLSH_PROMPT', '')
if custom_prompt != '':
custom_prompt += "\n"
default_prompt = custom_prompt + "cqlsh> "
continue_prompt = " ... "
keyspace_prompt = custom_prompt + "cqlsh:{}> "
keyspace_continue_prompt = "{} ... "
show_line_nums = False
debug = False
coverage = False
coveragerc_path = None
stop = False
last_hist = None
shunted_query_out = None
use_paging = True
default_page_size = 100
def __init__(self, hostname, port, color=False,
username=None, password=None, encoding=None, stdin=None, tty=True,
completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None,
cqlver=None, keyspace=None,
tracing_enabled=False, expand_enabled=False,
display_nanotime_format=DEFAULT_NANOTIME_FORMAT,
display_timestamp_format=DEFAULT_TIMESTAMP_FORMAT,
display_date_format=DEFAULT_DATE_FORMAT,
display_float_precision=DEFAULT_FLOAT_PRECISION,
display_double_precision=DEFAULT_DOUBLE_PRECISION,
display_timezone=None,
max_trace_wait=DEFAULT_MAX_TRACE_WAIT,
ssl=False,
single_statement=None,
request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS,
protocol_version=None,
connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS,
is_subshell=False):
cmd.Cmd.__init__(self, completekey=completekey)
self.hostname = hostname
self.port = port
self.auth_provider = None
if username:
if not password:
password = getpass.getpass()
self.auth_provider = PlainTextAuthProvider(username=username, password=password)
self.username = username
self.keyspace = keyspace
self.ssl = ssl
self.tracing_enabled = tracing_enabled
self.page_size = self.default_page_size
self.expand_enabled = expand_enabled
if use_conn:
self.conn = use_conn
else:
kwargs = {}
if protocol_version is not None:
kwargs['protocol_version'] = protocol_version
self.conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=cqlver,
auth_provider=self.auth_provider,
ssl_options=sslhandling.ssl_settings(hostname, CONFIG_FILE) if ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
control_connection_timeout=connect_timeout,
connect_timeout=connect_timeout,
**kwargs)
self.owns_connection = not use_conn
if keyspace:
self.session = self.conn.connect(keyspace)
else:
self.session = self.conn.connect()
if browser == "":
browser = None
self.browser = browser
self.color = color
self.display_nanotime_format = display_nanotime_format
self.display_timestamp_format = display_timestamp_format
self.display_date_format = display_date_format
self.display_float_precision = display_float_precision
self.display_double_precision = display_double_precision
self.display_timezone = display_timezone
self.session.default_timeout = request_timeout
self.session.row_factory = ordered_dict_factory
self.session.default_consistency_level = cassandra.ConsistencyLevel.ONE
self.get_connection_versions()
self.set_expanded_cql_version(self.connection_versions['cql'])
self.current_keyspace = keyspace
self.max_trace_wait = max_trace_wait
self.session.max_trace_wait = max_trace_wait
self.tty = tty
self.encoding = encoding
self.check_windows_encoding()
self.output_codec = codecs.lookup(encoding)
self.statement = StringIO()
self.lineno = 1
self.in_comment = False
self.prompt = ''
if stdin is None:
stdin = sys.stdin
if tty:
self.reset_prompt()
self.report_connection()
print('Use HELP for help.')
else:
self.show_line_nums = True
self.stdin = stdin
self.query_out = sys.stdout
self.consistency_level = cassandra.ConsistencyLevel.ONE
self.serial_consistency_level = cassandra.ConsistencyLevel.SERIAL
self.empty_lines = 0
self.statement_error = False
self.single_statement = single_statement
self.is_subshell = is_subshell
@property
def batch_mode(self):
return not self.tty
@property
def is_using_utf8(self):
# utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html
return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001]
def check_windows_encoding(self):
if is_win and os.name == 'nt' and self.tty and \
self.is_using_utf8 and sys.stdout.encoding != CP65001:
self.printerr("\nWARNING: console codepage must be set to cp65001 "
"to support {} encoding on Windows platforms.\n"
"If you experience encoding problems, change your console"
" codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding))
def set_expanded_cql_version(self, ver):
ver, vertuple = full_cql_version(ver)
self.cql_version = ver
self.cql_ver_tuple = vertuple
def cqlver_atleast(self, major, minor=0, patch=0):
return self.cql_ver_tuple[:3] >= (major, minor, patch)
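# Example: if the server reports CQL spec 3.4.4, cqlver_atleast(3, 4) and
# cqlver_atleast(3, 1, 1) are True while cqlver_atleast(3, 5) is False, since
# this is a lexicographic tuple comparison on (major, minor, patch).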
def myformat_value(self, val, cqltype=None, **kwargs):
if isinstance(val, DecodeError):
self.decoding_errors.append(val)
try:
dtformats = DateTimeFormat(timestamp_format=self.display_timestamp_format,
date_format=self.display_date_format, nanotime_format=self.display_nanotime_format,
timezone=self.display_timezone)
precision = self.display_double_precision if cqltype is not None and cqltype.type_name == 'double' \
else self.display_float_precision
return format_value(val, cqltype=cqltype, encoding=self.output_codec.name,
addcolor=self.color, date_time_format=dtformats,
float_precision=precision, **kwargs)
except Exception as e:
err = FormatError(val, e)
self.decoding_errors.append(err)
return format_value(err, cqltype=cqltype, encoding=self.output_codec.name, addcolor=self.color)
def myformat_colname(self, name, table_meta=None):
column_colors = COLUMN_NAME_COLORS.copy()
# check column role and color appropriately
if table_meta:
if name in [col.name for col in table_meta.partition_key]:
column_colors.default_factory = lambda: RED
elif name in [col.name for col in table_meta.clustering_key]:
column_colors.default_factory = lambda: CYAN
elif name in table_meta.columns and table_meta.columns[name].is_static:
column_colors.default_factory = lambda: WHITE
return self.myformat_value(name, colormap=column_colors)
def report_connection(self):
self.show_host()
self.show_version()
def show_host(self):
print("Connected to {0} at {1}:{2}."
.format(self.applycolor(self.get_cluster_name(), BLUE),
self.hostname,
self.port))
def show_version(self):
vers = self.connection_versions.copy()
vers['shver'] = version
# system.Versions['cql'] apparently does not reflect changes with
# set_cql_version.
vers['cql'] = self.cql_version
print("[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers)
def show_session(self, sessionid, partial_session=False):
print_trace_session(self, self.session, sessionid, partial_session)
def get_connection_versions(self):
result, = self.session.execute("select * from system.local where key = 'local'")
vers = {
'build': result['release_version'],
'protocol': self.conn.protocol_version,
'cql': result['cql_version'],
}
self.connection_versions = vers
def get_keyspace_names(self):
return list(map(str, list(self.conn.metadata.keyspaces.keys())))
def get_columnfamily_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(map(str, list(self.get_keyspace_meta(ksname).tables.keys())))
def get_materialized_view_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(map(str, list(self.get_keyspace_meta(ksname).views.keys())))
def get_index_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(map(str, list(self.get_keyspace_meta(ksname).indexes.keys())))
def get_column_names(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
layout = self.get_table_meta(ksname, cfname)
return [str(col) for col in layout.columns]
def get_usertype_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return list(self.get_keyspace_meta(ksname).user_types.keys())
def get_usertype_layout(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
ks_meta = self.get_keyspace_meta(ksname)
try:
user_type = ks_meta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type {!r} not found".format(typename))
return list(zip(user_type.field_names, user_type.field_types))
def get_userfunction_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [f.name for f in list(self.get_keyspace_meta(ksname).functions.values())]
def get_useraggregate_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [f.name for f in list(self.get_keyspace_meta(ksname).aggregates.values())]
def get_cluster_name(self):
return self.conn.metadata.cluster_name
def get_partitioner(self):
return self.conn.metadata.partitioner
def get_keyspace_meta(self, ksname):
if ksname in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ksname]
raise KeyspaceNotFound('Keyspace %r not found.' % ksname)
def get_keyspaces(self):
return list(self.conn.metadata.keyspaces.values())
def get_ring(self, ks):
self.conn.metadata.token_map.rebuild_keyspace(ks, build_if_absent=True)
return self.conn.metadata.token_map.tokens_to_hosts_by_ks[ks]
def get_table_meta(self, ksname, tablename):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if tablename not in ksmeta.tables:
if ksname == 'system_auth' and tablename in ['roles', 'role_permissions']:
self.get_fake_auth_table_meta(ksname, tablename)
else:
raise ColumnFamilyNotFound("Column family {} not found".format(tablename))
else:
return ksmeta.tables[tablename]
def get_fake_auth_table_meta(self, ksname, tablename):
# may be using an external auth implementation, so the internal tables
# aren't actually defined in the schema. In this case, we'll fake
# them up
if tablename == 'roles':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'roles')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['is_superuser'] = ColumnMetadata(table_meta, 'is_superuser', cassandra.cqltypes.BooleanType)
table_meta.columns['can_login'] = ColumnMetadata(table_meta, 'can_login', cassandra.cqltypes.BooleanType)
elif tablename == 'role_permissions':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'role_permissions')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['resource'] = ColumnMetadata(table_meta, 'resource', cassandra.cqltypes.UTF8Type)
table_meta.columns['permission'] = ColumnMetadata(table_meta, 'permission', cassandra.cqltypes.UTF8Type)
else:
raise ColumnFamilyNotFound("Column family {} not found".format(tablename))
def get_index_meta(self, ksname, idxname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if idxname not in ksmeta.indexes:
raise IndexNotFound("Index {} not found".format(idxname))
return ksmeta.indexes[idxname]
def get_view_meta(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if viewname not in ksmeta.views:
raise MaterializedViewNotFound("Materialized view '{}' not found".format(viewname))
return ksmeta.views[viewname]
def get_object_meta(self, ks, name):
if name is None:
if ks and ks in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ks]
elif self.current_keyspace is None:
raise ObjectNotFound("'{}' not found in keyspaces".format(ks))
else:
name = ks
ks = self.current_keyspace
if ks is None:
ks = self.current_keyspace
ksmeta = self.get_keyspace_meta(ks)
if name in ksmeta.tables:
return ksmeta.tables[name]
elif name in ksmeta.indexes:
return ksmeta.indexes[name]
elif name in ksmeta.views:
return ksmeta.views[name]
raise ObjectNotFound("'{}' not found in keyspace '{}'".format(name, ks))
def get_usertypes_meta(self):
data = self.session.execute("select * from system.schema_usertypes")
if not data:
return cql3handling.UserTypesMeta({})
return cql3handling.UserTypesMeta.from_layout(data)
def get_trigger_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [trigger.name
for table in list(self.get_keyspace_meta(ksname).tables.values())
for trigger in list(table.triggers.values())]
def reset_statement(self):
self.reset_prompt()
self.statement.truncate(0)
self.statement.seek(0)
self.empty_lines = 0
def reset_prompt(self):
if self.current_keyspace is None:
self.set_prompt(self.default_prompt, True)
else:
self.set_prompt(self.keyspace_prompt.format(self.current_keyspace), True)
def set_continue_prompt(self):
if self.empty_lines >= 3:
self.set_prompt("Statements are terminated with a ';'. You can press CTRL-C to cancel an incomplete statement.")
self.empty_lines = 0
return
if self.current_keyspace is None:
self.set_prompt(self.continue_prompt)
else:
spaces = ' ' * len(str(self.current_keyspace))
self.set_prompt(self.keyspace_continue_prompt.format(spaces))
self.empty_lines = self.empty_lines + 1 if not self.lastcmd else 0
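# In other words: each empty continuation line bumps empty_lines (a non-empty
# line resets it via self.lastcmd), and once three consecutive empty lines have
# accumulated the prompt is swapped for the "Statements are terminated with a
# ';' ..." hint and the counter starts over.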
@contextmanager
def prepare_loop(self):
readline = None
if self.tty and self.completekey:
try:
import readline
except ImportError:
if is_win:
print("WARNING: pyreadline dependency missing. Install to enable tab completion.")
pass
else:
old_completer = readline.get_completer()
readline.set_completer(self.complete)
if readline.__doc__ is not None and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind -e")
readline.parse_and_bind("bind '" + self.completekey + "' rl_complete")
readline.parse_and_bind("bind ^R em-inc-search-prev")
else:
readline.parse_and_bind(self.completekey + ": complete")
# start coverage collection if requested, unless in subshell
if self.coverage and not self.is_subshell:
# check for coveragerc file, write it if missing
if os.path.exists(HISTORY_DIR):
self.coveragerc_path = os.path.join(HISTORY_DIR, '.coveragerc')
covdata_path = os.path.join(HISTORY_DIR, '.coverage')
if not os.path.isfile(self.coveragerc_path):
with open(self.coveragerc_path, 'w') as f:
f.writelines(["[run]\n",
"concurrency = multiprocessing\n",
"data_file = {}\n".format(covdata_path),
"parallel = true\n"]
)
# start coverage
import coverage
self.cov = coverage.Coverage(config_file=self.coveragerc_path)
self.cov.start()
try:
yield
finally:
if readline is not None:
readline.set_completer(old_completer)
if self.coverage and not self.is_subshell:
self.stop_coverage()
def get_input_line(self, prompt=''):
if self.tty:
self.lastcmd = input(prompt)
line = self.lastcmd + '\n'
else:
self.lastcmd = self.stdin.readline()
line = self.lastcmd
if not len(line):
raise EOFError
self.lineno += 1
line = ensure_text(line)
return line
def use_stdin_reader(self, until='', prompt=''):
until += '\n'
while True:
try:
newline = self.get_input_line(prompt=prompt)
except EOFError:
return
if newline == until:
return
yield newline
def cmdloop(self):
"""
Adapted from cmd.Cmd's version, because there is literally no way with
cmd.Cmd.cmdloop() to tell the difference between "EOF" showing up in
input and an actual EOF.
"""
with self.prepare_loop():
while not self.stop:
try:
if self.single_statement:
line = self.single_statement
self.stop = True
else:
line = self.get_input_line(self.prompt)
self.statement.write(line)
if self.onecmd(self.statement.getvalue()):
self.reset_statement()
except EOFError:
self.handle_eof()
except CQL_ERRORS as cqlerr:
self.printerr(cqlerr.message)
except KeyboardInterrupt:
self.reset_statement()
print('')
def onecmd(self, statementtext):
"""
Returns true if the statement is complete and was handled (meaning it
can be reset).
"""
try:
statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext)
except pylexotron.LexingError as e:
if self.show_line_nums:
self.printerr('Invalid syntax at line {0}, char {1}'
.format(e.linenum, e.charnum))
else:
self.printerr('Invalid syntax at char {0}'.format(e.charnum))
statementline = statementtext.split('\n')[e.linenum - 1]
self.printerr(' {0}'.format(statementline))
self.printerr(' {0}^'.format(' ' * e.charnum))
return True
while statements and not statements[-1]:
statements = statements[:-1]
if not statements:
return True
if endtoken_escaped or statements[-1][-1][0] != 'endtoken':
self.set_continue_prompt()
return
for st in statements:
try:
self.handle_statement(st, statementtext)
except Exception as e:
if self.debug:
traceback.print_exc()
else:
self.printerr(e)
return True
def handle_eof(self):
if self.tty:
print('')
statement = self.statement.getvalue()
if statement.strip():
if not self.onecmd(statement):
self.printerr('Incomplete statement at end of file')
self.do_exit()
def handle_statement(self, tokens, srcstr):
# Concat multi-line statements and insert into history
if readline is not None:
nl_count = srcstr.count("\n")
new_hist = srcstr.replace("\n", " ").rstrip()
if nl_count > 1 and self.last_hist != new_hist:
readline.add_history(new_hist)
self.last_hist = new_hist
cmdword = tokens[0][1]
if cmdword == '?':
cmdword = 'help'
custom_handler = getattr(self, 'do_' + cmdword.lower(), None)
if custom_handler:
parsed = cqlruleset.cql_whole_parse_tokens(tokens, srcstr=srcstr,
startsymbol='cqlshCommand')
if parsed and not parsed.remainder:
# successful complete parse
return custom_handler(parsed)
else:
return self.handle_parse_error(cmdword, tokens, parsed, srcstr)
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
def handle_parse_error(self, cmdword, tokens, parsed, srcstr):
if cmdword.lower() in ('select', 'insert', 'update', 'delete', 'truncate',
'create', 'drop', 'alter', 'grant', 'revoke',
'batch', 'list'):
# hey, maybe they know about some new syntax we don't. type
# assumptions won't work, but maybe the query will.
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
if parsed:
self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
else:
self.printerr('Improper %s command.' % cmdword)
def do_use(self, parsed):
ksname = parsed.get_binding('ksname')
success, _ = self.perform_simple_statement(SimpleStatement(parsed.extract_orig()))
if success:
if ksname[0] == '"' and ksname[-1] == '"':
self.current_keyspace = self.cql_unprotect_name(ksname)
else:
self.current_keyspace = ksname.lower()
def do_select(self, parsed):
tracing_was_enabled = self.tracing_enabled
ksname = parsed.get_binding('ksname')
stop_tracing = ksname == 'system_traces' or (ksname is None and self.current_keyspace == 'system_traces')
self.tracing_enabled = self.tracing_enabled and not stop_tracing
statement = parsed.extract_orig()
self.perform_statement(statement)
self.tracing_enabled = tracing_was_enabled
def perform_statement(self, statement):
statement = ensure_str(statement)
stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None)
success, future = self.perform_simple_statement(stmt)
if future:
if future.warnings:
self.print_warnings(future.warnings)
if self.tracing_enabled:
try:
for trace in future.get_all_query_traces(max_wait_per=self.max_trace_wait, query_cl=self.consistency_level):
print_trace(self, trace)
except TraceUnavailable:
msg = "Statement trace did not complete within %d seconds; trace data may be incomplete." % (self.session.max_trace_wait,)
self.writeresult(msg, color=RED)
for trace_id in future.get_query_trace_ids():
self.show_session(trace_id, partial_session=True)
except Exception as err:
self.printerr("Unable to fetch query trace: %s" % (str(err),))
return success
def parse_for_select_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
name = self.cql_unprotect_name(parsed.get_binding('cfname', None))
try:
return self.get_table_meta(ks, name)
except ColumnFamilyNotFound:
try:
return self.get_view_meta(ks, name)
except MaterializedViewNotFound:
raise ObjectNotFound("'{}' not found in keyspace '{}'".format(name, ks))
def parse_for_update_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
return self.get_table_meta(ks, cf)
def perform_simple_statement(self, statement):
if not statement:
return False, None
future = self.session.execute_async(statement, trace=self.tracing_enabled)
result = None
try:
result = future.result()
except CQL_ERRORS as err:
err_msg = ensure_text(err.message if hasattr(err, 'message') else str(err))
self.printerr(str(err.__class__.__name__) + ": " + err_msg)
except Exception:
import traceback
self.printerr(traceback.format_exc())
# Even if statement failed we try to refresh schema if not agreed (see CASSANDRA-9689)
if not future.is_schema_agreed:
try:
self.conn.refresh_schema_metadata(5) # will throw exception if there is a schema mismatch
except Exception:
self.printerr("Warning: schema version mismatch detected; check the schema versions of your "
"nodes in system.local and system.peers.")
self.conn.refresh_schema_metadata(-1)
if result is None:
return False, None
if statement.query_string[:6].lower() == 'select':
self.print_result(result, self.parse_for_select_meta(statement.query_string))
elif statement.query_string.lower().startswith("list users") or statement.query_string.lower().startswith("list roles"):
self.print_result(result, self.get_table_meta('system_auth', 'roles'))
elif statement.query_string.lower().startswith("list"):
self.print_result(result, self.get_table_meta('system_auth', 'role_permissions'))
elif result:
# CAS INSERT/UPDATE
self.writeresult("")
self.print_static_result(result, self.parse_for_update_meta(statement.query_string))
self.flush_output()
return True, future
def print_result(self, result, table_meta):
self.decoding_errors = []
self.writeresult("")
if result.has_more_pages and self.tty:
num_rows = 0
while True:
if result.current_rows:
num_rows += len(result.current_rows)
self.print_static_result(result, table_meta)
if result.has_more_pages:
if self.shunted_query_out is None:
# Only pause when not capturing.
input("---MORE---")
result.fetch_next_page()
else:
break
else:
num_rows = len(result.current_rows)
self.print_static_result(result, table_meta)
self.writeresult("(%d rows)" % num_rows)
if self.decoding_errors:
for err in self.decoding_errors[:2]:
self.writeresult(err.message(), color=RED)
if len(self.decoding_errors) > 2:
self.writeresult('%d more decoding errors suppressed.'
% (len(self.decoding_errors) - 2), color=RED)
def print_static_result(self, result, table_meta):
if not result.column_names and not table_meta:
return
column_names = result.column_names or list(table_meta.columns.keys())
formatted_names = [self.myformat_colname(name, table_meta) for name in column_names]
if not result.current_rows:
# print header only
self.print_formatted_result(formatted_names, None)
return
cql_types = []
if result.column_types:
ks_name = table_meta.keyspace_name if table_meta else self.current_keyspace
ks_meta = self.conn.metadata.keyspaces.get(ks_name, None)
cql_types = [CqlType(cql_typename(t), ks_meta) for t in result.column_types]
formatted_values = [list(map(self.myformat_value, [row[c] for c in column_names], cql_types)) for row in result.current_rows]
if self.expand_enabled:
self.print_formatted_result_vertically(formatted_names, formatted_values)
else:
self.print_formatted_result(formatted_names, formatted_values)
def print_formatted_result(self, formatted_names, formatted_values):
# determine column widths
widths = [n.displaywidth for n in formatted_names]
if formatted_values is not None:
for fmtrow in formatted_values:
for num, col in enumerate(fmtrow):
widths[num] = max(widths[num], col.displaywidth)
# print header
header = ' | '.join(hdr.ljust(w, color=self.color) for (hdr, w) in zip(formatted_names, widths))
self.writeresult(' ' + header.rstrip())
self.writeresult('-%s-' % '-+-'.join('-' * w for w in widths))
# stop if there are no rows
if formatted_values is None:
self.writeresult("")
return
# print row data
for row in formatted_values:
line = ' | '.join(col.rjust(w, color=self.color) for (col, w) in zip(row, widths))
self.writeresult(' ' + line)
self.writeresult("")
def print_formatted_result_vertically(self, formatted_names, formatted_values):
max_col_width = max([n.displaywidth for n in formatted_names])
max_val_width = max([n.displaywidth for row in formatted_values for n in row])
# for each row returned, list all the column-value pairs
for row_id, row in enumerate(formatted_values):
self.writeresult("@ Row %d" % (row_id + 1))
self.writeresult('-%s-' % '-+-'.join(['-' * max_col_width, '-' * max_val_width]))
for field_id, field in enumerate(row):
column = formatted_names[field_id].ljust(max_col_width, color=self.color)
value = field.ljust(field.displaywidth, color=self.color)
self.writeresult(' ' + " | ".join([column, value]))
self.writeresult('')
def print_warnings(self, warnings):
if warnings is None or len(warnings) == 0:
return
self.writeresult('')
self.writeresult('Warnings :')
for warning in warnings:
self.writeresult(warning)
self.writeresult('')
def emptyline(self):
pass
def parseline(self, line):
# this shouldn't be needed
raise NotImplementedError
def complete(self, text, state):
if readline is None:
return
if state == 0:
try:
self.completion_matches = self.find_completions(text)
except Exception:
if debug_completion:
import traceback
traceback.print_exc()
else:
raise
try:
return self.completion_matches[state]
except IndexError:
return None
def find_completions(self, text):
curline = readline.get_line_buffer()
prevlines = self.statement.getvalue()
wholestmt = prevlines + curline
begidx = readline.get_begidx() + len(prevlines)
stuff_to_complete = wholestmt[:begidx]
return cqlruleset.cql_complete(stuff_to_complete, text, cassandra_conn=self,
debug=debug_completion, startsymbol='cqlshCommand')
def set_prompt(self, prompt, prepend_user=False):
if prepend_user and self.username:
self.prompt = "{0}@{1}".format(self.username, prompt)
return
self.prompt = prompt
def cql_unprotect_name(self, namestr):
if namestr is None:
return
return cqlruleset.dequote_name(namestr)
def cql_unprotect_value(self, valstr):
if valstr is not None:
return cqlruleset.dequote_value(valstr)
def print_recreate_keyspace(self, ksdef, out):
out.write(ksdef.export_as_string())
out.write("\n")
def print_recreate_columnfamily(self, ksname, cfname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given table.
Writes output to the given out stream.
"""
out.write(self.get_table_meta(ksname, cfname).export_as_string())
out.write("\n")
def print_recreate_index(self, ksname, idxname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given index.
Writes output to the given out stream.
"""
out.write(self.get_index_meta(ksname, idxname).export_as_string())
out.write("\n")
def print_recreate_materialized_view(self, ksname, viewname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given materialized view.
Writes output to the given out stream.
"""
out.write(self.get_view_meta(ksname, viewname).export_as_string())
out.write("\n")
def print_recreate_object(self, ks, name, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given object (ks, table or index).
Writes output to the given out stream.
"""
out.write(self.get_object_meta(ks, name).export_as_string())
out.write("\n")
def describe_keyspaces(self):
print('')
cmd.Cmd.columnize(self, protect_names(self.get_keyspace_names()))
print('')
def describe_keyspace(self, ksname):
print('')
self.print_recreate_keyspace(self.get_keyspace_meta(ksname), sys.stdout)
print('')
def describe_columnfamily(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print('')
self.print_recreate_columnfamily(ksname, cfname, sys.stdout)
print('')
def describe_index(self, ksname, idxname):
print('')
self.print_recreate_index(ksname, idxname, sys.stdout)
print('')
def describe_materialized_view(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print('')
self.print_recreate_materialized_view(ksname, viewname, sys.stdout)
print('')
def describe_object(self, ks, name):
print('')
self.print_recreate_object(ks, name, sys.stdout)
print('')
def describe_columnfamilies(self, ksname):
print('')
if ksname is None:
for k in self.get_keyspaces():
name = protect_name(k.name)
print('Keyspace %s' % (name,))
print('---------%s' % ('-' * len(name)))
cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(k.name)))
print('')
else:
cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(ksname)))
print('')
def describe_functions(self, ksname):
print('')
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print('Keyspace %s' % (name,))
print('---------%s' % ('-' * len(name)))
self._columnize_unicode(list(ksmeta.functions.keys()))
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(list(ksmeta.functions.keys()))
def describe_function(self, ksname, functionname):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print('')
ksmeta = self.get_keyspace_meta(ksname)
functions = [f for f in list(ksmeta.functions.values()) if f.name == functionname]
if len(functions) == 0:
raise FunctionNotFound("User defined function {} not found".format(functionname))
print("\n\n".join(func.export_as_string() for func in functions))
print('')
def describe_aggregates(self, ksname):
print('')
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print('Keyspace %s' % (name,))
print('---------%s' % ('-' * len(name)))
self._columnize_unicode(list(ksmeta.aggregates.keys()))
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(list(ksmeta.aggregates.keys()))
def describe_aggregate(self, ksname, aggregatename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print('')
ksmeta = self.get_keyspace_meta(ksname)
aggregates = [f for f in list(ksmeta.aggregates.values()) if f.name == aggregatename]
if len(aggregates) == 0:
raise FunctionNotFound("User defined aggregate {} not found".format(aggregatename))
print("\n\n".join(aggr.export_as_string() for aggr in aggregates))
print('')
def describe_usertypes(self, ksname):
print('')
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print('Keyspace %s' % (name,))
print('---------%s' % ('-' * len(name)))
self._columnize_unicode(list(ksmeta.user_types.keys()), quote=True)
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(list(ksmeta.user_types.keys()), quote=True)
def describe_usertype(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print('')
ksmeta = self.get_keyspace_meta(ksname)
try:
usertype = ksmeta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type {} not found".format(typename))
print(usertype.export_as_string())
def _columnize_unicode(self, name_list, quote=False):
"""
Used when columnizing identifiers that may contain unicode
"""
names = [n for n in name_list]
if quote:
names = protect_names(names)
cmd.Cmd.columnize(self, names)
print('')
def describe_cluster(self):
print('\nCluster: %s' % self.get_cluster_name())
p = trim_if_present(self.get_partitioner(), 'org.apache.cassandra.dht.')
print('Partitioner: %s\n' % p)
# TODO: snitch?
# snitch = trim_if_present(self.get_snitch(), 'org.apache.cassandra.locator.')
# print 'Snitch: %s\n' % snitch
if self.current_keyspace is not None and self.current_keyspace != 'system':
print("Range ownership:")
ring = self.get_ring(self.current_keyspace)
for entry in list(ring.items()):
print(' %39s [%s]' % (str(entry[0].value), ', '.join([host.address for host in entry[1]])))
print('')
def describe_schema(self, include_system=False):
print('')
for k in self.get_keyspaces():
if include_system or k.name not in cql3handling.SYSTEM_KEYSPACES:
self.print_recreate_keyspace(k, sys.stdout)
print('')
def do_describe(self, parsed):
"""
DESCRIBE [cqlsh only]
(DESC may be used as a shorthand.)
Outputs information about the connected Cassandra cluster, or about
the data objects stored in the cluster. Use in one of the following ways:
DESCRIBE KEYSPACES
Output the names of all keyspaces.
DESCRIBE KEYSPACE [<keyspacename>]
Output CQL commands that could be used to recreate the given keyspace,
and the objects in it (such as tables, types, functions, etc.).
In some cases, as the CQL interface matures, there will be some metadata
about a keyspace that is not representable with CQL. That metadata will not be shown.
The '<keyspacename>' argument may be omitted, in which case the current
keyspace will be described.
DESCRIBE TABLES
Output the names of all tables in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE TABLE [<keyspace>.]<tablename>
Output CQL commands that could be used to recreate the given table.
In some cases, as above, there may be table metadata which is not
representable and which will not be shown.
DESCRIBE INDEX <indexname>
Output the CQL command that could be used to recreate the given index.
In some cases, there may be index metadata which is not representable
and which will not be shown.
DESCRIBE MATERIALIZED VIEW <viewname>
Output the CQL command that could be used to recreate the given materialized view.
In some cases, there may be materialized view metadata which is not representable
and which will not be shown.
DESCRIBE CLUSTER
Output information about the connected Cassandra cluster, such as the
cluster name, and the partitioner and snitch in use. When you are
connected to a non-system keyspace, also shows endpoint-range
ownership information for the Cassandra ring.
DESCRIBE [FULL] SCHEMA
Output CQL commands that could be used to recreate the entire (non-system) schema.
Works as though "DESCRIBE KEYSPACE k" was invoked for each non-system keyspace
k. Use DESCRIBE FULL SCHEMA to include the system keyspaces.
DESCRIBE TYPES
Output the names of all user-defined-types in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE TYPE [<keyspace>.]<type>
Output the CQL command that could be used to recreate the given user-defined-type.
DESCRIBE FUNCTIONS
Output the names of all user-defined-functions in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE FUNCTION [<keyspace>.]<function>
Output the CQL command that could be used to recreate the given user-defined-function.
DESCRIBE AGGREGATES
Output the names of all user-defined-aggregates in the current keyspace, or in all
keyspaces if there is no current keyspace.
DESCRIBE AGGREGATE [<keyspace>.]<aggregate>
Output the CQL command that could be used to recreate the given user-defined-aggregate.
DESCRIBE <objname>
Output CQL commands that could be used to recreate the entire object schema,
where object can be either a keyspace or a table or an index or a materialized
view (in this order).
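        Example (illustrative only; 'ks1' and 'users' are placeholder names):
          DESCRIBE KEYSPACES
          DESCRIBE TABLE ks1.users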
"""
what = parsed.matched[1][1].lower()
if what == 'functions':
self.describe_functions(self.current_keyspace)
elif what == 'function':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
functionname = self.cql_unprotect_name(parsed.get_binding('udfname'))
self.describe_function(ksname, functionname)
elif what == 'aggregates':
self.describe_aggregates(self.current_keyspace)
elif what == 'aggregate':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
aggregatename = self.cql_unprotect_name(parsed.get_binding('udaname'))
self.describe_aggregate(ksname, aggregatename)
elif what == 'keyspaces':
self.describe_keyspaces()
elif what == 'keyspace':
ksname = self.cql_unprotect_name(parsed.get_binding('ksname', ''))
if not ksname:
ksname = self.current_keyspace
if ksname is None:
self.printerr('Not in any keyspace.')
return
self.describe_keyspace(ksname)
elif what in ('columnfamily', 'table'):
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
self.describe_columnfamily(ks, cf)
elif what == 'index':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
idx = self.cql_unprotect_name(parsed.get_binding('idxname', None))
self.describe_index(ks, idx)
elif what == 'materialized' and parsed.matched[2][1].lower() == 'view':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
mv = self.cql_unprotect_name(parsed.get_binding('mvname'))
self.describe_materialized_view(ks, mv)
elif what in ('columnfamilies', 'tables'):
self.describe_columnfamilies(self.current_keyspace)
elif what == 'types':
self.describe_usertypes(self.current_keyspace)
elif what == 'type':
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
ut = self.cql_unprotect_name(parsed.get_binding('utname'))
self.describe_usertype(ks, ut)
elif what == 'cluster':
self.describe_cluster()
elif what == 'schema':
self.describe_schema(False)
elif what == 'full' and parsed.matched[2][1].lower() == 'schema':
self.describe_schema(True)
elif what:
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
name = self.cql_unprotect_name(parsed.get_binding('cfname'))
if not name:
name = self.cql_unprotect_name(parsed.get_binding('idxname', None))
if not name:
name = self.cql_unprotect_name(parsed.get_binding('mvname', None))
self.describe_object(ks, name)
do_desc = do_describe
def do_copy(self, parsed):
r"""
COPY [cqlsh only]
COPY x FROM: Imports CSV data into a Cassandra table
COPY x TO: Exports data from a Cassandra table in CSV format.
COPY <table_name> [ ( column [, ...] ) ]
FROM ( '<file_pattern_1, file_pattern_2, ... file_pattern_n>' | STDIN )
[ WITH <option>='value' [AND ...] ];
File patterns are either file names or valid python glob expressions, e.g. *.csv or folder/*.csv.
COPY <table_name> [ ( column [, ...] ) ]
TO ( '<filename>' | STDOUT )
[ WITH <option>='value' [AND ...] ];
Available common COPY options and defaults:
DELIMITER=',' - character that appears between records
QUOTE='"' - quoting character to be used to quote fields
ESCAPE='\' - character to appear before the QUOTE char when quoted
HEADER=false - whether to ignore the first line
NULL='' - string that represents a null value
DATETIMEFORMAT= - timestamp strftime format
'%Y-%m-%d %H:%M:%S%z' defaults to time_format value in cqlshrc
MAXATTEMPTS=5 - the maximum number of attempts per batch or range
REPORTFREQUENCY=0.25 - the frequency with which we display status updates in seconds
DECIMALSEP='.' - the separator for decimal values
THOUSANDSSEP='' - the separator for thousands digit groups
BOOLSTYLE='True,False' - the representation for booleans, case insensitive, specify true followed by false,
for example yes,no or 1,0
NUMPROCESSES=n - the number of worker processes, by default the number of cores minus one
capped at 16
CONFIGFILE='' - a configuration file with the same format as .cqlshrc (see the Python ConfigParser
documentation) where you can specify WITH options under the following optional
sections: [copy], [copy-to], [copy-from], [copy:ks.table], [copy-to:ks.table],
[copy-from:ks.table], where <ks> is your keyspace name and <table> is your table
name. Options are read from these sections, in the order specified
above, and command line options always override options in configuration files.
Depending on the COPY direction, only the relevant copy-from or copy-to sections
are used. If no configfile is specified then .cqlshrc is searched instead.
RATEFILE='' - an optional file where to print the output statistics
Available COPY FROM options and defaults:
CHUNKSIZE=5000 - the size of chunks passed to worker processes
INGESTRATE=100000 - an approximate ingest rate in rows per second
MINBATCHSIZE=10 - the minimum size of an import batch
MAXBATCHSIZE=20 - the maximum size of an import batch
MAXROWS=-1 - the maximum number of rows, -1 means no maximum
SKIPROWS=0 - the number of rows to skip
SKIPCOLS='' - a comma separated list of column names to skip
MAXPARSEERRORS=-1 - the maximum global number of parsing errors, -1 means no maximum
MAXINSERTERRORS=1000 - the maximum global number of insert errors, -1 means no maximum
ERRFILE='' - a file where to store all rows that could not be imported, by default this is
import_ks_table.err where <ks> is your keyspace and <table> is your table name.
PREPAREDSTATEMENTS=True - whether to use prepared statements when importing, by default True. Set this to
False if you don't mind shifting data parsing to the cluster. The cluster will also
have to compile every batch statement. For large and oversized clusters
this will result in a faster import but for smaller clusters it may generate
timeouts.
TTL=3600 - the time to live in seconds, by default data will not expire
Available COPY TO options and defaults:
ENCODING='utf8' - encoding for CSV output
PAGESIZE='1000' - the page size for fetching results
PAGETIMEOUT=10 - the page timeout in seconds for fetching results
BEGINTOKEN='' - the minimum token string to consider when exporting data
ENDTOKEN='' - the maximum token string to consider when exporting data
MAXREQUESTS=6 - the maximum number of requests each worker process can work on in parallel
MAXOUTPUTSIZE='-1' - the maximum size of the output file measured in number of lines,
beyond this maximum the output file will be split into segments,
-1 means unlimited.
FLOATPRECISION=5 - the number of digits displayed after the decimal point for cql float values
DOUBLEPRECISION=12 - the number of digits displayed after the decimal point for cql double values
When entering CSV data on STDIN, you can use the sequence "\."
on a line by itself to end the data input.
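        Example (illustrative; keyspace, table, column and file names are placeholders):
          COPY ks1.users (id, name) TO 'users.csv' WITH HEADER = TRUE;
          COPY ks1.users (id, name) FROM 'users.csv' WITH HEADER = TRUE;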
"""
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
if ks is None:
ks = self.current_keyspace
if ks is None:
raise NoKeyspaceError("Not in any keyspace.")
table = self.cql_unprotect_name(parsed.get_binding('cfname'))
columns = parsed.get_binding('colnames', None)
if columns is not None:
columns = list(map(self.cql_unprotect_name, columns))
else:
# default to all known columns
columns = self.get_column_names(ks, table)
fname = parsed.get_binding('fname', None)
if fname is not None:
fname = self.cql_unprotect_value(fname)
copyoptnames = list(map(six.text_type.lower, parsed.get_binding('optnames', ())))
copyoptvals = list(map(self.cql_unprotect_value, parsed.get_binding('optvals', ())))
opts = dict(list(zip(copyoptnames, copyoptvals)))
direction = parsed.get_binding('dir').upper()
if direction == 'FROM':
task = ImportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
elif direction == 'TO':
task = ExportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
else:
raise SyntaxError("Unknown direction %s" % direction)
task.run()
def do_show(self, parsed):
"""
SHOW [cqlsh only]
Displays information about the current cqlsh session. Can be called in
the following ways:
SHOW VERSION
Shows the version and build of the connected Cassandra instance, as
well as the version of the CQL spec that the connected Cassandra
instance understands.
SHOW HOST
Shows where cqlsh is currently connected.
SHOW SESSION <sessionid>
Pretty-prints the requested tracing session.
"""
showwhat = parsed.get_binding('what').lower()
if showwhat == 'version':
self.get_connection_versions()
self.show_version()
elif showwhat == 'host':
self.show_host()
elif showwhat.startswith('session'):
session_id = parsed.get_binding('sessionid').lower()
self.show_session(UUID(session_id))
else:
self.printerr('Wait, how do I show %r?' % (showwhat,))
def do_source(self, parsed):
"""
SOURCE [cqlsh only]
Executes a file containing CQL statements. Gives the output for each
statement in turn, if any, or any errors that occur along the way.
Errors do NOT abort execution of the CQL source file.
Usage:
SOURCE '<file>';
That is, the path to the file to be executed must be given inside a
string literal. The path is interpreted relative to the current working
directory. The tilde shorthand notation ('~/mydir') is supported for
referring to $HOME.
See also the --file option to cqlsh.
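        Example (illustrative path):
          SOURCE '~/init.cql';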
"""
fname = parsed.get_binding('fname')
fname = os.path.expanduser(self.cql_unprotect_value(fname))
try:
encoding, bom_size = get_file_encoding_bomsize(fname)
f = codecs.open(fname, 'r', encoding)
f.seek(bom_size)
except IOError as e:
self.printerr('Could not open %r: %s' % (fname, e))
return
username = self.auth_provider.username if self.auth_provider else None
password = self.auth_provider.password if self.auth_provider else None
subshell = Shell(self.hostname, self.port, color=self.color,
username=username, password=password,
encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn,
cqlver=self.cql_version, keyspace=self.current_keyspace,
tracing_enabled=self.tracing_enabled,
display_nanotime_format=self.display_nanotime_format,
display_timestamp_format=self.display_timestamp_format,
display_date_format=self.display_date_format,
display_float_precision=self.display_float_precision,
display_double_precision=self.display_double_precision,
display_timezone=self.display_timezone,
max_trace_wait=self.max_trace_wait, ssl=self.ssl,
request_timeout=self.session.default_timeout,
connect_timeout=self.conn.connect_timeout,
is_subshell=True)
# duplicate coverage related settings in subshell
if self.coverage:
subshell.coverage = True
subshell.coveragerc_path = self.coveragerc_path
subshell.cmdloop()
f.close()
def do_capture(self, parsed):
"""
CAPTURE [cqlsh only]
Begins capturing command output and appending it to a specified file.
Output will not be shown at the console while it is captured.
Usage:
CAPTURE '<file>';
CAPTURE OFF;
CAPTURE;
That is, the path to the file to be appended to must be given inside a
string literal. The path is interpreted relative to the current working
directory. The tilde shorthand notation ('~/mydir') is supported for
referring to $HOME.
Only query result output is captured. Errors and output from cqlsh-only
commands will still be shown in the cqlsh session.
To stop capturing output and show it in the cqlsh session again, use
CAPTURE OFF.
To inspect the current capture configuration, use CAPTURE with no
arguments.
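        Example (illustrative path):
          CAPTURE '~/query_output.txt';
          CAPTURE OFF;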
"""
fname = parsed.get_binding('fname')
if fname is None:
if self.shunted_query_out is not None:
print("Currently capturing query output to %r." % (self.query_out.name,))
else:
print("Currently not capturing query output.")
return
if fname.upper() == 'OFF':
if self.shunted_query_out is None:
self.printerr('Not currently capturing output.')
return
self.query_out.close()
self.query_out = self.shunted_query_out
self.color = self.shunted_color
self.shunted_query_out = None
del self.shunted_color
return
if self.shunted_query_out is not None:
self.printerr('Already capturing output to %s. Use CAPTURE OFF'
' to disable.' % (self.query_out.name,))
return
fname = os.path.expanduser(self.cql_unprotect_value(fname))
try:
f = open(fname, 'a')
except IOError as e:
self.printerr('Could not open %r for append: %s' % (fname, e))
return
self.shunted_query_out = self.query_out
self.shunted_color = self.color
self.query_out = f
self.color = False
print('Now capturing query output to %r.' % (fname,))
def do_tracing(self, parsed):
"""
TRACING [cqlsh]
Enables or disables request tracing.
TRACING ON
Enables tracing for all further requests.
TRACING OFF
Disables tracing.
TRACING
TRACING with no arguments shows the current tracing status.
"""
self.tracing_enabled = SwitchCommand("TRACING", "Tracing").execute(self.tracing_enabled, parsed, self.printerr)
def do_expand(self, parsed):
"""
EXPAND [cqlsh]
Enables or disables expanded (vertical) output.
EXPAND ON
Enables expanded (vertical) output.
EXPAND OFF
Disables expanded (vertical) output.
EXPAND
EXPAND with no arguments shows the current value of expand setting.
"""
self.expand_enabled = SwitchCommand("EXPAND", "Expanded output").execute(self.expand_enabled, parsed, self.printerr)
def do_consistency(self, parsed):
"""
CONSISTENCY [cqlsh only]
Overrides default consistency level (default level is ONE).
CONSISTENCY <level>
Sets consistency level for future requests.
Valid consistency levels:
ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_ONE, LOCAL_QUORUM, EACH_QUORUM, SERIAL and LOCAL_SERIAL.
SERIAL and LOCAL_SERIAL may be used only for SELECTs; will be rejected with updates.
CONSISTENCY
CONSISTENCY with no arguments shows the current consistency level.
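        Example (illustrative):
          CONSISTENCY QUORUM;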
"""
level = parsed.get_binding('level')
if level is None:
print('Current consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.consistency_level]))
return
self.consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print('Consistency level set to %s.' % (level.upper(),))
def do_serial(self, parsed):
"""
SERIAL CONSISTENCY [cqlsh only]
Overrides serial consistency level (default level is SERIAL).
SERIAL CONSISTENCY <level>
Sets consistency level for future conditional updates.
Valid consistency levels:
SERIAL, LOCAL_SERIAL.
SERIAL CONSISTENCY
SERIAL CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print('Current serial consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.serial_consistency_level]))
return
self.serial_consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print('Serial consistency level set to %s.' % (level.upper(),))
def do_login(self, parsed):
"""
LOGIN [cqlsh only]
Changes login information without requiring restart.
LOGIN <username> (<password>)
Login using the specified username. If password is specified, it will be used
otherwise, you will be prompted to enter.
"""
username = parsed.get_binding('username')
password = parsed.get_binding('password')
if password is None:
password = getpass.getpass()
else:
password = password[1:-1]
auth_provider = PlainTextAuthProvider(username=username, password=password)
conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=self.conn.cql_version,
protocol_version=self.conn.protocol_version,
auth_provider=auth_provider,
ssl_options=self.conn.ssl_options,
load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
control_connection_timeout=self.conn.connect_timeout,
connect_timeout=self.conn.connect_timeout)
if self.current_keyspace:
session = conn.connect(self.current_keyspace)
else:
session = conn.connect()
# Copy session properties
session.default_timeout = self.session.default_timeout
session.row_factory = self.session.row_factory
session.default_consistency_level = self.session.default_consistency_level
session.max_trace_wait = self.session.max_trace_wait
# Update after we've connected in case we fail to authenticate
self.conn = conn
self.auth_provider = auth_provider
self.username = username
self.session = session
def do_exit(self, parsed=None):
"""
EXIT/QUIT [cqlsh only]
Exits cqlsh.
"""
self.stop = True
if self.owns_connection:
self.conn.shutdown()
do_quit = do_exit
def do_clear(self, parsed):
"""
CLEAR/CLS [cqlsh only]
Clears the console.
"""
import subprocess
subprocess.call(['clear', 'cls'][is_win], shell=True)
do_cls = do_clear
def do_debug(self, parsed):
import pdb
pdb.set_trace()
def get_help_topics(self):
topics = [t[3:] for t in dir(self) if t.startswith('do_') and getattr(self, t, None).__doc__]
for hide_from_help in ('quit',):
topics.remove(hide_from_help)
return topics
def columnize(self, slist, *a, **kw):
return cmd.Cmd.columnize(self, sorted([u.upper() for u in slist]), *a, **kw)
def do_help(self, parsed):
"""
HELP [cqlsh only]
Gives information about cqlsh commands. To see available topics,
enter "HELP" without any arguments. To see help on a topic,
use "HELP <topic>".
"""
topics = parsed.get_binding('topic', ())
if not topics:
shell_topics = [t.upper() for t in self.get_help_topics()]
self.print_topics("\nDocumented shell commands:", shell_topics, 15, 80)
cql_topics = [t.upper() for t in cqldocs.get_help_topics()]
self.print_topics("CQL help topics:", cql_topics, 15, 80)
return
for t in topics:
if t.lower() in self.get_help_topics():
doc = getattr(self, 'do_' + t.lower()).__doc__
self.stdout.write(doc + "\n")
elif t.lower() in cqldocs.get_help_topics():
urlpart = cqldocs.get_help_topic(t)
if urlpart is not None:
url = "%s#%s" % (CASSANDRA_CQL_HTML, urlpart)
if len(webbrowser._tryorder) == 0:
self.printerr("*** No browser to display CQL help. URL for help topic %s : %s" % (t, url))
elif self.browser is not None:
webbrowser.get(self.browser).open_new_tab(url)
else:
webbrowser.open_new_tab(url)
else:
self.printerr("*** No help on %s" % (t,))
def do_unicode(self, parsed):
"""
Textual input/output
When control characters, or other characters which can't be encoded
in your current locale, are found in values of 'text' or 'ascii'
types, it will be shown as a backslash escape. If color is enabled,
any such backslash escapes will be shown in a different color from
the surrounding text.
Unicode code points in your data will be output intact, if the
encoding for your locale is capable of decoding them. If you prefer
that non-ascii characters be shown with Python-style "\\uABCD"
escape sequences, invoke cqlsh with an ASCII locale (for example,
by setting the $LANG environment variable to "C").
"""
def do_paging(self, parsed):
"""
PAGING [cqlsh]
Enables or disables query paging.
PAGING ON
Enables query paging for all further queries.
PAGING OFF
Disables paging.
PAGING
PAGING with no arguments shows the current query paging status.
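        PAGING <page size>
          Enables paging and sets the page size in rows, e.g. PAGING 100
          (illustrative value).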
"""
(self.use_paging, requested_page_size) = SwitchCommandWithValue(
"PAGING", "Query paging", value_type=int).execute(self.use_paging, parsed, self.printerr)
if self.use_paging and requested_page_size is not None:
self.page_size = requested_page_size
if self.use_paging:
print(("Page size: {}".format(self.page_size)))
else:
self.page_size = self.default_page_size
def applycolor(self, text, color=None):
if not color or not self.color:
return text
return color + text + ANSI_RESET
def writeresult(self, text, color=None, newline=True, out=None):
if out is None:
out = self.query_out
# convert Exceptions, etc to text
if not isinstance(text, six.text_type):
text = "{}".format(text)
to_write = self.applycolor(text, color) + ('\n' if newline else '')
to_write = ensure_str(to_write)
out.write(to_write)
def flush_output(self):
self.query_out.flush()
def printerr(self, text, color=RED, newline=True, shownum=None):
self.statement_error = True
if shownum is None:
shownum = self.show_line_nums
if shownum:
text = '%s:%d:%s' % (self.stdin.name, self.lineno, text)
self.writeresult(text, color, newline=newline, out=sys.stderr)
def stop_coverage(self):
if self.coverage and self.cov is not None:
self.cov.stop()
self.cov.save()
self.cov = None
class SwitchCommand(object):
command = None
description = None
def __init__(self, command, desc):
self.command = command
self.description = desc
def execute(self, state, parsed, printerr):
switch = parsed.get_binding('switch')
if switch is None:
if state:
print("%s is currently enabled. Use %s OFF to disable"
% (self.description, self.command))
else:
print("%s is currently disabled. Use %s ON to enable."
% (self.description, self.command))
return state
if switch.upper() == 'ON':
if state:
printerr('%s is already enabled. Use %s OFF to disable.'
% (self.description, self.command))
return state
print('Now %s is enabled' % (self.description,))
return True
if switch.upper() == 'OFF':
if not state:
printerr('%s is not enabled.' % (self.description,))
return state
print('Disabled %s.' % (self.description,))
return False
class SwitchCommandWithValue(SwitchCommand):
"""The same as SwitchCommand except it also accepts a value in place of ON.
This returns a tuple of the form: (SWITCH_VALUE, PASSED_VALUE)
eg: PAGING 50 returns (True, 50)
PAGING OFF returns (False, None)
PAGING ON returns (True, None)
The value_type must match for the PASSED_VALUE, otherwise it will return None.
"""
def __init__(self, command, desc, value_type=int):
SwitchCommand.__init__(self, command, desc)
self.value_type = value_type
def execute(self, state, parsed, printerr):
binary_switch_value = SwitchCommand.execute(self, state, parsed, printerr)
switch = parsed.get_binding('switch')
try:
value = self.value_type(switch)
binary_switch_value = True
except (ValueError, TypeError):
value = None
return (binary_switch_value, value)
def option_with_default(cparser_getter, section, option, default=None):
try:
return cparser_getter(section, option)
except configparser.Error:
return default
def raw_option_with_default(configs, section, option, default=None):
"""
Same (almost) as option_with_default() but won't do any string interpolation.
Useful for config values that include '%' symbol, e.g. time format string.
"""
try:
return configs.get(section, option, raw=True)
except configparser.Error:
return default
def should_use_color():
if not sys.stdout.isatty():
return False
if os.environ.get('TERM', '') in ('dumb', ''):
return False
try:
import subprocess
p = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE)
stdout, _ = p.communicate()
if int(stdout.strip()) < 8:
return False
except (OSError, ImportError, ValueError):
# oh well, we tried. at least we know there's a $TERM and it's
# not "dumb".
pass
return True
def read_options(cmdlineargs, environment):
configs = configparser.SafeConfigParser() if sys.version_info < (3, 2) else configparser.ConfigParser()
configs.read(CONFIG_FILE)
rawconfigs = configparser.RawConfigParser()
rawconfigs.read(CONFIG_FILE)
optvalues = optparse.Values()
optvalues.username = option_with_default(configs.get, 'authentication', 'username')
optvalues.password = option_with_default(rawconfigs.get, 'authentication', 'password')
optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace')
optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None)
optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey',
DEFAULT_COMPLETEKEY)
optvalues.color = option_with_default(configs.getboolean, 'ui', 'color')
optvalues.time_format = raw_option_with_default(configs, 'ui', 'time_format',
DEFAULT_TIMESTAMP_FORMAT)
optvalues.nanotime_format = raw_option_with_default(configs, 'ui', 'nanotime_format',
DEFAULT_NANOTIME_FORMAT)
optvalues.date_format = raw_option_with_default(configs, 'ui', 'date_format',
DEFAULT_DATE_FORMAT)
optvalues.float_precision = option_with_default(configs.getint, 'ui', 'float_precision',
DEFAULT_FLOAT_PRECISION)
optvalues.double_precision = option_with_default(configs.getint, 'ui', 'double_precision',
DEFAULT_DOUBLE_PRECISION)
optvalues.field_size_limit = option_with_default(configs.getint, 'csv', 'field_size_limit', csv.field_size_limit())
optvalues.max_trace_wait = option_with_default(configs.getfloat, 'tracing', 'max_trace_wait',
DEFAULT_MAX_TRACE_WAIT)
optvalues.timezone = option_with_default(configs.get, 'ui', 'timezone', None)
optvalues.debug = False
optvalues.coverage = False
if 'CQLSH_COVERAGE' in environment.keys():
optvalues.coverage = True
optvalues.file = None
optvalues.ssl = option_with_default(configs.getboolean, 'connection', 'ssl', DEFAULT_SSL)
optvalues.encoding = option_with_default(configs.get, 'ui', 'encoding', UTF8)
optvalues.tty = option_with_default(configs.getboolean, 'ui', 'tty', sys.stdin.isatty())
optvalues.protocol_version = option_with_default(configs.getint, 'protocol', 'version', None)
optvalues.cqlversion = option_with_default(configs.get, 'cql', 'version', None)
optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS)
optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS)
optvalues.execute = None
(options, arguments) = parser.parse_args(cmdlineargs, values=optvalues)
hostname = option_with_default(configs.get, 'connection', 'hostname', DEFAULT_HOST)
port = option_with_default(configs.get, 'connection', 'port', DEFAULT_PORT)
try:
options.connect_timeout = int(options.connect_timeout)
except ValueError:
parser.error('"%s" is not a valid connect timeout.' % (options.connect_timeout,))
options.connect_timeout = DEFAULT_CONNECT_TIMEOUT_SECONDS
try:
options.request_timeout = int(options.request_timeout)
except ValueError:
parser.error('"%s" is not a valid request timeout.' % (options.request_timeout,))
options.request_timeout = DEFAULT_REQUEST_TIMEOUT_SECONDS
hostname = environment.get('CQLSH_HOST', hostname)
port = environment.get('CQLSH_PORT', port)
if len(arguments) > 0:
hostname = arguments[0]
if len(arguments) > 1:
port = arguments[1]
if options.file or options.execute:
options.tty = False
if options.execute and not options.execute.endswith(';'):
options.execute += ';'
if optvalues.color in (True, False):
options.color = optvalues.color
else:
if options.file is not None:
options.color = False
else:
options.color = should_use_color()
if options.cqlversion is not None:
options.cqlversion, cqlvertup = full_cql_version(options.cqlversion)
if cqlvertup[0] < 3:
parser.error('%r is not a supported CQL version.' % options.cqlversion)
options.cqlmodule = cql3handling
try:
port = int(port)
except ValueError:
parser.error('%r is not a valid port number.' % port)
return options, hostname, port
def setup_cqlruleset(cqlmodule):
global cqlruleset
cqlruleset = cqlmodule.CqlRuleSet
cqlruleset.append_rules(cqlshhandling.cqlsh_extra_syntax_rules)
for rulename, termname, func in cqlshhandling.cqlsh_syntax_completers:
cqlruleset.completer_for(rulename, termname)(func)
cqlruleset.commands_end_with_newline.update(cqlshhandling.my_commands_ending_with_newline)
def setup_cqldocs(cqlmodule):
global cqldocs
cqldocs = cqlmodule.cqldocs
def init_history():
if readline is not None:
try:
readline.read_history_file(HISTORY)
except IOError:
pass
delims = readline.get_completer_delims()
        # str.replace returns a new string; reassign so the quote character is
        # actually removed from the completer delimiters
        delims = delims.replace("'", "")
delims += '.'
readline.set_completer_delims(delims)
def save_history():
if readline is not None:
try:
readline.write_history_file(HISTORY)
except IOError:
pass
def main(options, hostname, port):
setup_cqlruleset(options.cqlmodule)
setup_cqldocs(options.cqlmodule)
init_history()
csv.field_size_limit(options.field_size_limit)
if options.file is None:
stdin = None
else:
try:
encoding, bom_size = get_file_encoding_bomsize(options.file)
stdin = codecs.open(options.file, 'r', encoding)
stdin.seek(bom_size)
except IOError as e:
sys.exit("Can't open %r: %s" % (options.file, e))
if options.debug:
sys.stderr.write("Using CQL driver: %s\n" % (cassandra,))
sys.stderr.write("Using connect timeout: %s seconds\n" % (options.connect_timeout,))
sys.stderr.write("Using '%s' encoding\n" % (options.encoding,))
sys.stderr.write("Using ssl: %s\n" % (options.ssl,))
# create timezone based on settings, environment or auto-detection
timezone = None
if options.timezone or 'TZ' in os.environ:
try:
import pytz
if options.timezone:
try:
timezone = pytz.timezone(options.timezone)
except Exception:
sys.stderr.write("Warning: could not recognize timezone '%s' specified in cqlshrc\n\n" % (options.timezone))
if 'TZ' in os.environ:
try:
timezone = pytz.timezone(os.environ['TZ'])
except Exception:
sys.stderr.write("Warning: could not recognize timezone '%s' from environment value TZ\n\n" % (os.environ['TZ']))
except ImportError:
sys.stderr.write("Warning: Timezone defined and 'pytz' module for timezone conversion not installed. Timestamps will be displayed in UTC timezone.\n\n")
# try auto-detect timezone if tzlocal is installed
if not timezone:
try:
from tzlocal import get_localzone
timezone = get_localzone()
except ImportError:
# we silently ignore and fallback to UTC unless a custom timestamp format (which likely
# does contain a TZ part) was specified
if options.time_format != DEFAULT_TIMESTAMP_FORMAT:
sys.stderr.write("Warning: custom timestamp format specified in cqlshrc, "
+ "but local timezone could not be detected.\n"
+ "Either install Python 'tzlocal' module for auto-detection "
+ "or specify client timezone in your cqlshrc.\n\n")
try:
shell = Shell(hostname,
port,
color=options.color,
username=options.username,
password=options.password,
stdin=stdin,
tty=options.tty,
completekey=options.completekey,
browser=options.browser,
protocol_version=options.protocol_version,
cqlver=options.cqlversion,
keyspace=options.keyspace,
display_timestamp_format=options.time_format,
display_nanotime_format=options.nanotime_format,
display_date_format=options.date_format,
display_float_precision=options.float_precision,
display_double_precision=options.double_precision,
display_timezone=timezone,
max_trace_wait=options.max_trace_wait,
ssl=options.ssl,
single_statement=options.execute,
request_timeout=options.request_timeout,
connect_timeout=options.connect_timeout,
encoding=options.encoding)
except KeyboardInterrupt:
sys.exit('Connection aborted.')
except CQL_ERRORS as e:
sys.exit('Connection error: %s' % (e,))
except VersionNotSupported as e:
sys.exit('Unsupported CQL version: %s' % (e,))
if options.debug:
shell.debug = True
if options.coverage:
shell.coverage = True
import signal
        def handle_sighup(signum, frame):
shell.stop_coverage()
shell.do_exit()
signal.signal(signal.SIGHUP, handle_sighup)
shell.cmdloop()
save_history()
if shell.batch_mode and shell.statement_error:
sys.exit(2)
# always call this regardless of module name: when a sub-process is spawned
# on Windows then the module name is not __main__, see CASSANDRA-9304
insert_driver_hooks()
if __name__ == '__main__':
main(*read_options(sys.argv[1:], os.environ))
# vim: set ft=python et ts=4 sw=4 :
|
apache-2.0
|
epage/dialcentral-gtk
|
src/alarm_handler.py
|
1
|
9207
|
#!/usr/bin/env python
import os
import time
import datetime
import ConfigParser
import dbus
_FREMANTLE_ALARM = "Fremantle"
_DIABLO_ALARM = "Diablo"
_NO_ALARM = "None"
try:
import alarm
ALARM_TYPE = _FREMANTLE_ALARM
except (ImportError, OSError):
try:
import osso.alarmd as alarmd
ALARM_TYPE = _DIABLO_ALARM
except (ImportError, OSError):
ALARM_TYPE = _NO_ALARM
def _get_start_time(recurrence):
	now = datetime.datetime.now()
	# datetime objects are immutable; build the start time explicitly instead
	# of discarding the value returned by replace()
	startTime = now + datetime.timedelta(minutes=max(recurrence, 5)) # being safe
	timestamp = int(time.mktime(startTime.timetuple()))
	return timestamp
def _create_recurrence_mask(recurrence, base):
"""
>>> bin(_create_recurrence_mask(60, 60))
'0b1'
>>> bin(_create_recurrence_mask(30, 60))
'0b1000000000000000000000000000001'
>>> bin(_create_recurrence_mask(2, 60))
'0b10101010101010101010101010101010101010101010101010101010101'
>>> bin(_create_recurrence_mask(1, 60))
'0b111111111111111111111111111111111111111111111111111111111111'
"""
mask = 0
for i in xrange(base / recurrence):
mask |= 1 << (recurrence * i)
return mask
def _unpack_minutes(recurrence):
"""
>>> _unpack_minutes(0)
(0, 0, 0)
>>> _unpack_minutes(1)
(0, 0, 1)
>>> _unpack_minutes(59)
(0, 0, 59)
>>> _unpack_minutes(60)
(0, 1, 0)
>>> _unpack_minutes(129)
(0, 2, 9)
>>> _unpack_minutes(5 * 60 * 24 + 3 * 60 + 2)
(5, 3, 2)
>>> _unpack_minutes(12 * 60 * 24 + 3 * 60 + 2)
(5, 3, 2)
"""
minutesInAnHour = 60
minutesInDay = 24 * minutesInAnHour
minutesInAWeek = minutesInDay * 7
days = recurrence / minutesInDay
daysOfWeek = days % 7
recurrence -= days * minutesInDay
hours = recurrence / minutesInAnHour
recurrence -= hours * minutesInAnHour
mins = recurrence % minutesInAnHour
recurrence -= mins
assert recurrence == 0, "Recurrence %d" % recurrence
return daysOfWeek, hours, mins
class _FremantleAlarmHandler(object):
_INVALID_COOKIE = -1
_REPEAT_FOREVER = -1
_TITLE = "Dialcentral Notifications"
_LAUNCHER = os.path.abspath(os.path.join(os.path.dirname(__file__), "alarm_notify.py"))
def __init__(self):
self._recurrence = 5
self._alarmCookie = self._INVALID_COOKIE
self._launcher = self._LAUNCHER
def load_settings(self, config, sectionName):
try:
self._recurrence = config.getint(sectionName, "recurrence")
self._alarmCookie = config.getint(sectionName, "alarmCookie")
launcher = config.get(sectionName, "notifier")
if launcher:
self._launcher = launcher
except ConfigParser.NoOptionError:
pass
except ConfigParser.NoSectionError:
pass
def save_settings(self, config, sectionName):
config.set(sectionName, "recurrence", str(self._recurrence))
config.set(sectionName, "alarmCookie", str(self._alarmCookie))
launcher = self._launcher if self._launcher != self._LAUNCHER else ""
config.set(sectionName, "notifier", launcher)
def apply_settings(self, enabled, recurrence):
if recurrence != self._recurrence or enabled != self.isEnabled:
if self.isEnabled:
self._clear_alarm()
if enabled:
self._set_alarm(recurrence)
self._recurrence = int(recurrence)
@property
def recurrence(self):
return self._recurrence
@property
def isEnabled(self):
return self._alarmCookie != self._INVALID_COOKIE
def _set_alarm(self, recurrenceMins):
assert 1 <= recurrenceMins, "Notifications set to occur too frequently: %d" % recurrenceMins
alarmTime = _get_start_time(recurrenceMins)
event = alarm.Event()
event.appid = self._TITLE
event.alarm_time = alarmTime
event.recurrences_left = self._REPEAT_FOREVER
action = event.add_actions(1)[0]
action.flags |= alarm.ACTION_TYPE_EXEC | alarm.ACTION_WHEN_TRIGGERED
action.command = self._launcher
recurrence = event.add_recurrences(1)[0]
recurrence.mask_min |= _create_recurrence_mask(recurrenceMins, 60)
recurrence.mask_hour |= alarm.RECUR_HOUR_DONTCARE
recurrence.mask_mday |= alarm.RECUR_MDAY_DONTCARE
recurrence.mask_wday |= alarm.RECUR_WDAY_DONTCARE
recurrence.mask_mon |= alarm.RECUR_MON_DONTCARE
recurrence.special |= alarm.RECUR_SPECIAL_NONE
assert event.is_sane()
self._alarmCookie = alarm.add_event(event)
def _clear_alarm(self):
if self._alarmCookie == self._INVALID_COOKIE:
return
alarm.delete_event(self._alarmCookie)
self._alarmCookie = self._INVALID_COOKIE
class _DiabloAlarmHandler(object):
_INVALID_COOKIE = -1
_TITLE = "Dialcentral Notifications"
_LAUNCHER = os.path.abspath(os.path.join(os.path.dirname(__file__), "alarm_notify.py"))
_REPEAT_FOREVER = -1
def __init__(self):
self._recurrence = 5
bus = dbus.SystemBus()
		self._alarmdDBus = bus.get_object("com.nokia.alarmd", "/com/nokia/alarmd")
self._alarmCookie = self._INVALID_COOKIE
self._launcher = self._LAUNCHER
def load_settings(self, config, sectionName):
try:
self._recurrence = config.getint(sectionName, "recurrence")
self._alarmCookie = config.getint(sectionName, "alarmCookie")
launcher = config.get(sectionName, "notifier")
if launcher:
self._launcher = launcher
except ConfigParser.NoOptionError:
pass
except ConfigParser.NoSectionError:
pass
def save_settings(self, config, sectionName):
config.set(sectionName, "recurrence", str(self._recurrence))
config.set(sectionName, "alarmCookie", str(self._alarmCookie))
launcher = self._launcher if self._launcher != self._LAUNCHER else ""
config.set(sectionName, "notifier", launcher)
def apply_settings(self, enabled, recurrence):
if recurrence != self._recurrence or enabled != self.isEnabled:
if self.isEnabled:
self._clear_alarm()
if enabled:
self._set_alarm(recurrence)
self._recurrence = int(recurrence)
@property
def recurrence(self):
return self._recurrence
@property
def isEnabled(self):
return self._alarmCookie != self._INVALID_COOKIE
def _set_alarm(self, recurrence):
assert 1 <= recurrence, "Notifications set to occur too frequently: %d" % recurrence
alarmTime = _get_start_time(recurrence)
#Setup the alarm arguments so that they can be passed to the D-Bus add_event method
_DEFAULT_FLAGS = (
alarmd.ALARM_EVENT_NO_DIALOG |
alarmd.ALARM_EVENT_NO_SNOOZE |
alarmd.ALARM_EVENT_CONNECTED
)
action = []
action.extend(['flags', _DEFAULT_FLAGS])
action.extend(['title', self._TITLE])
action.extend(['path', self._launcher])
action.extend([
'arguments',
dbus.Array(
[alarmTime, int(27)],
signature=dbus.Signature('v')
)
]) #int(27) used in place of alarm_index
event = []
event.extend([dbus.ObjectPath('/AlarmdEventRecurring'), dbus.UInt32(4)])
event.extend(['action', dbus.ObjectPath('/AlarmdActionExec')]) #use AlarmdActionExec instead of AlarmdActionDbus
event.append(dbus.UInt32(len(action) / 2))
event.extend(action)
event.extend(['time', dbus.Int64(alarmTime)])
event.extend(['recurr_interval', dbus.UInt32(recurrence)])
event.extend(['recurr_count', dbus.Int32(self._REPEAT_FOREVER)])
		self._alarmCookie = self._alarmdDBus.add_event(*event)
def _clear_alarm(self):
if self._alarmCookie == self._INVALID_COOKIE:
return
deleteResult = self._alarmdDBus.del_event(dbus.Int32(self._alarmCookie))
self._alarmCookie = self._INVALID_COOKIE
assert deleteResult != -1, "Deleting of alarm event failed"
class _NoneAlarmHandler(object):
def __init__(self):
self._alarmCookie = 0
def load_settings(self, config, sectionName):
pass
def save_settings(self, config, sectionName):
pass
def apply_settings(self, enabled, recurrence):
pass
@property
def recurrence(self):
return 0
@property
def isEnabled(self):
return False
AlarmHandler = {
_FREMANTLE_ALARM: _FremantleAlarmHandler,
_DIABLO_ALARM: _DiabloAlarmHandler,
_NO_ALARM: _NoneAlarmHandler,
}[ALARM_TYPE]
def main():
import constants
try:
import optparse
except ImportError:
return
parser = optparse.OptionParser()
parser.add_option("-x", "--display", action="store_true", dest="display", help="Display data")
parser.add_option("-e", "--enable", action="store_true", dest="enabled", help="Whether the alarm should be enabled or not", default=False)
parser.add_option("-d", "--disable", action="store_false", dest="enabled", help="Whether the alarm should be enabled or not", default=False)
parser.add_option("-r", "--recurrence", action="store", type="int", dest="recurrence", help="How often the alarm occurs", default=5)
(commandOptions, commandArgs) = parser.parse_args()
alarmHandler = AlarmHandler()
config = ConfigParser.SafeConfigParser()
config.read(constants._user_settings_)
alarmHandler.load_settings(config, "alarm")
if commandOptions.display:
print "Alarm (%s) is %s for every %d minutes" % (
alarmHandler._alarmCookie,
"enabled" if alarmHandler.isEnabled else "disabled",
alarmHandler.recurrence,
)
else:
isEnabled = commandOptions.enabled
recurrence = commandOptions.recurrence
alarmHandler.apply_settings(isEnabled, recurrence)
alarmHandler.save_settings(config, "alarm")
configFile = open(constants._user_settings_, "wb")
try:
config.write(configFile)
finally:
configFile.close()
if __name__ == "__main__":
main()
|
lgpl-2.1
|
khertan/ownNotes
|
python/requests/packages/charade/escprober.py
|
206
|
3273
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
|
gpl-3.0
|
s20121035/rk3288_android5.1_repo
|
external/chromium_org/tools/site_compare/commands/scrape.py
|
189
|
1832
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command for scraping images from a URL or list of URLs.
Prerequisites:
1. The command_line package from tools/site_compare
2. Either the IE BHO or Firefox extension (or both)
Installation:
1. Build the IE BHO, or call regsvr32 on a prebuilt binary
2. Add a file called "[email protected]" to
the default Firefox profile directory under extensions, containing
the path to the Firefox extension root
Invoke with the command line arguments as documented within
the command line.
"""
import command_line
from drivers import windowing
from utils import browser_iterate
def CreateCommand(cmdline):
"""Inserts the command and arguments into a command line for parsing."""
cmd = cmdline.AddCommand(
["scrape"],
"Scrapes an image from a URL or series of URLs.",
None,
ExecuteScrape)
browser_iterate.SetupIterationCommandLine(cmd)
cmd.AddArgument(
["-log", "--logfile"], "File to write text output", type="string")
cmd.AddArgument(
["-out", "--outdir"], "Directory to store scrapes", type="string", required=True)
def ExecuteScrape(command):
"""Executes the Scrape command."""
def ScrapeResult(url, proc, wnd, result):
"""Capture and save the scrape."""
if log_file: log_file.write(result)
# Scrape the page
image = windowing.ScrapeWindow(wnd)
filename = windowing.URLtoFilename(url, command["--outdir"], ".bmp")
image.save(filename)
if command["--logfile"]: log_file = open(command["--logfile"], "w")
else: log_file = None
browser_iterate.Iterate(command, ScrapeResult)
# Close the log file and return. We're done.
if log_file: log_file.close()
|
gpl-3.0
|
bluemask2001/namebench
|
tools/alexa-subdomains.py
|
175
|
2862
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subdomain data parser for Alexa."""
__author__ = '[email protected] (Thomas Stromberg)'
import glob
import operator
import os
import os.path
import re
import sys
import time
if __name__ == '__main__':
sys.path.append('..')
# See if a third_party library exists -- use it if so.
try:
import third_party
except ImportError:
pass
import httplib2
CACHE_DIR = os.getenv('HOME') + '/.alexa_cache'
CACHE_EXPIRATION = 86400 * 90
SLEEPY_TIME = 15
MAX_ATTEMPTS = 5
NAKED_DOMAINS = ['twitter.com', 'rapidshare.com', 'perezhilton.com', 'posterous.com']
def FetchUrl(url, attempts=0):
attempts += 1
print >> sys.stderr, "Fetching %s (attempt %s)" % (url, attempts)
h = httplib2.Http(CACHE_DIR, timeout=10)
try:
resp, content = h.request(url, 'GET', headers={'cache_control': 'max-age=%s' % CACHE_EXPIRATION})
  except Exception:
    if attempts < MAX_ATTEMPTS:
      print >> sys.stderr, "Will try again..."
      time.sleep(SLEEPY_TIME)
      return FetchUrl(url, attempts=attempts)
    # out of retries: re-raise rather than fall through to the undefined
    # "content" variable below
    raise
time.sleep(SLEEPY_TIME)
return content
def FetchCachedAlexaPage(domain):
url_path = 'www.alexa.com/siteinfo/%s' % domain
cache_path = '%s/%s' % (CACHE_DIR, url_path.replace('/', ','))
for file in glob.glob("%s,*" % cache_path):
f = open(file)
return f.read()
# If we haven't returned already...
return FetchUrl("http://%s" % url_path)
def ParseAlexaSubdomains(content):
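  # Illustrative: returns a list of (hostname, percentage-string) tuples scraped
  # from the siteinfo HTML, e.g. ('www.example.com', '42.0').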
return re.findall('\<p class=\"tc1.*?>([\w\.-]+\.[\w]{2,})\<\/p>.*?tc1.*?(\d+\.\d+)%', content, re.M | re.S)
def GetHostsForDomain(domain):
content = FetchCachedAlexaPage(domain)
return ParseAlexaSubdomains(content)
if __name__ == '__main__':
index = 0
results = {}
for domain in sys.stdin:
index += 1
domain = domain.rstrip()
for host, percentage in GetHostsForDomain(domain):
if host == domain and domain not in NAKED_DOMAINS:
host = '.'.join(('www', domain))
if percentage == '0.0':
continue
score = index / (float(percentage) / 100)
if host not in results:
results[host] = score
print >> sys.stderr, "%s: %s (%s)" % (score, host, percentage)
for host, score in sorted(results.items(), key=operator.itemgetter(1)):
print "A %s." % host
|
apache-2.0
|
eckucukoglu/arm-linux-gnueabihf
|
arm-linux-gnueabihf/libc/usr/lib/python2.7/ctypes/test/test_array_in_pointer.py
|
117
|
1729
|
import unittest
from ctypes import *
from binascii import hexlify
import re
def dump(obj):
# helper function to dump memory contents in hex, with a hyphen
# between the bytes.
h = hexlify(memoryview(obj))
return re.sub(r"(..)", r"\1-", h)[:-1]
class Value(Structure):
_fields_ = [("val", c_byte)]
class Container(Structure):
_fields_ = [("pvalues", POINTER(Value))]
class Test(unittest.TestCase):
def test(self):
# create an array of 4 values
val_array = (Value * 4)()
# create a container, which holds a pointer to the pvalues array.
c = Container()
c.pvalues = val_array
# memory contains 4 NUL bytes now, that's correct
self.assertEqual("00-00-00-00", dump(val_array))
# set the values of the array through the pointer:
for i in range(4):
c.pvalues[i].val = i + 1
values = [c.pvalues[i].val for i in range(4)]
        # These are the expected results: here's the bug!
self.assertEqual(
(values, dump(val_array)),
([1, 2, 3, 4], "01-02-03-04")
)
def test_2(self):
val_array = (Value * 4)()
# memory contains 4 NUL bytes now, that's correct
self.assertEqual("00-00-00-00", dump(val_array))
ptr = cast(val_array, POINTER(Value))
# set the values of the array through the pointer:
for i in range(4):
ptr[i].val = i + 1
values = [ptr[i].val for i in range(4)]
        # These are the expected results: here's the bug!
self.assertEqual(
(values, dump(val_array)),
([1, 2, 3, 4], "01-02-03-04")
)
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
|
zhuyue1314/FuzzLabs
|
requests/file_JPEG_B.py
|
7
|
120707
|
# =============================================================================
# JPEG Image Descriptor - BIG
# This file is part of the FuzzLabs Fuzzing Framework
#
# Author: FuzzLabs
# Date: 21/07/2015
#
# Original file MD5 sum: 4dde17f30fee6e6120a58d890a4ec572
# Original file SHA1 sum: 1e1d1c90b4b0dd9ad5719be96dcbfabf32ff9aee
#
# =============================================================================
from sulley import *
JPEG_SOI = "\xFF\xD8" # Start of Image Marker
JPEG_EOI = "\xFF\xD9" # End of Image Marker
JPEG_DQT = "\xFF\xDB" # Quantization Table
JPEG_DHT = "\xFF\xC4" # Huffman Table
JPEG_SOS = "\xFF\xDA" # Start of Scan
JPEG_COM = "\xFF\xFE" # Comment
JPEG_APP0 = "\xFF\xE0" # Application Marker 0
JPEG_APP1 = "\xFF\xE1" # Application Marker 1
JPEG_SOF0 = "\xFF\xC0" # Start of Frame - Baseline DCT
JPEG_SOF2 = "\xFF\xC2" # Start of Frame - Progressive DCT
s_initialize("JPEG")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOI ffd8 0x0 0x0 0x2
# ------------------------------------------------------------------------------
s_binary(JPEG_SOI)
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_APP0 ffe0 0x10 0x2 0x14
# ------------------------------------------------------------------------------
if s_block_start("O_APP0_0"):
s_binary(JPEG_APP0)
s_size("I_APP0_0", endian=">", inclusive=True, length=2, fuzzable=True)
if s_block_start("I_APP0_0"):
s_string("JFIF")
s_string("\x00")
s_byte(0x1) # Major version
s_byte(0x1) # Minor version
s_byte(0x1) # Density unit
s_word(0x48, endian=">") # Xdensity
s_word(0x48, endian=">") # Ydensity
s_byte(0x0) # Xthumbnail
s_byte(0x0) # Ythumbnail
s_block_end("I_APP0_0")
s_block_end("O_APP0_0")
s_repeat("O_APP0_0", min_reps=0, max_reps=100, step=10)
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_APP1 ffe1 0x3019 0x14 0x302f
# ------------------------------------------------------------------------------
if s_block_start("O_APP1_0"):
s_binary(JPEG_APP1)
s_size("I_APP1_0", endian=">", inclusive=True, length=2, fuzzable=True)
if s_block_start("I_APP1_0"):
# IMAGE DATA
s_string("Exif")
s_word(0x0, endian=">", fuzzable=True)
# ----------------------------------------------------------------------
# Image File Header - TIFF
# Source: partners.adobe.com/public/developer/en/tiff/TIFF6.pdf
#
# All offsets are relative to the beginning of the TIFF file!
# ----------------------------------------------------------------------
s_word(0x4949, fuzzable=True) # Endianness
s_word(0x2A, endian="<", fuzzable=True)
s_dword(0x8, endian="<", fuzzable=True)
# ----------------------------------------------------------------------
# IFD 1
# ----------------------------------------------------------------------
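        # Tag meanings (standard TIFF/EXIF): 0x010F Make, 0x0110 Model, 0x0112 Orientation,
        # 0x011A XResolution, 0x011B YResolution, 0x0128 ResolutionUnit, 0x0131 Software,
        # 0x0132 DateTime, 0x0213 YCbCrPositioning, 0x8769 Exif IFD pointer.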
s_word(0x0A, endian="<", fuzzable=True) # Number of IFD entries
s_word(0x010F, endian="<", fuzzable=True) # Tag
s_word(0x02, endian="<", fuzzable=True) # Field type - ASCII
s_dword(0x05, endian="<", fuzzable=True) # Number of values
s_dword(0x86, endian="<", fuzzable=True) # Value offset
# This entry points to FIELD_STR_1
s_word(0x0110, endian="<", fuzzable=False) # Tag
s_word(0x02, endian="<", fuzzable=False) # Field type - ASCII
s_dword(0x06, endian="<", fuzzable=False) # Number of values
s_dword(0x8C, endian="<", fuzzable=False) # Value offset
# This entry points to FIELD_STR_2
s_word(0x0112, endian="<", fuzzable=False) # Tag
s_word(0x03, endian="<", fuzzable=False) # Field type - SHORT
s_dword(0x01, endian="<", fuzzable=False) # Number of values
s_dword(0x01, endian="<", fuzzable=True) # Value offset
s_word(0x011A, endian="<", fuzzable=False)
s_word(0x05, endian="<", fuzzable=False) # Field type - RATIONAL
s_dword(0x01, endian="<", fuzzable=False)
s_dword(0x92, endian="<", fuzzable=True) # This entry points to FIELD_RATIONAL_1
s_word(0x011B, endian="<", fuzzable=False)
s_word(0x05, endian="<", fuzzable=False) # Field type - RATIONAL
s_dword(0x01, endian="<", fuzzable=False)
s_dword(0x9A, endian="<", fuzzable=False) # This entry points to FIELD_RATIONAL_2
s_word(0x0128, endian="<", fuzzable=False)
s_word(0x03, endian="<", fuzzable=False) # Field type - SHORT
s_dword(0x01, endian="<", fuzzable=False)
s_dword(0x02, endian="<", fuzzable=False)
s_word(0x0131, endian="<", fuzzable=False)
s_word(0x02, endian="<", fuzzable=False) # Field type - ASCII
s_dword(0x0B, endian="<", fuzzable=False)
s_dword(0xA2, endian="<", fuzzable=True) # This entry points to FIELD_STR_3
s_word(0x0132, endian="<", fuzzable=False)
s_word(0x02, endian="<", fuzzable=False) # Field type - ASCII
s_dword(0x14, endian="<", fuzzable=False)
s_dword(0xAE, endian="<", fuzzable=False) # This entry points to FIELD_STR_4
s_word(0x0213, endian="<", fuzzable=False)
s_word(0x03, endian="<", fuzzable=False) # Field type - SHORT
s_dword(0x01, endian="<", fuzzable=False)
s_dword(0x01, endian="<", fuzzable=False)
s_word(0x8769, endian="<", fuzzable=False)
s_word(0x04, endian="<", fuzzable=False) # Field type - LONG
s_dword(0x01, endian="<", fuzzable=False)
s_dword(0xC2, endian="<", fuzzable=True) # This entry points to FIELD_LONG_1
s_dword(0x2D30, endian="<", fuzzable=True) # offset of next IFD
# ----------------------------------------------------------------------
        # IFD value data - file offset: 0xA4
# ----------------------------------------------------------------------
s_string("Sony", name="FIELD_STR_1")
s_word(0x00)
s_string("C6903", name="FIELD_STR_2")
s_byte(0x00)
s_dword(0x48, endian="<", fuzzable=True) # FIELD_RATIONAL_1
s_dword(0x01, endian="<", fuzzable=True)
s_dword(0x48, endian="<", fuzzable=True) # FIELD_RATIONAL_2
s_dword(0x01, endian="<", fuzzable=True)
s_string("GIMP 2.8.8", name="FIELD_STR_3")
s_word(0x00)
s_string("2015:06:28 10:30:59", name="FIELD_STR_4")
s_byte(0x00)
s_dword(0x829A001C, endian="<", fuzzable=True, name="FIELD_LONG_1")
# ----------------------------------------------------------------------
# UNKNOWN / UNPARSED DATA - file offset: 0xE4
# ----------------------------------------------------------------------
s_binary("\x05\x00\x01\x00" + \
"\x00\x00\x18\x02\x00\x00\x9d\x82" + \
"\x05\x00\x01\x00\x00\x00\x20\x02" + \
"\x00\x00\x27\x88\x03\x00\x01\x00" + \
"\x00\x00\x00\x19\x00\x00\x00\x90" + \
"\x07\x00\x04\x00\x00\x00\x30\x32" + \
"\x32\x30\x03\x90\x02\x00\x14\x00" + \
"\x00\x00\x28\x02\x00\x00\x04\x90" + \
"\x02\x00\x14\x00\x00\x00\x3c\x02" + \
"\x00\x00\x01\x91\x07\x00\x04\x00" + \
"\x00\x00\x01\x02\x03\x00\x01\x92" + \
"\x0a\x00\x01\x00\x00\x00\x50\x02" + \
"\x00\x00\x04\x92\x0a\x00\x01\x00" + \
"\x00\x00\x58\x02\x00\x00\x07\x92" + \
"\x03\x00\x01\x00\x00\x00\x05\x00" + \
"\x00\x00\x08\x92\x03\x00\x01\x00" + \
"\x00\x00\x00\x00\x00\x00\x09\x92" + \
"\x03\x00\x01\x00\x00\x00\x19\x00" + \
"\x00\x00\x0a\x92\x05\x00\x01\x00" + \
"\x00\x00\x60\x02\x00\x00\x7c\x92" + \
"\x07\x00\x8a\x2a\x00\x00\x68\x02" + \
"\x00\x00\x90\x92\x02\x00\x07\x00" + \
"\x00\x00\xf2\x2c\x00\x00\x91\x92" + \
"\x02\x00\x07\x00\x00\x00\xfa\x2c" + \
"\x00\x00\x92\x92\x02\x00\x07\x00" + \
"\x00\x00\x02\x2d\x00\x00\x00\xa0" + \
"\x07\x00\x04\x00\x00\x00\x30\x31" + \
"\x30\x30\x01\xa0\x03\x00\x01\x00" + \
"\x00\x00\x01\x00\x00\x00\x02\xa0" + \
"\x04\x00\x01\x00\x00\x00\x01\x00" + \
"\x00\x00\x03\xa0\x04\x00\x01\x00" + \
"\x00\x00\x01\x00\x00\x00\x05\xa0" + \
"\x04\x00\x01\x00\x00\x00\x12\x2d" + \
"\x00\x00\x01\xa4\x03\x00\x01\x00" + \
"\x00\x00\x00\x00\x00\x00\x02\xa4" + \
"\x03\x00\x01\x00\x00\x00\x00\x00" + \
"\x00\x00\x03\xa4\x03\x00\x01\x00" + \
"\x00\x00\x00\x00\x00\x00\x04\xa4" + \
"\x05\x00\x01\x00\x00\x00\x0a\x2d" + \
"\x00\x00\x06\xa4\x03\x00\x01\x00" + \
"\x00\x00\x00\x00\x00\x00\x0c\xa4" + \
"\x03\x00\x01\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x0a\x00" + \
"\x00\x00\x50\x00\x00\x00\x14\x00" + \
"\x00\x00\x0a\x00\x00\x00")
# file offset: 0x246
s_string("2015:06:28 10:27:35")
s_byte(0x00)
# file offset: 0x25A
s_string("2015:06:28 10:27:35")
s_byte(0x00)
# file offset: 0x26E
s_binary("\x2c\x01" + \
"\x00\x00\x64\x00\x00\x00\x00\x00" + \
"\x00\x00\x03\x00\x00\x00\xea\x01" + \
"\x00\x00\x64\x00\x00\x00\x53\x4f" + \
"\x4e\x59\x20\x4d\x4f\x42\x49\x4c" + \
"\x45\x00\x09\x00\x0f\x20\x04\x00" + \
"\x01\x00\x00\x00\x00\x00\x00\x00" + \
"\x24\x20\x07\x00\x60\x00\x00\x00" + \
"\xfe\x02\x00\x00\x25\x20\x01\x00" + \
"\x04\x00\x00\x00\x01\x00\x00\x00" + \
"\x00\x30\x07\x00\x2e\x01\x00\x00" + \
"\x5e\x03\x00\x00\x01\x94\x07\x00" + \
"\x00\x10\x00\x00\x8c\x04\x00\x00" + \
"\x04\x94\x07\x00\x5c\x0a\x00\x00" + \
"\x8c\x14\x00\x00\x08\x94\x07\x00" + \
"\xe8\x03\x00\x00\xe8\x1e\x00\x00" + \
"\x09\x94\x07\x00\x40\x05\x00\x00" + \
"\xd0\x22\x00\x00\x0e\x94\x07\x00" + \
"\xf9\x04\x00\x00\x10\x28\x00\x00" + \
"\x00\x00\x00\x00\x49\x49\x4e\x00" + \
"\x00\x01")
# file offset: 0x30A
s_string("2015:06:28 10:27:35")
s_byte(0x00)
s_binary("\x70\x08" + \
"\x00\x0f\x30\x30\x32\x32\x30\x42" + \
"\x53\x30\x12\x00\x03\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x04\x00\x07\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x6d\x0b" + \
"\x00\x00\x06\x00\xdc\x05\x00\x00" + \
"\xe8\x03\x00\x00\x49\x49\x5e\x00" + \
"\x02\x01\x32\x30\x31\x35\x3a\x30" + \
"\x36\x3a\x32\x38\x20\x31\x30\x3a" + \
"\x32\x37\x3a\x33\x35\x00\x70\x08" + \
"\x00\x0f\x00\x00\x7a\x08\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x1a\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x1a\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x7d\x01\x6e\xcd\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x95\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x8e\xc4\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x24" + \
"\x00\x00\x00\xf0\x00\x00\x00\x5d" + \
"\x00\x00\x00\xd3\x00\x00\x00\x39" + \
"\x00\x00\x00\x91\x00\x00\x00\x38" + \
"\x00\x00\x00\x82\x00\x00\x00\x00" + \
"\x01\x00\x00\x1a\x00\x00\x00\xd3" + \
"\x00\x00\x00\xec\x00\x00\x00\x92" + \
"\x00\x00\x00\x5c\x00\x00\x00\xac" + \
"\x00\x00\x00\xdd\x00\x00\x00\x29" + \
"\x00\x00\x00\x83\x01\x00\x00\xd7" + \
"\x00\x00\x00\x04\x00\x00\x00\x01" + \
"\x00\x00\x00\x08\x00\x00\x00\x8a" + \
"\x00\x00\x00\xef\x00\x00\x00\x4f" + \
"\x00\x00\x00\x55\x00\x00\x00\x82" + \
"\x01\x00\x00\xcd\x00\x00\x00\x7d" + \
"\x00\x00\x00\x00\x00\x00\x00\x08" + \
"\x00\x00\x00\x7d\x00\x00\x00\xd3" + \
"\x00\x00\x00\xcb\x00\x00\x00\xaf" + \
"\x00\x00\x00\x27\x01\x00\x00\xe7" + \
"\x00\x00\x00\x01\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x7d" + \
"\x00\x00\x00\x5c\x00\x00\x00\xdc" + \
"\x00\x00\x00\xb0\x00\x00\x00\xa9" + \
"\x01\x00\x00\x5e\x00\x00\x00\x40" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x81" + \
"\x00\x00\x00\xe3\x00\x00\x00\xbc" + \
"\x00\x00\x00\xef\x01\x00\x00\xd8" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x88\x00\x00\x00\x1a" + \
"\x00\x00\x00\x4e\x00\x00\x00\xaa" + \
"\x00\x00\x00\xf6\x4f\x45\x45\x45" + \
"\x4f\xf6\x62\x62\x45\x13\x5b\x93" + \
"\x5b\x13\x45\x4f\x4f\xcf\xb1\x73" + \
"\x5f\x73\xb1\xcf\x65\x65\x4b\x27" + \
"\x2d\x4a\x2d\x27\x4b\x65\x65\xcf" + \
"\xb1\x73\x5f\x73\xb1\xcf\x65\x65" + \
"\x45\x13\x5b\x93\x5b\x13\x45\x4f" + \
"\x4f\xf6\x4f\x45\x45\x45\x4f\xf6" + \
"\x62\x62\x00\x00\xe7\x00\x00\x00" + \
"\x70\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x1b\x00\x7d\x00\x1b\x00\x75\x00" + \
"\x00\x00\x75\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x43\xf8" + \
"\x85\xf8\x85\xf8\x56\xf9\x85\xf8" + \
"\x00\x95\xd8\x01\x53\x00\x00\x00" + \
"\xd3\x00\x00\x00\x75\x00\x00\x00" + \
"\xd3\x00\x00\x00\x69\x00\x00\x00" + \
"\x53\x00\x00\x00\x60\x00\x00\x00" + \
"\x92\x00\x00\x00\x04\x00\x00\x00" + \
"\x02\x00\x00\x00\x5e\x00\x00\x00" + \
"\x40\x00\x00\x00\x8e\xc4\x43\x30" + \
"\x00\x8a\x01\x00\x00\x00\x27\xf9" + \
"\xbe\xf9\x85\xf8\x4a\xe2\x4a\xe2" + \
"\x60\xe2\x4a\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x38\xe2\x9d\xe2\x85\xef" + \
"\xd4\x24\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\xe9\xe2\xe2\xef\xc9\xef\x7a\x24" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x84\xe2" + \
"\xa4\xef\x92\x24\xfe\x24\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x38\xe2\xe2\xef" + \
"\x5d\x24\x8a\x79\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x56\xef\x1b\x24" + \
"\xfe\x24\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x24\xe2\x7d\xef\x71\xef\xe7\x79" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x6a\xe2" + \
"\x80\xe2\xae\xef\xa8\x24\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x60\xe2" + \
"\xe3\xef\x65\x24\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\xe3\xe2\xd3\xef" + \
"\x89\xef\x89\x47\x00\x01\x00\xfe" + \
"\x89\xba\x8e\x23\x8e\x23\x8e\x23" + \
"\x53\xba\x89\xba\x7d\x01\x8e\xc4" + \
"\x8e\xc4\x8e\xc4\x8e\xc4\x8e\xc4" + \
"\x7d\x00\x00\x00\x00\x00\x01\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x9d\x88\x5e\xba\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x01" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x38\x00\x00\x00\xa0\x00" + \
"\x00\x00\xba\x00\x00\x00\xa0\x00" + \
"\x00\x00\x70\x00\x00\x00\x38\x00" + \
"\x00\x00\x02\x00\x00\x00\xbb\x00" + \
"\x00\x00\xea\x00\x00\x00\x87\x00" + \
"\x00\x00\xea\x00\x00\x00\xea\x00" + \
"\x00\x00\x8e\xc4\x43\x30\x00\x8a" + \
"\x01\x00\x00\x00\x52\xf9\x7c\xf8" + \
"\x48\xf8\xf3\xe2\x20\xe2\xf3\xe2" + \
"\x0d\x47\x00\x01\x00\xfe\x0d\xba" + \
"\x8e\x23\x8e\xc4\x8e\x23\x79\xba" + \
"\x0d\xba\x7d\x01\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x69\x00\x70\x00\x88\x6c\xe7\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\xd8\x00\x00\x00\x00\xbc\x00\x01" + \
"\x00\x01\x00\x01\x00\x00\x00\x01" + \
"\x00\x00\x00\x00\x00\x00\x01\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x08\x00\x08\x00\x00\x00\x1b" + \
"\xd8\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x01\xb7\x01\x88\x00\x69" + \
"\x00\x70\x43\x20\x5b\xe7\x00\x69" + \
"\x00\x70\x00\x88\x6c\xe7\x00\x69" + \
"\x00\x70\x43\x20\x5b\xe7\x00\x69" + \
"\x00\x70\x43\x20\x5b\xe7\x0c\x00" + \
"\x00\xe7\x5b\xe7\x5b\xe7\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x8e\xc4\x6c\x23\x8e\xc4\x00\x00" + \
"\x69\x00\x00\x69\x00\x70\x00\xe7" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x69" + \
"\x00\x70\x43\x20\x5b\xe7\x00\x69" + \
"\x00\x70\x43\x20\x5b\xe7\x00\x69" + \
"\x00\x70\x43\x20\x5b\xe7\x00\x69" + \
"\x00\x70\x00\x88\x6c\xe7\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x4c\x00\x00\x00\x28\x00\x00" + \
"\x00\xb6\x00\x00\x00\xbb\x00\x00" + \
"\x00\xec\x00\x00\x00\xe0\x00\x00" + \
"\x00\xc2\x00\x00\x00\x82\x00\x00" + \
"\x00\x9b\x00\x00\x00\xd3\x00\x00" + \
"\x00\x30\x00\x00\x00\x5e\x00\x00" + \
"\x00\x88\x00\x00\x00\x92\x00\x00" + \
"\x00\xac\x00\x00\x00\xd0\x00\x00" + \
"\x00\x86\x00\x00\x00\x4b\x01\x00" + \
"\x00\xd7\x00\x00\x00\x08\x00\x00" + \
"\x00\x7d\x00\x00\x00\x08\x00\x00" + \
"\x00\xb6\x00\x00\x00\xe2\x00\x00" + \
"\x00\x33\x00\x00\x00\x77\x00\x00" + \
"\x00\xe5\x01\x00\x00\x88\x00\x00" + \
"\x00\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x04\x00\x00" + \
"\x00\x5c\x00\x00\x00\xc2\x00\x00" + \
"\x00\x49\x00\x00\x00\x6d\x01\x00" + \
"\x00\x5e\x00\x00\x00\x0e\x00\x00" + \
"\x00\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x7d\x00\x00\x00\x92\x00\x00" + \
"\x00\x2b\x00\x00\x00\x27\x00\x00" + \
"\x00\x44\x01\x00\x00\xd8\x00\x00" + \
"\x00\x40\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\xb6\x00\x00\x00\x79\x00\x00" + \
"\x00\x61\x00\x00\x00\x1a\x01\x00" + \
"\x00\x5e\x00\x00\x00\x00\x00\x00" + \
"\x00\x1b\x00\x00\x00\x00\x00\x00" + \
"\x00\x08\x00\x00\x00\xbe\x00\x00" + \
"\x00\xe2\x00\x00\x00\x53\x00\x00" + \
"\x00\xc6\x00\x00\x00\xf6\x4f\x45" + \
"\x45\x45\x4f\xf6\x62\x62\x45\x13" + \
"\x5b\x93\x5b\x13\x45\x4f\x4f\xcf" + \
"\xb1\x73\x5f\x73\xb1\xcf\x65\x65" + \
"\x4b\x27\x2d\x4a\x2d\x27\x4b\x65" + \
"\x65\xcf\xb1\x73\x5f\x73\xb1\xcf" + \
"\x65\x65\x45\x13\x5b\x93\x5b\x13" + \
"\x45\x4f\x4f\xf6\x4f\x45\x45\x45" + \
"\x4f\xf6\x62\x62\x00\x00\xe7\x00" + \
"\x00\x00\x28\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x1b\x00\x7d\x00\x1b\x00" + \
"\xba\x00\x00\x00\xba\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x9c\xf8\x48\xf8\x48\xf8\xfa\xf8" + \
"\x48\xf8\x00\x95\xd8\x01\x38\x00" + \
"\x00\x00\xa0\x00\x00\x00\xba\x00" + \
"\x00\x00\xa0\x00\x00\x00\x70\x00" + \
"\x00\x00\x38\x00\x00\x00\x02\x00" + \
"\x00\x00\xbb\x00\x00\x00\xea\x00" + \
"\x00\x00\x87\x00\x00\x00\xea\x00" + \
"\x00\x00\xea\x00\x00\x00\x8e\xc4" + \
"\x43\x30\x00\x8a\x01\x00\x00\x00" + \
"\x52\xf9\x7c\xf8\x48\xf8\xf3\xe2" + \
"\xf3\xe2\x20\xe2\xf3\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\xe3\xe2\xf8\xe2" + \
"\xd2\xef\xcf\x24\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x24\xe2\x05\xef\x9d\xef" + \
"\x48\x24\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\xe9\xe2\x38\xef\x1b\x24\x71\x24" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x07\xef\x8a\x24\xea\x79\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x60\xe2\x6a\xef" + \
"\xec\x24\x95\x79\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x37\xe2\x12\xef" + \
"\x7d\x79\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x29\xe2\x03\xef\xdf\x24" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\xe6\xe2\xdb\xef\xb1\x24\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x95\xe2" + \
"\x95\xe2\x95\xe2\x95\xe2\x38\xe2" + \
"\x20\xef\xfb\xef\x0d\x47\x00\x01" + \
"\x00\xfe\x0d\xba\x8e\x23\x8e\x23" + \
"\x8e\x23\x79\xba\x0d\xba\x7d\x01" + \
"\x8e\xc4\x8e\xc4\x8e\xc4\x8e\xc4" + \
"\x8e\xc4\x7d\x00\x00\x00\x00\x00" + \
"\x01\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x9d\x88\x5e" + \
"\xba\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x95\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x8e\xc4\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\xb9\x61\x00\x00\x01\x00" + \
"\x00\x00\x01\x00\x00\x00\x70\x39" + \
"\x00\x00\xf6\x43\x01\x00\x4c\x40" + \
"\x00\x00\xa8\x16\x7d\x00\xe2\x40" + \
"\x00\x00\x4a\x92\xd8\x00\x00\x01" + \
"\x00\x00\x00\x01\x00\x00\x00\x01" + \
"\x00\x00\x00\x00\x88\x6c\xe7\x00" + \
"\x88\x6c\xe7\x00\x88\x6c\xe7\x00" + \
"\x00\x4a\x00\x00\x00\x4a\x00\x0e" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\xf3\xe2\x00\x00\x00\x00\x00\x00" + \
"\x1b\x00\x1b\x4a\x01\x00\x00\x01" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x0d\xba\x00\x00\x6c" + \
"\x23\x00\x00\xcf\x54\x00\x00\x4a" + \
"\x5d\x07\x00\x00\x00\x00\x00\x01" + \
"\x00\x00\x00\x00\x00\x00\x00\x0d" + \
"\xba\x00\x00\x6c\x23\x00\x00\xcf" + \
"\x54\x00\x00\x4a\x5d\x07\x00\x00" + \
"\x00\x00\x00\x08\x00\x00\x00\x00" + \
"\x00\x00\x00\x0d\xba\x00\x00\x6c" + \
"\x23\x00\x00\xcf\x54\x00\x00\x4a" + \
"\x5d\x07\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x98\x00\x00\x00\x75" + \
"\x01\x00\x00\x59\x01\x00\x00\xaf" + \
"\x08\x00\x00\x00\x01\x00\x00\x48" + \
"\x01\x00\x00\x33\x08\x00\x00\x67" + \
"\x08\x00\x00\x6d\x1b\x00\x00\x6c" + \
"\x23\x0e\x0e\x00\x00\x7d\x7d\x00" + \
"\x00\x00\xd8\x00\x00\xeb\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x01\x41\x04\x01\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x01\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\xd8\x00" + \
"\x00\x00\x01\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x0e\x00\x00\x00\x00\x00\x01" + \
"\x00\x00\xff\x1b\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\xff\xff\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\xde\xf8\x00\x00\x7d\x5e" + \
"\x00\x00\x1b\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x11\xf0\x6c\x4b\x00\x00" + \
"\x95\x00\xce\x21\xff\xff\xc5\x0e" + \
"\xff\xff\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc4\xc4" + \
"\xff\xff\x76\x92\xff\xff\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\xff\xff\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\xff\xff\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\xff\xff\x00\x00" + \
"\x00\x00\xb3\x79\x00\x00\x00\x00" + \
"\x00\x00\xa9\x00\x00\x00\xa9\x00" + \
"\x00\x00\x70\x00\x00\x00\x47\x00" + \
"\x00\x00\x38\x00\x00\x00\x95\x00" + \
"\x00\x00\xb3\x01\x00\x00\x02\x00" + \
"\x00\x00\x96\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x01\x00" + \
"\x00\x00\x01\x00\x00\x00\x01\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x0e\x00\x00\x00\x24\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x01\x01" + \
"\x00\x00\x00\x00\x00\x00\x01\x00" + \
"\x00\x00\x00\x00\x00\x00\x69\x00" + \
"\x00\x00\x08\x00\x00\x00\xd8\x00" + \
"\x00\x00\x00\x00\x00\x00\x08\xd8" + \
"\x00\x00\x00\x00\x00\x00\x01\x00" + \
"\x00\x00\x01\x00\x00\x00\x01\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\xe6\xd8\x00\x00\xb3\x79\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\xe2\xe2\xe2\xe2" + \
"\xe2\xe2\xe2\xe2\x00\x00\x38\x00" + \
"\x00\x00\x5c\x00\x00\x00\x24\x00" + \
"\x00\x00\x51\x00\x00\x00\x1b\x00" + \
"\x00\x00\xcd\x00\x00\x00\x51\x00" + \
"\x00\x00\x56\x00\x00\x00\x6c\x00" + \
"\x00\x00\x6a\x00\x00\x00\xac\x00" + \
"\x00\x00\x38\x00\x00\x00\xe2\x00" + \
"\x00\x00\x95\x00\x00\x00\x88\x00" + \
"\x00\x00\x88\x00\x00\x00\xcd\x00" + \
"\x00\x00\xbe\x00\x00\x00\xe0\x00" + \
"\x00\x00\xe2\x00\x00\x00\xe3\x00" + \
"\x00\x00\xb7\x00\x00\x00\x0c\x00" + \
"\x00\x00\x0e\x00\x00\x00\x08\x00" + \
"\x00\x00\x7d\x00\x00\x00\x1b\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\xe7\x00" + \
"\x00\x00\x6c\x00\x00\x00\x20\x00" + \
"\x00\x00\x56\x00\x00\x00\xe7\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x08\x00" + \
"\x00\x00\xd8\x00\x00\x00\x6a\x00" + \
"\x00\x00\x0c\x00\x00\x00\x0e\x00" + \
"\x00\x00\xd8\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\xd8\x00" + \
"\x00\x00\x92\x00\x00\x00\xb6\x00" + \
"\x00\x00\x69\x00\x00\x00\x04\x00" + \
"\x00\x00\x08\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\xcd\x00\x00\x00\x47\x00" + \
"\x00\x00\x23\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x08\x00\x00\x00\xbb\x00" + \
"\x00\x00\xa0\x00\x00\x00\x96\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x20\x00\x00\x00\x88\x00" + \
"\x00\x00\x33\x00\x00\x00\x0c\x00" + \
"\x00\x00\x01\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\xe7\x00\x00\x00\xe7\x00" + \
"\x00\x00\xe2\x00\x00\x00\x52\x00" + \
"\x00\x00\x6f\xfc\x00\x00\x20\x1b" + \
"\x00\x00\x01\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\xde\xf8\x00\x00\x7d\x5e" + \
"\x00\x00\x1b\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\xce\x21" + \
"\xff\xff\xc5\x0e\xff\xff\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc4\xc4\xff\xff\x76\x92" + \
"\xff\xff\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\xff\xff\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\xff\xff\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\xff\xff\x11\xf0\x6c\x4b\x38\x00" + \
"\x00\x00\x5c\x00\x00\x00\x24\x00" + \
"\x00\x00\x51\x00\x00\x00\x1b\x00" + \
"\x00\x00\xcd\x00\x00\x00\x51\x00" + \
"\x00\x00\x56\x00\x00\x00\x6c\x00" + \
"\x00\x00\x6a\x00\x00\x00\xac\x00" + \
"\x00\x00\x38\x00\x00\x00\xe2\x00" + \
"\x00\x00\x95\x00\x00\x00\x88\x00" + \
"\x00\x00\x88\x00\x00\x00\xcd\x00" + \
"\x00\x00\xbe\x00\x00\x00\xe0\x00" + \
"\x00\x00\xe2\x00\x00\x00\xe3\x00" + \
"\x00\x00\xb7\x00\x00\x00\x0c\x00" + \
"\x00\x00\x0e\x00\x00\x00\x08\x00" + \
"\x00\x00\x7d\x00\x00\x00\x1b\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\xe7\x00" + \
"\x00\x00\x6c\x00\x00\x00\x20\x00" + \
"\x00\x00\x56\x00\x00\x00\xe7\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x08\x00" + \
"\x00\x00\xd8\x00\x00\x00\x6a\x00" + \
"\x00\x00\x0c\x00\x00\x00\x0e\x00" + \
"\x00\x00\xd8\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\xd8\x00" + \
"\x00\x00\x92\x00\x00\x00\xb6\x00" + \
"\x00\x00\x69\x00\x00\x00\x04\x00" + \
"\x00\x00\x08\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\xcd\x00\x00\x00\x47\x00" + \
"\x00\x00\x23\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x08\x00\x00\x00\xbb\x00" + \
"\x00\x00\xa0\x00\x00\x00\x96\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x20\x00\x00\x00\x88\x00" + \
"\x00\x00\x33\x00\x00\x00\x0c\x00" + \
"\x00\x00\x01\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\xe7\x00\x00\x00\xe7\x00" + \
"\x00\x00\xe2\x00\x00\x00\x52\x00" + \
"\x00\x00\x00\x00\x01\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\xcd\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x4e\x81\x06\xea\x4e\x81\x06\xea" + \
"\xdf\x8a\x07\x88\xdf\x8a\x07\x88" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x8c\xc8\x31\xbd\x0a\xf4\x4d\x1d" + \
"\x95\x4c\x0c\xbf\xd6\xe7\xa3\xea" + \
"\x00\x00\x7a\x70\x89\x70\xd6\xe7" + \
"\x95\x4c\x0c\xbf\x00\x40\x00\x00" + \
"\x00\x00\x00\x00\x00\x40\x00\x00" + \
"\x00\x00\x00\x00\x00\x40\x4a\x1a" + \
"\x00\xa9\x08\x47\x20\x00\xc8\x00" + \
"\x4a\x4a\x00\xdd\x49\x30\x01\x70" + \
"\xcc\x24\x30\x00\x00\x00\xff\xff" + \
"\xff\xff\xff\xff\xff\xff\xff\xff" + \
"\xff\xff\xff\xff\xff\xff\xff\xff" + \
"\xff\xff\x3b\xd7\x0f\xb8\x00\x00" + \
"\x00\x00\x7f\x88\xcf\xce\xff\xff" + \
"\xff\xff\x75\x92\x22\xa2\xff\xff" + \
"\xff\xff\xff\xff\xff\xff\xff\xff" + \
"\xff\xff\xff\xff\xff\xff\xff\xff" + \
"\xff\xff\xff\xff\xff\xff\x94\x95" + \
"\x07\xa2\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x05\x00\x00" + \
"\x00\x00\x00\x00\xc4\x1b\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x3f\x00\xe7\x00\x43\xd7" + \
"\xe0\x98\x6b\x01\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x01\x00\x01\x00\x00\x1e\x28" + \
"\xfb\xa5\x1e\x28\xfb\xa5\x1e\x28" + \
"\xfb\xa5\x69\xc5\x00\xa4\x69\xc5" + \
"\x00\xa4\x1e\x28\xfb\xa5\x69\xc5" + \
"\x00\xa4\x69\xc5\x00\xa4\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\xff" + \
"\x72\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x90\xe2" + \
"\x20\x44\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\xff\x00\xff\x00\xb0\x00" + \
"\x95\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\xff\x00\xff\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x7e\x28" + \
"\x95\xa6\xc4\x1b\x00\x00\xd6\x28" + \
"\x41\xa5\x3f\x00\x00\x00\x1e\x28" + \
"\xfb\xa5\x00\x40\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x1e\x28" + \
"\xfb\xa5\xfb\xa5\xfb\xa5\xfb\xa5" + \
"\x00\x00\x1e\x28\xfb\xa5\x1e\x28" + \
"\xfb\xa5\x1e\x28\xfb\xa5\x69\xc5" + \
"\x00\xa4\x69\xc5\x00\xa4\x69\xc5" + \
"\x00\xa4\x00\x38\x00\xff\x00\x47" + \
"\x00\xa4\x00\xff\xff\xff\x00\x00" + \
"\x00\x38\x00\x24\x00\x4a\xa4\x01" + \
"\x00\x08\x00\x00\x00\x00\x1e\x28" + \
"\xfb\xa5\x69\xc5\x00\xa4\x00\x00" + \
"\x00\x00\x00\x00\x00\x08\x00\x08" + \
"\x00\x40\x00\x00\x00\x00\x00\x00" + \
"\x00\x40\x00\x00\x00\x00\x00\x00" + \
"\x00\x40\xd4\x7d\x02\xfe\xc3\x00" + \
"\x65\xff\x79\x7d\x55\xff\x2f\x00" + \
"\x3c\xfe\x41\x7d\x75\x08\x00\x01" + \
"\x5b\x01\x75\x08\x00\x01\x5b\x01" + \
"\x9e\x28\x00\xc4\xea\x24\x12\x0e" + \
"\x00\x40\x54\x7d\x00\x01\x00\x01" + \
"\x00\x01\x00\x00\x00\x00\x4b\x01" + \
"\x00\x00\x00\x00\x00\x01\x4b\x01" + \
"\x00\x00\xfb\xa5\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x95\x4c" + \
"\x0c\xbf\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\xfb\x79\x00\x00\xff\xff\x95\x00" + \
"\x00\x00\x00\x00\xaa\x01\x00\xbb" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x01\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x40\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x33\x01\xf5\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x08\x00" + \
"\x00\x00\x60\x30\xc8\xa2\xff\xff" + \
"\xff\xff\x00\x5e\x00\x00\x6d\xe2" + \
"\xd9\x53\x04\x00\x00\x00\x0c\xf8" + \
"\xff\xff\x94\x88\xff\xff\x49\x00" + \
"\x00\x00\x6f\xff\xff\xff\xfb\xff" + \
"\xff\xff\x4e\x81\x06\xea\xdf\x8a" + \
"\x07\x88\x00\x00\x00\x00\x08\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x01\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x01\x00\x01\x00" + \
"\x1b\x00\x7d\x00\x04\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x08\x00\x1b\x00" + \
"\xd8\x00\xcd\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x01\x00\x1b\x00\x5e\x00" + \
"\x05\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x01\x00\x1b\x00\x0e\x00\xb6\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x01\x00" + \
"\x1b\x00\x5e\x00\xb6\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x01\x00\x08\x00" + \
"\x5e\x00\xb6\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x08\x00\x7d\x00" + \
"\x05\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x08\x00\x7d\x00\x04\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x01\x00\x1b\x00\x5e\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\xec\x70" + \
"\x34\xa7\x44\x30\xe8\xfc\xfd\xb1" + \
"\xff\xff\xc5\x0e\xff\xff\xc5\x0e" + \
"\xff\xff\xc5\x0e\xd7\x9a\xe2\x8a" + \
"\x14\xaa\x94\x56\xc0\xae\x0a\x20" + \
"\x4e\x2e\x80\xd7\xc4\x63\x65\xbe" + \
"\x01\xe6\x11\xbe\xff\xff\xc5\x0e" + \
"\xff\xff\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\xff\xff\xc5\x0e\x76\x35\xc0\x8a" + \
"\x63\x94\x06\x30\x79\xab\xb3\x81" + \
"\xa8\x2a\xfd\xd7\xf5\xa2\xf9\x1a" + \
"\xff\xff\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\xff\xff\xa1\x56\xeb\x17\x69\x20" + \
"\xea\x09\xe7\xd7\x4f\xb4\x2c\x30" + \
"\xed\x8b\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\xff\xff\x1b\x56" + \
"\xff\xff\xe9\x70\xc3\x84\xfd\x92" + \
"\xe0\x1d\xb3\xbb\x9b\x66\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\xff\xff\xf5\xe7\x5c\xa6\xe4\x30" + \
"\xbc\xae\x1d\xbe\xbc\xa6\xe8\xd7" + \
"\xda\x66\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\xff\xff\xc2\x04" + \
"\x2f\xde\xb2\x8a\xb0\xf7\x00\x92" + \
"\x85\x35\xd7\x92\x85\x8b\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\xff\xff\xef\x04\x99\xdf\xc8\xec" + \
"\xe7\xf2\xa4\xec\x4e\xae\xb6\x28" + \
"\xb6\x2a\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\xff\xff\xb5\x0e" + \
"\xff\xff\xbc\x70\x62\x9c\xe4\xbb" + \
"\xb9\xb4\xe3\x92\x18\xae\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\x4f\x5e\xc5\x0e" + \
"\x4f\x5e\xc5\x0e\xff\xff\xc5\x0e" + \
"\x7a\x8d\xf7\xcd\x14\xb4\x8b\x81" + \
"\x40\x97\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\xb9\x40" + \
"\x00\x00\xbd\xa2\x0f\x8d\x6d\xee" + \
"\x3d\x2d\x28\x5e\x12\x72\xf1\x47" + \
"\x07\xa1\x50\x6d\xc2\x31\x17\xb2" + \
"\x4f\x20\x15\x76\xed\x57\xa5\xf6" + \
"\xc5\x33\xcc\x3a\x0c\x3c\xb2\x29" + \
"\x2a\xd0\x06\x5c\x7c\x62\x3a\xdf" + \
"\x35\xbe\x1a\x90\xd5\xca\xca\x7b" + \
"\x08\x1b\x6d\x29\xaa\x32\x6d\xa8" + \
"\x92\x03\x9f\xf8\x62\xd4\xc6\x64" + \
"\xef\x2b\xf2\x80\x0d\xab\xdd\xd2" + \
"\x22\xa0\x60\x26\xe4\xd2\x61\x25" + \
"\x5b\x7c\x9f\xa3\xfb\x3f\x57\xde" + \
"\x28\xed\xb6\xbf\xd4\x6b\x44\x8d" + \
"\x73\x67\xf5\x21\x1f\x33\x72\x90" + \
"\xf9\x75\xf1\x1f\x35\x21\xa0\x2a" + \
"\x2b\xe0\xe0\x1c\x33\x5b\xb1\x67" + \
"\x54\x68\xaf\x57\xc3\x32\x53\x99" + \
"\xa6\xa2\x66\xb5\x97\x9b\xdd\x67" + \
"\xb5\x86\xe6\x6f\x97\x0a\x7e\x28" + \
"\x26\xdd\x90\xdd\x1c\x17\x75\x32" + \
"\x41\x43\xf0\xd5\x32\x61\x68\xb2" + \
"\x78\x32\x26\xc9\x46\xd6\x28\xb4" + \
"\xfb\x9b\x41\xad\x75\x90\x02\x06" + \
"\xa7\x59\xf4\x46\xcb\x57\xf1\x90" + \
"\x85\xe2\x85\x87\x1f\x19\x93\x0f" + \
"\x0b\xe6\x32\xd9\xb8\xae\x78\x8f" + \
"\xf3\xdf\x5b\x54\xc5\xa2\xd0\xd4" + \
"\x46\x7d\x26\x36\x90\xf6\x1f\x81" + \
"\x57\xfb\xa2\x45\xda\xa3\x41\x48" + \
"\x17\x89\xe8\xef\x21\xd1\xf4\xbe" + \
"\x0c\xad\x72\x0b\xad\x6f\x75\xe8" + \
"\xd9\x98\xad\xc3\x8e\x4f\xbf\x6f" + \
"\x2c\x67\x53\xeb\x5f\x95\x04\x3b" + \
"\x8e\x9f\x11\xa8\x92\x72\x7f\x3a" + \
"\x3a\x3a\x3a\x3a\x3a\x3a\x3a\x3a" + \
"\x3a\x3a\x3a\x3a\x3a\x3a\x3a\x3a" + \
"\x3a\x3a\x3a\x3a\x3a\x3a\x83\x42" + \
"\xf0\x38\xd3\x73\xf7\x42\x42\x75" + \
"\x81\xd1\x71\x35\xff\x17\xff\x06" + \
"\xa1\xc8\xd0\x3a\xf6\xcd\xcb\xf5" + \
"\xab\xb3\x1a\x4d\x22\x45\xa1\x7b" + \
"\xc4\x10\x3b\x47\x0c\xf7\xfd\xa1" + \
"\xfb\xb6\x56\x15\xfa\xd2\xc8\x58" + \
"\xed\x30\x85\x1e\x49\x90\x9f\x41" + \
"\xb1\xd9\x57\xba\x6a\x8b\x86\x29" + \
"\x75\xe6\xd1\x82\x0d\x36\xa1\xe1" + \
"\x48\x34\xa2\x12\x58\xce\xb9\xf1" + \
"\xe5\xaf\x45\xc9\x1f\x9a\xf6\x59" + \
"\x9a\x8c\x9f\x35\x57\x06\x79\xa0" + \
"\xbd\x74\x9e\x1d\x74\x94\x0c\xe8" + \
"\x2f\x52\x1b\xd3\x73\x11\x2d\x3d" + \
"\x98\x26\xf7\x7f\xee\x2b\xfb\x74" + \
"\xbd\xae\x40\x6a\xf0\xa9\x76\x1d" + \
"\xd3\xec\x42\x75\x0b\xc2\x9e\x94" + \
"\x54\xa0\x26\x6e\x7e\x85\x12\xc7" + \
"\x94\xf3\xda\xad\x81\x8a\x7c\xf4" + \
"\x47\xfa\x79\xfa\xfe\x40\xaf\x62" + \
"\xbf\xdf\xd7\xf2\x0d\xf2\xbe\x53" + \
"\x6d\x0a\xb4\xdd\x84\x5d\x43\xda" + \
"\x41\xde\x6c\xdd\xef\xdd\xcb\xdd" + \
"\x2f\x66\xa3\x33\xa8\xc7\xd5\xd5" + \
"\xd5\x9d\x44\xa4\x92\x74\x7c\x63" + \
"\xa0\xeb\xd8\x22\x74\xe8\xed\xbc" + \
"\xad\xb0\x51\xaa\x5e\xa5\x8d\x75" + \
"\x60\xf3\x91\x14\x56\x12\x58\x57" + \
"\x4d\x46\xcd\xbf\x7d\x79\xdd\x25" + \
"\x9d\xee\xd1\x85\xf1\xa0\xfe\x3d" + \
"\xc2\x6c\x7f\x27\x7b\x7d\x90\x30" + \
"\x0e\xc8\xaf\xb4\x0a\x75\xb4\x04" + \
"\xcb\x23\xcc\xf8\x69\xbf\x81\xeb" + \
"\xba\x08\xfb\x13\xd8\x66\x35\xc3" + \
"\x22\x20\xfa\x9a\x89\x86\xc7\xa9" + \
"\x41\xca\xe4\x83\x62\x6b\x27\xa3" + \
"\x68\xa7\x73\xeb\xfe\x5a\xb9\xec" + \
"\xfd\x57\xb0\xbf\xa7\x98\x39\x2c" + \
"\x88\x32\xc0\x30\x92\x2b\xde\x6f" + \
"\x2d\x3b\xa8\x81\xd4\x90\x87\xf9" + \
"\x60\x4f\xd5\x62\x65\x06\x79\x44" + \
"\x76\x02\xfd\xa0\xc7\x65\xca\xfb" + \
"\x7b\x56\xde\x0d\xfa\x02\xfd\xd1" + \
"\xdb\x09\xef\x33\x7e\xf5\xe4\x04" + \
"\xec\x42\x6b\x63\xb1\x9e\xa6\x7f" + \
"\x90\x81\x21\x5d\x1f\x90\xe5\x33" + \
"\xb3\x67\x86\x3b\x91\xfa\x2e\xce" + \
"\xf5\xf7\x27\xac\xb0\xa2\xce\xfe" + \
"\x7a\xf7\xfe\x41\xb8\x16\x85\xdf" + \
"\xfc\xa3\x5a\xd8\xeb\x81\xce\xcb" + \
"\x9a\xf1\x48\x36\x49\xb8\xac\x44" + \
"\xa9\xba\xb6\x82\x04\x94\xc3\x34" + \
"\x82\x3b\x3b\xd0\x6d\xf5\x7a\xcf" + \
"\x07\xbb\xed\xa5\x49\xc3\x3e\xaa" + \
"\x86\x0b\xa0\x07\x64\xf2\xa7\x10" + \
"\xd9\xe3\x6a\x1b\xf8\x58\xfa\xf9" + \
"\x00\x25\xf1\x4c\xc1\x0d\x8b\x21" + \
"\x92\x56\x1e\xc9\x38\x57\x66\xea" + \
"\x44\x97\x45\xd1\x8a\x82\xe7\xfc" + \
"\x83\x1c\xf2\x8f\x0c\x68\xf8\xa1" + \
"\x44\x18\x97\x2e\x6f\x87\xde\xda" + \
"\x00\xff\x98\x81\x42\xfe\x8c\xce" + \
"\x75\x60\xad\xf5\xf8\x53\x9e\xb9" + \
"\x8a\x76\xaf\x68\x9a\x54\x25\x22" + \
"\xf5\x01\x8b\x76\xf1\xde\x26\x65" + \
"\x82\x8b\xf1\x7a\xec\x65\xb1\x44" + \
"\x93\xc0\xc8\xcd\x1d\xd6\xba\xc6" + \
"\xd6\xf7\x9b\x21\xe5\x45\x1d\xb3" + \
"\xb5\xce\xf1\x0f\x7b\x83\xd9\x93" + \
"\xc2\x00\xbf\xe7\xbf\x01\x79\xac" + \
"\x9a\xf1\xcb\xf1\x44\x0e\xf1\x67" + \
"\x09\x78\xfe\xfb\x39\xa3\x68\xd2" + \
"\xe8\xd1\xf5\x8e\xed\xae\x61\x04" + \
"\x22\x9c\xea\xb6\xb6\xb6\xb6\xb6" + \
"\xb6\xb6\xb6\xb6\xb6\xb6\xb6\xb6" + \
"\xb6\xb6\xb6\xb6\xb6\xb6\xb6\xb6" + \
"\xb6\xb6\xb6\xb6\xb6\xb6\xb6\xb6" + \
"\xb6\xb6\xb6\xb6\xb6\xb6\xb6\xb6" + \
"\xb6\xb6\xb6\xb6\xb6\xb6\xb6\xb6" + \
"\xb6\xb6\xb6\xb6\xb6\xb6\xb6\xb6" + \
"\xb6\xb6\xb6\xb6\xb6\xb6\xb6\xb6" + \
"\xb6\xb6\x4b\x28\x7c\x95\xad\x51" + \
"\xfc\xd2\x64\xd3\x7d\xa1\xac\xb1" + \
"\x90\xe5\xdc\x28\xbb\x72\x4c\xce" + \
"\x9e\xd5\x91\x72\x73\xca\xea\x71" + \
"\x30\xc3\xc0\x27\xb8\x1c\x7b\xca" + \
"\xa2\xae\xd7\x78\x2e\x89\xdb\xdf" + \
"\x27\x98\x5e\x1e\xf0\x76\xf4\xd9" + \
"\x06\x26\x5a\xe2\xf3\x6d\x89\x78" + \
"\xaf\xb6\x6f\xe5\x9a\x97\xa9\x13" + \
"\xbd\x6d\x7f\xed\x21\xca\xaf\x62" + \
"\x09\xb5\xfe\x8e\x32\xfa\xe8\x25" + \
"\x12\xf9\xb8\xb7\xd4\x0a\xb0\xb0" + \
"\x89\x8c\x2d\xca\xaf\x01\x7a\xa6" + \
"\x0d\x11\x6b\xe9\x81\x26\xc2\xb4" + \
"\xd7\x1f\x68\xd4\xf5\x8f\xaf\xa9" + \
"\xf4\x6b\xe3\x7a\xa6\x6d\x44\xa6" + \
"\xad\x2a\xa1\x6b\x98\xea\xd1\x29" + \
"\x21\x6c\x7e\x0d\x48\x4b\x53\xbe" + \
"\xd3\xef\x13\x7c\xb7\x13\x6e\x94" + \
"\x60\xdc\x5d\x93\x7c\xb7\x13\xca" + \
"\x67\x34\xd3\x84\x94\x60\x13\xa9" + \
"\x72\x67\xc6\xf6\x68\x8d\x13\xdc" + \
"\xd4\x4f\xbe\xfd\xf9\x56\xe1\x26" + \
"\xe5\x7c\xc1\x5c\xca\x36\xd0\x9c" + \
"\x45\xe3\xed\x4f\xee\x87\x96\xaf" + \
"\x72\xb1\xb9\xb0\x59\x29\x21\x6e" + \
"\x06\x9b\x05\xb1\x9c\xf4\xc4\xed" + \
"\x80\x67\xf4\xfd\x10\xf2\x48\x8f" + \
"\x95\xf5\x20\x7f\xb0\x07\x7b\xa9" + \
"\x83\x92\xfa\xfc\x5f\xe3\xed\x18" + \
"\x14\xba\x29\x4d\xd9\x58\xaf\x3b" + \
"\xfd\xd5\x2b\xa8\xfd\x67\xc3\x41" + \
"\xa0\xb1\xa2\xc3\x5b\xf5\xbc\xc9" + \
"\x57\x19\xf8\x10\x83\xeb\x49\x5b" + \
"\x6d\x8a\x9e\xf4\x55\xaa\x90\x81" + \
"\x03\xbd\xa9\xf9\x6a\x74\x33\x27" + \
"\xdb\x07\xf5\x82\x9a\xfe\xfc\xcb" + \
"\xdf\x6b\xea\x6a\x63\xad\xf2\xd4" + \
"\xb0\x61\x7c\x90\x44\x55\x6b\xd4" + \
"\xa1\x47\xe9\x75\xa1\xb4\x26\x28" + \
"\x80\xfb\x56\xc9\xce\x94\x99\x00" + \
"\x35\x39\x38\x38\x38\x39\x00\x00" + \
"\x35\x39\x38\x38\x38\x39\x00\x00" + \
"\x35\x39\x38\x38\x38\x39\x00\x00" + \
"\x64\x00\x00\x00\x64\x00\x00\x00" + \
"\x02\x00\x01\x00\x02\x00\x04\x00" + \
"\x00\x00\x52\x39\x38\x00\x02\x00" + \
"\x07\x00\x04\x00\x00\x00\x30\x31" + \
"\x30\x30\x00\x00\x00\x00")
# ----------------------------------------------------------------------
# IFD 2 - file offset: 0x2D40
# ----------------------------------------------------------------------
s_word(0x07, endian="<", fuzzable=True) # Number of IFD entries
s_word(0x0103, endian="<", fuzzable=True) # Tag
s_word(0x03, endian="<", fuzzable=True) # Field type - SHORT
s_dword(0x01, endian="<", fuzzable=True) # Number of values
s_dword(0x06, endian="<", fuzzable=True) # Value offset / value
s_word(0x0112, endian="<", fuzzable=True) # Tag
s_word(0x03, endian="<", fuzzable=True) # Field type - SHORT
s_dword(0x01, endian="<", fuzzable=True) # Number of values
s_dword(0x03, endian="<", fuzzable=True) # Value offset / value
s_word(0x011a, endian="<", fuzzable=True) # Tag
s_word(0x05, endian="<", fuzzable=True) # Field type - RATIONAL
s_dword(0x01, endian="<", fuzzable=True) # Number of values
s_dword(0x2d8a, endian="<", fuzzable=True) # Value offset / value
# This entry points to FIELD_RATIONAL_3
s_word(0x011b, endian="<", fuzzable=True) # Tag
s_word(0x05, endian="<", fuzzable=True) # Field type - RATIONAL
s_dword(0x01, endian="<", fuzzable=True) # Number of values
s_dword(0x2d92, endian="<", fuzzable=True) # Value offset / value
# This entry points to FIELD_RATIONAL_4
s_word(0x0128, endian="<", fuzzable=True) # Tag
s_word(0x03, endian="<", fuzzable=True) # Field type - SHORT
s_dword(0x01, endian="<", fuzzable=True) # Number of values
s_dword(0x02, endian="<", fuzzable=True) # Value offset / value
s_word(0x0201, endian="<", fuzzable=True) # Tag
s_word(0x04, endian="<", fuzzable=True) # Field type - LONG
s_dword(0x01, endian="<", fuzzable=True) # Number of values
s_dword(0x2d9a, endian="<", fuzzable=True) # Value offset / value
# This entry points to SOI_II
s_word(0x0202, endian="<", fuzzable=True) # Tag
s_word(0x04, endian="<", fuzzable=True) # Field type - LONG
s_dword(0x01, endian="<", fuzzable=True) # Number of values
s_dword(0x0277, endian="<", fuzzable=True) # Value offset / value
s_dword(0x00, endian="<", fuzzable=True) # END OF IFD MARKER
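# ----------------------------------------------------------------------
# Each IFD entry above follows the standard 12-byte TIFF layout: a 2-byte
# tag, a 2-byte field type, a 4-byte value count and a 4-byte value/offset,
# all little-endian in this file. A minimal, purely illustrative sketch of
# packing one such entry by hand (not part of the original template):
#
#   import struct
#   entry = struct.pack("<HHII", 0x0103, 3, 1, 6)  # Compression (SHORT) = 6
# ----------------------------------------------------------------------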
# ----------------------------------------------------------------------
# IFE Data - offset: 0x2DA8
# ----------------------------------------------------------------------
s_dword(0x48, endian="<") # FIELD_RATIONAL_3
s_dword(0x01, endian="<")
s_dword(0x48, endian="<") # FIELD_RATIONAL_4
s_dword(0x01, endian="<")
# ----------------------------------------------------------------------
# SOI - SOI_II
# ----------------------------------------------------------------------
s_binary(JPEG_SOI)
# ----------------------------------------------------------------------
# APP0 - file offset: 0x2DBA
# ----------------------------------------------------------------------
if s_block_start("O_APP0_1"):
s_binary(JPEG_APP0)
s_size("I_APP0_1", endian=">", inclusive=True, length=2, fuzzable=True)
if s_block_start("I_APP0_1"):
s_string("JFIF")
s_string("\x00")
s_byte(0x01) # Major version
s_byte(0x01) # Minor version
s_byte(0x00) # Density unit
s_word(0x01, endian=">") # Xdensity
s_word(0x01, endian=">") # Ydensity
s_byte(0x00) # Xthumbnail
s_byte(0x00) # Ythumbnail
s_block_end("I_APP0_1")
s_block_end("O_APP0_1")
s_repeat("O_APP0_1", min_reps=0, max_reps=100, step=10)
# ----------------------------------------------------------------------
# DQT
# ----------------------------------------------------------------------
if s_block_start("O_DQT_2"):
s_binary(JPEG_DQT)
s_size("I_DQT_2", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DQT_2"):
s_binary("\x00\x08\x06\x06\x07\x06\x05\x08" + \
"\x07\x07\x07\x09\x09\x08\x0a\x0c" + \
"\x14\x0d\x0c\x0b\x0b\x0c\x19\x12" + \
"\x13\x0f\x14\x1d\x1a\x1f\x1e\x1d" + \
"\x1a\x1c\x1c\x20\x24\x2e\x27\x20" + \
"\x22\x2c\x23\x1c\x1c\x28\x37\x29" + \
"\x2c\x30\x31\x34\x34\x34\x1f\x27" + \
"\x39\x3d\x38\x32\x3c\x2e\x33\x34" + \
"\x32")
s_block_end("I_DQT_2")
s_block_end("O_DQT_2")
# ----------------------------------------------------------------------
# DQT
# ----------------------------------------------------------------------
s_binary("\xff\xdb\x00\x43\x01\x09\x09" + \
"\x09\x0c\x0b\x0c\x18\x0d\x0d\x18" + \
"\x32\x21\x1c\x21\x32\x32\x32\x32" + \
"\x32\x32\x32\x32\x32\x32\x32\x32" + \
"\x32\x32\x32\x32\x32\x32\x32\x32" + \
"\x32\x32\x32\x32\x32\x32\x32\x32" + \
"\x32\x32\x32\x32\x32\x32\x32\x32" + \
"\x32\x32\x32\x32\x32\x32\x32\x32" + \
"\x32\x32\x32\x32\x32\x32")
# ----------------------------------------------------------------------
# SOF0
# ----------------------------------------------------------------------
if s_block_start("O_SOF0_1"):
s_binary(JPEG_SOF0)
s_size("I_SOF0_1", endian=">", inclusive=True, length=2, fuzzable=True)
if s_block_start("I_SOF0_1"):
s_binary("\x08\x00\x01\x00\x01\x03\x01\x22" + \
"\x00\x02\x11\x01\x03\x11\x01")
s_block_end("I_SOF0_1")
s_block_end("O_SOF0_1")
# ----------------------------------------------------------------------
# DHT
# ----------------------------------------------------------------------
if s_block_start("O_DHT_10"):
s_binary(JPEG_DHT)
s_size("I_DHT_10", endian=">", inclusive=True, length=2, fuzzable=True)
if s_block_start("I_DHT_10"):
s_binary("\x00\x00\x01\x05\x01\x01\x01\x01" + \
"\x01\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x01\x02\x03\x04\x05\x06" + \
"\x07\x08\x09\x0a\x0b")
s_block_end("I_DHT_10")
s_block_end("O_DHT_10")
s_repeat("O_DHT_10", min_reps=0, max_reps=100, step=10)
# ----------------------------------------------------------------------
# DHTs - leave as static, no need to fuzz
# ----------------------------------------------------------------------
s_binary("\xff\xc4\x00\xb5\x10\x00" + \
"\x02\x01\x03\x03\x02\x04\x03\x05" + \
"\x05\x04\x04\x00\x00\x01\x7d\x01" + \
"\x02\x03\x00\x04\x11\x05\x12\x21" + \
"\x31\x41\x06\x13\x51\x61\x07\x22" + \
"\x71\x14\x32\x81\x91\xa1\x08\x23" + \
"\x42\xb1\xc1\x15\x52\xd1\xf0\x24" + \
"\x33\x62\x72\x82\x09\x0a\x16\x17" + \
"\x18\x19\x1a\x25\x26\x27\x28\x29" + \
"\x2a\x34\x35\x36\x37\x38\x39\x3a" + \
"\x43\x44\x45\x46\x47\x48\x49\x4a" + \
"\x53\x54\x55\x56\x57\x58\x59\x5a" + \
"\x63\x64\x65\x66\x67\x68\x69\x6a" + \
"\x73\x74\x75\x76\x77\x78\x79\x7a" + \
"\x83\x84\x85\x86\x87\x88\x89\x8a" + \
"\x92\x93\x94\x95\x96\x97\x98\x99" + \
"\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8" + \
"\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7" + \
"\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6" + \
"\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5" + \
"\xd6\xd7\xd8\xd9\xda\xe1\xe2\xe3" + \
"\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf1" + \
"\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9" + \
"\xfa\xff\xc4\x00\x1f\x01\x00\x03" + \
"\x01\x01\x01\x01\x01\x01\x01\x01" + \
"\x01\x00\x00\x00\x00\x00\x00\x01" + \
"\x02\x03\x04\x05\x06\x07\x08\x09" + \
"\x0a\x0b\xff\xc4\x00\xb5\x11\x00" + \
"\x02\x01\x02\x04\x04\x03\x04\x07" + \
"\x05\x04\x04\x00\x01\x02\x77\x00" + \
"\x01\x02\x03\x11\x04\x05\x21\x31" + \
"\x06\x12\x41\x51\x07\x61\x71\x13" + \
"\x22\x32\x81\x08\x14\x42\x91\xa1" + \
"\xb1\xc1\x09\x23\x33\x52\xf0\x15" + \
"\x62\x72\xd1\x0a\x16\x24\x34\xe1" + \
"\x25\xf1\x17\x18\x19\x1a\x26\x27" + \
"\x28\x29\x2a\x35\x36\x37\x38\x39" + \
"\x3a\x43\x44\x45\x46\x47\x48\x49" + \
"\x4a\x53\x54\x55\x56\x57\x58\x59" + \
"\x5a\x63\x64\x65\x66\x67\x68\x69" + \
"\x6a\x73\x74\x75\x76\x77\x78\x79" + \
"\x7a\x82\x83\x84\x85\x86\x87\x88" + \
"\x89\x8a\x92\x93\x94\x95\x96\x97" + \
"\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6" + \
"\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5" + \
"\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4" + \
"\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3" + \
"\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe2" + \
"\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea" + \
"\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9" + \
"\xfa")
# ----------------------------------------------------------------------
# SOS - file offset: 0x3019
#
# Awkward way of fuzzing it, but well... :) Fuzzing the size of the
# section is done with s_word() so I can avoid playing with the magical
# size calculation... The original size was 0x0C and I have no idea how
# that is possible.
# ----------------------------------------------------------------------
if s_block_start("O_SOS_10"):
s_binary(JPEG_SOS)
s_word(0x0c, endian=">")
if s_block_start("I_SOS_10"):
s_dword(0x03010002, endian=">")
s_dword(0x11031100, endian=">")
s_dword(0x3f00f9fe, endian=">")
s_dword(0x8a28a00f, endian=">")
s_block_end("I_SOS_10")
s_block_end("O_SOS_10")
# ----------------------------------------------------------------------
# EOI
# ----------------------------------------------------------------------
s_binary(JPEG_EOI)
s_block_end("I_APP1_0")
s_block_end("O_APP1_0")
s_repeat("O_APP1_0", min_reps=0, max_reps=100, step=10)
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_APP1 ffe1 0xab3 0x302f 0x3ae4
# ------------------------------------------------------------------------------
if s_block_start("O_APP1_1"):
s_binary(JPEG_APP1)
s_size("I_APP1_1", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_APP1_1"):
s_string("http://ns.adobe.com/xap/1.0/")
s_string("\x00")
# XML DATA
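        # The XMP payload below is split into s_string / s_delim tokens
        # (fuzzable strings and delimiters) held together by static s_binary
        # glue, so tag names, attribute values and separators get mutated
        # independently.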
s_binary("<?")
s_string("xpacket")
s_delim(" ")
s_string("begin")
s_delim("=")
s_binary("'")
s_string("\xEF\xBB\xBF")
s_binary("' id='")
s_string("W5M0MpCehiHzreSzNTczkc9d")
s_binary("'?>")
s_binary("\x0A")
s_string("<")
s_string("x")
s_delim(":")
s_string("xmpmeta")
s_delim(" ")
s_string("xmlns")
s_delim(":")
s_string("x")
s_binary("='")
s_string("adobe")
s_delim(":")
s_string("ns")
s_delim(":")
s_string("meta")
s_binary("/'>")
s_binary("\x0A")
s_binary("<")
s_string("rdf")
s_delim(":")
s_string("RDF")
s_binary(" xmlns:")
s_string("rdf")
s_binary("='http://www.w3.org/1999/02/22-rdf-syntax-ns#'>")
s_binary("\x0A\x0A")
s_binary(" <rdf:Description xmlns:exif='http://ns.adobe.com/exif/1.0/'>\x0A")
s_binary(" <")
s_string("exif")
s_delim(":")
s_string("Make")
s_binary(">")
s_string("Sony")
s_binary("</")
s_string("exif")
s_delim(":")
s_string("Make")
s_binary(">\x0A")
s_binary(" <exif:Model>C6903</exif:Model>\x0A")
s_binary(" <exif:Orientation>Bottom-right</exif:Orientation>\x0A")
s_binary(" <exif:XResolution>")
s_string("72")
s_binary("</exif:XResolution>\x0A")
s_binary(" <exif:YResolution>")
s_string("72")
s_binary("</exif:")
s_string("YResolution")
s_binary(">\x0A")
s_binary(" <exif:ResolutionUnit>Inch</exif:ResolutionUnit>\x0A")
s_binary(" <exif:Software>14.5.A.0.270_6_f100000f</exif:Software>\x0A")
s_binary(" <exif:DateTime>")
s_string("2015:06:28")
s_delim(" ")
s_string("10:27:35")
s_binary("</exif:DateTime>\x0A")
s_binary(" <exif:YCbCrPositioning>Centered</exif:YCbCrPositioning>\x0A")
s_binary(" <exif:Compression>JPEG compression</exif:Compression>\x0A")
s_binary(" <exif:Orientation>Bottom-right</exif:Orientation>\x0A")
s_string(" <exif:XResolution>72</exif:XResolution>")
s_binary("\x0A")
s_string(" <exif:YResolution>72</exif:YResolution>")
s_binary("\x0A")
s_binary(" <exif:ResolutionUnit>Inch</exif:ResolutionUnit>\x0A" +\
" <exif:ExposureTime>1/8 sec.</exif:ExposureTime>\x0A" +\
" <exif:FNumber>f/2.0</exif:FNumber>\x0A" +\
" <exif:ISOSpeedRatings>\x0A" +\
" <rdf:Seq>\x0A" +\
" <rdf:li>6400</rdf:li>\x0A" +\
" </rdf:Seq>\x0A" +\
" </exif:ISOSpeedRatings>\x0A")
s_binary(" <exif:ExifVersion>")
s_string("Exif Version 2.2")
s_binary("</exif:ExifVersion>\x0A")
s_binary(" <exif:DateTimeOriginal>")
s_string("2015:06:28")
s_delim(" ")
s_string("10:27:35")
s_binary("</exif:DateTimeOriginal>\x0A")
s_binary(" <exif:DateTimeDigitized>2015:06:28 10:27:35</exif:DateTimeDigitized>\x0A" +\
" <exif:ComponentsConfiguration>\x0A" +\
" <rdf:Seq>\x0A" +\
" <rdf:li>Y Cb Cr -</rdf:li>\x0A" +\
" </rdf:Seq>\x0A" +\
" </exif:ComponentsConfiguration>\x0A" +\
" <exif:ShutterSpeedValue>3.00 EV (1/8 sec.)</exif:ShutterSpeedValue>\x0A" +\
" <exif:ExposureBiasValue>0.00 EV</exif:ExposureBiasValue>\x0A" +\
" <exif:MeteringMode>Pattern</exif:MeteringMode>\x0A" +\
" <exif:LightSource>Unknown</exif:LightSource>\x0A")
s_binary(" <exif:Flash rdf:")
s_string("parseType")
s_delim("='")
s_string("Resource")
s_binary("'>\x0A")
s_binary(" </exif:Flash>\x0A")
s_binary(" <exif:FocalLength>4.9 mm</exif:FocalLength>\x0A" +\
" <exif:MakerNote>10890 bytes undefined data</exif:MakerNote>\x0A" +\
" <exif:SubsecTime>598889</exif:SubsecTime>\x0A" +\
" <exif:SubSecTimeOriginal>598889</exif:SubSecTimeOriginal>\x0A" +\
" <exif:SubSecTimeDigitized>598889</exif:SubSecTimeDigitized>\x0A" +\
" <exif:FlashPixVersion>FlashPix Version 1.0</exif:FlashPixVersion>\x0A")
s_binary(" <exif:ColorSpace>")
s_string("sRGB")
s_binary("</exif:ColorSpace>\x0A")
s_binary(" <exif:PixelXDimension>")
s_string("3840")
s_binary("</exif:PixelXDimension>")
s_binary("\x0A")
s_binary(" <exif:PixelYDimension>")
s_string("2160")
s_binary("</exif:PixelYDimension>")
s_binary("\x0A")
s_binary(" <exif:CustomRendered>Normal process</exif:CustomRendered>\x0A" +\
" <exif:ExposureMode>Auto exposure</exif:ExposureMode>\x0A" +\
" <exif:WhiteBalance>Auto white balance</exif:WhiteBalance>\x0A")
s_binary(" <exif:DigitalZoomRatio>1.00</exif:DigitalZoomRatio>\x0A" +\
" <exif:SceneCaptureType>Standard</exif:SceneCaptureType>\x0A" +\
" <exif:SubjectDistanceRange>Unknown</exif:SubjectDistanceRange>\x0A" +\
" <exif:InteroperabilityIndex>R98</exif:InteroperabilityIndex>\x0A" +\
" <exif:InteroperabilityVersion>0100</exif:InteroperabilityVersion>\x0A")
s_binary(" </rdf:")
s_string("Description")
s_binary(">\x0A\x0A" +\
"</rdf:RDF>\x0A" +\
"</x:xmpmeta>\x0A")
s_binary("<?xpacket ")
s_string("end")
s_binary("='")
s_string("r")
s_binary("'?>\x0A")
s_block_end("I_APP1_1")
s_block_end("O_APP1_1")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DQT ffdb 0x43 0x3ae4 0x3b29
# ------------------------------------------------------------------------------
if s_block_start("O_DQT_0"):
s_binary(JPEG_DQT)
s_size("I_DQT_0", endian=">", inclusive=True, length=2, fuzzable=True)
if s_block_start("I_DQT_0"):
s_binary("\x00\x01\x01\x01\x01\x01\x01\x01" + \
"\x01\x01\x01\x01\x01\x01\x01\x01" + \
"\x02\x02\x01\x01\x01\x01\x03\x02" + \
"\x02\x02\x02\x03\x03\x04\x04\x03" + \
"\x03\x03\x03\x04\x04\x06\x05\x04" + \
"\x04\x05\x04\x03\x03\x05\x07\x05" + \
"\x05\x06\x06\x06\x06\x06\x04\x05" + \
"\x07\x07\x07\x06\x07\x06\x06\x06" + \
"\x06")
s_block_end("I_DQT_0")
s_block_end("O_DQT_0")
s_repeat("O_DQT_0", min_reps=0, max_reps=100, step=10)
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DQT ffdb 0x43 0x3b29 0x3b6e
# ------------------------------------------------------------------------------
if s_block_start("O_DQT_1"):
s_binary(JPEG_DQT)
s_size("I_DQT_1", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DQT_1"):
s_binary("\x01\x01\x01\x01\x01\x01\x01\x03" + \
"\x02\x02\x03\x06\x04\x03\x04\x06" + \
"\x06\x06\x06\x06\x06\x06\x06\x06" + \
"\x06\x06\x06\x06\x06\x06\x06\x06" + \
"\x06\x06\x06\x06\x06\x06\x06\x06" + \
"\x06\x06\x06\x06\x06\x06\x06\x06" + \
"\x06\x06\x06\x06\x06\x06\x06\x06" + \
"\x06\x06\x06\x06\x06\x06\x06\x06" + \
"\x06")
s_block_end("I_DQT_1")
s_block_end("O_DQT_1")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOF2 ffc2 0x11 0x3b6e 0x3b81
# ------------------------------------------------------------------------------
if s_block_start("O_SOF2_0"):
s_binary(JPEG_SOF2)
s_size("I_SOF2_0", endian=">", inclusive=True, length=2, fuzzable=True)
if s_block_start("I_SOF2_0"):
s_dword(0x08000100, endian=">")
s_dword(0x01030111, endian=">")
s_dword(0x00021101, endian=">")
s_word(0x0311, endian=">")
s_byte(0x01)
s_block_end("I_SOF2_0")
s_block_end("O_SOF2_0")
s_repeat("O_SOF2_0", min_reps=0, max_reps=100, step=10)
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DHT ffc4 0x14 0x3b81 0x3b97
# ------------------------------------------------------------------------------
if s_block_start("O_DHT_0"):
s_binary(JPEG_DHT)
s_size("I_DHT_0", endian=">", inclusive=True, length=2, fuzzable=True)
if s_block_start("I_DHT_0"):
s_dword(0x00010000, endian=">")
s_dword(0x00, endian=">")
s_dword(0x00, endian=">")
s_dword(0x00, endian=">")
s_word(0x000a, endian=">")
s_block_end("I_DHT_0")
s_block_end("O_DHT_0")
s_repeat("O_DHT_0", min_reps=0, max_reps=100, step=10)
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DHT ffc4 0x14 0x3b97 0x3bad
# ------------------------------------------------------------------------------
if s_block_start("O_DHT_1"):
s_binary(JPEG_DHT)
s_size("I_DHT_1", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DHT_1"):
s_binary("\x01\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00")
s_block_end("I_DHT_1")
s_block_end("O_DHT_1")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOS ffda 0xc 0x3bad 0x3bbc
# ------------------------------------------------------------------------------
if s_block_start("O_SOS_0"):
s_binary(JPEG_SOS)
s_size("I_SOS_0", endian=">", inclusive=False, length=2, fuzzable=True)
if s_block_start("I_SOS_0"):
s_dword("\x03\x01\x00\x02", endian=">")
s_dword("\x10\x03\x10\x00", endian=">")
s_dword("\x00\x01\x3f\xe7", endian=">")
s_block_end("I_SOS_0")
s_block_end("O_SOS_0")
s_repeat("O_SOS_0", min_reps=0, max_reps=100, step=10)
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DHT ffc4 0x14 0x3bbd 0x3bd3
# ------------------------------------------------------------------------------
if s_block_start("O_DHT_2"):
s_binary(JPEG_DHT)
s_size("I_DHT_2", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DHT_2"):
s_binary("\x10\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00")
s_block_end("I_DHT_2")
s_block_end("O_DHT_2")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOS ffda 0x8 0x3bd3 0x3bde
# ------------------------------------------------------------------------------
if s_block_start("O_SOS_1"):
s_binary(JPEG_SOS)
s_size("I_SOS_1", endian=">", inclusive=False, math=lambda x: x+1, length=2, fuzzable=False)
if s_block_start("I_SOS_1"):
s_dword(0x01010001, endian=">")
s_word(0x0502, endian=">")
s_byte(0x7f)
s_block_end("I_SOS_1")
s_block_end("O_SOS_1")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DHT ffc4 0x14 0x3bde 0x3bf4
# ------------------------------------------------------------------------------
if s_block_start("O_DHT_3"):
s_binary(JPEG_DHT)
s_size("I_DHT_3", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DHT_3"):
s_binary("\x11\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00")
s_block_end("I_DHT_3")
s_block_end("O_DHT_3")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOS ffda 0x8 0x3bf4 0x3bff
# ------------------------------------------------------------------------------
if s_block_start("O_SOS_2"):
s_binary(JPEG_SOS)
s_size("I_SOS_2", endian=">", inclusive=False, math=lambda x: x+1, length=2, fuzzable=False)
if s_block_start("I_SOS_2"):
s_binary("\x01\x03\x01\x01\x3f\x01\x7f")
s_block_end("I_SOS_2")
s_block_end("O_SOS_2")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DHT ffc4 0x14 0x3bff 0x3c15
# ------------------------------------------------------------------------------
if s_block_start("O_DHT_4"):
s_binary(JPEG_DHT)
s_size("I_DHT_4", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DHT_4"):
s_binary("\x11\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00")
s_block_end("I_DHT_4")
s_block_end("O_DHT_4")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOS ffda 0x8 0x3c15 0x3c20
# ------------------------------------------------------------------------------
if s_block_start("O_SOS_3"):
s_binary(JPEG_SOS)
s_size("I_SOS_3", endian=">", inclusive=False, math=lambda x: x+1, length=2, fuzzable=False)
if s_block_start("I_SOS_3"):
s_binary("\x01\x02\x01\x01\x3f\x01\x7f")
s_block_end("I_SOS_3")
s_block_end("O_SOS_3")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DHT ffc4 0x14 0x3c20 0x3c36
# ------------------------------------------------------------------------------
if s_block_start("O_DHT_5"):
s_binary(JPEG_DHT)
s_size("I_DHT_5", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DHT_5"):
s_binary("\x10\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00")
s_block_end("I_DHT_5")
s_block_end("O_DHT_5")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOS ffda 0x8 0x3c36 0x3c41
# ------------------------------------------------------------------------------
if s_block_start("O_SOS_4"):
s_binary(JPEG_SOS)
s_size("I_SOS_4", endian=">", inclusive=False, math=lambda x: x+1, length=2, fuzzable=False)
if s_block_start("I_SOS_4"):
s_binary("\x01\x01\x00\x06\x3f\x02\x7f")
s_block_end("I_SOS_4")
s_block_end("O_SOS_4")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DHT ffc4 0x14 0x3c41 0x3c57
# ------------------------------------------------------------------------------
if s_block_start("O_DHT_6"):
s_binary(JPEG_DHT)
s_size("I_DHT_6", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DHT_6"):
s_binary("\x10\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00")
s_block_end("I_DHT_6")
s_block_end("O_DHT_6")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOS ffda 0x8 0x3c57 0x3c62
# ------------------------------------------------------------------------------
if s_block_start("O_SOS_5"):
s_binary(JPEG_SOS)
s_size("I_SOS_5", endian=">", inclusive=False, math=lambda x: x+1, length=2, fuzzable=False)
if s_block_start("I_SOS_5"):
s_binary("\x01\x01\x00\x01\x3f\x21\x7f")
s_block_end("I_SOS_5")
s_block_end("O_SOS_5")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOS ffda 0xc 0x3c62 0x3c71
# ------------------------------------------------------------------------------
if s_block_start("O_SOS_6"):
s_binary(JPEG_SOS)
s_size("I_SOS_6", endian=">", inclusive=False, math=lambda x: x+1, length=2, fuzzable=False)
if s_block_start("I_SOS_6"):
s_binary("\x03\x01\x00\x02\x00\x03\x00\x00" + \
"\x00\x10\x1f")
s_block_end("I_SOS_6")
s_block_end("O_SOS_6")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DHT ffc4 0x14 0x3c71 0x3c87
# ------------------------------------------------------------------------------
if s_block_start("O_DHT_7"):
s_binary(JPEG_DHT)
s_size("I_DHT_7", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DHT_7"):
s_binary("\x11\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00")
s_block_end("I_DHT_7")
s_block_end("O_DHT_7")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOS ffda 0x8 0x3c87 0x3c92
# ------------------------------------------------------------------------------
if s_block_start("O_SOS_7"):
s_binary(JPEG_SOS)
s_size("I_SOS_7", endian=">", inclusive=False, math=lambda x: x+1, length=2, fuzzable=False)
if s_block_start("I_SOS_7"):
s_binary("\x01\x03\x01\x01\x3f\x10\x7f")
s_block_end("I_SOS_7")
s_block_end("O_SOS_7")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DHT ffc4 0x14 0x3c92 0x3ca8
# ------------------------------------------------------------------------------
if s_block_start("O_DHT_8"):
s_binary(JPEG_DHT)
s_size("I_DHT_8", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DHT_8"):
s_binary("\x11\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00")
s_block_end("I_DHT_8")
s_block_end("O_DHT_8")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOS ffda 0x8 0x3ca8 0x3cb3
# ------------------------------------------------------------------------------
if s_block_start("O_SOS_8"):
s_binary(JPEG_SOS)
s_size("I_SOS_8", endian=">", inclusive=False, math=lambda x: x+1, length=2, fuzzable=False)
if s_block_start("I_SOS_8"):
s_binary("\x01\x02\x01\x01\x3f\x10\x7f")
s_block_end("I_SOS_8")
s_block_end("O_SOS_8")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_DHT ffc4 0x14 0x3cb3 0x3cc9
# ------------------------------------------------------------------------------
if s_block_start("O_DHT_9"):
s_binary(JPEG_DHT)
s_size("I_DHT_9", endian=">", inclusive=True, length=2, fuzzable=False)
if s_block_start("I_DHT_9"):
s_binary("\x10\x01\x00\x00\x00\x00\x00\x00" + \
"\x00\x00\x00\x00\x00\x00\x00\x00" + \
"\x00\x00")
s_block_end("I_DHT_9")
s_block_end("O_DHT_9")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_SOS ffda 0x8 0x3cc9 0x3cd4
# ------------------------------------------------------------------------------
if s_block_start("O_SOS_9"):
s_binary(JPEG_SOS)
s_size("I_SOS_9", endian=">", inclusive=False, math=lambda x: x+1, length=2, fuzzable=False)
if s_block_start("I_SOS_9"):
s_binary("\x01\x01\x00\x01\x3f\x10\x7f")
s_block_end("I_SOS_9")
s_block_end("O_SOS_9")
# ------------------------------------------------------------------------------
# Section Bin name Size Start offset End offset
# JPEG_EOI ffd9 0x0 0x3cd4 0x3cd6
# ------------------------------------------------------------------------------
s_string(JPEG_EOI)
|
gpl-2.0
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Lib/plat-irix6/ERRNO.py
|
9
|
2657
|
# Generated by h2py from /usr/include/errno.h
# Included from sys/errno.h
# Included from standards.h
__KBASE = 1000
__IRIXBASE = 1000
__FTNBASE = 4000
__FTNLAST = 5999
EPERM = 1
ENOENT = 2
ESRCH = 3
EINTR = 4
EIO = 5
ENXIO = 6
E2BIG = 7
ENOEXEC = 8
EBADF = 9
ECHILD = 10
EAGAIN = 11
ENOMEM = 12
EACCES = 13
EFAULT = 14
ENOTBLK = 15
EBUSY = 16
EEXIST = 17
EXDEV = 18
ENODEV = 19
ENOTDIR = 20
EISDIR = 21
EINVAL = 22
ENFILE = 23
EMFILE = 24
ENOTTY = 25
ETXTBSY = 26
EFBIG = 27
ENOSPC = 28
ESPIPE = 29
EROFS = 30
EMLINK = 31
EPIPE = 32
EDOM = 33
ERANGE = 34
ENOMSG = 35
EIDRM = 36
ECHRNG = 37
EL2NSYNC = 38
EL3HLT = 39
EL3RST = 40
ELNRNG = 41
EUNATCH = 42
ENOCSI = 43
EL2HLT = 44
EDEADLK = 45
ENOLCK = 46
ECKPT = 47
EBADE = 50
EBADR = 51
EXFULL = 52
ENOANO = 53
EBADRQC = 54
EBADSLT = 55
EDEADLOCK = 56
EBFONT = 57
ENOSTR = 60
ENODATA = 61
ETIME = 62
ENOSR = 63
ENONET = 64
ENOPKG = 65
EREMOTE = 66
ENOLINK = 67
EADV = 68
ESRMNT = 69
ECOMM = 70
EPROTO = 71
EMULTIHOP = 74
EBADMSG = 77
ENAMETOOLONG = 78
EOVERFLOW = 79
ENOTUNIQ = 80
EBADFD = 81
EREMCHG = 82
ELIBACC = 83
ELIBBAD = 84
ELIBSCN = 85
ELIBMAX = 86
ELIBEXEC = 87
EILSEQ = 88
ENOSYS = 89
ELOOP = 90
ERESTART = 91
ESTRPIPE = 92
ENOTEMPTY = 93
EUSERS = 94
ENOTSOCK = 95
EDESTADDRREQ = 96
EMSGSIZE = 97
EPROTOTYPE = 98
ENOPROTOOPT = 99
EPROTONOSUPPORT = 120
ESOCKTNOSUPPORT = 121
EOPNOTSUPP = 122
EPFNOSUPPORT = 123
EAFNOSUPPORT = 124
EADDRINUSE = 125
EADDRNOTAVAIL = 126
ENETDOWN = 127
ENETUNREACH = 128
ENETRESET = 129
ECONNABORTED = 130
ECONNRESET = 131
ENOBUFS = 132
EISCONN = 133
ENOTCONN = 134
ESHUTDOWN = 143
ETOOMANYREFS = 144
ETIMEDOUT = 145
ECONNREFUSED = 146
EHOSTDOWN = 147
EHOSTUNREACH = 148
LASTERRNO = ENOTCONN
EWOULDBLOCK = __KBASE+101
EWOULDBLOCK = EAGAIN
EALREADY = 149
EINPROGRESS = 150
ESTALE = 151
EIORESID = 500
EUCLEAN = 135
ENOTNAM = 137
ENAVAIL = 138
EISNAM = 139
EREMOTEIO = 140
EINIT = 141
EREMDEV = 142
ECANCELED = 158
ENOLIMFILE = 1001
EPROCLIM = 1002
EDISJOINT = 1003
ENOLOGIN = 1004
ELOGINLIM = 1005
EGROUPLOOP = 1006
ENOATTACH = 1007
ENOTSUP = 1008
ENOATTR = 1009
EFSCORRUPTED = 1010
EDIRCORRUPTED = 1010
EWRONGFS = 1011
EDQUOT = 1133
ENFSREMOTE = 1135
ECONTROLLER = 1300
ENOTCONTROLLER = 1301
EENQUEUED = 1302
ENOTENQUEUED = 1303
EJOINED = 1304
ENOTJOINED = 1305
ENOPROC = 1306
EMUSTRUN = 1307
ENOTSTOPPED = 1308
ECLOCKCPU = 1309
EINVALSTATE = 1310
ENOEXIST = 1311
EENDOFMINOR = 1312
EBUFSIZE = 1313
EEMPTY = 1314
ENOINTRGROUP = 1315
EINVALMODE = 1316
ECANTEXTENT = 1317
EINVALTIME = 1318
EDESTROYED = 1319
EBDHDL = 1400
EDELAY = 1401
ENOBWD = 1402
EBADRSPEC = 1403
EBADTSPEC = 1404
EBADFILT = 1405
EMIGRATED = 1500
EMIGRATING = 1501
ECELLDOWN = 1502
EMEMRETRY = 1600
|
gpl-2.0
|
iksaif/euscan
|
pym/euscan/ebuild.py
|
1
|
3030
|
import os
import sys
import imp
import portage
from portage.const import VDB_PATH
from portage import _encodings
from portage import _shell_quote
from portage import _unicode_decode
from portage import _unicode_encode
# Stolen from the ebuild command
def package_from_ebuild(ebuild):
pf = None
if ebuild.endswith(".ebuild"):
pf = os.path.basename(ebuild)[:-7]
else:
return False
if not os.path.isabs(ebuild):
mycwd = os.getcwd()
        # Try to get the non-canonical path from the PWD environment variable,
        # since the canonical path returned from os.getcwd() may be
        # unusable in cases where the directory structure is built from
        # symlinks.
pwd = os.environ.get('PWD', '')
if sys.hexversion < 0x3000000:
pwd = _unicode_decode(pwd, encoding=_encodings['content'],
errors='strict')
if pwd and pwd != mycwd and \
os.path.realpath(pwd) == mycwd:
mycwd = portage.normalize_path(pwd)
ebuild = os.path.join(mycwd, ebuild)
ebuild = portage.normalize_path(ebuild)
# portdbapi uses the canonical path for the base of the portage tree, but
# subdirectories of the base can be built from symlinks (like crossdev
# does).
ebuild_portdir = os.path.realpath(
os.path.dirname(os.path.dirname(os.path.dirname(ebuild))))
ebuild = os.path.join(ebuild_portdir, *ebuild.split(os.path.sep)[-3:])
vdb_path = os.path.join(portage.settings['ROOT'], VDB_PATH)
# Make sure that portdb.findname() returns the correct ebuild.
if ebuild_portdir != vdb_path and \
ebuild_portdir not in portage.portdb.porttrees:
if sys.hexversion >= 0x3000000:
os.environ["PORTDIR_OVERLAY"] = \
os.environ.get("PORTDIR_OVERLAY", "") + \
" " + _shell_quote(ebuild_portdir)
else:
os.environ["PORTDIR_OVERLAY"] = \
os.environ.get("PORTDIR_OVERLAY", "") + \
" " + _unicode_encode(_shell_quote(ebuild_portdir),
encoding=_encodings['content'], errors='strict')
portage.close_portdbapi_caches()
imp.reload(portage)
del portage.portdb.porttrees[1:]
if ebuild_portdir != portage.portdb.porttree_root:
portage.portdb.porttrees.append(ebuild_portdir)
if not os.path.exists(ebuild):
return False
ebuild_split = ebuild.split("/")
cpv = "%s/%s" % (ebuild_split[-3], pf)
if not portage.catpkgsplit(cpv):
return False
if ebuild.startswith(os.path.join(portage.root, portage.const.VDB_PATH)):
mytree = "vartree"
portage_ebuild = portage.db[portage.root][mytree].dbapi.findname(cpv)
if os.path.realpath(portage_ebuild) != ebuild:
return False
else:
mytree = "porttree"
portage_ebuild = portage.portdb.findname(cpv)
if not portage_ebuild or portage_ebuild != ebuild:
return False
return cpv
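# A minimal usage sketch (hypothetical ebuild path, assuming the layout
# portage expects); package_from_ebuild() returns a "category/pkg-version"
# string on success and False otherwise:
#
#   cpv = package_from_ebuild("/usr/portage/dev-python/euscan/euscan-9999.ebuild")
#   if cpv:
#       print(portage.portdb.findname(cpv))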
|
gpl-2.0
|
Vishluck/sympy
|
sympy/calculus/tests/test_finite_diff.py
|
53
|
7632
|
from sympy.core.compatibility import range
from sympy import S, symbols, Function
from sympy.calculus.finite_diff import (
apply_finite_diff, finite_diff_weights, as_finite_diff
)
def test_apply_finite_diff():
x, h = symbols('x h')
f = Function('f')
assert (apply_finite_diff(1, [x-h, x+h], [f(x-h), f(x+h)], x) -
(f(x+h)-f(x-h))/(2*h)).simplify() == 0
assert (apply_finite_diff(1, [5, 6, 7], [f(5), f(6), f(7)], 5) -
(-S(3)/2*f(5) + 2*f(6) - S(1)/2*f(7))).simplify() == 0
def test_finite_diff_weights():
d = finite_diff_weights(1, [5, 6, 7], 5)
assert d[1][2] == [-S(3)/2, 2, -S(1)/2]
# Table 1, p. 702 in doi:10.1090/S0025-5718-1988-0935077-0
# --------------------------------------------------------
xl = [0, 1, -1, 2, -2, 3, -3, 4, -4]
# d holds all coefficients
d = finite_diff_weights(4, xl, S(0))
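    # d[m][j] holds the weights for the m-th derivative at 0 computed from
    # the first j+1 points of xl, padded with zeros to len(xl), which is
    # what the assertions below exercise.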
    # Zeroth derivative
for i in range(5):
assert d[0][i] == [S(1)] + [S(0)]*8
# First derivative
assert d[1][0] == [S(0)]*9
assert d[1][2] == [S(0), S(1)/2, -S(1)/2] + [S(0)]*6
assert d[1][4] == [S(0), S(2)/3, -S(2)/3, -S(1)/12, S(1)/12] + [S(0)]*4
assert d[1][6] == [S(0), S(3)/4, -S(3)/4, -S(3)/20, S(3)/20,
S(1)/60, -S(1)/60] + [S(0)]*2
assert d[1][8] == [S(0), S(4)/5, -S(4)/5, -S(1)/5, S(1)/5,
S(4)/105, -S(4)/105, -S(1)/280, S(1)/280]
# Second derivative
for i in range(2):
assert d[2][i] == [S(0)]*9
assert d[2][2] == [-S(2), S(1), S(1)] + [S(0)]*6
assert d[2][4] == [-S(5)/2, S(4)/3, S(4)/3, -S(1)/12, -S(1)/12] + [S(0)]*4
assert d[2][6] == [-S(49)/18, S(3)/2, S(3)/2, -S(3)/20, -S(3)/20,
S(1)/90, S(1)/90] + [S(0)]*2
assert d[2][8] == [-S(205)/72, S(8)/5, S(8)/5, -S(1)/5, -S(1)/5,
S(8)/315, S(8)/315, -S(1)/560, -S(1)/560]
# Third derivative
for i in range(3):
assert d[3][i] == [S(0)]*9
assert d[3][4] == [S(0), -S(1), S(1), S(1)/2, -S(1)/2] + [S(0)]*4
assert d[3][6] == [S(0), -S(13)/8, S(13)/8, S(1), -S(1),
-S(1)/8, S(1)/8] + [S(0)]*2
assert d[3][8] == [S(0), -S(61)/30, S(61)/30, S(169)/120, -S(169)/120,
-S(3)/10, S(3)/10, S(7)/240, -S(7)/240]
# Fourth derivative
for i in range(4):
assert d[4][i] == [S(0)]*9
assert d[4][4] == [S(6), -S(4), -S(4), S(1), S(1)] + [S(0)]*4
assert d[4][6] == [S(28)/3, -S(13)/2, -S(13)/2, S(2), S(2),
-S(1)/6, -S(1)/6] + [S(0)]*2
assert d[4][8] == [S(91)/8, -S(122)/15, -S(122)/15, S(169)/60, S(169)/60,
-S(2)/5, -S(2)/5, S(7)/240, S(7)/240]
# Table 2, p. 703 in doi:10.1090/S0025-5718-1988-0935077-0
# --------------------------------------------------------
xl = [[j/S(2) for j in list(range(-i*2+1, 0, 2))+list(range(1, i*2+1, 2))]
for i in range(1, 5)]
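    # xl[i] collects the half-integer offsets (+-1/2, +-3/2, ...) around 0
    # that form the staggered ("half-way") grids used for Table 2.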
# d holds all coefficients
d = [finite_diff_weights({0: 1, 1: 2, 2: 4, 3: 4}[i], xl[i], 0) for
i in range(4)]
# Zeroth derivative
assert d[0][0][1] == [S(1)/2, S(1)/2]
assert d[1][0][3] == [-S(1)/16, S(9)/16, S(9)/16, -S(1)/16]
assert d[2][0][5] == [S(3)/256, -S(25)/256, S(75)/128, S(75)/128,
-S(25)/256, S(3)/256]
assert d[3][0][7] == [-S(5)/2048, S(49)/2048, -S(245)/2048, S(1225)/2048,
S(1225)/2048, -S(245)/2048, S(49)/2048, -S(5)/2048]
# First derivative
assert d[0][1][1] == [-S(1), S(1)]
assert d[1][1][3] == [S(1)/24, -S(9)/8, S(9)/8, -S(1)/24]
assert d[2][1][5] == [-S(3)/640, S(25)/384, -S(75)/64, S(75)/64,
-S(25)/384, S(3)/640]
assert d[3][1][7] == [S(5)/7168, -S(49)/5120, S(245)/3072, S(-1225)/1024,
S(1225)/1024, -S(245)/3072, S(49)/5120, -S(5)/7168]
# Reasonably the rest of the table is also correct... (testing of that
# deemed excessive at the moment)
def test_as_finite_diff():
x, h = symbols('x h')
f = Function('f')
# Central 1st derivative at gridpoint
assert (as_finite_diff(f(x).diff(x), [x-2, x-1, x, x+1, x+2]) -
(S(1)/12*(f(x-2)-f(x+2)) + S(2)/3*(f(x+1)-f(x-1)))).simplify() == 0
# Central 1st derivative "half-way"
assert (as_finite_diff(f(x).diff(x)) -
(f(x + S(1)/2)-f(x - S(1)/2))).simplify() == 0
assert (as_finite_diff(f(x).diff(x), h) -
(f(x + h/S(2))-f(x - h/S(2)))/h).simplify() == 0
assert (as_finite_diff(f(x).diff(x), [x - 3*h, x-h, x+h, x + 3*h]) -
(S(9)/(8*2*h)*(f(x+h) - f(x-h)) +
S(1)/(24*2*h)*(f(x - 3*h) - f(x + 3*h)))).simplify() == 0
# One sided 1st derivative at gridpoint
assert (as_finite_diff(f(x).diff(x), [0, 1, 2], 0) -
(-S(3)/2*f(0) + 2*f(1) - f(2)/2)).simplify() == 0
assert (as_finite_diff(f(x).diff(x), [x, x+h], x) -
(f(x+h) - f(x))/h).simplify() == 0
assert (as_finite_diff(f(x).diff(x), [x-h, x, x+h], x-h) -
(-S(3)/(2*h)*f(x-h) + 2/h*f(x) -
S(1)/(2*h)*f(x+h))).simplify() == 0
# One sided 1st derivative "half-way"
assert (as_finite_diff(f(x).diff(x), [x-h, x+h, x + 3*h, x + 5*h, x + 7*h])
- 1/(2*h)*(-S(11)/(12)*f(x-h) + S(17)/(24)*f(x+h)
+ S(3)/8*f(x + 3*h) - S(5)/24*f(x + 5*h)
+ S(1)/24*f(x + 7*h))).simplify() == 0
# Central 2nd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 2), [x-h, x, x+h]) -
h**-2 * (f(x-h) + f(x+h) - 2*f(x))).simplify() == 0
assert (as_finite_diff(f(x).diff(x, 2), [x - 2*h, x-h, x, x+h, x + 2*h]) -
h**-2 * (-S(1)/12*(f(x - 2*h) + f(x + 2*h)) +
S(4)/3*(f(x+h) + f(x-h)) - S(5)/2*f(x))).simplify() == 0
# Central 2nd derivative "half-way"
assert (as_finite_diff(f(x).diff(x, 2), [x - 3*h, x-h, x+h, x + 3*h]) -
(2*h)**-2 * (S(1)/2*(f(x - 3*h) + f(x + 3*h)) -
S(1)/2*(f(x+h) + f(x-h)))).simplify() == 0
# One sided 2nd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 2), [x, x+h, x + 2*h, x + 3*h]) -
h**-2 * (2*f(x) - 5*f(x+h) +
4*f(x+2*h) - f(x+3*h))).simplify() == 0
# One sided 2nd derivative at "half-way"
assert (as_finite_diff(f(x).diff(x, 2), [x-h, x+h, x + 3*h, x + 5*h]) -
(2*h)**-2 * (S(3)/2*f(x-h) - S(7)/2*f(x+h) + S(5)/2*f(x + 3*h) -
S(1)/2*f(x + 5*h))).simplify() == 0
# Central 3rd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 3)) -
(-f(x - 3/S(2)) + 3*f(x - 1/S(2)) -
3*f(x + 1/S(2)) + f(x + 3/S(2)))).simplify() == 0
assert (as_finite_diff(
f(x).diff(x, 3), [x - 3*h, x - 2*h, x-h, x, x+h, x + 2*h, x + 3*h]) -
h**-3 * (S(1)/8*(f(x - 3*h) - f(x + 3*h)) - f(x - 2*h) +
f(x + 2*h) + S(13)/8*(f(x-h) - f(x+h)))).simplify() == 0
# Central 3rd derivative at "half-way"
assert (as_finite_diff(f(x).diff(x, 3), [x - 3*h, x-h, x+h, x + 3*h]) -
(2*h)**-3 * (f(x + 3*h)-f(x - 3*h) +
3*(f(x-h)-f(x+h)))).simplify() == 0
# One sided 3rd derivative at gridpoint
assert (as_finite_diff(f(x).diff(x, 3), [x, x+h, x + 2*h, x + 3*h]) -
h**-3 * (f(x + 3*h)-f(x) + 3*(f(x+h)-f(x + 2*h)))).simplify() == 0
# One sided 3rd derivative at "half-way"
assert (as_finite_diff(f(x).diff(x, 3), [x-h, x+h, x + 3*h, x + 5*h]) -
(2*h)**-3 * (f(x + 5*h)-f(x-h) +
3*(f(x+h)-f(x + 3*h)))).simplify() == 0
|
bsd-3-clause
|
wuliming/pcp
|
src/pcp/uptime/pcp-uptime.py
|
1
|
4952
|
#!/usr/bin/env pmpython
#
# Copyright (C) 2014-2015 Red Hat.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# pylint: disable=C0103
""" Tell how long the system has been running """
import sys
from pcp import pmapi
from cpmapi import PM_TYPE_U32, PM_TYPE_FLOAT
from cpmapi import PM_CONTEXT_ARCHIVE, PM_MODE_FORW, PM_ERR_VALUE
def print_timestamp(stamp):
""" Report the sample time (struct tm) in HH:MM:SS form """
return " %02d:%02d:%02d" % (stamp.tm_hour, stamp.tm_min, stamp.tm_sec)
def print_uptime(seconds):
""" Report on system up-time in days, hours and minutes """
days = int(seconds / (60 * 60 * 24))
minutes = int(seconds / 60)
hours = int(minutes / 60)
hours = int(hours % 24)
minutes = int(minutes % 60)
result = " up"
if days > 1:
result += " %d days," % days
elif days != 0:
result += " 1 day,"
if hours != 0:
result += ' %2d:%02d,' % (hours, minutes)
else:
result += ' %d min,' % minutes
return result
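# For example, 93784 seconds is 1 day, 2 hours and 3 minutes, so
# print_uptime(93784) yields roughly " up 1 day, 2:03," (modulo the
# %2d field padding above).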
def print_users(nusers):
""" Report the number of logged in users at sample time """
if nusers == 1:
return ' 1 user, '
else:
return (' %2d users, ' % nusers)
def print_load(one, five, fifteen):
""" Report 1, 5, 15 minute load averages at sample time """
return ' load average: %.2f, %.2f, %.2f' % (one, five, fifteen)
class Uptime(object):
""" Gives a one line display of the following information:
The current time;
How long the system has been running;
How many users are currently logged on; and
The system load averages for the past 1, 5, and 15 minutes.
Knows about some of the default PCP arguments - can function
using remote hosts or historical data, using the timezone of
the metric source, at an offset within an archive, and so on.
"""
def __init__(self):
""" Construct object - prepare for command line handling """
self.context = None
self.opts = pmapi.pmOptions()
self.opts.pmSetShortOptions("V?")
self.opts.pmSetLongOptionHeader("Options")
self.opts.pmSetLongOptionVersion()
self.opts.pmSetLongOptionHelp()
def execute(self):
""" Using a PMAPI context (could be either host or archive),
fetch and report a fixed set of values related to uptime.
"""
metrics = ('kernel.all.uptime', 'kernel.all.nusers', 'kernel.all.load')
pmids = self.context.pmLookupName(metrics)
descs = self.context.pmLookupDescs(pmids)
result = self.context.pmFetch(pmids)
if result.contents.numpmid != len(metrics):
raise pmapi.pmErr(PM_ERR_VALUE)
uptime = ''
sample_time = result.contents.timestamp.tv_sec
time_struct = self.context.pmLocaltime(sample_time)
uptime += print_timestamp(time_struct)
atom = self.context.pmExtractValue(
result.contents.get_valfmt(0),
result.contents.get_vlist(0, 0),
descs[0].contents.type, PM_TYPE_U32)
uptime += print_uptime(atom.ul)
atom = self.context.pmExtractValue(
result.contents.get_valfmt(1),
result.contents.get_vlist(1, 0),
descs[1].contents.type, PM_TYPE_U32)
uptime += print_users(atom.ul)
averages = [1, 5, 15]
for inst in range(3):
averages[inst] = self.context.pmExtractValue(
result.contents.get_valfmt(2),
result.contents.get_vlist(2, inst),
descs[2].contents.type, PM_TYPE_FLOAT)
uptime += print_load(averages[0].f, averages[1].f, averages[2].f)
print(uptime)
self.context.pmFreeResult(result)
def connect(self):
""" Establish a PMAPI context to archive, host or local, via args """
self.context = pmapi.pmContext.fromOptions(self.opts, sys.argv)
if self.context.type == PM_CONTEXT_ARCHIVE:
origin = self.opts.pmGetOptionOrigin()
self.context.pmSetMode(PM_MODE_FORW, origin, 0)
if __name__ == '__main__':
try:
UPTIME = Uptime()
UPTIME.connect()
UPTIME.execute()
except pmapi.pmErr as error:
print("uptime:", error.message())
except pmapi.pmUsageErr as usage:
usage.message()
except KeyboardInterrupt:
pass
|
lgpl-2.1
|
muffinresearch/addons-server
|
apps/reviews/tests/test_feeds.py
|
15
|
1267
|
# -*- coding: utf-8 -*-
import mock
from nose.tools import eq_
import amo.tests
from reviews import feeds
from translations.models import Translation
class FeedTest(amo.tests.TestCase):
# Rub some unicode all over the reviews feed.
def setUp(self):
super(FeedTest, self).setUp()
self.feed = feeds.ReviewsRss()
self.u = u'Ελληνικά'
self.wut = Translation(localized_string=self.u, locale='el')
self.addon = mock.Mock()
self.addon.name = self.wut
self.user = mock.Mock()
self.user.name = self.u
self.review = mock.Mock()
self.review.title = self.wut
self.review.rating = 4
self.review.user = self.user
def test_title(self):
eq_(self.feed.title(self.addon),
u'Reviews for %s' % self.u)
def test_item_title(self):
eq_(self.feed.item_title(self.review),
'Rated %s out of 5 stars : %s' % (self.review.rating, self.u))
self.review.rating = None
eq_(self.feed.item_title(self.review), self.u)
def test_item_author_name(self):
eq_(self.feed.item_author_name(self.review), self.u)
self.user.username = self.u
eq_(self.feed.item_author_name(self.review), self.u)
|
bsd-3-clause
|
DaanHoogland/cloudstack
|
test/integration/component/test_storage_motion.py
|
6
|
12352
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Storage motion
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
#Import System modules
import time
_multiprocess_shared_ = True
class Services:
"""Test VM Life Cycle Services
"""
def __init__(self):
self.services = {
"disk_offering":{
"displaytext": "Small",
"name": "Small",
"disksize": 1
},
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended in create account to
# ensure unique username generated each time
"password": "password",
},
"small":
# Create a small virtual machine instance with disk offering
{
"displayname": "testserver",
"username": "root", # VM creds for SSH
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"service_offerings":
{
"small":
{
                     # Small service offering used to change the VM
# service offering from medium to small
"name": "Small Instance",
"displaytext": "Small Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 256,
}
},
"template": {
"displaytext": "Cent OS Template",
"name": "Cent OS Template",
"passwordenabled": True,
},
"sleep": 60,
"timeout": 10,
#Migrate VM to hostid
"ostype": 'CentOS 5.3 (64-bit)',
# CentOS 5.3 (64-bit)
}
class TestStorageMotion(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestStorageMotion, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls.hypervisor = cls.testClient.getHypervisorInfo()
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
# Set Zones and disk offerings
cls.services["small"]["zoneid"] = cls.zone.id
cls.services["small"]["template"] = template.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=domain.id
)
cls.small_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offerings"]["small"]
)
#create a virtual machine
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["small"],
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.small_offering.id,
mode=cls.services["mode"]
)
cls._cleanup = [
cls.small_offering,
cls.account
]
@classmethod
def tearDownClass(cls):
cls.api_client = super(TestStorageMotion, cls).getClsTestClient().getApiClient()
cleanup_resources(cls.api_client, cls._cleanup)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
        # Clean up, terminate the created resources
cleanup_resources(self.apiclient, self.cleanup)
return
@attr(tags=["advanced", "basic", "multicluster", "storagemotion", "xenserver"], required_hardware="true")
def test_01_migrate_vm_with_volume(self):
"""Test migrate virtual machine with its volumes
"""
# Validate the following
# 1. List hosts for migration of a vm. Pick a host that
# requires storage motion too.
# 2. Migrate vm to a host.
        # 3. The listVirtualMachines command should return this VM. The state
        #    of the VM should be "Running" and its host should be the host in
        #    a different cluster to which the VM was migrated.
if self.hypervisor.lower() in ["lxc"]:
self.skipTest("Migration across clusters is not supported on LXC")
hosts = Host.listForMigration(
self.apiclient,
virtualmachineid=self.virtual_machine.id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check the number of hosts in the zone"
)
# Migrate to a host that requires storage motion
hosts[:] = [host for host in hosts if host.requiresStorageMotion]
if hosts is None or len(hosts) == 0:
self.skipTest("No valid hosts for storage motion. Skipping")
host = hosts[0]
self.debug("Migrating VM-ID: %s to Host: %s" % (
self.virtual_machine.id,
host.id
))
cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd()
cmd.hostid = host.id
cmd.virtualmachineid = self.virtual_machine.id
self.apiclient.migrateVirtualMachineWithVolume(cmd)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_vm_response,
None,
"Check virtual machine is listVirtualMachines"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.id,
self.virtual_machine.id,
"Check virtual machine ID of migrated VM"
)
self.assertEqual(
vm_response.hostid,
host.id,
"Check destination hostID of migrated VM"
)
self.assertEqual(
vm_response.state,
'Running',
"Check the state of VM"
)
return
@attr(tags=["advanced", "basic", "multipool", "storagemotion", "xenserver"], required_hardware="false")
def test_02_migrate_volume(self):
"""Test migrate volume of a running vm
"""
# Validate the following
        # 1. List all the volumes of a vm. For each volume, do steps 2 to 4.
# 2. List storage pools for migrating volume of a vm. Multiple
# storage pools should be present in the cluster.
# 3. Migrate volume of the vm to another pool.
# 4. Check volume is present in the new pool and is in Ready state.
# TODO: add test case for data volume migrate and handle it for LXC
list_volumes_response = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(list_volumes_response, list),
True,
"Check list volumes response for valid list"
)
self.assertNotEqual(
list_volumes_response,
None,
"Check if volume exists in ListVolumes"
)
for volume in list_volumes_response:
pools = StoragePool.listForMigration(
self.apiclient,
id=volume.id
)
if not pools:
self.skipTest("No suitable storage pools found for volume migration. Skipping")
self.assert_(isinstance(pools, list), "invalid pool response from listStoragePoolsForMigration: %s" %pools)
self.assert_(len(pools) > 0, "no valid storage pools found for migration")
pool = pools[0]
self.debug("Migrating Volume-ID: %s to Pool: %s" % (
volume.id,
pool.id
))
Volume.migrate(
self.apiclient,
volumeid=volume.id,
storageid=pool.id,
livemigrate='true'
)
migrated_volume_response = list_volumes(
self.apiclient,
id=volume.id
)
self.assertEqual(
isinstance(migrated_volume_response, list),
True,
"Check list volumes response for valid list"
)
self.assertNotEqual(
migrated_volume_response,
None,
"Check if volume exists in ListVolumes"
)
migrated_volume = migrated_volume_response[0]
self.assertEqual(
migrated_volume.state,
'Ready',
"Check migrated volume is in Ready state"
)
self.assertEqual(
migrated_volume.storage,
pool.name,
"Check volume is on migrated pool"
)
return
|
apache-2.0
|
pasqualguerrero/django
|
tests/gis_tests/geoapp/tests.py
|
189
|
41436
|
from __future__ import unicode_literals
import re
import tempfile
from django.contrib.gis import gdal
from django.contrib.gis.db.models import Extent, MakeLine, Union
from django.contrib.gis.geos import (
GeometryCollection, GEOSGeometry, LinearRing, LineString, Point, Polygon,
fromstr,
)
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from ..utils import no_oracle, oracle, postgis, spatialite
from .models import (
City, Country, Feature, MinusOneSRID, NonConcreteModel, PennsylvaniaCity,
State, Track,
)
def postgis_bug_version():
spatial_version = getattr(connection.ops, "spatial_version", (0, 0, 0))
return spatial_version and (2, 0, 0) <= spatial_version <= (2, 0, 1)
@skipUnlessDBFeature("gis_enabled")
class GeoModelTest(TestCase):
fixtures = ['initial']
def test_fixtures(self):
"Testing geographic model initialization from fixtures."
# Ensuring that data was loaded from initial data fixtures.
self.assertEqual(2, Country.objects.count())
self.assertEqual(8, City.objects.count())
self.assertEqual(2, State.objects.count())
def test_proxy(self):
"Testing Lazy-Geometry support (using the GeometryProxy)."
# Testing on a Point
pnt = Point(0, 0)
nullcity = City(name='NullCity', point=pnt)
nullcity.save()
# Making sure TypeError is thrown when trying to set with an
# incompatible type.
for bad in [5, 2.0, LineString((0, 0), (1, 1))]:
try:
nullcity.point = bad
except TypeError:
pass
else:
self.fail('Should throw a TypeError')
# Now setting with a compatible GEOS Geometry, saving, and ensuring
# the save took, notice no SRID is explicitly set.
new = Point(5, 23)
nullcity.point = new
# Ensuring that the SRID is automatically set to that of the
# field after assignment, but before saving.
self.assertEqual(4326, nullcity.point.srid)
nullcity.save()
# Ensuring the point was saved correctly after saving
self.assertEqual(new, City.objects.get(name='NullCity').point)
# Setting the X and Y of the Point
nullcity.point.x = 23
nullcity.point.y = 5
# Checking assignments pre & post-save.
self.assertNotEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.save()
self.assertEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.delete()
# Testing on a Polygon
shell = LinearRing((0, 0), (0, 100), (100, 100), (100, 0), (0, 0))
inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40))
# Creating a State object using a built Polygon
ply = Polygon(shell, inner)
nullstate = State(name='NullState', poly=ply)
self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None
nullstate.save()
ns = State.objects.get(name='NullState')
self.assertEqual(ply, ns.poly)
# Testing the `ogr` and `srs` lazy-geometry properties.
if gdal.HAS_GDAL:
self.assertIsInstance(ns.poly.ogr, gdal.OGRGeometry)
self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb)
self.assertIsInstance(ns.poly.srs, gdal.SpatialReference)
self.assertEqual('WGS 84', ns.poly.srs.name)
# Changing the interior ring on the poly attribute.
new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30))
ns.poly[1] = new_inner
ply[1] = new_inner
self.assertEqual(4326, ns.poly.srid)
ns.save()
self.assertEqual(ply, State.objects.get(name='NullState').poly)
ns.delete()
@skipUnlessDBFeature("supports_transform")
def test_lookup_insert_transform(self):
"Testing automatic transform for lookups and inserts."
# San Antonio in 'WGS84' (SRID 4326)
sa_4326 = 'POINT (-98.493183 29.424170)'
wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84
# Oracle doesn't have SRID 3084, using 41157.
if oracle:
# San Antonio in 'Texas 4205, Southern Zone (1983, meters)' (SRID 41157)
# Used the following Oracle SQL to get this value:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(
# SDO_CS.TRANSFORM(SDO_GEOMETRY('POINT (-98.493183 29.424170)', 4326), 41157))
# )
# FROM DUAL;
nad_wkt = 'POINT (300662.034646583 5416427.45974934)'
nad_srid = 41157
else:
# San Antonio in 'NAD83(HARN) / Texas Centric Lambert Conformal' (SRID 3084)
# Used ogr.py in gdal 1.4.1 for this transform
nad_wkt = 'POINT (1645978.362408288754523 6276356.025927528738976)'
nad_srid = 3084
# Constructing & querying with a point from a different SRID. Oracle
# `SDO_OVERLAPBDYINTERSECT` operates differently from
# `ST_Intersects`, so contains is used instead.
nad_pnt = fromstr(nad_wkt, srid=nad_srid)
if oracle:
tx = Country.objects.get(mpoly__contains=nad_pnt)
else:
tx = Country.objects.get(mpoly__intersects=nad_pnt)
self.assertEqual('Texas', tx.name)
# Creating San Antonio. Remember the Alamo.
sa = City.objects.create(name='San Antonio', point=nad_pnt)
# Now verifying that San Antonio was transformed correctly
sa = City.objects.get(name='San Antonio')
self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6)
self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6)
# If the GeometryField SRID is -1, then we shouldn't perform any
# transformation if the SRID of the input geometry is different.
if spatialite and connection.ops.spatial_version < (3, 0, 0):
# SpatiaLite < 3 does not support missing SRID values.
return
m1 = MinusOneSRID(geom=Point(17, 23, srid=4326))
m1.save()
self.assertEqual(-1, m1.geom.srid)
def test_createnull(self):
"Testing creating a model instance and the geometry being None"
c = City()
self.assertEqual(c.point, None)
def test_geometryfield(self):
"Testing the general GeometryField."
Feature(name='Point', geom=Point(1, 1)).save()
Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save()
Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save()
Feature(name='GeometryCollection',
geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)),
Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save()
f_1 = Feature.objects.get(name='Point')
self.assertIsInstance(f_1.geom, Point)
self.assertEqual((1.0, 1.0), f_1.geom.tuple)
f_2 = Feature.objects.get(name='LineString')
self.assertIsInstance(f_2.geom, LineString)
self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple)
f_3 = Feature.objects.get(name='Polygon')
self.assertIsInstance(f_3.geom, Polygon)
f_4 = Feature.objects.get(name='GeometryCollection')
self.assertIsInstance(f_4.geom, GeometryCollection)
self.assertEqual(f_3.geom, f_4.geom[2])
@skipUnlessDBFeature("supports_transform")
def test_inherited_geofields(self):
"Test GeoQuerySet methods on inherited Geometry fields."
# Creating a Pennsylvanian city.
PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
# All transformation SQL will need to be performed on the
# _parent_ table.
qs = PennsylvaniaCity.objects.transform(32128)
self.assertEqual(1, qs.count())
for pc in qs:
self.assertEqual(32128, pc.point.srid)
def test_raw_sql_query(self):
"Testing raw SQL query."
cities1 = City.objects.all()
# Only PostGIS would support a 'select *' query because of its recognized
# HEXEWKB format for geometry fields
as_text = 'ST_AsText(%s)' if postgis else connection.ops.select
cities2 = City.objects.raw(
'select id, name, %s from geoapp_city' % as_text % 'point'
)
self.assertEqual(len(cities1), len(list(cities2)))
self.assertIsInstance(cities2[0].point, Point)
def test_dumpdata_loaddata_cycle(self):
"""
Test a dumpdata/loaddata cycle with geographic data.
"""
out = six.StringIO()
original_data = list(City.objects.all().order_by('name'))
call_command('dumpdata', 'geoapp.City', stdout=out)
result = out.getvalue()
houston = City.objects.get(name='Houston')
self.assertIn('"point": "%s"' % houston.point.ewkt, result)
# Reload now dumped data
with tempfile.NamedTemporaryFile(mode='w', suffix='.json') as tmp:
tmp.write(result)
tmp.seek(0)
call_command('loaddata', tmp.name, verbosity=0)
self.assertListEqual(original_data, list(City.objects.all().order_by('name')))
@skipUnlessDBFeature("gis_enabled")
class GeoLookupTest(TestCase):
fixtures = ['initial']
def test_disjoint_lookup(self):
"Testing the `disjoint` lookup type."
ptown = City.objects.get(name='Pueblo')
qs1 = City.objects.filter(point__disjoint=ptown.point)
self.assertEqual(7, qs1.count())
if connection.features.supports_real_shape_operations:
qs2 = State.objects.filter(poly__disjoint=ptown.point)
self.assertEqual(1, qs2.count())
self.assertEqual('Kansas', qs2[0].name)
def test_contains_contained_lookups(self):
"Testing the 'contained', 'contains', and 'bbcontains' lookup types."
# Getting Texas, yes we were a country -- once ;)
texas = Country.objects.get(name='Texas')
# Seeing what cities are in Texas, should get Houston and Dallas,
# and Oklahoma City because 'contained' only checks on the
# _bounding box_ of the Geometries.
if connection.features.supports_contained_lookup:
qs = City.objects.filter(point__contained=texas.mpoly)
self.assertEqual(3, qs.count())
cities = ['Houston', 'Dallas', 'Oklahoma City']
for c in qs:
self.assertIn(c.name, cities)
# Pulling out some cities.
houston = City.objects.get(name='Houston')
wellington = City.objects.get(name='Wellington')
pueblo = City.objects.get(name='Pueblo')
okcity = City.objects.get(name='Oklahoma City')
lawrence = City.objects.get(name='Lawrence')
# Now testing contains on the countries using the points for
# Houston and Wellington.
tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry
nz = Country.objects.get(mpoly__contains=wellington.point.hex) # Query w/EWKBHEX
self.assertEqual('Texas', tx.name)
self.assertEqual('New Zealand', nz.name)
# Spatialite 2.3 thinks that Lawrence is in Puerto Rico (a NULL geometry).
if not (spatialite and connection.ops.spatial_version < (3, 0, 0)):
ks = State.objects.get(poly__contains=lawrence.point)
self.assertEqual('Kansas', ks.name)
# Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas)
# are not contained in Texas or New Zealand.
self.assertEqual(len(Country.objects.filter(mpoly__contains=pueblo.point)), 0) # Query w/GEOSGeometry object
self.assertEqual(len(Country.objects.filter(mpoly__contains=okcity.point.wkt)),
0 if connection.features.supports_real_shape_operations else 1) # Query w/WKT
# OK City is contained w/in bounding box of Texas.
if connection.features.supports_bbcontains_lookup:
qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
self.assertEqual(1, len(qs))
self.assertEqual('Texas', qs[0].name)
@skipUnlessDBFeature("supports_crosses_lookup")
def test_crosses_lookup(self):
Track.objects.create(
name='Line1',
line=LineString([(-95, 29), (-60, 0)])
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 0), (-60, 29)])).count(),
1
)
self.assertEqual(
Track.objects.filter(line__crosses=LineString([(-95, 30), (0, 30)])).count(),
0
)
@skipUnlessDBFeature("supports_left_right_lookups")
def test_left_right_lookups(self):
"Testing the 'left' and 'right' lookup types."
# Left: A << B => true if xmax(A) < xmin(B)
# Right: A >> B => true if xmin(A) > xmax(B)
# See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source.
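        # Illustrative note (not from the original test): Colorado's polygon spans
        # roughly x in [-109.06, -102.04], so Houston (x = -95.36) satisfies
        # point__right=co_border because xmin(point) > xmax(co_border), while
        # Victoria (x = -123.31) satisfies point__left=co_border.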
# The left/right lookup tests are known failures on PostGIS 2.0/2.0.1
# http://trac.osgeo.org/postgis/ticket/2035
if postgis_bug_version():
self.skipTest("PostGIS 2.0/2.0.1 left and right lookups are known to be buggy.")
# Getting the borders for Colorado & Kansas
co_border = State.objects.get(name='Colorado').poly
ks_border = State.objects.get(name='Kansas').poly
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
# These cities should be strictly to the right of the CO border.
cities = ['Houston', 'Dallas', 'Oklahoma City',
'Lawrence', 'Chicago', 'Wellington']
qs = City.objects.filter(point__right=co_border)
self.assertEqual(6, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# These cities should be strictly to the right of the KS border.
cities = ['Chicago', 'Wellington']
qs = City.objects.filter(point__right=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
vic = City.objects.get(point__left=co_border)
self.assertEqual('Victoria', vic.name)
cities = ['Pueblo', 'Victoria']
qs = City.objects.filter(point__left=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
def test_equals_lookups(self):
"Testing the 'same_as' and 'equals' lookup types."
pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326)
c1 = City.objects.get(point=pnt)
c2 = City.objects.get(point__same_as=pnt)
c3 = City.objects.get(point__equals=pnt)
for c in [c1, c2, c3]:
self.assertEqual('Houston', c.name)
@skipUnlessDBFeature("supports_null_geometries")
def test_null_geometries(self):
"Testing NULL geometry support, and the `isnull` lookup type."
# Creating a state with a NULL boundary.
State.objects.create(name='Puerto Rico')
# Querying for both NULL and Non-NULL values.
nullqs = State.objects.filter(poly__isnull=True)
validqs = State.objects.filter(poly__isnull=False)
# Puerto Rico should be NULL (it's a commonwealth unincorporated territory)
self.assertEqual(1, len(nullqs))
self.assertEqual('Puerto Rico', nullqs[0].name)
# The valid states should be Colorado & Kansas
self.assertEqual(2, len(validqs))
state_names = [s.name for s in validqs]
self.assertIn('Colorado', state_names)
self.assertIn('Kansas', state_names)
# Saving another commonwealth w/a NULL geometry.
nmi = State.objects.create(name='Northern Mariana Islands', poly=None)
self.assertEqual(nmi.poly, None)
# Assigning a geometry and saving -- then UPDATE back to NULL.
nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))'
nmi.save()
State.objects.filter(name='Northern Mariana Islands').update(poly=None)
self.assertIsNone(State.objects.get(name='Northern Mariana Islands').poly)
@skipUnlessDBFeature("supports_relate_lookup")
def test_relate_lookup(self):
"Testing the 'relate' lookup type."
# To make things more interesting, we will have our Texas reference point in
# different SRIDs.
pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)
# Not passing in a geometry as first param should
# raise a type error when initializing the GeoQuerySet
self.assertRaises(ValueError, Country.objects.filter, mpoly__relate=(23, 'foo'))
# Making sure the right exception is raised for the given
# bad arguments.
for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
qs = Country.objects.filter(mpoly__relate=bad_args)
self.assertRaises(e, qs.count)
# Relate works differently for the different backends.
if postgis or spatialite:
contains_mask = 'T*T***FF*'
within_mask = 'T*F**F***'
intersects_mask = 'T********'
elif oracle:
contains_mask = 'contains'
within_mask = 'inside'
# TODO: This is not quite the same as the PostGIS mask above
intersects_mask = 'overlapbdyintersect'
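        # Illustrative note (not from the original test): the PostGIS/SpatiaLite masks
        # above are DE-9IM patterns over the 3x3 intersection matrix of the (interior,
        # boundary, exterior) of A against B; e.g. the within mask 'T*F**F***' requires
        # A's interior to intersect B's interior and forbids A's interior and boundary
        # from intersecting B's exterior. Oracle instead takes named SDO_RELATE masks.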
# Testing contains relation mask.
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name)
# Testing within relation mask.
ks = State.objects.get(name='Kansas')
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name)
# Testing intersection relation mask.
if not oracle:
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name)
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name)
@skipUnlessDBFeature("gis_enabled")
@ignore_warnings(category=RemovedInDjango20Warning)
class GeoQuerySetTest(TestCase):
fixtures = ['initial']
# Please keep the tests in GeoQuerySet method's alphabetic order
@skipUnlessDBFeature("has_centroid_method")
def test_centroid(self):
"Testing the `centroid` GeoQuerySet method."
qs = State.objects.exclude(poly__isnull=True).centroid()
if oracle:
tol = 0.1
elif spatialite:
tol = 0.000001
else:
tol = 0.000000001
for s in qs:
self.assertTrue(s.poly.centroid.equals_exact(s.centroid, tol))
@skipUnlessDBFeature(
"has_difference_method", "has_intersection_method",
"has_sym_difference_method", "has_union_method")
def test_diff_intersection_union(self):
"Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
geom = Point(5, 23)
qs = Country.objects.all().difference(geom).sym_difference(geom).union(geom)
# XXX For some reason SpatiaLite does something screwy with the Texas geometry here. Also,
# XXX it doesn't like the null intersection.
if spatialite:
qs = qs.exclude(name='Texas')
else:
qs = qs.intersection(geom)
for c in qs:
if oracle:
# Should be able to execute the queries; however, they won't be the same
# as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
# SpatiaLite).
pass
else:
self.assertEqual(c.mpoly.difference(geom), c.difference)
if not spatialite:
self.assertEqual(c.mpoly.intersection(geom), c.intersection)
# Ordering might differ in collections
self.assertSetEqual(set(g.wkt for g in c.mpoly.sym_difference(geom)),
set(g.wkt for g in c.sym_difference))
self.assertSetEqual(set(g.wkt for g in c.mpoly.union(geom)),
set(g.wkt for g in c.union))
@skipUnlessDBFeature("has_envelope_method")
def test_envelope(self):
"Testing the `envelope` GeoQuerySet method."
countries = Country.objects.all().envelope()
for country in countries:
self.assertIsInstance(country.envelope, Polygon)
@skipUnlessDBFeature("supports_extent_aggr")
@ignore_warnings(category=RemovedInDjango110Warning)
def test_extent(self):
"""
Testing the (deprecated) `extent` GeoQuerySet method and the Extent
aggregate.
"""
# Reference query:
# `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');`
# => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203)
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
qs = City.objects.filter(name__in=('Houston', 'Dallas'))
extent1 = qs.extent()
extent2 = qs.aggregate(Extent('point'))['point__extent']
for extent in (extent1, extent2):
for val, exp in zip(extent, expected):
self.assertAlmostEqual(exp, val, 4)
self.assertIsNone(City.objects.filter(name=('Smalltown')).extent())
self.assertIsNone(City.objects.filter(name=('Smalltown')).aggregate(Extent('point'))['point__extent'])
@skipUnlessDBFeature("supports_extent_aggr")
def test_extent_with_limit(self):
"""
Testing if extent supports limit.
"""
extent1 = City.objects.all().aggregate(Extent('point'))['point__extent']
extent2 = City.objects.all()[:3].aggregate(Extent('point'))['point__extent']
self.assertNotEqual(extent1, extent2)
@skipUnlessDBFeature("has_force_rhr_method")
def test_force_rhr(self):
"Testing GeoQuerySet.force_rhr()."
rings = (
((0, 0), (5, 0), (0, 5), (0, 0)),
((1, 1), (1, 3), (3, 1), (1, 1)),
)
rhr_rings = (
((0, 0), (0, 5), (5, 0), (0, 0)),
((1, 1), (3, 1), (1, 3), (1, 1)),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
s = State.objects.force_rhr().get(name='Foo')
self.assertEqual(rhr_rings, s.force_rhr.coords)
@skipUnlessDBFeature("has_geohash_method")
def test_geohash(self):
"Testing GeoQuerySet.geohash()."
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.geohash().get(name='Houston')
h2 = City.objects.geohash(precision=5).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash)
self.assertEqual(ref_hash[:5], h2.geohash)
def test_geojson(self):
"Testing GeoJSON output from the database using GeoQuerySet.geojson()."
# Only PostGIS and SpatiaLite 3.0+ support GeoJSON.
if not connection.ops.geojson:
self.assertRaises(NotImplementedError, Country.objects.all().geojson, field_name='mpoly')
return
pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
houston_json = (
'{"type":"Point","crs":{"type":"name","properties":'
'{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
)
victoria_json = (
'{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],'
'"coordinates":[-123.305196,48.462611]}'
)
chicago_json = (
'{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},'
'"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
)
if spatialite:
victoria_json = (
'{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],'
'"coordinates":[-123.305196,48.462611]}'
)
# Precision argument should only be an integer
self.assertRaises(TypeError, City.objects.geojson, precision='foo')
# Reference queries and values.
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0)
# FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
self.assertEqual(pueblo_json, City.objects.geojson().get(name='Pueblo').geojson)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we want to include the CRS by using the `crs` keyword.
self.assertEqual(houston_json, City.objects.geojson(crs=True, model_att='json').get(name='Houston').json)
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Houston';
# This time we include the bounding box by using the `bbox` keyword.
self.assertEqual(victoria_json, City.objects.geojson(bbox=True).get(name='Victoria').geojson)
# SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city"
# WHERE "geoapp_city"."name" = 'Chicago';
# Finally, we set every available keyword.
self.assertEqual(
chicago_json,
City.objects.geojson(bbox=True, crs=True, precision=5).get(name='Chicago').geojson
)
@skipUnlessDBFeature("has_gml_method")
def test_gml(self):
"Testing GML output from the database using GeoQuerySet.gml()."
# Should throw a TypeError when trying to obtain GML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.gml, field_name='name')
ptown1 = City.objects.gml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.gml(precision=9).get(name='Pueblo')
if oracle:
# No precision parameter for Oracle :-/
gml_regex = re.compile(
r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml">'
r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ '
r'</gml:coordinates></gml:Point>'
)
elif spatialite and connection.ops.spatial_version < (3, 0, 0):
# Spatialite before 3.0 has extra colon in SrsName
gml_regex = re.compile(
r'^<gml:Point SrsName="EPSG::4326"><gml:coordinates decimal="\." '
r'cs="," ts=" ">-104.609251\d+,38.255001</gml:coordinates></gml:Point>'
)
else:
gml_regex = re.compile(
r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>'
r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>'
)
for ptown in [ptown1, ptown2]:
self.assertTrue(gml_regex.match(ptown.gml))
if postgis:
self.assertIn('<gml:pos srsDimension="2">', City.objects.gml(version=3).get(name='Pueblo').gml)
@skipUnlessDBFeature("has_kml_method")
def test_kml(self):
"Testing KML output from the database using GeoQuerySet.kml()."
# Should throw a TypeError when trying to obtain KML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.kml, 'name')
# Ensuring the KML is as expected.
ptown1 = City.objects.kml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.kml(precision=9).get(name='Pueblo')
for ptown in [ptown1, ptown2]:
self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml)
@ignore_warnings(category=RemovedInDjango110Warning)
def test_make_line(self):
"""
Testing the (deprecated) `make_line` GeoQuerySet method and the MakeLine
aggregate.
"""
if not connection.features.supports_make_line_aggr:
# Only PostGIS has support for the MakeLine aggregate. For other
# backends, test that NotImplementedError is raised
self.assertRaises(
NotImplementedError,
City.objects.all().aggregate, MakeLine('point')
)
return
# Ensuring that a `TypeError` is raised on models without PointFields.
self.assertRaises(TypeError, State.objects.make_line)
self.assertRaises(TypeError, Country.objects.make_line)
# MakeLine on an inappropriate field returns simply None
self.assertIsNone(State.objects.aggregate(MakeLine('poly'))['poly__makeline'])
# Reference query:
# SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city;
ref_line = GEOSGeometry(
'LINESTRING(-95.363151 29.763374,-96.801611 32.782057,'
'-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,'
'-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)',
srid=4326
)
# We check for equality with a tolerance of 10e-5 which is a lower bound
# of the precisions of ref_line coordinates
line1 = City.objects.make_line()
line2 = City.objects.aggregate(MakeLine('point'))['point__makeline']
for line in (line1, line2):
self.assertTrue(ref_line.equals_exact(line, tolerance=10e-5),
"%s != %s" % (ref_line, line))
@skipUnlessDBFeature("has_num_geom_method")
def test_num_geom(self):
"Testing the `num_geom` GeoQuerySet method."
# Both 'countries' only have two geometries.
for c in Country.objects.num_geom():
self.assertEqual(2, c.num_geom)
for c in City.objects.filter(point__isnull=False).num_geom():
# Oracle and PostGIS 2.0+ will return 1 for the number of
# geometries on non-collections.
self.assertEqual(1, c.num_geom)
@skipUnlessDBFeature("supports_num_points_poly")
def test_num_points(self):
"Testing the `num_points` GeoQuerySet method."
for c in Country.objects.num_points():
self.assertEqual(c.mpoly.num_points, c.num_points)
if not oracle:
# Oracle cannot count vertices in Point geometries.
for c in City.objects.num_points():
self.assertEqual(1, c.num_points)
@skipUnlessDBFeature("has_point_on_surface_method")
def test_point_on_surface(self):
"Testing the `point_on_surface` GeoQuerySet method."
# Reference values.
if oracle:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05))
# FROM GEOAPP_COUNTRY;
ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
}
else:
# Using GEOSGeometry to compute the reference point on surface values
# -- since PostGIS also uses GEOS these should be the same.
ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
}
for c in Country.objects.point_on_surface():
if spatialite:
# XXX This seems to be a WKT-translation-related precision issue?
tol = 0.00001
else:
tol = 0.000000001
self.assertTrue(ref[c.name].equals_exact(c.point_on_surface, tol))
@skipUnlessDBFeature("has_reverse_method")
def test_reverse_geom(self):
"Testing GeoQuerySet.reverse_geom()."
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
t = Track.objects.reverse_geom().get(name='Foo')
coords.reverse()
self.assertEqual(tuple(coords), t.reverse_geom.coords)
if oracle:
self.assertRaises(TypeError, State.objects.reverse_geom)
@skipUnlessDBFeature("has_scale_method")
def test_scale(self):
"Testing the `scale` GeoQuerySet method."
xfac, yfac = 2, 3
tol = 5 # XXX The low precision tolerance is for SpatiaLite
qs = Country.objects.scale(xfac, yfac, model_att='scaled')
for c in qs:
for p1, p2 in zip(c.mpoly, c.scaled):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
@skipUnlessDBFeature("has_snap_to_grid_method")
def test_snap_to_grid(self):
"Testing GeoQuerySet.snap_to_grid()."
# Let's try and break snap_to_grid() with bad combinations of arguments.
for bad_args in ((), range(3), range(5)):
self.assertRaises(ValueError, Country.objects.snap_to_grid, *bad_args)
for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
self.assertRaises(TypeError, Country.objects.snap_to_grid, *bad_args)
# Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
# from the world borders dataset he provides.
wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
'12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
'12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
'12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
'12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
'12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
'12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
'12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
# Because floating-point arithmetic isn't exact, we set a tolerance
# to pass into GEOS `equals_exact`.
tol = 0.000000001
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.1).get(name='San Marino').snap_to_grid, tol))
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
self.assertTrue(
ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23).get(name='San Marino').snap_to_grid, tol)
)
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country"
# WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr(
'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))'
)
self.assertTrue(
ref.equals_exact(
Country.objects.snap_to_grid(0.05, 0.23, 0.5, 0.17).get(name='San Marino').snap_to_grid,
tol
)
)
@skipUnlessDBFeature("has_svg_method")
def test_svg(self):
"Testing SVG output using GeoQuerySet.svg()."
self.assertRaises(TypeError, City.objects.svg, precision='foo')
# SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
svg1 = 'cx="-104.609252" cy="-38.255001"'
# Even though relative, only one point so it's practically the same except for
# the 'c' letter prefix on the x,y values.
svg2 = svg1.replace('c', '')
self.assertEqual(svg1, City.objects.svg().get(name='Pueblo').svg)
self.assertEqual(svg2, City.objects.svg(relative=5).get(name='Pueblo').svg)
@skipUnlessDBFeature("has_transform_method")
def test_transform(self):
"Testing the transform() GeoQuerySet method."
# Pre-transformed points for Houston and Pueblo.
htown = fromstr('POINT(1947516.83115183 6322297.06040572)', srid=3084)
ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
prec = 3 # Precision is low due to version variations in PROJ and GDAL.
# Asserting the result of the transform operation with the values in
# the pre-transformed points. Oracle does not have the 3084 SRID.
if not oracle:
h = City.objects.transform(htown.srid).get(name='Houston')
self.assertEqual(3084, h.point.srid)
self.assertAlmostEqual(htown.x, h.point.x, prec)
self.assertAlmostEqual(htown.y, h.point.y, prec)
p1 = City.objects.transform(ptown.srid, field_name='point').get(name='Pueblo')
p2 = City.objects.transform(srid=ptown.srid).get(name='Pueblo')
for p in [p1, p2]:
self.assertEqual(2774, p.point.srid)
self.assertAlmostEqual(ptown.x, p.point.x, prec)
self.assertAlmostEqual(ptown.y, p.point.y, prec)
@skipUnlessDBFeature("has_translate_method")
def test_translate(self):
"Testing the `translate` GeoQuerySet method."
xfac, yfac = 5, -23
qs = Country.objects.translate(xfac, yfac, model_att='translated')
for c in qs:
for p1, p2 in zip(c.mpoly, c.translated):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
# XXX The low precision is for SpatiaLite
self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
# TODO: Oracle can be made to pass if
# union1 = union2 = fromstr('POINT (-97.5211570000000023 34.4646419999999978)')
# but this seems unexpected and should be investigated to determine the cause.
@skipUnlessDBFeature("has_unionagg_method")
@no_oracle
@ignore_warnings(category=RemovedInDjango110Warning)
def test_unionagg(self):
"""
Testing the (deprecated) `unionagg` (aggregate union) GeoQuerySet method
and the Union aggregate.
"""
tx = Country.objects.get(name='Texas').mpoly
# Houston, Dallas -- Ordering may differ depending on backend or GEOS version.
union1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
union2 = fromstr('MULTIPOINT(-95.363151 29.763374,-96.801611 32.782057)')
qs = City.objects.filter(point__within=tx)
self.assertRaises(TypeError, qs.unionagg, 'name')
self.assertRaises(ValueError, qs.aggregate, Union('name'))
# Using `field_name` keyword argument in one query and specifying an
# order in the other (which should not be used because this is
# an aggregate method on a spatial column)
u1 = qs.unionagg(field_name='point')
u2 = qs.order_by('name').unionagg()
u3 = qs.aggregate(Union('point'))['point__union']
u4 = qs.order_by('name').aggregate(Union('point'))['point__union']
tol = 0.00001
self.assertTrue(union1.equals_exact(u1, tol) or union2.equals_exact(u1, tol))
self.assertTrue(union1.equals_exact(u2, tol) or union2.equals_exact(u2, tol))
self.assertTrue(union1.equals_exact(u3, tol) or union2.equals_exact(u3, tol))
self.assertTrue(union1.equals_exact(u4, tol) or union2.equals_exact(u4, tol))
qs = City.objects.filter(name='NotACity')
self.assertIsNone(qs.unionagg(field_name='point'))
self.assertIsNone(qs.aggregate(Union('point'))['point__union'])
def test_within_subquery(self):
"""
Test that using a queryset inside a geo lookup is working (using a subquery)
(#14483).
"""
tex_cities = City.objects.filter(
point__within=Country.objects.filter(name='Texas').values('mpoly')).order_by('name')
expected = ['Dallas', 'Houston']
if not connection.features.supports_real_shape_operations:
expected.append('Oklahoma City')
self.assertEqual(
list(tex_cities.values_list('name', flat=True)),
expected
)
def test_non_concrete_field(self):
NonConcreteModel.objects.create(point=Point(0, 0), name='name')
list(NonConcreteModel.objects.all())
|
bsd-3-clause
|
eucalyptus/se34euca
|
se34euca/lib/EucaUITestLib_Base.py
|
1
|
30882
|
import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
class UICheckException(Exception):
    def __init__(self, message):
        super(UICheckException, self).__init__(message)
class EucaUITestLib_Base(unittest.TestCase):
selenium_server_ip = "localhost"
selenium_server_port = "4444"
ui_ip = "localhost"
port = "8888"
accountname = "eucalyptus"
username = "admin"
password = "password"
protocol = "https"
sauce_account = "eucaqa"
sauce_access_key = ""
sauce_address = "@ondemand.saucelabs.com:80"
retry = 400 # waiting time in seconds for element to be present on page
trials = 300 # trial number for verify not present methods
def NoOp(self):
return 0
def setSeleniumWebDriver(self, driver):
self.driver = driver
return 0
def setSeleniumServerInfo(self, ip, port, sauce_account, sauce_address, sauce_access_key):
self.selenium_server_ip = ip
self.selenium_server_port = port
self.sauce_account = sauce_account
self.sauce_address = sauce_address
self.sauce_access_key = sauce_access_key
print "SELENIUM SERVER IP: " + ip
print "SELENIUM SERVER PORT: " + port
print
print "SAUCE ACCOUNT: " + sauce_account
print "SAUCE ACCESS KEY: " + sauce_access_key
print "SAUCE ADDRESS: " + sauce_address
print
return 0
def setUIInfo(self, ip, port, protocol):
self.ui_ip = ip
self.port = port
self.protocol = protocol
print "EUCALYPTUS CONSOLE PROXY IP: " + ip
print "EUCALYPTUS CONSOLE PROXY PORT: " + port
print "EUCALYPTUS CONSOLE PROXY PROTOCOL: " + protocol
print
return 0
def setUserInfo(self, accountname, username, password):
self.accountname = accountname
self.username = username
self.password = password
print "ACCOUNTNAME: " + accountname
print "USERNAME: " + username
print "PASSWORD: " + password
print
return 0
def setUp(self):
print
print "=== setUp ==="
this_ui = self.protocol + "://" + self.ui_ip + ":" + self.port
#this_selenium_server_url = "http://" + self.selenium_server_ip + ":" + self.selenium_server_port + "/wd/hub"
if self.sauce_access_key != "":
desired_capabilities = webdriver.DesiredCapabilities.CHROME
desired_capabilities['version'] = '31'
desired_capabilities['platform'] = 'Windows 8'
desired_capabilities['name'] = 'Testing Chrome 31 on Windows 8'
else:
desired_capabilities = webdriver.DesiredCapabilities.FIREFOX
this_selenium_server_url = "http://" + self.selenium_server_ip + self.sauce_account + ":" + self.sauce_access_key + self.sauce_address + self.selenium_server_port + "/wd/hub"
#this_selenium_server_url = "http://eucaqa:[email protected]:80/wd/hub"
print "SELENIUM SERVER URL: " + this_selenium_server_url
print "EUCALYPTUS UI PROXY URL: " + this_ui
print
if self.selenium_server_ip is not "localhost":
print "SET REMOTE WEBDRIVER AT: " + this_selenium_server_url
self.driver = webdriver.Remote(this_selenium_server_url, desired_capabilities = desired_capabilities)
else:
print "SET LOCAL WEBDRIVER"
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = this_ui
self.verificationErrors = []
print
print "STARTED SELENIUM TEST ON EUCALYPTUS AT: " + self.base_url
print
return 0
def tearDown(self):
print
print "=== tearDown ==="
self.driver.quit()
self.assertEqual([], self.verificationErrors)
print
print "FINISHED SELENIUM TEST ON EUCALYPTUS AT: " + self.base_url
print
return 0
def is_element_present(self, how, what):
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException, e:
return False
return True
def test_ui_login(self):
print
print "Started Test: Login"
self.driver.get(self.base_url + "/")
print
print "Test: Received the Page Title -> " + self.driver.title
self.verify_element_by_id("password-help")
print
print "Test: Received the Login Page"
self.set_keys_by_id("account", self.accountname)
self.set_keys_by_id("username", self.username)
self.set_keys_by_id("password", self.password)
print
print "Test: Typed the User Info and Clicked the Login Button"
self.click_element_by_name("login")
self.verify_element_by_link_text("Launch new instance")
print
print "Finished Test: Login"
print
return 0
def test_ui_admin_console_login(self):
print
print "Started Test: Admin Console Login"
self.driver.get(self.base_url + "/")
print
print "Test: Received the Page Title -> " + self.driver.title
self.verify_element_by_id("loginForm")
print
print "Test: Received the Login Page"
self.set_keys_by_id("accountName", self.accountname)
self.set_keys_by_id("userName", self.username)
self.set_keys_by_id("password", self.password)
print
print "Test: Typed the User Info and Clicked the Login Button"
self.click_element_by_css_selector('input[type="submit"]')
self.verify_element_by_link_text(str(self.username) + "@" + str(self.accountname))
print
print "Finished Test: Admin Console Login"
print
return 0
def test_ui_logout(self):
print
print "Started Test: Logout"
self.click_element_by_link_text("Dashboard")
print
print "Test: Received the Page Title -> " + self.driver.title
self.verify_element_by_link_text("Launch new instance")
this_link = self.username + "@" + self.accountname
print "Test: Clicked User Account Menu " + this_link
self.click_element_by_link_text(this_link)
print "Test: Clicked the Logout Button"
self.click_element_by_link_text("Log out")
self.verify_element_by_id("password-help")
print
print "Finished Test: Logout"
print
return 0
def test_ui_admin_console_logout(self):
print
print "Started Test: Admin Console Logout"
print "Test: Received the Page Title -> " + self.driver.title
self.verify_element_by_link_text(str(self.username) + "@" + str(self.accountname))
this_link = self.username + "@" + self.accountname
print "Test: Clicked User Account Menu " + this_link
self.click_element_by_link_text(this_link)
print "Test: Clicked the Logout Button"
self.click_element_by_link_text("Sign out")
self.verify_element_by_id("logoImage")
print
print "Finished Test: Admin Console Logout"
print
return 0
def test_ui_gotopage_dashboard(self):
print
print "Started Test: GotoPage Dashboard"
self.click_element_by_link_text("Dashboard")
print
print "Test: Received the Page Title -> " + self.driver.title
self.verify_element_by_link_text("Launch new instance")
print
print "Finished Test: GotoPage Dashboard"
print
return 0
def test_ui_view_page_get_dashboard_source(self):
print
print "Started Test: View Page Get Dashboard Source"
print
self.click_element_by_id("euca-logo")
print
print "Test: Received the Page Title -> " + self.driver.title
self.verify_element_by_link_text("Launch new instance")
time.sleep(3)
running_instance_count = self.get_text_by_css_selector("div.status-readout").split("\n")[0]
stopped_instance_count = self.get_text_by_id("dashboard-instance-stopped").split("\n")[0]
volume_count = self.get_text_by_id("dashboard-storage-volume").split("\n")[0]
snapshot_count = self.get_text_by_id("dashboard-storage-snapshot").split("\n")[0]
sgroup_count = self.get_text_by_id("dashboard-netsec-sgroup").split("\n")[0]
keypair_count = self.get_text_by_id("dashboard-netsec-keypair").split("\n")[0]
eip_count = self.get_text_by_id("dashboard-netsec-eip").split("\n")[0]
print
print "[DASHBOARD] Running Instances: " + running_instance_count
print "[DASHBOARD] Stopped Instances: " + stopped_instance_count
print "[DASHBOARD] Volumes: " + volume_count
print "[DASHBOARD] Snapshots: " + snapshot_count
print "[DASHBOARD] Security Groups: " + sgroup_count
print "[DASHBOARD] Keypairs: " + keypair_count
print "[DASHBOARD] IP Addresses: " + eip_count
print
print "Finished Test: View Page Get Dashboard Source"
print
return 0
    # VERIFY VISIBILITY OF ELEMENT BY TYPE
def check_if_element_visible_by_type(self, element_type, element):
"""
:param element_type:
:param element:
:return: :raise:
"""
self.check_if_element_present_by_type(element_type, element)
is_visible = False
for i in range(self.retry):
print "Wait On Visiblity:: Trial: " + str(i) + " Element Type: " + element_type + ", Element: " + element
if element_type is "LINK_TEXT":
is_visible = self.driver.find_element_by_link_text(element).is_displayed()
elif element_type is "ID":
is_visible = self.driver.find_element_by_id(element).is_displayed()
elif element_type is "CSS_SELECTOR":
is_visible = self.driver.find_element_by_css_selector(element).is_displayed()
elif element_type is "XPATH":
is_visible = self.driver.find_element_by_xpath(element).is_displayed()
elif element_type is "NAME":
is_visible = self.driver.find_element_by_name(element).is_displayed()
if is_visible is True:
print "Element " + element + " is visible"
break
time.sleep(1)
if is_visible is False:
print "Element " + element + " is NOT visible!"
return is_visible
def verify_visible_element_by_link_text(self, element):
return self.check_if_element_visible_by_type("LINK_TEXT", element)
def verify_visible_element_by_id(self, element):
return self.check_if_element_visible_by_type("ID", element)
def verify_visible_element_by_css_selector(self, element):
return self.check_if_element_visible_by_type("CSS_SELECTOR", element)
def verify_visible_element_by_xpath(self, element):
return self.check_if_element_visible_by_type("XPATH", element)
def verify_visible_element_by_name(self, element):
return self.check_if_element_visible_by_type("NAME", element)
# VERIFY ELEMENT BY TYPE
def check_if_element_present_by_type(self, element_type, element):
"""
:param element_type:
:param element:
:return: :raise:
"""
this_element_type = ""
if element_type is "LINK_TEXT":
this_element_type = By.LINK_TEXT
elif element_type is "ID":
this_element_type = By.ID
elif element_type is "CSS_SELECTOR":
this_element_type = By.CSS_SELECTOR
elif element_type is "XPATH":
this_element_type = By.XPATH
elif element_type is "NAME":
this_element_type = By.NAME
for i in range(self.retry):
print "Wait On:: Trial: " + str(i) + " Element Type: " + element_type + ", Element: " + element
try:
if self.is_element_present(this_element_type, element):
break
except:
pass
#raise UICheckException("Time out")
time.sleep(1)
# else:
# self.fail("timed out after "+`self.retry`+" seconds")
try:
self.assertTrue(self.is_element_present(this_element_type, element))
except AssertionError as e:
self.verificationErrors.append(str(e))
print "TEST FAILED::: Wait On:: Element Type: " + element_type + ", Element: " + element
raise UICheckException("Failed to find element of type " + element_type + element + " present")
print "Found:: Element type: " + element_type + ", Element: " + element
return 0
# VERIFY ELEMENT NOT PRESENT
#Experimental:
def verify_element_not_present(self, element_type, element):
"""
Driver waits for the element to disappear from the page
"""
this_element_type = ""
if element_type is "LINK_TEXT":
this_element_type = By.LINK_TEXT
elif element_type is "ID":
this_element_type = By.ID
elif element_type is "CSS_SELECTOR":
this_element_type = By.CSS_SELECTOR
elif element_type is "XPATH":
this_element_type = By.XPATH
elif element_type is "NAME":
this_element_type = By.NAME
for i in range(1, self.trials, 1):
print "Wait On Removal:: Trial: " + str(i) + " Element Type: " + element_type + ", Element: " + element
try:
self.driver.find_element(this_element_type, element)
except NoSuchElementException:
print
print "Verified Removal:: Element type: " + element_type + ", Element: " + element
return True
time.sleep(1)
return False
#VERIFY TEXT NOT PRESENT
def verify_text_not_present_by_css(self, locator, text):
print"Verifying that text displayed at " + locator + " does not match " + text
for i in range(1, self.trials, 1):
displayed = self.get_text_by_css_selector(locator)
print "Currently displayed at locator " + locator + " is " + displayed
if displayed != text:
print "Verified " + self.get_text_by_css_selector(locator) + " does not match " + text
return True
else:
print
print "Trial " + str(i) + " :"
def verify_text_not_present_by_id(self, locator, text):
print"Verifying that text displayed at " + locator + " does not match " + text
for i in range(1, self.trials, 1):
if self.get_text_by_id(locator) != text:
print "Verified " + self.get_text_by_id(locator) + " does not match " + text
return True
else:
print
print "Trial " + str(i) + " :"
def verify_text_not_present_by_name(self, locator, text):
print"Verifying that text displayed at " + locator + " does not match " + text
for i in range(1, self.trials, 1):
if self.get_text_by_name(locator) != text:
print "Verified " + self.get_text_by_name(locator) + " does not match " + text
return True
else:
print
print "Trial " + str(i) + " :"
def verify_text_not_present_by_xpath(self, locator, text):
print"Verifying that text displayed at " + locator + " does not match " + text
for i in range(1, self.trials, 1):
text_on_page = self.get_text_by_xpath(locator)
time.sleep(10)
if text_on_page != text:
print "Verified " + self.get_text_by_xpath(locator) + " does not match " + text
return True
else:
print
print "Found text: " + text_on_page + "( Waiting for " + text + " to disappear )"
print
print "Trial " + str(i) + " :"
# VERIFY CALLS
def verify_element_by_link_text(self, element):
return self.check_if_element_present_by_type("LINK_TEXT", element)
def verify_element_by_id(self, element):
return self.check_if_element_present_by_type("ID", element)
def verify_element_by_css_selector(self, element):
return self.check_if_element_present_by_type("CSS_SELECTOR", element)
def verify_element_by_xpath(self, element):
return self.check_if_element_present_by_type("XPATH", element)
def verify_element_by_name(self, element):
return self.check_if_element_present_by_type("NAME", element)
#VERIFY TEXT DISPLAYED
def verify_text_displayed_by_id(self, element_id, element_text):
#print("Verifying text " +element_text+" displayed at ID "+element_id)
for i in range(self.retry):
print "Wait On:: Trial: " + str(i) + " Verifying text " + element_text + " displayed at ID " + element_id
try:
if element_text == self.driver.find_element_by_id(element_id).text:
print"Found text"
break
except:
pass
time.sleep(1)
else:
self.fail("time out")
try:
self.assertEqual(element_text, self.driver.find_element_by_id(element_id).text)
except AssertionError as e:
self.verificationErrors.append(str(e))
displayed_text = self.driver.find_element_by_id(element_id).text
print("Text displayed at ID " + element_id + " is " + displayed_text)
def verify_text_displayed_by_css(self, element_css, element_text):
#print("Verifying text " +element_text+" displayed at ID "+element_css)
for i in range(self.retry):
print "Wait On:: Trial: " + str(i) + " Verifying text " + element_text + " displayed at ID " + element_css
try:
if element_text == self.driver.find_element_by_css_selector(element_css).text:
print"Found text"
break
except:
pass
time.sleep(1)
else:
self.fail("time out")
try:
self.assertEqual(element_text, self.driver.find_element_by_css_selector(element_css).text)
except AssertionError as e:
self.verificationErrors.append(str(e))
displayed_text = self.driver.find_element_by_css_selector(element_css).text
print("Text displayed at ID " + element_css + " is " + displayed_text)
def verify_text_displayed_by_xpath(self, locator, element_text):
#print("Verifying text " +element_text+" displayed at xpath "+locator)
displayed_text = None
for i in range(self.retry):
print "Wait On:: Trial: " + str(i) + " Verifying text " + element_text + " displayed at xpath " + locator
try:
text_on_page = self.get_text_by_xpath(locator)
if element_text == text_on_page:
print"Found text"
displayed_text = text_on_page
break
except:
pass
time.sleep(1)
else:
self.fail("time out")
try:
self.assertEqual(element_text, displayed_text)
except AssertionError as e:
self.verificationErrors.append(str(e))
print("Text displayed at xpath " + locator + " is " + displayed_text)
#CLICK CALLS
def click_element_by_link_text(self, link_text):
if self.check_if_element_present_by_type("LINK_TEXT", link_text) is not 0:
raise UICheckException("Element by link text not present: " + link_text)
if self.check_if_element_visible_by_type("LINK_TEXT", link_text) is not True:
raise UICheckException("Element by link text not visible:" + link_text)
print "Click: Element Type: LINK_TEXT, Element: " + link_text
self.driver.find_element_by_link_text(link_text).click()
time.sleep(1)
return 0
def click_element_by_id(self, this_id):
if self.check_if_element_present_by_type("ID", this_id) is not 0:
raise UICheckException("Element by id not present: " + this_id)
if self.check_if_element_visible_by_type("ID", this_id) is not True:
raise UICheckException("Element by id not visible:" + link_text)
print "Click: Element Type: ID, Element: " + this_id
self.driver.find_element_by_id(this_id).click()
time.sleep(1)
return 0
def click_element_by_css_selector(self, css_selector):
if self.check_if_element_present_by_type("CSS_SELECTOR", css_selector) is not 0:
raise UICheckException("Element by css selector not present: " + css_selector)
if self.check_if_element_visible_by_type("CSS_SELECTOR", css_selector) is not True:
raise UICheckException("Element by css selector not visible:" + css_selector)
print "Click: Element Type: CSS_SELECTOR, Element: " + css_selector
self.driver.find_element_by_css_selector(css_selector).click()
time.sleep(1)
return 0
def click_element_by_xpath(self, xpath):
if self.check_if_element_present_by_type("XPATH", xpath) is not 0:
raise UICheckException("Element by xpath not present: " + xpath)
# if self.check_if_element_visible_by_type("XPATH", xpath) is not True:
# raise UICheckException("Element by xpath not visible:" + xpath)
print "Click: Element Type: XPATH, Element: " + xpath
self.driver.find_element_by_xpath(xpath).click()
time.sleep(1)
return 0
def click_element_by_name(self, name):
if self.check_if_element_present_by_type("NAME", name) is not 0:
raise UICheckException("Element by name not present: " + name)
if self.check_if_element_visible_by_type("NAME", name) is not True:
raise UICheckException("Element by name not visible:" + name)
print "Click: Element Type: NAME, Element: " + name
self.driver.find_element_by_name(name).click()
return 0
#SET KEYS CALLS
def set_keys_by_link_text(self, link_text, keys):
if self.check_if_element_present_by_type("LINK_TEXT", link_text) is not 0:
raise UICheckException("Element by link text not present:" + link_text)
if self.check_if_element_visible_by_type("LINK_TEXT", link_text) is not True:
raise UICheckException("Element by link text not visible:" + link_text)
print "Set: Element Type: LINK_TEXT, Element: " + link_text + ", Keys: " + keys
self.driver.find_element_by_link_text(link_text).clear()
self.driver.find_element_by_link_text(link_text).send_keys(keys)
return 0
def set_keys_by_id(self, this_id, keys):
if self.check_if_element_present_by_type("ID", this_id) is not 0:
raise UICheckException("Element by id not present:" + this_id)
if self.check_if_element_visible_by_type("ID", this_id) is not True:
raise UICheckException("Element by id not visible:" + link_text)
print "Set: Element Type: ID, Element: " + this_id + ", Keys: " + keys
self.driver.find_element_by_id(this_id).clear()
self.driver.find_element_by_id(this_id).send_keys(keys)
return 0
def set_keys_by_css_selector(self, css_selector, keys):
if self.check_if_element_present_by_type("CSS_SELECTOR", css_selector) is not 0:
raise UICheckException("Element by css selector not present:" + css_selector)
if self.check_if_element_visible_by_type("CSS_SELECTOR", css_selector) is not True:
raise UICheckException("Element by css selector not visible:" + css_selector)
print "Set: Element Type: CSS_SELECTOR, Element: " + css_selector + ", Keys: " + keys
self.driver.find_element_by_css_selector(css_selector).clear()
self.driver.find_element_by_css_selector(css_selector).send_keys(keys)
return 0
def set_keys_by_xpath(self, xpath, keys):
if self.check_if_element_present_by_type("XPATH", xpath) is not 0:
raise UICheckException("Element by xpath not found :" + xpath)
# if self.check_if_element_visible_by_type("XPATH", xpath) is not True:
# raise UICheckException("Element by xpath not visible:" + xpath)
print "Set: Element Type: XPATH, Element: " + xpath + ", Keys: " + keys
self.driver.find_element_by_xpath(xpath).clear()
self.driver.find_element_by_xpath(xpath).send_keys(keys)
return 0
def set_keys_by_name(self, name, keys):
if self.check_if_element_present_by_type("NAME", name) is not 0:
raise UICheckException("Element by name not found:" + name)
if self.check_if_element_visible_by_type("NAME", name) is not True:
raise UICheckException("Element by name not visible:" + name)
print "Set: Element Type: NAME, Element: " + name + ", Keys: " + keys
        self.driver.find_element_by_name(name).clear()
        self.driver.find_element_by_name(name).send_keys(keys)
        return 0
#GET TEXT CALLS
def get_text_by_link_text(self, link_text):
if self.check_if_element_present_by_type("LINK_TEXT", link_text) is not 0:
raise UICheckException("Element by link text not present:" + link_text)
if self.check_if_element_visible_by_type("LINK_TEXT", link_text) is not True:
raise UICheckException("Element by link text not visible:" + link_text)
print "Get Text: Element Type: LINK_TEXT, Element: " + link_text
return self.driver.find_element_by_link_text(link_text).text
def get_text_by_id(self, this_id):
if self.check_if_element_present_by_type("ID", this_id) is not 0:
raise UICheckException("Element by id not present:" + this_id)
if self.check_if_element_visible_by_type("ID", this_id) is not True:
raise UICheckException("Element by id not visible:" + link_text)
print "Get Text: Element Type: ID, Element: " + this_id
return self.driver.find_element_by_id(this_id).text
def get_text_by_css_selector(self, css_selector):
if self.check_if_element_present_by_type("CSS_SELECTOR", css_selector) is not 0:
raise UICheckException("Element by css selector not present:" + css_selector)
if self.check_if_element_visible_by_type("CSS_SELECTOR", css_selector) is not True:
raise UICheckException("Element by css selector not visible:" + css_selector)
print "Get Text: Element Type: CSS_SELECTOR, Element: " + css_selector
return self.driver.find_element_by_css_selector(css_selector).text
def get_text_by_xpath(self, xpath):
if self.check_if_element_present_by_type("XPATH", xpath) is not 0:
raise UICheckException("Element by xpath not present: " + xpath)
# if self.check_if_element_visible_by_type("XPATH", xpath) is not True:
# raise UICheckException("Element by xpath not visible:" + xpath)
print "Get Text: Element Type: XPATH, Element: " + xpath
return self.driver.find_element_by_xpath(xpath).text
def get_text_by_name(self, name):
if self.check_if_element_present_by_type("NAME", name) is not 0:
raise UICheckException("Element by name not present: " + name)
if self.check_if_element_visible_by_type("NAME", name) is not True:
raise UICheckException("Element by name not visible:" + name)
print "Click: Element Type: NAME, Element: " + name
return self.driver.find_element_by_name(name).text
# SELECT TEXT CALLS
def select_text_by_link_text(self, link_text, visible_text):
if self.check_if_element_present_by_type("LINK_TEXT", link_text) is not 0:
raise UICheckException("Element by link text not present: " + link_text)
if self.check_if_element_visible_by_type("LINK_TEXT", link_text) is not True:
raise UICheckException("Element by link text not visible:" + link_text)
print "Select: Element Type: LINK_TEXT, Element: " + link_text + ", Text: " + visible_text
Select(self.driver.find_element_by_link_text(link_text)).select_by_visible_text(visible_text)
return 0
def select_text_by_id(self, this_id, visible_text):
if self.check_if_element_present_by_type("ID", this_id) is not 0:
raise UICheckException("Element by id not present: " + this_id)
if self.check_if_element_visible_by_type("ID", this_id) is not True:
raise UICheckException("Element by id not visible:" + link_text)
print "Select: Element Type: ID, Element: " + this_id + ", Text: " + visible_text
Select(self.driver.find_element_by_id(this_id)).select_by_visible_text(visible_text)
return 0
def select_text_by_css_selector(self, css_selector, visible_text):
if self.check_if_element_present_by_type("CSS_SELECTOR", css_selector) is not 0:
raise UICheckException("Element by css selector not present: " + css_selector)
if self.check_if_element_visible_by_type("CSS_SELECTOR", css_selector) is not True:
raise UICheckException("Element by css selector not visible:" + css_selector)
print "Select: Element Type: CSS_SELECTOR, Element: " + css_selector + ", Text: " + visible_text
Select(self.driver.find_element_by_css_selector(css_selector)).select_by_visible_text(visible_text)
return 0
def select_text_by_xpath(self, xpath, visible_text):
if self.check_if_element_present_by_type("XPATH", xpath) is not 0:
raise UICheckException("Element by xpath not present: " + xpath)
# if self.check_if_element_visible_by_type("XPATH", xpath) is not True:
# raise UICheckException("Element by xpath not visible:" + xpath)
print "Select: Element Type: XPATH, Element: " + xpath + ", Text: " + visible_text
Select(self.driver.find_element_by_xpath(xpath)).select_by_visible_text(visible_text)
return 0
def select_text_by_name(self, name, visible_text):
if self.check_if_element_present_by_type("NAME", name) is not 0:
raise UICheckException("Element by name not present: " + name)
if self.check_if_element_visible_by_type("NAME", name) is not True:
raise UICheckException("Element by name not visible:" + name)
print "Select: Element Type: NAME, Element: " + name + ", Text: " + visible_text
Select(self.driver.find_element_by_name(name)).select_by_visible_text(visible_text)
return 0
if __name__ == "__main__":
unittest.main()
|
bsd-2-clause
|
cmdunkers/DeeperMind
|
PythonEnv/lib/python2.7/site-packages/numpy/distutils/exec_command.py
|
63
|
20462
|
#!/usr/bin/env python
"""
exec_command
Implements exec_command function that is (almost) equivalent to
commands.getstatusoutput function but on NT, DOS systems the
returned status is actually correct (though, the returned status
values may be different by a factor). In addition, exec_command
takes keyword arguments for (re-)defining environment variables.
Provides functions:
exec_command --- execute command in a specified directory and
in the modified environment.
find_executable --- locate a command using info from environment
variable PATH. Equivalent to posix `which`
command.
Author: Pearu Peterson <[email protected]>
Created: 11 January 2003
Requires: Python 2.x
Successfully tested on:
======== ============ =================================================
os.name sys.platform comments
======== ============ =================================================
posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
PyCrust 0.9.3, Idle 1.0.2
posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
posix sunos5 SunOS 5.9, Python 2.2, 2.3.2
posix darwin Darwin 7.2.0, Python 2.3
nt win32 Windows Me
Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
Python 2.1.1 Idle 0.8
nt win32 Windows 98, Python 2.1.1. Idle 0.8
nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
fail i.e. redefining environment variables may
not work. FIXED: don't use cygwin echo!
Comment: also `cmd /c echo` will not work
but redefining environment variables do work.
posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)
nt win32 Windows XP, Python 2.3.3
======== ============ =================================================
Known bugs:
* Tests, that send messages to stderr, fail when executed from MSYS prompt
because the messages are lost at some point.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['exec_command', 'find_executable']
import os
import sys
import shlex
from numpy.distutils.misc_util import is_sequence, make_temp_file
from numpy.distutils import log
from numpy.distutils.compat import get_exception
from numpy.compat import open_latin1
def temp_file_name():
fo, name = make_temp_file()
fo.close()
return name
def get_pythonexe():
pythonexe = sys.executable
if os.name in ['nt', 'dos']:
fdir, fn = os.path.split(pythonexe)
fn = fn.upper().replace('PYTHONW', 'PYTHON')
pythonexe = os.path.join(fdir, fn)
assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
return pythonexe
def find_executable(exe, path=None, _cache={}):
"""Return full path of a executable or None.
Symbolic links are not followed.
"""
key = exe, path
try:
return _cache[key]
except KeyError:
pass
log.debug('find_executable(%r)' % exe)
orig_exe = exe
if path is None:
path = os.environ.get('PATH', os.defpath)
if os.name=='posix':
realpath = os.path.realpath
else:
realpath = lambda a:a
if exe.startswith('"'):
exe = exe[1:-1]
suffixes = ['']
if os.name in ['nt', 'dos', 'os2']:
fn, ext = os.path.splitext(exe)
extra_suffixes = ['.exe', '.com', '.bat']
if ext.lower() not in extra_suffixes:
suffixes = extra_suffixes
if os.path.isabs(exe):
paths = ['']
else:
paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]
for path in paths:
fn = os.path.join(path, exe)
for s in suffixes:
f_ext = fn+s
if not os.path.islink(f_ext):
f_ext = realpath(f_ext)
if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):
log.info('Found executable %s' % f_ext)
_cache[key] = f_ext
return f_ext
log.warn('Could not locate executable %s' % orig_exe)
return None
############################################################
def _preserve_environment( names ):
log.debug('_preserve_environment(%r)' % (names))
env = {}
for name in names:
env[name] = os.environ.get(name)
return env
def _update_environment( **env ):
log.debug('_update_environment(...)')
for name, value in env.items():
os.environ[name] = value or ''
def _supports_fileno(stream):
"""
Returns True if 'stream' supports the file descriptor and allows fileno().
"""
if hasattr(stream, 'fileno'):
try:
r = stream.fileno()
return True
except IOError:
return False
else:
return False
def exec_command(command, execute_in='', use_shell=None, use_tee=None,
_with_python = 1, **env ):
"""
Return (status,output) of executed command.
Parameters
----------
command : str
A concatenated string of executable and arguments.
execute_in : str
Before running command ``cd execute_in`` and after ``cd -``.
use_shell : {bool, None}, optional
If True, execute ``sh -c command``. Default None (True)
use_tee : {bool, None}, optional
If True use tee. Default None (True)
Returns
-------
    res : tuple(int, str)
        Returncode of the command and the captured stdout/stderr messages.
Notes
-----
On NT, DOS systems the returned status is correct for external commands.
Wild cards will not work for non-posix systems or when use_shell=0.
"""
log.debug('exec_command(%r,%s)' % (command,\
','.join(['%s=%r'%kv for kv in env.items()])))
if use_tee is None:
use_tee = os.name=='posix'
if use_shell is None:
use_shell = os.name=='posix'
execute_in = os.path.abspath(execute_in)
oldcwd = os.path.abspath(os.getcwd())
if __name__[-12:] == 'exec_command':
exec_dir = os.path.dirname(os.path.abspath(__file__))
elif os.path.isfile('exec_command.py'):
exec_dir = os.path.abspath('.')
else:
exec_dir = os.path.abspath(sys.argv[0])
if os.path.isfile(exec_dir):
exec_dir = os.path.dirname(exec_dir)
if oldcwd!=execute_in:
os.chdir(execute_in)
log.debug('New cwd: %s' % execute_in)
else:
log.debug('Retaining cwd: %s' % oldcwd)
oldenv = _preserve_environment( list(env.keys()) )
_update_environment( **env )
try:
# _exec_command is robust but slow, it relies on
# usable sys.std*.fileno() descriptors. If they
# are bad (like in win32 Idle, PyCrust environments)
# then _exec_command_python (even slower)
# will be used as a last resort.
#
# _exec_command_posix uses os.system and is faster
# but not on all platforms os.system will return
# a correct status.
if (_with_python and _supports_fileno(sys.stdout) and
sys.stdout.fileno() == -1):
st = _exec_command_python(command,
exec_command_dir = exec_dir,
**env)
elif os.name=='posix':
st = _exec_command_posix(command,
use_shell=use_shell,
use_tee=use_tee,
**env)
else:
st = _exec_command(command, use_shell=use_shell,
use_tee=use_tee,**env)
finally:
if oldcwd!=execute_in:
os.chdir(oldcwd)
log.debug('Restored cwd to %s' % oldcwd)
_update_environment(**oldenv)
return st
def _exec_command_posix( command,
use_shell = None,
use_tee = None,
**env ):
log.debug('_exec_command_posix(...)')
if is_sequence(command):
command_str = ' '.join(list(command))
else:
command_str = command
tmpfile = temp_file_name()
stsfile = None
if use_tee:
stsfile = temp_file_name()
filter = ''
if use_tee == 2:
filter = r'| tr -cd "\n" | tr "\n" "."; echo'
command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\
% (command_str, stsfile, tmpfile, filter)
else:
stsfile = temp_file_name()
command_posix = '( %s ; echo $? > %s ) > %s 2>&1'\
% (command_str, stsfile, tmpfile)
#command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile)
log.debug('Running os.system(%r)' % (command_posix))
status = os.system(command_posix)
if use_tee:
if status:
# if command_tee fails then fall back to robust exec_command
log.warn('_exec_command_posix failed (status=%s)' % status)
return _exec_command(command, use_shell=use_shell, **env)
if stsfile is not None:
f = open_latin1(stsfile, 'r')
status_text = f.read()
status = int(status_text)
f.close()
os.remove(stsfile)
f = open_latin1(tmpfile, 'r')
text = f.read()
f.close()
os.remove(tmpfile)
if text[-1:]=='\n':
text = text[:-1]
return status, text
def _exec_command_python(command,
exec_command_dir='', **env):
log.debug('_exec_command_python(...)')
python_exe = get_pythonexe()
cmdfile = temp_file_name()
stsfile = temp_file_name()
outfile = temp_file_name()
f = open(cmdfile, 'w')
f.write('import os\n')
f.write('import sys\n')
f.write('sys.path.insert(0,%r)\n' % (exec_command_dir))
f.write('from exec_command import exec_command\n')
f.write('del sys.path[0]\n')
f.write('cmd = %r\n' % command)
f.write('os.environ = %r\n' % (os.environ))
f.write('s,o = exec_command(cmd, _with_python=0, **%r)\n' % (env))
f.write('f=open(%r,"w")\nf.write(str(s))\nf.close()\n' % (stsfile))
f.write('f=open(%r,"w")\nf.write(o)\nf.close()\n' % (outfile))
f.close()
cmd = '%s %s' % (python_exe, cmdfile)
status = os.system(cmd)
if status:
raise RuntimeError("%r failed" % (cmd,))
os.remove(cmdfile)
f = open_latin1(stsfile, 'r')
status = int(f.read())
f.close()
os.remove(stsfile)
f = open_latin1(outfile, 'r')
text = f.read()
f.close()
os.remove(outfile)
return status, text
def quote_arg(arg):
if arg[0]!='"' and ' ' in arg:
return '"%s"' % arg
return arg
def _exec_command( command, use_shell=None, use_tee = None, **env ):
log.debug('_exec_command(...)')
if use_shell is None:
use_shell = os.name=='posix'
if use_tee is None:
use_tee = os.name=='posix'
using_command = 0
if use_shell:
# We use shell (unless use_shell==0) so that wildcards can be
# used.
sh = os.environ.get('SHELL', '/bin/sh')
if is_sequence(command):
argv = [sh, '-c', ' '.join(list(command))]
else:
argv = [sh, '-c', command]
else:
        # On NT, DOS we avoid using command.com as its exit status is
# not related to the exit status of a command.
if is_sequence(command):
argv = command[:]
else:
argv = shlex.split(command)
if hasattr(os, 'spawnvpe'):
spawn_command = os.spawnvpe
else:
spawn_command = os.spawnve
argv[0] = find_executable(argv[0]) or argv[0]
if not os.path.isfile(argv[0]):
log.warn('Executable %s does not exist' % (argv[0]))
if os.name in ['nt', 'dos']:
# argv[0] might be internal command
argv = [os.environ['COMSPEC'], '/C'] + argv
using_command = 1
_so_has_fileno = _supports_fileno(sys.stdout)
_se_has_fileno = _supports_fileno(sys.stderr)
so_flush = sys.stdout.flush
se_flush = sys.stderr.flush
if _so_has_fileno:
so_fileno = sys.stdout.fileno()
so_dup = os.dup(so_fileno)
if _se_has_fileno:
se_fileno = sys.stderr.fileno()
se_dup = os.dup(se_fileno)
outfile = temp_file_name()
fout = open(outfile, 'w')
if using_command:
errfile = temp_file_name()
ferr = open(errfile, 'w')
log.debug('Running %s(%s,%r,%r,os.environ)' \
% (spawn_command.__name__, os.P_WAIT, argv[0], argv))
if sys.version_info[0] >= 3 and os.name == 'nt':
# Pre-encode os.environ, discarding un-encodable entries,
# to avoid it failing during encoding as part of spawn. Failure
# is possible if the environment contains entries that are not
# encoded using the system codepage as windows expects.
#
# This is not necessary on unix, where os.environ is encoded
# using the surrogateescape error handler and decoded using
# it as part of spawn.
encoded_environ = {}
for k, v in os.environ.items():
try:
encoded_environ[k.encode(sys.getfilesystemencoding())] = v.encode(
sys.getfilesystemencoding())
except UnicodeEncodeError:
log.debug("ignoring un-encodable env entry %s", k)
else:
encoded_environ = os.environ
argv0 = argv[0]
if not using_command:
argv[0] = quote_arg(argv0)
so_flush()
se_flush()
if _so_has_fileno:
os.dup2(fout.fileno(), so_fileno)
if _se_has_fileno:
if using_command:
#XXX: disabled for now as it does not work from cmd under win32.
# Tests fail on msys
os.dup2(ferr.fileno(), se_fileno)
else:
os.dup2(fout.fileno(), se_fileno)
try:
status = spawn_command(os.P_WAIT, argv0, argv, encoded_environ)
except Exception:
errmess = str(get_exception())
status = 999
sys.stderr.write('%s: %s'%(errmess, argv[0]))
so_flush()
se_flush()
if _so_has_fileno:
os.dup2(so_dup, so_fileno)
os.close(so_dup)
if _se_has_fileno:
os.dup2(se_dup, se_fileno)
os.close(se_dup)
fout.close()
fout = open_latin1(outfile, 'r')
text = fout.read()
fout.close()
os.remove(outfile)
if using_command:
ferr.close()
ferr = open_latin1(errfile, 'r')
errmess = ferr.read()
ferr.close()
os.remove(errfile)
if errmess and not status:
# Not sure how to handle the case where errmess
# contains only warning messages and that should
# not be treated as errors.
#status = 998
if text:
text = text + '\n'
#text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess)
text = text + errmess
print (errmess)
if text[-1:]=='\n':
text = text[:-1]
if status is None:
status = 0
if use_tee:
print (text)
return status, text
def test_nt(**kws):
pythonexe = get_pythonexe()
echo = find_executable('echo')
using_cygwin_echo = echo != 'echo'
if using_cygwin_echo:
log.warn('Using cygwin echo in win32 environment is not supported')
s, o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'AAA\',\'\')"')
assert s==0 and o=='', (s, o)
s, o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'AAA\')"',
AAA='Tere')
assert s==0 and o=='Tere', (s, o)
os.environ['BBB'] = 'Hi'
s, o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'BBB\',\'\')"')
assert s==0 and o=='Hi', (s, o)
s, o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'BBB\',\'\')"',
BBB='Hey')
assert s==0 and o=='Hey', (s, o)
s, o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'BBB\',\'\')"')
assert s==0 and o=='Hi', (s, o)
elif 0:
s, o=exec_command('echo Hello')
assert s==0 and o=='Hello', (s, o)
s, o=exec_command('echo a%AAA%')
assert s==0 and o=='a', (s, o)
s, o=exec_command('echo a%AAA%', AAA='Tere')
assert s==0 and o=='aTere', (s, o)
os.environ['BBB'] = 'Hi'
s, o=exec_command('echo a%BBB%')
assert s==0 and o=='aHi', (s, o)
s, o=exec_command('echo a%BBB%', BBB='Hey')
assert s==0 and o=='aHey', (s, o)
s, o=exec_command('echo a%BBB%')
assert s==0 and o=='aHi', (s, o)
s, o=exec_command('this_is_not_a_command')
assert s and o!='', (s, o)
s, o=exec_command('type not_existing_file')
assert s and o!='', (s, o)
s, o=exec_command('echo path=%path%')
assert s==0 and o!='', (s, o)
s, o=exec_command('%s -c "import sys;sys.stderr.write(sys.platform)"' \
% pythonexe)
assert s==0 and o=='win32', (s, o)
s, o=exec_command('%s -c "raise \'Ignore me.\'"' % pythonexe)
assert s==1 and o, (s, o)
s, o=exec_command('%s -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"'\
% pythonexe)
assert s==0 and o=='012', (s, o)
s, o=exec_command('%s -c "import sys;sys.exit(15)"' % pythonexe)
assert s==15 and o=='', (s, o)
s, o=exec_command('%s -c "print \'Heipa\'"' % pythonexe)
assert s==0 and o=='Heipa', (s, o)
print ('ok')
def test_posix(**kws):
s, o=exec_command("echo Hello",**kws)
assert s==0 and o=='Hello', (s, o)
s, o=exec_command('echo $AAA',**kws)
assert s==0 and o=='', (s, o)
s, o=exec_command('echo "$AAA"',AAA='Tere',**kws)
assert s==0 and o=='Tere', (s, o)
s, o=exec_command('echo "$AAA"',**kws)
assert s==0 and o=='', (s, o)
os.environ['BBB'] = 'Hi'
s, o=exec_command('echo "$BBB"',**kws)
assert s==0 and o=='Hi', (s, o)
s, o=exec_command('echo "$BBB"',BBB='Hey',**kws)
assert s==0 and o=='Hey', (s, o)
s, o=exec_command('echo "$BBB"',**kws)
assert s==0 and o=='Hi', (s, o)
s, o=exec_command('this_is_not_a_command',**kws)
assert s!=0 and o!='', (s, o)
s, o=exec_command('echo path=$PATH',**kws)
assert s==0 and o!='', (s, o)
s, o=exec_command('python -c "import sys,os;sys.stderr.write(os.name)"',**kws)
assert s==0 and o=='posix', (s, o)
s, o=exec_command('python -c "raise \'Ignore me.\'"',**kws)
assert s==1 and o, (s, o)
s, o=exec_command('python -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"',**kws)
assert s==0 and o=='012', (s, o)
s, o=exec_command('python -c "import sys;sys.exit(15)"',**kws)
assert s==15 and o=='', (s, o)
s, o=exec_command('python -c "print \'Heipa\'"',**kws)
assert s==0 and o=='Heipa', (s, o)
print ('ok')
def test_execute_in(**kws):
pythonexe = get_pythonexe()
tmpfile = temp_file_name()
fn = os.path.basename(tmpfile)
tmpdir = os.path.dirname(tmpfile)
f = open(tmpfile, 'w')
f.write('Hello')
f.close()
s, o = exec_command('%s -c "print \'Ignore the following IOError:\','\
'open(%r,\'r\')"' % (pythonexe, fn),**kws)
assert s and o!='', (s, o)
s, o = exec_command('%s -c "print open(%r,\'r\').read()"' % (pythonexe, fn),
execute_in = tmpdir,**kws)
assert s==0 and o=='Hello', (s, o)
os.remove(tmpfile)
print ('ok')
def test_svn(**kws):
s, o = exec_command(['svn', 'status'],**kws)
assert s, (s, o)
print ('svn ok')
def test_cl(**kws):
if os.name=='nt':
s, o = exec_command(['cl', '/V'],**kws)
assert s, (s, o)
print ('cl ok')
if os.name=='posix':
test = test_posix
elif os.name in ['nt', 'dos']:
test = test_nt
else:
raise NotImplementedError('exec_command tests for ', os.name)
############################################################
if __name__ == "__main__":
test(use_tee=0)
test(use_tee=1)
test_execute_in(use_tee=0)
test_execute_in(use_tee=1)
test_svn(use_tee=1)
test_cl(use_tee=1)
|
bsd-3-clause
|
gurbrinder/Arduino
|
arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/_collections.py
|
309
|
2903
|
# urllib3/_collections.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import MutableMapping
from threading import Lock
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
__all__ = ['RecentlyUsedContainer']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
        ``dispose_func(value)`` is called on the evicted value.
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self._lock = Lock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self._lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self._lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self._lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self._lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self._lock:
# Copy pointers to all values, then wipe the mapping
# under Python 2, this copies the list of values twice :-|
values = list(self._container.values())
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self._lock:
return self._container.keys()
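# Illustrative usage sketch (not part of the original module): the container
# evicts the least-recently-used entry once `maxsize` is exceeded and passes
# the evicted value to `dispose_func`.
#
#     pool = RecentlyUsedContainer(maxsize=2, dispose_func=lambda v: v.close())
#     pool['a'] = open('/dev/null')
#     pool['b'] = open('/dev/null')
#     pool['a']                      # touch 'a' so 'b' is now least recent
#     pool['c'] = open('/dev/null')  # evicts 'b'; dispose_func closes it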
|
lgpl-2.1
|
c0defreak/python-for-android
|
python-build/python-libs/gdata/src/gdata/webmastertools/service.py
|
136
|
22175
|
#!/usr/bin/python
#
# Copyright (C) 2008 Yu-Jie Lin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GWebmasterToolsService extends the GDataService to streamline
Google Webmaster Tools operations.
GWebmasterToolsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
"""
__author__ = 'livibetter (Yu-Jie Lin)'
import urllib
import gdata
import atom.service
import gdata.service
import gdata.webmastertools as webmastertools
import atom
FEED_BASE = 'https://www.google.com/webmasters/tools/feeds/'
SITES_FEED = FEED_BASE + 'sites/'
SITE_TEMPLATE = SITES_FEED + '%s'
SITEMAPS_FEED_TEMPLATE = FEED_BASE + '%(site_id)s/sitemaps/'
SITEMAP_TEMPLATE = SITEMAPS_FEED_TEMPLATE + '%(sitemap_id)s'
class Error(Exception):
pass
class RequestError(Error):
pass
class GWebmasterToolsService(gdata.service.GDataService):
"""Client for the Google Webmaster Tools service."""
def __init__(self, email=None, password=None, source=None,
server='www.google.com', **kwargs):
"""Creates a client for the Google Webmaster Tools service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'www.google.com'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(
self, email=email, password=password, service='sitemaps', source=source,
server=server, **kwargs)
def GetSitesFeed(self, uri=SITES_FEED,
converter=webmastertools.SitesFeedFromString):
"""Gets sites feed.
Args:
uri: str (optional) URI to retrieve sites feed.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitesFeedFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitesFeed object.
"""
return self.Get(uri, converter=converter)
def AddSite(self, site_uri, uri=SITES_FEED,
url_params=None, escape_params=True, converter=None):
"""Adds a site to Google Webmaster Tools.
Args:
site_uri: str URI of which site to add.
uri: str (optional) URI to add a site.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitesEntryFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitesEntry object.
"""
site_entry = webmastertools.SitesEntry()
site_entry.content = atom.Content(src=site_uri)
response = self.Post(site_entry, uri,
url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return webmastertools.SitesEntryFromString(response.ToString())
return response
def DeleteSite(self, site_uri, uri=SITE_TEMPLATE,
url_params=None, escape_params=True):
"""Removes a site from Google Webmaster Tools.
Args:
site_uri: str URI of which site to remove.
uri: str (optional) A URI template to send DELETE request.
Default SITE_TEMPLATE.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
True if the delete succeeded.
"""
return self.Delete(
uri % urllib.quote_plus(site_uri),
url_params=url_params, escape_params=escape_params)
def VerifySite(self, site_uri, verification_method, uri=SITE_TEMPLATE,
url_params=None, escape_params=True, converter=None):
"""Requests a verification of a site.
Args:
site_uri: str URI of which site to add sitemap for.
verification_method: str The method to verify a site. Valid values are
'htmlpage', and 'metatag'.
uri: str (optional) URI template to update a site.
Default SITE_TEMPLATE.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitemapsEntryFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitesEntry object.
"""
site_entry = webmastertools.SitesEntry(
atom_id=atom.Id(text=site_uri),
category=atom.Category(
scheme='http://schemas.google.com/g/2005#kind',
term='http://schemas.google.com/webmasters/tools/2007#sites-info'),
verification_method=webmastertools.VerificationMethod(
type=verification_method, in_user='true')
)
response = self.Put(
site_entry,
uri % urllib.quote_plus(site_uri),
url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return webmastertools.SitesEntryFromString(response.ToString())
return response
def UpdateGeoLocation(self, site_uri, geolocation, uri=SITE_TEMPLATE,
url_params=None, escape_params=True, converter=None):
"""Updates geolocation setting of a site.
Args:
site_uri: str URI of which site to add sitemap for.
geolocation: str The geographic location. Valid values are listed in
http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
uri: str (optional) URI template to update a site.
Default SITE_TEMPLATE.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitemapsEntryFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitesEntry object.
"""
site_entry = webmastertools.SitesEntry(
atom_id=atom.Id(text=site_uri),
category=atom.Category(
scheme='http://schemas.google.com/g/2005#kind',
term='http://schemas.google.com/webmasters/tools/2007#sites-info'),
geolocation=webmastertools.GeoLocation(text=geolocation)
)
response = self.Put(
site_entry,
uri % urllib.quote_plus(site_uri),
url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return webmastertools.SitesEntryFromString(response.ToString())
return response
def UpdateCrawlRate(self, site_uri, crawl_rate, uri=SITE_TEMPLATE,
url_params=None, escape_params=True, converter=None):
"""Updates crawl rate setting of a site.
Args:
site_uri: str URI of which site to add sitemap for.
crawl_rate: str The crawl rate for a site. Valid values are 'slower',
'normal', and 'faster'.
uri: str (optional) URI template to update a site.
Default SITE_TEMPLATE.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitemapsEntryFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitesEntry object.
"""
site_entry = webmastertools.SitesEntry(
atom_id=atom.Id(text=site_uri),
category=atom.Category(
scheme='http://schemas.google.com/g/2005#kind',
term='http://schemas.google.com/webmasters/tools/2007#sites-info'),
crawl_rate=webmastertools.CrawlRate(text=crawl_rate)
)
response = self.Put(
site_entry,
uri % urllib.quote_plus(site_uri),
url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return webmastertools.SitesEntryFromString(response.ToString())
return response
def UpdatePreferredDomain(self, site_uri, preferred_domain, uri=SITE_TEMPLATE,
url_params=None, escape_params=True, converter=None):
"""Updates preferred domain setting of a site.
    Note that if using 'preferwww', www.example.com must also be added to the
    account for the setting to take effect.
Args:
site_uri: str URI of which site to add sitemap for.
preferred_domain: str The preferred domain for a site. Valid values are 'none',
'preferwww', and 'prefernowww'.
uri: str (optional) URI template to update a site.
Default SITE_TEMPLATE.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitemapsEntryFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitesEntry object.
"""
site_entry = webmastertools.SitesEntry(
atom_id=atom.Id(text=site_uri),
category=atom.Category(
scheme='http://schemas.google.com/g/2005#kind',
term='http://schemas.google.com/webmasters/tools/2007#sites-info'),
preferred_domain=webmastertools.PreferredDomain(text=preferred_domain)
)
response = self.Put(
site_entry,
uri % urllib.quote_plus(site_uri),
url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return webmastertools.SitesEntryFromString(response.ToString())
return response
def UpdateEnhancedImageSearch(self, site_uri, enhanced_image_search,
uri=SITE_TEMPLATE, url_params=None, escape_params=True, converter=None):
"""Updates enhanced image search setting of a site.
Args:
site_uri: str URI of which site to add sitemap for.
enhanced_image_search: str The enhanced image search setting for a site.
Valid values are 'true', and 'false'.
uri: str (optional) URI template to update a site.
Default SITE_TEMPLATE.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitemapsEntryFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitesEntry object.
"""
site_entry = webmastertools.SitesEntry(
atom_id=atom.Id(text=site_uri),
category=atom.Category(
scheme='http://schemas.google.com/g/2005#kind',
term='http://schemas.google.com/webmasters/tools/2007#sites-info'),
enhanced_image_search=webmastertools.EnhancedImageSearch(
text=enhanced_image_search)
)
response = self.Put(
site_entry,
uri % urllib.quote_plus(site_uri),
url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return webmastertools.SitesEntryFromString(response.ToString())
return response
def GetSitemapsFeed(self, site_uri, uri=SITEMAPS_FEED_TEMPLATE,
converter=webmastertools.SitemapsFeedFromString):
"""Gets sitemaps feed of a site.
Args:
site_uri: str (optional) URI of which site to retrieve its sitemaps feed.
uri: str (optional) URI to retrieve sites feed.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitemapsFeedFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitemapsFeed object.
"""
return self.Get(uri % {'site_id': urllib.quote_plus(site_uri)},
converter=converter)
def AddSitemap(self, site_uri, sitemap_uri, sitemap_type='WEB',
uri=SITEMAPS_FEED_TEMPLATE,
url_params=None, escape_params=True, converter=None):
"""Adds a regular sitemap to a site.
Args:
site_uri: str URI of which site to add sitemap for.
sitemap_uri: str URI of sitemap to add to a site.
sitemap_type: str Type of added sitemap. Valid types: WEB, VIDEO, or CODE.
uri: str (optional) URI template to add a sitemap.
        Default SITEMAPS_FEED_TEMPLATE.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitemapsEntryFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitemapsEntry object.
"""
sitemap_entry = webmastertools.SitemapsEntry(
atom_id=atom.Id(text=sitemap_uri),
category=atom.Category(
scheme='http://schemas.google.com/g/2005#kind',
term='http://schemas.google.com/webmasters/tools/2007#sitemap-regular'),
sitemap_type=webmastertools.SitemapType(text=sitemap_type))
response = self.Post(
sitemap_entry,
uri % {'site_id': urllib.quote_plus(site_uri)},
url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return webmastertools.SitemapsEntryFromString(response.ToString())
return response
def AddMobileSitemap(self, site_uri, sitemap_uri,
sitemap_mobile_markup_language='XHTML', uri=SITEMAPS_FEED_TEMPLATE,
url_params=None, escape_params=True, converter=None):
"""Adds a mobile sitemap to a site.
Args:
site_uri: str URI of which site to add sitemap for.
sitemap_uri: str URI of sitemap to add to a site.
sitemap_mobile_markup_language: str Format of added sitemap. Valid types:
XHTML, WML, or cHTML.
uri: str (optional) URI template to add a sitemap.
        Default SITEMAPS_FEED_TEMPLATE.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitemapsEntryFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitemapsEntry object.
"""
# FIXME
sitemap_entry = webmastertools.SitemapsEntry(
atom_id=atom.Id(text=sitemap_uri),
category=atom.Category(
scheme='http://schemas.google.com/g/2005#kind',
term='http://schemas.google.com/webmasters/tools/2007#sitemap-mobile'),
sitemap_mobile_markup_language=\
webmastertools.SitemapMobileMarkupLanguage(
text=sitemap_mobile_markup_language))
print sitemap_entry
response = self.Post(
sitemap_entry,
uri % {'site_id': urllib.quote_plus(site_uri)},
url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return webmastertools.SitemapsEntryFromString(response.ToString())
return response
def AddNewsSitemap(self, site_uri, sitemap_uri,
sitemap_news_publication_label, uri=SITEMAPS_FEED_TEMPLATE,
url_params=None, escape_params=True, converter=None):
"""Adds a news sitemap to a site.
Args:
site_uri: str URI of which site to add sitemap for.
sitemap_uri: str URI of sitemap to add to a site.
sitemap_news_publication_label: str, list of str Publication Labels for
sitemap.
uri: str (optional) URI template to add a sitemap.
        Default SITEMAPS_FEED_TEMPLATE.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
converter: func (optional) Function which is executed on the server's
response before it is returned. Usually this is a function like
SitemapsEntryFromString which will parse the response and turn it into
an object.
Returns:
If converter is defined, the results of running converter on the server's
response. Otherwise, it will be a SitemapsEntry object.
"""
sitemap_entry = webmastertools.SitemapsEntry(
atom_id=atom.Id(text=sitemap_uri),
category=atom.Category(
scheme='http://schemas.google.com/g/2005#kind',
term='http://schemas.google.com/webmasters/tools/2007#sitemap-news'),
sitemap_news_publication_label=[],
)
if isinstance(sitemap_news_publication_label, str):
sitemap_news_publication_label = [sitemap_news_publication_label]
for label in sitemap_news_publication_label:
sitemap_entry.sitemap_news_publication_label.append(
webmastertools.SitemapNewsPublicationLabel(text=label))
print sitemap_entry
response = self.Post(
sitemap_entry,
uri % {'site_id': urllib.quote_plus(site_uri)},
url_params=url_params,
escape_params=escape_params, converter=converter)
if not converter and isinstance(response, atom.Entry):
return webmastertools.SitemapsEntryFromString(response.ToString())
return response
def DeleteSitemap(self, site_uri, sitemap_uri, uri=SITEMAP_TEMPLATE,
url_params=None, escape_params=True):
"""Removes a sitemap from a site.
Args:
site_uri: str URI of which site to remove a sitemap from.
sitemap_uri: str URI of sitemap to remove from a site.
uri: str (optional) A URI template to send DELETE request.
Default SITEMAP_TEMPLATE.
url_params: dict (optional) Additional URL parameters to be included
in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be
escaped before they are included in the request.
Returns:
True if the delete succeeded.
"""
return self.Delete(
uri % {'site_id': urllib.quote_plus(site_uri),
'sitemap_id': urllib.quote_plus(sitemap_uri)},
url_params=url_params, escape_params=escape_params)
|
apache-2.0
|
yatinkumbhare/openstack-nova
|
nova/tests/unit/virt/xenapi/test_agent.py
|
65
|
17659
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import time
import uuid
import mock
from nova import exception
from nova import test
from nova.virt.xenapi import agent
from nova.virt.xenapi import fake as xenapi_fake
def _get_fake_instance(**kwargs):
system_metadata = []
for k, v in kwargs.items():
system_metadata.append({
"key": k,
"value": v
})
return {
"system_metadata": system_metadata,
"uuid": "uuid",
"key_data": "ssh-rsa asdf",
"os_type": "asdf",
}
class AgentTestCaseBase(test.NoDBTestCase):
def _create_agent(self, instance, session="session"):
self.session = session
self.virtapi = "virtapi"
self.vm_ref = "vm_ref"
return agent.XenAPIBasedAgent(self.session, self.virtapi,
instance, self.vm_ref)
class AgentImageFlagsTestCase(AgentTestCaseBase):
def test_agent_is_present(self):
self.flags(use_agent_default=False, group='xenserver')
instance = {"system_metadata":
[{"key": "image_xenapi_use_agent", "value": "true"}]}
self.assertTrue(agent.should_use_agent(instance))
def test_agent_is_disabled(self):
self.flags(use_agent_default=True, group='xenserver')
instance = {"system_metadata":
[{"key": "image_xenapi_use_agent", "value": "false"}]}
self.assertFalse(agent.should_use_agent(instance))
    def test_agent_uses_default_when_prop_invalid(self):
self.flags(use_agent_default=True, group='xenserver')
instance = {"system_metadata":
[{"key": "image_xenapi_use_agent", "value": "bob"}],
"uuid": "uuid"}
self.assertTrue(agent.should_use_agent(instance))
def test_agent_default_not_present(self):
self.flags(use_agent_default=False, group='xenserver')
instance = {"system_metadata": []}
self.assertFalse(agent.should_use_agent(instance))
def test_agent_default_present(self):
self.flags(use_agent_default=True, group='xenserver')
instance = {"system_metadata": []}
self.assertTrue(agent.should_use_agent(instance))
class SysMetaKeyTestBase(object):
key = None
def _create_agent_with_value(self, value):
kwargs = {self.key: value}
instance = _get_fake_instance(**kwargs)
return self._create_agent(instance)
def test_get_sys_meta_key_true(self):
agent = self._create_agent_with_value("true")
self.assertTrue(agent._get_sys_meta_key(self.key))
def test_get_sys_meta_key_false(self):
agent = self._create_agent_with_value("False")
self.assertFalse(agent._get_sys_meta_key(self.key))
def test_get_sys_meta_key_invalid_is_false(self):
agent = self._create_agent_with_value("invalid")
self.assertFalse(agent._get_sys_meta_key(self.key))
def test_get_sys_meta_key_missing_is_false(self):
instance = _get_fake_instance()
agent = self._create_agent(instance)
self.assertFalse(agent._get_sys_meta_key(self.key))
class SkipSshFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase):
key = "image_xenapi_skip_agent_inject_ssh"
def test_skip_ssh_key_inject(self):
agent = self._create_agent_with_value("True")
self.assertTrue(agent._skip_ssh_key_inject())
class SkipFileInjectAtBootFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase):
key = "image_xenapi_skip_agent_inject_files_at_boot"
def test_skip_inject_files_at_boot(self):
agent = self._create_agent_with_value("True")
self.assertTrue(agent._skip_inject_files_at_boot())
class InjectSshTestCase(AgentTestCaseBase):
def test_inject_ssh_key_succeeds(self):
instance = _get_fake_instance()
agent = self._create_agent(instance)
self.mox.StubOutWithMock(agent, "inject_file")
agent.inject_file("/root/.ssh/authorized_keys",
"\n# The following ssh key was injected by Nova"
"\nssh-rsa asdf\n")
self.mox.ReplayAll()
agent.inject_ssh_key()
def _test_inject_ssh_key_skipped(self, instance):
agent = self._create_agent(instance)
# make sure its not called
self.mox.StubOutWithMock(agent, "inject_file")
self.mox.ReplayAll()
agent.inject_ssh_key()
def test_inject_ssh_key_skipped_no_key_data(self):
instance = _get_fake_instance()
instance["key_data"] = None
self._test_inject_ssh_key_skipped(instance)
def test_inject_ssh_key_skipped_windows(self):
instance = _get_fake_instance()
instance["os_type"] = "windows"
self._test_inject_ssh_key_skipped(instance)
def test_inject_ssh_key_skipped_cloud_init_present(self):
instance = _get_fake_instance(
image_xenapi_skip_agent_inject_ssh="True")
self._test_inject_ssh_key_skipped(instance)
class FileInjectionTestCase(AgentTestCaseBase):
def test_inject_file(self):
instance = _get_fake_instance()
agent = self._create_agent(instance)
self.mox.StubOutWithMock(agent, "_call_agent")
b64_path = base64.b64encode('path')
b64_contents = base64.b64encode('contents')
agent._call_agent('inject_file',
{'b64_contents': b64_contents,
'b64_path': b64_path})
self.mox.ReplayAll()
agent.inject_file("path", "contents")
def test_inject_files(self):
instance = _get_fake_instance()
agent = self._create_agent(instance)
self.mox.StubOutWithMock(agent, "inject_file")
files = [("path1", "content1"), ("path2", "content2")]
agent.inject_file(*files[0])
agent.inject_file(*files[1])
self.mox.ReplayAll()
agent.inject_files(files)
def test_inject_files_skipped_when_cloud_init_installed(self):
instance = _get_fake_instance(
image_xenapi_skip_agent_inject_files_at_boot="True")
agent = self._create_agent(instance)
self.mox.StubOutWithMock(agent, "inject_file")
files = [("path1", "content1"), ("path2", "content2")]
self.mox.ReplayAll()
agent.inject_files(files)
class FakeRebootException(Exception):
details = ["", "", "", "asdf REBOOT: asdf"]
class RebootRetryTestCase(AgentTestCaseBase):
@mock.patch.object(agent, '_wait_for_new_dom_id')
def test_retry_on_reboot(self, mock_wait):
mock_session = mock.Mock()
def fake_call_plugin(*args, **kwargs):
if fake_call_plugin.called:
return {"returncode": '0', "message": "done"}
else:
fake_call_plugin.called = True
raise FakeRebootException()
fake_call_plugin.called = False
mock_session.XenAPI.Failure = FakeRebootException
mock_session.VM.get_domid.return_value = "fake_dom_id"
mock_session.call_plugin.side_effect = fake_call_plugin
agent = self._create_agent(None, mock_session)
result = agent._call_agent("asdf")
self.assertEqual("done", result)
self.assertTrue(mock_session.VM.get_domid.called)
self.assertEqual(2, mock_session.call_plugin.call_count)
        mock_wait.assert_called_once_with(mock_session, self.vm_ref,
                                          "fake_dom_id", "asdf")
@mock.patch.object(time, 'sleep')
@mock.patch.object(time, 'time')
def test_wait_for_new_dom_id_found(self, mock_time, mock_sleep):
mock_session = mock.Mock()
mock_session.VM.get_domid.return_value = "new"
agent._wait_for_new_dom_id(mock_session, "vm_ref", "old", "method")
mock_session.VM.get_domid.assert_called_once_with("vm_ref")
self.assertFalse(mock_sleep.called)
@mock.patch.object(time, 'sleep')
@mock.patch.object(time, 'time')
def test_wait_for_new_dom_id_after_retry(self, mock_time, mock_sleep):
self.flags(agent_timeout=3, group="xenserver")
mock_time.return_value = 0
mock_session = mock.Mock()
old = 40
new = 42
mock_session.VM.get_domid.side_effect = [old, -1, new]
agent._wait_for_new_dom_id(mock_session, "vm_ref", old, "method")
mock_session.VM.get_domid.assert_called_with("vm_ref")
self.assertEqual(3, mock_session.VM.get_domid.call_count)
self.assertEqual(2, mock_sleep.call_count)
@mock.patch.object(time, 'sleep')
@mock.patch.object(time, 'time')
def test_wait_for_new_dom_id_timeout(self, mock_time, mock_sleep):
self.flags(agent_timeout=3, group="xenserver")
def fake_time():
fake_time.time = fake_time.time + 1
return fake_time.time
fake_time.time = 0
mock_time.side_effect = fake_time
mock_session = mock.Mock()
mock_session.VM.get_domid.return_value = "old"
self.assertRaises(exception.AgentTimeout,
agent._wait_for_new_dom_id,
mock_session, "vm_ref", "old", "method")
self.assertEqual(4, mock_session.VM.get_domid.call_count)
class SetAdminPasswordTestCase(AgentTestCaseBase):
@mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
@mock.patch("nova.virt.xenapi.agent.SimpleDH")
def test_exchange_key_with_agent(self, mock_simple_dh, mock_call_agent):
agent = self._create_agent(None)
instance_mock = mock_simple_dh()
instance_mock.get_public.return_value = 4321
mock_call_agent.return_value = "1234"
result = agent._exchange_key_with_agent()
mock_call_agent.assert_called_once_with('key_init', {"pub": "4321"},
success_codes=['D0'],
ignore_errors=False)
result.compute_shared.assert_called_once_with(1234)
@mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
@mock.patch.object(agent.XenAPIBasedAgent,
'_save_instance_password_if_sshkey_present')
@mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent')
def test_set_admin_password_works(self, mock_exchange, mock_save,
mock_call_agent):
mock_dh = mock.Mock(spec_set=agent.SimpleDH)
mock_dh.encrypt.return_value = "enc_pass"
mock_exchange.return_value = mock_dh
agent_inst = self._create_agent(None)
agent_inst.set_admin_password("new_pass")
mock_dh.encrypt.assert_called_once_with("new_pass\n")
mock_call_agent.assert_called_once_with('password',
{'enc_pass': 'enc_pass'})
mock_save.assert_called_once_with("new_pass")
@mock.patch.object(agent.XenAPIBasedAgent, '_add_instance_fault')
@mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent')
def test_set_admin_password_silently_fails(self, mock_exchange,
mock_add_fault):
error = exception.AgentTimeout(method="fake")
mock_exchange.side_effect = error
agent_inst = self._create_agent(None)
agent_inst.set_admin_password("new_pass")
mock_add_fault.assert_called_once_with(error, mock.ANY)
class UpgradeRequiredTestCase(test.NoDBTestCase):
def test_less_than(self):
self.assertTrue(agent.is_upgrade_required('1.2.3.4', '1.2.3.5'))
def test_greater_than(self):
self.assertFalse(agent.is_upgrade_required('1.2.3.5', '1.2.3.4'))
def test_equal(self):
self.assertFalse(agent.is_upgrade_required('1.2.3.4', '1.2.3.4'))
def test_non_lexical(self):
self.assertFalse(agent.is_upgrade_required('1.2.3.10', '1.2.3.4'))
def test_length(self):
self.assertTrue(agent.is_upgrade_required('1.2.3', '1.2.3.4'))
@mock.patch.object(uuid, "uuid4")
class CallAgentTestCase(AgentTestCaseBase):
def test_call_agent_success(self, mock_uuid):
session = mock.Mock()
instance = {"uuid": "fake"}
addl_args = {"foo": "bar"}
session.VM.get_domid.return_value = '42'
mock_uuid.return_value = 1
session.call_plugin.return_value = {'returncode': '4',
'message': "asdf\\r\\n"}
self.assertEqual("asdf",
agent._call_agent(session, instance, "vm_ref",
"method", addl_args, timeout=300,
success_codes=['0', '4']))
expected_args = {
'id': '1',
'dom_id': '42',
'timeout': '300',
}
expected_args.update(addl_args)
session.VM.get_domid.assert_called_once_with("vm_ref")
session.call_plugin.assert_called_once_with("agent", "method",
expected_args)
def _call_agent_setup(self, session, mock_uuid,
returncode='0', success_codes=None,
exception=None):
session.XenAPI.Failure = xenapi_fake.Failure
instance = {"uuid": "fake"}
session.VM.get_domid.return_value = 42
mock_uuid.return_value = 1
if exception:
session.call_plugin.side_effect = exception
else:
session.call_plugin.return_value = {'returncode': returncode,
'message': "asdf\\r\\n"}
return agent._call_agent(session, instance, "vm_ref", "method",
success_codes=success_codes)
def _assert_agent_called(self, session, mock_uuid):
expected_args = {
'id': '1',
'dom_id': '42',
'timeout': '30',
}
session.call_plugin.assert_called_once_with("agent", "method",
expected_args)
session.VM.get_domid.assert_called_once_with("vm_ref")
def test_call_agent_works_with_defaults(self, mock_uuid):
session = mock.Mock()
self._call_agent_setup(session, mock_uuid)
self._assert_agent_called(session, mock_uuid)
def test_call_agent_fails_with_timeout(self, mock_uuid):
session = mock.Mock()
self.assertRaises(exception.AgentTimeout, self._call_agent_setup,
session, mock_uuid,
exception=xenapi_fake.Failure(["TIMEOUT:fake"]))
self._assert_agent_called(session, mock_uuid)
def test_call_agent_fails_with_not_implemented(self, mock_uuid):
session = mock.Mock()
self.assertRaises(exception.AgentNotImplemented,
self._call_agent_setup,
session, mock_uuid,
exception=xenapi_fake.Failure(["NOT IMPLEMENTED:"]))
self._assert_agent_called(session, mock_uuid)
def test_call_agent_fails_with_other_error(self, mock_uuid):
session = mock.Mock()
self.assertRaises(exception.AgentError, self._call_agent_setup,
session, mock_uuid,
exception=xenapi_fake.Failure(["asdf"]))
self._assert_agent_called(session, mock_uuid)
def test_call_agent_fails_with_returned_error(self, mock_uuid):
session = mock.Mock()
self.assertRaises(exception.AgentError, self._call_agent_setup,
session, mock_uuid, returncode='42')
self._assert_agent_called(session, mock_uuid)
class XenAPIBasedAgent(AgentTestCaseBase):
@mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault")
@mock.patch.object(agent, "_call_agent")
def test_call_agent_swallows_error(self, mock_call_agent,
mock_add_instance_fault):
fake_error = exception.AgentError(method="bob")
mock_call_agent.side_effect = fake_error
instance = _get_fake_instance()
agent = self._create_agent(instance)
agent._call_agent("bob")
mock_call_agent.assert_called_once_with(agent.session, agent.instance,
agent.vm_ref, "bob", None, None, None)
mock_add_instance_fault.assert_called_once_with(fake_error, mock.ANY)
@mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault")
@mock.patch.object(agent, "_call_agent")
def test_call_agent_throws_error(self, mock_call_agent,
mock_add_instance_fault):
fake_error = exception.AgentError(method="bob")
mock_call_agent.side_effect = fake_error
instance = _get_fake_instance()
agent = self._create_agent(instance)
self.assertRaises(exception.AgentError, agent._call_agent,
"bob", ignore_errors=False)
mock_call_agent.assert_called_once_with(agent.session, agent.instance,
agent.vm_ref, "bob", None, None, None)
self.assertFalse(mock_add_instance_fault.called)
|
apache-2.0
|
lamarmeigs/django-clean-fields
|
tests/test_utils.py
|
1
|
2929
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from unittest import TestCase
from django.db import models
from mock import patch
from clean_fields.utils import (
get_model_field_value, get_model_field_names, parse_field_ref,
)
class GetModelFieldValueTestCase(TestCase):
def test_model_has_no_field(self):
class MissingFieldModel(models.Model):
some_field = models.IntegerField()
dummy = MissingFieldModel(some_field=5)
with self.assertRaises(AttributeError) as ctx:
get_model_field_value(dummy, 'not_a_field')
self.assertEqual(
str(ctx.exception),
'Object {} has no field named "not_a_field"'.format(dummy)
)
def test_model_has_field(self):
class HasFieldModel(models.Model):
some_field = models.IntegerField()
dummy = HasFieldModel(some_field=5)
field_value = get_model_field_value(dummy, 'some_field')
self.assertEqual(field_value, 5)
class GetModelFieldNamesTestCase(TestCase):
def test_meta_has_get_field_method(self):
class GetFieldModel(models.Model):
some_field = models.IntegerField()
class MockField(object):
def __init__(self, name='test field'):
self.name = name
instance = GetFieldModel(some_field=5)
instance._meta.get_fields = lambda x: x
instance._meta.get_all_field_names = lambda x: x
with patch.object(
instance._meta,
'get_fields',
return_value=[MockField('id'), MockField('some_field')]
):
field_names = get_model_field_names(instance)
self.assertEqual(field_names, ['id', 'some_field'])
def test_meta_has_no_get_field_method(self):
class GetAllFieldNameModel(models.Model):
some_field = models.IntegerField()
instance = GetAllFieldNameModel(some_field=5)
instance._meta.get_fields = lambda x: x
instance._meta.get_all_field_names = lambda x: x
with patch.object(
instance._meta,
'get_fields',
side_effect=AttributeError()
):
with patch.object(
instance._meta,
'get_all_field_names',
return_value=['id', 'some_field']
):
field_names = get_model_field_names(instance)
self.assertEqual(field_names, ['id', 'some_field'])
class ParseFieldRefTestCase(TestCase):
def test_parsed_model_label(self):
model_label, _ = parse_field_ref('app_name.ModelName.field_name')
self.assertEqual(model_label, 'app_name.ModelName')
def test_parsed_field_name(self):
_, field_name = parse_field_ref('app_name.ModelName.field_name')
self.assertEqual(field_name, 'field_name')
|
mit
|
iivic/BoiseStateX
|
common/lib/capa/capa/safe_exec/safe_exec.py
|
179
|
4876
|
"""Capa's specialized use of codejail.safe_exec."""
from codejail.safe_exec import safe_exec as codejail_safe_exec
from codejail.safe_exec import not_safe_exec as codejail_not_safe_exec
from codejail.safe_exec import json_safe, SafeExecException
from . import lazymod
from dogapi import dog_stats_api
import hashlib
# Establish the Python environment for Capa.
# Capa assumes float-friendly division always.
# The name "random" is a properly-seeded stand-in for the random module.
CODE_PROLOG = """\
from __future__ import division
import random as random_module
import sys
random = random_module.Random(%r)
random.Random = random_module.Random
sys.modules['random'] = random
"""
ASSUMED_IMPORTS = [
("numpy", "numpy"),
("math", "math"),
("scipy", "scipy"),
("calc", "calc"),
("eia", "eia"),
("chemcalc", "chem.chemcalc"),
("chemtools", "chem.chemtools"),
("miller", "chem.miller"),
("draganddrop", "verifiers.draganddrop"),
]
# We'll need the code from lazymod.py for use in safe_exec, so read it now.
lazymod_py_file = lazymod.__file__
if lazymod_py_file.endswith("c"):
lazymod_py_file = lazymod_py_file[:-1]
lazymod_py = open(lazymod_py_file).read()
LAZY_IMPORTS = [lazymod_py]
for name, modname in ASSUMED_IMPORTS:
LAZY_IMPORTS.append("{} = LazyModule('{}')\n".format(name, modname))
LAZY_IMPORTS = "".join(LAZY_IMPORTS)
def update_hash(hasher, obj):
"""
Update a `hashlib` hasher with a nested object.
To properly cache nested structures, we need to compute a hash from the
entire structure, canonicalizing at every level.
`hasher`'s `.update()` method is called a number of times, touching all of
`obj` in the process. Only primitive JSON-safe types are supported.
"""
hasher.update(str(type(obj)))
if isinstance(obj, (tuple, list)):
for e in obj:
update_hash(hasher, e)
elif isinstance(obj, dict):
for k in sorted(obj):
update_hash(hasher, k)
update_hash(hasher, obj[k])
else:
hasher.update(repr(obj))
@dog_stats_api.timed('capa.safe_exec.time')
def safe_exec(
code,
globals_dict,
random_seed=None,
python_path=None,
extra_files=None,
cache=None,
slug=None,
unsafely=False,
):
"""
Execute python code safely.
`code` is the Python code to execute. It has access to the globals in `globals_dict`,
and any changes it makes to those globals are visible in `globals_dict` when this
function returns.
    `random_seed` will be used to seed the `random` module available to the code.
`python_path` is a list of filenames or directories to add to the Python
path before execution. If the name is not in `extra_files`, then it will
also be copied into the sandbox.
`extra_files` is a list of (filename, contents) pairs. These files are
created in the sandbox.
`cache` is an object with .get(key) and .set(key, value) methods. It will be used
to cache the execution, taking into account the code, the values of the globals,
and the random seed.
`slug` is an arbitrary string, a description that's meaningful to the
caller, that will be used in log messages.
If `unsafely` is true, then the code will actually be executed without sandboxing.
"""
# Check the cache for a previous result.
if cache:
safe_globals = json_safe(globals_dict)
md5er = hashlib.md5()
md5er.update(repr(code))
update_hash(md5er, safe_globals)
key = "safe_exec.%r.%s" % (random_seed, md5er.hexdigest())
cached = cache.get(key)
if cached is not None:
# We have a cached result. The result is a pair: the exception
# message, if any, else None; and the resulting globals dictionary.
emsg, cleaned_results = cached
globals_dict.update(cleaned_results)
if emsg:
raise SafeExecException(emsg)
return
# Create the complete code we'll run.
code_prolog = CODE_PROLOG % random_seed
# Decide which code executor to use.
if unsafely:
exec_fn = codejail_not_safe_exec
else:
exec_fn = codejail_safe_exec
# Run the code! Results are side effects in globals_dict.
try:
exec_fn(
code_prolog + LAZY_IMPORTS + code, globals_dict,
python_path=python_path, extra_files=extra_files, slug=slug,
)
except SafeExecException as e:
emsg = e.message
else:
emsg = None
# Put the result back in the cache. This is complicated by the fact that
# the globals dict might not be entirely serializable.
if cache:
cleaned_results = json_safe(globals_dict)
cache.set(key, (emsg, cleaned_results))
# If an exception happened, raise it now.
if emsg:
raise e
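

# --- Hedged usage sketch (added for illustration; not part of the original
# module). It demonstrates the property update_hash() above relies on for the
# safe_exec cache key: JSON-safe structures with the same content hash
# identically regardless of dict insertion order, because keys are sorted at
# every level.
def _example_update_hash_is_order_independent():
    first = hashlib.md5()
    update_hash(first, {"b": [1, 2.0, "x"], "a": {"nested": True}})
    second = hashlib.md5()
    update_hash(second, {"a": {"nested": True}, "b": [1, 2.0, "x"]})
    assert first.hexdigest() == second.hexdigest()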
|
agpl-3.0
|
virtool/virtool
|
virtool/app_routes.py
|
2
|
2008
|
import logging
import os
import sys
import virtool.account.api
import virtool.analyses.api
import virtool.caches.api
import virtool.downloads.api
import virtool.files.api
import virtool.genbank.api
import virtool.groups.api
import virtool.history.api
import virtool.hmm.api
import virtool.http.auth
import virtool.http.root
import virtool.http.ws
import virtool.indexes.api
import virtool.jobs.api
import virtool.otus.api
import virtool.processes.api
import virtool.references.api
import virtool.samples.api
import virtool.settings.api
import virtool.software.api
import virtool.subtractions.api
import virtool.uploads.api
import virtool.users.api
import virtool.utils
logger = logging.getLogger(__name__)
INDEX_PATHS = [
"/",
r"/account{suffix:.*}",
r"/administration{suffix:.*}",
r"/home{suffix:.*}",
r"/hmm{suffix:.*}",
r"/jobs{suffix:.*}",
r"/otus{suffix:.*}",
r"/refs{suffix:.*}",
r"/samples{suffix:.*}",
r"/subtraction{suffix:.*}"
]
ROUTES = (
virtool.account.api.routes,
virtool.analyses.api.routes,
virtool.caches.api.routes,
virtool.downloads.api.routes,
virtool.files.api.routes,
virtool.genbank.api.routes,
virtool.groups.api.routes,
virtool.history.api.routes,
virtool.hmm.api.routes,
virtool.indexes.api.routes,
virtool.jobs.api.routes,
virtool.otus.api.routes,
virtool.processes.api.routes,
virtool.references.api.routes,
virtool.http.root.routes,
virtool.samples.api.routes,
virtool.settings.api.routes,
virtool.software.api.routes,
virtool.subtractions.api.routes,
virtool.uploads.api.routes,
virtool.users.api.routes
)
def setup_routes(app):
for path in INDEX_PATHS:
app.router.add_get(path, virtool.http.auth.index_handler)
app.router.add_get("/ws", virtool.http.ws.root)
for routes in ROUTES:
app.router.add_routes(routes)
static_path = os.path.join(sys.path[0], "static")
app.router.add_static("/assets", static_path)
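

# --- Hedged usage sketch (illustrative; not part of the original file) ---
# setup_routes() expects an aiohttp-style application; judging from the
# app.router.add_get/add_static calls above, wiring it up might look like:
#
#     from aiohttp import web
#     app = web.Application()
#     setup_routes(app)
#     web.run_app(app)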
|
mit
|
bright-sparks/chromium-spacewalk
|
tools/telemetry/telemetry/page/actions/tap.py
|
45
|
2540
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.page.actions import page_action
class TapAction(page_action.PageAction):
def __init__(self, selector=None, text=None, element_function=None,
left_position_percentage=0.5, top_position_percentage=0.5,
duration_ms=50):
super(TapAction, self).__init__()
self.selector = selector
self.text = text
self.element_function = element_function
self.left_position_percentage = left_position_percentage
self.top_position_percentage = top_position_percentage
self.duration_ms = duration_ms
def WillRunAction(self, tab):
for js_file in ['gesture_common.js', 'tap.js']:
with open(os.path.join(os.path.dirname(__file__), js_file)) as f:
js = f.read()
tab.ExecuteJavaScript(js)
# Fail if browser doesn't support synthetic tap gestures.
if not tab.EvaluateJavaScript('window.__TapAction_SupportedByBrowser()'):
raise page_action.PageActionNotSupported(
'Synthetic tap not supported for this browser')
done_callback = 'function() { window.__tapActionDone = true; }'
tab.ExecuteJavaScript("""
window.__tapActionDone = false;
window.__tapAction = new __TapAction(%s);"""
% (done_callback))
def HasElementSelector(self):
return (self.element_function is not None or self.selector is not None or
self.text is not None)
def RunAction(self, tab):
if not self.HasElementSelector():
self.element_function = 'document.body'
gesture_source_type = page_action.GetGestureSourceTypeFromOptions(tab)
tap_cmd = ('''
window.__tapAction.start({
element: element,
left_position_percentage: %s,
top_position_percentage: %s,
duration_ms: %s,
gesture_source_type: %s
});'''
% (self.left_position_percentage,
self.top_position_percentage,
self.duration_ms,
gesture_source_type))
code = '''
function(element, errorMsg) {
if (!element) {
throw Error('Cannot find element: ' + errorMsg);
}
%s;
}''' % tap_cmd
page_action.EvaluateCallbackWithElement(
tab, code, selector=self.selector, text=self.text,
element_function=self.element_function)
tab.WaitForJavaScriptExpression('window.__tapActionDone', 60)
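

# --- Hedged usage sketch (illustrative; not part of the original file) ---
# TapAction is normally driven by telemetry's page-action machinery; a direct
# use could look like the lines below, where the '#login-button' selector and
# the `tab` object are placeholders, not names taken from this file:
#
#     action = TapAction(selector='#login-button', duration_ms=100)
#     action.WillRunAction(tab)  # injects gesture_common.js / tap.js and checks support
#     action.RunAction(tab)      # issues the synthetic tap and waits for __tapActionDone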
|
bsd-3-clause
|
anushreejangid/csm-ut
|
tests/test_decorators.py
|
4
|
2597
|
# =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# # Author: Klaudiusz Staniek
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from unittest import TestCase, skip, skipIf
from csmpe.decorators import delegate
class Delegate():
def __init__(self):
self.attr1 = 1
self.attr2 = 2
def method1(self):
return self.attr1
def method2(self):
return self.attr2
def method3(self, arg1, arg2=None):
return arg1, arg2
@delegate("delegate", ("method1", "method2", "method3"), ("attr1", "attr2",))
class DelegateTest(object):
def __init__(self):
self.delegate = Delegate()
class TestDelegateDecorator(TestCase):
def test_method_delegate(self):
dc = DelegateTest()
self.assertEqual(dc.attr1, 1)
self.assertEqual(dc.attr2, 2)
self.assertEqual(dc.method1(), 1)
self.assertEqual(dc.method2(), 2)
dc.attr1 = 10
dc.attr2 = 20
self.assertEqual(dc.attr1, 10)
self.assertEqual(dc.attr2, 20)
self.assertEqual(dc.attr1, dc.delegate.attr1)
self.assertEqual(dc.attr2, dc.delegate.attr2)
self.assertEqual(dc.method3("10", arg2=20), ("10", 20))
|
bsd-2-clause
|
gbalme/Anacoinda
|
contrib/pyminer/pyminer.py
|
385
|
6434
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
mit
|
grap/OCB
|
openerp/report/render/makohtml2html/__init__.py
|
76
|
1120
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from makohtml2html import parseNode
#.apidoc title: MAKO to HTML engine
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
smishenk/blink-crosswalk
|
Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
|
39
|
13796
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.layout_tests.controllers import repaint_overlay
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
def write_test_result(filesystem, port, results_directory, test_name, driver_output,
expected_driver_output, failures):
"""Write the test result to the result output directory."""
root_output_dir = results_directory
writer = TestResultWriter(filesystem, port, root_output_dir, test_name)
if driver_output.error:
writer.write_stderr(driver_output.error)
for failure in failures:
# FIXME: Instead of this long 'if' block, each failure class might
# have a responsibility for writing a test result.
if isinstance(failure, (test_failures.FailureMissingResult,
test_failures.FailureTextMismatch,
test_failures.FailureTestHarnessAssertion)):
writer.write_text_files(driver_output.text, expected_driver_output.text)
writer.create_text_diff_and_write_result(driver_output.text, expected_driver_output.text)
elif isinstance(failure, test_failures.FailureMissingImage):
writer.write_image_files(driver_output.image, expected_image=None)
elif isinstance(failure, test_failures.FailureMissingImageHash):
writer.write_image_files(driver_output.image, expected_driver_output.image)
elif isinstance(failure, test_failures.FailureImageHashMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
writer.write_image_diff_files(driver_output.image_diff)
elif isinstance(failure, (test_failures.FailureAudioMismatch,
test_failures.FailureMissingAudio)):
writer.write_audio_files(driver_output.audio, expected_driver_output.audio)
elif isinstance(failure, test_failures.FailureCrash):
crashed_driver_output = expected_driver_output if failure.is_reftest else driver_output
writer.write_crash_log(crashed_driver_output.crash_log)
elif isinstance(failure, test_failures.FailureLeak):
writer.write_leak_log(driver_output.leak_log)
elif isinstance(failure, test_failures.FailureReftestMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
# FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).
# FIXME: We should always have 2 images here.
if driver_output.image and expected_driver_output.image:
diff_image, err_str = port.diff_image(expected_driver_output.image, driver_output.image)
if diff_image:
writer.write_image_diff_files(diff_image)
else:
_log.warn('ref test mismatch did not produce an image diff.')
writer.write_image_files(driver_output.image, expected_image=None)
if filesystem.exists(failure.reference_filename):
writer.write_reftest(failure.reference_filename)
else:
_log.warn("reference %s was not found" % failure.reference_filename)
elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):
writer.write_image_files(driver_output.image, expected_image=None)
if filesystem.exists(failure.reference_filename):
writer.write_reftest(failure.reference_filename)
else:
_log.warn("reference %s was not found" % failure.reference_filename)
else:
assert isinstance(failure, (test_failures.FailureTimeout, test_failures.FailureReftestNoImagesGenerated))
if expected_driver_output is not None:
writer.create_repaint_overlay_result(driver_output.text, expected_driver_output.text)
class TestResultWriter(object):
"""A class which handles all writing operations to the result directory."""
# Filename pieces when writing failures to the test results directory.
FILENAME_SUFFIX_ACTUAL = "-actual"
FILENAME_SUFFIX_EXPECTED = "-expected"
FILENAME_SUFFIX_DIFF = "-diff"
FILENAME_SUFFIX_STDERR = "-stderr"
FILENAME_SUFFIX_CRASH_LOG = "-crash-log"
FILENAME_SUFFIX_SAMPLE = "-sample"
FILENAME_SUFFIX_LEAK_LOG = "-leak-log"
FILENAME_SUFFIX_WDIFF = "-wdiff.html"
FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"
FILENAME_SUFFIX_IMAGE_DIFF = "-diff.png"
FILENAME_SUFFIX_IMAGE_DIFFS_HTML = "-diffs.html"
FILENAME_SUFFIX_OVERLAY = "-overlay.html"
def __init__(self, filesystem, port, root_output_dir, test_name):
self._filesystem = filesystem
self._port = port
self._root_output_dir = root_output_dir
self._test_name = test_name
def _make_output_directory(self):
"""Creates the output directory (if needed) for a given test filename."""
fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
fs.maybe_make_directory(fs.dirname(output_filename))
def output_filename(self, modifier):
"""Returns a filename inside the output dir that contains modifier.
For example, if test name is "fast/dom/foo.html" and modifier is "-expected.txt",
the return value is "/<path-to-root-output-dir>/fast/dom/foo-expected.txt".
Args:
modifier: a string to replace the extension of filename with
Return:
The absolute path to the output filename
"""
fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
return fs.splitext(output_filename)[0] + modifier
def _write_file(self, path, contents):
if contents is not None:
self._make_output_directory()
self._filesystem.write_binary_file(path, contents)
def _output_testname(self, modifier):
fs = self._filesystem
return fs.splitext(fs.basename(self._test_name))[0] + modifier
def write_output_files(self, file_type, output, expected):
"""Writes the test output, the expected output in the results directory.
The full output filename of the actual, for example, will be
<filename>-actual<file_type>
For instance,
my_test-actual.txt
Args:
file_type: A string describing the test output file type, e.g. ".txt"
output: A string containing the test output
expected: A string containing the expected test output
"""
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
self._write_file(actual_filename, output)
self._write_file(expected_filename, expected)
def write_stderr(self, error):
filename = self.output_filename(self.FILENAME_SUFFIX_STDERR + ".txt")
self._write_file(filename, error)
def write_crash_log(self, crash_log):
filename = self.output_filename(self.FILENAME_SUFFIX_CRASH_LOG + ".txt")
self._write_file(filename, crash_log.encode('utf8', 'replace'))
def write_leak_log(self, leak_log):
filename = self.output_filename(self.FILENAME_SUFFIX_LEAK_LOG + ".txt")
self._write_file(filename, leak_log)
def copy_sample_file(self, sample_file):
filename = self.output_filename(self.FILENAME_SUFFIX_SAMPLE + ".txt")
self._filesystem.copyfile(sample_file, filename)
def write_text_files(self, actual_text, expected_text):
self.write_output_files(".txt", actual_text, expected_text)
def create_text_diff_and_write_result(self, actual_text, expected_text):
# FIXME: This function is actually doing the diffs as well as writing results.
# It might be better to extract code which does 'diff' and make it a separate function.
if not actual_text or not expected_text:
return
file_type = '.txt'
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
# We treat diff output as binary. Diff output may contain multiple files
# in conflicting encodings.
diff = self._port.diff_text(expected_text, actual_text, expected_filename, actual_filename)
diff_filename = self.output_filename(self.FILENAME_SUFFIX_DIFF + file_type)
self._write_file(diff_filename, diff)
# Shell out to wdiff to get colored inline diffs.
if self._port.wdiff_available():
wdiff = self._port.wdiff_text(expected_filename, actual_filename)
wdiff_filename = self.output_filename(self.FILENAME_SUFFIX_WDIFF)
self._write_file(wdiff_filename, wdiff)
# Use WebKit's PrettyPatch.rb to get an HTML diff.
if self._port.pretty_patch_available():
pretty_patch = self._port.pretty_patch_text(diff_filename)
pretty_patch_filename = self.output_filename(self.FILENAME_SUFFIX_PRETTY_PATCH)
self._write_file(pretty_patch_filename, pretty_patch)
def create_repaint_overlay_result(self, actual_text, expected_text):
html = repaint_overlay.generate_repaint_overlay_html(self._test_name, actual_text, expected_text)
if html:
overlay_filename = self.output_filename(self.FILENAME_SUFFIX_OVERLAY)
self._write_file(overlay_filename, html)
def write_audio_files(self, actual_audio, expected_audio):
self.write_output_files('.wav', actual_audio, expected_audio)
def write_image_files(self, actual_image, expected_image):
self.write_output_files('.png', actual_image, expected_image)
def write_image_diff_files(self, image_diff):
diff_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFF)
self._write_file(diff_filename, image_diff)
diffs_html_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFFS_HTML)
# FIXME: old-run-webkit-tests shows the diff percentage as the text contents of the "diff" link.
# FIXME: old-run-webkit-tests include a link to the test file.
html = """<!DOCTYPE HTML>
<html>
<head>
<title>%(title)s</title>
<style>.label{font-weight:bold}</style>
</head>
<body>
Difference between images: <a href="%(diff_filename)s">diff</a><br>
<div class=imageText></div>
<div class=imageContainer data-prefix="%(prefix)s">Loading...</div>
<script>
(function() {
var preloadedImageCount = 0;
function preloadComplete() {
++preloadedImageCount;
if (preloadedImageCount < 2)
return;
toggleImages();
setInterval(toggleImages, 2000)
}
function preloadImage(url) {
image = new Image();
image.addEventListener('load', preloadComplete);
image.src = url;
return image;
}
function toggleImages() {
if (text.textContent == 'Expected Image') {
text.textContent = 'Actual Image';
container.replaceChild(actualImage, container.firstChild);
} else {
text.textContent = 'Expected Image';
container.replaceChild(expectedImage, container.firstChild);
}
}
var text = document.querySelector('.imageText');
var container = document.querySelector('.imageContainer');
var actualImage = preloadImage(container.getAttribute('data-prefix') + '-actual.png');
var expectedImage = preloadImage(container.getAttribute('data-prefix') + '-expected.png');
})();
</script>
</body>
</html>
""" % {
'title': self._test_name,
'diff_filename': self._output_testname(self.FILENAME_SUFFIX_IMAGE_DIFF),
'prefix': self._output_testname(''),
}
self._write_file(diffs_html_filename, html)
def write_reftest(self, src_filepath):
fs = self._filesystem
dst_dir = fs.dirname(fs.join(self._root_output_dir, self._test_name))
dst_filepath = fs.join(dst_dir, fs.basename(src_filepath))
self._write_file(dst_filepath, fs.read_binary_file(src_filepath))
|
bsd-3-clause
|
VasLem/KinectPainting
|
OptGridSearchCV.py
|
1
|
7990
|
'''
An optimized method for GridSearchCV, which iteratively performs grid search
and reduces the span of the parameters after each iteration. Made to make the
life of an engineer less boring.
'''
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
def optGridSearchCV(classifier, xtrain, ytrain, parameters, reduction_ratio=2,
iter_num=3, scoring='f1_macro', fold_num=5, first_rand=False,
n_jobs=1,verbose=1,only_rand=False, only_brute=False):
'''
    The local optimum resides inside the parameter space, with bounds defined
    by the min and max of each parameter; thus, if no prior knowledge exists,
    a recommended way to run this function is to set the min and max of each
    parameter to the corresponding allowed bounds.
<classifier>: initialized classifier object
<xtrain>: features of samples, with shape (n_samples, n_features)
<ytrain>: labels of samples
    <parameters>: dictionary of parameters, of the same type as the
        GridSearchCV <params> argument
    <reduction_ratio>: the scale of relative reduction of the span of the
        numeric parameters
<iter_num>: number of iterations to take place
<fold_num>: number of folds for CrossValidation
    <first_rand> : True to perform random parameter picking (normally
        distributed) first and then brute parameter picking (using linspace).
        If False, the order of the two methods is swapped
<only_rand> : True to perform only random picking
<only_brute> : True to perform only brute picking
'''
def print_params(parameters, preset=''):
'''
print parameters in pandas form, if allowed
'''
try:
from pandas import DataFrame
if isinstance(parameters, list):
params = DataFrame(parameters)
else:
try:
params = DataFrame.from_dict(parameters)
except ValueError:
params = DataFrame([parameters])
print(params)
except ImportError:
print(preset+str(parameters))
def reduce_list(params, best_params):
'''
        Reduce a list of parameter dictionaries to the single parameters
        dictionary which corresponds to the <best_params> found by <GridSearchCV>
'''
best_keys = set(best_params.keys())
for count, dic in enumerate(params):
if best_keys == set(dic.keys()):
return dic, count
raise Exception
def update_parameters(prev_parameters, best_parameters, num_of_samples,
rate=2, israndom=True):
'''
        Each new parameter has the same number of values as the previous one and
        its values lie inside the bounds set by the min and max values of the
        old parameter. Furthermore, the best value from the previous parameter
        is kept inside the new parameter.
<num_of_samples>: dictionary with keys from the best_parameters.
<prev_parameters>: previous parameters, which hold all tested values
<best_parameters>: parameters found to provide the best score (using
GridSearchCV)
<israndom>: whether to perform random or brute method
<rate>: rate of parameters span relative reduction
'''
rate = float(rate)
new_parameters = {}
for key in best_parameters:
if (not isinstance(best_parameters[key], str) and
not isinstance(best_parameters[key], bool) and
not best_parameters[key] is None):
if israndom:
center = best_parameters[key]
std = np.std(prev_parameters[key]) / float(rate)
pick = np.random.normal(loc=center, scale=std,
size=100 * num_of_samples[key])
pick = pick[(pick >=
np.min(prev_parameters[key]))*
(pick <= np.max(prev_parameters[key]))]
new_parameters[key] = pick[
:(num_of_samples[key]-1)]
else:
center = best_parameters[key]
rang = np.max(prev_parameters[
key]) - np.min(prev_parameters[key])
rang = [max(center - rang /
float(rate), min(prev_parameters[key])),
min(center + rang /
float(rate), max(prev_parameters[key]))]
new_parameters[key] = np.linspace(
rang[0], rang[1], num_of_samples[key]-1)
if isinstance(best_parameters[key], int):
new_parameters[key] = new_parameters[key].astype(int)
new_parameters[key] = new_parameters[key].tolist()
new_parameters[key] += [best_parameters[key]]
else:
new_parameters[key] = [best_parameters[key]]
return new_parameters
num_of_samples = {}
if not isinstance(parameters, list):
num_of_samples = {}
for key in parameters:
num_of_samples[key] = len(parameters[key])
best_scores = []
best_params = []
best_estimators = []
rand_flags = [first_rand, not first_rand]
if only_brute:
rand_flags = [False]
if only_rand:
rand_flags = [True]
for it_count in range(iter_num):
for rand_flag in rand_flags:
if verbose==2:
print('Parameters to test on:')
print_params(parameters,'\t')
try:
grids = GridSearchCV(
classifier,
parameters,
scoring=scoring,
cv=fold_num,
n_jobs=n_jobs, verbose=verbose)
grids.fit(xtrain, ytrain)
best_scores.append(grids.best_score_)
best_params.append(grids.best_params_)
best_estimators.append(grids.best_estimator_)
grids_params = grids.best_params_
except ValueError:
print('Invalid parameters')
raise
best_params = parameters
if rand_flag == rand_flags[1]:
print('Iteration Number: ' + str(it_count))
print('\tBest Classifier Params:')
print_params(best_params[-1],'\t\t')
print('\tBest Score:' + str(best_scores[-1]))
if isinstance(parameters, list):
parameters, _ = reduce_list(parameters, grids_params)
for key in parameters:
num_of_samples[key] = len(parameters[key])
if rand_flag == rand_flags[1] and it_count == iter_num - 1:
break
print('Reducing Parameters using '+ ['random' if rand_flag else
'brute'][0] + ' method')
parameters = update_parameters(parameters, grids_params, num_of_samples,
rate=reduction_ratio,
israndom=rand_flag)
return best_params, best_scores, best_estimators
def example():
'''
An example of usage
'''
parameters = [{'C': [1, 10, 100, 1000], 'tol': [0.001, 0.0001],
'class_weight': [None, 'balanced']},
{'C': [1, 10, 100, 1000], 'multi_class': ['crammer_singer'],
'tol': [0.001, 0.0001]}]
xtrain = np.random.random((100, 20))
xtrain[xtrain < 0] = 0
ytrain = (np.random.random(100) > 0.5).astype(int)
lsvc = LinearSVC()
optGridSearchCV(lsvc, xtrain, ytrain, parameters, reduction_ratio=2,
iter_num=3, scoring='f1_macro', fold_num=5, first_rand=False,
n_jobs=4)
if __name__ == '__main__':
example()
|
bsd-3-clause
|
openstack/neutron-lib
|
neutron_lib/api/definitions/address_scope.py
|
1
|
3800
|
# Copyright (c) 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib import constants
from neutron_lib.db import constants as db_constants
ADDRESS_SCOPE = 'address_scope'
ADDRESS_SCOPE_ID = 'address_scope_id'
IPV4_ADDRESS_SCOPE = 'ipv4_%s' % ADDRESS_SCOPE
IPV6_ADDRESS_SCOPE = 'ipv6_%s' % ADDRESS_SCOPE
ALIAS = 'address-scope'
IS_SHIM_EXTENSION = False
IS_STANDARD_ATTR_EXTENSION = False
NAME = 'Address scope'
API_PREFIX = ''
DESCRIPTION = 'Address scopes extension.'
UPDATED_TIMESTAMP = '2015-07-26T10:00:00-00:00'
RESOURCE_NAME = ADDRESS_SCOPE
COLLECTION_NAME = RESOURCE_NAME + 's'
RESOURCE_ATTRIBUTE_MAP = {
COLLECTION_NAME: {
'id': {'allow_post': False,
'allow_put': False,
'validate': {'type:uuid': None},
'is_filter': True,
'is_sort_key': True,
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True,
'allow_put': True,
'default': '',
'validate': {'type:string': db_constants.NAME_FIELD_SIZE},
'is_filter': True,
'is_sort_key': True,
'is_visible': True},
'tenant_id': {'allow_post': True,
'allow_put': False,
'validate': {
'type:string': db_constants.PROJECT_ID_FIELD_SIZE},
'required_by_policy': True,
'is_filter': True,
'is_sort_key': True,
'is_visible': True},
constants.SHARED: {'allow_post': True,
'allow_put': True,
'default': False,
'convert_to': converters.convert_to_boolean,
'is_visible': True,
'is_filter': True,
'is_sort_key': True,
'required_by_policy': True,
'enforce_policy': True},
'ip_version': {'allow_post': True, 'allow_put': False,
'convert_to': converters.convert_to_int,
'validate': {'type:values': [4, 6]},
'is_filter': True,
'is_sort_key': True,
'is_visible': True},
},
'subnetpools': {
ADDRESS_SCOPE_ID: {'allow_post': True,
'allow_put': True,
'default': constants.ATTR_NOT_SPECIFIED,
'validate': {'type:uuid_or_none': None},
'is_filter': True,
'is_sort_key': True,
'is_visible': True}
},
'networks': {
IPV4_ADDRESS_SCOPE: {'allow_post': False,
'allow_put': False,
'is_visible': True},
IPV6_ADDRESS_SCOPE: {'allow_post': False,
'allow_put': False,
'is_visible': True},
}
}
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
REQUIRED_EXTENSIONS = []
OPTIONAL_EXTENSIONS = []
ACTION_STATUS = {}
|
apache-2.0
|
elemhsb/mallorca
|
sw/airborne/test/ahrs/ahrs_utils.py
|
15
|
5172
|
#! /usr/bin/env python
# $Id$
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#import os
#from optparse import OptionParser
#import scipy
#from scipy import optimize
import shlex, subprocess
from pylab import *
from array import array
import numpy
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
print "\nBuilding ahrs"
args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_"+ahrs_type] + build_opt
# print args
p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
for i in outputlines:
print " # "+i,
print
print "Running simulation"
print " using traj " + str(traj_nb)
p = subprocess.Popen(args=["./run_ahrs_on_synth",str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
# for i in outputlines:
# print " "+i,
# print "\n"
ahrs_data_type = [('time', 'float32'),
('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')
]
pos_data_type = [ ('x0_true', 'float32'), ('y0_true', 'float32'), ('z0_true', 'float32'),
('x1_true', 'float32'), ('y1_true', 'float32'), ('z1_true', 'float32'),
('x2_true', 'float32'), ('y2_true', 'float32'), ('z2_true', 'float32'),
('x3_true', 'float32'), ('y3_true', 'float32'), ('z3_true', 'float32'),
]
mydescr = numpy.dtype(ahrs_data_type)
data = [[] for dummy in xrange(len(mydescr))]
# import code; code.interact(local=locals())
for line in outputlines:
if line.startswith("#"):
print " "+line,
else:
fields = line.strip().split(' ');
# print fields
for i, number in enumerate(fields):
data[i].append(number)
print
for i in xrange(len(mydescr)):
data[i] = cast[mydescr[i]](data[i])
return numpy.rec.array(data, dtype=mydescr)
def plot_simulation_results(plot_true_state, lsty, type, sim_res):
print "Plotting Results"
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
subplot(3,3,1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty, label=type)
ylabel('degres')
title('phi')
legend()
subplot(3,3,2)
plot(sim_res.time, sim_res.theta_ahrs, lsty)
title('theta')
subplot(3,3,3)
plot(sim_res.time, sim_res.psi_ahrs, lsty)
title('psi')
subplot(3,3,4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
ylabel('degres/s')
title('p')
subplot(3,3,5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
title('q')
subplot(3,3,6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
title('r')
subplot(3,3,7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
ylabel('degres/s')
xlabel('time in s')
title('bp')
subplot(3,3,8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
xlabel('time in s')
title('bq')
subplot(3,3,9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
xlabel('time in s')
title('br')
if plot_true_state:
subplot(3,3,1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
subplot(3,3,2)
plot(sim_res.time, sim_res.theta_true, 'r--')
subplot(3,3,3)
plot(sim_res.time, sim_res.psi_true, 'r--')
subplot(3,3,4)
plot(sim_res.time, sim_res.p_true, 'r--')
subplot(3,3,5)
plot(sim_res.time, sim_res.q_true, 'r--')
subplot(3,3,6)
plot(sim_res.time, sim_res.r_true, 'r--')
subplot(3,3,7)
plot(sim_res.time, sim_res.bp_true, 'r--')
subplot(3,3,8)
plot(sim_res.time, sim_res.bq_true, 'r--')
subplot(3,3,9)
plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
plt.show();
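

# --- Hedged usage sketch (illustrative; not part of the original file) ---
# run_simulation() builds "AHRS_TYPE_" + ahrs_type and runs the synthetic
# trajectory, so a call sequence could look like the lines below; the 'XYZ'
# type string, empty build options and trajectory number are placeholders:
#
#     sim = run_simulation('XYZ', [], 1)
#     plot_simulation_results(True, 'b-', 'XYZ', sim)
#     show_plot()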
|
gpl-2.0
|
tefasmile/Mi_Blog
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/_sourcemodbuiltins.py
|
95
|
21984
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._sourcemodbuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of SourceMod functions.
It is able to re-generate itself.
Do not edit the FUNCTIONS list by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
FUNCTIONS = ['TopMenuHandler',
'CreateTopMenu',
'LoadTopMenuConfig',
'AddToTopMenu',
'GetTopMenuInfoString',
'GetTopMenuObjName',
'RemoveFromTopMenu',
'DisplayTopMenu',
'FindTopMenuCategory',
'OnAdminMenuCreated',
'OnAdminMenuReady',
'GetAdminTopMenu',
'AddTargetsToMenu',
'AddTargetsToMenu2',
'RedisplayAdminMenu',
'TEHook',
'AddTempEntHook',
'RemoveTempEntHook',
'TE_Start',
'TE_IsValidProp',
'TE_WriteNum',
'TE_ReadNum',
'TE_WriteFloat',
'TE_ReadFloat',
'TE_WriteVector',
'TE_ReadVector',
'TE_WriteAngles',
'TE_WriteFloatArray',
'TE_Send',
'TE_WriteEncodedEnt',
'TE_SendToAll',
'TE_SendToClient',
'CreateKeyValues',
'KvSetString',
'KvSetNum',
'KvSetUInt64',
'KvSetFloat',
'KvSetColor',
'KvSetVector',
'KvGetString',
'KvGetNum',
'KvGetFloat',
'KvGetColor',
'KvGetUInt64',
'KvGetVector',
'KvJumpToKey',
'KvJumpToKeySymbol',
'KvGotoFirstSubKey',
'KvGotoNextKey',
'KvSavePosition',
'KvDeleteKey',
'KvDeleteThis',
'KvGoBack',
'KvRewind',
'KvGetSectionName',
'KvSetSectionName',
'KvGetDataType',
'KeyValuesToFile',
'FileToKeyValues',
'KvSetEscapeSequences',
'KvNodesInStack',
'KvCopySubkeys',
'KvFindKeyById',
'KvGetNameSymbol',
'KvGetSectionSymbol',
'TE_SetupSparks',
'TE_SetupSmoke',
'TE_SetupDust',
'TE_SetupMuzzleFlash',
'TE_SetupMetalSparks',
'TE_SetupEnergySplash',
'TE_SetupArmorRicochet',
'TE_SetupGlowSprite',
'TE_SetupExplosion',
'TE_SetupBloodSprite',
'TE_SetupBeamRingPoint',
'TE_SetupBeamPoints',
'TE_SetupBeamLaser',
'TE_SetupBeamRing',
'TE_SetupBeamFollow',
'HookEvent',
'HookEventEx',
'UnhookEvent',
'CreateEvent',
'FireEvent',
'CancelCreatedEvent',
'GetEventBool',
'SetEventBool',
'GetEventInt',
'SetEventInt',
'GetEventFloat',
'SetEventFloat',
'GetEventString',
'SetEventString',
'GetEventName',
'SetEventBroadcast',
'GetUserMessageId',
'GetUserMessageName',
'StartMessage',
'StartMessageEx',
'EndMessage',
'MsgHook',
'MsgPostHook',
'HookUserMessage',
'UnhookUserMessage',
'StartMessageAll',
'StartMessageOne',
'InactivateClient',
'ReconnectClient',
'GetMaxEntities',
'GetEntityCount',
'IsValidEntity',
'IsValidEdict',
'IsEntNetworkable',
'CreateEdict',
'RemoveEdict',
'GetEdictFlags',
'SetEdictFlags',
'GetEdictClassname',
'GetEntityNetClass',
'ChangeEdictState',
'GetEntData',
'SetEntData',
'GetEntDataFloat',
'SetEntDataFloat',
'GetEntDataEnt2',
'SetEntDataEnt2',
'GetEntDataVector',
'SetEntDataVector',
'GetEntDataString',
'SetEntDataString',
'FindSendPropOffs',
'FindSendPropInfo',
'FindDataMapOffs',
'GetEntSendPropOffs',
'GetEntProp',
'SetEntProp',
'GetEntPropFloat',
'SetEntPropFloat',
'GetEntPropEnt',
'SetEntPropEnt',
'GetEntPropVector',
'SetEntPropVector',
'GetEntPropString',
'SetEntPropString',
'GetEntPropArraySize',
'GetEntDataArray',
'SetEntDataArray',
'GetEntityClassname',
'float',
'FloatMul',
'FloatDiv',
'FloatAdd',
'FloatSub',
'FloatFraction',
'RoundToZero',
'RoundToCeil',
'RoundToFloor',
'RoundToNearest',
'FloatCompare',
'SquareRoot',
'Pow',
'Exponential',
'Logarithm',
'Sine',
'Cosine',
'Tangent',
'FloatAbs',
'ArcTangent',
'ArcCosine',
'ArcSine',
'ArcTangent2',
'RoundFloat',
'operator%',
'DegToRad',
'RadToDeg',
'GetURandomInt',
'GetURandomFloat',
'SetURandomSeed',
'SetURandomSeedSimple',
'RemovePlayerItem',
'GivePlayerItem',
'GetPlayerWeaponSlot',
'IgniteEntity',
'ExtinguishEntity',
'TeleportEntity',
'ForcePlayerSuicide',
'SlapPlayer',
'FindEntityByClassname',
'GetClientEyeAngles',
'CreateEntityByName',
'DispatchSpawn',
'DispatchKeyValue',
'DispatchKeyValueFloat',
'DispatchKeyValueVector',
'GetClientAimTarget',
'GetTeamCount',
'GetTeamName',
'GetTeamScore',
'SetTeamScore',
'GetTeamClientCount',
'SetEntityModel',
'GetPlayerDecalFile',
'GetServerNetStats',
'EquipPlayerWeapon',
'ActivateEntity',
'SetClientInfo',
'SetClientListeningFlags',
'GetClientListeningFlags',
'SetListenOverride',
'GetListenOverride',
'IsClientMuted',
'TR_GetPointContents',
'TR_GetPointContentsEnt',
'TR_TraceRay',
'TR_TraceHull',
'TR_TraceRayFilter',
'TR_TraceHullFilter',
'TR_TraceRayEx',
'TR_TraceHullEx',
'TR_TraceRayFilterEx',
'TR_TraceHullFilterEx',
'TR_GetFraction',
'TR_GetEndPosition',
'TR_GetEntityIndex',
'TR_DidHit',
'TR_GetHitGroup',
'TR_GetPlaneNormal',
'TR_PointOutsideWorld',
'SortIntegers',
'SortFloats',
'SortStrings',
'SortFunc1D',
'SortCustom1D',
'SortCustom2D',
'SortADTArray',
'SortFuncADTArray',
'SortADTArrayCustom',
'CompileRegex',
'MatchRegex',
'GetRegexSubString',
'SimpleRegexMatch',
'TF2_GetPlayerClass',
'TF2_SetPlayerClass',
'TF2_GetPlayerResourceData',
'TF2_SetPlayerResourceData',
'TF2_RemoveWeaponSlot',
'TF2_RemoveAllWeapons',
'TF2_IsPlayerInCondition',
'TF2_GetObjectType',
'TF2_GetObjectMode',
'NominateMap',
'RemoveNominationByMap',
'RemoveNominationByOwner',
'GetExcludeMapList',
'GetNominatedMapList',
'CanMapChooserStartVote',
'InitiateMapChooserVote',
'HasEndOfMapVoteFinished',
'EndOfMapVoteEnabled',
'OnNominationRemoved',
'OnMapVoteStarted',
'CreateTimer',
'KillTimer',
'TriggerTimer',
'GetTickedTime',
'GetMapTimeLeft',
'GetMapTimeLimit',
'ExtendMapTimeLimit',
'GetTickInterval',
'OnMapTimeLeftChanged',
'IsServerProcessing',
'CreateDataTimer',
'ByteCountToCells',
'CreateArray',
'ClearArray',
'CloneArray',
'ResizeArray',
'GetArraySize',
'PushArrayCell',
'PushArrayString',
'PushArrayArray',
'GetArrayCell',
'GetArrayString',
'GetArrayArray',
'SetArrayCell',
'SetArrayString',
'SetArrayArray',
'ShiftArrayUp',
'RemoveFromArray',
'SwapArrayItems',
'FindStringInArray',
'FindValueInArray',
'ProcessTargetString',
'ReplyToTargetError',
'MultiTargetFilter',
'AddMultiTargetFilter',
'RemoveMultiTargetFilter',
'OnBanClient',
'OnBanIdentity',
'OnRemoveBan',
'BanClient',
'BanIdentity',
'RemoveBan',
'CreateTrie',
'SetTrieValue',
'SetTrieArray',
'SetTrieString',
'GetTrieValue',
'GetTrieArray',
'GetTrieString',
'RemoveFromTrie',
'ClearTrie',
'GetTrieSize',
'GetFunctionByName',
'CreateGlobalForward',
'CreateForward',
'GetForwardFunctionCount',
'AddToForward',
'RemoveFromForward',
'RemoveAllFromForward',
'Call_StartForward',
'Call_StartFunction',
'Call_PushCell',
'Call_PushCellRef',
'Call_PushFloat',
'Call_PushFloatRef',
'Call_PushArray',
'Call_PushArrayEx',
'Call_PushString',
'Call_PushStringEx',
'Call_Finish',
'Call_Cancel',
'NativeCall',
'CreateNative',
'ThrowNativeError',
'GetNativeStringLength',
'GetNativeString',
'SetNativeString',
'GetNativeCell',
'GetNativeCellRef',
'SetNativeCellRef',
'GetNativeArray',
'SetNativeArray',
'FormatNativeString',
'OnRebuildAdminCache',
'DumpAdminCache',
'AddCommandOverride',
'GetCommandOverride',
'UnsetCommandOverride',
'CreateAdmGroup',
'FindAdmGroup',
'SetAdmGroupAddFlag',
'GetAdmGroupAddFlag',
'GetAdmGroupAddFlags',
'SetAdmGroupImmuneFrom',
'GetAdmGroupImmuneCount',
'GetAdmGroupImmuneFrom',
'AddAdmGroupCmdOverride',
'GetAdmGroupCmdOverride',
'RegisterAuthIdentType',
'CreateAdmin',
'GetAdminUsername',
'BindAdminIdentity',
'SetAdminFlag',
'GetAdminFlag',
'GetAdminFlags',
'AdminInheritGroup',
'GetAdminGroupCount',
'GetAdminGroup',
'SetAdminPassword',
'GetAdminPassword',
'FindAdminByIdentity',
'RemoveAdmin',
'FlagBitsToBitArray',
'FlagBitArrayToBits',
'FlagArrayToBits',
'FlagBitsToArray',
'FindFlagByName',
'FindFlagByChar',
'FindFlagChar',
'ReadFlagString',
'CanAdminTarget',
'CreateAuthMethod',
'SetAdmGroupImmunityLevel',
'GetAdmGroupImmunityLevel',
'SetAdminImmunityLevel',
'GetAdminImmunityLevel',
'FlagToBit',
'BitToFlag',
'ServerCommand',
'ServerCommandEx',
'InsertServerCommand',
'ServerExecute',
'ClientCommand',
'FakeClientCommand',
'FakeClientCommandEx',
'PrintToServer',
'PrintToConsole',
'ReplyToCommand',
'GetCmdReplySource',
'SetCmdReplySource',
'IsChatTrigger',
'ShowActivity2',
'ShowActivity',
'ShowActivityEx',
'FormatActivitySource',
'SrvCmd',
'RegServerCmd',
'ConCmd',
'RegConsoleCmd',
'RegAdminCmd',
'GetCmdArgs',
'GetCmdArg',
'GetCmdArgString',
'CreateConVar',
'FindConVar',
'ConVarChanged',
'HookConVarChange',
'UnhookConVarChange',
'GetConVarBool',
'SetConVarBool',
'GetConVarInt',
'SetConVarInt',
'GetConVarFloat',
'SetConVarFloat',
'GetConVarString',
'SetConVarString',
'ResetConVar',
'GetConVarDefault',
'GetConVarFlags',
'SetConVarFlags',
'GetConVarBounds',
'SetConVarBounds',
'GetConVarName',
'QueryClientConVar',
'GetCommandIterator',
'ReadCommandIterator',
'CheckCommandAccess',
'CheckAccess',
'IsValidConVarChar',
'GetCommandFlags',
'SetCommandFlags',
'FindFirstConCommand',
'FindNextConCommand',
'SendConVarValue',
'AddServerTag',
'RemoveServerTag',
'CommandListener',
'AddCommandListener',
'RemoveCommandListener',
'TF2_IgnitePlayer',
'TF2_RespawnPlayer',
'TF2_RegeneratePlayer',
'TF2_AddCondition',
'TF2_RemoveCondition',
'TF2_SetPlayerPowerPlay',
'TF2_DisguisePlayer',
'TF2_RemovePlayerDisguise',
'TF2_StunPlayer',
'TF2_MakeBleed',
'TF2_GetResourceEntity',
'TF2_GetClass',
'TF2_CalcIsAttackCritical',
'TF2_OnIsHolidayActive',
'TF2_IsPlayerInDuel',
'TF2_OnConditionAdded',
'TF2_OnConditionRemoved',
'TF2_OnWaitingForPlayersStart',
'TF2_OnWaitingForPlayersEnd',
'SQL_Connect',
'SQL_DefConnect',
'SQL_ConnectCustom',
'SQLite_UseDatabase',
'SQL_CheckConfig',
'SQL_GetDriver',
'SQL_ReadDriver',
'SQL_GetDriverIdent',
'SQL_GetDriverProduct',
'SQL_GetAffectedRows',
'SQL_GetInsertId',
'SQL_GetError',
'SQL_EscapeString',
'SQL_QuoteString',
'SQL_FastQuery',
'SQL_Query',
'SQL_PrepareQuery',
'SQL_FetchMoreResults',
'SQL_HasResultSet',
'SQL_GetRowCount',
'SQL_GetFieldCount',
'SQL_FieldNumToName',
'SQL_FieldNameToNum',
'SQL_FetchRow',
'SQL_MoreRows',
'SQL_Rewind',
'SQL_FetchString',
'SQL_FetchFloat',
'SQL_FetchInt',
'SQL_IsFieldNull',
'SQL_FetchSize',
'SQL_BindParamInt',
'SQL_BindParamFloat',
'SQL_BindParamString',
'SQL_Execute',
'SQL_LockDatabase',
'SQL_UnlockDatabase',
'SQLTCallback',
'SQL_IsSameConnection',
'SQL_TConnect',
'SQL_TQuery',
'CloseHandle',
'CloneHandle',
'MenuHandler',
'CreateMenu',
'DisplayMenu',
'DisplayMenuAtItem',
'AddMenuItem',
'InsertMenuItem',
'RemoveMenuItem',
'RemoveAllMenuItems',
'GetMenuItem',
'GetMenuSelectionPosition',
'GetMenuItemCount',
'SetMenuPagination',
'GetMenuPagination',
'GetMenuStyle',
'SetMenuTitle',
'GetMenuTitle',
'CreatePanelFromMenu',
'GetMenuExitButton',
'SetMenuExitButton',
'GetMenuExitBackButton',
'SetMenuExitBackButton',
'SetMenuNoVoteButton',
'CancelMenu',
'GetMenuOptionFlags',
'SetMenuOptionFlags',
'IsVoteInProgress',
'CancelVote',
'VoteMenu',
'VoteMenuToAll',
'VoteHandler',
'SetVoteResultCallback',
'CheckVoteDelay',
'IsClientInVotePool',
'RedrawClientVoteMenu',
'GetMenuStyleHandle',
'CreatePanel',
'CreateMenuEx',
'GetClientMenu',
'CancelClientMenu',
'GetMaxPageItems',
'GetPanelStyle',
'SetPanelTitle',
'DrawPanelItem',
'DrawPanelText',
'CanPanelDrawFlags',
'SetPanelKeys',
'SendPanelToClient',
'GetPanelTextRemaining',
'GetPanelCurrentKey',
'SetPanelCurrentKey',
'RedrawMenuItem',
'InternalShowMenu',
'GetMenuVoteInfo',
'IsNewVoteAllowed',
'PrefetchSound',
'EmitAmbientSound',
'FadeClientVolume',
'StopSound',
'EmitSound',
'EmitSentence',
'GetDistGainFromSoundLevel',
'AmbientSHook',
'NormalSHook',
'AddAmbientSoundHook',
'AddNormalSoundHook',
'RemoveAmbientSoundHook',
'RemoveNormalSoundHook',
'EmitSoundToClient',
'EmitSoundToAll',
'ATTN_TO_SNDLEVEL',
'strlen',
'StrContains',
'strcmp',
'strncmp',
'StrEqual',
'strcopy',
'Format',
'FormatEx',
'VFormat',
'StringToInt',
'StringToIntEx',
'IntToString',
'StringToFloat',
'StringToFloatEx',
'FloatToString',
'BreakString',
'TrimString',
'SplitString',
'ReplaceString',
'ReplaceStringEx',
'GetCharBytes',
'IsCharAlpha',
'IsCharNumeric',
'IsCharSpace',
'IsCharMB',
'IsCharUpper',
'IsCharLower',
'StripQuotes',
'CharToUpper',
'CharToLower',
'FindCharInString',
'StrCat',
'ExplodeString',
'ImplodeStrings',
'GetVectorLength',
'GetVectorDistance',
'GetVectorDotProduct',
'GetVectorCrossProduct',
'NormalizeVector',
'GetAngleVectors',
'GetVectorAngles',
'GetVectorVectors',
'AddVectors',
'SubtractVectors',
'ScaleVector',
'NegateVector',
'MakeVectorFromPoints',
'BaseComm_IsClientGagged',
'BaseComm_IsClientMuted',
'BaseComm_SetClientGag',
'BaseComm_SetClientMute',
'FormatUserLogText',
'FindPluginByFile',
'FindTarget',
'AcceptEntityInput',
'SetVariantBool',
'SetVariantString',
'SetVariantInt',
'SetVariantFloat',
'SetVariantVector3D',
'SetVariantPosVector3D',
'SetVariantColor',
'SetVariantEntity',
'GameRules_GetProp',
'GameRules_SetProp',
'GameRules_GetPropFloat',
'GameRules_SetPropFloat',
'GameRules_GetPropEnt',
'GameRules_SetPropEnt',
'GameRules_GetPropVector',
'GameRules_SetPropVector',
'GameRules_GetPropString',
'GameRules_SetPropString',
'GameRules_GetRoundState',
'OnClientConnect',
'OnClientConnected',
'OnClientPutInServer',
'OnClientDisconnect',
'OnClientDisconnect_Post',
'OnClientCommand',
'OnClientSettingsChanged',
'OnClientAuthorized',
'OnClientPreAdminCheck',
'OnClientPostAdminFilter',
'OnClientPostAdminCheck',
'GetMaxClients',
'GetClientCount',
'GetClientName',
'GetClientIP',
'GetClientAuthString',
'GetClientUserId',
'IsClientConnected',
'IsClientInGame',
'IsClientInKickQueue',
'IsClientAuthorized',
'IsFakeClient',
'IsClientSourceTV',
'IsClientReplay',
'IsClientObserver',
'IsPlayerAlive',
'GetClientInfo',
'GetClientTeam',
'SetUserAdmin',
'GetUserAdmin',
'AddUserFlags',
'RemoveUserFlags',
'SetUserFlagBits',
'GetUserFlagBits',
'CanUserTarget',
'RunAdminCacheChecks',
'NotifyPostAdminCheck',
'CreateFakeClient',
'SetFakeClientConVar',
'GetClientHealth',
'GetClientModel',
'GetClientWeapon',
'GetClientMaxs',
'GetClientMins',
'GetClientAbsAngles',
'GetClientAbsOrigin',
'GetClientArmor',
'GetClientDeaths',
'GetClientFrags',
'GetClientDataRate',
'IsClientTimingOut',
'GetClientTime',
'GetClientLatency',
'GetClientAvgLatency',
'GetClientAvgLoss',
'GetClientAvgChoke',
'GetClientAvgData',
'GetClientAvgPackets',
'GetClientOfUserId',
'KickClient',
'KickClientEx',
'ChangeClientTeam',
'GetClientSerial',
'GetClientFromSerial',
'FindStringTable',
'GetNumStringTables',
'GetStringTableNumStrings',
'GetStringTableMaxStrings',
'GetStringTableName',
'FindStringIndex',
'ReadStringTable',
'GetStringTableDataLength',
'GetStringTableData',
'SetStringTableData',
'AddToStringTable',
'LockStringTables',
'AddFileToDownloadsTable',
'GetEntityFlags',
'SetEntityFlags',
'GetEntityMoveType',
'SetEntityMoveType',
'GetEntityRenderMode',
'SetEntityRenderMode',
'GetEntityRenderFx',
'SetEntityRenderFx',
'SetEntityRenderColor',
'GetEntityGravity',
'SetEntityGravity',
'SetEntityHealth',
'GetClientButtons',
'EntityOutput',
'HookEntityOutput',
'UnhookEntityOutput',
'HookSingleEntityOutput',
'UnhookSingleEntityOutput',
'SMC_CreateParser',
'SMC_ParseFile',
'SMC_GetErrorString',
'SMC_ParseStart',
'SMC_SetParseStart',
'SMC_ParseEnd',
'SMC_SetParseEnd',
'SMC_NewSection',
'SMC_KeyValue',
'SMC_EndSection',
'SMC_SetReaders',
'SMC_RawLine',
'SMC_SetRawLine',
'BfWriteBool',
'BfWriteByte',
'BfWriteChar',
'BfWriteShort',
'BfWriteWord',
'BfWriteNum',
'BfWriteFloat',
'BfWriteString',
'BfWriteEntity',
'BfWriteAngle',
'BfWriteCoord',
'BfWriteVecCoord',
'BfWriteVecNormal',
'BfWriteAngles',
'BfReadBool',
'BfReadByte',
'BfReadChar',
'BfReadShort',
'BfReadWord',
'BfReadNum',
'BfReadFloat',
'BfReadString',
'BfReadEntity',
'BfReadAngle',
'BfReadCoord',
'BfReadVecCoord',
'BfReadVecNormal',
'BfReadAngles',
'BfGetNumBytesLeft',
'CreateProfiler',
'StartProfiling',
'StopProfiling',
'GetProfilerTime',
'OnPluginStart',
'AskPluginLoad2',
'OnPluginEnd',
'OnPluginPauseChange',
'OnGameFrame',
'OnMapStart',
'OnMapEnd',
'OnConfigsExecuted',
'OnAutoConfigsBuffered',
'OnAllPluginsLoaded',
'GetMyHandle',
'GetPluginIterator',
'MorePlugins',
'ReadPlugin',
'GetPluginStatus',
'GetPluginFilename',
'IsPluginDebugging',
'GetPluginInfo',
'FindPluginByNumber',
'SetFailState',
'ThrowError',
'GetTime',
'FormatTime',
'LoadGameConfigFile',
'GameConfGetOffset',
'GameConfGetKeyValue',
'GetSysTickCount',
'AutoExecConfig',
'RegPluginLibrary',
'LibraryExists',
'GetExtensionFileStatus',
'OnLibraryAdded',
'OnLibraryRemoved',
'ReadMapList',
'SetMapListCompatBind',
'OnClientFloodCheck',
'OnClientFloodResult',
'CanTestFeatures',
'GetFeatureStatus',
'RequireFeature',
'LoadFromAddress',
'StoreToAddress',
'CreateStack',
'PushStackCell',
'PushStackString',
'PushStackArray',
'PopStackCell',
'PopStackString',
'PopStackArray',
'IsStackEmpty',
'PopStack',
'OnPlayerRunCmd',
'BuildPath',
'OpenDirectory',
'ReadDirEntry',
'OpenFile',
'DeleteFile',
'ReadFileLine',
'ReadFile',
'ReadFileString',
'WriteFile',
'WriteFileString',
'WriteFileLine',
'ReadFileCell',
'WriteFileCell',
'IsEndOfFile',
'FileSeek',
'FilePosition',
'FileExists',
'RenameFile',
'DirExists',
'FileSize',
'FlushFile',
'RemoveDir',
'CreateDirectory',
'GetFileTime',
'LogToOpenFile',
'LogToOpenFileEx',
'SetNextMap',
'GetNextMap',
'ForceChangeLevel',
'GetMapHistorySize',
'GetMapHistory',
'GeoipCode2',
'GeoipCode3',
'GeoipCountry',
'MarkNativeAsOptional',
'RegClientCookie',
'FindClientCookie',
'SetClientCookie',
'GetClientCookie',
'SetAuthIdCookie',
'AreClientCookiesCached',
'OnClientCookiesCached',
'CookieMenuHandler',
'SetCookiePrefabMenu',
'SetCookieMenuItem',
'ShowCookieMenu',
'GetCookieIterator',
'ReadCookieIterator',
'GetCookieAccess',
'GetClientCookieTime',
'LoadTranslations',
'SetGlobalTransTarget',
'GetClientLanguage',
'GetServerLanguage',
'GetLanguageCount',
'GetLanguageInfo',
'SetClientLanguage',
'GetLanguageByCode',
'GetLanguageByName',
'CS_OnBuyCommand',
'CS_OnCSWeaponDrop',
'CS_OnGetWeaponPrice',
'CS_OnTerminateRound',
'CS_RespawnPlayer',
'CS_SwitchTeam',
'CS_DropWeapon',
'CS_TerminateRound',
'CS_GetTranslatedWeaponAlias',
'CS_GetWeaponPrice',
'CS_GetClientClanTag',
'CS_SetClientClanTag',
'LogToGame',
'SetRandomSeed',
'GetRandomFloat',
'GetRandomInt',
'IsMapValid',
'IsDedicatedServer',
'GetEngineTime',
'GetGameTime',
'GetGameTickCount',
'GetGameDescription',
'GetGameFolderName',
'GetCurrentMap',
'PrecacheModel',
'PrecacheSentenceFile',
'PrecacheDecal',
'PrecacheGeneric',
'IsModelPrecached',
'IsDecalPrecached',
'IsGenericPrecached',
'PrecacheSound',
'IsSoundPrecached',
'CreateDialog',
'GuessSDKVersion',
'PrintToChat',
'PrintToChatAll',
'PrintCenterText',
'PrintCenterTextAll',
'PrintHintText',
'PrintHintTextToAll',
'ShowVGUIPanel',
'CreateHudSynchronizer',
'SetHudTextParams',
'SetHudTextParamsEx',
'ShowSyncHudText',
'ClearSyncHud',
'ShowHudText',
'ShowMOTDPanel',
'DisplayAskConnectBox',
'EntIndexToEntRef',
'EntRefToEntIndex',
'MakeCompatEntRef',
'SetClientViewEntity',
'SetLightStyle',
'GetClientEyePosition',
'CreateDataPack',
'WritePackCell',
'WritePackFloat',
'WritePackString',
'ReadPackCell',
'ReadPackFloat',
'ReadPackString',
'ResetPack',
'GetPackPosition',
'SetPackPosition',
'IsPackReadable',
'LogMessage',
'LogMessageEx',
'LogToFile',
'LogToFileEx',
'LogAction',
'LogError',
'OnLogAction',
'GameLogHook',
'AddGameLogHook',
'RemoveGameLogHook',
'FindTeamByName',
'StartPrepSDKCall',
'PrepSDKCall_SetVirtual',
'PrepSDKCall_SetSignature',
'PrepSDKCall_SetFromConf',
'PrepSDKCall_SetReturnInfo',
'PrepSDKCall_AddParameter',
'EndPrepSDKCall',
'SDKCall']
if __name__ == '__main__':
import pprint
import re
import sys
import urllib.request, urllib.parse, urllib.error
# urllib ends up wanting to import a module called 'math' -- if
# pygments/lexers is in the path, this ends badly.
for i in range(len(sys.path)-1, -1, -1):
if sys.path[i].endswith('/lexers'):
del sys.path[i]
def get_version():
f = urllib.request.urlopen('http://docs.sourcemod.net/api/index.php')
r = re.compile(r'SourceMod v\.<b>([\d\.]+)</td>')
for line in f:
            m = r.search(line.decode('utf-8'))
if m is not None:
return m.groups()[0]
def get_sm_functions():
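        # Each entry in SMfuncs.js has the shape:  SMfunctions[N] = Array ("Name","...");
        # the regex below pulls the first quoted field (the function name) out of every line.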
f = urllib.request.urlopen('http://docs.sourcemod.net/api/SMfuncs.js')
r = re.compile(r'SMfunctions\[\d+\] = Array \("(?:public )?([^,]+)",".+"\);')
functions = []
for line in f:
            m = r.match(line.decode('utf-8'))
if m is not None:
functions.append(m.groups()[0])
return functions
def regenerate(filename, natives):
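        # Rewrite this module in place: keep everything before the FUNCTIONS list and
        # everything from the __main__ guard onward, and regenerate the list in between.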
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('FUNCTIONS = [')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('FUNCTIONS = %s\n\n' % pprint.pformat(natives))
f.write(footer)
f.close()
def run():
version = get_version()
print('> Downloading function index for SourceMod %s' % version)
functions = get_sm_functions()
print('> %d functions found:' % len(functions))
functionlist = []
for full_function_name in functions:
print('>> %s' % full_function_name)
functionlist.append(full_function_name)
regenerate(__file__, functionlist)
run()
|
mit
|
msingh172/youtube-dl
|
youtube_dl/extractor/udemy.py
|
33
|
7277
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class UdemyIE(InfoExtractor):
IE_NAME = 'udemy'
_VALID_URL = r'https?://www\.udemy\.com/(?:[^#]+#/lecture/|lecture/view/?\?lectureId=)(?P<id>\d+)'
_LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1'
_ORIGIN_URL = 'https://www.udemy.com'
_NETRC_MACHINE = 'udemy'
_TESTS = [{
'url': 'https://www.udemy.com/java-tutorial/#/lecture/172757',
'md5': '98eda5b657e752cf945d8445e261b5c5',
'info_dict': {
'id': '160614',
'ext': 'mp4',
'title': 'Introduction and Installation',
'description': 'md5:c0d51f6f21ef4ec65f091055a5eef876',
'duration': 579.29,
},
'skip': 'Requires udemy account credentials',
}]
def _handle_error(self, response):
if not isinstance(response, dict):
return
error = response.get('error')
if error:
error_str = 'Udemy returned error #%s: %s' % (error.get('code'), error.get('message'))
error_data = error.get('data')
if error_data:
error_str += ' - %s' % error_data.get('formErrors')
raise ExtractorError(error_str, expected=True)
def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata'):
headers = {
'X-Udemy-Snail-Case': 'true',
'X-Requested-With': 'XMLHttpRequest',
}
for cookie in self._downloader.cookiejar:
if cookie.name == 'client_id':
headers['X-Udemy-Client-Id'] = cookie.value
elif cookie.name == 'access_token':
headers['X-Udemy-Bearer-Token'] = cookie.value
if isinstance(url_or_request, compat_urllib_request.Request):
for header, value in headers.items():
url_or_request.add_header(header, value)
else:
url_or_request = compat_urllib_request.Request(url_or_request, headers=headers)
response = super(UdemyIE, self)._download_json(url_or_request, video_id, note)
self._handle_error(response)
return response
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
raise ExtractorError(
'Udemy account is required, use --username and --password options to provide account credentials.',
expected=True)
login_popup = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login popup')
def is_logged(webpage):
return any(p in webpage for p in ['href="https://www.udemy.com/user/logout/', '>Logout<'])
# already logged in
if is_logged(login_popup):
return
login_form = self._form_hidden_inputs('login-form', login_popup)
login_form.update({
'email': username.encode('utf-8'),
'password': password.encode('utf-8'),
})
request = compat_urllib_request.Request(
self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
request.add_header('Referer', self._ORIGIN_URL)
request.add_header('Origin', self._ORIGIN_URL)
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
if not is_logged(response):
error = self._html_search_regex(
r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>',
response, 'error message', default=None)
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
raise ExtractorError('Unable to log in')
def _real_extract(self, url):
lecture_id = self._match_id(url)
lecture = self._download_json(
'https://www.udemy.com/api-1.1/lectures/%s' % lecture_id,
lecture_id, 'Downloading lecture JSON')
asset_type = lecture.get('assetType') or lecture.get('asset_type')
if asset_type != 'Video':
raise ExtractorError(
'Lecture %s is not a video' % lecture_id, expected=True)
asset = lecture['asset']
stream_url = asset.get('streamUrl') or asset.get('stream_url')
mobj = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url)
if mobj:
return self.url_result(mobj.group(1), 'Youtube')
video_id = asset['id']
thumbnail = asset.get('thumbnailUrl') or asset.get('thumbnail_url')
duration = asset['data']['duration']
download_url = asset.get('downloadUrl') or asset.get('download_url')
video = download_url.get('Video') or download_url.get('video')
video_480p = download_url.get('Video480p') or download_url.get('video_480p')
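        # Each download variant is a list whose first entry is the direct media URL;
        # build the format list from the two variants exposed by the API.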
formats = [
{
'url': video_480p[0],
'format_id': '360p',
},
{
'url': video[0],
'format_id': '720p',
},
]
title = lecture['title']
description = lecture['description']
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats
}
class UdemyCourseIE(UdemyIE):
IE_NAME = 'udemy:course'
_VALID_URL = r'https?://www\.udemy\.com/(?P<coursepath>[\da-z-]+)'
_SUCCESSFULLY_ENROLLED = '>You have enrolled in this course!<'
_ALREADY_ENROLLED = '>You are already taking this course.<'
_TESTS = []
@classmethod
def suitable(cls, url):
return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
course_path = mobj.group('coursepath')
response = self._download_json(
'https://www.udemy.com/api-1.1/courses/%s' % course_path,
course_path, 'Downloading course JSON')
course_id = int(response['id'])
course_title = response['title']
webpage = self._download_webpage(
'https://www.udemy.com/course/subscribe/?courseId=%s' % course_id,
course_id, 'Enrolling in the course')
if self._SUCCESSFULLY_ENROLLED in webpage:
self.to_screen('%s: Successfully enrolled in' % course_id)
elif self._ALREADY_ENROLLED in webpage:
self.to_screen('%s: Already enrolled in' % course_id)
response = self._download_json(
'https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id,
course_id, 'Downloading course curriculum')
entries = [
self.url_result(
'https://www.udemy.com/%s/#/lecture/%s' % (course_path, asset['id']), 'Udemy')
            for asset in response if (asset.get('assetType') or asset.get('asset_type')) == 'Video'
]
return self.playlist_result(entries, course_id, course_title)
|
unlicense
|
rvalyi/OpenUpgrade
|
addons/account_analytic_analysis/account_analytic_analysis.py
|
6
|
46929
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from dateutil.relativedelta import relativedelta
import datetime
import logging
import time
import traceback
from openerp.osv import osv, fields
from openerp.osv.orm import intersect, except_orm
import openerp.tools
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
_logger = logging.getLogger(__name__)
class account_analytic_invoice_line(osv.osv):
_name = "account.analytic.invoice.line"
def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.quantity * line.price_unit
if line.analytic_account_id.pricelist_id:
cur = line.analytic_account_id.pricelist_id.currency_id
res[line.id] = self.pool.get('res.currency').round(cr, uid, cur, res[line.id])
return res
_columns = {
'product_id': fields.many2one('product.product','Product',required=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'name': fields.text('Description', required=True),
'quantity': fields.float('Quantity', required=True),
'uom_id': fields.many2one('product.uom', 'Unit of Measure',required=True),
'price_unit': fields.float('Unit Price', required=True),
'price_subtotal': fields.function(_amount_line, string='Sub Total', type="float",digits_compute= dp.get_precision('Account')),
}
_defaults = {
'quantity' : 1,
}
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', partner_id=False, price_unit=False, pricelist_id=False, company_id=None, context=None):
context = context or {}
uom_obj = self.pool.get('product.uom')
company_id = company_id or False
local_context = dict(context, company_id=company_id, force_company=company_id, pricelist=pricelist_id)
if not product:
return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
if partner_id:
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=local_context)
if part.lang:
local_context.update({'lang': part.lang})
result = {}
res = self.pool.get('product.product').browse(cr, uid, product, context=local_context)
if price_unit is not False:
price = price_unit
elif pricelist_id:
price = res.price
else:
price = res.list_price
if not name:
name = self.pool.get('product.product').name_get(cr, uid, [res.id], context=local_context)[0][1]
if res.description_sale:
name += '\n'+res.description_sale
result.update({'name': name or False,'uom_id': uom_id or res.uom_id.id or False, 'price_unit': price})
res_final = {'value':result}
if result['uom_id'] != res.uom_id.id:
selected_uom = uom_obj.browse(cr, uid, result['uom_id'], context=local_context)
new_price = uom_obj._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uom_id'])
res_final['value']['price_unit'] = new_price
return res_final
class account_analytic_account(osv.osv):
_name = "account.analytic.account"
_inherit = "account.analytic.account"
def _analysis_all(self, cr, uid, ids, fields, arg, context=None):
dp = 2
res = dict([(i, {}) for i in ids])
        parent_ids = tuple(ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
accounts = self.browse(cr, uid, ids, context=context)
for f in fields:
if f == 'user_ids':
cr.execute('SELECT MAX(id) FROM res_users')
max_user = cr.fetchone()[0]
if parent_ids:
cr.execute('SELECT DISTINCT("user") FROM account_analytic_analysis_summary_user ' \
'WHERE account_id IN %s AND unit_amount <> 0.0', (parent_ids,))
result = cr.fetchall()
else:
result = []
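                # Pack (account_id, user_id) into one synthetic id as
                # account_id * max_user + user_id, mirroring the
                # account_analytic_analysis_summary_user view defined below.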
for id in ids:
res[id][f] = [int((id * max_user) + x[0]) for x in result]
elif f == 'month_ids':
if parent_ids:
cr.execute('SELECT DISTINCT(month_id) FROM account_analytic_analysis_summary_month ' \
'WHERE account_id IN %s AND unit_amount <> 0.0', (parent_ids,))
result = cr.fetchall()
else:
result = []
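                # Month ids are encoded as account_id * 1000000 + YYYYMM, matching
                # the account_analytic_analysis_summary_month view defined below.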
for id in ids:
res[id][f] = [int(id * 1000000 + int(x[0])) for x in result]
elif f == 'last_worked_invoiced_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, MAX(date) \
FROM account_analytic_line \
WHERE account_id IN %s \
AND invoice_id IS NOT NULL \
GROUP BY account_analytic_line.account_id;", (parent_ids,))
for account_id, sum in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = sum
elif f == 'ca_to_invoice':
for id in ids:
res[id][f] = 0.0
res2 = {}
for account in accounts:
cr.execute("""
SELECT product_id, sum(amount), user_id, to_invoice, sum(unit_amount), product_uom_id, line.name
FROM account_analytic_line line
LEFT JOIN account_analytic_journal journal ON (journal.id = line.journal_id)
WHERE account_id = %s
AND journal.type != 'purchase'
AND invoice_id IS NULL
AND to_invoice IS NOT NULL
GROUP BY product_id, user_id, to_invoice, product_uom_id, line.name""", (account.id,))
res[account.id][f] = 0.0
for product_id, price, user_id, factor_id, qty, uom, line_name in cr.fetchall():
price = -price
if product_id:
price = self.pool.get('account.analytic.line')._get_invoice_price(cr, uid, account, product_id, user_id, qty, context)
factor = self.pool.get('hr_timesheet_invoice.factor').browse(cr, uid, factor_id, context=context)
res[account.id][f] += price * qty * (100-factor.factor or 0.0) / 100.0
# sum both result on account_id
for id in ids:
res[id][f] = round(res.get(id, {}).get(f, 0.0), dp) + round(res2.get(id, 0.0), 2)
elif f == 'last_invoice_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute ("SELECT account_analytic_line.account_id, \
DATE(MAX(account_invoice.date_invoice)) \
FROM account_analytic_line \
JOIN account_invoice \
ON account_analytic_line.invoice_id = account_invoice.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_line.invoice_id IS NOT NULL \
GROUP BY account_analytic_line.account_id",(parent_ids,))
for account_id, lid in cr.fetchall():
res[account_id][f] = lid
elif f == 'last_worked_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, MAX(date) \
FROM account_analytic_line \
WHERE account_id IN %s \
AND invoice_id IS NULL \
GROUP BY account_analytic_line.account_id",(parent_ids,))
for account_id, lwd in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = lwd
elif f == 'hours_qtt_non_invoiced':
for id in ids:
res[id][f] = 0.0
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, COALESCE(SUM(unit_amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_journal.type='general' \
AND invoice_id IS NULL \
AND to_invoice IS NOT NULL \
GROUP BY account_analytic_line.account_id;",(parent_ids,))
for account_id, sua in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = round(sua, dp)
for id in ids:
res[id][f] = round(res[id][f], dp)
elif f == 'hours_quantity':
for id in ids:
res[id][f] = 0.0
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, COALESCE(SUM(unit_amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_journal.type='general' \
GROUP BY account_analytic_line.account_id",(parent_ids,))
ff = cr.fetchall()
for account_id, hq in ff:
if account_id not in res:
res[account_id] = {}
res[account_id][f] = round(hq, dp)
for id in ids:
res[id][f] = round(res[id][f], dp)
elif f == 'ca_theorical':
# TODO Take care of pricelist and purchase !
for id in ids:
res[id][f] = 0.0
                # Warning: this computation does not take the pricelist into account;
                # it only considers the product's list_price.
if parent_ids:
cr.execute("""SELECT account_analytic_line.account_id AS account_id, \
COALESCE(SUM((account_analytic_line.unit_amount * pt.list_price) \
- (account_analytic_line.unit_amount * pt.list_price \
* hr.factor)), 0.0) AS somme
FROM account_analytic_line \
LEFT JOIN account_analytic_journal \
ON (account_analytic_line.journal_id = account_analytic_journal.id) \
JOIN product_product pp \
ON (account_analytic_line.product_id = pp.id) \
JOIN product_template pt \
ON (pp.product_tmpl_id = pt.id) \
JOIN account_analytic_account a \
ON (a.id=account_analytic_line.account_id) \
JOIN hr_timesheet_invoice_factor hr \
ON (hr.id=a.to_invoice) \
WHERE account_analytic_line.account_id IN %s \
AND a.to_invoice IS NOT NULL \
AND account_analytic_journal.type IN ('purchase', 'general')
GROUP BY account_analytic_line.account_id""",(parent_ids,))
for account_id, sum in cr.fetchall():
res[account_id][f] = round(sum, dp)
return res
def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
res_final = {}
        child_ids = tuple(ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for i in child_ids:
res[i] = 0.0
if not child_ids:
return res
if child_ids:
#Search all invoice lines not in cancelled state that refer to this analytic account
inv_line_obj = self.pool.get("account.invoice.line")
inv_lines = inv_line_obj.search(cr, uid, ['&', ('account_analytic_id', 'in', child_ids), ('invoice_id.state', '!=', 'cancel')], context=context)
for line in inv_line_obj.browse(cr, uid, inv_lines, context=context):
res[line.account_analytic_id.id] += line.price_subtotal
for acc in self.browse(cr, uid, res.keys(), context=context):
res[acc.id] = res[acc.id] - (acc.timesheet_ca_invoiced or 0.0)
res_final = res
return res_final
def _total_cost_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
res_final = {}
        child_ids = tuple(ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for i in child_ids:
res[i] = 0.0
if not child_ids:
return res
if child_ids:
cr.execute("""SELECT account_analytic_line.account_id, COALESCE(SUM(amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND amount<0 \
GROUP BY account_analytic_line.account_id""",(child_ids,))
for account_id, sum in cr.fetchall():
res[account_id] = round(sum,2)
res_final = res
return res_final
def _remaining_hours_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.quantity_max != 0:
res[account.id] = account.quantity_max - account.hours_quantity
else:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _remaining_hours_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = max(account.hours_qtt_est - account.timesheet_ca_invoiced, account.ca_to_invoice)
return res
def _hours_qtt_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.hours_quantity - account.hours_qtt_non_invoiced
if res[account.id] < 0:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _revenue_per_hour_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.hours_qtt_invoiced == 0:
res[account.id]=0.0
else:
res[account.id] = account.ca_invoiced / account.hours_qtt_invoiced
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _real_margin_rate_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.ca_invoiced == 0:
res[account.id]=0.0
elif account.total_cost != 0.0:
res[account.id] = -(account.real_margin / account.total_cost) * 100
else:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _fix_price_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
sale_obj = self.pool.get('sale.order')
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
sale_ids = sale_obj.search(cr, uid, [('project_id','=', account.id), ('state', '=', 'manual')], context=context)
for sale in sale_obj.browse(cr, uid, sale_ids, context=context):
res[account.id] += sale.amount_untaxed
for invoice in sale.invoice_ids:
if invoice.state != 'cancel':
res[account.id] -= invoice.amount_untaxed
return res
def _timesheet_ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
lines_obj = self.pool.get('account.analytic.line')
res = {}
inv_ids = []
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
line_ids = lines_obj.search(cr, uid, [('account_id','=', account.id), ('invoice_id','!=',False), ('to_invoice','!=', False), ('journal_id.type', '=', 'general')], context=context)
for line in lines_obj.browse(cr, uid, line_ids, context=context):
if line.invoice_id not in inv_ids:
inv_ids.append(line.invoice_id)
res[account.id] += line.invoice_id.amount_untaxed
return res
def _remaining_ca_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = max(account.amount_max - account.ca_invoiced, account.fix_price_to_invoice)
return res
def _real_margin_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.ca_invoiced + account.total_cost
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _theorical_margin_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.ca_theorical + account.total_cost
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _is_overdue_quantity(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0)
for record in self.browse(cr, uid, ids, context=context):
if record.quantity_max > 0.0:
result[record.id] = int(record.hours_quantity >= record.quantity_max)
else:
result[record.id] = 0
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
result = set()
for line in self.pool.get('account.analytic.line').browse(cr, uid, ids, context=context):
result.add(line.account_id.id)
return list(result)
def _get_total_estimation(self, account):
tot_est = 0.0
if account.fix_price_invoices:
tot_est += account.amount_max
if account.invoice_on_timesheets:
tot_est += account.hours_qtt_est
return tot_est
def _get_total_invoiced(self, account):
total_invoiced = 0.0
if account.fix_price_invoices:
total_invoiced += account.ca_invoiced
if account.invoice_on_timesheets:
total_invoiced += account.timesheet_ca_invoiced
return total_invoiced
def _get_total_remaining(self, account):
total_remaining = 0.0
if account.fix_price_invoices:
total_remaining += account.remaining_ca
if account.invoice_on_timesheets:
total_remaining += account.remaining_hours_to_invoice
return total_remaining
def _get_total_toinvoice(self, account):
total_toinvoice = 0.0
if account.fix_price_invoices:
total_toinvoice += account.fix_price_to_invoice
if account.invoice_on_timesheets:
total_toinvoice += account.ca_to_invoice
return total_toinvoice
def _sum_of_fields(self, cr, uid, ids, name, arg, context=None):
res = dict([(i, {}) for i in ids])
for account in self.browse(cr, uid, ids, context=context):
res[account.id]['est_total'] = self._get_total_estimation(account)
res[account.id]['invoiced_total'] = self._get_total_invoiced(account)
res[account.id]['remaining_total'] = self._get_total_remaining(account)
res[account.id]['toinvoice_total'] = self._get_total_toinvoice(account)
return res
_columns = {
'is_overdue_quantity' : fields.function(_is_overdue_quantity, method=True, type='boolean', string='Overdue Quantity',
store={
'account.analytic.line' : (_get_analytic_account, None, 20),
'account.analytic.account': (lambda self, cr, uid, ids, c=None: ids, ['quantity_max'], 10),
}),
'ca_invoiced': fields.function(_ca_invoiced_calc, type='float', string='Invoiced Amount',
help="Total customer invoiced amount for this account.",
digits_compute=dp.get_precision('Account')),
'total_cost': fields.function(_total_cost_calc, type='float', string='Total Costs',
help="Total of costs for this account. It includes real costs (from invoices) and indirect costs, like time spent on timesheets.",
digits_compute=dp.get_precision('Account')),
'ca_to_invoice': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Uninvoiced Amount',
help="If invoice from analytic account, the remaining amount you can invoice to the customer based on the total costs.",
digits_compute=dp.get_precision('Account')),
'ca_theorical': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Theoretical Revenue',
help="Based on the costs you had on the project, what would have been the revenue if all these costs have been invoiced at the normal sale price provided by the pricelist.",
digits_compute=dp.get_precision('Account')),
'hours_quantity': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Total Worked Time',
help="Number of time you spent on the analytic account (from timesheet). It computes quantities on all journal of type 'general'."),
'last_invoice_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Last Invoice Date',
help="If invoice from the costs, this is the date of the latest invoiced."),
'last_worked_invoiced_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Date of Last Invoiced Cost',
help="If invoice from the costs, this is the date of the latest work or cost that have been invoiced."),
'last_worked_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Date of Last Cost/Work',
help="Date of the latest work done on this account."),
'hours_qtt_non_invoiced': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Uninvoiced Time',
help="Number of time (hours/days) (from journal of type 'general') that can be invoiced if you invoice based on analytic account."),
'hours_qtt_invoiced': fields.function(_hours_qtt_invoiced_calc, type='float', string='Invoiced Time',
help="Number of time (hours/days) that can be invoiced plus those that already have been invoiced."),
'remaining_hours': fields.function(_remaining_hours_calc, type='float', string='Remaining Time',
help="Computed using the formula: Maximum Time - Total Worked Time"),
'remaining_hours_to_invoice': fields.function(_remaining_hours_to_invoice_calc, type='float', string='Remaining Time',
help="Computed using the formula: Expected on timesheets - Total invoiced on timesheets"),
'fix_price_to_invoice': fields.function(_fix_price_to_invoice_calc, type='float', string='Remaining Time',
help="Sum of quotations for this contract."),
'timesheet_ca_invoiced': fields.function(_timesheet_ca_invoiced_calc, type='float', string='Remaining Time',
help="Sum of timesheet lines invoiced for this contract."),
'remaining_ca': fields.function(_remaining_ca_calc, type='float', string='Remaining Revenue',
help="Computed using the formula: Max Invoice Price - Invoiced Amount.",
digits_compute=dp.get_precision('Account')),
'revenue_per_hour': fields.function(_revenue_per_hour_calc, type='float', string='Revenue per Time (real)',
help="Computed using the formula: Invoiced Amount / Total Time",
digits_compute=dp.get_precision('Account')),
'real_margin': fields.function(_real_margin_calc, type='float', string='Real Margin',
help="Computed using the formula: Invoiced Amount - Total Costs.",
digits_compute=dp.get_precision('Account')),
'theorical_margin': fields.function(_theorical_margin_calc, type='float', string='Theoretical Margin',
help="Computed using the formula: Theoretical Revenue - Total Costs",
digits_compute=dp.get_precision('Account')),
'real_margin_rate': fields.function(_real_margin_rate_calc, type='float', string='Real Margin Rate (%)',
help="Computes using the formula: (Real Margin / Total Costs) * 100.",
digits_compute=dp.get_precision('Account')),
'fix_price_invoices' : fields.boolean('Fixed Price'),
'invoice_on_timesheets' : fields.boolean("On Timesheets"),
'month_ids': fields.function(_analysis_all, multi='analytic_analysis', type='many2many', relation='account_analytic_analysis.summary.month', string='Month'),
'user_ids': fields.function(_analysis_all, multi='analytic_analysis', type="many2many", relation='account_analytic_analysis.summary.user', string='User'),
'hours_qtt_est': fields.float('Estimation of Hours to Invoice'),
'est_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Estimation"),
'invoiced_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Invoiced"),
'remaining_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Remaining", help="Expectation of remaining income for this contract. Computed as the sum of remaining subtotals which, in turn, are computed as the maximum between '(Estimation - Invoiced)' and 'To Invoice' amounts"),
        'toinvoice_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total to Invoice", help="Sum of everything that could be invoiced for this contract."),
'recurring_invoice_line_ids': fields.one2many('account.analytic.invoice.line', 'analytic_account_id', 'Invoice Lines'),
'recurring_invoices' : fields.boolean('Generate recurring invoices automatically'),
'recurring_rule_type': fields.selection([
('daily', 'Day(s)'),
('weekly', 'Week(s)'),
('monthly', 'Month(s)'),
('yearly', 'Year(s)'),
            ], 'Recurrency', help="Invoices automatically repeat at the specified interval."),
'recurring_interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
'recurring_next_date': fields.date('Date of Next Invoice'),
}
_defaults = {
'recurring_interval': 1,
'recurring_next_date': lambda *a: time.strftime('%Y-%m-%d'),
'recurring_rule_type':'monthly'
}
def open_sale_order_lines(self,cr,uid,ids,context=None):
if context is None:
context = {}
sale_ids = self.pool.get('sale.order').search(cr,uid,[('project_id','=',context.get('search_default_project_id',False)),('partner_id','in',context.get('search_default_partner_id',False))])
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Sales Order Lines to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'context': context,
'domain' : [('order_id','in',sale_ids)],
'res_model': 'sale.order.line',
'nodestroy': True,
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, fix_price_invoices=False, invoice_on_timesheets=False, recurring_invoices=False, context=None):
if not template_id:
return {}
obj_analytic_line = self.pool.get('account.analytic.invoice.line')
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
template = self.browse(cr, uid, template_id, context=context)
if not fix_price_invoices:
res['value']['fix_price_invoices'] = template.fix_price_invoices
res['value']['amount_max'] = template.amount_max
if not invoice_on_timesheets:
res['value']['invoice_on_timesheets'] = template.invoice_on_timesheets
res['value']['hours_qtt_est'] = template.hours_qtt_est
if template.to_invoice.id:
res['value']['to_invoice'] = template.to_invoice.id
if template.pricelist_id.id:
res['value']['pricelist_id'] = template.pricelist_id.id
if not recurring_invoices:
invoice_line_ids = []
for x in template.recurring_invoice_line_ids:
invoice_line_ids.append((0, 0, {
'product_id': x.product_id.id,
'uom_id': x.uom_id.id,
'name': x.name,
'quantity': x.quantity,
'price_unit': x.price_unit,
'analytic_account_id': x.analytic_account_id and x.analytic_account_id.id or False,
}))
res['value']['recurring_invoices'] = template.recurring_invoices
res['value']['recurring_interval'] = template.recurring_interval
res['value']['recurring_rule_type'] = template.recurring_rule_type
res['value']['recurring_invoice_line_ids'] = invoice_line_ids
return res
def onchange_recurring_invoices(self, cr, uid, ids, recurring_invoices, date_start=False, context=None):
value = {}
if date_start and recurring_invoices:
value = {'value': {'recurring_next_date': date_start}}
return value
def cron_account_analytic_account(self, cr, uid, context=None):
if context is None:
context = {}
remind = {}
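        # remind maps a manager's user id to {key: {partner: [accounts]}}; it is used
        # below to send one reminder email per account manager.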
def fill_remind(key, domain, write_pending=False):
base_domain = [
('type', '=', 'contract'),
('partner_id', '!=', False),
('manager_id', '!=', False),
('manager_id.email', '!=', False),
]
base_domain.extend(domain)
accounts_ids = self.search(cr, uid, base_domain, context=context, order='name asc')
accounts = self.browse(cr, uid, accounts_ids, context=context)
for account in accounts:
if write_pending:
account.write({'state' : 'pending'}, context=context)
remind_user = remind.setdefault(account.manager_id.id, {})
remind_type = remind_user.setdefault(key, {})
                remind_type.setdefault(account.partner_id, []).append(account)
# Already expired
fill_remind("old", [('state', 'in', ['pending'])])
# Expires now
fill_remind("new", [('state', 'in', ['draft', 'open']), '|', '&', ('date', '!=', False), ('date', '<=', time.strftime('%Y-%m-%d')), ('is_overdue_quantity', '=', True)], True)
# Expires in less than 30 days
fill_remind("future", [('state', 'in', ['draft', 'open']), ('date', '!=', False), ('date', '<', (datetime.datetime.now() + datetime.timedelta(30)).strftime("%Y-%m-%d"))])
context['base_url'] = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
context['action_id'] = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_analytic_analysis', 'action_account_analytic_overdue_all')[1]
template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_analytic_analysis', 'account_analytic_cron_email_template')[1]
for user_id, data in remind.items():
context["data"] = data
_logger.debug("Sending reminder to uid %s", user_id)
self.pool.get('email.template').send_mail(cr, uid, template_id, user_id, force_send=True, context=context)
return True
def onchange_invoice_on_timesheets(self, cr, uid, ids, invoice_on_timesheets, context=None):
if not invoice_on_timesheets:
return {'value': {'to_invoice': False}}
result = {'value': {'use_timesheets': True}}
try:
to_invoice = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'hr_timesheet_invoice', 'timesheet_invoice_factor1')
result['value']['to_invoice'] = to_invoice[1]
except ValueError:
pass
return result
def hr_to_invoice_timesheets(self, cr, uid, ids, context=None):
domain = [('invoice_id','=',False),('to_invoice','!=',False), ('journal_id.type', '=', 'general'), ('account_id', 'in', ids)]
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Timesheets to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'domain' : domain,
'res_model': 'account.analytic.line',
'nodestroy': True,
}
def _prepare_invoice_data(self, cr, uid, contract, context=None):
context = context or {}
journal_obj = self.pool.get('account.journal')
if not contract.partner_id:
raise osv.except_osv(_('No Customer Defined!'),_("You must first select a Customer for Contract %s!") % contract.name )
fpos = contract.partner_id.property_account_position or False
journal_ids = journal_obj.search(cr, uid, [('type', '=','sale'),('company_id', '=', contract.company_id.id or False)], limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Please define a sale journal for the company "%s".') % (contract.company_id.name or '', ))
partner_payment_term = contract.partner_id.property_payment_term and contract.partner_id.property_payment_term.id or False
currency_id = False
if contract.pricelist_id:
currency_id = contract.pricelist_id.currency_id.id
elif contract.partner_id.property_product_pricelist:
currency_id = contract.partner_id.property_product_pricelist.currency_id.id
elif contract.company_id:
currency_id = contract.company_id.currency_id.id
invoice = {
'account_id': contract.partner_id.property_account_receivable.id,
'type': 'out_invoice',
'partner_id': contract.partner_id.id,
'currency_id': currency_id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'date_invoice': contract.recurring_next_date,
'origin': contract.code,
'fiscal_position': fpos and fpos.id,
'payment_term': partner_payment_term,
'company_id': contract.company_id.id or False,
}
return invoice
def _prepare_invoice_lines(self, cr, uid, contract, fiscal_position_id, context=None):
fpos_obj = self.pool.get('account.fiscal.position')
fiscal_position = fpos_obj.browse(cr, uid, fiscal_position_id, context=context)
invoice_lines = []
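        # Map each recurring line's income account and taxes through the contract's
        # fiscal position before building the invoice line values.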
for line in contract.recurring_invoice_line_ids:
res = line.product_id
account_id = res.property_account_income.id
if not account_id:
account_id = res.categ_id.property_account_income_categ.id
account_id = fpos_obj.map_account(cr, uid, fiscal_position, account_id)
taxes = res.taxes_id or False
tax_id = fpos_obj.map_tax(cr, uid, fiscal_position, taxes)
invoice_lines.append((0, 0, {
'name': line.name,
'account_id': account_id,
'account_analytic_id': contract.id,
'price_unit': line.price_unit or 0.0,
'quantity': line.quantity,
'uos_id': line.uom_id.id or False,
'product_id': line.product_id.id or False,
'invoice_line_tax_id': [(6, 0, tax_id)],
}))
return invoice_lines
def _prepare_invoice(self, cr, uid, contract, context=None):
invoice = self._prepare_invoice_data(cr, uid, contract, context=context)
invoice['invoice_line'] = self._prepare_invoice_lines(cr, uid, contract, invoice['fiscal_position'], context=context)
return invoice
def recurring_create_invoice(self, cr, uid, ids, context=None):
return self._recurring_create_invoice(cr, uid, ids, context=context)
def _cron_recurring_create_invoice(self, cr, uid, context=None):
return self._recurring_create_invoice(cr, uid, [], automatic=True, context=context)
def _recurring_create_invoice(self, cr, uid, ids, automatic=False, context=None):
context = context or {}
invoice_ids = []
current_date = time.strftime('%Y-%m-%d')
if ids:
contract_ids = ids
else:
contract_ids = self.search(cr, uid, [('recurring_next_date','<=', current_date), ('state','=', 'open'), ('recurring_invoices','=', True), ('type', '=', 'contract')])
for contract in self.browse(cr, uid, contract_ids, context=context):
try:
invoice_values = self._prepare_invoice(cr, uid, contract, context=context)
invoice_ids.append(self.pool['account.invoice'].create(cr, uid, invoice_values, context=context))
next_date = datetime.datetime.strptime(contract.recurring_next_date or current_date, "%Y-%m-%d")
interval = contract.recurring_interval
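                # Advance the next invoice date by the configured interval according
                # to the contract's recurrence rule (day/week/month/year).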
if contract.recurring_rule_type == 'daily':
new_date = next_date+relativedelta(days=+interval)
elif contract.recurring_rule_type == 'weekly':
new_date = next_date+relativedelta(weeks=+interval)
elif contract.recurring_rule_type == 'monthly':
new_date = next_date+relativedelta(months=+interval)
else:
new_date = next_date+relativedelta(years=+interval)
self.write(cr, uid, [contract.id], {'recurring_next_date': new_date.strftime('%Y-%m-%d')}, context=context)
if automatic:
cr.commit()
except Exception:
if automatic:
cr.rollback()
_logger.error(traceback.format_exc())
else:
raise
return invoice_ids
class account_analytic_account_summary_user(osv.osv):
_name = "account_analytic_analysis.summary.user"
_description = "Hours Summary by User"
_order='user'
_auto = False
_rec_name = 'user'
def _unit_amount(self, cr, uid, ids, name, arg, context=None):
res = {}
account_obj = self.pool.get('account.analytic.account')
cr.execute('SELECT MAX(id) FROM res_users')
max_user = cr.fetchone()[0]
account_ids = [int(str(x/max_user - (x%max_user == 0 and 1 or 0))) for x in ids]
user_ids = [int(str(x-((x/max_user - (x%max_user == 0 and 1 or 0)) *max_user))) for x in ids]
        parent_ids = tuple(account_ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
if parent_ids:
cr.execute('SELECT id, unit_amount ' \
'FROM account_analytic_analysis_summary_user ' \
'WHERE account_id IN %s ' \
'AND "user" IN %s',(parent_ids, tuple(user_ids),))
for sum_id, unit_amount in cr.fetchall():
res[sum_id] = unit_amount
for id in ids:
res[id] = round(res.get(id, 0.0), 2)
return res
_columns = {
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'unit_amount': fields.float('Total Time'),
'user': fields.many2one('res.users', 'User'),
}
def init(self, cr):
openerp.tools.sql.drop_view_if_exists(cr, 'account_analytic_analysis_summary_user')
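        # The view's id packs (account_id, user_id) into a single integer:
        # id = account_id * max(res_users.id) + user_id, which _unit_amount above decodes.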
cr.execute('''CREATE OR REPLACE VIEW account_analytic_analysis_summary_user AS (
with mu as
(select max(id) as max_user from res_users)
, lu AS
(SELECT
l.account_id AS account_id,
coalesce(l.user_id, 0) AS user_id,
SUM(l.unit_amount) AS unit_amount
FROM account_analytic_line AS l,
account_analytic_journal AS j
WHERE (j.type = 'general' ) and (j.id=l.journal_id)
GROUP BY l.account_id, l.user_id
)
select (lu.account_id * mu.max_user) + lu.user_id as id,
lu.account_id as account_id,
lu.user_id as "user",
unit_amount
from lu, mu)''')
class account_analytic_account_summary_month(osv.osv):
_name = "account_analytic_analysis.summary.month"
_description = "Hours summary by month"
_auto = False
_rec_name = 'month'
_columns = {
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'unit_amount': fields.float('Total Time'),
'month': fields.char('Month', size=32, readonly=True),
}
def init(self, cr):
openerp.tools.sql.drop_view_if_exists(cr, 'account_analytic_analysis_summary_month')
cr.execute('CREATE VIEW account_analytic_analysis_summary_month AS (' \
'SELECT ' \
'(TO_NUMBER(TO_CHAR(d.month, \'YYYYMM\'), \'999999\') + (d.account_id * 1000000::bigint))::bigint AS id, ' \
'd.account_id AS account_id, ' \
'TO_CHAR(d.month, \'Mon YYYY\') AS month, ' \
'TO_NUMBER(TO_CHAR(d.month, \'YYYYMM\'), \'999999\') AS month_id, ' \
'COALESCE(SUM(l.unit_amount), 0.0) AS unit_amount ' \
'FROM ' \
'(SELECT ' \
'd2.account_id, ' \
'd2.month ' \
'FROM ' \
'(SELECT ' \
'a.id AS account_id, ' \
'l.month AS month ' \
'FROM ' \
'(SELECT ' \
'DATE_TRUNC(\'month\', l.date) AS month ' \
'FROM account_analytic_line AS l, ' \
'account_analytic_journal AS j ' \
'WHERE j.type = \'general\' ' \
'GROUP BY DATE_TRUNC(\'month\', l.date) ' \
') AS l, ' \
'account_analytic_account AS a ' \
'GROUP BY l.month, a.id ' \
') AS d2 ' \
'GROUP BY d2.account_id, d2.month ' \
') AS d ' \
'LEFT JOIN ' \
'(SELECT ' \
'l.account_id AS account_id, ' \
'DATE_TRUNC(\'month\', l.date) AS month, ' \
'SUM(l.unit_amount) AS unit_amount ' \
'FROM account_analytic_line AS l, ' \
'account_analytic_journal AS j ' \
'WHERE (j.type = \'general\') and (j.id=l.journal_id) ' \
'GROUP BY l.account_id, DATE_TRUNC(\'month\', l.date) ' \
') AS l '
'ON (' \
'd.account_id = l.account_id ' \
'AND d.month = l.month' \
') ' \
'GROUP BY d.month, d.account_id ' \
')')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
bdang2012/taiga-back
|
taiga/projects/attachments/admin.py
|
21
|
1362
|
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from django.contrib.contenttypes import generic
from . import models
class AttachmentAdmin(admin.ModelAdmin):
list_display = ["id", "project", "attached_file", "owner", "content_type", "content_object"]
list_display_links = ["id", "attached_file",]
list_filter = ["project", "content_type"]
class AttachmentInline(generic.GenericTabularInline):
model = models.Attachment
fields = ("attached_file", "owner")
extra = 0
admin.site.register(models.Attachment, AttachmentAdmin)
|
agpl-3.0
|
Phil-LiDAR2-Geonode/pl2-geonode
|
geonode/contrib/geosites/tests.py
|
22
|
9742
|
import json
from tastypie.test import ResourceTestCase
from django.test.utils import override_settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from guardian.shortcuts import get_anonymous_user
from guardian.shortcuts import remove_perm
from geonode.base.populate_test_data import create_models
from geonode.people.models import Profile
from geonode.layers.models import Layer
from geonode.groups.models import Group
from .populate_sites_data import create_sites
from .models import SiteResources, SitePeople
@override_settings(SITE_NAME='Slave')
@override_settings(SITE_ID=2)
class SiteTests(ResourceTestCase):
"""Tests the sites functionality
"""
fixtures = ['bobby']
def setUp(self):
super(SiteTests, self).setUp()
create_sites()
create_models(type='layer')
self.user = 'admin'
self.passwd = 'admin'
self.admin = Profile.objects.get(username='admin')
self.bobby = Profile.objects.get(username='bobby')
self.master_site = Site.objects.get(name='Master')
self.slave_site = Site.objects.get(name='Slave')
self.api_site_url = reverse('api_dispatch_list',
kwargs={
'api_name': 'api',
'resource_name': 'sites'})
self.api_slave_detail_url = reverse('api_dispatch_detail',
kwargs={
'api_name': 'api',
'resource_name': 'sites',
'pk': self.slave_site.pk})
self.api_layer_url = reverse('api_dispatch_list',
kwargs={
'api_name': 'api',
'resource_name': 'layers'})
self.anonymous_user = get_anonymous_user()
self.slave2_data = {'name': 'Slave2',
'domain': 'slave2.test.org'}
# all layers belong to slave but let's remove one resource from it (CA)
SiteResources.objects.get(site=self.slave_site).resources.remove(Layer.objects.get(typename='geonode:CA'))
def test_create_new_site(self):
"""
Test the creation of new sites
"""
# Test unauthenticated first
response = self.api_client.post(
self.api_site_url,
data=self.slave2_data)
# Check the correct http response
self.assertEqual(response.status_code, 401)
# Test as admin
self.api_client.client.login(username=self.user, password=self.passwd)
response = self.api_client.post(
self.api_site_url,
format='json',
data=self.slave2_data)
# Check the correct http response
self.assertEqual(response.status_code, 201)
# Check the object is created in the db
self.assertTrue(Site.objects.filter(name='Slave2').exists())
def test_delete_site(self):
"""
Test the deletion of sites also removes the SiteResources
"""
# Test unauthenticated first
response = self.client.delete(
self.api_slave_detail_url,
data={'name': 'Slave'})
# Check the correct http response
self.assertEqual(response.status_code, 401)
# Test as admin
self.client.login(username=self.user, password=self.passwd)
# Check the correct http response
self.assertHttpAccepted(self.client.delete(self.api_slave_detail_url))
# Check the object is created in the db
self.assertFalse(Site.objects.filter(name='Slave').exists())
# Check that the SiteResources has been deleted as well
self.assertFalse(SiteResources.objects.filter(site=self.slave_site).exists())
def test_layer_detail_page_slave_site(self):
"""
        Test that the detail page is not found if the resource is on another site
"""
# test that the CA layer detail page, that does not belong to the SlaveSite, is not found
self.client.login(username=self.user, password=self.passwd)
response = self.client.get(reverse('layer_detail', args=[Layer.objects.all()[0].typename]))
self.assertEqual(response.status_code, 404)
@override_settings(SITE_ID=1)
def test_layer_detail_page_master_site(self):
"""
Test that the detail page is allowed from the master site
"""
        # test that the layer detail page, even when the layer is not on the slave site, can be reached from the master site
self.client.login(username=self.user, password=self.passwd)
response = self.client.get(reverse('layer_detail', args=[Layer.objects.all()[0].typename]))
self.assertEqual(response.status_code, 200)
def test_master_site_all_layers(self):
"""
Test that the master site owns all the layers available in the database
"""
self.assertEqual(SiteResources.objects.get(site=self.master_site).resources.count(), 8)
def test_normal_site_subset_layers(self):
"""
        Test that a normal site can see the correct subset of layers
"""
self.assertEqual(SiteResources.objects.get(site=self.slave_site).resources.count(), 7)
def test_non_superuser_normal_site_subset_layers(self):
"""
        Test that a non-superuser, who can see different layers on different sites,
        sees the correct subset of layers on a normal site
"""
# Remove some view permissions for bobby
anonymous_group = Group.objects.get(name='anonymous')
for layer in Layer.objects.all()[:3]:
remove_perm('view_resourcebase', self.bobby, layer.get_self_resource())
remove_perm('view_resourcebase', anonymous_group, layer.get_self_resource())
self.client.login(username='bobby', password='bob')
response = self.client.get(self.api_layer_url)
self.assertEquals(len(json.loads(response.content)['objects']), 5)
# now test with superuser
self.client.logout()
self.client.login(username=self.user, password=self.passwd)
response = self.client.get(self.api_layer_url)
self.assertEquals(len(json.loads(response.content)['objects']), 7)
def test_new_site_cant_see_layers(self):
"""
Test that a new site can't see any layer
"""
# Create a Slave2 Site
slave2 = Site.objects.create(name='Slave2', domain="slave2.test.org")
self.assertEqual(SiteResources.objects.get(site=slave2).resources.count(), 0)
def test_layer_acls_slave_site(self):
"""Test that the layer_acls overridden function behaves correctly on a slave site"""
acls_site_url = reverse('layer_acls_dep')
# first with unauthenticated user
response = self.client.get(acls_site_url)
self.assertValidJSONResponse(response)
self.assertEqual(len(self.deserialize(response)['rw']), 0)
self.assertEqual(len(self.deserialize(response)['ro']), 7)
# then as bobby
self.client.login(username='bobby', password='bob')
response = self.client.get(acls_site_url)
self.assertValidJSONResponse(response)
self.assertEqual(len(self.deserialize(response)['rw']), 0)
self.assertEqual(len(self.deserialize(response)['ro']), 7)
# then as admin
self.client.logout()
self.client.login(username=self.user, password=self.passwd)
response = self.client.get(acls_site_url)
self.assertValidJSONResponse(response)
self.assertEqual(len(self.deserialize(response)['rw']), 7)
self.assertEqual(len(self.deserialize(response)['ro']), 0)
@override_settings(SITE_ID=1)
def test_layer_acls_master_site(self):
"""Test that the layer_acls overridden function behaves correctly on a master site"""
acls_site_url = reverse('layer_acls_dep')
# first with unauthenticated user
response = self.client.get(acls_site_url)
self.assertValidJSONResponse(response)
self.assertEqual(len(self.deserialize(response)['rw']), 0)
self.assertEqual(len(self.deserialize(response)['ro']), 8)
# then as bobby
self.client.login(username='bobby', password='bob')
response = self.client.get(acls_site_url)
self.assertValidJSONResponse(response)
self.assertEqual(len(self.deserialize(response)['rw']), 0)
self.assertEqual(len(self.deserialize(response)['ro']), 8)
# then as admin
self.client.logout()
self.client.login(username=self.user, password=self.passwd)
response = self.client.get(acls_site_url)
self.assertValidJSONResponse(response)
self.assertEqual(len(self.deserialize(response)['rw']), 8)
self.assertEqual(len(self.deserialize(response)['ro']), 0)
def test_people_belong_to_correct_site(self):
"""Test that the users belong to the correct site"""
master_siteppl = SitePeople.objects.get(site=self.master_site)
slave_siteppl = SitePeople.objects.get(site=self.slave_site)
master_siteppl.people.add(self.admin)
master_siteppl.people.add(self.bobby)
slave_siteppl.people.add(self.admin)
slave_siteppl.people.add(self.bobby)
self.assertEqual(master_siteppl.people.count(), 2)
self.assertEqual(slave_siteppl.people.count(), 7)
Profile.objects.create(username='testsite', password='test1')
self.assertEqual(master_siteppl.people.count(), 2)
self.assertEqual(slave_siteppl.people.count(), 8)
|
gpl-3.0
|
StuartLittlefair/astropy
|
astropy/table/operations.py
|
3
|
54015
|
"""
High-level table operations:
- join()
- setdiff()
- hstack()
- vstack()
- dstack()
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import collections
import itertools
from collections import OrderedDict, Counter
from collections.abc import Mapping, Sequence
import numpy as np
from astropy.utils import metadata
from .table import Table, QTable, Row, Column, MaskedColumn
from astropy.units import Quantity
from . import _np_utils
from .np_utils import fix_column_name, TableMergeError
__all__ = ['join', 'setdiff', 'hstack', 'vstack', 'unique',
'join_skycoord', 'join_distance']
__doctest_requires__ = {'join_skycoord': ['scipy'], 'join_distance': ['scipy']}
def _merge_table_meta(out, tables, metadata_conflicts='warn'):
out_meta = deepcopy(tables[0].meta)
for table in tables[1:]:
out_meta = metadata.merge(out_meta, table.meta, metadata_conflicts=metadata_conflicts)
out.meta.update(out_meta)
def _get_list_of_tables(tables):
"""
Check that tables is a Table or sequence of Tables. Returns the
corresponding list of Tables.
"""
# Make sure we have a list of things
if not isinstance(tables, Sequence):
tables = [tables]
# Make sure there is something to stack
if len(tables) == 0:
raise ValueError('no values provided to stack.')
# Convert inputs (Table, Row, or anything column-like) to Tables.
# Special case that Quantity converts to a QTable.
for ii, val in enumerate(tables):
if isinstance(val, Table):
pass
elif isinstance(val, Row):
tables[ii] = Table(val)
elif isinstance(val, Quantity):
tables[ii] = QTable([val])
else:
try:
tables[ii] = Table([val])
except (ValueError, TypeError):
raise TypeError(f'cannot convert {val} to table column.')
return tables
def _get_out_class(objs):
"""
From a list of input objects ``objs`` get merged output object class.
This is just taken as the deepest subclass. This doesn't handle complicated
inheritance schemes, but as a special case, classes which share ``info``
are taken to be compatible.
"""
out_class = objs[0].__class__
for obj in objs[1:]:
if issubclass(obj.__class__, out_class):
out_class = obj.__class__
if any(not (issubclass(out_class, obj.__class__)
or out_class.info is obj.__class__.info) for obj in objs):
raise ValueError('unmergeable object classes {}'
.format([obj.__class__.__name__ for obj in objs]))
return out_class
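# Illustration (not part of the original source; assumed example): calling
# _get_out_class([Table(), QTable()]) returns QTable, since QTable is the
# deepest subclass of the two; unrelated Table subclasses that do not share
# ``info`` trigger the ValueError above instead.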
def join_skycoord(distance, distance_func='search_around_sky'):
"""Helper function to join on SkyCoord columns using distance matching.
This function is intended for use in ``table.join()`` to allow performing a
table join where the key columns are both ``SkyCoord`` objects, matched by
computing the distance between points and accepting values below
``distance``.
The distance cross-matching is done using either
`~astropy.coordinates.search_around_sky` or
`~astropy.coordinates.search_around_3d`, depending on the value of
``distance_func``. The default is ``'search_around_sky'``.
One can also provide a function object for ``distance_func``, in which case
it must be a function that follows the same input and output API as
`~astropy.coordinates.search_around_sky`. In this case the function will
be called with ``(skycoord1, skycoord2, distance)`` as arguments.
Parameters
----------
distance : Quantity (angle or length)
Maximum distance between points to be considered a join match
distance_func : str or function
Specifies the function for performing the cross-match based on
``distance``. If supplied as a string this specifies the name of a
function in `astropy.coordinates`. If supplied as a function then that
function is called directly.
Returns
-------
join_func : function
Function that accepts two ``SkyCoord`` columns (col1, col2) and returns
the tuple (ids1, ids2) of pair-matched unique identifiers.
Examples
--------
This example shows an inner join of two ``SkyCoord`` columns, taking any
sources within 0.2 deg to be a match. Note the new ``sc_id`` column which
is added and provides a unique source identifier for the matches.
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> from astropy.table import Table, join_skycoord
>>> from astropy import table
>>> sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg')
>>> sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg')
>>> join_func = join_skycoord(0.2 * u.deg)
>>> join_func(sc1, sc2) # Associate each coordinate with unique source ID
(array([3, 1, 1, 2]), array([4, 1, 2]))
>>> t1 = Table([sc1], names=['sc'])
>>> t2 = Table([sc2], names=['sc'])
>>> t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)})
>>> print(t12) # Note new `sc_id` column with the IDs from join_func()
sc_id sc_1 sc_2
deg,deg deg,deg
----- ------- --------
1 1.0,0.0 1.05,0.0
1 1.1,0.0 1.05,0.0
2 2.0,0.0 2.1,0.0
"""
if isinstance(distance_func, str):
import astropy.coordinates as coords
try:
distance_func = getattr(coords, distance_func)
except AttributeError:
raise ValueError('distance_func must be a function in astropy.coordinates')
else:
from inspect import isfunction
if not isfunction(distance_func):
raise ValueError('distance_func must be a str or function')
def join_func(sc1, sc2):
# Call the appropriate SkyCoord method to find pairs within distance
idxs1, idxs2, d2d, d3d = distance_func(sc1, sc2, distance)
# Now convert that into unique identifiers for each near-pair. This is
# taken to be transitive, so that if points 1 and 2 are "near" and points
# 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier.
# This identifier will then be used in the table join matching.
# Identifiers for each column, initialized to all zero.
ids1 = np.zeros(len(sc1), dtype=int)
ids2 = np.zeros(len(sc2), dtype=int)
# Start the identifier count at 1
id_ = 1
for idx1, idx2 in zip(idxs1, idxs2):
# If this col1 point is previously identified then set corresponding
# col2 point to same identifier. Likewise for col2 and col1.
if ids1[idx1] > 0:
ids2[idx2] = ids1[idx1]
elif ids2[idx2] > 0:
ids1[idx1] = ids2[idx2]
else:
                # Not yet seen so set identifier for col1 and col2
ids1[idx1] = id_
ids2[idx2] = id_
id_ += 1
# Fill in unique identifiers for points with no near neighbor
for ids in (ids1, ids2):
for idx in np.flatnonzero(ids == 0):
ids[idx] = id_
id_ += 1
# End of enclosure join_func()
return ids1, ids2
return join_func
def join_distance(distance, kdtree_args=None, query_args=None):
"""Helper function to join table columns using distance matching.
This function is intended for use in ``table.join()`` to allow performing
a table join where the key columns are matched by computing the distance
between points and accepting values below ``distance``. This numerical
"fuzzy" match can apply to 1-D or 2-D columns, where in the latter case
the distance is a vector distance.
The distance cross-matching is done using `scipy.spatial.cKDTree`. If
necessary you can tweak the default behavior by providing ``dict`` values
for the ``kdtree_args`` or ``query_args``.
Parameters
----------
distance : float, Quantity
Maximum distance between points to be considered a join match
kdtree_args : dict, None
Optional extra args for `~scipy.spatial.cKDTree`
query_args : dict, None
Optional extra args for `~scipy.spatial.cKDTree.query_ball_tree`
Returns
-------
join_func : function
        Function that accepts two columns (col1, col2) and returns the tuple
(ids1, ids2) of pair-matched unique identifiers.
Examples
--------
>>> from astropy.table import Table, join_distance
>>> from astropy import table
>>> c1 = [0, 1, 1.1, 2]
>>> c2 = [0.5, 1.05, 2.1]
>>> t1 = Table([c1], names=['col'])
>>> t2 = Table([c2], names=['col'])
>>> t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_distance(0.2)})
>>> print(t12)
col_id col_1 col_2
------ ----- -----
1 1.0 1.05
1 1.1 1.05
2 2.0 2.1
3 0.0 --
4 -- 0.5
"""
try:
from scipy.spatial import cKDTree
except ImportError as exc:
raise ImportError('scipy is required to use join_distance()') from exc
if kdtree_args is None:
kdtree_args = {}
if query_args is None:
query_args = {}
def join_func(col1, col2):
if col1.ndim > 2 or col2.ndim > 2:
raise ValueError('columns for isclose_join must be 1- or 2-dimensional')
if isinstance(distance, Quantity):
# Convert to np.array with common unit
col1 = col1.to_value(distance.unit)
col2 = col2.to_value(distance.unit)
dist = distance.value
else:
# Convert to np.array to allow later in-place shape changing
col1 = np.asarray(col1)
col2 = np.asarray(col2)
dist = distance
# Ensure columns are pure np.array and are 2-D for use with KDTree
if col1.ndim == 1:
col1.shape = col1.shape + (1,)
if col2.ndim == 1:
col2.shape = col2.shape + (1,)
# Cross-match col1 and col2 within dist using KDTree
kd1 = cKDTree(col1, **kdtree_args)
kd2 = cKDTree(col2, **kdtree_args)
nears = kd1.query_ball_tree(kd2, r=dist, **query_args)
# Output of above is nears which is a list of lists, where the outer
# list corresponds to each item in col1, and where the inner lists are
# indexes into col2 of elements within the distance tolerance. This
# identifies col1 / col2 near pairs.
# Now convert that into unique identifiers for each near-pair. This is
# taken to be transitive, so that if points 1 and 2 are "near" and points
# 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier.
# This identifier will then be used in the table join matching.
# Identifiers for each column, initialized to all zero.
ids1 = np.zeros(len(col1), dtype=int)
ids2 = np.zeros(len(col2), dtype=int)
# Start the identifier count at 1
id_ = 1
for idx1, idxs2 in enumerate(nears):
for idx2 in idxs2:
# If this col1 point is previously identified then set corresponding
# col2 point to same identifier. Likewise for col2 and col1.
if ids1[idx1] > 0:
ids2[idx2] = ids1[idx1]
elif ids2[idx2] > 0:
ids1[idx1] = ids2[idx2]
else:
                    # Not yet seen so set identifier for col1 and col2
ids1[idx1] = id_
ids2[idx2] = id_
id_ += 1
# Fill in unique identifiers for points with no near neighbor
for ids in (ids1, ids2):
for idx in np.flatnonzero(ids == 0):
ids[idx] = id_
id_ += 1
# End of enclosure join_func()
return ids1, ids2
return join_func
def join(left, right, keys=None, join_type='inner',
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'], metadata_conflicts='warn',
join_funcs=None):
"""
Perform a join of the left table with the right table on specified keys.
Parameters
----------
left : Table object or a value that will initialize a Table object
Left side table in the join
right : Table object or a value that will initialize a Table object
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
uniq_col_name : str or None
        String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
join_funcs : dict, None
Dict of functions to use for matching the corresponding key column(s).
See `~astropy.table.join_skycoord` for an example and details.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Try converting inputs to Table as needed
if not isinstance(left, Table):
left = Table(left)
if not isinstance(right, Table):
right = Table(right)
col_name_map = OrderedDict()
out = _join(left, right, keys, join_type,
uniq_col_name, table_names, col_name_map, metadata_conflicts,
join_funcs)
# Merge the column and table meta data. Table subclasses might override
# these methods for custom merge behavior.
_merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts)
return out
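# Minimal usage sketch for join() (illustrative values, not taken from the
# upstream docstring):
#
#     >>> from astropy.table import Table, join
#     >>> t1 = Table({'name': ['a', 'b'], 'x': [1, 2]})
#     >>> t2 = Table({'name': ['b', 'c'], 'y': [3, 4]})
#     >>> join(t1, t2, keys='name', join_type='outer')  # doctest: +SKIP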
def setdiff(table1, table2, keys=None):
"""
Take a set difference of table rows.
The row set difference will contain all rows in ``table1`` that are not
present in ``table2``. If the keys parameter is not defined, all columns in
``table1`` will be included in the output table.
Parameters
----------
table1 : `~astropy.table.Table`
``table1`` is on the left side of the set difference.
table2 : `~astropy.table.Table`
``table2`` is on the right side of the set difference.
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns in ``table1``.
Returns
-------
diff_table : `~astropy.table.Table`
New table containing the set difference between tables. If the set
difference is none, an empty table will be returned.
Examples
--------
To get a set difference between two tables::
>>> from astropy.table import setdiff, Table
>>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b'))
>>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 c
4 d
9 f
>>> print(t2)
a b
--- ---
1 c
5 b
9 f
>>> print(setdiff(t1, t2))
a b
--- ---
4 d
>>> print(setdiff(t2, t1))
a b
--- ---
5 b
"""
if keys is None:
keys = table1.colnames
# Check that all keys are in table1 and table2
for tbl, tbl_str in ((table1, 'table1'), (table2, 'table2')):
diff_keys = np.setdiff1d(keys, tbl.colnames)
if len(diff_keys) != 0:
raise ValueError("The {} columns are missing from {}, cannot take "
"a set difference.".format(diff_keys, tbl_str))
# Make a light internal copy of both tables
t1 = table1.copy(copy_data=False)
t1.meta = {}
t1.keep_columns(keys)
t1['__index1__'] = np.arange(len(table1)) # Keep track of rows indices
# Make a light internal copy to avoid touching table2
t2 = table2.copy(copy_data=False)
t2.meta = {}
t2.keep_columns(keys)
# Dummy column to recover rows after join
t2['__index2__'] = np.zeros(len(t2), dtype=np.uint8) # dummy column
t12 = _join(t1, t2, join_type='left', keys=keys,
metadata_conflicts='silent')
# If t12 index2 is masked then that means some rows were in table1 but not table2.
if hasattr(t12['__index2__'], 'mask'):
# Define bool mask of table1 rows not in table2
diff = t12['__index2__'].mask
# Get the row indices of table1 for those rows
idx = t12['__index1__'][diff]
# Select corresponding table1 rows straight from table1 to ensure
# correct table and column types.
t12_diff = table1[idx]
else:
t12_diff = table1[[]]
return t12_diff
def dstack(tables, join_type='outer', metadata_conflicts='warn'):
"""
Stack columns within tables depth-wise
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : Table or list of Table or Row objects
Table(s) to stack along depth-wise with the current table
Table columns should have same shape and name for depth-wise stacking
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
    To stack two tables depth-wise do::
      >>> from astropy.table import dstack, Table
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
a b
--- ---
5 7
6 8
>>> print(dstack([t1, t2]))
a [2] b [2]
------ ------
1 .. 5 3 .. 7
2 .. 6 4 .. 8
"""
_check_join_type(join_type, 'dstack')
tables = _get_list_of_tables(tables)
if len(tables) == 1:
return tables[0] # no point in stacking a single table
n_rows = set(len(table) for table in tables)
if len(n_rows) != 1:
raise ValueError('Table lengths must all match for dstack')
n_row = n_rows.pop()
out = vstack(tables, join_type, metadata_conflicts)
for name, col in out.columns.items():
col = out[name]
# Reshape to so each original column is now in a row.
# If entries are not 0-dim then those additional shape dims
# are just carried along.
# [x x x y y y] => [[x x x],
# [y y y]]
new_shape = (len(tables), n_row) + col.shape[1:]
try:
col.shape = (len(tables), n_row) + col.shape[1:]
except AttributeError:
col = col.reshape(new_shape)
# Transpose the table and row axes to get to
# [[x, y],
# [x, y]
# [x, y]]
axes = np.arange(len(col.shape))
axes[:2] = [1, 0]
# This temporarily makes `out` be corrupted (columns of different
# length) but it all works out in the end.
out.columns.__setitem__(name, col.transpose(axes), validated=True)
return out
def vstack(tables, join_type='outer', metadata_conflicts='warn'):
"""
Stack tables vertically (along rows)
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : Table or list of Table or Row objects
Table(s) to stack along rows (vertically)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables along rows do::
>>> from astropy.table import vstack, Table
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
a b
--- ---
5 7
6 8
>>> print(vstack([t1, t2]))
a b
--- ---
1 3
2 4
5 7
6 8
"""
_check_join_type(join_type, 'vstack')
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _vstack(tables, join_type, col_name_map, metadata_conflicts)
# Merge table metadata
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def hstack(tables, join_type='outer',
uniq_col_name='{col_name}_{table_name}', table_names=None,
metadata_conflicts='warn'):
"""
Stack tables along columns (horizontally)
A ``join_type`` of 'exact' means that the tables must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' (default)
means the output will have the union of all rows, with table values being
masked where no common values are available.
Parameters
----------
tables : Table or list of Table or Row objects
Tables to stack along columns (horizontally)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
        String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables horizontally (along columns) do::
>>> from astropy.table import Table, hstack
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
c d
--- ---
5 7
6 8
>>> print(hstack([t1, t2]))
a b c d
--- --- --- ---
1 3 5 7
2 4 6 8
"""
_check_join_type(join_type, 'hstack')
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _hstack(tables, join_type, uniq_col_name, table_names,
col_name_map)
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def unique(input_table, keys=None, silent=False, keep='first'):
"""
Returns the unique rows of a table.
Parameters
----------
input_table : `~astropy.table.Table` object or a value that
will initialize a `~astropy.table.Table` object
keys : str or list of str
Name(s) of column(s) used to create unique rows.
Default is to use all columns.
keep : one of 'first', 'last' or 'none'
Whether to keep the first or last row for each set of
duplicates. If 'none', all rows that are duplicate are
removed, leaving only rows that are already unique in
the input.
Default is 'first'.
silent : bool
If `True`, masked value column(s) are silently removed from
``keys``. If `False`, an exception is raised when ``keys``
contains masked value column(s).
Default is `False`.
Returns
-------
unique_table : `~astropy.table.Table` object
New table containing only the unique rows of ``input_table``.
Examples
--------
>>> from astropy.table import unique, Table
>>> import numpy as np
>>> table = Table(data=[[1,2,3,2,3,3],
... [2,3,4,5,4,6],
... [3,4,5,6,7,8]],
... names=['col1', 'col2', 'col3'],
... dtype=[np.int32, np.int32, np.int32])
>>> table
<Table length=6>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
2 5 6
3 4 7
3 6 8
>>> unique(table, keys='col1')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
>>> unique(table, keys=['col1'], keep='last')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 5 6
3 6 8
>>> unique(table, keys=['col1', 'col2'])
<Table length=5>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 4 5
3 6 8
>>> unique(table, keys=['col1', 'col2'], keep='none')
<Table length=4>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 6 8
>>> unique(table, keys=['col1'], keep='none')
<Table length=1>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
"""
if keep not in ('first', 'last', 'none'):
raise ValueError("'keep' should be one of 'first', 'last', 'none'")
if isinstance(keys, str):
keys = [keys]
if keys is None:
keys = input_table.colnames
else:
if len(set(keys)) != len(keys):
raise ValueError("duplicate key names")
# Check for columns with masked values
for key in keys[:]:
col = input_table[key]
if hasattr(col, 'mask') and np.any(col.mask):
if not silent:
raise ValueError(
"cannot use columns with masked values as keys; "
"remove column '{}' from keys and rerun "
"unique()".format(key))
del keys[keys.index(key)]
if len(keys) == 0:
raise ValueError("no column remained in ``keys``; "
"unique() cannot work with masked value "
"key columns")
grouped_table = input_table.group_by(keys)
indices = grouped_table.groups.indices
if keep == 'first':
indices = indices[:-1]
elif keep == 'last':
indices = indices[1:] - 1
else:
indices = indices[:-1][np.diff(indices) == 1]
return grouped_table[indices]
def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
table_names=None):
"""
Find the column names mapping when merging the list of tables
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names
will be present, while for the other non-key columns the value will be (col_name_0,
None, ..) or (None, col_name_1, ..) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.colnames:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.colnames for other in others):
out_name = uniq_col_name.format(table_name=table_name, col_name=name)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError('Merging column names resulted in duplicates: {}. '
'Change uniq_col_name or table_names args to fix this.'
.format(repeated_names))
# Convert col_name_map to a regular dict with tuple (immutable) values
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
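# Illustration (assumed example, not from the original source): merging two
# tables with columns ('a', 'b') and ('a', 'c') on the common key 'a' gives
#     {'a': ('a', 'a'), 'b': ('b', None), 'c': (None, 'c')}
# so key columns map to a name from every input while non-key columns carry
# None for the tables that lack them.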
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the superset of all dtypes in in_arrays
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{}' columns have incompatible types: {}"
.format(names[0], tme._incompat_types))
# Make sure all input shapes are the same
uniq_shapes = set(col.shape[1:] for col in in_cols)
if len(uniq_shapes) != 1:
raise TableMergeError(f'Key columns {names!r} have different shape')
shape = uniq_shapes.pop()
out_descrs.append((fix_column_name(out_name), dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
try:
return metadata.common_dtype(cols)
except metadata.MergeConflictError as err:
tme = TableMergeError(f'Columns have incompatible types {err._incompat_types}')
tme._incompat_types = err._incompat_types
raise tme
def _get_join_sort_idxs(keys, left, right):
# Go through each of the key columns in order and make columns for
# a new structured array that represents the lexical ordering of those
# key columns. This structured array is then argsort'ed. The trick here
# is that some columns (e.g. Time) may need to be expanded into multiple
# columns for ordering here.
ii = 0 # Index for uniquely naming the sort columns
sort_keys_dtypes = [] # sortable_table dtypes as list of (name, dtype_str, shape) tuples
sort_keys = [] # sortable_table (structured ndarray) column names
sort_left = {} # sortable ndarrays from left table
sort_right = {} # sortable ndarray from right table
for key in keys:
# get_sortable_arrays() returns a list of ndarrays that can be lexically
# sorted to represent the order of the column. In most cases this is just
# a single element of the column itself.
left_sort_cols = left[key].info.get_sortable_arrays()
right_sort_cols = right[key].info.get_sortable_arrays()
if len(left_sort_cols) != len(right_sort_cols):
# Should never happen because cols are screened beforehand for compatibility
raise RuntimeError('mismatch in sort cols lengths')
for left_sort_col, right_sort_col in zip(left_sort_cols, right_sort_cols):
# Check for consistency of shapes. Mismatch should never happen.
shape = left_sort_col.shape[1:]
if shape != right_sort_col.shape[1:]:
raise RuntimeError('mismatch in shape of left vs. right sort array')
if shape != ():
raise ValueError(f'sort key column {key!r} must be 1-d')
sort_key = str(ii)
sort_keys.append(sort_key)
sort_left[sort_key] = left_sort_col
sort_right[sort_key] = right_sort_col
# Build up dtypes for the structured array that gets sorted.
dtype_str = common_dtype([left_sort_col, right_sort_col])
sort_keys_dtypes.append((sort_key, dtype_str))
ii += 1
# Make the empty sortable table and fill it
len_left = len(left)
sortable_table = np.empty(len_left + len(right), dtype=sort_keys_dtypes)
for key in sort_keys:
sortable_table[key][:len_left] = sort_left[key]
sortable_table[key][len_left:] = sort_right[key]
# Finally do the (lexical) argsort and make a new sorted version
idx_sort = sortable_table.argsort(order=sort_keys)
sorted_table = sortable_table[idx_sort]
# Get indexes of unique elements (i.e. the group boundaries)
diffs = np.concatenate(([True], sorted_table[1:] != sorted_table[:-1], [True]))
idxs = np.flatnonzero(diffs)
return idxs, idx_sort
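# Sketch of the idea (illustrative values, not from the original source): if
# the single key column sorts as left = ['b', 'a'] and right = ['a', 'c'],
# the combined structured array sorts to ['a', 'a', 'b', 'c'], ``idx_sort``
# is the permutation that produced that order, and ``idxs`` marks the group
# boundaries [0, 2, 3, 4] used by the Cython join kernel.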
def _apply_join_funcs(left, right, keys, join_funcs):
"""Apply join_funcs
"""
# Make light copies of left and right, then add new index columns.
left = left.copy(copy_data=False)
right = right.copy(copy_data=False)
for key, join_func in join_funcs.items():
ids1, ids2 = join_func(left[key], right[key])
for ii in itertools.count(1):
id_key = key + '_' * ii + 'id'
if id_key not in left.columns and id_key not in right.columns:
break
keys = tuple(id_key if orig_key == key else orig_key for orig_key in keys)
left.add_column(ids1, index=0, name=id_key) # [id_key] = ids1
right.add_column(ids2, index=0, name=id_key) # [id_key] = ids2
return left, right, keys
def _join(left, right, keys=None, join_type='inner',
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'],
col_name_map=None, metadata_conflicts='warn',
join_funcs=None):
"""
Perform a join of the left and right Tables on specified keys.
Parameters
----------
left : Table
Left side table in the join
right : Table
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
uniq_col_name : str or None
        String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
join_funcs : dict, None
Dict of functions to use for matching the corresponding key column(s).
See `~astropy.table.join_skycoord` for an example and details.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Special column name for cartesian join, should never collide with real column
cartesian_index_name = '__table_cartesian_join_temp_index__'
if join_type not in ('inner', 'outer', 'left', 'right', 'cartesian'):
raise ValueError("The 'join_type' argument should be in 'inner', "
"'outer', 'left', 'right', or 'cartesian' "
"(got '{}' instead)".
format(join_type))
if join_type == 'cartesian':
if keys:
raise ValueError('cannot supply keys for a cartesian join')
if join_funcs:
raise ValueError('cannot supply join_funcs for a cartesian join')
# Make light copies of left and right, then add temporary index columns
# with all the same value so later an outer join turns into a cartesian join.
left = left.copy(copy_data=False)
right = right.copy(copy_data=False)
left[cartesian_index_name] = np.uint8(0)
right[cartesian_index_name] = np.uint8(0)
keys = (cartesian_index_name, )
# If we have a single key, put it in a tuple
if keys is None:
keys = tuple(name for name in left.colnames if name in right.colnames)
if len(keys) == 0:
raise TableMergeError('No keys in common between left and right tables')
elif isinstance(keys, str):
keys = (keys,)
# Check the key columns
for arr, arr_label in ((left, 'Left'), (right, 'Right')):
for name in keys:
if name not in arr.colnames:
raise TableMergeError('{} table does not have key column {!r}'
.format(arr_label, name))
if hasattr(arr[name], 'mask') and np.any(arr[name].mask):
raise TableMergeError('{} key column {!r} has missing values'
.format(arr_label, name))
if join_funcs is not None:
if not all(key in keys for key in join_funcs):
raise ValueError(f'join_funcs keys {join_funcs.keys()} must be a '
f'subset of join keys {keys}')
left, right, keys = _apply_join_funcs(left, right, keys, join_funcs)
len_left, len_right = len(left), len(right)
if len_left == 0 or len_right == 0:
raise ValueError('input tables for join must both have at least one row')
# Joined array dtype as a list of descr (name, type_str, shape) tuples
col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names)
out_descrs = get_descrs([left, right], col_name_map)
try:
idxs, idx_sort = _get_join_sort_idxs(keys, left, right)
except NotImplementedError:
raise TypeError('one or more key columns are not sortable')
# Main inner loop in Cython to compute the cartesian product
# indices for the given join type
int_join_type = {'inner': 0, 'outer': 1, 'left': 2, 'right': 3,
'cartesian': 1}[join_type]
masked, n_out, left_out, left_mask, right_out, right_mask = \
_np_utils.join_inner(idxs, idx_sort, len_left, int_join_type)
out = _get_out_class([left, right])()
for out_name, dtype, shape in out_descrs:
if out_name == cartesian_index_name:
continue
left_name, right_name = col_name_map[out_name]
if left_name and right_name: # this is a key which comes from left and right
cols = [left[left_name], right[right_name]]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('join unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
out[out_name] = col_cls.info.new_like(cols, n_out, metadata_conflicts, out_name)
out[out_name][:] = np.where(right_mask,
left[left_name].take(left_out),
right[right_name].take(right_out))
continue
elif left_name: # out_name came from the left table
name, array, array_out, array_mask = left_name, left, left_out, left_mask
elif right_name:
name, array, array_out, array_mask = right_name, right, right_out, right_mask
else:
raise TableMergeError('Unexpected column names (maybe one is ""?)')
# Select the correct elements from the original table
col = array[name][array_out]
# If the output column is masked then set the output column masking
# accordingly. Check for columns that don't support a mask attribute.
if masked and np.any(array_mask):
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
# array_mask is 1-d corresponding to length of output column. We need
# make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..).
# Mixin columns might not have ndim attribute so use len(col.shape).
array_mask.shape = (col.shape[0],) + (1,) * (len(col.shape) - 1)
# Now broadcast to the correct final shape
array_mask = np.broadcast_to(array_mask, col.shape)
try:
col[array_mask] = col.info.mask_val
except Exception: # Not clear how different classes will fail here
raise NotImplementedError(
"join requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__))
# Set the output table column to the new joined column
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
def _check_join_type(join_type, func_name):
"""Check join_type arg in hstack and vstack.
    This specifically checks for the common mistake of calling vstack(t1, t2)
instead of vstack([t1, t2]). The subsequent check of
``join_type in ('inner', ..)`` does not raise in this case.
"""
if not isinstance(join_type, str):
msg = '`join_type` arg must be a string'
if isinstance(join_type, Table):
msg += ('. Did you accidentally '
f'call {func_name}(t1, t2, ..) instead of '
f'{func_name}([t1, t2], ..)?')
raise TypeError(msg)
if join_type not in ('inner', 'exact', 'outer'):
raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'")
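# (Illustrative note, not in the original source: calling vstack(t1, t2)
# instead of vstack([t1, t2]) makes ``join_type`` a Table, so the isinstance
# check above raises the TypeError together with the corrective hint.)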
def _vstack(arrays, join_type='outer', col_name_map=None, metadata_conflicts='warn'):
"""
Stack Tables vertically (by rows)
A ``join_type`` of 'exact' (default) means that the arrays must all
have exactly the same column names (though the order can vary). If
``join_type`` is 'inner' then the intersection of common columns will
be the output. A value of 'outer' means the output will have the union of
all columns, with array values being masked where no common values are
available.
Parameters
----------
arrays : list of Tables
Tables to stack by rows (vertically)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Trivial case of one input array
if len(arrays) == 1:
return arrays[0]
# Start by assuming an outer match where all names go to output
names = set(itertools.chain(*[arr.colnames for arr in arrays]))
col_name_map = get_col_name_map(arrays, names)
# If require_match is True then the output must have exactly the same
# number of columns as each input array
if join_type == 'exact':
for names in col_name_map.values():
if any(x is None for x in names):
raise TableMergeError('Inconsistent columns in input arrays '
"(use 'inner' or 'outer' join_type to "
"allow non-matching columns)")
join_type = 'outer'
# For an inner join, keep only columns where all input arrays have that column
if join_type == 'inner':
col_name_map = OrderedDict((name, in_names) for name, in_names in col_name_map.items()
if all(x is not None for x in in_names))
if len(col_name_map) == 0:
raise TableMergeError('Input arrays have no columns in common')
lens = [len(arr) for arr in arrays]
n_rows = sum(lens)
out = _get_out_class(arrays)()
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('vstack unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
try:
col = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name)
except metadata.MergeConflictError as err:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{}' columns have incompatible types: {}"
.format(out_name, err._incompat_types))
idx0 = 0
for name, array in zip(in_names, arrays):
idx1 = idx0 + len(array)
if name in array.colnames:
col[idx0:idx1] = array[name]
else:
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
try:
col[idx0:idx1] = col.info.mask_val
except Exception:
raise NotImplementedError(
"vstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__))
idx0 = idx1
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
def _hstack(arrays, join_type='outer', uniq_col_name='{col_name}_{table_name}',
table_names=None, col_name_map=None):
"""
Stack tables horizontally (by columns)
A ``join_type`` of 'exact' (default) means that the arrays must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' means
the output will have the union of all rows, with array values being
masked where no common values are available.
Parameters
----------
arrays : List of tables
Tables to stack by columns (horizontally)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
        String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
if table_names is None:
table_names = [f'{ii + 1}' for ii in range(len(arrays))]
if len(arrays) != len(table_names):
raise ValueError('Number of arrays must match number of table_names')
# Trivial case of one input arrays
if len(arrays) == 1:
return arrays[0]
col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)
# If require_match is True then all input arrays must have the same length
arr_lens = [len(arr) for arr in arrays]
if join_type == 'exact':
if len(set(arr_lens)) > 1:
raise TableMergeError("Inconsistent number of rows in input arrays "
"(use 'inner' or 'outer' join_type to allow "
"non-matching rows)")
join_type = 'outer'
# For an inner join, keep only the common rows
if join_type == 'inner':
min_arr_len = min(arr_lens)
if len(set(arr_lens)) > 1:
arrays = [arr[:min_arr_len] for arr in arrays]
arr_lens = [min_arr_len for arr in arrays]
# If there are any output rows where one or more input arrays are missing
# then the output must be masked. If any input arrays are masked then
# output is masked.
n_rows = max(arr_lens)
out = _get_out_class(arrays)()
for out_name, in_names in col_name_map.items():
for name, array, arr_len in zip(in_names, arrays, arr_lens):
if name is None:
continue
if n_rows > arr_len:
indices = np.arange(n_rows)
indices[arr_len:] = 0
col = array[name][indices]
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
try:
col[arr_len:] = col.info.mask_val
except Exception:
raise NotImplementedError(
"hstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__))
else:
col = array[name][:n_rows]
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
|
bsd-3-clause
|
AsherBond/MondocosmOS
|
opencollada/Externals/LibXML/genChRanges.py
|
13
|
15491
|
#!/usr/bin/python -u
#
# Portions of this script have been (shamelessly) stolen from the
# prior work of Daniel Veillard (genUnicode.py)
#
# I, however, take full credit for any bugs, errors or difficulties :-)
#
# William Brack
# October 2003
#
# 18 October 2003
# Modified to maintain binary compatibility with previous library versions
# by adding a suffix 'Q' ('quick') to the macro generated for the original,
# function, and adding generation of a function (with the original name) which
# instantiates the macro.
#
import sys
import string
import time
#
# A routine to take a list of yes/no (1, 0) values and turn it
# into a list of ranges. This will later be used to determine whether
# to generate single-byte lookup tables, or inline comparisons
#
def makeRange(lst):
ret = []
pos = 0
while pos < len(lst):
try: # index generates exception if not present
s = lst[pos:].index(1) # look for start of next range
except:
break # if no more, finished
pos += s # pointer to start of possible range
try:
e = lst[pos:].index(0) # look for end of range
e += pos
except: # if no end, set to end of list
e = len(lst)
ret.append((pos, e-1)) # append range tuple to list
pos = e + 1 # ready to check for next range
return ret
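# For example (illustrative, not part of the original script):
#   makeRange([0, 1, 1, 0, 0, 1]) returns [(1, 2), (5, 5)]
# i.e. runs of 1-values become inclusive (start, end) index pairs.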
sources = "chvalid.def" # input filename
# minTableSize gives the minimum number of ranges which must be present
# before a 256-byte lookup table is produced. If there are less than this
# number, a macro with inline comparisons is generated
minTableSize = 6
# dictionary of functions, key=name, element contains char-map and range-list
Functs = {}
state = 0
try:
defines = open("chvalid.def", "r")
except:
print "Missing chvalid.def, aborting ..."
sys.exit(1)
#
# The lines in the .def file have three types:-
# name: Defines a new function block
# ur: Defines individual or ranges of unicode values
# end: Indicates the end of the function block
#
# These lines are processed below.
#
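# A block in chvalid.def therefore looks roughly like this (illustrative
# values, not copied from the real definition file):
#   name xmlIsBaseChar
#   ur 0x41..0x5a 0x61..0x7a 0xc0..0xd6
#   end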
for line in defines.readlines():
# ignore blank lines, or lines beginning with '#'
if line[0] == '#':
continue
line = string.strip(line)
if line == '':
continue
# split line into space-separated fields, then split on type
try:
fields = string.split(line, ' ')
#
# name line:
# validate any previous function block already ended
# validate this function not already defined
        # initialize an entry in the function dictionary
# including a mask table with no values yet defined
#
if fields[0] == 'name':
name = fields[1]
if state != 0:
print "'name' %s found before previous name" \
"completed" % (fields[1])
continue
state = 1
if Functs.has_key(name):
print "name '%s' already present - may give" \
" wrong results" % (name)
else:
# dict entry with two list elements (chdata, rangedata)
Functs[name] = [ [], [] ]
for v in range(256):
Functs[name][0].append(0)
#
# end line:
# validate there was a preceding function name line
# set state to show no current function active
#
elif fields[0] == 'end':
if state == 0:
print "'end' found outside of function block"
continue
state = 0
#
# ur line:
# validate function has been defined
# process remaining fields on the line, which may be either
# individual unicode values or ranges of values
#
elif fields[0] == 'ur':
if state != 1:
raise ValidationError, "'ur' found outside of 'name' block"
for el in fields[1:]:
pos = string.find(el, '..')
# pos <=0 means not a range, so must be individual value
if pos <= 0:
# cheap handling of hex or decimal values
if el[0:2] == '0x':
value = int(el[2:],16)
elif el[0] == "'":
value = ord(el[1])
else:
value = int(el)
if ((value < 0) | (value > 0x1fffff)):
raise ValidationError, 'Illegal value (%s) in ch for'\
' name %s' % (el,name)
# for ur we have only ranges (makes things simpler),
# so convert val to range
currange = (value, value)
# pos > 0 means this is a range, so isolate/validate
# the interval
else:
                    # split the range into its first-val, last-val
(first, last) = string.split(el, "..")
# convert values from text into binary
if first[0:2] == '0x':
start = int(first[2:],16)
elif first[0] == "'":
start = ord(first[1])
else:
start = int(first)
if last[0:2] == '0x':
end = int(last[2:],16)
elif last[0] == "'":
end = ord(last[1])
else:
end = int(last)
if (start < 0) | (end > 0x1fffff) | (start > end):
raise ValidationError, "Invalid range '%s'" % el
currange = (start, end)
# common path - 'currange' has the range, now take care of it
# We split on single-byte values vs. multibyte
if currange[1] < 0x100: # single-byte
for ch in range(currange[0],currange[1]+1):
# validate that value not previously defined
if Functs[name][0][ch]:
msg = "Duplicate ch value '%s' for name '%s'" % (el, name)
raise ValidationError, msg
Functs[name][0][ch] = 1
else: # multi-byte
if currange in Functs[name][1]:
raise ValidationError, "range already defined in" \
" function"
else:
Functs[name][1].append(currange)
except:
print "Failed to process line: %s" % (line)
raise
#
# At this point, the entire definition file has been processed. Now we
# enter the output phase, where we generate the two files chvalid.c and
# chvalid.h
#
# To do this, we first output the 'static' data (heading, fixed
# definitions, etc.), then output the 'dynamic' data (the results
# of the above processing), and finally output closing 'static' data
# (e.g. the subroutine to process the ranges)
#
#
# Generate the headings:
#
try:
header = open("include/libxml/chvalid.h", "w")
except:
print "Failed to open include/libxml/chvalid.h"
sys.exit(1)
try:
output = open("chvalid.c", "w")
except:
print "Failed to open chvalid.c"
sys.exit(1)
date = time.asctime(time.localtime(time.time()))
header.write(
"""/*
* Summary: Unicode character range checking
* Description: this module exports interfaces for the character
* range validation APIs
*
* This file is automatically generated from the cvs source
* definition files using the genChRanges.py Python script
*
* Generation date: %s
* Sources: %s
* Author: William Brack <[email protected]>
*/
#ifndef __XML_CHVALID_H__
#define __XML_CHVALID_H__
#include <libxml/xmlversion.h>
#include <libxml/xmlstring.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Define our typedefs and structures
*
*/
typedef struct _xmlChSRange xmlChSRange;
typedef xmlChSRange *xmlChSRangePtr;
struct _xmlChSRange {
unsigned short low;
unsigned short high;
};
typedef struct _xmlChLRange xmlChLRange;
typedef xmlChLRange *xmlChLRangePtr;
struct _xmlChLRange {
unsigned int low;
unsigned int high;
};
typedef struct _xmlChRangeGroup xmlChRangeGroup;
typedef xmlChRangeGroup *xmlChRangeGroupPtr;
struct _xmlChRangeGroup {
int nbShortRange;
int nbLongRange;
const xmlChSRange *shortRange; /* points to an array of ranges */
const xmlChLRange *longRange;
};
/**
* Range checking routine
*/
XMLPUBFUN int XMLCALL
xmlCharInRange(unsigned int val, const xmlChRangeGroup *group);
""" % (date, sources));
output.write(
"""/*
* chvalid.c: this module implements the character range
* validation APIs
*
* This file is automatically generated from the cvs source
* definition files using the genChRanges.py Python script
*
* Generation date: %s
* Sources: %s
* William Brack <[email protected]>
*/
#define IN_LIBXML
#include "libxml.h"
#include <libxml/chvalid.h>
/*
* The initial tables ({func_name}_tab) are used to validate whether a
* single-byte character is within the specified group. Each table
* contains 256 bytes, with each byte representing one of the 256
* possible characters. If the table byte is set, the character is
* allowed.
*
*/
""" % (date, sources));
#
# Now output the generated data.
# We try to produce the best execution times. Tests have shown that validation
# with direct table lookup is, when there are a "small" number of valid items,
# still not as fast as a sequence of inline compares. So, if the single-byte
# portion of a range has a "small" number of ranges, we output a macro for inline
# compares, otherwise we output a 256-byte table and a macro to use it.
#
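# (Illustrative: with minTableSize = 6, a character class whose single-byte
# values collapse into 5 or fewer ranges gets an inline macro of chained
# comparisons, while a class with 6 or more ranges gets a 256-byte
# <name>_tab lookup table plus a small macro that indexes it.)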
fkeys = Functs.keys() # Dictionary of all defined functions
fkeys.sort() # Put some order to our output
for f in fkeys:
# First we convert the specified single-byte values into a group of ranges.
# If the total number of such ranges is less than minTableSize, we generate
# an inline macro for direct comparisons; if greater, we generate a lookup
# table.
if max(Functs[f][0]) > 0: # only check if at least one entry
rangeTable = makeRange(Functs[f][0])
numRanges = len(rangeTable)
if numRanges >= minTableSize: # table is worthwhile
header.write("XMLPUBVAR const unsigned char %s_tab[256];\n" % f)
header.write("""
/**
* %s_ch:
* @c: char to validate
*
* Automatically generated by genChRanges.py
*/
""" % f)
header.write("#define %s_ch(c)\t(%s_tab[(c)])\n" % (f, f))
# write the constant data to the code file
output.write("const unsigned char %s_tab[256] = {\n" % f)
pline = " "
for n in range(255):
pline += " 0x%02x," % Functs[f][0][n]
if len(pline) > 72:
output.write(pline + "\n")
pline = " "
output.write(pline + " 0x%02x };\n\n" % Functs[f][0][255])
else: # inline check is used
# first another little optimisation - if space is present,
# put it at the front of the list so it is checked first
try:
ix = rangeTable.remove((0x20, 0x20))
rangeTable.insert(0, (0x20, 0x20))
except:
pass
firstFlag = 1
header.write("""
/**
* %s_ch:
* @c: char to validate
*
* Automatically generated by genChRanges.py
*/
""" % f)
# okay, I'm tired of the messy lineup - let's automate it!
pline = "#define %s_ch(c)" % f
# 'ntab' is number of tabs needed to position to col. 33 from name end
ntab = 4 - (len(pline)) / 8
if ntab < 0:
ntab = 0
just = ""
for i in range(ntab):
just += "\t"
pline = pline + just + "("
for rg in rangeTable:
if not firstFlag:
pline += " || \\\n\t\t\t\t "
else:
firstFlag = 0
if rg[0] == rg[1]: # single value - check equal
pline += "((c) == 0x%x)" % rg[0]
else: # value range
# since we are doing char, also change range ending in 0xff
if rg[1] != 0xff:
pline += "((0x%x <= (c)) &&" % rg[0]
pline += " ((c) <= 0x%x))" % rg[1]
else:
pline += " (0x%x <= (c))" % rg[0]
pline += ")\n"
header.write(pline)
header.write("""
/**
* %sQ:
* @c: char to validate
*
* Automatically generated by genChRanges.py
*/
""" % f)
pline = "#define %sQ(c)" % f
ntab = 4 - (len(pline)) / 8
if ntab < 0:
ntab = 0
just = ""
for i in range(ntab):
just += "\t"
header.write(pline + just + "(((c) < 0x100) ? \\\n\t\t\t\t ")
if max(Functs[f][0]) > 0:
header.write("%s_ch((c)) :" % f)
else:
header.write("0 :")
# if no ranges defined, value invalid if >= 0x100
numRanges = len(Functs[f][1])
if numRanges == 0:
header.write(" 0)\n\n")
else:
if numRanges >= minTableSize:
header.write(" \\\n\t\t\t\t xmlCharInRange((c), &%sGroup))\n\n" % f)
else: # if < minTableSize, generate inline code
firstFlag = 1
for rg in Functs[f][1]:
if not firstFlag:
pline += " || \\\n\t\t\t\t "
else:
firstFlag = 0
pline = "\\\n\t\t\t\t("
if rg[0] == rg[1]: # single value - check equal
pline += "((c) == 0x%x)" % rg[0]
else: # value range
pline += "((0x%x <= (c)) &&" % rg[0]
pline += " ((c) <= 0x%x))" % rg[1]
pline += "))\n\n"
header.write(pline)
if len(Functs[f][1]) > 0:
header.write("XMLPUBVAR const xmlChRangeGroup %sGroup;\n" % f)
#
# Next we do the unicode ranges
#
for f in fkeys:
if len(Functs[f][1]) > 0: # only generate if unicode ranges present
rangeTable = Functs[f][1]
rangeTable.sort() # ascending tuple sequence
numShort = 0
numLong = 0
for rg in rangeTable:
if rg[1] < 0x10000: # if short value
if numShort == 0: # first occurence
pline = "static const xmlChSRange %s_srng[] = { " % f
else:
pline += ", "
numShort += 1
if len(pline) > 60:
output.write(pline + "\n")
pline = " "
pline += "{0x%x, 0x%x}" % (rg[0], rg[1])
else: # if long value
if numLong == 0: # first occurence
if numShort > 0: # if there were shorts, finish them off
output.write(pline + "};\n")
pline = "static const xmlChLRange %s_lrng[] = { " % f
else:
pline += ", "
numLong += 1
if len(pline) > 60:
output.write(pline + "\n")
pline = " "
pline += "{0x%x, 0x%x}" % (rg[0], rg[1])
output.write(pline + "};\n") # finish off last group
pline = "const xmlChRangeGroup %sGroup =\n\t{%d, %d, " % (f, numShort, numLong)
if numShort > 0:
pline += "%s_srng" % f
else:
pline += "(xmlChSRangePtr)0"
if numLong > 0:
pline += ", %s_lrng" % f
else:
pline += ", (xmlChLRangePtr)0"
output.write(pline + "};\n\n")
output.write(
"""
/**
* xmlCharInRange:
* @val: character to be validated
* @rptr: pointer to range to be used to validate
*
* Does a binary search of the range table to determine if char
* is valid
*
* Returns: true if character valid, false otherwise
*/
int
xmlCharInRange (unsigned int val, const xmlChRangeGroup *rptr) {
int low, high, mid;
const xmlChSRange *sptr;
const xmlChLRange *lptr;
if (rptr == NULL) return(0);
if (val < 0x10000) { /* is val in 'short' or 'long' array? */
if (rptr->nbShortRange == 0)
return 0;
low = 0;
high = rptr->nbShortRange - 1;
sptr = rptr->shortRange;
while (low <= high) {
mid = (low + high) / 2;
if ((unsigned short) val < sptr[mid].low) {
high = mid - 1;
} else {
if ((unsigned short) val > sptr[mid].high) {
low = mid + 1;
} else {
return 1;
}
}
}
} else {
if (rptr->nbLongRange == 0) {
return 0;
}
low = 0;
high = rptr->nbLongRange - 1;
lptr = rptr->longRange;
while (low <= high) {
mid = (low + high) / 2;
if (val < lptr[mid].low) {
high = mid - 1;
} else {
if (val > lptr[mid].high) {
low = mid + 1;
} else {
return 1;
}
}
}
}
return 0;
}
""");
#
# finally, generate the ABI compatibility functions
#
for f in fkeys:
output.write("""
/**
* %s:
* @ch: character to validate
*
* This function is DEPRECATED.
""" % f);
if max(Functs[f][0]) > 0:
output.write(" * Use %s_ch or %sQ instead" % (f, f))
else:
output.write(" * Use %sQ instead" % f)
output.write("""
*
* Returns true if argument valid, false otherwise
*/
""")
output.write("int\n%s(unsigned int ch) {\n return(%sQ(ch));\n}\n\n" % (f,f))
header.write("XMLPUBFUN int XMLCALL\n\t\t%s(unsigned int ch);\n" % f);
#
# Run complete - write trailers and close the output files
#
header.write("""
#ifdef __cplusplus
}
#endif
#endif /* __XML_CHVALID_H__ */
""")
header.close()
output.write("""#define bottom_chvalid
#include "elfgcchack.h"
""")
output.close()
|
agpl-3.0
|
drix00/microanalysis_file_format
|
pySpectrumFileFormat/OxfordInstruments/INCA/test_ReadSpectrumFullResults.py
|
1
|
3406
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: pySpectrumFileFormat.OxfordInstruments.INCA.test_ReadSpectrumFullResults
:synopsis: Tests for the module :py:mod:`pySpectrumFileFormat.OxfordInstruments.INCA.ReadSpectrumFullResults`
.. moduleauthor:: Hendrix Demers <[email protected]>
Tests for the module :py:mod:`pySpectrumFileFormat.OxfordInstruments.INCA.ReadSpectrumFullResults`.
"""
###############################################################################
# Copyright 2007 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import unittest
import os.path
# Third party modules.
from nose.plugins.skip import SkipTest
# Local modules.
# Project modules.
import pySpectrumFileFormat.OxfordInstruments.INCA.ReadSpectrumFullResults as ReadSpectrumFullResults
from pySpectrumFileFormat import get_current_module_path, is_test_data_file
# Globals and constants variables.
class TestReadSpectrumFullResults(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.filepath = get_current_module_path(__file__, "../../../test_data/SpectrumFullResults 10.txt")
if not is_test_data_file(self.filepath):
raise SkipTest
self.results = ReadSpectrumFullResults.ReadSpectrumFullResults(self.filepath)
def tearDown(self):
unittest.TestCase.tearDown(self)
def testSkeleton(self):
#self.fail("Test if the TestCase is working.")
self.assertTrue(True)
def testConstructor(self):
results = ReadSpectrumFullResults.ReadSpectrumFullResults(self.filepath)
#self.fail("Test if the TestCase is working.")
self.assertTrue(True)
def test_read(self):
self.results.read(self.filepath)
data = self.results.data
self.assertAlmostEquals(0.0088, data["O"][2], 4)
self.assertAlmostEquals(0.28664, data["Zr"][2], 5)
self.assertAlmostEquals(100.00, data["Totals"], 2)
#self.fail("Test if the TestCase is working.")
self.assertTrue(True)
def test_isValidFile(self):
folderpath = get_current_module_path(__file__, "../../../test_data")
filepath = os.path.join(folderpath, "SpectrumFullResults 10.txt")
self.assertEquals(True, ReadSpectrumFullResults.isValidFile(filepath))
filepath = os.path.join(folderpath, "SpectrumProcessing 10.txt")
self.assertEquals(False, ReadSpectrumFullResults.isValidFile(filepath))
filepath = os.path.join(folderpath, "AllSpectra.txt")
self.assertEquals(False, ReadSpectrumFullResults.isValidFile(filepath))
#self.fail("Test if the TestCase is working.")
self.assertTrue(True)
if __name__ == '__main__': # pragma: no cover
import nose
nose.runmodule()
|
apache-2.0
|
UdhayaBalaji/pdf-annotation
|
jni/freetype/src/tools/docmaker/utils.py
|
515
|
3063
|
# Utils (c) 2002, 2004, 2007, 2008 David Turner <[email protected]>
#
import string, sys, os, glob
# current output directory
#
output_dir = None
# This function is used to sort the index. It is a simple lexicographical
# sort, except that it places capital letters before lowercase ones.
#
def index_sort( s1, s2 ):
if not s1:
return -1
if not s2:
return 1
l1 = len( s1 )
l2 = len( s2 )
m1 = string.lower( s1 )
m2 = string.lower( s2 )
for i in range( l1 ):
if i >= l2 or m1[i] > m2[i]:
return 1
if m1[i] < m2[i]:
return -1
if s1[i] < s2[i]:
return -1
if s1[i] > s2[i]:
return 1
if l2 > l1:
return -1
return 0
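# Added example (illustrative only, never called by docmaker): index_sort is
# meant to be handed to Python 2's list.sort() as its comparison function.
def _index_sort_example():
    names = ["ft_outline", "FT_Outline", "FT_Face"]
    names.sort( index_sort )
    return names    # -> ["FT_Face", "FT_Outline", "ft_outline"]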
# Sort input_list, placing the elements of order_list in front.
#
def sort_order_list( input_list, order_list ):
new_list = order_list[:]
for id in input_list:
if not id in order_list:
new_list.append( id )
return new_list
# Open the standard output to a given project documentation file. Use
# "output_dir" to determine the filename location if necessary and save the
# old stdout in a tuple that is returned by this function.
#
def open_output( filename ):
global output_dir
if output_dir and output_dir != "":
filename = output_dir + os.sep + filename
old_stdout = sys.stdout
new_file = open( filename, "w" )
sys.stdout = new_file
return ( new_file, old_stdout )
# Close the output that was returned by "open_output".
#
def close_output( output ):
output[0].close()
sys.stdout = output[1]
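# Added example (illustrative only, never called): the intended pairing of
# "open_output" and "close_output"; the filename is hypothetical.
def _output_redirection_example():
    output = open_output( "ft2-index.html" )
    print "<html><body>index</body></html>"    # printed text goes to the new file
    close_output( output )                     # restores the previous sys.stdout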
# Check output directory.
#
def check_output():
global output_dir
if output_dir:
if output_dir != "":
if not os.path.isdir( output_dir ):
sys.stderr.write( "argument" + " '" + output_dir + "' " + \
"is not a valid directory" )
sys.exit( 2 )
else:
output_dir = None
def file_exists( pathname ):
"""checks that a given file exists"""
result = 1
try:
file = open( pathname, "r" )
file.close()
except:
result = None
sys.stderr.write( pathname + " couldn't be accessed\n" )
return result
def make_file_list( args = None ):
"""builds a list of input files from command-line arguments"""
file_list = []
# sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )
if not args:
args = sys.argv[1 :]
for pathname in args:
if string.find( pathname, '*' ) >= 0:
newpath = glob.glob( pathname )
newpath.sort() # sort files -- this is important because
# of the order of files
else:
newpath = [pathname]
file_list.extend( newpath )
if len( file_list ) == 0:
file_list = None
else:
# now filter the file list to remove non-existing ones
file_list = filter( file_exists, file_list )
return file_list
# eof
|
gpl-3.0
|
ecederstrand/django
|
django/contrib/sessions/models.py
|
347
|
1298
|
from __future__ import unicode_literals
from django.contrib.sessions.base_session import (
AbstractBaseSession, BaseSessionManager,
)
class SessionManager(BaseSessionManager):
use_in_migrations = True
class Session(AbstractBaseSession):
"""
Django provides full support for anonymous sessions. The session
framework lets you store and retrieve arbitrary data on a
per-site-visitor basis. It stores data on the server side and
abstracts the sending and receiving of cookies. Cookies contain a
session ID -- not the data itself.
The Django sessions framework is entirely cookie-based. It does
not fall back to putting session IDs in URLs. This is an intentional
design decision. Not only does that behavior make URLs ugly, it makes
your site vulnerable to session-ID theft via the "Referer" header.
For complete documentation on using Sessions in your code, consult
the sessions documentation that is shipped with Django (also available
on the Django Web site).
"""
objects = SessionManager()
@classmethod
def get_session_store_class(cls):
from django.contrib.sessions.backends.db import SessionStore
return SessionStore
class Meta(AbstractBaseSession.Meta):
db_table = 'django_session'
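# Added example (illustrative only, not part of Django): loading the data
# stored for a session key through the store class exposed above.  The key
# below is hypothetical.
def _example_load_session(session_key="hypothetical-session-key"):
    store_class = Session.get_session_store_class()
    store = store_class(session_key=session_key)
    return store.load()   # decoded session dict; empty if the key is unknown or expired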
|
bsd-3-clause
|
besser82/shogun
|
examples/undocumented/python/tests_check_commwordkernel_memleak.py
|
4
|
4446
|
#!/usr/bin/env python
import shogun as sg
parameter_list=[[10,7,0,False]]
def tests_check_commwordkernel_memleak (num, order, gap, reverse):
import gc
from shogun import Alphabet,StringCharFeatures,StringWordFeatures,DNA
from shogun import MSG_DEBUG
from shogun import CommWordStringKernel, IdentityKernelNormalizer
from numpy import mat
POS=[num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'TTGT', num*'TTGT', num*'TTGT',num*'TTGT', num*'TTGT',
num*'TTGT',num*'TTGT', num*'TTGT', num*'TTGT',num*'TTGT', num*'TTGT',
num*'TTGT',num*'TTGT', num*'TTGT', num*'TTGT',num*'TTGT', num*'TTGT',
num*'TTGT',num*'TTGT', num*'TTGT', num*'TTGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT']
NEG=[num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'TTGT', num*'TTGT', num*'TTGT',num*'TTGT', num*'TTGT',
num*'TTGT',num*'TTGT', num*'TTGT', num*'TTGT',num*'TTGT', num*'TTGT',
num*'TTGT',num*'TTGT', num*'TTGT', num*'TTGT',num*'TTGT', num*'TTGT',
num*'TTGT',num*'TTGT', num*'TTGT', num*'TTGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT',num*'ACGT', num*'ACGT',
num*'ACGT',num*'ACGT', num*'ACGT', num*'ACGT']
for i in range(10):
alpha=Alphabet(DNA)
traindat=StringCharFeatures(alpha)
traindat.set_features(POS+NEG)
trainudat=StringWordFeatures(traindat.get_alphabet());
trainudat.obtain_from_char(traindat, order-1, order, gap, reverse)
#trainudat.io.set_loglevel(MSG_DEBUG)
pre = sg.transformer("SortWordString")
#pre.io.set_loglevel(MSG_DEBUG)
pre.fit(trainudat)
trainudat = pre.transform(trainudat)
spec = CommWordStringKernel(10, False)
spec.set_normalizer(IdentityKernelNormalizer())
spec.init(trainudat, trainudat)
K=spec.get_kernel_matrix()
del POS
del NEG
del order
del gap
del reverse
return K
if __name__=='__main__':
print('Leak Check Comm Word Kernel')
tests_check_commwordkernel_memleak(*parameter_list[0])
|
bsd-3-clause
|
windedge/odoo
|
addons/crm_claim/report/__init__.py
|
446
|
1080
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_claim_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
minhphung171093/GreenERP_V7
|
openerp/addons/base_report_designer/__openerp__.py
|
110
|
1796
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'OpenOffice Report Designer',
'version': '0.1',
'category': 'Reporting',
'description': """
This module is used along with OpenERP OpenOffice Plugin.
=========================================================
This module adds wizards to Import/Export .sxw report that you can modify in OpenOffice.
Once you have modified it you can upload the report using the same wizard.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': ['wizard/base_report_design_view.xml' , 'base_report_designer_installer.xml'],
'demo': [],
'installable': True,
'auto_install': False,
'images': ['images/base_report_designer1.jpeg','images/base_report_designer2.jpeg',],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
fjorba/invenio
|
modules/weblinkback/lib/weblinkbackadminlib.py
|
25
|
9958
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebLinkback - Administrative Lib"""
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL
from invenio.urlutils import wash_url_argument
from invenio.messages import gettext_set_language, wash_language
from invenio.webuser import collect_user_info
from invenio.weblinkback_dblayer import get_all_linkbacks, \
approve_linkback,\
reject_linkback, \
remove_url, \
add_url_to_list, \
url_exists, \
get_urls,\
get_url_title
from invenio.weblinkback_config import CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME, \
CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION, \
CFG_WEBLINKBACK_STATUS, \
CFG_WEBLINKBACK_ACTION_RETURN_CODE
from invenio.bibrankadminlib import addadminbox, \
tupletotable
from invenio.dateutils import convert_datetext_to_dategui
from invenio.bibformat import format_record
import cgi
import urllib
import invenio.template
weblinkback_templates = invenio.template.load('weblinkback')
def get_navtrail(previous = '', ln=CFG_SITE_LANG):
"""Get the navtrail"""
previous = wash_url_argument(previous, 'str')
ln = wash_language(ln)
_ = gettext_set_language(ln)
navtrail = """<a class="navtrail" href="%s/help/admin">%s</a> """ % (CFG_SITE_URL, _("Admin Area"))
navtrail = navtrail + previous
return navtrail
def perform_request_index(ln=CFG_SITE_LANG):
"""
Display main admin page
"""
return weblinkback_templates.tmpl_admin_index(ln)
def perform_request_display_list(return_code, url_field_value, ln=CFG_SITE_LANG):
"""
Display a list
@param return_code: might indicate errors from a previous action, of CFG_WEBLINKBACK_ACTION_RETURN_CODE
@param url_field_value: value of the url text field
"""
_ = gettext_set_language(ln)
urls = get_urls()
entries = []
for url in urls:
entries.append(('<a href="%s">%s</a>' % (cgi.escape(url[0]), cgi.escape(url[0])),
url[1].lower(),
'<a href="moderatelist?url=%s&action=%s&ln=%s">%s</a>' % (urllib.quote(url[0]), CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['DELETE'], ln, CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['DELETE'].lower())))
header = ['URL', 'List', '']
error_message = ""
if return_code != CFG_WEBLINKBACK_ACTION_RETURN_CODE['OK']:
error_message = _("Unknown error")
if return_code == CFG_WEBLINKBACK_ACTION_RETURN_CODE['DUPLICATE']:
error_message = _("The URL already exists in one of the lists")
elif return_code == CFG_WEBLINKBACK_ACTION_RETURN_CODE['INVALID_ACTION']:
error_message = _("Invalid action")
elif return_code == CFG_WEBLINKBACK_ACTION_RETURN_CODE['BAD_INPUT']:
error_message = _("Invalid URL, might contain spaces")
error_message_html = ""
if error_message != "":
error_message_html = "<dt><b><font color=red>" + error_message + "</font></b></dt>" + "<br>"
out = """
<dl>
%(error_message)s
<dt>%(whitelist)s</dt>
<dd>%(whitelistText)s</dd>
<dt>%(blacklist)s</dt>
<dd>%(blacklistText)s</dd>
<dt>%(explanation)s</dt>
</dl>
<table class="admin_wvar" cellspacing="0">
<tr><td>
<form action='moderatelist'>
URL:
<input type="text" name="url" value="%(url)s" />
<input type="hidden" name="action" value="%(action)s" />
<select name="listtype" size="1">
<option value=whitelist>whitelist</option>
<option value=blacklist>blacklist</option>
</select>
<input type="submit" class="adminbutton" value="%(buttonText)s">
</form>
</td></tr></table>
""" % {'whitelist': _('Whitelist'),
'whitelistText': _('linkback requests from these URLs will be approved automatically.'),
'blacklist': _('Blacklist'),
'blacklistText': _('linkback requests from these URLs will be refused automatically, no data will be saved.'),
'explanation': _('All URLs in these lists are checked for containment (infix) in any linkback request URL. A whitelist match has precedence over a blacklist match.'),
'url': cgi.escape(url_field_value),
'action': CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['INSERT'],
'buttonText': _('Add URL'),
'error_message': error_message_html}
if entries:
out += tupletotable(header=header, tuple=entries, highlight_rows_p=True,
alternate_row_colors_p=True)
else:
out += "<i>%s</i>" % _('There are no URLs in both lists.')
return addadminbox('<b>%s</b>'% _("Reduce the amount of future pending linkback requests"), [out])
def perform_moderate_url(req, url, action, list_type):
"""
Perform a url action
@param url
@param action: CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['INSERT'] or CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['DELETE']
@param list_type: of CFG_WEBLINKBACK_LIST_TYPE
@return (String, CFG_WEBLINKBACK_ACTION_RETURN_CODE) the String is url if CFG_WEBLINKBACK_ACTION_RETURN_CODE['BAD_INPUT')
"""
if url == '' or ' ' in url:
return (url, CFG_WEBLINKBACK_ACTION_RETURN_CODE['BAD_INPUT'])
elif action == CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['INSERT']:
if url_exists(url):
return ('', CFG_WEBLINKBACK_ACTION_RETURN_CODE['DUPLICATE'])
else:
add_url_to_list(url, list_type, collect_user_info(req))
elif action == CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['DELETE']:
remove_url(url)
else:
return ('', CFG_WEBLINKBACK_ACTION_RETURN_CODE['INVALID_ACTION'])
return ('', CFG_WEBLINKBACK_ACTION_RETURN_CODE['OK'])
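# Added example (illustrative only; `req` is the usual request object and
# CFG_WEBLINKBACK_LIST_TYPE is assumed to come from weblinkback_config):
#
#   perform_moderate_url(req, 'http://spam.example.org',
#                        CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['INSERT'],
#                        CFG_WEBLINKBACK_LIST_TYPE['BLACKLIST'])
#
# returns ('', CFG_WEBLINKBACK_ACTION_RETURN_CODE['OK']) on success,
# ('', CFG_WEBLINKBACK_ACTION_RETURN_CODE['DUPLICATE']) if the URL is already
# listed, and (url, CFG_WEBLINKBACK_ACTION_RETURN_CODE['BAD_INPUT']) if the URL
# is empty or contains spaces.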
def perform_request_display_linkbacks(status, return_code, ln=CFG_SITE_LANG):
"""
Display linkbacks
@param status: of CFG_WEBLINKBACK_STATUS, currently only CFG_WEBLINKBACK_STATUS['PENDING'] is supported
"""
_ = gettext_set_language(ln)
if status == CFG_WEBLINKBACK_STATUS['PENDING']:
linkbacks = get_all_linkbacks(status=status, order=CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME['DESC'])
entries = []
for (linkbackid, origin_url, recid, additional_properties, linkback_type, linkback_status, insert_time) in linkbacks: # pylint: disable=W0612
moderation_prefix = '<a href="moderatelinkback?action=%%s&linkbackid=%s&ln=%s">%%s</a>' % (linkbackid, ln)
entries.append((linkback_type,
format_record(recID=recid, of='hs', ln=ln),
'<a href="%s">%s</a>' % (cgi.escape(origin_url), cgi.escape(get_url_title(origin_url))),
convert_datetext_to_dategui(str(insert_time)),
moderation_prefix % (CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['APPROVE'], 'Approve') + " / " + moderation_prefix % (CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['REJECT'], 'Reject')))
header = ['Linkback type', 'Record', 'Origin', 'Submitted on', '']
error_message = ""
if return_code != CFG_WEBLINKBACK_ACTION_RETURN_CODE['OK']:
error_message = _("Unknown error")
if return_code == CFG_WEBLINKBACK_ACTION_RETURN_CODE['INVALID_ACTION']:
error_message = _("Invalid action")
error_message_html = ""
if error_message != "":
error_message_html = "<dt><b><font color=red>" + error_message + "</font></b></dt>" + "<br>"
out = """
<dl>
%(error_message)s
<dt>%(heading)s</dt>
<dd>%(description)s</dd>
</dl>
""" % {'heading': _("Pending linkbacks"),
'description': _("these linkbacks are not visible to users, they must be approved or rejected."),
'error_message': error_message_html}
if entries:
out += tupletotable(header=header, tuple=entries, highlight_rows_p=True,
alternate_row_colors_p=True)
else:
out += "<i>There are no %s linkbacks.</i>" % status.lower()
return addadminbox('<b>%s</b>'% _("Reduce the amount of currently pending linkback requests"), [out])
else:
return "<i>%s</i>" % _('Currently only pending linkbacks are supported.')
def perform_moderate_linkback(req, linkbackid, action):
"""
Moderate linkbacks
@param linkbackid: linkback id
@param action: of CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION
@return CFG_WEBLINKBACK_ACTION_RETURN_CODE
"""
if action == CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['APPROVE']:
approve_linkback(linkbackid, collect_user_info(req))
elif action == CFG_WEBLINKBACK_ADMIN_MODERATION_ACTION['REJECT']:
reject_linkback(linkbackid, collect_user_info(req))
else:
return CFG_WEBLINKBACK_ACTION_RETURN_CODE['INVALID_ACTION']
return CFG_WEBLINKBACK_ACTION_RETURN_CODE['OK']
|
gpl-2.0
|
daajoe/asp_horn_backdoors
|
reduct.py
|
1
|
3403
|
#!/usr/bin/env python
#
# Copyright 2015
# Johannes K. Fichte, Vienna University of Technology, Austria
#
# reduct.py is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. reduct.py is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with reduct.py. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import logging.config
logging.config.fileConfig('logging.conf')
from signal_handling import *
import contextlib
import sys
import optparse
def options():
usage = "usage: %prog [options] [backdoor]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-o", "--output", dest="out", type="string", help="Output file", default=None)
parser.add_option("-c", "--clasp", dest="clasp", action="store_true", help="Use clasp for solving", default=False)
parser.add_option("--horn", dest="horn", action="store_true", help="Check for Horn backdoor", default=False)
parser.add_option('-x', '--use_predicate', dest="use_predicate", action="store_true", help='Backdoor atoms stored in predicate "_backdoor(X)"', default=False)
opts, bd_files = parser.parse_args(sys.argv[1:])
if len(bd_files)<1:
raise TypeError('No backdoor given')
if len(bd_files)>1:
raise TypeError('More than one backdoor given.')
return opts, bd_files[0]
from lp_parse import *
from reduct_delBd import *
def read_backdoor_ids_from_file(bd_file_name,l,use_predicate):
atoms={}
for k,v in l.symtab.tab.iteritems():
atoms[v] = k
import re
regex = re.compile(r'_backdoor\(([\w,\(\)]+)\).')
backdoor = set([])
with open(bd_file_name, 'r') as f:
for line in f.readlines():
line=line.rstrip('\n\r')
if use_predicate:
pred=re.findall(regex,line)
if len(pred)!=1:
raise ValueError('_backdoor(..) not found in line')
line=pred[0]
try:
value=atoms[line]
except KeyError, e:
value=line[2:]
backdoor.add(int(value))
backdoor.add(-int(value))
return backdoor
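# Added note (illustrative): with -x/--use_predicate the backdoor file is
# expected to contain facts such as
#     _backdoor(a).
#     _backdoor(b(1,2)).
# (matched by the regex above); without -x each line should carry the atom
# name as it appears in the grounded program's symbol table.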
def parse_and_run(f,bd_file_name,output,clasp,horn,use_predicate):
logging.info('Parsing starts')
p = Parser()
try:
l = p.parse('x_', f)
logging.info('Parsing done')
logging.info('Reading Backdoor')
backdoor=read_backdoor_ids_from_file(bd_file_name,l,use_predicate)
logging.info('Applying Backdoor')
apply_del_backdoor2program(l,backdoor)
if horn:
exit(is_horn(l))
else:
l.write(sys.stdout)
except IOError:
        sys.stderr.write("error reading from: {0}\n".format(getattr(f, 'name', '<stdin>')))
        sys.stderr.flush()
raise IOError
if __name__ == '__main__':
opts,files=options()
if sys.stdin:
parse_and_run(sys.stdin,files,opts.out,opts.clasp,opts.horn,opts.use_predicate)
else:
raise RuntimeError('No stdin')
exit(1)
|
gpl-2.0
|
furiousdave/django-avatar
|
avatar/tests.py
|
54
|
5563
|
import os.path
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.models import User
from avatar.settings import AVATAR_DEFAULT_URL, AVATAR_MAX_AVATARS_PER_USER
from avatar.util import get_primary_avatar
from avatar.models import Avatar
try:
from PIL import Image
dir(Image) # Placate PyFlakes
except ImportError:
import Image
def upload_helper(o, filename):
f = open(os.path.join(o.testdatapath, filename), "rb")
response = o.client.post(reverse('avatar_add'), {
'avatar': f,
}, follow=True)
f.close()
return response
class AvatarUploadTests(TestCase):
def setUp(self):
self.testdatapath = os.path.join(os.path.dirname(__file__), "testdata")
self.user = User.objects.create_user('test', '[email protected]', 'testpassword')
self.user.save()
self.client.login(username='test', password='testpassword')
Image.init()
def testNonImageUpload(self):
response = upload_helper(self, "nonimagefile")
self.failUnlessEqual(response.status_code, 200)
self.failIfEqual(response.context['upload_avatar_form'].errors, {})
def testNormalImageUpload(self):
response = upload_helper(self, "test.png")
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 1)
self.failUnlessEqual(response.context['upload_avatar_form'].errors, {})
avatar = get_primary_avatar(self.user)
self.failIfEqual(avatar, None)
def testImageWithoutExtension(self):
# use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
response = upload_helper(self, "imagefilewithoutext")
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.failIfEqual(response.context['upload_avatar_form'].errors, {})
def testImageWithWrongExtension(self):
# use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
response = upload_helper(self, "imagefilewithwrongext.ogg")
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.failIfEqual(response.context['upload_avatar_form'].errors, {})
def testImageTooBig(self):
# use with AVATAR_MAX_SIZE = 1024 * 1024
response = upload_helper(self, "testbig.png")
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.failIfEqual(response.context['upload_avatar_form'].errors, {})
def testDefaultUrl(self):
response = self.client.get(reverse('avatar_render_primary', kwargs={
'user': self.user.username,
'size': 80,
}))
loc = response['Location']
base_url = getattr(settings, 'STATIC_URL', None)
if not base_url:
base_url = settings.MEDIA_URL
self.assertTrue(base_url in loc)
self.assertTrue(loc.endswith(AVATAR_DEFAULT_URL))
def testNonExistingUser(self):
a = get_primary_avatar("nonexistinguser")
self.failUnlessEqual(a, None)
def testThereCanBeOnlyOnePrimaryAvatar(self):
for i in range(1, 10):
self.testNormalImageUpload()
count = Avatar.objects.filter(user=self.user, primary=True).count()
self.failUnlessEqual(count, 1)
def testDeleteAvatar(self):
self.testNormalImageUpload()
avatar = Avatar.objects.filter(user=self.user)
self.failUnlessEqual(len(avatar), 1)
response = self.client.post(reverse('avatar_delete'), {
'choices': [avatar[0].id],
}, follow=True)
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 1)
count = Avatar.objects.filter(user=self.user).count()
self.failUnlessEqual(count, 0)
def testDeletePrimaryAvatarAndNewPrimary(self):
self.testThereCanBeOnlyOnePrimaryAvatar()
primary = get_primary_avatar(self.user)
oid = primary.id
response = self.client.post(reverse('avatar_delete'), {
'choices': [oid],
})
primaries = Avatar.objects.filter(user=self.user, primary=True)
self.failUnlessEqual(len(primaries), 1)
self.failIfEqual(oid, primaries[0].id)
avatars = Avatar.objects.filter(user=self.user)
self.failUnlessEqual(avatars[0].id, primaries[0].id)
def testTooManyAvatars(self):
for i in range(0, AVATAR_MAX_AVATARS_PER_USER):
self.testNormalImageUpload()
count_before = Avatar.objects.filter(user=self.user).count()
response = upload_helper(self, "test.png")
count_after = Avatar.objects.filter(user=self.user).count()
self.failUnlessEqual(response.status_code, 200)
self.failUnlessEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.failIfEqual(response.context['upload_avatar_form'].errors, {})
self.failUnlessEqual(count_before, count_after)
# def testAvatarOrder
# def testReplaceAvatarWhenMaxIsOne
# def testHashFileName
# def testHashUserName
# def testChangePrimaryAvatar
# def testDeleteThumbnailAndRecreation
# def testAutomaticThumbnailCreation
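# Added note (illustrative): settings the comments in the tests above assume;
# actual values belong in the project's settings module, and the last one is a
# project-specific choice:
#
#   AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
#   AVATAR_MAX_SIZE = 1024 * 1024
#   AVATAR_MAX_AVATARS_PER_USER = 42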
|
bsd-3-clause
|
MSOpenTech/edx-platform
|
openedx/core/lib/django_test_client_utils.py
|
20
|
1555
|
"""
This file includes the monkey-patch for the HTTP PATCH method, as we are using
an older version of Django that does not include the PATCH method in its test client.
"""
# pylint: disable=protected-access
from __future__ import unicode_literals
from urlparse import urlparse
from django.test.client import RequestFactory, Client, FakePayload
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
def request_factory_patch(self, path, data=None, content_type=MULTIPART_CONTENT, **extra):
"""
Construct a PATCH request.
"""
# pylint: disable=invalid-name
patch_data = self._encode_data(data or {}, content_type)
parsed = urlparse(path)
r = {
'CONTENT_LENGTH': len(patch_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': parsed[4],
'REQUEST_METHOD': 'PATCH',
'wsgi.input': FakePayload(patch_data),
}
r.update(extra)
return self.request(**r)
def client_patch(self, path, data=None, content_type=MULTIPART_CONTENT, follow=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(path, data=data or {}, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
if not hasattr(RequestFactory, 'patch'):
setattr(RequestFactory, 'patch', request_factory_patch)
if not hasattr(Client, 'patch'):
setattr(Client, 'patch', client_patch)
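# Added usage sketch (illustrative only; the URL and payload are hypothetical):
#
#   client = Client()
#   response = client.patch('/api/example/', data={'name': 'new-name'})
#
# After the monkey-patching above, both RequestFactory and Client expose a
# `patch` method mirroring the built-in `post`/`put` helpers.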
|
agpl-3.0
|
systemd/linux
|
scripts/tracing/draw_functrace.py
|
14676
|
3560
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack, drawn as a textual but hierarchical tree of calls.
Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
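# Added example (illustrative, values hypothetical): a raw line in the format
# parseLine() expects,
#
#     bash-4251  [001]  6306.211233: lock_acquire <-sys_open
#
# would be returned as ("6306.211233", "lock_acquire", "sys_open"),
# i.e. (call time, callee, caller).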
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
gpl-2.0
|
hfp/tensorflow-xsmm
|
tensorflow/python/data/experimental/benchmarks/optimize_benchmark.py
|
2
|
6143
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for static optimizations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager benchmarks too.
class OptimizationBenchmark(test.Benchmark):
"""Benchmarks for static optimizations."""
def benchmarkMapFusion(self):
"""Evaluates performance map of fusion."""
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmarkMapFusion(chain_length, False)
self._benchmarkMapFusion(chain_length, True)
def _benchmarkMapFusion(self, chain_length, optimize_dataset):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x)
if optimize_dataset:
options = dataset_ops.Options()
options.experimental_optimization.map_fusion = True
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
opt_mark = "opt" if optimize_dataset else "noopt"
print("Map dataset {} chain length: {} Median wall time: {}".format(
opt_mark, chain_length, median_wall_time))
self.report_benchmark(
iters=100,
wall_time=median_wall_time,
name="map_fusion_{}_chain_length_{}".format(
opt_mark, chain_length))
def benchmarkMapAndFilterFusion(self):
"""Evaluates performance map of fusion."""
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmarkMapAndFilterFusion(chain_length, False)
self._benchmarkMapAndFilterFusion(chain_length, True)
def _benchmarkMapAndFilterFusion(self, chain_length, optimize_dataset):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x + 5).filter(
lambda x: math_ops.greater_equal(x - 5, 0))
if optimize_dataset:
options = dataset_ops.Options()
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(10):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
opt_mark = "opt" if optimize_dataset else "noopt"
print("Map and filter dataset {} chain length: {} Median wall time: {}"
.format(opt_mark, chain_length, median_wall_time))
self.report_benchmark(
iters=100,
wall_time=median_wall_time,
name="map_and_filter_fusion_{}_chain_length_{}".format(
opt_mark, chain_length))
  # This benchmark compares the performance of a pipeline with multiple chained
  # filters, with and without filter fusion.
def benchmarkFilterFusion(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmarkFilterFusion(chain_length, False)
self._benchmarkFilterFusion(chain_length, True)
def _benchmarkFilterFusion(self, chain_length, optimize_dataset):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(5).repeat(None)
for _ in range(chain_length):
dataset = dataset.filter(lambda x: math_ops.greater_equal(x - 5, 0))
if optimize_dataset:
options = dataset_ops.Options()
options.experimental_optimization.filter_fusion = True
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(10):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
opt_mark = "opt" if optimize_dataset else "no-opt"
print("Filter dataset {} chain length: {} Median wall time: {}".format(
opt_mark, chain_length, median_wall_time))
self.report_benchmark(
iters=1000,
wall_time=median_wall_time,
name="chain_length_{}_{}".format(opt_mark, chain_length))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
ktriponis/ansible-modules-core
|
cloud/rackspace/rax_clb_nodes.py
|
43
|
8603
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_clb_nodes
short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
description:
- Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
version_added: "1.4"
options:
address:
required: false
description:
- IP address or domain name of the node
condition:
required: false
choices:
- enabled
- disabled
- draining
description:
- Condition for the node, which determines its role within the load
balancer
load_balancer_id:
required: true
type: integer
description:
- Load balancer id
node_id:
required: false
type: integer
description:
- Node id
port:
required: false
type: integer
description:
- Port number of the load balanced service on the node
state:
required: false
default: "present"
choices:
- present
- absent
description:
- Indicate desired state of the node
type:
required: false
choices:
- primary
- secondary
description:
- Type of node
wait:
required: false
default: "no"
choices:
- "yes"
- "no"
description:
- Wait for the load balancer to become active before returning
wait_timeout:
required: false
type: integer
default: 30
description:
- How long to wait before giving up and returning an error
weight:
required: false
description:
- Weight of node
author: Lukasz Kawczynski
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
# Add a new node to the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
address: 10.2.2.3
port: 80
condition: enabled
type: primary
wait: yes
credentials: /path/to/credentials
# Drain connections from a node
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
condition: draining
wait: yes
credentials: /path/to/credentials
# Remove a node from the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
state: absent
wait: yes
credentials: /path/to/credentials
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def _activate_virtualenv(path):
path = os.path.expanduser(path)
activate_this = os.path.join(path, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
def _get_node(lb, node_id=None, address=None, port=None):
"""Return a matching node"""
for node in getattr(lb, 'nodes', []):
match_list = []
if node_id is not None:
match_list.append(getattr(node, 'id', None) == node_id)
if address is not None:
match_list.append(getattr(node, 'address', None) == address)
if port is not None:
match_list.append(getattr(node, 'port', None) == port)
if match_list and all(match_list):
return node
return None
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
address=dict(),
condition=dict(choices=['enabled', 'disabled', 'draining']),
load_balancer_id=dict(required=True, type='int'),
node_id=dict(type='int'),
port=dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
type=dict(choices=['primary', 'secondary']),
virtualenv=dict(),
wait=dict(default=False, type='bool'),
wait_timeout=dict(default=30, type='int'),
weight=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
address = module.params['address']
condition = (module.params['condition'] and
module.params['condition'].upper())
load_balancer_id = module.params['load_balancer_id']
node_id = module.params['node_id']
port = module.params['port']
state = module.params['state']
typ = module.params['type'] and module.params['type'].upper()
virtualenv = module.params['virtualenv']
wait = module.params['wait']
wait_timeout = module.params['wait_timeout'] or 1
weight = module.params['weight']
if virtualenv:
try:
_activate_virtualenv(virtualenv)
except IOError, e:
module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
virtualenv, e))
setup_rax_module(module, pyrax)
if not pyrax.cloud_loadbalancers:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
try:
lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
except pyrax.exc.PyraxException, e:
module.fail_json(msg='%s' % e.message)
node = _get_node(lb, node_id, address, port)
result = rax_clb_node_to_dict(node)
if state == 'absent':
if not node: # Removing a non-existent node
module.exit_json(changed=False, state=state)
try:
lb.delete_node(node)
result = {}
except pyrax.exc.NotFound:
module.exit_json(changed=False, state=state)
except pyrax.exc.PyraxException, e:
module.fail_json(msg='%s' % e.message)
else: # present
if not node:
if node_id: # Updating a non-existent node
msg = 'Node %d not found' % node_id
if lb.nodes:
msg += (' (available nodes: %s)' %
', '.join([str(x.id) for x in lb.nodes]))
module.fail_json(msg=msg)
else: # Creating a new node
try:
node = pyrax.cloudloadbalancers.Node(
address=address, port=port, condition=condition,
weight=weight, type=typ)
resp, body = lb.add_nodes([node])
result.update(body['nodes'][0])
except pyrax.exc.PyraxException, e:
module.fail_json(msg='%s' % e.message)
else: # Updating an existing node
mutable = {
'condition': condition,
'type': typ,
'weight': weight,
}
for name, value in mutable.items():
if value is None or value == getattr(node, name):
mutable.pop(name)
if not mutable:
module.exit_json(changed=False, state=state, node=result)
try:
# The diff has to be set explicitly to update node's weight and
# type; this should probably be fixed in pyrax
lb.update_node(node, diff=mutable)
result.update(mutable)
except pyrax.exc.PyraxException, e:
module.fail_json(msg='%s' % e.message)
if wait:
pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
attempts=wait_timeout)
if lb.status != 'ACTIVE':
module.fail_json(
msg='Load balancer not active after %ds (current status: %s)' %
(wait_timeout, lb.status.lower()))
kwargs = {'node': result} if result else {}
module.exit_json(changed=True, state=state, **kwargs)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
|
gpl-3.0
|
MissCatLady/AlarmEZ
|
venv/lib/python2.7/site-packages/pip/vendor/html5lib/ihatexml.py
|
1727
|
16581
|
from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
letter = " | ".join([baseChar, ideographic])
# Without the ":" that XML's NameChar production would otherwise allow
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
nameFirst = " | ".join([letter, "_"])
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
charRanges = [item.strip() for item in chars.split(" | ")]
rv = []
for item in charRanges:
foundMatch = False
for regexp in (reChar, reCharRange):
match = regexp.match(item)
if match is not None:
rv.append([hexToInt(item) for item in match.groups()])
if len(rv[-1]) == 1:
rv[-1] = rv[-1] * 2
foundMatch = True
break
if not foundMatch:
assert len(item) == 1
rv.append([ord(item)] * 2)
rv = normaliseCharList(rv)
return rv
def normaliseCharList(charList):
charList = sorted(charList)
for item in charList:
assert item[1] >= item[0]
rv = []
i = 0
while i < len(charList):
j = 1
rv.append(charList[i])
while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
rv[-1][1] = charList[i + j][1]
j += 1
i += j
return rv
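# Illustrative sketch (not part of the original module): normaliseCharList
# merges sorted, overlapping or adjacent [start, end] ranges into a minimal
# list of disjoint ranges; the two overlapping ranges below collapse into one.
assert normaliseCharList([[48, 57], [55, 70], [72, 72]]) == [[48, 70], [72, 72]]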
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)
def missingRanges(charList):
rv = []
if charList[0] != 0:
rv.append([0, charList[0][0] - 1])
for i, item in enumerate(charList[:-1]):
rv.append([item[1] + 1, charList[i + 1][0] - 1])
if charList[-1][1] != max_unicode:
rv.append([charList[-1][1] + 1, max_unicode])
return rv
def listToRegexpStr(charList):
rv = []
for item in charList:
if item[0] == item[1]:
rv.append(escapeRegexp(chr(item[0])))
else:
rv.append(escapeRegexp(chr(item[0])) + "-" +
escapeRegexp(chr(item[1])))
return "[%s]" % "".join(rv)
def hexToInt(hex_str):
return int(hex_str, 16)
def escapeRegexp(string):
specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
"[", "]", "|", "(", ")", "-")
for char in specialCharacters:
string = string.replace(char, "\\" + char)
return string
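# Illustrative sketch (not part of the original module): the hardcoded
# "non-XML" regexps below were generated by running a character-class string
# through the helpers above, roughly like this (shown for the digit class).
_example_digit_ranges = charStringToList(digit)
_example_non_digit_regexp = re.compile(listToRegexpStr(missingRanges(_example_digit_ranges)))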
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# Simpler things
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
def __init__(self, replaceChars=None,
dropXmlnsLocalName=False,
dropXmlnsAttrNs=False,
preventDoubleDashComments=False,
preventDashAtCommentEnd=False,
replaceFormFeedCharacters=True,
preventSingleQuotePubid=False):
self.dropXmlnsLocalName = dropXmlnsLocalName
self.dropXmlnsAttrNs = dropXmlnsAttrNs
self.preventDoubleDashComments = preventDoubleDashComments
self.preventDashAtCommentEnd = preventDashAtCommentEnd
self.replaceFormFeedCharacters = replaceFormFeedCharacters
self.preventSingleQuotePubid = preventSingleQuotePubid
self.replaceCache = {}
def coerceAttribute(self, name, namespace=None):
if self.dropXmlnsLocalName and name.startswith("xmlns:"):
warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
return None
elif (self.dropXmlnsAttrNs and
namespace == "http://www.w3.org/2000/xmlns/"):
warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
return None
else:
return self.toXmlName(name)
def coerceElement(self, name, namespace=None):
return self.toXmlName(name)
def coerceComment(self, data):
if self.preventDoubleDashComments:
while "--" in data:
warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
data = data.replace("--", "- -")
return data
def coerceCharacters(self, data):
if self.replaceFormFeedCharacters:
for i in range(data.count("\x0C")):
warnings.warn("Text cannot contain U+000C", DataLossWarning)
data = data.replace("\x0C", " ")
# Other non-xml characters
return data
def coercePubid(self, data):
dataOutput = data
for char in nonPubidCharRegexp.findall(data):
warnings.warn("Coercing non-XML pubid", DataLossWarning)
replacement = self.getReplacementCharacter(char)
dataOutput = dataOutput.replace(char, replacement)
if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
warnings.warn("Pubid cannot contain single quote", DataLossWarning)
dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
return dataOutput
def toXmlName(self, name):
nameFirst = name[0]
nameRest = name[1:]
m = nonXmlNameFirstBMPRegexp.match(nameFirst)
if m:
warnings.warn("Coercing non-XML name", DataLossWarning)
nameFirstOutput = self.getReplacementCharacter(nameFirst)
else:
nameFirstOutput = nameFirst
nameRestOutput = nameRest
replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
for char in replaceChars:
warnings.warn("Coercing non-XML name", DataLossWarning)
replacement = self.getReplacementCharacter(char)
nameRestOutput = nameRestOutput.replace(char, replacement)
return nameFirstOutput + nameRestOutput
def getReplacementCharacter(self, char):
if char in self.replaceCache:
replacement = self.replaceCache[char]
else:
replacement = self.escapeChar(char)
return replacement
def fromXmlName(self, name):
for item in set(self.replacementRegexp.findall(name)):
name = name.replace(item, self.unescapeChar(item))
return name
def escapeChar(self, char):
replacement = "U%05X" % ord(char)
self.replaceCache[char] = replacement
return replacement
def unescapeChar(self, charcode):
return chr(int(charcode[1:], 16))
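# Illustrative usage sketch (not part of the original module); it relies on the
# module-level imports at the top of this file (warnings, DataLossWarning).
# A leading character that is not a legal XML name-start character is coerced
# into an escaped "U000XX" token, and fromXmlName reverses the escaping.
_example_filter = InfosetFilter()
_example_coerced = _example_filter.toXmlName("1badname")            # -> "U00031badname"
_example_restored = _example_filter.fromXmlName(_example_coerced)   # -> "1badname"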
|
mit
|
mdboom/pytest
|
testing/test_helpconfig.py
|
188
|
2031
|
from _pytest.main import EXIT_NOTESTSCOLLECTED
import pytest
def test_version(testdir, pytestconfig):
result = testdir.runpytest("--version")
assert result.ret == 0
#p = py.path.local(py.__file__).dirpath()
result.stderr.fnmatch_lines([
'*pytest*%s*imported from*' % (pytest.__version__, )
])
if pytestconfig.pluginmanager.list_plugin_distinfo():
result.stderr.fnmatch_lines([
"*setuptools registered plugins:",
"*at*",
])
def test_help(testdir):
result = testdir.runpytest("--help")
assert result.ret == 0
result.stdout.fnmatch_lines("""
*-v*verbose*
*setup.cfg*
*minversion*
*to see*markers*py.test --markers*
*to see*fixtures*py.test --fixtures*
""")
def test_hookvalidation_unknown(testdir):
testdir.makeconftest("""
def pytest_hello(xyz):
pass
""")
result = testdir.runpytest()
assert result.ret != 0
result.stderr.fnmatch_lines([
'*unknown hook*pytest_hello*'
])
def test_hookvalidation_optional(testdir):
testdir.makeconftest("""
import pytest
@pytest.hookimpl(optionalhook=True)
def pytest_hello(xyz):
pass
""")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_traceconfig(testdir):
result = testdir.runpytest("--traceconfig")
result.stdout.fnmatch_lines([
"*using*pytest*py*",
"*active plugins*",
])
def test_debug(testdir, monkeypatch):
result = testdir.runpytest_subprocess("--debug")
assert result.ret == EXIT_NOTESTSCOLLECTED
p = testdir.tmpdir.join("pytestdebug.log")
assert "pytest_sessionstart" in p.read()
def test_PYTEST_DEBUG(testdir, monkeypatch):
monkeypatch.setenv("PYTEST_DEBUG", "1")
result = testdir.runpytest_subprocess()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stderr.fnmatch_lines([
"*pytest_plugin_registered*",
"*manager*PluginManager*"
])
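# Illustrative sketch (not part of the original file): the same testdir fixture
# can drive other informational flags; the exact matched line is an assumption.
def test_markers_sketch(testdir):
    result = testdir.runpytest("--markers")
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*@pytest.mark.skipif*",
    ])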
|
mit
|
shubhdev/edxOnBaadal
|
lms/djangoapps/courseware/features/conditional.py
|
102
|
4723
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, steps
from nose.tools import assert_in, assert_true # pylint: disable=no-name-in-module
from common import i_am_registered_for_the_course, visit_scenario_item
from problems_setup import add_problem_to_course, answer_problem
@steps
class ConditionalSteps(object):
COURSE_NUM = 'test_course'
def setup_conditional(self, step, condition_type, condition, cond_value):
r'that a course has a Conditional conditioned on (?P<condition_type>\w+) (?P<condition>\w+)=(?P<cond_value>\w+)$'
i_am_registered_for_the_course(step, self.COURSE_NUM)
world.scenario_dict['VERTICAL'] = world.ItemFactory(
parent_location=world.scenario_dict['SECTION'].location,
category='vertical',
display_name="Test Vertical",
)
world.scenario_dict['WRAPPER'] = world.ItemFactory(
parent_location=world.scenario_dict['VERTICAL'].location,
category='wrapper',
display_name="Test Poll Wrapper"
)
if condition_type == 'problem':
world.scenario_dict['CONDITION_SOURCE'] = add_problem_to_course(self.COURSE_NUM, 'string')
elif condition_type == 'poll':
world.scenario_dict['CONDITION_SOURCE'] = world.ItemFactory(
parent_location=world.scenario_dict['WRAPPER'].location,
category='poll_question',
display_name='Conditional Poll',
data={
'question': 'Is this a good poll?',
'answers': [
{'id': 'yes', 'text': 'Yes, of course'},
{'id': 'no', 'text': 'Of course not!'}
],
}
)
else:
raise Exception("Unknown condition type: {!r}".format(condition_type))
metadata = {
'xml_attributes': {
condition: cond_value
}
}
world.scenario_dict['CONDITIONAL'] = world.ItemFactory(
parent_location=world.scenario_dict['WRAPPER'].location,
category='conditional',
display_name="Test Conditional",
metadata=metadata,
sources_list=[world.scenario_dict['CONDITION_SOURCE'].location],
)
world.ItemFactory(
parent_location=world.scenario_dict['CONDITIONAL'].location,
category='html',
display_name='Conditional Contents',
            data='<html><div class="hidden-contents">Hidden Contents</div></html>'
)
def setup_problem_attempts(self, step, not_attempted=None):
r'that the conditioned problem has (?P<not_attempted>not )?been attempted$'
visit_scenario_item('CONDITION_SOURCE')
if not_attempted is None:
answer_problem(self.COURSE_NUM, 'string', True)
world.css_click("button.check")
def when_i_view_the_conditional(self, step):
r'I view the conditional$'
visit_scenario_item('CONDITIONAL')
world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Conditional]").data("initialized")')
def check_visibility(self, step, visible):
r'the conditional contents are (?P<visible>\w+)$'
world.wait_for_ajax_complete()
assert_in(visible, ('visible', 'hidden'))
if visible == 'visible':
world.wait_for_visible('.hidden-contents')
assert_true(world.css_visible('.hidden-contents'))
else:
assert_true(world.is_css_not_present('.hidden-contents'))
assert_true(
world.css_contains_text(
'.conditional-message',
'must be attempted before this will become visible.'
)
)
def answer_poll(self, step, answer):
r' I answer the conditioned poll "([^"]*)"$'
visit_scenario_item('CONDITION_SOURCE')
world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Poll]").data("initialized")')
world.wait_for_ajax_complete()
answer_text = [
poll_answer['text']
for poll_answer
in world.scenario_dict['CONDITION_SOURCE'].answers
if poll_answer['id'] == answer
][0]
text_selector = '.poll_answer .text'
poll_texts = world.retry_on_exception(
lambda: [elem.text for elem in world.css_find(text_selector)]
)
for idx, poll_text in enumerate(poll_texts):
if poll_text == answer_text:
world.css_click(text_selector, index=idx)
return
ConditionalSteps()
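# Illustrative sketch (not part of the original file): the step regexps above
# are meant to match Gherkin lines roughly like the following; the exact
# wording of the .feature file is an assumption.
#   Given that a course has a Conditional conditioned on problem attempted=True
#   And that the conditioned problem has been attempted
#   When I view the conditional
#   Then the conditional contents are visible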
|
agpl-3.0
|
xflows/clowdflows
|
streams/management/commands/run_streams.py
|
2
|
1655
|
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = 'check for streams that need to be executed and execute them'
option_list = NoArgsCommand.option_list
def handle_noargs(self, **options):
import time
from streams.models import Stream
self.stdout.write("Working on streams...")
self.stdout.flush()
while True:
streams = Stream.objects.filter(active=True)
for stream in streams:
#self.stdout.write(u"\nChecking stream "+unicode(stream)+"...\n")
#self.stdout.flush()
                #check whether enough time has already passed
import django
now = django.utils.timezone.now()
delta = now - stream.last_executed
delta_seconds = delta.seconds + delta.days * 86400
if delta_seconds > stream.period:
stream.last_executed = now
stream.save()
self.stdout.write(u"Executing "+unicode(stream)+"...")
self.stdout.flush()
try:
stream.execute()
except:
import traceback
self.stdout.write("\n ERROR in executing stream:\n")
traceback.print_exc(file=self.stdout)
self.stdout.write("done!\n")
self.stdout.flush()
#print stream.execute()
time.sleep(1)
#self.stdout.write(".")
#self.stdout.flush()
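# Illustrative sketch (not part of the original command): the elapsed-time
# arithmetic used in the loop above is equivalent to this small helper,
# e.g. _elapsed_seconds(timedelta(days=1, seconds=30)) == 86430.
def _elapsed_seconds(delta):
    """Return the length of a timedelta in whole seconds (microseconds ignored)."""
    return delta.seconds + delta.days * 86400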
|
mit
|
bobcolner/material-girl
|
tests/test_user_model.py
|
118
|
6851
|
import unittest
import time
from datetime import datetime
from app import create_app, db
from app.models import User, AnonymousUser, Role, Permission, Follow
class UserModelTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
Role.insert_roles()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_password_setter(self):
u = User(password='cat')
self.assertTrue(u.password_hash is not None)
def test_no_password_getter(self):
u = User(password='cat')
with self.assertRaises(AttributeError):
u.password
def test_password_verification(self):
u = User(password='cat')
self.assertTrue(u.verify_password('cat'))
self.assertFalse(u.verify_password('dog'))
def test_password_salts_are_random(self):
u = User(password='cat')
u2 = User(password='cat')
self.assertTrue(u.password_hash != u2.password_hash)
def test_valid_confirmation_token(self):
u = User(password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_confirmation_token()
self.assertTrue(u.confirm(token))
def test_invalid_confirmation_token(self):
u1 = User(password='cat')
u2 = User(password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_confirmation_token()
self.assertFalse(u2.confirm(token))
def test_expired_confirmation_token(self):
u = User(password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_confirmation_token(1)
time.sleep(2)
self.assertFalse(u.confirm(token))
def test_valid_reset_token(self):
u = User(password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_reset_token()
self.assertTrue(u.reset_password(token, 'dog'))
self.assertTrue(u.verify_password('dog'))
def test_invalid_reset_token(self):
u1 = User(password='cat')
u2 = User(password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_reset_token()
self.assertFalse(u2.reset_password(token, 'horse'))
self.assertTrue(u2.verify_password('dog'))
def test_valid_email_change_token(self):
u = User(email='[email protected]', password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_email_change_token('[email protected]')
self.assertTrue(u.change_email(token))
self.assertTrue(u.email == '[email protected]')
def test_invalid_email_change_token(self):
u1 = User(email='[email protected]', password='cat')
u2 = User(email='[email protected]', password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_email_change_token('[email protected]')
self.assertFalse(u2.change_email(token))
self.assertTrue(u2.email == '[email protected]')
def test_duplicate_email_change_token(self):
u1 = User(email='[email protected]', password='cat')
u2 = User(email='[email protected]', password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u2.generate_email_change_token('[email protected]')
self.assertFalse(u2.change_email(token))
self.assertTrue(u2.email == '[email protected]')
def test_roles_and_permissions(self):
u = User(email='[email protected]', password='cat')
self.assertTrue(u.can(Permission.WRITE_ARTICLES))
self.assertFalse(u.can(Permission.MODERATE_COMMENTS))
def test_anonymous_user(self):
u = AnonymousUser()
self.assertFalse(u.can(Permission.FOLLOW))
def test_timestamps(self):
u = User(password='cat')
db.session.add(u)
db.session.commit()
self.assertTrue(
(datetime.utcnow() - u.member_since).total_seconds() < 3)
self.assertTrue(
(datetime.utcnow() - u.last_seen).total_seconds() < 3)
def test_ping(self):
u = User(password='cat')
db.session.add(u)
db.session.commit()
time.sleep(2)
last_seen_before = u.last_seen
u.ping()
self.assertTrue(u.last_seen > last_seen_before)
def test_gravatar(self):
u = User(email='[email protected]', password='cat')
with self.app.test_request_context('/'):
gravatar = u.gravatar()
gravatar_256 = u.gravatar(size=256)
gravatar_pg = u.gravatar(rating='pg')
gravatar_retro = u.gravatar(default='retro')
with self.app.test_request_context('/', base_url='https://example.com'):
gravatar_ssl = u.gravatar()
self.assertTrue('http://www.gravatar.com/avatar/' +
                        'd4c74594d841139328695756648b6bd6' in gravatar)
self.assertTrue('s=256' in gravatar_256)
self.assertTrue('r=pg' in gravatar_pg)
self.assertTrue('d=retro' in gravatar_retro)
self.assertTrue('https://secure.gravatar.com/avatar/' +
'd4c74594d841139328695756648b6bd6' in gravatar_ssl)
def test_follows(self):
u1 = User(email='[email protected]', password='cat')
u2 = User(email='[email protected]', password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
self.assertFalse(u1.is_following(u2))
self.assertFalse(u1.is_followed_by(u2))
timestamp_before = datetime.utcnow()
u1.follow(u2)
db.session.add(u1)
db.session.commit()
timestamp_after = datetime.utcnow()
self.assertTrue(u1.is_following(u2))
self.assertFalse(u1.is_followed_by(u2))
self.assertTrue(u2.is_followed_by(u1))
self.assertTrue(u1.followed.count() == 2)
self.assertTrue(u2.followers.count() == 2)
f = u1.followed.all()[-1]
self.assertTrue(f.followed == u2)
self.assertTrue(timestamp_before <= f.timestamp <= timestamp_after)
f = u2.followers.all()[-1]
self.assertTrue(f.follower == u1)
u1.unfollow(u2)
db.session.add(u1)
db.session.commit()
self.assertTrue(u1.followed.count() == 1)
self.assertTrue(u2.followers.count() == 1)
self.assertTrue(Follow.query.count() == 2)
u2.follow(u1)
db.session.add(u1)
db.session.add(u2)
db.session.commit()
db.session.delete(u2)
db.session.commit()
self.assertTrue(Follow.query.count() == 1)
|
mit
|
sunu/oh-missions-oppia-beta
|
extensions/rules/coord_two_dim_test.py
|
5
|
1551
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classification of 2D coordinates."""
__author__ = 'Sean Lip'
from extensions.rules import coord_two_dim
import test_utils
class CoordTwoDimRuleUnitTests(test_utils.GenericTestBase):
"""Tests for rules operating on CoordTwoDim objects."""
def test_within_rule(self):
self.assertFalse(coord_two_dim.Within(10, [10, 10]).eval([0, 0]))
self.assertTrue(coord_two_dim.Within(20, [10, 10]).eval([0, 0]))
self.assertFalse(coord_two_dim.Within(10, [5, 10]).eval([-5, 0]))
self.assertTrue(coord_two_dim.Within(20, [5, 10]).eval([-5, 0]))
def test_not_within_rule(self):
self.assertTrue(coord_two_dim.NotWithin(10, [10, 10]).eval([0, 0]))
self.assertFalse(coord_two_dim.NotWithin(20, [10, 10]).eval([0, 0]))
self.assertTrue(coord_two_dim.NotWithin(10, [5, 10]).eval([-5, 0]))
self.assertFalse(coord_two_dim.NotWithin(20, [5, 10]).eval([-5, 0]))
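# Illustrative sketch (not part of the original tests): one reading of the
# expected Within/NotWithin semantics that is consistent with the assertions
# above is a planar Euclidean distance check against the given point.
import math
def _euclidean_within(radius, center, point):
    return math.hypot(point[0] - center[0], point[1] - center[1]) <= radius
assert _euclidean_within(20, [10, 10], [0, 0])
assert not _euclidean_within(10, [10, 10], [0, 0])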
|
apache-2.0
|
magus424/powerline
|
tests/test_provided_config_files.py
|
1
|
6114
|
# vim:fileencoding=utf-8:noet
'''Dynamic configuration files tests.'''
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import os
import json
import tests.vim as vim_module
from tests.lib import Args, urllib_read, replace_attr
from tests import TestCase
VBLOCK = chr(ord('V') - 0x40)
SBLOCK = chr(ord('S') - 0x40)
class TestConfig(TestCase):
def test_vim(self):
from powerline.vim import VimPowerline
cfg_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'powerline', 'config_files')
buffers = (
(('bufoptions',), {'buftype': 'help'}),
(('bufname', '[Command Line]'), {}),
(('bufoptions',), {'buftype': 'quickfix'}),
(('bufname', 'NERD_tree_1'), {}),
(('bufname', '__Gundo__'), {}),
(('bufname', '__Gundo_Preview__'), {}),
(('bufname', 'ControlP'), {}),
)
with open(os.path.join(cfg_path, 'config.json'), 'r') as f:
local_themes_raw = json.load(f)['ext']['vim']['local_themes']
# Don't run tests on external/plugin segments
local_themes = dict((k, v) for (k, v) in local_themes_raw.items())
self.assertEqual(len(buffers), len(local_themes) - 1)
outputs = {}
i = 0
with vim_module._with('split'):
with VimPowerline() as powerline:
def check_output(mode, args, kwargs):
if mode == 'nc':
window = vim_module.windows[0]
window_id = 2
else:
vim_module._start_mode(mode)
window = vim_module.current.window
window_id = 1
winnr = window.number
out = powerline.render(window, window_id, winnr)
if out in outputs:
self.fail('Duplicate in set #{0} ({1}) for mode {2!r} (previously defined in set #{3} ({4!r}) for mode {5!r})'.format(i, (args, kwargs), mode, *outputs[out]))
outputs[out] = (i, (args, kwargs), mode)
with vim_module._with('bufname', '/tmp/foo.txt'):
out = powerline.render(vim_module.current.window, 1, vim_module.current.window.number, is_tabline=True)
outputs[out] = (-1, (None, None), 'tab')
with vim_module._with('globals', powerline_config_path=cfg_path):
exclude = set(('no', 'v', 'V', VBLOCK, 's', 'S', SBLOCK, 'R', 'Rv', 'c', 'cv', 'ce', 'r', 'rm', 'r?', '!'))
try:
for mode in ['n', 'nc', 'no', 'v', 'V', VBLOCK, 's', 'S', SBLOCK, 'i', 'R', 'Rv', 'c', 'cv', 'ce', 'r', 'rm', 'r?', '!']:
check_output(mode, None, None)
for args, kwargs in buffers:
i += 1
if mode in exclude:
continue
if mode == 'nc' and args == ('bufname', 'ControlP'):
                                    # The ControlP window is never shown out of
                                    # focus, so skip the 'nc' check for it
continue
with vim_module._with(*args, **kwargs):
check_output(mode, args, kwargs)
finally:
vim_module._start_mode('n')
def test_tmux(self):
from powerline.segments import common
from imp import reload
reload(common)
from powerline.shell import ShellPowerline
with replace_attr(common, 'urllib_read', urllib_read):
with ShellPowerline(Args(ext=['tmux']), run_once=False) as powerline:
powerline.render()
with ShellPowerline(Args(ext=['tmux']), run_once=False) as powerline:
powerline.render()
def test_zsh(self):
from powerline.shell import ShellPowerline
args = Args(last_pipe_status=[1, 0], jobnum=0, ext=['shell'], renderer_module='.zsh')
segment_info = {'args': args}
with ShellPowerline(args, run_once=False) as powerline:
powerline.render(segment_info=segment_info)
with ShellPowerline(args, run_once=False) as powerline:
powerline.render(segment_info=segment_info)
segment_info['local_theme'] = 'select'
with ShellPowerline(args, run_once=False) as powerline:
powerline.render(segment_info=segment_info)
segment_info['local_theme'] = 'continuation'
segment_info['parser_state'] = 'if cmdsubst'
with ShellPowerline(args, run_once=False) as powerline:
powerline.render(segment_info=segment_info)
def test_bash(self):
from powerline.shell import ShellPowerline
args = Args(last_exit_code=1, jobnum=0, ext=['shell'], renderer_module='.bash', config={'ext': {'shell': {'theme': 'default_leftonly'}}})
with ShellPowerline(args, run_once=False) as powerline:
powerline.render(segment_info={'args': args})
with ShellPowerline(args, run_once=False) as powerline:
powerline.render(segment_info={'args': args})
def test_ipython(self):
from powerline.ipython import IPythonPowerline
class IpyPowerline(IPythonPowerline):
paths = None
config_overrides = None
theme_overrides = {}
segment_info = Args(prompt_count=1)
with IpyPowerline() as powerline:
for prompt_type in ['in', 'in2']:
powerline.render(is_prompt=True, matcher_info=prompt_type, segment_info=segment_info)
powerline.render(is_prompt=True, matcher_info=prompt_type, segment_info=segment_info)
with IpyPowerline() as powerline:
for prompt_type in ['out', 'rewrite']:
powerline.render(is_prompt=False, matcher_info=prompt_type, segment_info=segment_info)
powerline.render(is_prompt=False, matcher_info=prompt_type, segment_info=segment_info)
def test_wm(self):
from powerline.segments import common
from imp import reload
reload(common)
from powerline import Powerline
with replace_attr(common, 'urllib_read', urllib_read):
Powerline(ext='wm', renderer_module='pango_markup', run_once=True).render()
reload(common)
old_cwd = None
saved_get_config_paths = None
def setUpModule():
global old_cwd
global saved_get_config_paths
import powerline
saved_get_config_paths = powerline.get_config_paths
path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'powerline', 'config_files')
powerline.get_config_paths = lambda: [path]
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'path')))
old_cwd = os.getcwd()
from powerline.segments import vim
globals()['vim'] = vim
def tearDownModule():
global old_cwd
global saved_get_config_paths
import powerline
powerline.get_config_paths = saved_get_config_paths
os.chdir(old_cwd)
old_cwd = None
sys.path.pop(0)
if __name__ == '__main__':
from tests import main
main()
|
mit
|
jimsimon/sky_engine
|
sky/engine/bindings/scripts/blink_idl_parser.py
|
14
|
19028
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Parser for Blink IDL.
The parser uses the PLY (Python Lex-Yacc) library to build a set of parsing
rules which understand the Blink dialect of Web IDL.
It derives from a standard Web IDL parser, overriding rules where Blink IDL
differs syntactically or semantically from the base parser, or where the base
parser diverges from the Web IDL standard.
Web IDL:
http://www.w3.org/TR/WebIDL/
Web IDL Grammar:
http://www.w3.org/TR/WebIDL/#idl-grammar
PLY:
http://www.dabeaz.com/ply/
Design doc:
http://www.chromium.org/developers/design-documents/idl-compiler#TOC-Front-end
"""
# Disable check for line length and Member as Function due to how grammar rules
# are defined with PLY
#
# pylint: disable=R0201
# pylint: disable=C0301
#
# Disable attribute validation, as lint can't import parent class to check
# pylint: disable=E1101
import os.path
import sys
# PLY is in Chromium src/third_party/ply
module_path, module_name = os.path.split(__file__)
third_party = os.path.join(module_path, os.pardir, os.pardir, os.pardir, os.pardir, 'third_party')
# Insert at front to override system libraries, and after path[0] == script dir
sys.path.insert(1, third_party)
from ply import yacc
# Base parser is in Chromium src/tools/idl_parser
tools_dir = os.path.join(module_path, os.pardir, os.pardir, os.pardir, os.pardir, 'tools')
sys.path.append(tools_dir)
from idl_parser.idl_parser import IDLParser, ListFromConcat
from idl_parser.idl_parser import ParseFile as parse_file
from blink_idl_lexer import BlinkIDLLexer
import blink_idl_lexer
# Explicitly set starting symbol to rule defined only in base parser.
# BEWARE that the starting symbol should NOT be defined in both the base parser
# and the derived one, as otherwise which is used depends on which line number
# is lower, which is fragile. Instead, either use one in base parser or
# create a new symbol, so that this is unambiguous.
# FIXME: unfortunately, this doesn't work in PLY 3.4, so need to duplicate the
# rule below.
STARTING_SYMBOL = 'Definitions'
# We ignore comments (and hence don't need 'Top') but base parser preserves them
# FIXME: Upstream: comments should be removed in base parser
REMOVED_RULES = ['Top', # [0]
'Comments', # [0.1]
'CommentsRest', # [0.2]
]
# Remove rules from base class
# FIXME: add a class method upstream: @classmethod IDLParser._RemoveRules
for rule in REMOVED_RULES:
production_name = 'p_' + rule
delattr(IDLParser, production_name)
class BlinkIDLParser(IDLParser):
# [1]
# FIXME: Need to duplicate rule for starting symbol here, with line number
# *lower* than in the base parser (idl_parser.py).
# This is a bug in PLY: it determines starting symbol by lowest line number.
# This can be overridden by the 'start' parameter, but as of PLY 3.4 this
# doesn't work correctly.
def p_Definitions(self, p):
"""Definitions : ExtendedAttributeList Definition Definitions
| """
if len(p) > 1:
p[2].AddChildren(p[1])
p[0] = ListFromConcat(p[2], p[3])
# Below are grammar rules used by yacc, given by functions named p_<RULE>.
# * The docstring is the production rule in BNF (grammar).
# * The body is the yacc action (semantics).
#
# The PLY framework builds the actual low-level parser by introspecting this
# parser object, selecting all attributes named p_<RULE> as grammar rules.
# It extracts the docstrings and uses them as the production rules, building
# the table of a LALR parser, and uses the body of the functions as actions.
#
# Reference:
# http://www.dabeaz.com/ply/ply.html#ply_nn23
#
# Review of yacc:
# Yacc parses a token stream, internally producing a Concrete Syntax Tree
# (CST), where each node corresponds to a production rule in the grammar.
# At each node, it runs an action, which is usually "produce a node in the
# Abstract Syntax Tree (AST)" or "ignore this node" (for nodes in the CST
# that aren't included in the AST, since only needed for parsing).
#
# The rules use pseudo-variables; in PLY syntax:
# p[0] is the left side: assign return value to p[0] instead of returning,
# p[1] ... p[n] are the right side: the values can be accessed, and they
# can be modified.
# (In yacc these are $$ and $1 ... $n.)
#
# The rules can look cryptic at first, but there are a few standard
# transforms from the CST to AST. With these in mind, the actions should
# be reasonably legible.
#
# * Ignore production
# Discard this branch. Primarily used when one alternative is empty.
#
# Sample code:
# if len(p) > 1:
# p[0] = ...
# # Note no assignment if len(p) == 1
#
# * Eliminate singleton production
# Discard this node in the CST, pass the next level down up the tree.
# Used to ignore productions only necessary for parsing, but not needed
# in the AST.
#
# Sample code:
# p[0] = p[1]
#
# * Build node
# The key type of rule. In this parser, produces object of class IDLNode.
# There are several helper functions:
# * BuildProduction: actually builds an IDLNode, based on a production.
# * BuildAttribute: builds an IDLAttribute, which is a temporary
# object to hold a name-value pair, which is then
# set as a Property of the IDLNode when the IDLNode
# is built.
# * BuildNamed: Same as BuildProduction, and sets the 'NAME' property.
# * BuildTrue: BuildAttribute with value True, for flags.
# See base idl_parser.py for definitions and more examples of use.
#
# Sample code:
# # Build node of type NodeType, with value p[1], and children.
# p[0] = self.BuildProduction('NodeType', p, 1, children)
#
# # Build named node of type NodeType, with name and value p[1].
# # (children optional)
# p[0] = self.BuildNamed('NodeType', p, 1)
#
# # Make a list
# # Used if one node has several children.
# children = ListFromConcat(p[2], p[3])
# p[0] = self.BuildProduction('NodeType', p, 1, children)
#
# # Also used to collapse the right-associative tree
# # produced by parsing a list back into a single list.
# """Foos : Foo Foos
# |"""
# if len(p) > 1:
# p[0] = ListFromConcat(p[1], p[2])
#
# # Add children.
# # Primarily used to add attributes, produced via BuildTrue.
# # p_StaticAttribute
# """StaticAttribute : STATIC Attribute"""
# p[2].AddChildren(self.BuildTrue('STATIC'))
# p[0] = p[2]
#
# Numbering scheme for the rules is:
# [1] for Web IDL spec (or additions in base parser)
# These should all be upstreamed to the base parser.
# [b1] for Blink IDL changes (overrides Web IDL)
# [b1.1] for Blink IDL additions, auxiliary rules for [b1]
# Numbers are as per Candidate Recommendation 19 April 2012:
# http://www.w3.org/TR/2012/CR-WebIDL-20120419/
# [3] Override action, since we distinguish callbacks
# FIXME: Upstream
def p_CallbackOrInterface(self, p):
"""CallbackOrInterface : CALLBACK CallbackRestOrInterface
| Interface"""
if len(p) > 2:
p[2].AddChildren(self.BuildTrue('CALLBACK'))
p[0] = p[2]
else:
p[0] = p[1]
# [b27] Add strings, more 'Literal' productions
# 'Literal's needed because integers and strings are both internally strings
def p_ConstValue(self, p):
"""ConstValue : BooleanLiteral
| FloatLiteral
| IntegerLiteral
| StringLiteral
| null"""
# Standard is (no 'string', fewer 'Literal's):
# ConstValue : BooleanLiteral
# | FloatLiteral
# | integer
# | NULL
p[0] = p[1]
# [b27.1]
def p_IntegerLiteral(self, p):
"""IntegerLiteral : integer"""
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'integer'),
self.BuildAttribute('NAME', p[1]))
# [b27.2]
def p_StringLiteral(self, p):
"""StringLiteral : string"""
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'DOMString'),
self.BuildAttribute('NAME', p[1]))
# [b47]
def p_ExceptionMember(self, p):
"""ExceptionMember : Const
| ExceptionField
| Attribute
| ExceptionOperation"""
# Standard is (no Attribute, no ExceptionOperation):
# ExceptionMember : Const
# | ExceptionField
# FIXME: In DOMException.idl, Attributes should be changed to
# ExceptionFields, and Attribute removed from this rule.
p[0] = p[1]
# [b47.1] FIXME: rename to ExceptionAttribute
def p_Attribute(self, p):
"""Attribute : ReadOnly ATTRIBUTE Type identifier ';'"""
p[0] = self.BuildNamed('Attribute', p, 4,
ListFromConcat(p[1], p[3]))
# [b47.2]
def p_ExceptionOperation(self, p):
"""ExceptionOperation : Type identifier '(' ')' ';'"""
# Needed to handle one case in DOMException.idl:
# // Override in a Mozilla compatible format
# [NotEnumerable] DOMString toString();
# Limited form of Operation to prevent others from being added.
# FIXME: Should be a stringifier instead.
p[0] = self.BuildNamed('ExceptionOperation', p, 2, p[1])
# Extended attributes
# [b49] Override base parser: remove comment field, since comments stripped
# FIXME: Upstream
def p_ExtendedAttributeList(self, p):
"""ExtendedAttributeList : '[' ExtendedAttribute ExtendedAttributes ']'
| '[' ']'
| """
if len(p) > 3:
items = ListFromConcat(p[2], p[3])
p[0] = self.BuildProduction('ExtAttributes', p, 1, items)
# Error handling for ExtendedAttributeList.
# We can't upstream this because we override ExtendedAttributeList.
def p_ExtendedAttributeListError(self, p):
"""ExtendedAttributeList : '[' ExtendedAttribute ',' error"""
p[0] = self.BuildError(p, "ExtendedAttributeList")
# [b50] Allow optional trailing comma
# Blink-only, marked as WONTFIX in Web IDL spec:
# https://www.w3.org/Bugs/Public/show_bug.cgi?id=22156
def p_ExtendedAttributes(self, p):
"""ExtendedAttributes : ',' ExtendedAttribute ExtendedAttributes
| ','
|"""
if len(p) > 3:
p[0] = ListFromConcat(p[2], p[3])
# [b51] Add ExtendedAttributeStringLiteral and ExtendedAttributeStringLiteralList
def p_ExtendedAttribute(self, p):
"""ExtendedAttribute : ExtendedAttributeNoArgs
| ExtendedAttributeArgList
| ExtendedAttributeIdent
| ExtendedAttributeIdentList
| ExtendedAttributeNamedArgList
| ExtendedAttributeStringLiteral
| ExtendedAttributeStringLiteralList"""
p[0] = p[1]
# [59]
# FIXME: Upstream UnionType
def p_UnionType(self, p):
"""UnionType : '(' UnionMemberType OR UnionMemberType UnionMemberTypes ')'"""
members = ListFromConcat(p[2], p[4], p[5])
p[0] = self.BuildProduction('UnionType', p, 1, members)
# [60]
def p_UnionMemberType(self, p):
"""UnionMemberType : NonAnyType
| UnionType TypeSuffix
| ANY '[' ']' TypeSuffix"""
if len(p) == 2:
p[0] = self.BuildProduction('Type', p, 1, p[1])
elif len(p) == 3:
p[0] = self.BuildProduction('Type', p, 1, ListFromConcat(p[1], p[2]))
else:
any_node = ListFromConcat(self.BuildProduction('Any', p, 1), p[4])
p[0] = self.BuildProduction('Type', p, 1, any_node)
# [61]
def p_UnionMemberTypes(self, p):
"""UnionMemberTypes : OR UnionMemberType UnionMemberTypes
|"""
if len(p) > 2:
p[0] = ListFromConcat(p[2], p[3])
# [70] Override base parser to remove non-standard sized array
# FIXME: Upstream
def p_TypeSuffix(self, p):
"""TypeSuffix : '[' ']' TypeSuffix
| '?' TypeSuffixStartingWithArray
|"""
if len(p) == 4:
p[0] = self.BuildProduction('Array', p, 1, p[3])
elif len(p) == 3:
p[0] = ListFromConcat(self.BuildTrue('NULLABLE'), p[2])
# Blink extension: Add support for string literal Extended Attribute values
def p_ExtendedAttributeStringLiteral(self, p):
"""ExtendedAttributeStringLiteral : identifier '=' StringLiteral """
def unwrap_string(ls):
"""Reach in and grab the string literal's "NAME"."""
return ls[1].value
value = self.BuildAttribute('VALUE', unwrap_string(p[3]))
p[0] = self.BuildNamed('ExtAttribute', p, 1, value)
# Blink extension: Add support for compound Extended Attribute values over string literals ("A","B")
def p_ExtendedAttributeStringLiteralList(self, p):
"""ExtendedAttributeStringLiteralList : identifier '=' '(' StringLiteralList ')' """
value = self.BuildAttribute('VALUE', p[4])
p[0] = self.BuildNamed('ExtAttribute', p, 1, value)
# Blink extension: one or more string literals. The values aren't propagated as literals,
    # but by their value only.
def p_StringLiteralList(self, p):
"""StringLiteralList : StringLiteral ',' StringLiteralList
| StringLiteral"""
def unwrap_string(ls):
"""Reach in and grab the string literal's "NAME"."""
return ls[1].value
if len(p) > 3:
p[0] = ListFromConcat(unwrap_string(p[1]), p[3])
else:
p[0] = ListFromConcat(unwrap_string(p[1]))
def __init__(self,
# common parameters
debug=False,
# local parameters
rewrite_tables=False,
# idl_parser parameters
lexer=None, verbose=False, mute_error=False,
# yacc parameters
outputdir='', optimize=True, write_tables=False,
picklefile=None):
if debug:
# Turn off optimization and caching, and write out tables,
# to help debugging
optimize = False
outputdir = None
picklefile = None
write_tables = True
if outputdir:
picklefile = picklefile or os.path.join(outputdir, 'parsetab.pickle')
if rewrite_tables:
try:
os.unlink(picklefile)
except OSError:
pass
lexer = lexer or BlinkIDLLexer(debug=debug,
outputdir=outputdir,
optimize=optimize)
self.lexer = lexer
self.tokens = lexer.KnownTokens()
# Using SLR (instead of LALR) generates the table faster,
# but produces the same output. This is ok b/c Web IDL (and Blink IDL)
# is an SLR grammar (as is often the case for simple LL(1) grammars).
#
# Optimized mode substantially decreases startup time (by disabling
# error checking), and also allows use of Python's optimized mode.
# See: Using Python's Optimized Mode
# http://www.dabeaz.com/ply/ply.html#ply_nn38
#
# |picklefile| allows simpler importing than |tabmodule| (parsetab.py),
# as we don't need to modify sys.path; virtually identical speed.
# See: CHANGES, Version 3.2
# http://ply.googlecode.com/svn/trunk/CHANGES
self.yaccobj = yacc.yacc(module=self,
start=STARTING_SYMBOL,
method='SLR',
debug=debug,
optimize=optimize,
write_tables=write_tables,
picklefile=picklefile)
self.parse_debug = debug
self.verbose = verbose
self.mute_error = mute_error
self._parse_errors = 0
self._parse_warnings = 0
self._last_error_msg = None
self._last_error_lineno = 0
self._last_error_pos = 0
################################################################################
def main(argv):
# If file itself executed, cache lex/parse tables
try:
outputdir = argv[1]
except IndexError as err:
print 'Usage: %s OUTPUT_DIR' % argv[0]
return 1
blink_idl_lexer.main(argv)
# Important: rewrite_tables=True causes the cache file to be deleted if it
# exists, thus making sure that PLY doesn't load it instead of regenerating
# the parse table.
parser = BlinkIDLParser(outputdir=outputdir, rewrite_tables=True)
if __name__ == '__main__':
sys.exit(main(sys.argv))
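# Illustrative usage sketch (not part of the original module): parse an IDL
# file with the derived parser via the base parser's ParseFile helper,
# imported above as parse_file. The file name here is a hypothetical example.
def _example_parse(idl_filename='Example.idl'):
    parser = BlinkIDLParser()
    return parse_file(parser, idl_filename)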
|
bsd-3-clause
|
Juniper/tempest
|
tempest/lib/services/volume/v2/volumes_client.py
|
1
|
14434
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
import six
from six.moves.urllib import parse as urllib
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.volume import base_client
class VolumesClient(base_client.BaseClient):
"""Client class to send CRUD Volume V2 API requests"""
api_version = "v2"
def _prepare_params(self, params):
"""Prepares params for use in get or _ext_get methods.
If params is a string it will be left as it is, but if it's not it will
be urlencoded.
"""
if isinstance(params, six.string_types):
return params
return urllib.urlencode(params)
def list_volumes(self, detail=False, params=None):
"""List all the volumes created.
Params can be a string (must be urlencoded) or a dictionary.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/block-storage/v2/#list-volumes-with-details
http://developer.openstack.org/api-ref/block-storage/v2/#list-volumes
"""
url = 'volumes'
if detail:
url += '/detail'
if params:
url += '?%s' % self._prepare_params(params)
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def show_volume(self, volume_id):
"""Returns the details of a single volume."""
url = "volumes/%s" % volume_id
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def create_volume(self, **kwargs):
"""Creates a new Volume.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/block-storage/v2/#create-volume
"""
post_body = json.dumps({'volume': kwargs})
resp, body = self.post('volumes', post_body)
body = json.loads(body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def update_volume(self, volume_id, **kwargs):
"""Updates the Specified Volume.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/block-storage/v2/#update-volume
"""
put_body = json.dumps({'volume': kwargs})
resp, body = self.put('volumes/%s' % volume_id, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_volume(self, volume_id, **params):
"""Deletes the Specified Volume.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/block-storage/v2/#delete-volume
"""
url = 'volumes/%s' % volume_id
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.delete(url)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def upload_volume(self, volume_id, **kwargs):
"""Uploads a volume in Glance."""
post_body = json.dumps({'os-volume_upload_image': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
body = json.loads(body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def attach_volume(self, volume_id, **kwargs):
"""Attaches a volume to a given instance on a given mountpoint.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/block-storage/v2/#attach-volume-to-server
"""
post_body = json.dumps({'os-attach': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def set_bootable_volume(self, volume_id, **kwargs):
"""Set a bootable flag for a volume - true or false.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/block-storage/v2/#update-volume-bootable-status
"""
post_body = json.dumps({'os-set_bootable': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def detach_volume(self, volume_id):
"""Detaches a volume from an instance."""
post_body = json.dumps({'os-detach': {}})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def reserve_volume(self, volume_id):
"""Reserves a volume."""
post_body = json.dumps({'os-reserve': {}})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def unreserve_volume(self, volume_id):
"""Restore a reserved volume ."""
post_body = json.dumps({'os-unreserve': {}})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
"""Check the specified resource is deleted or not.
:param id: A checked resource id
:raises lib_exc.DeleteErrorException: If the specified resource is on
the status the delete was failed.
"""
try:
volume = self.show_volume(id)
except lib_exc.NotFound:
return True
if volume["volume"]["status"] == "error_deleting":
raise lib_exc.DeleteErrorException(resource_id=id)
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'volume'
def extend_volume(self, volume_id, **kwargs):
"""Extend a volume.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/block-storage/v2/#extend-volume-size
"""
post_body = json.dumps({'os-extend': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def reset_volume_status(self, volume_id, **kwargs):
"""Reset the Specified Volume's Status.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/block-storage/v2/#reset-volume-statuses
"""
post_body = json.dumps({'os-reset_status': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def update_volume_readonly(self, volume_id, **kwargs):
"""Update the Specified Volume readonly."""
post_body = json.dumps({'os-update_readonly_flag': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def force_delete_volume(self, volume_id):
"""Force Delete Volume."""
post_body = json.dumps({'os-force_delete': {}})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def create_volume_metadata(self, volume_id, metadata):
"""Create metadata for the volume.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/block-storage/v2/#create-volume-metadata
"""
put_body = json.dumps({'metadata': metadata})
url = "volumes/%s/metadata" % volume_id
resp, body = self.post(url, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def show_volume_metadata(self, volume_id):
"""Get metadata of the volume."""
url = "volumes/%s/metadata" % volume_id
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def update_volume_metadata(self, volume_id, metadata):
"""Update metadata for the volume.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/block-storage/v2/#update-volume-metadata
"""
put_body = json.dumps({'metadata': metadata})
url = "volumes/%s/metadata" % volume_id
resp, body = self.put(url, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def show_volume_metadata_item(self, volume_id, id):
"""Show metadata item for the volume."""
url = "volumes/%s/metadata/%s" % (volume_id, id)
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def update_volume_metadata_item(self, volume_id, id, meta_item):
"""Update metadata item for the volume."""
put_body = json.dumps({'meta': meta_item})
url = "volumes/%s/metadata/%s" % (volume_id, id)
resp, body = self.put(url, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_volume_metadata_item(self, volume_id, id):
"""Delete metadata item for the volume."""
url = "volumes/%s/metadata/%s" % (volume_id, id)
resp, body = self.delete(url)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def retype_volume(self, volume_id, **kwargs):
"""Updates volume with new volume type.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/block-storage/v2/#retype-volume
"""
post_body = json.dumps({'os-retype': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def force_detach_volume(self, volume_id, **kwargs):
"""Force detach a volume.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/block-storage/v2/#force-detach-volume
"""
post_body = json.dumps({'os-force_detach': kwargs})
url = 'volumes/%s/action' % volume_id
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def update_volume_image_metadata(self, volume_id, **kwargs):
"""Update image metadata for the volume.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/block-storage/v2/#set-image-metadata-for-volume
"""
post_body = json.dumps({'os-set_image_metadata': {'metadata': kwargs}})
url = "volumes/%s/action" % (volume_id)
resp, body = self.post(url, post_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_volume_image_metadata(self, volume_id, key_name):
"""Delete image metadata item for the volume."""
post_body = json.dumps({'os-unset_image_metadata': {'key': key_name}})
url = "volumes/%s/action" % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def show_volume_image_metadata(self, volume_id):
"""Show image metadata for the volume."""
post_body = json.dumps({'os-show_image_metadata': {}})
url = "volumes/%s/action" % volume_id
resp, body = self.post(url, post_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def unmanage_volume(self, volume_id):
"""Unmanage volume.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/block-storage/v2/#unmanage-volume
"""
post_body = json.dumps({'os-unmanage': {}})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
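# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original client): a rough illustration
# of how a test might chain the action-based calls defined above. The client
# instance, volume id, volume type and metadata values are all hypothetical.
#
#     volumes_client.retype_volume('some-volume-id', new_type='ssd',
#                                  migration_policy='on-demand')
#     volumes_client.update_volume_metadata('some-volume-id',
#                                           {'purpose': 'demo'})
#     volumes_client.unmanage_volume('some-volume-id')
# ---------------------------------------------------------------------------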
|
apache-2.0
|
camagenta/youtube-dl
|
test/test_utils.py
|
34
|
31908
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Various small unit tests
import io
import json
import xml.etree.ElementTree
from youtube_dl.utils import (
age_restricted,
args_to_str,
clean_html,
DateRange,
detect_exe_version,
encodeFilename,
escape_rfc3986,
escape_url,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
InAdvancePagedList,
intlist_to_bytes,
is_html,
js_to_json,
limit_length,
OnDemandPagedList,
orderedSet,
parse_duration,
parse_filesize,
parse_iso8601,
read_batch_urls,
sanitize_filename,
sanitize_path,
prepend_extension,
replace_extension,
shell_quote,
smuggle_url,
str_to_int,
strip_jsonp,
struct_unpack,
timeconvert,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
lowercase_escape,
url_basename,
urlencode_postdata,
version_tuple,
xpath_with_ns,
xpath_element,
xpath_text,
xpath_attr,
render_table,
match_str,
parse_dfxp_time_expr,
dfxp2srt,
cli_option,
cli_valueless_option,
cli_bool_option,
)
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
self.assertTrue(timeconvert('') is None)
self.assertTrue(timeconvert('bougrg') is None)
def test_sanitize_filename(self):
self.assertEqual(sanitize_filename('abc'), 'abc')
self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
self.assertEqual(sanitize_filename('123'), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de'))
self.assertFalse('/' in sanitize_filename('abc/de///'))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
self.assertEqual('yes no', sanitize_filename('yes? no'))
self.assertEqual('this - that', sanitize_filename('this: that'))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
aumlaut = 'ä'
self.assertEqual(sanitize_filename(aumlaut), aumlaut)
tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
self.assertEqual(sanitize_filename(tests), tests)
self.assertEqual(
sanitize_filename('New World record at 0:12:34'),
'New World record at 0_12_34')
self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
forbidden = '"\0\\/'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
self.assertEqual(sanitize_filename('123', restricted=True), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
tests = 'a\xe4b\u4e2d\u56fd\u7684c'
self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
# Handle a common case more neatly
self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
# .. but make sure the file name is never empty
self.assertTrue(sanitize_filename('-', restricted=True) != '')
self.assertTrue(sanitize_filename(':', restricted=True) != '')
def test_sanitize_ids(self):
self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
def test_sanitize_path(self):
if sys.platform != 'win32':
return
self.assertEqual(sanitize_path('abc'), 'abc')
self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
self.assertEqual(sanitize_path('abc|def'), 'abc#def')
self.assertEqual(sanitize_path('<>:"|?*'), '#######')
self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(
sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
self.assertEqual(
sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
self.assertEqual(sanitize_path('../abc'), '..\\abc')
self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
self.assertEqual(sanitize_path('./abc'), 'abc')
self.assertEqual(sanitize_path('./../abc'), '..\\abc')
def test_prepend_extension(self):
self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp')
self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext')
def test_replace_extension(self):
self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp')
self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp')
self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp')
self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
def test_ordered_set(self):
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(orderedSet([]), [])
self.assertEqual(orderedSet([1]), [1])
# keep the list ordered
self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
self.assertEqual(unescapeHTML('%20;'), '%20;')
self.assertEqual(unescapeHTML('/'), '/')
self.assertEqual(unescapeHTML('/'), '/')
self.assertEqual(
unescapeHTML('é'), 'é')
def test_daterange(self):
_20century = DateRange("19000101", "20000101")
self.assertFalse("17890714" in _20century)
_ac = DateRange("00010101")
self.assertTrue("19690721" in _ac)
_firstmilenium = DateRange(end="10000101")
self.assertTrue("07110427" in _firstmilenium)
def test_unified_dates(self):
self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
self.assertEqual(unified_strdate('8/7/2009'), '20090708')
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
self.assertEqual(unified_strdate('1968 12 10'), '19681210')
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
self.assertEqual(
unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
'20141126')
self.assertEqual(
unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
'20150202')
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
def test_find_xpath_attr(self):
testxml = '''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
<node x="b" y="d" />
<node x="" />
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
def test_xpath_with_ns(self):
testxml = '''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
</media:song>
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
self.assertTrue(find('media:song') is not None)
self.assertEqual(find('media:song/media:author').text, 'The Author')
self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_xpath_element(self):
doc = xml.etree.ElementTree.Element('root')
div = xml.etree.ElementTree.SubElement(doc, 'div')
p = xml.etree.ElementTree.SubElement(div, 'p')
p.text = 'Foo'
self.assertEqual(xpath_element(doc, 'div/p'), p)
self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_element(doc, 'div/bar') is None)
self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
def test_xpath_text(self):
testxml = '''<root>
<div>
<p>Foo</p>
</div>
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_text(doc, 'div/bar') is None)
self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
def test_xpath_attr(self):
testxml = '''<root>
<div>
<p x="a">Foo</p>
</div>
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
def test_smuggle_url(self):
data = {"ö": "ö", "abc": [3]}
url = 'https://foo.bar/baz?x=y#a'
smug_url = smuggle_url(url, data)
unsmug_url, unsmug_data = unsmuggle_url(smug_url)
self.assertEqual(url, unsmug_url)
self.assertEqual(data, unsmug_data)
res_url, res_data = unsmuggle_url(url)
self.assertEqual(res_url, url)
self.assertEqual(res_data, None)
def test_shell_quote(self):
args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
self.assertEqual(shell_quote(args), """ffmpeg -i 'ñ€ß'"'"'.mp4'""")
def test_str_to_int(self):
self.assertEqual(str_to_int('123,456'), 123456)
self.assertEqual(str_to_int('123.456'), 123456)
def test_url_basename(self):
self.assertEqual(url_basename('http://foo.de/'), '')
self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
self.assertEqual(
url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
'trailer.mp4')
def test_parse_duration(self):
self.assertEqual(parse_duration(None), None)
self.assertEqual(parse_duration(False), None)
self.assertEqual(parse_duration('invalid'), None)
self.assertEqual(parse_duration('1'), 1)
self.assertEqual(parse_duration('1337:12'), 80232)
self.assertEqual(parse_duration('9:12:43'), 33163)
self.assertEqual(parse_duration('12:00'), 720)
self.assertEqual(parse_duration('00:01:01'), 61)
self.assertEqual(parse_duration('x:y'), None)
self.assertEqual(parse_duration('3h11m53s'), 11513)
self.assertEqual(parse_duration('3h 11m 53s'), 11513)
self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
self.assertEqual(parse_duration('62m45s'), 3765)
self.assertEqual(parse_duration('6m59s'), 419)
self.assertEqual(parse_duration('49s'), 49)
self.assertEqual(parse_duration('0h0m0s'), 0)
self.assertEqual(parse_duration('0m0s'), 0)
self.assertEqual(parse_duration('0s'), 0)
self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
self.assertEqual(parse_duration('T30M38S'), 1838)
self.assertEqual(parse_duration('5 s'), 5)
self.assertEqual(parse_duration('3 min'), 180)
self.assertEqual(parse_duration('2.5 hours'), 9000)
self.assertEqual(parse_duration('02:03:04'), 7384)
self.assertEqual(parse_duration('01:02:03:04'), 93784)
self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
self.assertEqual(parse_duration('87 Min.'), 5220)
def test_fix_xml_ampersands(self):
self.assertEqual(
fix_xml_ampersands('"&x=y&z=a'), '"&x=y&z=a')
self.assertEqual(
fix_xml_ampersands('"&x=y&wrong;&z=a'),
'"&x=y&wrong;&z=a')
self.assertEqual(
fix_xml_ampersands('&'><"'),
'&'><"')
self.assertEqual(
fix_xml_ampersands('Ӓ᪼'), 'Ӓ᪼')
self.assertEqual(fix_xml_ampersands('&#&#'), '&#&#')
def test_paged_list(self):
def testPL(size, pagesize, sliceargs, expected):
def get_page(pagenum):
firstid = pagenum * pagesize
upto = min(size, pagenum * pagesize + pagesize)
for i in range(firstid, upto):
yield i
pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
got = iapl.getslice(*sliceargs)
self.assertEqual(got, expected)
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
testPL(5, 2, (4,), [4])
testPL(5, 2, (0, 3), [0, 1, 2])
testPL(5, 2, (1, 4), [1, 2, 3])
testPL(5, 2, (2, 99), [2, 3, 4])
testPL(5, 2, (20, 99), [])
def test_struct_unpack(self):
self.assertEqual(struct_unpack('!B', b'\x00'), (0,))
def test_read_batch_urls(self):
f = io.StringIO('''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
data = urlencode_postdata({'username': '[email protected]', 'password': '1234'})
self.assertTrue(isinstance(data, bytes))
def test_parse_iso8601(self):
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
def test_strip_jsonp(self):
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
d = json.loads(stripped)
self.assertEqual(d, [{"id": "532cb", "x": 3}])
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
d = json.loads(stripped)
self.assertEqual(d, {'STATUS': 'OK'})
def test_uppercase_escape(self):
self.assertEqual(uppercase_escape('aä'), 'aä')
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
def test_lowercase_escape(self):
self.assertEqual(lowercase_escape('aä'), 'aä')
self.assertEqual(lowercase_escape('\\u0026'), '&')
def test_limit_length(self):
self.assertEqual(limit_length(None, 12), None)
self.assertEqual(limit_length('foo', 12), 'foo')
self.assertTrue(
limit_length('foo bar baz asd', 12).startswith('foo bar'))
self.assertTrue('...' in limit_length('foo bar baz asd', 12))
def test_escape_rfc3986(self):
reserved = "!*'();:@&=+$,/?#[]"
unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
self.assertEqual(escape_rfc3986(reserved), reserved)
self.assertEqual(escape_rfc3986(unreserved), unreserved)
self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
def test_escape_url(self):
self.assertEqual(
escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
)
self.assertEqual(
escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
)
self.assertEqual(
escape_url('http://тест.рф/фрагмент'),
'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
)
self.assertEqual(
escape_url('http://тест.рф/абв?абв=абв#абв'),
'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
)
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_js_to_json_realworld(self):
inp = '''{
'clip':{'provider':'pseudo'}
}'''
self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
json.loads(js_to_json(inp))
inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
json_code = js_to_json(inp)
self.assertEqual(json.loads(json_code), json.loads(inp))
def test_js_to_json_edgecases(self):
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
# Ignore JavaScript code as well
on = js_to_json('''{
"x": 1,
y: "a",
z: some.code
}''')
d = json.loads(on)
self.assertEqual(d['x'], 1)
self.assertEqual(d['y'], 'a')
on = js_to_json('["abc", "def",]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('{"abc": "def",}')
self.assertEqual(json.loads(on), {'abc': 'def'})
def test_clean_html(self):
self.assertEqual(clean_html('a:\nb'), 'a: b')
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
def test_intlist_to_bytes(self):
self.assertEqual(
intlist_to_bytes([0, 1, 127, 128, 255]),
b'\x00\x01\x7f\x80\xff')
def test_args_to_str(self):
self.assertEqual(
args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
'foo ba/r -baz \'2 be\' \'\''
)
def test_parse_filesize(self):
self.assertEqual(parse_filesize(None), None)
self.assertEqual(parse_filesize(''), None)
self.assertEqual(parse_filesize('91 B'), 91)
self.assertEqual(parse_filesize('foobar'), None)
self.assertEqual(parse_filesize('2 MiB'), 2097152)
self.assertEqual(parse_filesize('5 GB'), 5000000000)
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
self.assertEqual(parse_filesize('1,24 KB'), 1240)
def test_version_tuple(self):
self.assertEqual(version_tuple('1'), (1,))
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
self.assertEqual(version_tuple('10.1-6'), (10, 1, 6)) # avconv style
def test_detect_exe_version(self):
self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
def test_age_restricted(self):
self.assertFalse(age_restricted(None, 10)) # unrestricted content
self.assertFalse(age_restricted(1, None)) # unrestricted policy
self.assertFalse(age_restricted(8, 10))
self.assertTrue(age_restricted(18, 14))
self.assertFalse(age_restricted(18, 18))
def test_is_html(self):
self.assertFalse(is_html(b'\x49\x44\x43<html'))
self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-8 with BOM
b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-16-LE
b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
))
self.assertTrue(is_html( # UTF-16-BE
b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
))
self.assertTrue(is_html( # UTF-32-BE
b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
self.assertTrue(is_html( # UTF-32-LE
b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
def test_render_table(self):
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]]),
'a bcd\n'
'123 4\n'
'9999 51')
def test_match_str(self):
self.assertRaises(ValueError, match_str, 'xy>foobar', {})
self.assertFalse(match_str('xy', {'x': 1200}))
self.assertTrue(match_str('!xy', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 1200}))
self.assertFalse(match_str('!x', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 0}))
self.assertFalse(match_str('x>0', {'x': 0}))
self.assertFalse(match_str('x>0', {}))
self.assertTrue(match_str('x>?0', {}))
self.assertTrue(match_str('x>1K', {'x': 1200}))
self.assertFalse(match_str('x>2K', {'x': 1200}))
self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 90, 'description': 'foo'}))
self.assertTrue(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 10}))
def test_parse_dfxp_time_expr(self):
self.assertEqual(parse_dfxp_time_expr(None), 0.0)
self.assertEqual(parse_dfxp_time_expr(''), 0.0)
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
def test_dfxp2srt(self):
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
</div>
</body>
</tt>'''
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols
2
00:00:01,000 --> 00:00:02,000
第二行
♪♪
3
00:00:02,000 --> 00:00:03,000
Third
Line
'''
self.assertEqual(dfxp2srt(dfxp_data), srt_data)
dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The first line</p>
</div>
</body>
</tt>'''
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line
'''
self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
def test_cli_option(self):
self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
self.assertEqual(cli_option({}, '--proxy', 'proxy'), [])
def test_cli_valueless_option(self):
self.assertEqual(cli_valueless_option(
{'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
self.assertEqual(cli_valueless_option(
{'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
self.assertEqual(cli_valueless_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
self.assertEqual(cli_valueless_option(
{'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
self.assertEqual(cli_valueless_option(
{'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
self.assertEqual(cli_valueless_option(
{'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])
def test_cli_bool_option(self):
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
['--no-check-certificate', 'true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
['--no-check-certificate=true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
['--check-certificate', 'false'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=false'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
['--check-certificate', 'true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=true'])
if __name__ == '__main__':
unittest.main()
|
unlicense
|
CydarLtd/ansible
|
test/runner/injector/injector.py
|
163
|
7660
|
#!/usr/bin/env python
"""Interpreter and code coverage injector for use with ansible-test.
The injector serves two main purposes:
1) Control the python interpreter used to run test tools and ansible code.
2) Provide optional code coverage analysis of ansible code.
The injector is executed in one of two ways:
1) On the controller via a symbolic link such as ansible or pytest.
This is accomplished by ansible-test prepending the injector directory to the PATH.
2) As the python interpreter when running ansible modules.
This is only supported when connecting to the local host.
Otherwise set the ANSIBLE_TEST_REMOTE_INTERPRETER environment variable.
It can be empty to auto-detect the python interpreter on the remote host.
If not empty it will be used to set ansible_python_interpreter.
NOTE: Running ansible-test with the --tox option or inside a virtual environment
may prevent the injector from working for tests which use connection
types other than local, or which use become, due to lack of permissions
to access the interpreter for the virtual environment.
"""
from __future__ import absolute_import, print_function
import errno
import json
import os
import sys
import pipes
import logging
import getpass
logger = logging.getLogger('injector') # pylint: disable=locally-disabled, invalid-name
# pylint: disable=locally-disabled, invalid-name
config = None # type: InjectorConfig
class InjectorConfig(object):
"""Mandatory configuration."""
def __init__(self, config_path):
"""Initialize config."""
with open(config_path) as config_fd:
_config = json.load(config_fd)
self.python_interpreter = _config['python_interpreter']
self.coverage_file = _config['coverage_file']
# Read from the environment instead of config since it needs to be changed by integration test scripts.
# It also does not need to flow from the controller to the remote. It is only used on the controller.
self.remote_interpreter = os.environ.get('ANSIBLE_TEST_REMOTE_INTERPRETER', None)
self.arguments = [to_text(c) for c in sys.argv]
def to_text(value):
"""
:type value: str | None
:rtype: str | None
"""
if value is None:
return None
if isinstance(value, bytes):
return value.decode('utf-8')
return u'%s' % value
def main():
"""Main entry point."""
global config # pylint: disable=locally-disabled, global-statement
formatter = logging.Formatter('%(asctime)s %(process)d %(levelname)s %(message)s')
log_name = 'ansible-test-coverage.%s.log' % getpass.getuser()
self_dir = os.path.dirname(os.path.abspath(__file__))
handler = logging.FileHandler(os.path.join('/tmp', log_name))
handler.setFormatter(formatter)
logger.addHandler(handler)
handler = logging.FileHandler(os.path.abspath(os.path.join(self_dir, '..', 'logs', log_name)))
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
try:
logger.debug('Self: %s', __file__)
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'injector.json')
try:
config = InjectorConfig(config_path)
except IOError:
logger.exception('Error reading config: %s', config_path)
exit('No injector config found. Set ANSIBLE_TEST_REMOTE_INTERPRETER if the test is not connecting to the local host.')
logger.debug('Arguments: %s', ' '.join(pipes.quote(c) for c in config.arguments))
logger.debug('Python interpreter: %s', config.python_interpreter)
logger.debug('Remote interpreter: %s', config.remote_interpreter)
logger.debug('Coverage file: %s', config.coverage_file)
require_cwd = False
if os.path.basename(__file__) == 'injector.py':
if config.coverage_file:
args, env, require_cwd = cover()
else:
args, env = runner()
else:
args, env = injector()
logger.debug('Run command: %s', ' '.join(pipes.quote(c) for c in args))
altered_cwd = False
try:
cwd = os.getcwd()
except OSError as ex:
# some platforms, such as OS X, may not allow querying the working directory when using become to drop privileges
if ex.errno != errno.EACCES:
raise
if require_cwd:
# make sure the program we execute can determine the working directory if it's required
cwd = '/'
os.chdir(cwd)
altered_cwd = True
else:
cwd = None
logger.debug('Working directory: %s%s', cwd or '?', ' (altered)' if altered_cwd else '')
for key in sorted(env.keys()):
logger.debug('%s=%s', key, env[key])
os.execvpe(args[0], args, env)
except Exception as ex:
logger.fatal(ex)
raise
def injector():
"""
:rtype: list[str], dict[str, str]
"""
command = os.path.basename(__file__)
executable = find_executable(command)
if config.coverage_file:
args, env = coverage_command()
else:
args, env = [config.python_interpreter], os.environ.copy()
args += [executable]
if command in ('ansible', 'ansible-playbook', 'ansible-pull'):
if config.remote_interpreter is None:
interpreter = os.path.join(os.path.dirname(__file__), 'injector.py')
elif config.remote_interpreter == '':
interpreter = None
else:
interpreter = config.remote_interpreter
if interpreter:
args += ['--extra-vars', 'ansible_python_interpreter=' + interpreter]
args += config.arguments[1:]
return args, env
def runner():
"""
:rtype: list[str], dict[str, str]
"""
args, env = [config.python_interpreter], os.environ.copy()
args += config.arguments[1:]
return args, env
def cover():
"""
:rtype: list[str], dict[str, str], bool
"""
if len(config.arguments) > 1:
executable = config.arguments[1]
else:
executable = ''
require_cwd = False
if os.path.basename(executable).startswith('ansible_module_'):
args, env = coverage_command()
# coverage requires knowing the working directory
require_cwd = True
else:
args, env = [config.python_interpreter], os.environ.copy()
args += config.arguments[1:]
return args, env, require_cwd
def coverage_command():
"""
:rtype: list[str], dict[str, str]
"""
self_dir = os.path.dirname(os.path.abspath(__file__))
args = [
config.python_interpreter,
'-m',
'coverage.__main__',
'run',
'--rcfile',
os.path.join(self_dir, '.coveragerc'),
]
env = os.environ.copy()
env['COVERAGE_FILE'] = config.coverage_file
return args, env
def find_executable(executable):
"""
:type executable: str
:rtype: str
"""
self = os.path.abspath(__file__)
path = os.environ.get('PATH', os.defpath)
seen_dirs = set()
for path_dir in path.split(os.pathsep):
if path_dir in seen_dirs:
continue
seen_dirs.add(path_dir)
candidate = os.path.abspath(os.path.join(path_dir, executable))
if candidate == self:
continue
if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
return candidate
raise Exception('Executable "%s" not found in path: %s' % (executable, path))
if __name__ == '__main__':
main()
|
gpl-3.0
|
DistortedSignal/markov-lib
|
markov_test.py
|
1
|
4459
|
# -*- coding: utf-8 -*-
import markov_tool as mt
ins = mt.InstanceList()
if ins._get_native_types() != (0, {}):
print "There was a problem instantiating the InstanceList."
print ins._get_native_types()
ins['a'] = 1
if ins['a'] != 1 or ins._get_native_types() != (1, {'a': 1}):
print "There was a problem setting/getting an item."
print ins._get_native_types()
ins['a'] += 1
if ins['a'] != 2 or ins._get_native_types() != (2, {'a': 2}):
print "There was a problem setting/getting an item."
print ins._get_native_types()
ins['b'] = 2
result = ins._InstanceList__get_next_token(lambda x, y: 0)
if result != 'a':
print "There was a problem getting the next token (value: 0). Actual value: " + \
str(result)
print ins._get_native_types()
result = ins._InstanceList__get_next_token(lambda x, y: 1)
if result != 'a':
print "There was a problem getting the next token (value: 1). Actual value: " + \
str(result)
print ins._get_native_types()
result = ins._InstanceList__get_next_token(lambda x, y: 2)
if result != 'b':
print "There was a problem getting the next token (value: 2). Actual value: " + \
str(result)
print ins._get_native_types()
result = ins._InstanceList__get_next_token(lambda x, y: 3)
if result != 'b':
print "There was a problem getting the next token (value: 3). Actual value: " + \
str(result)
print ins._get_native_types()
try:
result = ins._InstanceList__get_next_token(lambda x, y: 4)
except IndexError as e:
print "Caught Index error, not a problem."
else:
print "We didn't catch any errors, and that's a problem."
instance = mt.InstanceMatrix()
instance.load('this')
a = instance.generate()
if a != "this":
print "There was a problem generating from the instance matrix."
print "Result: " + a
print "Tests finished."
pantheon = [u"Abholos", u"Alala", u"Ammutseba", u"Amon-Gorloth", u"Aphoom-Zhah", u"Apocolothoth", \
u"Arwassa", u"Atlach-Nacha", u"Ayi'ig", u"Aylith", u"Baoht Z'uqqa-Mogg", u"Basatan", u"B'gnu-Thun", \
u"Bokrug", u"Bugg-Shash", u"Byatis", u"Chaugnar Faugn", u"Caug-Narfagn", u"Coatlicue", u"Coinchenn", \
u"Crom Cruach", u"Cthaat", u"Cthaat Aquadingen", u"Cthaeghya", u"Cthugha", u"Cthulhu", u"Cthylla", \
u"Ctoggha", u"Cyäegha", u"Cynothoglys", u"Dhumin", u"Dygra", u"Dythalla", u"Dzéwà", u"Eihort", u"Ei'lor", \
u"Etepsed Egnis", u"Ghadamon", u"Ghatanothoa", u"Ghisguth", u"Glaaki", u"Gleeth", u"Gloon", u"Gobogeg", \
u"Gog-Hoor", u"Gol-goroth", u"Golothess", u"Groth-Golka", u"Gtuhanai", u"Gurathnaka", u"Gur'la-ya", \
u"Gzxtyos", u"Han", u"Hastalÿk", u"Hastur", u"H'chtelegoth", u"Hnarqu", u"Hziulquoigmnzhah", u"Idh-yaa", \
u"Inpesca", u"Iod", u"Istasha", u"Ithaqua", u"Janai'ngo", u"Juk-Shabb", u"Kaalut", u"Kag'Naru", \
u"Kassogtha", u"Kaunuzoth", u"Khal'kru", u"Klosmiebhyx", u"K'nar'st", u"Krang", u"Kurpannga", u"Lam", \
u"Lythalia", u"Mappo no Ryujin", u"M'basui Gwandu", u"M'Nagalah", u"Mnomquah", u"Mordiggian", u"Mormo", \
u"Mortllgh", u"Mynoghra", u"Nctosa & Nctolhu", u"Ngirrth'lu", u"Northot", u"Nssu-Ghahnb", u"Nug and Yeb", \
u"Nyaghoggua", u"Nycrama", u"Nyogtha", u"Ob'mbu", u"Oorn", u"Othuum", u"Othuyeg", u"Pharol", u"Poseidon", \
u"Psuchawrl", u"Ptar-Axtlan", u"Quachil Uttaus", u"Quyagen", u"Q'yth-az", u"Raandaii-B'nk", u"Ragnalla", \
u"Raphanasuan", u"Rhan-Tegoth", u"Rhogog", u"Rh'Thulla", u"Rokon", u"Ruhtra Dyoll", u"Saa'itii", u"Scathach", \
u"Sebek", u"Sedmelluq", u"Sfatlicllp", u"Shaklatal", u"Shathak", u"Shaurash-Ho", u"Sheb-Teth", u"Shlithneth", \
u"Sho-Gath", u"Shterot", u"Shudde M'ell", u"Shuy-Nihl", u"Sthanee", u"S'tya-Yg'Nalle", u"Summanus", u"Swarog", \
u"Thanaroa", u"Tharapithia", u"Thog", u"Th'rygh", u"Tsathoggua", u"Tulushuggua", u"Turua", u"Uitzilcapac", \
u"Ut'Ulls-Hr'Her", u"Vhuzompha", u"Vibur", u"Vile-Oct", u"Volgna-Gath", u"Voltiyig", u"Vthyarilops", \
u"Vulthoom", u"Gsarthotegga", u"Xalafu", u"Xcthol", u"Xinlurgash", u"Xirdneth", u"Xotli", u"Xoxiigghua", \
u"Yegg-Ha", u"Y'golonac", u"Yhagni", u"Yhashtur", u"Yig", u"Y'lla", u"'Ymnar", u"Yog-Sapha", u"Yorith", \
u"Ysbaddaden", u"Ythogtha", u"Yug-Siturath", u"Zathog", u"Zhar and Lloigor", u"Zindarak", u"Zoth-Ommog", \
u"Zstylzhemghi", u"Zystulzhemgni", u"Zushakon", u"Zuchequon", u"Z'toggua", u"Zvilpogghua"]
gen = mt.InstanceMatrix()
for g in pantheon:
gen.load(g)
print gen.generate()
print gen.generate()
print gen.generate()
print gen.generate()
# a = gen._get_native_types()
# print repr(a)
|
mit
|
mancoast/CPythonPyc_test
|
fail/313_test_tokenize.py
|
2
|
34326
|
# -*- coding: utf-8 -*-
doctests = """
Tests for the tokenize module.
The tests can be really simple. Given a small fragment of source
code, print out a table with tokens. The ENDMARKER is omitted for
brevity.
>>> dump_tokens("1 + 1")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '1' (1, 0) (1, 1)
OP '+' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
>>> dump_tokens("if False:\\n"
... " # NL\\n"
... " True = False # NEWLINE\\n")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'if' (1, 0) (1, 2)
NAME 'False' (1, 3) (1, 8)
OP ':' (1, 8) (1, 9)
NEWLINE '\\n' (1, 9) (1, 10)
COMMENT '# NL' (2, 4) (2, 8)
NL '\\n' (2, 8) (2, 9)
INDENT ' ' (3, 0) (3, 4)
NAME 'True' (3, 4) (3, 8)
OP '=' (3, 9) (3, 10)
NAME 'False' (3, 11) (3, 16)
COMMENT '# NEWLINE' (3, 17) (3, 26)
NEWLINE '\\n' (3, 26) (3, 27)
DEDENT '' (4, 0) (4, 0)
>>> indent_error_file = \"""
... def k(x):
... x += 2
... x += 5
... \"""
>>> readline = BytesIO(indent_error_file.encode('utf-8')).readline
>>> for tok in tokenize(readline): pass
Traceback (most recent call last):
...
IndentationError: unindent does not match any outer indentation level
There are some standard formatting practices that are easy to get right.
>>> roundtrip("if x == 1:\\n"
... " print(x)\\n")
True
>>> roundtrip("# This is a comment\\n# This also")
True
Some people use different formatting conventions, which makes
untokenize a little trickier. Note that this test involves trailing
whitespace after the colon. Note that we use hex escapes to make the
two trailing blanks apparent in the expected output.
>>> roundtrip("if x == 1 : \\n"
... " print(x)\\n")
True
>>> f = support.findfile("tokenize_tests.txt")
>>> roundtrip(open(f, 'rb'))
True
>>> roundtrip("if x == 1:\\n"
... " # A comment by itself.\\n"
... " print(x) # Comment here, too.\\n"
... " # Another comment.\\n"
... "after_if = True\\n")
True
>>> roundtrip("if (x # The comments need to go in the right place\\n"
... " == 1):\\n"
... " print('x==1')\\n")
True
>>> roundtrip("class Test: # A comment here\\n"
... " # A comment with weird indent\\n"
... " after_com = 5\\n"
... " def x(m): return m*5 # a one liner\\n"
... " def y(m): # A whitespace after the colon\\n"
... " return y*4 # 3-space indent\\n")
True
Some error-handling code
>>> roundtrip("try: import somemodule\\n"
... "except ImportError: # comment\\n"
... " print('Can not import' # comment2\\n)"
... "else: print('Loaded')\\n")
True
Balancing continuation
>>> roundtrip("a = (3,4, \\n"
... "5,6)\\n"
... "y = [3, 4,\\n"
... "5]\\n"
... "z = {'a': 5,\\n"
... "'b':15, 'c':True}\\n"
... "x = len(y) + 5 - a[\\n"
... "3] - a[2]\\n"
... "+ len(z) - z[\\n"
... "'b']\\n")
True
Ordinary integers and binary operators
>>> dump_tokens("0xff <= 255")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0xff' (1, 0) (1, 4)
OP '<=' (1, 5) (1, 7)
NUMBER '255' (1, 8) (1, 11)
>>> dump_tokens("0b10 <= 255")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0b10' (1, 0) (1, 4)
OP '<=' (1, 5) (1, 7)
NUMBER '255' (1, 8) (1, 11)
>>> dump_tokens("0o123 <= 0O123")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0o123' (1, 0) (1, 5)
OP '<=' (1, 6) (1, 8)
NUMBER '0O123' (1, 9) (1, 14)
>>> dump_tokens("1234567 > ~0x15")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '1234567' (1, 0) (1, 7)
OP '>' (1, 8) (1, 9)
OP '~' (1, 10) (1, 11)
NUMBER '0x15' (1, 11) (1, 15)
>>> dump_tokens("2134568 != 1231515")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '2134568' (1, 0) (1, 7)
OP '!=' (1, 8) (1, 10)
NUMBER '1231515' (1, 11) (1, 18)
>>> dump_tokens("(-124561-1) & 200000000")
ENCODING 'utf-8' (0, 0) (0, 0)
OP '(' (1, 0) (1, 1)
OP '-' (1, 1) (1, 2)
NUMBER '124561' (1, 2) (1, 8)
OP '-' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP ')' (1, 10) (1, 11)
OP '&' (1, 12) (1, 13)
NUMBER '200000000' (1, 14) (1, 23)
>>> dump_tokens("0xdeadbeef != -1")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0xdeadbeef' (1, 0) (1, 10)
OP '!=' (1, 11) (1, 13)
OP '-' (1, 14) (1, 15)
NUMBER '1' (1, 15) (1, 16)
>>> dump_tokens("0xdeadc0de & 12345")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0xdeadc0de' (1, 0) (1, 10)
OP '&' (1, 11) (1, 12)
NUMBER '12345' (1, 13) (1, 18)
>>> dump_tokens("0xFF & 0x15 | 1234")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0xFF' (1, 0) (1, 4)
OP '&' (1, 5) (1, 6)
NUMBER '0x15' (1, 7) (1, 11)
OP '|' (1, 12) (1, 13)
NUMBER '1234' (1, 14) (1, 18)
Long integers
>>> dump_tokens("x = 0")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '0' (1, 4) (1, 5)
>>> dump_tokens("x = 0xfffffffffff")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '0xffffffffff (1, 4) (1, 17)
>>> dump_tokens("x = 123141242151251616110")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '123141242151 (1, 4) (1, 25)
>>> dump_tokens("x = -15921590215012591")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
OP '-' (1, 4) (1, 5)
NUMBER '159215902150 (1, 5) (1, 22)
Floating point numbers
>>> dump_tokens("x = 3.14159")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3.14159' (1, 4) (1, 11)
>>> dump_tokens("x = 314159.")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '314159.' (1, 4) (1, 11)
>>> dump_tokens("x = .314159")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '.314159' (1, 4) (1, 11)
>>> dump_tokens("x = 3e14159")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3e14159' (1, 4) (1, 11)
>>> dump_tokens("x = 3E123")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3E123' (1, 4) (1, 9)
>>> dump_tokens("x+y = 3e-1230")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '+' (1, 1) (1, 2)
NAME 'y' (1, 2) (1, 3)
OP '=' (1, 4) (1, 5)
NUMBER '3e-1230' (1, 6) (1, 13)
>>> dump_tokens("x = 3.14e159")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3.14e159' (1, 4) (1, 12)
String literals
>>> dump_tokens("x = ''; y = \\\"\\\"")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "''" (1, 4) (1, 6)
OP ';' (1, 6) (1, 7)
NAME 'y' (1, 8) (1, 9)
OP '=' (1, 10) (1, 11)
STRING '""' (1, 12) (1, 14)
>>> dump_tokens("x = '\\\"'; y = \\\"'\\\"")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '\\'"\\'' (1, 4) (1, 7)
OP ';' (1, 7) (1, 8)
NAME 'y' (1, 9) (1, 10)
OP '=' (1, 11) (1, 12)
STRING '"\\'"' (1, 13) (1, 16)
>>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '"doesn\\'t "' (1, 4) (1, 14)
NAME 'shrink' (1, 14) (1, 20)
STRING '", does it"' (1, 20) (1, 31)
>>> dump_tokens("x = 'abc' + 'ABC'")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "'abc'" (1, 4) (1, 9)
OP '+' (1, 10) (1, 11)
STRING "'ABC'" (1, 12) (1, 17)
>>> dump_tokens('y = "ABC" + "ABC"')
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'y' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '"ABC"' (1, 4) (1, 9)
OP '+' (1, 10) (1, 11)
STRING '"ABC"' (1, 12) (1, 17)
>>> dump_tokens("x = r'abc' + r'ABC' + R'ABC' + R'ABC'")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "r'abc'" (1, 4) (1, 10)
OP '+' (1, 11) (1, 12)
STRING "r'ABC'" (1, 13) (1, 19)
OP '+' (1, 20) (1, 21)
STRING "R'ABC'" (1, 22) (1, 28)
OP '+' (1, 29) (1, 30)
STRING "R'ABC'" (1, 31) (1, 37)
>>> dump_tokens('y = r"abc" + r"ABC" + R"ABC" + R"ABC"')
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'y' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING 'r"abc"' (1, 4) (1, 10)
OP '+' (1, 11) (1, 12)
STRING 'r"ABC"' (1, 13) (1, 19)
OP '+' (1, 20) (1, 21)
STRING 'R"ABC"' (1, 22) (1, 28)
OP '+' (1, 29) (1, 30)
STRING 'R"ABC"' (1, 31) (1, 37)
Operators
>>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'def' (1, 0) (1, 3)
NAME 'd22' (1, 4) (1, 7)
OP '(' (1, 7) (1, 8)
NAME 'a' (1, 8) (1, 9)
OP ',' (1, 9) (1, 10)
NAME 'b' (1, 11) (1, 12)
OP ',' (1, 12) (1, 13)
NAME 'c' (1, 14) (1, 15)
OP '=' (1, 15) (1, 16)
NUMBER '2' (1, 16) (1, 17)
OP ',' (1, 17) (1, 18)
NAME 'd' (1, 19) (1, 20)
OP '=' (1, 20) (1, 21)
NUMBER '2' (1, 21) (1, 22)
OP ',' (1, 22) (1, 23)
OP '*' (1, 24) (1, 25)
NAME 'k' (1, 25) (1, 26)
OP ')' (1, 26) (1, 27)
OP ':' (1, 27) (1, 28)
NAME 'pass' (1, 29) (1, 33)
>>> dump_tokens("def d01v_(a=1, *k, **w): pass")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'def' (1, 0) (1, 3)
NAME 'd01v_' (1, 4) (1, 9)
OP '(' (1, 9) (1, 10)
NAME 'a' (1, 10) (1, 11)
OP '=' (1, 11) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP ',' (1, 13) (1, 14)
OP '*' (1, 15) (1, 16)
NAME 'k' (1, 16) (1, 17)
OP ',' (1, 17) (1, 18)
OP '**' (1, 19) (1, 21)
NAME 'w' (1, 21) (1, 22)
OP ')' (1, 22) (1, 23)
OP ':' (1, 23) (1, 24)
NAME 'pass' (1, 25) (1, 29)
Comparison
>>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " +
... "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'if' (1, 0) (1, 2)
NUMBER '1' (1, 3) (1, 4)
OP '<' (1, 5) (1, 6)
NUMBER '1' (1, 7) (1, 8)
OP '>' (1, 9) (1, 10)
NUMBER '1' (1, 11) (1, 12)
OP '==' (1, 13) (1, 15)
NUMBER '1' (1, 16) (1, 17)
OP '>=' (1, 18) (1, 20)
NUMBER '5' (1, 21) (1, 22)
OP '<=' (1, 23) (1, 25)
NUMBER '0x15' (1, 26) (1, 30)
OP '<=' (1, 31) (1, 33)
NUMBER '0x12' (1, 34) (1, 38)
OP '!=' (1, 39) (1, 41)
NUMBER '1' (1, 42) (1, 43)
NAME 'and' (1, 44) (1, 47)
NUMBER '5' (1, 48) (1, 49)
NAME 'in' (1, 50) (1, 52)
NUMBER '1' (1, 53) (1, 54)
NAME 'not' (1, 55) (1, 58)
NAME 'in' (1, 59) (1, 61)
NUMBER '1' (1, 62) (1, 63)
NAME 'is' (1, 64) (1, 66)
NUMBER '1' (1, 67) (1, 68)
NAME 'or' (1, 69) (1, 71)
NUMBER '5' (1, 72) (1, 73)
NAME 'is' (1, 74) (1, 76)
NAME 'not' (1, 77) (1, 80)
NUMBER '1' (1, 81) (1, 82)
OP ':' (1, 82) (1, 83)
NAME 'pass' (1, 84) (1, 88)
Shift
>>> dump_tokens("x = 1 << 1 >> 5")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '<<' (1, 6) (1, 8)
NUMBER '1' (1, 9) (1, 10)
OP '>>' (1, 11) (1, 13)
NUMBER '5' (1, 14) (1, 15)
Additive
>>> dump_tokens("x = 1 - y + 15 - 1 + 0x124 + z + a[5]")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '-' (1, 6) (1, 7)
NAME 'y' (1, 8) (1, 9)
OP '+' (1, 10) (1, 11)
NUMBER '15' (1, 12) (1, 14)
OP '-' (1, 15) (1, 16)
NUMBER '1' (1, 17) (1, 18)
OP '+' (1, 19) (1, 20)
NUMBER '0x124' (1, 21) (1, 26)
OP '+' (1, 27) (1, 28)
NAME 'z' (1, 29) (1, 30)
OP '+' (1, 31) (1, 32)
NAME 'a' (1, 33) (1, 34)
OP '[' (1, 34) (1, 35)
NUMBER '5' (1, 35) (1, 36)
OP ']' (1, 36) (1, 37)
Multiplicative
>>> dump_tokens("x = 1//1*1/5*12%0x12")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '//' (1, 5) (1, 7)
NUMBER '1' (1, 7) (1, 8)
OP '*' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP '/' (1, 10) (1, 11)
NUMBER '5' (1, 11) (1, 12)
OP '*' (1, 12) (1, 13)
NUMBER '12' (1, 13) (1, 15)
OP '%' (1, 15) (1, 16)
NUMBER '0x12' (1, 16) (1, 20)
Unary
>>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1")
ENCODING 'utf-8' (0, 0) (0, 0)
OP '~' (1, 0) (1, 1)
NUMBER '1' (1, 1) (1, 2)
OP '^' (1, 3) (1, 4)
NUMBER '1' (1, 5) (1, 6)
OP '&' (1, 7) (1, 8)
NUMBER '1' (1, 9) (1, 10)
OP '|' (1, 11) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP '^' (1, 14) (1, 15)
OP '-' (1, 16) (1, 17)
NUMBER '1' (1, 17) (1, 18)
>>> dump_tokens("-1*1/1+1*1//1 - ---1**1")
ENCODING 'utf-8' (0, 0) (0, 0)
OP '-' (1, 0) (1, 1)
NUMBER '1' (1, 1) (1, 2)
OP '*' (1, 2) (1, 3)
NUMBER '1' (1, 3) (1, 4)
OP '/' (1, 4) (1, 5)
NUMBER '1' (1, 5) (1, 6)
OP '+' (1, 6) (1, 7)
NUMBER '1' (1, 7) (1, 8)
OP '*' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP '//' (1, 10) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP '-' (1, 14) (1, 15)
OP '-' (1, 16) (1, 17)
OP '-' (1, 17) (1, 18)
OP '-' (1, 18) (1, 19)
NUMBER '1' (1, 19) (1, 20)
OP '**' (1, 20) (1, 22)
NUMBER '1' (1, 22) (1, 23)
Selector
>>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'import' (1, 0) (1, 6)
NAME 'sys' (1, 7) (1, 10)
OP ',' (1, 10) (1, 11)
NAME 'time' (1, 12) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
NAME 'x' (2, 0) (2, 1)
OP '=' (2, 2) (2, 3)
NAME 'sys' (2, 4) (2, 7)
OP '.' (2, 7) (2, 8)
NAME 'modules' (2, 8) (2, 15)
OP '[' (2, 15) (2, 16)
STRING "'time'" (2, 16) (2, 22)
OP ']' (2, 22) (2, 23)
OP '.' (2, 23) (2, 24)
NAME 'time' (2, 24) (2, 28)
OP '(' (2, 28) (2, 29)
OP ')' (2, 29) (2, 30)
Methods
>>> dump_tokens("@staticmethod\\ndef foo(x,y): pass")
ENCODING 'utf-8' (0, 0) (0, 0)
OP '@' (1, 0) (1, 1)
NAME 'staticmethod (1, 1) (1, 13)
NEWLINE '\\n' (1, 13) (1, 14)
NAME 'def' (2, 0) (2, 3)
NAME 'foo' (2, 4) (2, 7)
OP '(' (2, 7) (2, 8)
NAME 'x' (2, 8) (2, 9)
OP ',' (2, 9) (2, 10)
NAME 'y' (2, 10) (2, 11)
OP ')' (2, 11) (2, 12)
OP ':' (2, 12) (2, 13)
NAME 'pass' (2, 14) (2, 18)
Backslash means line continuation, except for comments
>>> roundtrip("x=1+\\\\n"
... "1\\n"
... "# This is a comment\\\\n"
... "# This also\\n")
True
>>> roundtrip("# Comment \\\\nx = 0")
True
Two string literals on the same line
>>> roundtrip("'' ''")
True
Test roundtrip on random python modules.
Pass the '-ucpu' option to process the full directory.
>>> import random
>>> tempdir = os.path.dirname(f) or os.curdir
>>> testfiles = glob.glob(os.path.join(tempdir, "test*.py"))
>>> if not support.is_resource_enabled("cpu"):
... testfiles = random.sample(testfiles, 10)
...
>>> for testfile in testfiles:
... if not roundtrip(open(testfile, 'rb')):
... print("Roundtrip failed for file %s" % testfile)
... break
... else: True
True
Evil tabs
>>> dump_tokens("def f():\\n\\tif x\\n \\tpass")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'def' (1, 0) (1, 3)
NAME 'f' (1, 4) (1, 5)
OP '(' (1, 5) (1, 6)
OP ')' (1, 6) (1, 7)
OP ':' (1, 7) (1, 8)
NEWLINE '\\n' (1, 8) (1, 9)
INDENT '\\t' (2, 0) (2, 1)
NAME 'if' (2, 1) (2, 3)
NAME 'x' (2, 4) (2, 5)
NEWLINE '\\n' (2, 5) (2, 6)
INDENT ' \\t' (3, 0) (3, 9)
NAME 'pass' (3, 9) (3, 13)
DEDENT '' (4, 0) (4, 0)
DEDENT '' (4, 0) (4, 0)
Non-ascii identifiers
>>> dump_tokens("Örter = 'places'\\ngrün = 'green'")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'Örter' (1, 0) (1, 5)
OP '=' (1, 6) (1, 7)
STRING "'places'" (1, 8) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
NAME 'grün' (2, 0) (2, 4)
OP '=' (2, 5) (2, 6)
STRING "'green'" (2, 7) (2, 14)
"""
from test import support
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
STRING, ENDMARKER, tok_name, detect_encoding)
from io import BytesIO
from unittest import TestCase
import os, sys, glob
def dump_tokens(s):
"""Print out the tokens in s in a table format.
The ENDMARKER is omitted.
"""
f = BytesIO(s.encode('utf-8'))
for type, token, start, end, line in tokenize(f.readline):
if type == ENDMARKER:
break
type = tok_name[type]
print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals())
def roundtrip(f):
"""
Test roundtrip for `untokenize`. `f` is an open file or a string.
The source code in f is tokenized, converted back to source code via
tokenize.untokenize(), and tokenized again from the latter. The test
fails if the second tokenization doesn't match the first.
"""
if isinstance(f, str):
f = BytesIO(f.encode('utf-8'))
try:
token_list = list(tokenize(f.readline))
finally:
f.close()
tokens1 = [tok[:2] for tok in token_list]
new_bytes = untokenize(tokens1)
readline = (line for line in new_bytes.splitlines(1)).__next__
tokens2 = [tok[:2] for tok in tokenize(readline)]
return tokens1 == tokens2
# This is an example from the docs, set up as a doctest.
def decistmt(s):
"""Substitute Decimals for floats in a string of statements.
>>> from decimal import Decimal
>>> s = 'print(+21.3e-5*-.1234/81.7)'
>>> decistmt(s)
"print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))"
The format of the exponent is inherited from the platform C library.
Known cases are "e-007" (Windows) and "e-07" (not Windows). Since
we're only showing 12 digits, and the 13th isn't close to 5, the
rest of the output should be platform-independent.
>>> exec(s) #doctest: +ELLIPSIS
-3.21716034272e-0...7
Output from calculations with Decimal should be identical across all
platforms.
>>> exec(decistmt(s))
-3.217160342717258261933904529E-7
"""
result = []
g = tokenize(BytesIO(s.encode('utf-8')).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
if toknum == NUMBER and '.' in tokval: # replace NUMBER tokens
result.extend([
(NAME, 'Decimal'),
(OP, '('),
(STRING, repr(tokval)),
(OP, ')')
])
else:
result.append((toknum, tokval))
return untokenize(result).decode('utf-8')
class TestTokenizerAdheresToPep0263(TestCase):
"""
Test that tokenizer adheres to the coding behaviour stipulated in PEP 0263.
"""
def _testFile(self, filename):
path = os.path.join(os.path.dirname(__file__), filename)
return roundtrip(open(path, 'rb'))
def test_utf8_coding_cookie_and_no_utf8_bom(self):
f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt'
self.assertTrue(self._testFile(f))
def test_latin1_coding_cookie_and_utf8_bom(self):
"""
As per PEP 0263, if a file starts with a utf-8 BOM signature, the only
allowed encoding for the comment is 'utf-8'. The text file used in
this test starts with a BOM signature, but specifies latin1 as the
coding, so verify that a SyntaxError is raised, which matches the
behaviour of the interpreter when it encounters a similar condition.
"""
f = 'tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt'
self.assertRaises(SyntaxError, self._testFile, f)
def test_no_coding_cookie_and_utf8_bom(self):
f = 'tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt'
self.assertTrue(self._testFile(f))
def test_utf8_coding_cookie_and_utf8_bom(self):
f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt'
self.assertTrue(self._testFile(f))
class Test_Tokenize(TestCase):
def test__tokenize_decodes_with_specified_encoding(self):
literal = '"ЉЊЈЁЂ"'
line = literal.encode('utf-8')
first = False
def readline():
nonlocal first
if not first:
first = True
return line
else:
return b''
# skip the initial encoding token and the end token
tokens = list(_tokenize(readline, encoding='utf-8'))[1:-1]
expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
self.assertEqual(tokens, expected_tokens,
"bytes not decoded with encoding")
def test__tokenize_does_not_decode_with_encoding_none(self):
literal = '"ЉЊЈЁЂ"'
first = False
def readline():
nonlocal first
if not first:
first = True
return literal
else:
return b''
# skip the end token
tokens = list(_tokenize(readline, encoding=None))[:-1]
expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
self.assertEqual(tokens, expected_tokens,
"string not tokenized when encoding is None")
class TestDetectEncoding(TestCase):
def get_readline(self, lines):
index = 0
def readline():
nonlocal index
if index == len(lines):
raise StopIteration
line = lines[index]
index += 1
return line
return readline
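    # Illustrative only (mirrors the cases exercised below): detect_encoding()
    # returns the normalized encoding name plus the raw lines it consumed, e.g.
    # detect_encoding(BytesIO(b'# coding: latin-1\nx = 1\n').readline)
    # -> ('iso-8859-1', [b'# coding: latin-1\n'])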
def test_no_bom_no_encoding_cookie(self):
lines = (
b'# something\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines, list(lines[:2]))
def test_bom_no_cookie(self):
lines = (
b'\xef\xbb\xbf# something\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines,
[b'# something\n', b'print(something)\n'])
def test_cookie_first_line_no_bom(self):
lines = (
b'# -*- coding: latin-1 -*-\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'iso-8859-1')
self.assertEqual(consumed_lines, [b'# -*- coding: latin-1 -*-\n'])
def test_matched_bom_and_cookie_first_line(self):
lines = (
b'\xef\xbb\xbf# coding=utf-8\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines, [b'# coding=utf-8\n'])
def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self):
lines = (
b'\xef\xbb\xbf# vim: set fileencoding=ascii :\n',
b'print(something)\n',
b'do_something(else)\n'
)
readline = self.get_readline(lines)
self.assertRaises(SyntaxError, detect_encoding, readline)
def test_cookie_second_line_no_bom(self):
lines = (
b'#! something\n',
b'# vim: set fileencoding=ascii :\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'ascii')
expected = [b'#! something\n', b'# vim: set fileencoding=ascii :\n']
self.assertEqual(consumed_lines, expected)
def test_matched_bom_and_cookie_second_line(self):
lines = (
b'\xef\xbb\xbf#! something\n',
b'f# coding=utf-8\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines,
[b'#! something\n', b'f# coding=utf-8\n'])
def test_mismatched_bom_and_cookie_second_line_raises_syntaxerror(self):
lines = (
b'\xef\xbb\xbf#! something\n',
b'# vim: set fileencoding=ascii :\n',
b'print(something)\n',
b'do_something(else)\n'
)
readline = self.get_readline(lines)
self.assertRaises(SyntaxError, detect_encoding, readline)
def test_latin1_normalization(self):
# See get_normal_name() in tokenizer.c.
encodings = ("latin-1", "iso-8859-1", "iso-latin-1", "latin-1-unix",
"iso-8859-1-unix", "iso-latin-1-mac")
for encoding in encodings:
for rep in ("-", "_"):
enc = encoding.replace("-", rep)
lines = (b"#!/usr/bin/python\n",
b"# coding: " + enc.encode("ascii") + b"\n",
b"print(things)\n",
b"do_something += 4\n")
rl = self.get_readline(lines)
found, consumed_lines = detect_encoding(rl)
self.assertEqual(found, "iso-8859-1")
def test_utf8_normalization(self):
# See get_normal_name() in tokenizer.c.
encodings = ("utf-8", "utf-8-mac", "utf-8-unix")
for encoding in encodings:
for rep in ("-", "_"):
enc = encoding.replace("-", rep)
lines = (b"#!/usr/bin/python\n",
b"# coding: " + enc.encode("ascii") + b"\n",
b"1 + 3\n")
rl = self.get_readline(lines)
found, consumed_lines = detect_encoding(rl)
self.assertEqual(found, "utf-8")
def test_short_files(self):
readline = self.get_readline((b'print(something)\n',))
encoding, consumed_lines = detect_encoding(readline)
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines, [b'print(something)\n'])
encoding, consumed_lines = detect_encoding(self.get_readline(()))
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines, [])
readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
encoding, consumed_lines = detect_encoding(readline)
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines, [b'print(something)\n'])
readline = self.get_readline((b'\xef\xbb\xbf',))
encoding, consumed_lines = detect_encoding(readline)
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines, [])
readline = self.get_readline((b'# coding: bad\n',))
self.assertRaises(SyntaxError, detect_encoding, readline)
class TestTokenize(TestCase):
def test_tokenize(self):
import tokenize as tokenize_module
encoding = object()
encoding_used = None
def mock_detect_encoding(readline):
return encoding, ['first', 'second']
def mock__tokenize(readline, encoding):
nonlocal encoding_used
encoding_used = encoding
out = []
while True:
next_line = readline()
if next_line:
out.append(next_line)
continue
return out
counter = 0
def mock_readline():
nonlocal counter
counter += 1
if counter == 5:
return b''
return counter
orig_detect_encoding = tokenize_module.detect_encoding
orig__tokenize = tokenize_module._tokenize
tokenize_module.detect_encoding = mock_detect_encoding
tokenize_module._tokenize = mock__tokenize
try:
results = tokenize(mock_readline)
self.assertEqual(list(results), ['first', 'second', 1, 2, 3, 4])
finally:
tokenize_module.detect_encoding = orig_detect_encoding
tokenize_module._tokenize = orig__tokenize
        self.assertEqual(encoding_used, encoding)
__test__ = {"doctests" : doctests, 'decistmt': decistmt}
def test_main():
from test import test_tokenize
support.run_doctest(test_tokenize, True)
support.run_unittest(TestTokenizerAdheresToPep0263)
support.run_unittest(Test_Tokenize)
support.run_unittest(TestDetectEncoding)
support.run_unittest(TestTokenize)
if __name__ == "__main__":
test_main()
|
gpl-3.0
|
junmin-kim/TizenRT
|
build/tools/esp32/esptool_py/partition_offset.py
|
10
|
1806
|
#!/usr/bin/python
# Copyright 2019 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###########################################################################
from __future__ import print_function
import sys
import re
config_file="../../../os/.config"
infile = open(config_file,'r')
for line in infile:
line = line.strip('\n')
if re.search("CONFIG_ESP_FLASH_BASE", line) != None:
flash_base_addr = line.split('=', 2)[1]
elif re.search("CONFIG_FLASH_PART_SIZE", line) != None:
flash_size_list = line.split('=')[1].strip('"')
flash_size_list = flash_size_list.split(",")
elif re.search("CONFIG_FLASH_PART_TYPE", line) != None:
flash_type_list = line.split('=',2)[1].strip('"')
flash_type_list = flash_type_list.split(',')
else:
continue
index = int(sys.argv[1])
index_startadr = int(flash_base_addr,16)
size = 0
lowest_index = 0
if index < lowest_index:
for i in range(len(flash_type_list)-1):
print(i,":",flash_type_list[i])
else:
for offset in range(index):
size+= int(flash_size_list[offset]) * 1024
index_startadr+=size
index_startadr = hex(index_startadr)
cursize=int(flash_size_list[index])*1024
cursize = hex(cursize)
print(index_startadr,cursize)
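# Usage sketch (hypothetical .config values, not taken from the repository):
#   CONFIG_ESP_FLASH_BASE=0x10000
#   CONFIG_FLASH_PART_SIZE="16,48,192"
#   CONFIG_FLASH_PART_TYPE="bootparam,kernel,smartfs"
# Running `python partition_offset.py 2` would then print the start address and
# size of the third partition in hex: hex(0x10000 + (16 + 48) * 1024) and hex(192 * 1024).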
|
apache-2.0
|
lulandco/SickRage
|
lib/chardet/sjisprober.py
|
53
|
3690
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJIS_SM_MODEL
from .enums import ProbingState, MachineState
class SJISProber(MultiByteCharSetProber):
def __init__(self):
super(SJISProber, self).__init__()
self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
self.distribution_analyzer = SJISDistributionAnalysis()
self.context_analyzer = SJISContextAnalysis()
self.reset()
def reset(self):
super(SJISProber, self).reset()
self.context_analyzer.reset()
@property
def charset_name(self):
return self.context_analyzer.charset_name
def feed(self, byte_str):
for i in range(len(byte_str)):
coding_state = self.coding_sm.next_state(byte_str[i])
if coding_state == MachineState.error:
self.logger.debug('%s prober hit error at byte %s',
self.charset_name, i)
self._state = ProbingState.not_me
break
elif coding_state == MachineState.its_me:
self._state = ProbingState.found_it
break
elif coding_state == MachineState.start:
char_len = self.coding_sm.get_current_charlen()
if i == 0:
self._last_char[1] = byte_str[0]
self.context_analyzer.feed(self._last_char[2 - char_len:],
char_len)
self.distribution_analyzer.feed(self._last_char, char_len)
else:
self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3
- char_len], char_len)
self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self._last_char[0] = byte_str[-1]
if self.state == ProbingState.detecting:
if (self.context_analyzer.got_enough_data() and
(self.get_confidence() > self.SHORTCUT_THRESHOLD)):
self._state = ProbingState.found_it
return self.state
def get_confidence(self):
context_conf = self.context_analyzer.get_confidence()
distrib_conf = self.distribution_analyzer.get_confidence()
return max(context_conf, distrib_conf)
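# Usage sketch (assumed from the methods defined above; applications normally use
# chardet's UniversalDetector rather than instantiating a prober directly):
#   prober = SJISProber()
#   state = prober.feed(b"\x82\xa0\x82\xa2")   # "あい" encoded as Shift_JIS
#   confidence = prober.get_confidence()
#   name = prober.charset_name                 # e.g. "SHIFT_JIS"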
|
gpl-3.0
|
MPIB/Lagerregal
|
devicetypes/tests.py
|
1
|
2366
|
from django.test import TestCase
from django.test.client import Client
from django.urls import reverse
from model_mommy import mommy
from devicetypes.models import Type
from users.models import Lageruser
class TypeTests(TestCase):
def setUp(self):
self.client = Client()
Lageruser.objects.create_superuser('test', '[email protected]', "test")
self.client.login(username="test", password="test")
def test_type_creation(self):
devicetype = mommy.make(Type)
self.assertTrue(isinstance(devicetype, Type))
self.assertEqual(str(devicetype), devicetype.name)
self.assertEqual(devicetype.get_absolute_url(), reverse('type-detail', kwargs={'pk': devicetype.pk}))
self.assertEqual(devicetype.get_edit_url(), reverse('type-edit', kwargs={'pk': devicetype.pk}))
def test_list_view(self):
mommy.make(Type, _quantity=40)
response = self.client.get('/types/')
self.assertEqual(response.status_code, 200)
        # test that only the first 30 results of the query are shown on one page
self.assertEqual(len(response.context["type_list"]), 30)
self.assertEqual(response.context["paginator"].num_pages, 2)
        # test that the second page of the devicetype list loads successfully (status code 200)
response = self.client.get('/types/?page=2')
self.assertEqual(response.status_code, 200)
def test_detail_view(self):
devicetype = mommy.make(Type)
response = self.client.get('/types/%i/' % devicetype.pk)
self.assertEqual(response.status_code, 200)
def test_create_view(self):
response = self.client.get('/types/add/')
self.assertEqual(response.status_code, 200)
def test_update_view(self):
devicetype = mommy.make(Type)
response = self.client.get('/types/%i/edit/' % devicetype.pk)
self.assertEqual(response.status_code, 200)
def test_delete_view(self):
devicetype = mommy.make(Type)
response = self.client.get('/types/%i/delete/' % devicetype.pk)
self.assertEqual(response.status_code, 200)
def test_merge_view(self):
devicetype1 = mommy.make(Type)
devicetype2 = mommy.make(Type)
response = self.client.get('/types/%i/merge/%i/' % (devicetype1.pk, devicetype2.pk))
self.assertEqual(response.status_code, 200)
|
bsd-3-clause
|
mollstam/UnrealPy
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/postgres_tests/models.py
|
26
|
1983
|
from django.contrib.postgres.fields import (
ArrayField, BigIntegerRangeField, DateRangeField, DateTimeRangeField,
FloatRangeField, HStoreField, IntegerRangeField,
)
from django.db import connection, models
class IntegerArrayModel(models.Model):
field = ArrayField(models.IntegerField())
class NullableIntegerArrayModel(models.Model):
field = ArrayField(models.IntegerField(), blank=True, null=True)
class CharArrayModel(models.Model):
field = ArrayField(models.CharField(max_length=10))
class DateTimeArrayModel(models.Model):
datetimes = ArrayField(models.DateTimeField())
dates = ArrayField(models.DateField())
times = ArrayField(models.TimeField())
class NestedIntegerArrayModel(models.Model):
field = ArrayField(ArrayField(models.IntegerField()))
class OtherTypesArrayModel(models.Model):
ips = ArrayField(models.GenericIPAddressField())
uuids = ArrayField(models.UUIDField())
decimals = ArrayField(models.DecimalField(max_digits=5, decimal_places=2))
class HStoreModel(models.Model):
field = HStoreField(blank=True, null=True)
class CharFieldModel(models.Model):
field = models.CharField(max_length=16)
class TextFieldModel(models.Model):
field = models.TextField()
# Only create this model for databases which support it
if connection.vendor == 'postgresql' and connection.pg_version >= 90200:
class RangesModel(models.Model):
ints = IntegerRangeField(blank=True, null=True)
bigints = BigIntegerRangeField(blank=True, null=True)
floats = FloatRangeField(blank=True, null=True)
timestamps = DateTimeRangeField(blank=True, null=True)
dates = DateRangeField(blank=True, null=True)
else:
# create an object with this name so we don't have failing imports
class RangesModel(object):
pass
class ArrayFieldSubclass(ArrayField):
def __init__(self, *args, **kwargs):
super(ArrayFieldSubclass, self).__init__(models.IntegerField())
|
mit
|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/stringprep.py
|
205
|
12917
|
# This file is generated by mkstringprep.py. DO NOT EDIT.
"""Library that exposes various tables found in the StringPrep RFC 3454.
There are two kinds of tables: sets, for which a member test is provided,
and mappings, for which a mapping function is provided.
"""
from unicodedata import ucd_3_2_0 as unicodedata
assert unicodedata.unidata_version == '3.2.0'
def in_table_a1(code):
if unicodedata.category(code) != 'Cn': return False
c = ord(code)
if 0xFDD0 <= c < 0xFDF0: return False
return (c & 0xFFFF) not in (0xFFFE, 0xFFFF)
b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279] + list(range(65024,65040)))
def in_table_b1(code):
return ord(code) in b1_set
b3_exceptions = {
0xb5:'\u03bc', 0xdf:'ss', 0x130:'i\u0307', 0x149:'\u02bcn',
0x17f:'s', 0x1f0:'j\u030c', 0x345:'\u03b9', 0x37a:' \u03b9',
0x390:'\u03b9\u0308\u0301', 0x3b0:'\u03c5\u0308\u0301', 0x3c2:'\u03c3', 0x3d0:'\u03b2',
0x3d1:'\u03b8', 0x3d2:'\u03c5', 0x3d3:'\u03cd', 0x3d4:'\u03cb',
0x3d5:'\u03c6', 0x3d6:'\u03c0', 0x3f0:'\u03ba', 0x3f1:'\u03c1',
0x3f2:'\u03c3', 0x3f5:'\u03b5', 0x587:'\u0565\u0582', 0x1e96:'h\u0331',
0x1e97:'t\u0308', 0x1e98:'w\u030a', 0x1e99:'y\u030a', 0x1e9a:'a\u02be',
0x1e9b:'\u1e61', 0x1f50:'\u03c5\u0313', 0x1f52:'\u03c5\u0313\u0300', 0x1f54:'\u03c5\u0313\u0301',
0x1f56:'\u03c5\u0313\u0342', 0x1f80:'\u1f00\u03b9', 0x1f81:'\u1f01\u03b9', 0x1f82:'\u1f02\u03b9',
0x1f83:'\u1f03\u03b9', 0x1f84:'\u1f04\u03b9', 0x1f85:'\u1f05\u03b9', 0x1f86:'\u1f06\u03b9',
0x1f87:'\u1f07\u03b9', 0x1f88:'\u1f00\u03b9', 0x1f89:'\u1f01\u03b9', 0x1f8a:'\u1f02\u03b9',
0x1f8b:'\u1f03\u03b9', 0x1f8c:'\u1f04\u03b9', 0x1f8d:'\u1f05\u03b9', 0x1f8e:'\u1f06\u03b9',
0x1f8f:'\u1f07\u03b9', 0x1f90:'\u1f20\u03b9', 0x1f91:'\u1f21\u03b9', 0x1f92:'\u1f22\u03b9',
0x1f93:'\u1f23\u03b9', 0x1f94:'\u1f24\u03b9', 0x1f95:'\u1f25\u03b9', 0x1f96:'\u1f26\u03b9',
0x1f97:'\u1f27\u03b9', 0x1f98:'\u1f20\u03b9', 0x1f99:'\u1f21\u03b9', 0x1f9a:'\u1f22\u03b9',
0x1f9b:'\u1f23\u03b9', 0x1f9c:'\u1f24\u03b9', 0x1f9d:'\u1f25\u03b9', 0x1f9e:'\u1f26\u03b9',
0x1f9f:'\u1f27\u03b9', 0x1fa0:'\u1f60\u03b9', 0x1fa1:'\u1f61\u03b9', 0x1fa2:'\u1f62\u03b9',
0x1fa3:'\u1f63\u03b9', 0x1fa4:'\u1f64\u03b9', 0x1fa5:'\u1f65\u03b9', 0x1fa6:'\u1f66\u03b9',
0x1fa7:'\u1f67\u03b9', 0x1fa8:'\u1f60\u03b9', 0x1fa9:'\u1f61\u03b9', 0x1faa:'\u1f62\u03b9',
0x1fab:'\u1f63\u03b9', 0x1fac:'\u1f64\u03b9', 0x1fad:'\u1f65\u03b9', 0x1fae:'\u1f66\u03b9',
0x1faf:'\u1f67\u03b9', 0x1fb2:'\u1f70\u03b9', 0x1fb3:'\u03b1\u03b9', 0x1fb4:'\u03ac\u03b9',
0x1fb6:'\u03b1\u0342', 0x1fb7:'\u03b1\u0342\u03b9', 0x1fbc:'\u03b1\u03b9', 0x1fbe:'\u03b9',
0x1fc2:'\u1f74\u03b9', 0x1fc3:'\u03b7\u03b9', 0x1fc4:'\u03ae\u03b9', 0x1fc6:'\u03b7\u0342',
0x1fc7:'\u03b7\u0342\u03b9', 0x1fcc:'\u03b7\u03b9', 0x1fd2:'\u03b9\u0308\u0300', 0x1fd3:'\u03b9\u0308\u0301',
0x1fd6:'\u03b9\u0342', 0x1fd7:'\u03b9\u0308\u0342', 0x1fe2:'\u03c5\u0308\u0300', 0x1fe3:'\u03c5\u0308\u0301',
0x1fe4:'\u03c1\u0313', 0x1fe6:'\u03c5\u0342', 0x1fe7:'\u03c5\u0308\u0342', 0x1ff2:'\u1f7c\u03b9',
0x1ff3:'\u03c9\u03b9', 0x1ff4:'\u03ce\u03b9', 0x1ff6:'\u03c9\u0342', 0x1ff7:'\u03c9\u0342\u03b9',
0x1ffc:'\u03c9\u03b9', 0x20a8:'rs', 0x2102:'c', 0x2103:'\xb0c',
0x2107:'\u025b', 0x2109:'\xb0f', 0x210b:'h', 0x210c:'h',
0x210d:'h', 0x2110:'i', 0x2111:'i', 0x2112:'l',
0x2115:'n', 0x2116:'no', 0x2119:'p', 0x211a:'q',
0x211b:'r', 0x211c:'r', 0x211d:'r', 0x2120:'sm',
0x2121:'tel', 0x2122:'tm', 0x2124:'z', 0x2128:'z',
0x212c:'b', 0x212d:'c', 0x2130:'e', 0x2131:'f',
0x2133:'m', 0x213e:'\u03b3', 0x213f:'\u03c0', 0x2145:'d',
0x3371:'hpa', 0x3373:'au', 0x3375:'ov', 0x3380:'pa',
0x3381:'na', 0x3382:'\u03bca', 0x3383:'ma', 0x3384:'ka',
0x3385:'kb', 0x3386:'mb', 0x3387:'gb', 0x338a:'pf',
0x338b:'nf', 0x338c:'\u03bcf', 0x3390:'hz', 0x3391:'khz',
0x3392:'mhz', 0x3393:'ghz', 0x3394:'thz', 0x33a9:'pa',
0x33aa:'kpa', 0x33ab:'mpa', 0x33ac:'gpa', 0x33b4:'pv',
0x33b5:'nv', 0x33b6:'\u03bcv', 0x33b7:'mv', 0x33b8:'kv',
0x33b9:'mv', 0x33ba:'pw', 0x33bb:'nw', 0x33bc:'\u03bcw',
0x33bd:'mw', 0x33be:'kw', 0x33bf:'mw', 0x33c0:'k\u03c9',
0x33c1:'m\u03c9', 0x33c3:'bq', 0x33c6:'c\u2215kg', 0x33c7:'co.',
0x33c8:'db', 0x33c9:'gy', 0x33cb:'hp', 0x33cd:'kk',
0x33ce:'km', 0x33d7:'ph', 0x33d9:'ppm', 0x33da:'pr',
0x33dc:'sv', 0x33dd:'wb', 0xfb00:'ff', 0xfb01:'fi',
0xfb02:'fl', 0xfb03:'ffi', 0xfb04:'ffl', 0xfb05:'st',
0xfb06:'st', 0xfb13:'\u0574\u0576', 0xfb14:'\u0574\u0565', 0xfb15:'\u0574\u056b',
0xfb16:'\u057e\u0576', 0xfb17:'\u0574\u056d', 0x1d400:'a', 0x1d401:'b',
0x1d402:'c', 0x1d403:'d', 0x1d404:'e', 0x1d405:'f',
0x1d406:'g', 0x1d407:'h', 0x1d408:'i', 0x1d409:'j',
0x1d40a:'k', 0x1d40b:'l', 0x1d40c:'m', 0x1d40d:'n',
0x1d40e:'o', 0x1d40f:'p', 0x1d410:'q', 0x1d411:'r',
0x1d412:'s', 0x1d413:'t', 0x1d414:'u', 0x1d415:'v',
0x1d416:'w', 0x1d417:'x', 0x1d418:'y', 0x1d419:'z',
0x1d434:'a', 0x1d435:'b', 0x1d436:'c', 0x1d437:'d',
0x1d438:'e', 0x1d439:'f', 0x1d43a:'g', 0x1d43b:'h',
0x1d43c:'i', 0x1d43d:'j', 0x1d43e:'k', 0x1d43f:'l',
0x1d440:'m', 0x1d441:'n', 0x1d442:'o', 0x1d443:'p',
0x1d444:'q', 0x1d445:'r', 0x1d446:'s', 0x1d447:'t',
0x1d448:'u', 0x1d449:'v', 0x1d44a:'w', 0x1d44b:'x',
0x1d44c:'y', 0x1d44d:'z', 0x1d468:'a', 0x1d469:'b',
0x1d46a:'c', 0x1d46b:'d', 0x1d46c:'e', 0x1d46d:'f',
0x1d46e:'g', 0x1d46f:'h', 0x1d470:'i', 0x1d471:'j',
0x1d472:'k', 0x1d473:'l', 0x1d474:'m', 0x1d475:'n',
0x1d476:'o', 0x1d477:'p', 0x1d478:'q', 0x1d479:'r',
0x1d47a:'s', 0x1d47b:'t', 0x1d47c:'u', 0x1d47d:'v',
0x1d47e:'w', 0x1d47f:'x', 0x1d480:'y', 0x1d481:'z',
0x1d49c:'a', 0x1d49e:'c', 0x1d49f:'d', 0x1d4a2:'g',
0x1d4a5:'j', 0x1d4a6:'k', 0x1d4a9:'n', 0x1d4aa:'o',
0x1d4ab:'p', 0x1d4ac:'q', 0x1d4ae:'s', 0x1d4af:'t',
0x1d4b0:'u', 0x1d4b1:'v', 0x1d4b2:'w', 0x1d4b3:'x',
0x1d4b4:'y', 0x1d4b5:'z', 0x1d4d0:'a', 0x1d4d1:'b',
0x1d4d2:'c', 0x1d4d3:'d', 0x1d4d4:'e', 0x1d4d5:'f',
0x1d4d6:'g', 0x1d4d7:'h', 0x1d4d8:'i', 0x1d4d9:'j',
0x1d4da:'k', 0x1d4db:'l', 0x1d4dc:'m', 0x1d4dd:'n',
0x1d4de:'o', 0x1d4df:'p', 0x1d4e0:'q', 0x1d4e1:'r',
0x1d4e2:'s', 0x1d4e3:'t', 0x1d4e4:'u', 0x1d4e5:'v',
0x1d4e6:'w', 0x1d4e7:'x', 0x1d4e8:'y', 0x1d4e9:'z',
0x1d504:'a', 0x1d505:'b', 0x1d507:'d', 0x1d508:'e',
0x1d509:'f', 0x1d50a:'g', 0x1d50d:'j', 0x1d50e:'k',
0x1d50f:'l', 0x1d510:'m', 0x1d511:'n', 0x1d512:'o',
0x1d513:'p', 0x1d514:'q', 0x1d516:'s', 0x1d517:'t',
0x1d518:'u', 0x1d519:'v', 0x1d51a:'w', 0x1d51b:'x',
0x1d51c:'y', 0x1d538:'a', 0x1d539:'b', 0x1d53b:'d',
0x1d53c:'e', 0x1d53d:'f', 0x1d53e:'g', 0x1d540:'i',
0x1d541:'j', 0x1d542:'k', 0x1d543:'l', 0x1d544:'m',
0x1d546:'o', 0x1d54a:'s', 0x1d54b:'t', 0x1d54c:'u',
0x1d54d:'v', 0x1d54e:'w', 0x1d54f:'x', 0x1d550:'y',
0x1d56c:'a', 0x1d56d:'b', 0x1d56e:'c', 0x1d56f:'d',
0x1d570:'e', 0x1d571:'f', 0x1d572:'g', 0x1d573:'h',
0x1d574:'i', 0x1d575:'j', 0x1d576:'k', 0x1d577:'l',
0x1d578:'m', 0x1d579:'n', 0x1d57a:'o', 0x1d57b:'p',
0x1d57c:'q', 0x1d57d:'r', 0x1d57e:'s', 0x1d57f:'t',
0x1d580:'u', 0x1d581:'v', 0x1d582:'w', 0x1d583:'x',
0x1d584:'y', 0x1d585:'z', 0x1d5a0:'a', 0x1d5a1:'b',
0x1d5a2:'c', 0x1d5a3:'d', 0x1d5a4:'e', 0x1d5a5:'f',
0x1d5a6:'g', 0x1d5a7:'h', 0x1d5a8:'i', 0x1d5a9:'j',
0x1d5aa:'k', 0x1d5ab:'l', 0x1d5ac:'m', 0x1d5ad:'n',
0x1d5ae:'o', 0x1d5af:'p', 0x1d5b0:'q', 0x1d5b1:'r',
0x1d5b2:'s', 0x1d5b3:'t', 0x1d5b4:'u', 0x1d5b5:'v',
0x1d5b6:'w', 0x1d5b7:'x', 0x1d5b8:'y', 0x1d5b9:'z',
0x1d5d4:'a', 0x1d5d5:'b', 0x1d5d6:'c', 0x1d5d7:'d',
0x1d5d8:'e', 0x1d5d9:'f', 0x1d5da:'g', 0x1d5db:'h',
0x1d5dc:'i', 0x1d5dd:'j', 0x1d5de:'k', 0x1d5df:'l',
0x1d5e0:'m', 0x1d5e1:'n', 0x1d5e2:'o', 0x1d5e3:'p',
0x1d5e4:'q', 0x1d5e5:'r', 0x1d5e6:'s', 0x1d5e7:'t',
0x1d5e8:'u', 0x1d5e9:'v', 0x1d5ea:'w', 0x1d5eb:'x',
0x1d5ec:'y', 0x1d5ed:'z', 0x1d608:'a', 0x1d609:'b',
0x1d60a:'c', 0x1d60b:'d', 0x1d60c:'e', 0x1d60d:'f',
0x1d60e:'g', 0x1d60f:'h', 0x1d610:'i', 0x1d611:'j',
0x1d612:'k', 0x1d613:'l', 0x1d614:'m', 0x1d615:'n',
0x1d616:'o', 0x1d617:'p', 0x1d618:'q', 0x1d619:'r',
0x1d61a:'s', 0x1d61b:'t', 0x1d61c:'u', 0x1d61d:'v',
0x1d61e:'w', 0x1d61f:'x', 0x1d620:'y', 0x1d621:'z',
0x1d63c:'a', 0x1d63d:'b', 0x1d63e:'c', 0x1d63f:'d',
0x1d640:'e', 0x1d641:'f', 0x1d642:'g', 0x1d643:'h',
0x1d644:'i', 0x1d645:'j', 0x1d646:'k', 0x1d647:'l',
0x1d648:'m', 0x1d649:'n', 0x1d64a:'o', 0x1d64b:'p',
0x1d64c:'q', 0x1d64d:'r', 0x1d64e:'s', 0x1d64f:'t',
0x1d650:'u', 0x1d651:'v', 0x1d652:'w', 0x1d653:'x',
0x1d654:'y', 0x1d655:'z', 0x1d670:'a', 0x1d671:'b',
0x1d672:'c', 0x1d673:'d', 0x1d674:'e', 0x1d675:'f',
0x1d676:'g', 0x1d677:'h', 0x1d678:'i', 0x1d679:'j',
0x1d67a:'k', 0x1d67b:'l', 0x1d67c:'m', 0x1d67d:'n',
0x1d67e:'o', 0x1d67f:'p', 0x1d680:'q', 0x1d681:'r',
0x1d682:'s', 0x1d683:'t', 0x1d684:'u', 0x1d685:'v',
0x1d686:'w', 0x1d687:'x', 0x1d688:'y', 0x1d689:'z',
0x1d6a8:'\u03b1', 0x1d6a9:'\u03b2', 0x1d6aa:'\u03b3', 0x1d6ab:'\u03b4',
0x1d6ac:'\u03b5', 0x1d6ad:'\u03b6', 0x1d6ae:'\u03b7', 0x1d6af:'\u03b8',
0x1d6b0:'\u03b9', 0x1d6b1:'\u03ba', 0x1d6b2:'\u03bb', 0x1d6b3:'\u03bc',
0x1d6b4:'\u03bd', 0x1d6b5:'\u03be', 0x1d6b6:'\u03bf', 0x1d6b7:'\u03c0',
0x1d6b8:'\u03c1', 0x1d6b9:'\u03b8', 0x1d6ba:'\u03c3', 0x1d6bb:'\u03c4',
0x1d6bc:'\u03c5', 0x1d6bd:'\u03c6', 0x1d6be:'\u03c7', 0x1d6bf:'\u03c8',
0x1d6c0:'\u03c9', 0x1d6d3:'\u03c3', 0x1d6e2:'\u03b1', 0x1d6e3:'\u03b2',
0x1d6e4:'\u03b3', 0x1d6e5:'\u03b4', 0x1d6e6:'\u03b5', 0x1d6e7:'\u03b6',
0x1d6e8:'\u03b7', 0x1d6e9:'\u03b8', 0x1d6ea:'\u03b9', 0x1d6eb:'\u03ba',
0x1d6ec:'\u03bb', 0x1d6ed:'\u03bc', 0x1d6ee:'\u03bd', 0x1d6ef:'\u03be',
0x1d6f0:'\u03bf', 0x1d6f1:'\u03c0', 0x1d6f2:'\u03c1', 0x1d6f3:'\u03b8',
0x1d6f4:'\u03c3', 0x1d6f5:'\u03c4', 0x1d6f6:'\u03c5', 0x1d6f7:'\u03c6',
0x1d6f8:'\u03c7', 0x1d6f9:'\u03c8', 0x1d6fa:'\u03c9', 0x1d70d:'\u03c3',
0x1d71c:'\u03b1', 0x1d71d:'\u03b2', 0x1d71e:'\u03b3', 0x1d71f:'\u03b4',
0x1d720:'\u03b5', 0x1d721:'\u03b6', 0x1d722:'\u03b7', 0x1d723:'\u03b8',
0x1d724:'\u03b9', 0x1d725:'\u03ba', 0x1d726:'\u03bb', 0x1d727:'\u03bc',
0x1d728:'\u03bd', 0x1d729:'\u03be', 0x1d72a:'\u03bf', 0x1d72b:'\u03c0',
0x1d72c:'\u03c1', 0x1d72d:'\u03b8', 0x1d72e:'\u03c3', 0x1d72f:'\u03c4',
0x1d730:'\u03c5', 0x1d731:'\u03c6', 0x1d732:'\u03c7', 0x1d733:'\u03c8',
0x1d734:'\u03c9', 0x1d747:'\u03c3', 0x1d756:'\u03b1', 0x1d757:'\u03b2',
0x1d758:'\u03b3', 0x1d759:'\u03b4', 0x1d75a:'\u03b5', 0x1d75b:'\u03b6',
0x1d75c:'\u03b7', 0x1d75d:'\u03b8', 0x1d75e:'\u03b9', 0x1d75f:'\u03ba',
0x1d760:'\u03bb', 0x1d761:'\u03bc', 0x1d762:'\u03bd', 0x1d763:'\u03be',
0x1d764:'\u03bf', 0x1d765:'\u03c0', 0x1d766:'\u03c1', 0x1d767:'\u03b8',
0x1d768:'\u03c3', 0x1d769:'\u03c4', 0x1d76a:'\u03c5', 0x1d76b:'\u03c6',
0x1d76c:'\u03c7', 0x1d76d:'\u03c8', 0x1d76e:'\u03c9', 0x1d781:'\u03c3',
0x1d790:'\u03b1', 0x1d791:'\u03b2', 0x1d792:'\u03b3', 0x1d793:'\u03b4',
0x1d794:'\u03b5', 0x1d795:'\u03b6', 0x1d796:'\u03b7', 0x1d797:'\u03b8',
0x1d798:'\u03b9', 0x1d799:'\u03ba', 0x1d79a:'\u03bb', 0x1d79b:'\u03bc',
0x1d79c:'\u03bd', 0x1d79d:'\u03be', 0x1d79e:'\u03bf', 0x1d79f:'\u03c0',
0x1d7a0:'\u03c1', 0x1d7a1:'\u03b8', 0x1d7a2:'\u03c3', 0x1d7a3:'\u03c4',
0x1d7a4:'\u03c5', 0x1d7a5:'\u03c6', 0x1d7a6:'\u03c7', 0x1d7a7:'\u03c8',
0x1d7a8:'\u03c9', 0x1d7bb:'\u03c3', }
def map_table_b3(code):
r = b3_exceptions.get(ord(code))
if r is not None: return r
return code.lower()
def map_table_b2(a):
al = map_table_b3(a)
b = unicodedata.normalize("NFKC", al)
bl = "".join([map_table_b3(ch) for ch in b])
c = unicodedata.normalize("NFKC", bl)
if b != c:
return c
else:
return al
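# Examples (derived from the tables above, illustrative):
#   map_table_b3('\u00df') == 'ss'   # LATIN SMALL LETTER SHARP S case-folds to "ss"
#   map_table_b2('A') == 'a'         # case-fold, then NFKC-normalize and re-fold if needed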
def in_table_c11(code):
return code == " "
def in_table_c12(code):
return unicodedata.category(code) == "Zs" and code != " "
def in_table_c11_c12(code):
return unicodedata.category(code) == "Zs"
def in_table_c21(code):
return ord(code) < 128 and unicodedata.category(code) == "Cc"
c22_specials = set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279] + list(range(8288,8292)) + list(range(8298,8304)) + list(range(65529,65533)) + list(range(119155,119163)))
def in_table_c22(code):
c = ord(code)
if c < 128: return False
if unicodedata.category(code) == "Cc": return True
return c in c22_specials
def in_table_c21_c22(code):
return unicodedata.category(code) == "Cc" or \
ord(code) in c22_specials
def in_table_c3(code):
return unicodedata.category(code) == "Co"
def in_table_c4(code):
c = ord(code)
if c < 0xFDD0: return False
if c < 0xFDF0: return True
return (ord(code) & 0xFFFF) in (0xFFFE, 0xFFFF)
def in_table_c5(code):
return unicodedata.category(code) == "Cs"
c6_set = set(range(65529,65534))
def in_table_c6(code):
return ord(code) in c6_set
c7_set = set(range(12272,12284))
def in_table_c7(code):
return ord(code) in c7_set
c8_set = set([832, 833, 8206, 8207] + list(range(8234,8239)) + list(range(8298,8304)))
def in_table_c8(code):
return ord(code) in c8_set
c9_set = set([917505] + list(range(917536,917632)))
def in_table_c9(code):
return ord(code) in c9_set
def in_table_d1(code):
return unicodedata.bidirectional(code) in ("R","AL")
def in_table_d2(code):
return unicodedata.bidirectional(code) == "L"
|
apache-2.0
|
simontakite/sysadmin
|
pythonscripts/pythonnetworkingcoookbook/chapter7/7_7_configure_Apache_for_hosting_website_remotely.py
|
4
|
1458
|
#!/usr/bin/env python
# Python Network Programming Cookbook -- Chapter - 7
# This program is optimized for Python 2.7.
# It may run on any other version with/without modifications.
from getpass import getpass
from fabric.api import env, put, sudo, prompt
from fabric.contrib.files import exists
WWW_DOC_ROOT = "/data/apache/test/"
WWW_USER = "www-data"
WWW_GROUP = "www-data"
APACHE_SITES_PATH = "/etc/apache2/sites-enabled/"
APACHE_INIT_SCRIPT = "/etc/init.d/apache2 "
def remote_server():
env.hosts = ['127.0.0.1']
env.user = prompt('Enter user name: ')
env.password = getpass('Enter your system password: ')
def setup_vhost():
""" Setup a test website """
print "Preparing the Apache vhost setup..."
print "Setting up the document root..."
if exists(WWW_DOC_ROOT):
sudo("rm -rf %s" %WWW_DOC_ROOT)
sudo("mkdir -p %s" %WWW_DOC_ROOT)
sudo("chown -R %s.%s %s" %(env.user, env.user, WWW_DOC_ROOT))
put(local_path="index.html", remote_path=WWW_DOC_ROOT)
sudo("chown -R %s.%s %s" %(WWW_USER, WWW_GROUP, WWW_DOC_ROOT))
print "Setting up the vhost..."
sudo("chown -R %s.%s %s" %(env.user, env.user, APACHE_SITES_PATH))
put(local_path="vhost.conf", remote_path=APACHE_SITES_PATH)
sudo("chown -R %s.%s %s" %('root', 'root', APACHE_SITES_PATH))
sudo("%s restart" %APACHE_INIT_SCRIPT)
print "Setup complete. Now open the server path http://abc.remote-server.org/ in your web browser."
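# Usage sketch (assumed invocation; the local index.html and vhost.conf referenced by
# put() must exist next to this fabfile):
#   fab -f 7_7_configure_Apache_for_hosting_website_remotely.py remote_server setup_vhost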
|
gpl-2.0
|
alphafoobar/intellij-community
|
python/helpers/docutils/utils.py
|
40
|
24558
|
# $Id: utils.py 6394 2010-08-20 11:26:58Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import warnings
import unicodedata
from docutils import ApplicationError, DataError
from docutils import nodes
from docutils._compat import bytes
class SystemMessage(ApplicationError):
def __init__(self, system_message, level):
Exception.__init__(self, system_message.astext())
self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter:
"""
Info/warning/error reporter and ``system_message`` element generator.
Five levels of system messages are defined, along with corresponding
methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.
There is typically one Reporter object per process. A Reporter object is
instantiated with thresholds for reporting (generating warnings) and
halting processing (raising exceptions), a switch to turn debug output on
or off, and an I/O stream for warnings. These are stored as instance
attributes.
When a system message is generated, its level is compared to the stored
thresholds, and a warning or error is generated as appropriate. Debug
messages are produced iff the stored debug switch is on, independently of
other thresholds. Message output is sent to the stored warning stream if
not set to ''.
The Reporter class also employs a modified form of the "Observer" pattern
[GoF95]_ to track system messages generated. The `attach_observer` method
should be called before parsing, with a bound method or function which
accepts system messages. The observer can be removed with
`detach_observer`, and another added in its place.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
"""List of names for system message levels, indexed by level."""
# system message level constants:
(DEBUG_LEVEL,
INFO_LEVEL,
WARNING_LEVEL,
ERROR_LEVEL,
SEVERE_LEVEL) = range(5)
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding=None, error_handler='backslashreplace'):
"""
:Parameters:
- `source`: The path to or description of the source data.
- `report_level`: The level at or above which warning output will
be sent to `stream`.
- `halt_level`: The level at or above which `SystemMessage`
exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing),
'' (empty string, for discarding all stream messages) or
`None` (implies `sys.stderr`; default).
- `encoding`: The output encoding.
- `error_handler`: The error handler for stderr output encoding.
"""
self.source = source
"""The path to or description of the source data."""
self.error_handler = error_handler
"""The character encoding error handler."""
self.debug_flag = debug
"""Show debug (level=0) system messages?"""
self.report_level = report_level
"""The level at or above which warning output will be sent
to `self.stream`."""
self.halt_level = halt_level
"""The level at or above which `SystemMessage` exceptions
will be raised, halting execution."""
if stream is None:
stream = sys.stderr
elif stream and type(stream) in (unicode, bytes):
# if `stream` is a file name, open it
if type(stream) is bytes:
stream = open(stream, 'w')
else:
stream = open(stream.encode(), 'w')
self.stream = stream
"""Where warning output is sent."""
if encoding is None:
try:
encoding = stream.encoding
except AttributeError:
pass
self.encoding = encoding or 'ascii'
"""The output character encoding."""
self.observers = []
"""List of bound methods or functions to call with each system_message
created."""
self.max_level = -1
"""The highest level system message generated so far."""
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=0):
warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
'set attributes via configuration settings or directly',
DeprecationWarning, stacklevel=2)
self.report_level = report_level
self.halt_level = halt_level
if stream is None:
stream = sys.stderr
self.stream = stream
self.debug_flag = debug
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes one
argument, a `nodes.system_message` instance.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self, message):
for observer in self.observers:
observer(message)
def system_message(self, level, message, *children, **kwargs):
"""
Return a system_message object.
Raise an exception or generate a warning if appropriate.
"""
attributes = kwargs.copy()
if 'base_node' in kwargs:
source, line = get_source_line(kwargs['base_node'])
del attributes['base_node']
if source is not None:
attributes.setdefault('source', source)
if line is not None:
attributes.setdefault('line', line)
# assert source is not None, "node has line- but no source-argument"
if not 'source' in attributes: # 'line' is absolute line number
try: # look up (source, line-in-source)
source, line = self.locator(attributes.get('line'))
# print "locator lookup", kwargs.get('line'), "->", source, line
except AttributeError:
source, line = None, None
if source is not None:
attributes['source'] = source
if line is not None:
attributes['line'] = line
# assert attributes['line'] is not None, (message, kwargs)
# assert attributes['source'] is not None, (message, kwargs)
attributes.setdefault('source', self.source)
msg = nodes.system_message(message, level=level,
type=self.levels[level],
*children, **attributes)
if self.stream and (level >= self.report_level
or self.debug_flag and level == self.DEBUG_LEVEL
or level >= self.halt_level):
msgtext = msg.astext() + '\n'
try:
self.stream.write(msgtext)
except UnicodeEncodeError:
self.stream.write(msgtext.encode(self.encoding,
self.error_handler))
if level >= self.halt_level:
raise SystemMessage(msg, level)
if level > self.DEBUG_LEVEL or self.debug_flag:
self.notify_observers(msg)
self.max_level = max(level, self.max_level)
return msg
def debug(self, *args, **kwargs):
"""
Level-0, "DEBUG": an internal reporting issue. Typically, there is no
effect on the processing. Level-0 system messages are handled
separately from the others.
"""
if self.debug_flag:
return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)
def info(self, *args, **kwargs):
"""
Level-1, "INFO": a minor issue that can be ignored. Typically there is
no effect on processing, and level-1 system messages are not reported.
"""
return self.system_message(self.INFO_LEVEL, *args, **kwargs)
def warning(self, *args, **kwargs):
"""
Level-2, "WARNING": an issue that should be addressed. If ignored,
there may be unpredictable problems with the output.
"""
return self.system_message(self.WARNING_LEVEL, *args, **kwargs)
def error(self, *args, **kwargs):
"""
Level-3, "ERROR": an error that should be addressed. If ignored, the
output will contain errors.
"""
return self.system_message(self.ERROR_LEVEL, *args, **kwargs)
def severe(self, *args, **kwargs):
"""
Level-4, "SEVERE": a severe error that must be addressed. If ignored,
the output will contain severe errors. Typically level-4 system
messages are turned into exceptions which halt processing.
"""
return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
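# Usage sketch (assumed, using only the API defined above):
#   messages = []
#   reporter = Reporter('example.txt', report_level=Reporter.WARNING_LEVEL,
#                       halt_level=Reporter.SEVERE_LEVEL)
#   reporter.attach_observer(messages.append)          # collect system_message nodes
#   reporter.warning('title underline too short', line=3)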
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
"""
Return a dictionary mapping extension option names to converted values.
:Parameters:
- `field_list`: A flat field list without field arguments, where each
field body consists of a single paragraph only.
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
- `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = extract_options(field_list)
option_dict = assemble_option_dict(option_list, options_spec)
return option_dict
def extract_options(field_list):
"""
Return a list of option (name, value) pairs from field names & bodies.
:Parameter:
`field_list`: A flat field list, where each field name is a single
word and each field body consists of a single paragraph only.
:Exceptions:
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = []
for field in field_list:
if len(field[0].astext().split()) != 1:
raise BadOptionError(
'extension option field name may not contain multiple words')
name = str(field[0].astext().lower())
body = field[1]
if len(body) == 0:
data = None
elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
raise BadOptionDataError(
'extension option field body may contain\n'
'a single paragraph only (option "%s")' % name)
else:
data = body[0][0].astext()
option_list.append((name, data))
return option_list
def assemble_option_dict(option_list, options_spec):
"""
Return a mapping of option names to values.
:Parameters:
- `option_list`: A list of (name, value) pairs (the output of
`extract_options()`).
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
"""
options = {}
for name, value in option_list:
convertor = options_spec[name] # raises KeyError if unknown
if convertor is None:
raise KeyError(name) # or if explicitly disabled
if name in options:
raise DuplicateOptionError('duplicate option "%s"' % name)
try:
options[name] = convertor(value)
except (ValueError, TypeError), detail:
raise detail.__class__('(option: "%s"; value: %r)\n%s'
% (name, value, ' '.join(detail.args)))
return options
class NameValueError(DataError): pass
def decode_path(path):
"""
Decode file/path string. Return `nodes.reprunicode` object.
Convert to Unicode without the UnicodeDecode error of the
implicit 'ascii:strict' decoding.
"""
# see also http://article.gmane.org/gmane.text.docutils.user/2905
try:
path = path.decode(sys.getfilesystemencoding(), 'strict')
except AttributeError: # default value None has no decode method
return nodes.reprunicode(path)
except UnicodeDecodeError:
try:
path = path.decode('utf-8', 'strict')
except UnicodeDecodeError:
path = path.decode('ascii', 'replace')
return nodes.reprunicode(path)
def extract_name_value(line):
"""
Return a list of (name, value) from a line of the form "name=value ...".
:Exception:
`NameValueError` for invalid input (missing name, missing data, bad
quotes, etc.).
"""
attlist = []
while line:
equals = line.find('=')
if equals == -1:
raise NameValueError('missing "="')
attname = line[:equals].strip()
if equals == 0 or not attname:
raise NameValueError(
'missing attribute name before "="')
line = line[equals+1:].lstrip()
if not line:
raise NameValueError(
'missing value after "%s="' % attname)
if line[0] in '\'"':
endquote = line.find(line[0], 1)
if endquote == -1:
raise NameValueError(
'attribute "%s" missing end quote (%s)'
% (attname, line[0]))
if len(line) > endquote + 1 and line[endquote + 1].strip():
raise NameValueError(
'attribute "%s" end quote (%s) not followed by '
'whitespace' % (attname, line[0]))
data = line[1:endquote]
line = line[endquote+1:].lstrip()
else:
space = line.find(' ')
if space == -1:
data = line
line = ''
else:
data = line[:space]
line = line[space+1:].lstrip()
attlist.append((attname.lower(), data))
return attlist
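# Example (derived from the parsing rules above):
#   extract_name_value('width=120 alt="a tree"')
#   -> [('width', '120'), ('alt', 'a tree')]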
def new_reporter(source_path, settings):
"""
Return a new Reporter object.
:Parameters:
        `source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings.
"""
reporter = Reporter(
source_path, settings.report_level, settings.halt_level,
stream=settings.warning_stream, debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
return reporter
def new_document(source_path, settings=None):
"""
Return a new empty document object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings. If none are provided, a default core set will
be used. If you will use the document object with any Docutils
components, you must provide their default settings as well. For
example, if parsing, at least provide the parser settings,
obtainable as follows::
settings = docutils.frontend.OptionParser(
components=(docutils.parsers.rst.Parser,)
).get_default_values()
"""
from docutils import frontend
if settings is None:
settings = frontend.OptionParser().get_default_values()
source_path = decode_path(source_path)
reporter = new_reporter(source_path, settings)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
textnode = paragraph[0]
for pattern, substitution in keyword_substitutions:
match = pattern.search(textnode)
if match:
paragraph[0] = nodes.Text(pattern.sub(substitution, textnode))
return
def relative_path(source, target):
"""
Build and return a path to `target`, relative to `source` (both files).
If there is no common prefix, return the absolute path to `target`.
"""
source_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
target_parts = os.path.abspath(target).split(os.sep)
# Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
if source_parts[:2] != target_parts[:2]:
# Nothing in common between paths.
# Return absolute path, using '/' for URLs:
return '/'.join(target_parts)
source_parts.reverse()
target_parts.reverse()
while (source_parts and target_parts
and source_parts[-1] == target_parts[-1]):
# Remove path components in common:
source_parts.pop()
target_parts.pop()
target_parts.reverse()
parts = ['..'] * (len(source_parts) - 1) + target_parts
return '/'.join(parts)
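# Example (illustrative):
#   relative_path('docs/a/index.txt', 'docs/b/style.css') -> '../b/style.css'
# With no common prefix at all, the absolute target path is returned with '/' separators.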
def get_stylesheet_reference(settings, relative_to=None):
"""
Retrieve a stylesheet reference from the settings object.
Deprecated. Use get_stylesheet_reference_list() instead to
enable specification of multiple stylesheets as a comma-separated
list.
"""
if settings.stylesheet_path:
assert not settings.stylesheet, (
'stylesheet and stylesheet_path are mutually exclusive.')
if relative_to == None:
relative_to = settings._destination
return relative_path(relative_to, settings.stylesheet_path)
else:
return settings.stylesheet
# Return 'stylesheet' or 'stylesheet_path' arguments as list.
#
# The original settings arguments are kept unchanged: you can test
# with e.g. ``if settings.stylesheet_path:``
#
# Differences to ``get_stylesheet_reference``:
# * return value is a list
# * no re-writing of the path (and therefore no optional argument)
# (if required, use ``utils.relative_path(source, target)``
# in the calling script)
def get_stylesheet_list(settings):
"""
Retrieve list of stylesheet references from the settings object.
"""
assert not (settings.stylesheet and settings.stylesheet_path), (
'stylesheet and stylesheet_path are mutually exclusive.')
if settings.stylesheet_path:
sheets = settings.stylesheet_path.split(",")
elif settings.stylesheet:
sheets = settings.stylesheet.split(",")
else:
sheets = []
    # strip whitespace (frequently occurring in config files)
return [sheet.strip(u' \t\n\r') for sheet in sheets]
def get_trim_footnote_ref_space(settings):
"""
Return whether or not to trim footnote space.
If trim_footnote_reference_space is not None, return it.
If trim_footnote_reference_space is None, return False unless the
footnote reference style is 'superscript'.
"""
if settings.trim_footnote_reference_space is None:
return hasattr(settings, 'footnote_references') and \
settings.footnote_references == 'superscript'
else:
return settings.trim_footnote_reference_space
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
its closest ancestor.
"""
while node:
if node.source or node.line:
return node.source, node.line
node = node.parent
return None, None
def escape2null(text):
"""Return a string with escape-backslashes converted to nulls."""
parts = []
start = 0
while 1:
found = text.find('\\', start)
if found == -1:
parts.append(text[start:])
return ''.join(parts)
parts.append(text[start:found])
parts.append('\x00' + text[found+1:found+2])
start = found + 2 # skip character after escape
def unescape(text, restore_backslashes=0):
"""
Return a string with nulls removed or restored to backslashes.
Backslash-escaped spaces are also removed.
"""
if restore_backslashes:
return text.replace('\x00', '\\')
else:
for sep in ['\x00 ', '\x00\n', '\x00']:
text = ''.join(text.split(sep))
return text
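# Example (illustrative): escape2null(r'a\*b') == 'a\x00*b', and
# unescape('a\x00*b') == 'a*b'; with restore_backslashes=1 the backslash is restored.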
east_asian_widths = {'W': 2, # Wide
'F': 2, # Full-width (wide)
'Na': 1, # Narrow
'H': 1, # Half-width (narrow)
'N': 1, # Neutral (not East Asian, treated as narrow)
                     'A': 1}   # Ambiguous (should be wide in East Asian context,
                               # narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def east_asian_column_width(text):
if isinstance(text, unicode):
total = 0
for c in text:
total += east_asian_widths[unicodedata.east_asian_width(c)]
return total
else:
return len(text)
if hasattr(unicodedata, 'east_asian_width'):
column_width = east_asian_column_width
else:
column_width = len
def uniq(L):
r = []
for item in L:
if not item in r:
r.append(item)
return r
class DependencyList:
"""
List of dependencies, with file recording support.
Note that the output file is not automatically closed. You have
to explicitly call the close() method.
"""
def __init__(self, output_file=None, dependencies=[]):
"""
Initialize the dependency list, automatically setting the
output file to `output_file` (see `set_output()`) and adding
all supplied dependencies.
"""
self.set_output(output_file)
for i in dependencies:
self.add(i)
def set_output(self, output_file):
"""
Set the output file and clear the list of already added
dependencies.
`output_file` must be a string. The specified file is
immediately overwritten.
If output_file is '-', the output will be written to stdout.
If it is None, no file output is done when calling add().
"""
self.list = []
if output_file == '-':
self.file = sys.stdout
elif output_file:
self.file = open(output_file, 'w')
else:
self.file = None
def add(self, *filenames):
"""
If the dependency `filename` has not already been added,
append it to self.list and print it to self.file if self.file
is not None.
"""
for filename in filenames:
if not filename in self.list:
self.list.append(filename)
if self.file is not None:
print >>self.file, filename
def close(self):
"""
Close the output file.
"""
self.file.close()
self.file = None
def __repr__(self):
if self.file:
output_file = self.file.name
else:
output_file = None
return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
|
apache-2.0
|
AhmadHamzeei/Amir-Accounting
|
amir/factor.py
|
1
|
1168
|
class factor:
number = None # factor number
bill_id = None
trans_id = None
dirty = False
## type of factor can be sell, buy
facotr_type = "sell"
def __init__(self, number):
if(number):
            # get bill_id and trans_id and type from database
            pass
    ## set current factor details
#
# @param detail dictionary contains details
# \note keys:
def set_detail(self, detail):
self.type=type
## add a new product to list of products.
#
# it doesn't save to db you should call factor::save to save all products at once
def add_product(self, product_id, count, price, discount, description):
pass
def add_noncash_peyment(self, cheque_id, description):
pass
##save or update the factor row and set bill_id and transaction id and return factor number
#
# do not call any other function after save
def save(self):
pass
##return all factors
def get_product(self):
pass
def get_noncash_peyment(self):
pass
## return factor detail
#
# @return dictionary of details
def get_detail(self, number=0):
pass
|
gpl-3.0
|
orlenko/bccf
|
src/mezzanine/blog/management/commands/import_tumblr.py
|
3
|
5052
|
from datetime import datetime
from optparse import make_option
from time import sleep
from urllib import urlopen
from django.core.management.base import CommandError
from django.utils.html import strip_tags
try:
from json import loads
except ImportError: # Python < 2.6
from django.utils.simplejson import loads
from mezzanine.blog.management.base import BaseImporterCommand
MAX_POSTS_PER_CALL = 20 # Max number of posts Tumblr API will return per call.
MAX_RETRIES_PER_CALL = 3 # Max times to retry API call after failing.
SLEEP_PER_RETRY = 3 # Seconds to pause for between retries.
def title_from_content(content):
"""
    Try and extract the first sentence from a block of text to use as a title.
"""
for end in (". ", "?", "!", "<br />", "\n", "</p>"):
if end in content:
content = content.split(end)[0] + end
break
return strip_tags(content)
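# Example (illustrative): title_from_content("<p>First point. Second point.</p>")
# returns "First point. " -- the text up to and including the first sentence break,
# with HTML tags stripped.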
class Command(BaseImporterCommand):
"""
Import Tumblr blog posts into the blog app.
"""
option_list = BaseImporterCommand.option_list + (
make_option("-t", "--tumblr-user", dest="tumblr_user",
help="Tumblr username"),
)
help = "Import Tumblr blog posts into the blog app."
def handle_import(self, options):
tumblr_user = options.get("tumblr_user")
if tumblr_user is None:
raise CommandError("Usage is import_tumblr %s" % self.args)
verbosity = int(options.get("verbosity", 1))
json_url = "http://%s.tumblr.com/api/read/json" % tumblr_user
json_start = "var tumblr_api_read ="
date_format = "%a, %d %b %Y %H:%M:%S"
start_index = 0
while True:
retries = MAX_RETRIES_PER_CALL
try:
call_url = "%s?start=%s" % (json_url, start_index)
if verbosity >= 2:
print "Calling %s" % call_url
response = urlopen(call_url)
if response.code == 404:
raise CommandError("Invalid Tumblr user.")
elif response.code == 503:
# The Tumblr API is frequently unavailable so make a
# few tries, pausing between each.
retries -= 1
if not retries:
error = "Tumblr API unavailable, try again shortly."
raise CommandError(error)
sleep(3)
continue
elif response.code != 200:
raise IOError("HTTP status %s" % response.code)
except IOError, e:
error = "Error communicating with Tumblr API (%s)" % e
raise CommandError(error)
data = response.read()
json = loads(data.split(json_start, 1)[1].strip().rstrip(";"))
posts = json["posts"]
start_index += MAX_POSTS_PER_CALL
for post in posts:
                handler = getattr(self, "handle_%s_post" % post["type"], None)
if handler is not None:
title, content = handler(post)
pub_date = datetime.strptime(post["date"], date_format)
self.add_post(title=title, content=content,
pub_date=pub_date, tags=post.get("tags"),
old_url=post["url-with-slug"])
if len(posts) < MAX_POSTS_PER_CALL:
break
def handle_regular_post(self, post):
return post["regular-title"], post["regular-body"]
def handle_link_post(self, post):
title = post["link-text"]
content = ('<p><a href="%(link-url)s">%(link-text)s</a></p>'
'%(link-description)s') % post
return title, content
def handle_quote_post(self, post):
title = post["quote-text"]
content = ("<blockquote>%(quote-text)s</blockquote>"
"<p>%(quote-source)s</p>") % post
return title, content
def handle_photo_post(self, post):
title = title_from_content(post["photo-caption"])
content = '<p><img src="%(photo-url-400)s"></p>%(photo-caption)s'
content = content % post
return title, content
def handle_conversation_post(self, post):
title = post["conversation-title"]
content = post["conversation-text"].replace("\n", "<br />")
content = "<p>%s</p>" % content
return title, content
def handle_video_post(self, post):
title = title_from_content(post["video-caption"])
content = "<p>%(video-player)s</p>" % post
return title, content
def handle_audio_post(self, post):
title = post.get("id3-title")
content = "%(audio-caption)s<p>%(audio-player)s</p>" % post
if not title:
title = title_from_content(post["audio-caption"])
content = "<p>%(audio-player)s</p>" % post
return title, content
def handle_answer_post(self, post):
return post["question"], post["answer"]
|
unlicense
|
TheSimoms/Felleshoelet
|
spotifyconnector/venv/lib/python3.6/site-packages/pip/_vendor/packaging/utils.py
|
62
|
1520
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import re
from .version import InvalidVersion, Version
_canonicalize_regex = re.compile(r"[-_.]+")
def canonicalize_name(name):
# This is taken from PEP 503.
return _canonicalize_regex.sub("-", name).lower()
def canonicalize_version(version):
"""
    This is very similar to Version.__str__, but has one subtle difference in
    the way it handles the release segment.
"""
try:
version = Version(version)
except InvalidVersion:
# Legacy versions cannot be normalized
return version
parts = []
# Epoch
if version.epoch != 0:
parts.append("{0}!".format(version.epoch))
# Release segment
# NB: This strips trailing '.0's to normalize
parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release)))
# Pre-release
if version.pre is not None:
parts.append("".join(str(x) for x in version.pre))
# Post-release
if version.post is not None:
parts.append(".post{0}".format(version.post))
# Development release
if version.dev is not None:
parts.append(".dev{0}".format(version.dev))
# Local version segment
if version.local is not None:
parts.append("+{0}".format(version.local))
return "".join(parts)
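# Examples (illustrative):
#   canonicalize_name("Django_REST-framework")  ->  "django-rest-framework"
#   canonicalize_version("1.0.0")               ->  "1"   (trailing ".0" release segments stripped)
#   canonicalize_version("not a version")       ->  "not a version"   (legacy versions pass through unchanged)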
|
gpl-2.0
|
elijah513/ice
|
python/test/Ice/inheritance/Server.py
|
3
|
1086
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice
Ice.loadSlice('Test.ice')
import Test, TestI
def run(args, communicator):
communicator.getProperties().setProperty("TestAdapter.Endpoints", "default -p 12010")
adapter = communicator.createObjectAdapter("TestAdapter")
object = TestI.InitialI(adapter)
adapter.add(object, communicator.stringToIdentity("initial"))
adapter.activate()
communicator.waitForShutdown()
return True
communicator = None  # keep the name bound so the cleanup below works even if initialize() raises
try:
communicator = Ice.initialize(sys.argv)
status = run(sys.argv, communicator)
except:
traceback.print_exc()
status = False
if communicator:
try:
communicator.destroy()
except:
traceback.print_exc()
status = False
sys.exit(not status)
|
gpl-2.0
|
balloob/python-wink
|
src/pywink/test/devices/sensor_test.py
|
3
|
11909
|
import json
import os
import unittest
from unittest.mock import MagicMock
from pywink.api import get_devices_from_response_dict
from pywink.devices import types as device_types
from pywink.devices.piggy_bank import WinkPorkfolioBalanceSensor
from pywink.devices.smoke_detector import WinkSmokeDetector, WinkCoDetector, WinkSmokeSeverity, WinkCoSeverity
class SensorTests(unittest.TestCase):
def setUp(self):
super(SensorTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_capability_should_not_be_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.SENSOR_POD)
for device in devices:
self.assertIsNotNone(device.capability())
def test_tamper_detected_should_be_false(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.SENSOR_POD)
for device in devices:
self.assertFalse(device.tamper_detected())
def test_unit_is_valid(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.SENSOR_POD)
for device in devices:
if device.unit_type() == "boolean":
self.assertIsNone(device.unit())
else:
self.assertIsNotNone(device.unit())
class EggtrayTests(unittest.TestCase):
def setUp(self):
super(EggtrayTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_state_should_be_2(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.EGGTRAY)
for device in devices:
self.assertEqual(device.state(), 2)
def test_capability_is_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.EGGTRAY)
for device in devices:
self.assertEqual(device.capability(), None)
def test_unit_is_eggs(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.EGGTRAY)
for device in devices:
self.assertEqual(device.unit(), "eggs")
def test_eggs(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.EGGTRAY)
for device in devices:
for egg in device.eggs():
try:
val = float(egg)
except ValueError:
self.fail("test_eggs raised ValueError unexpectedly.")
class KeyTests(unittest.TestCase):
def setUp(self):
super(KeyTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_state_should_be_false(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.KEY)
self.assertEqual(len(devices), 1)
for device in devices:
self.assertFalse(device.state())
def test_parent_id_should_not_be_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.KEY)
for device in devices:
self.assertIsNotNone(device.parent_id())
    def test_available_is_true(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.KEY)
for device in devices:
self.assertTrue(device.available())
def test_capability_is_activity_detected(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.KEY)
for device in devices:
self.assertEqual(device.capability(), "activity_detected")
def test_unit_is_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.KEY)
for device in devices:
self.assertIsNone(device.unit())
class PorkfolioTests(unittest.TestCase):
def setUp(self):
super(PorkfolioTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_unit_is_usd(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.PIGGY_BANK)
self.assertEqual(len(devices), 2)
for device in devices:
if isinstance(device, WinkPorkfolioBalanceSensor):
self.assertEqual(device.unit(), "USD")
def test_capability_is_balance(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.PIGGY_BANK)
for device in devices:
if isinstance(device, WinkPorkfolioBalanceSensor):
self.assertEqual(device.capability(), "balance")
def test_state_is_180(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.PIGGY_BANK)
for device in devices:
if isinstance(device, WinkPorkfolioBalanceSensor):
self.assertEqual(device.state(), 180)
def test_available_is_true(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.PIGGY_BANK)
for device in devices:
if isinstance(device, WinkPorkfolioBalanceSensor):
self.assertTrue(device.available())
class GangTests(unittest.TestCase):
def setUp(self):
super(GangTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_unit_is_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.GANG)
for device in devices:
self.assertIsNone(device.unit())
class SmokeDetectorTests(unittest.TestCase):
def setUp(self):
super(SmokeDetectorTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_test_activated_is_false(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.SMOKE_DETECTOR)
for device in devices:
self.assertFalse(device.test_activated())
def test_unit_is_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.SMOKE_DETECTOR)
for device in devices:
if isinstance(device, WinkSmokeDetector):
self.assertIsNone(device.unit())
self.assertEqual(device.unit_type(), "boolean")
if isinstance(device, WinkCoDetector):
self.assertIsNone(device.unit())
self.assertEqual(device.unit_type(), "boolean")
if isinstance(device, WinkSmokeSeverity):
self.assertIsNone(device.unit())
self.assertIsNone(device.unit_type())
if isinstance(device, WinkCoSeverity):
self.assertIsNone(device.unit())
self.assertIsNone(device.unit_type())
class RemoteTests(unittest.TestCase):
def setUp(self):
super(RemoteTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_buttons_press_is_false(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.REMOTE)
remote = devices[0]
self.assertFalse(remote.button_on_pressed())
self.assertFalse(remote.button_off_pressed())
self.assertFalse(remote.button_up_pressed())
self.assertFalse(remote.button_down_pressed())
def test_unit_and_capability(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.REMOTE)
remote = devices[0]
self.assertIsNone(remote.unit())
self.assertEqual(remote.capability(), "opened")
class PropaneTankTests(unittest.TestCase):
def setUp(self):
super(PropaneTankTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_unit_and_capability(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.PROPANE_TANK)
tank = devices[0]
self.assertIsNone(tank.unit())
self.assertIsNone(tank.capability())
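# --- Editor's sketch (not part of the original test suite) ------------------
# Every TestCase above repeats the same fixture-loading loop in setUp(). If
# that duplication ever becomes a burden, a module-level helper along these
# lines could be shared instead (the helper name is hypothetical):
def load_fixture_response_dict():
    """Load every JSON fixture under api_responses/ into a response dict."""
    fixture_dir = '{}/api_responses/'.format(os.path.dirname(__file__))
    device_list = []
    for json_file in os.listdir(fixture_dir):
        path = os.path.join(fixture_dir, json_file)
        if os.path.isfile(path):
            with open(path) as _json_file:
                device_list.append(json.load(_json_file))
    return {"data": device_list}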
|
mit
|
peterm-itr/edx-platform
|
lms/djangoapps/courseware/tests/test_video_xml.py
|
13
|
3186
|
# -*- coding: utf-8 -*-
# pylint: disable=protected-access
"""Test for Video Xmodule functional logic.
These test data read from xml, not from mongo.
We have a ModuleStoreTestCase class defined in
common/lib/xmodule/xmodule/modulestore/tests/django_utils.py.
You can search for usages of this in the cms and lms tests for examples.
You use this so that it will do things like point the modulestore
setting to mongo, flush the contentstore before and after, load the
templates, etc.
You can then use the CourseFactory and XModuleItemFactory as defined in
common/lib/xmodule/xmodule/modulestore/tests/factories.py to create the
course, section, subsection, unit, etc.
"""
from xmodule.video_module import VideoDescriptor
from xmodule.tests import get_test_system, LogicTest, get_test_descriptor_system
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg"
sub="a_sub_file.srt.sjson"
download_video="true"
start_time="01:00:03" end_time="01:00:10"
>
<source src="example.mp4"/>
<source src="example.webm"/>
<transcript language="uk" src="ukrainian_translation.srt" />
</video>
"""
class VideoModuleLogicTest(LogicTest):
"""Tests for logic of Video Xmodule."""
descriptor_class = VideoDescriptor
raw_field_data = {
'data': '<video />'
}
def test_parse_youtube(self):
"""Test parsing old-style Youtube ID strings into a dict."""
youtube_str = '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',
'1.00': 'ZwkTiUPN0mg',
'1.25': 'rsq9auxASqI',
'1.50': 'kMyNdzVHHgg'})
def test_parse_youtube_one_video(self):
"""
Ensure that all keys are present and missing speeds map to the
empty string.
"""
youtube_str = '0.75:jNCf2gIqpeE'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',
'1.00': '',
'1.25': '',
'1.50': ''})
def test_parse_youtube_key_format(self):
"""
Make sure that inconsistent speed keys are parsed correctly.
"""
youtube_str = '1.00:p2Q6BrNhdh8'
youtube_str_hack = '1.0:p2Q6BrNhdh8'
self.assertEqual(
VideoDescriptor._parse_youtube(youtube_str),
VideoDescriptor._parse_youtube(youtube_str_hack)
)
def test_parse_youtube_empty(self):
"""
Some courses have empty youtube attributes, so we should handle
that well.
"""
self.assertEqual(VideoDescriptor._parse_youtube(''),
{'0.75': '',
'1.00': '',
'1.25': '',
'1.50': ''})
|
agpl-3.0
|